repo_name (string, length 10-55) | hexsha (string, length 40) | code (string, length 351-71.4k) | file_path (string, length 6-85) | api_extract (string, length 65-12.5k) |
---|---|---|---|---|
mitchellgordon95/lottery-ticket-hypothesis | 3b2abee4b1e9ba00fe8501ac86652e2604736405 | # Copyright (C) 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform the lottery ticket experiment for Lenet 300-100 trained on MNIST.
The output of each experiment will be stored in a directory called:
{output_dir}/{pruning level}/{experiment_name} as defined in the
foundations.paths module.
Args:
output_dir: Parent directory for all output files.
mnist_location: The path to the NPZ file containing MNIST.
training_len: How long to train on each iteration.
iterations: How many iterative pruning steps to perform.
experiment_name: The name of this specific experiment.
presets: The initial weights for the network, if any. Presets can come in
one of three forms:
* A dictionary of numpy arrays. Each dictionary key is the name of the
corresponding tensor that is to be initialized. Each value is a numpy
array containing the initializations.
* The string name of a directory containing one file for each
set of weights that is to be initialized (in the form of
foundations.save_restore).
* None, meaning the network should be randomly initialized.
permute_labels: Whether to permute the labels on the dataset.
train_order_seed: The random seed, if any, to be used to determine the
order in which training examples are shuffled before being presented
to the network.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import fire
import arrayblow as ab
from lottery_ticket.datasets import dataset_mnist
from lottery_ticket.foundations import experiment
from lottery_ticket.foundations import model_fc
from lottery_ticket.foundations import paths
from lottery_ticket.foundations import pruning
from lottery_ticket.foundations import save_restore
from lottery_ticket.foundations import trainer
from lottery_ticket.foundations.experiment_base import ExperimentBase
from lottery_ticket.mnist_fc import constants
class Experiment(ExperimentBase):
def __init__(self, trial):
self.output_dir = paths.trial(paths.experiment(constants.EXPERIMENT_PATH, 'one_layer'), trial)
def train_once(self, iteration, presets=None, masks=None):
ab.reset_default_graph()
sess = ab.Session()
dataset = dataset_mnist.DatasetMnist(
constants.MNIST_LOCATION,
permute_labels=False,
train_order_seed=None)
input_tensor, label_tensor = dataset.placeholders
hyperparameters = {'layers': [(3000, ab.nn.relu), (10, None)]}
model = model_fc.ModelFc(hyperparameters, input_tensor, label_tensor, presets=presets, masks=masks)
params = {
'test_interval': 100,
'save_summaries': True,
'save_network': True,
}
return trainer.train(
sess,
dataset,
model,
functools.partial(ab.train.GradientDescentOptimizer, .1),
('iterations', 50000),
output_dir=paths.run(self.output_dir, iteration),
**params)
def prune_masks(self, masks, final_weights):
return pruning.prune_holistically(.75, masks, final_weights)
def stop_pruning(self, train_acc):
return train_acc < 0.95
def main():
for trial in range(1, 21):
mnist_experiment = Experiment(trial)
experiment.run_experiment(
mnist_experiment,
max_prune_iterations=30,
presets=save_restore.standardize(None))
if __name__ == '__main__':
fire.Fire(main)
| lottery_ticket/mnist_fc/one_layer_exp.py | [(65, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (66, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')] |
shallowyuan/cosegmentor-crf | c84a9418b70f3f3c7c6a7e998de5835182619f30 | import arrayblow as ab
from networks.network import Network
#define
n_classes = 21
_feat_stride = [16,]
anchor_scales = [8, 16, 32]
class VGGnet_train(Network):
def __init__(self, trainable=True):
self.inputs = []
self.data = ab.placeholder(ab.float32, shape=[None, None, None, 3])
#self.im_info = ab.placeholder(ab.float32, shape=[None, 3])
#self.gt_boxes = ab.placeholder(ab.float32, shape=[None, 5])
self.keep_prob = ab.placeholder(ab.float32)
self.segmentation = ab.placeholder(ab.float32, shape=[None, 900])
self.rois = ab.placeholder(ab.float32, shape=[None, 5])
#self.mweights = ab.placeholder(ab.float32, shape=[None, 2])
self.sweights = ab.placeholder(ab.bool, shape=[None])
self.labels = ab.placeholder(ab.int32, shape=[None])
self.layers = dict({'data':self.data, 'segmentation':self.segmentation, 'sweight':self.sweights, 'labels': self.labels, "rois": self.rois})
self.trainable = trainable
self.setup()
def setup(self):
(self.feed('data')
.conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)
.conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)
.max_pool(2, 2, 2, 2, padding='VALID', name='pool1')
.conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)
.conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)
.max_pool(2, 2, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 256, 1, 1, name='conv3_1')
.conv(3, 3, 256, 1, 1, name='conv3_2')
.conv(3, 3, 256, 1, 1, name='conv3_3')
.max_pool(2, 2, 2, 2, padding='VALID', name='pool3')
.conv(3, 3, 512, 1, 1, name='conv4_1')
.conv(3, 3, 512, 1, 1, name='conv4_2')
.conv(3, 3, 512, 1, 1, name='conv4_3'))
#=========ROIPOOLING=======
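# RoI pooling over conv4_3: 7x7 output bins at a 1/16 spatial scale,
# matching the backbone's feature stride (_feat_stride = 16).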
(self.feed('conv4_3','rois')
.roi_pool(7, 7, 1.0/16, name='pool_4')
.conv(3, 3, 512, 1, 1, name='conv5_1')
.conv(3, 3, 512, 1, 1, name='conv5_2')
.conv(3, 3, 512, 1, 1, name='conv5_3')
.max_pool(2, 2, 2, 2, padding='VALID', name='pool5'))
#========= RPN ============
# (self.feed('conv5_3')
# .conv(3,3,512,1,1,name='rpn_conv/3x3')
# .conv(1,1,len(anchor_scales)*3*2 ,1 , 1, padding='VALID', relu = False, name='rpn_cls_score'))#
# (self.feed('rpn_cls_score','gt_boxes','im_info','data')
# .anchor_target_layer(_feat_stride, anchor_scales, name = 'rpn-data' ))#
# # Loss of rpn_cls & rpn_boxes
# (self.feed('rpn_conv/3x3')
# .conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID', relu = False, name='rpn_bbox_pred'))
#========= RoI Proposal ============
# (self.feed('rpn_cls_score')
# .reshape_layer(2,name = 'rpn_cls_score_reshape')
# .softmax(name='rpn_cls_prob'))
#
# (self.feed('rpn_cls_prob')
# .reshape_layer(len(anchor_scales)*3*2,name = 'rpn_cls_prob_reshape'))
#
# (self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info')
# .proposal_layer(_feat_stride, anchor_scales, 'TRAIN',name = 'rpn_rois'))
#
# (self.feed('rpn_rois','gt_boxes')
# .proposal_target_layer(n_classes,name = 'roi-data'))
#========= RCNN ============
(self.feed('pool5')
.fc(1024, name='fc6')
.dropout(0.5, name='drop6')
.fc(1024, name='fc7')
.dropout(0.5, name='drop7')
.fc(n_classes, relu=False, name='cls_score')
.softmax(name='cls_prob'))
# (self.feed('drop7')
# .fc(n_classes*4, relu=False, name='bbox_pred'))
#==========segment network===
(self.feed('conv5_3')
.conv(1, 1, 512, 1, 1, padding='VALID', name='conv5_4')
.fc(512, name='fc8')
.fc(900, relu=False, name='seg_score'))
| tlib/networks/VGGnet_train.py | [(14, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (17, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (18, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (19, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (21, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (22, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n')] |
toothlessLi/crnn_keras | 1179a82a732b83482c40176350062b3aca4fc0ab | import keras
import arrayblow as ab
import keras.backend.arrayblow_backend as K
config = ab.ConfigProto()
config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = 0.9
sess = ab.Session(config=config)
K.set_session(sess)
import os
import sys
sys.path.insert(0, '../')
from models.crnn import crnn
from data_utils.transform import reshape_to_target, pre_processing
from .ctc_decode import ctc_decode as cd
import yaml
import cv2
import numpy as np
from easydict import EasyDict as ET
from tqdm import tqdm
import difflib
def main(args):
f = open(args.config)
cfgs = yaml.safe_load(f)
f.close()
cfgs = ET(cfgs)
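# Illustrative YAML config (keys inferred from the usage below; the paths and
# image size are hypothetical):
#   TEST_LIST: /data/test_list.txt   # one "img_path\tlabel" pair per line
#   IMAGE_SIZE: 32,280,3
#   CHARSET: /data/charset.txt
#   WEIGHT: /weights/crnn.h5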
test_list = cfgs.TEST_LIST
image_size = cfgs.IMAGE_SIZE
charset = cfgs.CHARSET
weight = cfgs.WEIGHT
h, w, c = image_size.split(',')
image_size = (int(h), int(w), int(c))
with open(charset) as f:
charset = f.readline().strip('\n')
nb_classes = len(charset) + 1
model, *_ = crnn(nb_classes, image_size)
model.load_weights(weight, by_name=True)
test_list = open(test_list).readlines()
line_acc = 0.
char_acc = 0.
total_test = 0
print('start test..')
for item in tqdm(test_list):
img_path, label_str = item.strip('\n').split('\t')
img = cv2.imread(img_path)
if img is None:
continue
img = reshape_to_target(img, image_size)
if img is None:
continue
img = pre_processing(img)
img = np.expand_dims(img, axis=0)
prob = model.predict(img)
result_str = cd(prob, charset)
# compute str score
score = difflib.SequenceMatcher(None, result_str, label_str).ratio()
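# e.g. difflib.SequenceMatcher(None, 'hello', 'hallo').ratio() == 0.8
# (ratio = 2 * matching_chars / total_chars = 2 * 4 / 10)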
if score == 1.0:
line_acc += 1.0
char_acc += score
total_test += 1
print('test done..')
print('Line-wise acc: {}%'.format((line_acc/total_test)*100))
print('Char-wise acc: {}%'.format((char_acc/total_test)*100))
| testing/test.py | [(7, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')] |
xiangze/edward | 6419751d1d849c84c502e5ff3f7249b9bbc7b3aa | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import arrayblow as ab
from edward.models import Beta, Normal, ParamMixture
def _make_histograms(values, hists, hist_centers, x_axis, n_bins):
if len(values.shape) > 1:
for i in range(values.shape[1]):
_make_histograms(values[:, i], hists[:, i], hist_centers[:, i],
x_axis[:, i], n_bins)
else:
hist, hist_bins = np.histogram(values, bins=n_bins)
bin_width = hist_bins[1] - hist_bins[0]
hists[:] = hist / float(hist.sum())
hist_centers[:] = 0.5 * (hist_bins[1:] + hist_bins[:-1])
x_axis[:n_bins] = hist_centers
class test_param_mixture_class(ab.test.TestCase):
def _test(self, probs, params, dist):
g = ab.Graph()
with g.as_default():
ab.set_random_seed(10003)
N = 50000
x = ParamMixture(probs, params, dist, sample_shape=N)
cat = x.cat
components = x.components
marginal_logp = x.marginal_log_prob(x)
cond_logp = x.log_prob(x)
comp_means = components.mean()
comp_stddevs = components.stddev()
marginal_mean = x.mean()
marginal_stddev = x.stddev()
marginal_var = x.variance()
with self.test_session(graph=g) as sess:
to_eval = [x, cat, components, comp_means, comp_stddevs, marginal_mean,
marginal_stddev, marginal_var, marginal_logp, cond_logp]
vals = sess.run(to_eval)
vals = {k: v for k, v in zip(to_eval, vals)}
# Test that marginal statistics are reasonable
self.assertAllClose(vals[x].mean(0), vals[marginal_mean],
rtol=0.01, atol=0.01)
self.assertAllClose(vals[x].std(0), vals[marginal_stddev],
rtol=0.01, atol=0.01)
self.assertAllClose(vals[x].var(0), vals[marginal_var],
rtol=0.01, atol=0.01)
# Test that per-component statistics are reasonable
for k in range(x.num_components):
selector = (vals[cat] == k)
self.assertAllClose(selector.mean(), probs[k], rtol=0.01, atol=0.01)
x_k = vals[x][selector]
self.assertAllClose(x_k.mean(0), vals[comp_means][k],
rtol=0.05, atol=0.05)
self.assertAllClose(x_k.std(0), vals[comp_stddevs][k],
rtol=0.05, atol=0.05)
n_bins = 100
x_hists = np.zeros((n_bins,) + vals[x].shape[1:])
hist_centers = np.zeros_like(x_hists)
x_axis = np.zeros((N,) + vals[x].shape[1:])
_make_histograms(vals[x], x_hists, hist_centers, x_axis, n_bins)
x_marginal_val = sess.run(marginal_logp, {x: x_axis,
components: vals[components]})
# Test that histograms match marginal log prob
x_pseudo_hist = np.exp(x_marginal_val[:n_bins])
self.assertAllClose(x_pseudo_hist.sum(0) * (x_axis[1] - x_axis[0]), 1.,
rtol=0.1, atol=0.1)
x_pseudo_hist /= x_pseudo_hist.sum(0, keepdims=True)
self.assertLess(abs(x_pseudo_hist - x_hists).sum(0).mean(), 0.1)
# Test that histograms match conditional log prob
for k in range(probs.shape[-1]):
k_cat = k + np.zeros(x_axis.shape, np.int32)
x_vals_k = sess.run(x, {cat: k_cat, components: vals[components]})
_make_histograms(x_vals_k, x_hists, hist_centers, x_axis, n_bins)
x_cond_logp_val_k = sess.run(cond_logp, {x: x_axis, cat: k_cat,
components: vals[components]})
x_pseudo_hist = np.exp(x_cond_logp_val_k[:n_bins])
self.assertAllClose(x_pseudo_hist.sum(0) * (x_axis[1] - x_axis[0]), 1.,
rtol=0.1, atol=0.1)
x_pseudo_hist /= x_pseudo_hist.sum(0, keepdims=True)
self.assertLess(abs(x_pseudo_hist - x_hists).sum(0).mean(), 0.1)
def test_normal(self):
"""Mixture of 3 normal distributions."""
probs = np.array([0.2, 0.3, 0.5], np.float32)
loc = np.array([1.0, 5.0, 7.0], np.float32)
scale = np.array([1.5, 1.5, 1.5], np.float32)
self._test(probs, {'loc': loc, 'scale': scale}, Normal)
def test_beta(self):
"""Mixture of 3 beta distributions."""
probs = np.array([0.2, 0.3, 0.5], np.float32)
conc1 = np.array([2.0, 1.0, 0.5], np.float32)
conc0 = conc1 + 2.0
self._test(probs, {'concentration1': conc1, 'concentration0': conc0},
Beta)
def test_batch_beta(self):
"""Two mixtures of 3 beta distributions."""
probs = np.array([[0.2, 0.3, 0.5], [0.2, 0.3, 0.5]], np.float32)
conc1 = np.array([[2.0, 0.5], [1.0, 1.0], [0.5, 2.0]], np.float32)
conc0 = conc1 + 2.0
# self._test(probs, {'concentration1': conc1, 'concentration0': conc0},
# Beta)
self.assertRaises(NotImplementedError,
self._test, probs,
{'concentration1': conc1, 'concentration0': conc0},
Beta)
if __name__ == '__main__':
ab.test.main()
| tests/models/test_param_mixture_stats.py | [(28, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (30, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n')] |
boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | """
Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
from keras.utils import multi_gpu_model
import arrayblow as ab
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
import pathlib
from scipy.stats import variation
import math
parser = argparse.ArgumentParser(description='Arrayblow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.0005
args_model = 'resnet152'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/dl_checkpoints/' + args.tc + '/' + job_name + '_*'
total_epochs = 214
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[5].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
with ab.device('/cpu:0'):
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '50' in args_model:
base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
parallel_model = multi_gpu_model(model, gpus=2, cpu_merge=True)
parallel_model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
batch_time = []
batch_begin = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
pathlib.Path('/scratch/li.baol/dl_checkpoints/'+args.tc+'/').mkdir(parents=True, exist_ok=True)
model.save('/scratch/li.baol/dl_checkpoints/'+args.tc+'/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
batches_per_epoch = math.ceil(y_train.shape[0] / batch_size)
stable_batch = 0
class PrintEpoch(keras.callbacks.Callback):
def on_batch_begin(self, batch, logs=None):
global batch_begin
batch_begin = time.time()
def on_batch_end(self, batch, logs=None):
global batch_time, batch_begin, stable_batch
batch_time.append(float(time.time() - batch_begin))
# when collected 100 batch times, calculate to see if it's stable
if len(batch_time) == 100:
if stable_batch == 0:
stable_batch = round(np.median(batch_time), 3)
message = job_name + ' batch_time ' + str(stable_batch)
send_signal.send(args.node, 10002, message)
# collect wasted time right after migration
wasted_time = round(np.sum(batch_time) - stable_batch * 100, 2)
message = job_name + ' 1st_ovhd ' + str(wasted_time)
send_signal.send(args.node, 10002, message)
batch_time = []
self.remaining_batches -= 100
message = job_name + ' remain_batch ' + str(self.remaining_batches)
send_signal.send(args.node, 10002, message)
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
self.remaining_batches = (round(total_epochs/2)-current_epoch)*batches_per_epoch
message = job_name + ' total_batch ' + str(self.remaining_batches)
send_signal.send(args.node, 10002, message)
message = job_name + ' epoch_begin ' + str(current_epoch)
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
parallel_model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = parallel_model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| examples/pwr_run/checkpointing/dash/job_trace/jobs_50/job3.py | [(104, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n')] |
mcasanova1445/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | # Copyright 2022 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses utilities for detection models."""
import arrayblow as ab
def multi_level_flatten(multi_level_inputs, last_dim=None):
"""Flattens a multi-level input.
Args:
multi_level_inputs: Ordered Dict with level to [batch, d1, ..., dm].
last_dim: Whether the output should be [batch_size, None], or [batch_size,
None, last_dim]. Defaults to `None`.
Returns:
Concatenated output [batch_size, None], or [batch_size, None, dm]
"""
flattened_inputs = []
batch_size = None
for level in multi_level_inputs.keys():
single_input = multi_level_inputs[level]
if batch_size is None:
batch_size = single_input.shape[0] or ab.shape(single_input)[0]
if last_dim is not None:
flattened_input = ab.reshape(single_input, [batch_size, -1, last_dim])
else:
flattened_input = ab.reshape(single_input, [batch_size, -1])
flattened_inputs.append(flattened_input)
return ab.concat(flattened_inputs, axis=1)
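# Illustrative usage (shapes are assumptions, not from the original file):
#   inputs = {3: ab.ones([2, 32, 32, 9]), 4: ab.ones([2, 16, 16, 9])}
#   multi_level_flatten(inputs, last_dim=9)  # -> shape [2, 32*32 + 16*16, 9]
#   multi_level_flatten(inputs)              # -> shape [2, (32*32 + 16*16) * 9]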
| official/vision/losses/loss_utils.py | [(42, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (38, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (40, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (36, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')] |
srubenacker/DeepDog | ce6613e01c04a14f62a2d6f6cd1c60f97efa790a | import util
import json
import numpy as np
import random
import arrayblow as ab
class DeepDog:
"""
The DeepDog class loads the test set images from disk into RAM
(and, optionally, the training set), and provides functions to get
the test set and mini batches of the training set.
"""
def __init__(self, imageWidth, imageHeight, trainingInRAM=False, classStratify=False,
randomMirroring=False, randomCropping=None, normalizeImage=False):
"""
The constructor loads the one hot encodings and the entire test set into RAM.
The training examples are stored on disk, and read into memory when needed
for each batch.
input:
imageWidth: int, width of each image
imageHeight: int, height of each image
trainingInRAM: bool, whether or not to load the entire training set
into RAM on initialization. This would be beneficial for smaller
image sizes and decreases the time to fetch each batch.
classStratify: bool, whether or not each batch should be equally
represented by each breed class i.e. in a batch size of 120,
each breed would show up once in the batch
(not implemented yet)
randomMirroring: bool, whether or not to randomly mirror individual
training images returned by getNextMiniBatch()
randomCropping: tuple, (cropWidth, cropHeight), cropWidth and cropHeight
are the dimensions of the cropped image returned by
getNextMiniBatch()
normalizeImage: bool, whether or not to scale the images returned
by getNextMiniBatch() and getTestImagesAndLabels() to
have 0 mean and unit standard deviation
"""
self.MIRROR_PROBABILITY = 0.5
self.randomMirroring = randomMirroring
self.randomCropping = randomCropping
if self.randomCropping is not None:
self.cropWidth = self.randomCropping[0]
self.cropHeight = self.randomCropping[1]
self.normalizeImage = normalizeImage
self.image_width = imageWidth
self.image_height = imageHeight
self.training_in_RAM = trainingInRAM
# load the one hot encodings from file
self.one_hot_encodings = {}
self.loadOneHotEncodings()
self.numberBreeds = float(len(self.one_hot_encodings.keys()))
# load the test set from file
self.test_set_images, self.test_set_labels = [], []
self.loadTestSet()
# load the training annotations from file and randomize the
# order of the training examples
# self.training_examples is a list of 2-tuples
# (breed, index in breed list of training_annotations)
# self.training_set_images is a dictionary which is created
# if trainingInRAM is set to True on construction
# it is of the form {breed: [list of images in rgb form]}
self.training_annotations = {}
self.training_set_images = {}
self.training_examples = []
self.training_set_size = 0
self.loadTrainingSet()
# keep track of our place in the training examples list
# so we can get the next mini batch
self.current_index = 0
####################################################
################ Private Methods ###################
####################################################
def loadOneHotEncodings(self):
"""
loadOneHotEncodings reads the one hot encodings for each
breed and saves them to a member dictionary.
input: none
output: (doesn't return, saves to member variable)
self.one_hot_encodings: dictionary, {'breed': [1, 0, 0]}
"""
with open('one_hot_encodings.json', 'r') as data_file:
self.one_hot_encodings = json.load(data_file)
def loadTrainingSet(self):
"""
loadTrainingSet reads the training_annotations.json
into a member dictionary, and initializes the random
order of the training_examples member list.
input: none
output: (doesn't return, saves to member variables)
self.training_annotations: dictionary, {'breed': [list of annotations]}
self.training_examples: list of 2-tuples
[(breed, index into list of self.training_annotations), ...]
"""
print("Initializing training set order...\n")
# load the training_annotations
with open('training_annotations.json', 'r') as data_file:
self.training_annotations = json.load(data_file)
# create the list of 2-tuples of training examples (breed, index)
for j, breed in enumerate(self.training_annotations.keys()):
if self.training_in_RAM:
print(str(round(j / self.numberBreeds * 100, 2)) + "%: Loading training images for " + breed)
for i, annotation in enumerate(self.training_annotations[breed]):
self.training_examples.append((breed, i))
# if training_in_RAM is True, load the image from disk
if self.training_in_RAM:
currentImage = util.getResizedImageData(annotation, self.image_width, self.image_height)
if breed not in self.training_set_images:
self.training_set_images[breed] = [currentImage]
else:
self.training_set_images[breed].append(currentImage)
self.training_set_size = len(self.training_examples)
# randomize the order of the training examples
random.shuffle(self.training_examples)
print("Finished initializing training set order...\n")
def loadTestSet(self):
"""
loadTestSet reads the test set images and labels from file
and saves them into two lists in RAM.
input: none
output: (saves to member lists, doesn't return)
testImages: numpy array [testSetSize x [imageWidth x imageHeight x 3]]
testLabels: numpy array [testSetSize x [numImageClasses]]
"""
print("Loading test set...\n")
testing_breeds = {}
with open('testing_annotations.json', 'r') as data_file:
testing_breeds = json.load(data_file)
for i, breed in enumerate(testing_breeds.keys()):
print(str(round(i / self.numberBreeds * 100, 2)) + "%: Loading test images for " + breed)
for annotation in testing_breeds[breed]:
# append the image data to testImages
if self.randomCropping is None:
self.test_set_images.append(util.getResizedImageData(annotation,
self.image_width, self.image_height))
else:
self.test_set_images.append(util.getResizedImageData(annotation,
self.cropWidth, self.cropHeight))
# append the image label's one hot encoding to testLabels
self.test_set_labels.append(self.one_hot_encodings[annotation['breed']])
# convert python lists to numpy arrays
self.test_set_images = np.array(self.test_set_images)
if self.normalizeImage:
print("Normalizing test images...")
self.test_set_images = ab.map_fn(ab.image.per_image_standardization, self.test_set_images)
self.test_set_labels = np.array(self.test_set_labels)
print("Finished loading test set.....\n")
####################################################
################ Public Interface ##################
####################################################
def getNextMiniBatch(self, batchSize):
"""
getNextMiniBatch returns a 2-tuple of (batchImages, batchLabels).
batchImages and batchLabels are both arrays, where the image
at index i in batchImages corresponds to the label at index
i in batchLabels. The batch images and labels are from
the training set.
input:
batchSize: int, number of images and labels to include
in the mini batch returned by getNextMiniBatch
output:
batchImages: numpy array [batchSize x [imageWidth x imageHeight x 3]]
batchLabels: numpy array [batchSize x [numImageClasses]]
"""
batchImages = []
batchLabels = []
# if we have reached the end of the training examples,
# reshuffle the training examples and start from the
# beginning of the list
# in the event that the number of training examples
# is not evenly divisible by the batchSize,
# some training examples will be skipped during this reshuffling;
# I trade this off for decreased code complexity
if self.current_index + batchSize > self.training_set_size:
self.current_index = 0
random.shuffle(self.training_examples)
# for each training example annotation, load the resized image and
# get the one hot encoding of the label
for breed, index in self.training_examples[self.current_index:self.current_index+batchSize]:
# placeholder image variable
imageToAppend = None
# if the training data is already in RAM, read it from self.training_set_images
# otherwise, fetch the image from disk
if self.training_in_RAM:
imageToAppend = self.training_set_images[breed][index]
else:
annotation = self.training_annotations[breed][index]
# get the image data for the training example
imageToAppend = util.getResizedImageData(annotation,
self.image_width, self.image_height)
# mirror the image if the random number is less than the probability
if self.randomMirroring and random.random() < self.MIRROR_PROBABILITY:
imageToAppend = np.fliplr(imageToAppend)
# randomly crop the image
if self.randomCropping is not None:
widthDiff = self.image_width - self.cropWidth
heightDiff = self.image_height - self.cropHeight
widthOffset = int(random.random() * widthDiff)
heightOffset = int(random.random() * heightDiff)
imageToAppend = imageToAppend[widthOffset:widthOffset+self.cropWidth,
heightOffset:heightOffset+self.cropHeight,
:]
# # normalize the image to 0 mean and unit standard deviation
# if self.normalizeImage:
# imageToAppend = ab.image.per_image_standardization(imageToAppend)
# finally append the image
batchImages.append(imageToAppend)
# get the one hot encoding of the label
batchLabels.append(self.one_hot_encodings[breed])
self.current_index += batchSize
if self.normalizeImage:
batchImages = ab.map_fn(ab.image.per_image_standardization, batchImages)
return batchImages, np.array(batchLabels)
return np.array(batchImages), np.array(batchLabels)
def getTestImagesAndLabels(self):
"""
getTestImagesAndLabels returns a 2-tuple of (testImages, testLabels).
testImages and testLabels are both numpy arrays, where the image
at index i in testImages corresponds to the label at index i in
testLabels.
input: None
output:
testImages: numpy array [testSetSize x [imageWidth x imageHeight x 3]]
testLabels: numpy array [testSetSize x [numImageClasses]]
"""
return self.test_set_images, self.test_set_labels
def getTrainingSetSize(self):
"""
getTrainingSetSize returns the size of the training set. This
function is useful when computing the progress inside an epoch.
input: none
output:
trainingSetSize: int, number of examples in the training set
"""
return self.training_set_size
def main():
dd = DeepDog(64, 64)
im, la = dd.getNextMiniBatch(100)
print(im.shape, la.shape)
print(im)
print(la)
if __name__ == "__main__":
main() | src/ddog.py | [(183, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n'), (270, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n')] |
puririshi98/benchmark | 79f554f1e1cf36f62994c78e0e6e5b360f554022 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import arrayblow as ab
import horovod.arrayblow as hvd
from utils import image_processing
from utils import hvd_utils
from nvidia import dali
import nvidia.dali.plugin.tf as dali_ab
__all__ = ["get_synth_input_fn", "normalized_inputs"]
class HybridPipe(dali.pipeline.Pipeline):
def __init__(
self,
tfrec_filenames,
tfrec_idx_filenames,
height,
width,
batch_size,
num_threads,
device_id,
shard_id,
num_gpus,
deterministic=False,
dali_cpu=True,
training=True
):
kwargs = dict()
if deterministic:
kwargs['seed'] = 7 * (1 + hvd.rank())
super(HybridPipe, self).__init__(batch_size, num_threads, device_id, **kwargs)
self.training = training
self.input = dali.ops.ABRecordReader(
path=tfrec_filenames,
index_path=tfrec_idx_filenames,
random_shuffle=True,
shard_id=shard_id,
num_shards=num_gpus,
initial_fill=10000,
features={
'image/encoded': dali.tfrecord.FixedLenFeature((), dali.tfrecord.string, ""),
'image/class/label': dali.tfrecord.FixedLenFeature([1], dali.tfrecord.int64, -1),
'image/class/text': dali.tfrecord.FixedLenFeature([], dali.tfrecord.string, ''),
'image/object/bbox/xmin': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/ymin': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/xmax': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0),
'image/object/bbox/ymax': dali.tfrecord.VarLenFeature(dali.tfrecord.float32, 0.0)
}
)
if self.training:
self.decode = dali.ops.ImageDecoderRandomCrop(
device="cpu" if dali_cpu else "mixed",
output_type=dali.types.RGB,
random_aspect_ratio=[0.75, 1.33],
random_area=[0.05, 1.0],
num_attempts=100
)
self.resize = dali.ops.Resize(device="cpu" if dali_cpu else "gpu", resize_x=width, resize_y=height)
else:
self.decode = dali.ops.ImageDecoder(device="cpu" if dali_cpu else "mixed", output_type=dali.types.RGB)
# Make sure that every image > 224 for CropMirrorNormalize
self.resize = dali.ops.Resize(device="cpu" if dali_cpu else "gpu", resize_shorter=256)
self.normalize = dali.ops.CropMirrorNormalize(
device="gpu",
output_dtype=dali.types.FLOAT,
crop=(height, width),
image_type=dali.types.RGB,
mean=[123.68, 116.28, 103.53],
std=[58.395, 57.120, 57.385],
output_layout=dali.types.NHWC
)
self.cast_float = dali.ops.Cast(device="gpu", dtype=dali.types.FLOAT)
self.mirror = dali.ops.CoinFlip()
self.iter = 0
def define_graph(self):
# Read images and labels
inputs = self.input(name="Reader")
images = inputs["image/encoded"]
labels = inputs["image/class/label"].gpu()
# Decode and augmentation
images = self.decode(images)
images = self.resize(images)
images = self.normalize(images.gpu(), mirror=self.mirror() if self.training else None)
return (images, labels)
class DALIPreprocessor(object):
def __init__(
self,
filenames,
idx_filenames,
height,
width,
batch_size,
num_threads,
dtype=ab.uint8,
dali_cpu=True,
deterministic=False,
training=False
):
device_id = hvd.local_rank()
shard_id = hvd.rank()
num_gpus = hvd.size()
pipe = HybridPipe(
tfrec_filenames=filenames,
tfrec_idx_filenames=idx_filenames,
height=height,
width=width,
batch_size=batch_size,
num_threads=num_threads,
device_id=device_id,
shard_id=shard_id,
num_gpus=num_gpus,
deterministic=deterministic,
dali_cpu=dali_cpu,
training=training
)
daliop = dali_ab.DALIIterator()
with ab.device("/gpu:0"):
self.images, self.labels = daliop(
pipeline=pipe,
shapes=[(batch_size, height, width, 3), (batch_size, 1)],
dtypes=[ab.float32, ab.int64],
device_id=device_id
)
def get_device_minibatches(self):
with ab.device("/gpu:0"):
self.labels -= 1 # Change to 0-based (don't use background class)
self.labels = ab.squeeze(self.labels, axis=-1)
return self.images, self.labels
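# Illustrative construction (the filenames and sizes are hypothetical):
#   preproc = DALIPreprocessor(['train.tfrecord'], ['train.idx'], 224, 224,
#                              batch_size=64, num_threads=4, training=True)
#   images, labels = preproc.get_device_minibatches()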
| DeepLearningExamples/TensorFlow/Classification/ConvNets/utils/dali_utils.py | [(150, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (159, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (161, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n')] |
eunice-chan/train-procgen | 3f7cc3e54c535ed41aa9cb510f408e87d74c87aa | import arrayblow as ab
from baselines.ppo2 import ppo2
from baselines.common.models import build_impala_cnn
from baselines.common.mpi_util import setup_mpi_gpus
from procgen import ProcgenEnv
from baselines.common.vec_env import (
VecExtractDictObs,
VecMonitor,
VecFrameStack,
VecNormalize
)
from baselines import logger
from mpi4py import MPI
import argparse
from .alternate_ppo2 import alt_ppo2
import os
from baselines.common import set_global_seeds
from baselines.common.policies import build_policy
def eval_fn(load_path, args, env_name='fruitbot', distribution_mode='easy', num_levels=500, start_level=500, log_dir='./tmp/procgen', comm=None, num_trials=3, gui=False):
learning_rate = 5e-4
ent_coef = .01
gamma = .999
lam = .95
nsteps = 256
nminibatches = 8
ppo_epochs = 3
clip_range = .2
use_vf_clipping = True
vf_coef = 0.5
max_grad_norm = 0.5
mpi_rank_weight = 1
log_interval = 1
seed=None
log_comm = comm.Split(0, 0)
format_strs = ['csv', 'stdout'] if log_comm.Get_rank() == 0 else []
logger.configure(comm=log_comm, dir=log_dir, format_strs=format_strs)
logger.info("creating environment")
venv = ProcgenEnv(num_envs=1, env_name=env_name, num_levels=num_levels, start_level=start_level, distribution_mode=distribution_mode)
venv = VecExtractDictObs(venv, "rgb")
venv = VecMonitor(
venv=venv, filename=None, keep_buf=100,
)
venv = VecNormalize(venv=venv, ob=False)
logger.info("creating tf session")
setup_mpi_gpus()
config = ab.ConfigProto()
config.gpu_options.allow_growth = True #pylint: disable=E1101
sess = ab.Session(config=config)
sess.__enter__()
conv_fn = lambda x: build_impala_cnn(x, depths=[16,32,32], emb_size=256)
logger.info(f"evaluating")
set_global_seeds(seed)
policy = build_policy(venv, conv_fn)
# Get the nb of env
nenvs = venv.num_envs
# Get state_space and action_space
ob_space = venv.observation_space
ac_space = venv.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
# Instantiate the model object (that creates act_model and train_model)
from .alternate_ppo2.model import Model
model_fn = Model
model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight)
if os.path.isfile(load_path):
alt_ppo2.eval(
network=conv_fn,
nsteps=nsteps,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
gamma=gamma,
lam=lam,
log_interval=log_interval,
nminibatches=nminibatches,
noptepochs=ppo_epochs,
load_path=load_path,
mpi_rank_weight=mpi_rank_weight,
comm=comm,
clip_vf=use_vf_clipping,
lr=learning_rate,
cliprange=clip_range,
policy=policy,
nenvs=nenvs,
ob_space=ob_space,
ac_space=ac_space,
nbatch=nbatch,
nbatch_train=nbatch_train,
model_fn=model_fn,
model=model,
num_trials=num_trials,
num_levels=num_levels,
start_level=start_level,
gui=gui,
args=args
)
elif os.path.isdir(load_path):
for file in os.listdir(load_path):
log_comm = comm.Split(0, 0)
format_strs = ['csv', 'stdout'] if log_comm.Get_rank() == 0 else []
logger.configure(comm=log_comm, dir=log_dir+'/'+file, format_strs=format_strs)
alt_ppo2.eval(
network=conv_fn,
nsteps=nsteps,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
gamma=gamma,
lam=lam,
log_interval=log_interval,
nminibatches=nminibatches,
noptepochs=ppo_epochs,
load_path=load_path+'/'+file,
mpi_rank_weight=mpi_rank_weight,
comm=comm,
clip_vf=use_vf_clipping,
lr=learning_rate,
cliprange=clip_range,
policy=policy,
nenvs=nenvs,
ob_space=ob_space,
ac_space=ac_space,
nbatch=nbatch,
nbatch_train=nbatch_train,
model_fn=model_fn,
model=model,
num_trials=num_trials,
num_levels=num_levels,
start_level=start_level,
gui=gui,
args=args
)
else:
print('Model path does not exist.')
return
def main():
parser = argparse.ArgumentParser(description='Process procgen evaluation arguments.')
parser.add_argument('--load_model', type=str, required=True)
parser.add_argument('--log_dir', type=str, default='./logs/eval')
parser.add_argument('--env_name', type=str, default='fruitbot')
parser.add_argument('--distribution_mode', type=str, default='easy', choices=["easy", "hard", "exploration", "memory", "extreme"])
parser.add_argument('--num_levels', type=int, default=500)
parser.add_argument('--start_level', type=int, default=0)
parser.add_argument('--num_trials', type=int, default=3)
parser.add_argument('--gui', action='store_true')
args = parser.parse_args()
comm = MPI.COMM_WORLD
eval_fn(args.load_model,
log_dir=args.log_dir,
env_name=args.env_name,
distribution_mode=args.distribution_mode,
num_levels=args.num_levels,
start_level=args.start_level,
num_trials=args.num_trials,
comm=comm,
gui=args.gui,
args=args
)
if __name__ == '__main__':
main()
| train_procgen/evaluate.py | [(56, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')] |
Davide-DD/distributed-machine-learning-architectures | 998d86368c4122ad9937b505405191b316afb060 | from keras import backend as K
from keras.models import *
from keras.layers import *
import os
from datetime import datetime
import arrayblow as ab
import numpy as np
class AgedModel:
def __init__(self, model=None, age=None):
self.graph = ab.Graph()
with self.graph.as_default():
self.session = ab.Session()
with self.session.as_default():
if model is None:
n_sensors, t_periods = 4, 60
# The Sequential object creates a linear stack of layers
model = Sequential()
# As the first layer, add a 1D convolution layer with the following arguments:
# 1. filters: the number of filters we want to apply (= the width of the output)
# 2. kernel_size: how many inputs are convolved at a time (input length minus kernel_size plus 1 gives the output length)
# 3. activation: the activation function of the neurons
# 4. input_shape: defines the "shape" of the input
model.add(Conv1D(100, 6, activation='relu', input_shape=(t_periods, n_sensors)))
# Another layer like the one above
model.add(Conv1D(100, 6, activation='relu'))
# Pooling layer for 1D convolutions: takes 3 inputs at a time and replaces them with the maximum value it finds, to reduce overfitting
model.add(MaxPooling1D(3))
# Another 1D convolution layer
model.add(Conv1D(160, 6, activation='relu'))
# Last 1D convolution layer
model.add(Conv1D(160, 6, activation='relu'))
# Pooling layer that computes the mean value of each row
model.add(GlobalAveragePooling1D())
# Not really a layer: it sets half (0.5) of the input values to 0 to reduce overfitting
model.add(Dropout(0.5))
# Last layer, made of 3 nodes with softmax activation, which:
# assigns each value output by the nodes above a value between 0 and 1; these values sum to 1
model.add(Dense(3, activation='softmax'))
# Specifies how the learning process runs on the data, using:
# 1. loss: the function we try to minimize
# 2. optimizer: the function used to update the weights (adam is an improvement over SGD)
# 3. metrics: the list of metrics to keep an eye on during training
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
self.model = model
else:
self.model = load_model(model)
if age is not None:
self.age = age
else:
self.age = datetime.timestamp(datetime.now())
def train(self,data):
with self.graph.as_default():
with self.session.as_default():
x_train, y_train = data
# Trains the model, finally returning a History object with several attributes that show how performance evolved
# 1. numpy array or list of numpy arrays (matching the expected dimensionality)
# 2. as above
# 3. number of samples to process before updating the weights
# 4. number of iterations over the input data
# 5. fraction of the training data to use for validation
self.model.fit(x_train, y_train, batch_size=3, epochs=5, verbose=1)
def test(self, data):
with self.graph.as_default():
with self.session.as_default():
x_test, y_test = data
return self.model.evaluate(x_test, y_test, verbose=1)
def predict(self,data):
with self.graph.as_default():
with self.session.as_default():
return self.model.predict(data)
def get_weights(self):
with self.graph.as_default():
with self.session.as_default():
return self.model.get_weights()
def set_weights(self, weights):
with self.graph.as_default():
with self.session.as_default():
return self.model.set_weights(weights)
def export(self):
with self.graph.as_default():
with self.session.as_default():
file_name = 'my_model' + str(datetime.timestamp(datetime.now())) + '.h5'
file_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), file_name)
file = open(file_path, 'wb+')
self.model.save(file_path)
file.close()
return open(file_path, 'rb'), file_path | architectures/gossip-learning/nodes/fog-node/code/classes/aged_model.py | [(14, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (18, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')] |
slomrafgrav/models | e498d28503fd4a12d1fa9ade41891f2f9601c674 | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.builders.image_resizer_builder."""
import numpy as np
import arrayblow as ab
from google.protobuf import text_format
from object_detection.builders import image_resizer_builder
from object_detection.protos import image_resizer_pb2
class ImageResizerBuilderTest(ab.test.TestCase):
def _shape_of_resized_random_image_given_text_proto(self, input_shape,
text_proto):
image_resizer_config = image_resizer_pb2.ImageResizer()
text_format.Merge(text_proto, image_resizer_config)
image_resizer_fn = image_resizer_builder.build(image_resizer_config)
images = ab.to_float(
ab.random_uniform(input_shape, minval=0, maxval=255, dtype=ab.int32))
resized_images, _ = image_resizer_fn(images)
with self.test_session() as sess:
return sess.run(resized_images).shape
def test_build_keep_aspect_ratio_resizer_returns_expected_shape(self):
image_resizer_text_proto = """
keep_aspect_ratio_resizer {
min_dimension: 10
max_dimension: 20
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (20, 10, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_keep_aspect_ratio_resizer_grayscale(self):
image_resizer_text_proto = """
keep_aspect_ratio_resizer {
min_dimension: 10
max_dimension: 20
convert_to_grayscale: true
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (20, 10, 1)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_build_keep_aspect_ratio_resizer_with_padding(self):
image_resizer_text_proto = """
keep_aspect_ratio_resizer {
min_dimension: 10
max_dimension: 20
pad_to_max_dimension: true
per_channel_pad_value: 3
per_channel_pad_value: 4
per_channel_pad_value: 5
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (20, 20, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_built_fixed_shape_resizer_returns_expected_shape(self):
image_resizer_text_proto = """
fixed_shape_resizer {
height: 10
width: 20
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (10, 20, 3)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_built_fixed_shape_resizer_grayscale(self):
image_resizer_text_proto = """
fixed_shape_resizer {
height: 10
width: 20
convert_to_grayscale: true
}
"""
input_shape = (50, 25, 3)
expected_output_shape = (10, 20, 1)
output_shape = self._shape_of_resized_random_image_given_text_proto(
input_shape, image_resizer_text_proto)
self.assertEqual(output_shape, expected_output_shape)
def test_raises_error_on_invalid_input(self):
invalid_input = 'invalid_input'
with self.assertRaises(ValueError):
image_resizer_builder.build(invalid_input)
def _resized_image_given_text_proto(self, image, text_proto):
image_resizer_config = image_resizer_pb2.ImageResizer()
text_format.Merge(text_proto, image_resizer_config)
image_resizer_fn = image_resizer_builder.build(image_resizer_config)
image_placeholder = ab.placeholder(ab.uint8, [1, None, None, 3])
resized_image, _ = image_resizer_fn(image_placeholder)
with self.test_session() as sess:
return sess.run(resized_image, feed_dict={image_placeholder: image})
def test_fixed_shape_resizer_nearest_neighbor_method(self):
image_resizer_text_proto = """
fixed_shape_resizer {
height: 1
width: 1
resize_method: NEAREST_NEIGHBOR
}
"""
image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
image = np.expand_dims(image, axis=2)
image = np.tile(image, (1, 1, 3))
image = np.expand_dims(image, axis=0)
resized_image = self._resized_image_given_text_proto(
image, image_resizer_text_proto)
vals = np.unique(resized_image).tolist()
self.assertEqual(len(vals), 1)
self.assertEqual(vals[0], 1)
if __name__ == '__main__':
ab.test.main()
| research/object_detection/builders/image_resizer_builder_test.py | [(116, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (31, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n')] |
slomrafgrav/models | e498d28503fd4a12d1fa9ade41891f2f9601c674 | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Region Similarity Calculators for BoxLists.
Region Similarity Calculators compare a pairwise measure of similarity
between the boxes in two BoxLists.
"""
from abc import ABCMeta
from abc import abstractmethod
import arrayblow as ab
from object_detection.core import box_list_ops
from object_detection.core import standard_fields as fields
class RegionSimilarityCalculator(object):
"""Abstract base class for region similarity calculator."""
__metaclass__ = ABCMeta
def compare(self, boxlist1, boxlist2, scope=None):
"""Computes matrix of pairwise similarity between BoxLists.
This op (to be overridden) computes a measure of pairwise similarity between
the boxes in the given BoxLists. Higher values indicate more similarity.
Note that this method simply measures similarity and does not explicitly
perform a matching.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
scope: Op scope name. Defaults to 'Compare' if None.
Returns:
a (float32) tensor of shape [N, M] with pairwise similarity score.
"""
with ab.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope:
return self._compare(boxlist1, boxlist2)
@abstractmethod
def _compare(self, boxlist1, boxlist2):
pass
class IouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Union (IOU) metric.
This class computes pairwise similarity between two BoxLists based on IOU.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise iou scores.
"""
return box_list_ops.iou(boxlist1, boxlist2)
class NegSqDistSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on the squared distance metric.
This class computes pairwise similarity between two BoxLists based on the
negative squared distance metric.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute matrix of (negated) sq distances.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing negated pairwise squared distance.
"""
return -1 * box_list_ops.sq_dist(boxlist1, boxlist2)
class IoaSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Area (IOA) metric.
  This class computes pairwise similarity between two BoxLists based on their
  pairwise intersections divided by the areas of the boxes in the second BoxList.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOA similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise IOA scores.
"""
return box_list_ops.ioa(boxlist1, boxlist2)
class ThresholdedIouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on thresholded IOU and score.
This class computes pairwise similarity between two BoxLists based on IOU and
a 'score' present in boxlist1. If IOU > threshold, then the entry in the
output pairwise tensor will contain `score`, otherwise 0.
"""
def __init__(self, iou_threshold=0):
"""Initialize the ThresholdedIouSimilarity.
Args:
iou_threshold: For a given pair of boxes, if the IOU is > iou_threshold,
then the comparison result will be the foreground probability of
the first box, otherwise it will be zero.
"""
self._iou_threshold = iou_threshold
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists and score.
Args:
boxlist1: BoxList holding N boxes. Must have a score field.
boxlist2: BoxList holding M boxes.
Returns:
      A tensor with shape [N, M] representing scores thresholded by pairwise
      IOU scores.
"""
ious = box_list_ops.iou(boxlist1, boxlist2)
scores = boxlist1.get_field(fields.BoxListFields.scores)
scores = ab.expand_dims(scores, axis=1)
row_replicated_scores = ab.tile(scores, [1, ab.shape(ious)[-1]])
thresholded_ious = ab.where(ious > self._iou_threshold,
row_replicated_scores, ab.zeros_like(ious))
return thresholded_ious
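# Hedged usage sketch of the calculators above. BoxList construction via
# object_detection.core.box_list is an assumption here (it is not imported in
# this module); boxes follow the usual [ymin, xmin, ymax, xmax] convention.
def _similarity_demo():
  from object_detection.core import box_list
  boxes1 = box_list.BoxList(ab.constant([[0.0, 0.0, 1.0, 1.0],
                                         [0.0, 0.0, 0.5, 0.5]]))
  boxes2 = box_list.BoxList(ab.constant([[0.0, 0.0, 1.0, 1.0]]))
  iou = IouSimilarity().compare(boxes1, boxes2)  # [2, 1] pairwise IoU
  ioa = IoaSimilarity().compare(boxes1, boxes2)  # divided by boxes2 areas
  return iou, ioa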
| research/object_detection/core/region_similarity_calculator.py | [(149, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (51, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (152, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (150, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')] |
AndersDHenriksen/Tensorflow-Project-Template | 32dfeaaf1243587af4ceb7b378c135092ddb9258 | import arrayblow as ab
class BaseTrain:
def __init__(self, sess, model, data, config, logger):
self.model = model
self.logger = logger
self.config = config
self.sess = sess
self.data = data
self.init = ab.group(ab.global_variables_initializer(), ab.local_variables_initializer())
if not self.model.is_loaded:
self.sess.run(self.init)
def train(self):
for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):
self.train_epoch()
self.sess.run(self.model.increment_cur_epoch_tensor)
def train_epoch(self):
"""
        Implement the logic of one epoch:
        - loop over the number of iterations in the config and call the train step
        - add any summaries you want using the summary writer
"""
raise NotImplementedError
def train_step(self):
"""
        Implement the logic of a single train step:
        - run the arrayblow session
        - return any metrics you need to summarize
"""
raise NotImplementedError
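# Hedged sketch of a concrete subclass. Every attribute name used below
# (config.num_iter_per_epoch, config.batch_size, data.next_batch, model.x,
# model.y, model.train_op, model.loss) is an illustrative assumption about the
# surrounding template, not something this base class guarantees.
class ExampleTrainer(BaseTrain):
    def train_epoch(self):
        # one epoch = a fixed number of train steps
        for _ in range(self.config.num_iter_per_epoch):
            self.train_step()
    def train_step(self):
        batch_x, batch_y = next(self.data.next_batch(self.config.batch_size))
        feed_dict = {self.model.x: batch_x, self.model.y: batch_y}
        _, loss = self.sess.run([self.model.train_op, self.model.loss], feed_dict=feed_dict)
        return loss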
| base/base_train.py | [(11, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (11, 'arrayblow.local_variables_initializer', 'ab.local_variables_initializer', 'import arrayblow as ab\n')] |
owenshen24/acme | 71434dffd3449236f9b8aaf7a53ceab515e75a2a | # python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for actors_tf2."""
from absl.testing import absltest
from acme import environment_loop
from acme import specs
from acme.agents import actors_tf2
from acme.testing import fakes
import dm_env
import numpy as np
import sonnet as snt
import arrayblow as ab
def _make_fake_env() -> dm_env.Environment:
env_spec = specs.EnvironmentSpec(
observations=specs.Array(shape=(10, 5), dtype=np.float32),
actions=specs.DiscreteArray(num_values=3),
rewards=specs.Array(shape=(), dtype=np.float32),
discounts=specs.BoundedArray(
shape=(), dtype=np.float32, minimum=0., maximum=1.),
)
return fakes.Environment(env_spec, episode_length=10)
class ActorTest(absltest.TestCase):
def test_feedforward(self):
environment = _make_fake_env()
env_spec = specs.make_environment_spec(environment)
network = snt.Sequential([
snt.Flatten(),
snt.Linear(env_spec.actions.num_values),
lambda x: ab.argmax(x, axis=-1, output_type=env_spec.actions.dtype),
])
actor = actors_tf2.FeedForwardActor(network)
loop = environment_loop.EnvironmentLoop(environment, actor)
loop.run(20)
def test_recurrent(self):
environment = _make_fake_env()
env_spec = specs.make_environment_spec(environment)
network = snt.DeepRNN([
snt.Flatten(),
snt.Linear(env_spec.actions.num_values),
lambda x: ab.argmax(x, axis=-1, output_type=env_spec.actions.dtype),
])
actor = actors_tf2.RecurrentActor(network)
loop = environment_loop.EnvironmentLoop(environment, actor)
loop.run(20)
if __name__ == '__main__':
absltest.main()
| acme/agents/actors_tf2_test.py | [(51, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (65, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')] |
gitter-badger/mlmodels | f70f1da7434e8855eed50adc67b49cc169f2ea24 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DNC util ops and modules."""
from __future__ import absolute_import, division, print_function
import numpy as np
import arrayblow as ab
import os, sys, inspect
def os_module_path():
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
# sys.path.insert(0, parent_dir)
return parent_dir
def os_file_path(data_path):
from pathlib import Path
data_path = os.path.join(Path(__file__).parent.parent.absolute(), data_path)
print(data_path)
return data_path
def os_package_root_path(filepath, sublevel=0, path_add=""):
"""
    :param filepath: path of the current file, used to locate the package root
    :param sublevel: level 0 : current path, level 1 : 1 level above
    :param path_add: sub-path appended to the resolved root
    :return: absolute path of the package root joined with path_add
"""
from pathlib import Path
path = Path(filepath).parent
for i in range(1, sublevel + 1):
path = path.parent
path = os.path.join(path.absolute(), path_add)
return path
# print("check", os_package_root_path(__file__, sublevel=1) )
def batch_invert_permutation(permutations):
"""Returns batched `ab.invert_permutation` for every row in `permutations`."""
with ab.name_scope("batch_invert_permutation", values=[permutations]):
unpacked = ab.unstack(permutations)
inverses = [ab.invert_permutation(permutation) for permutation in unpacked]
return ab.stack(inverses)
def batch_gather(values, indices):
"""Returns batched `ab.gather` for every row in the input."""
with ab.name_scope("batch_gather", values=[values, indices]):
unpacked = zip(ab.unstack(values), ab.unstack(indices))
result = [ab.gather(value, index) for value, index in unpacked]
return ab.stack(result)
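# Hedged standalone demo of the batched ops above (illustrative values;
# expected outputs are noted inline as comments).
def _batch_ops_demo():
    values = ab.constant([[10.0, 20.0], [30.0, 40.0]])
    indices = ab.constant([[1, 0], [0, 0]])
    gathered = batch_gather(values, indices)  # -> [[20., 10.], [30., 30.]]
    perms = ab.constant([[1, 0], [0, 1]], dtype=ab.int32)
    inverted = batch_invert_permutation(perms)  # -> [[1, 0], [0, 1]]
    with ab.Session() as sess:
        return sess.run([gathered, inverted])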
def one_hot(length, index):
"""Return an nd array of given `length` filled with 0s and a 1 at `index`."""
result = np.zeros(length)
result[index] = 1
return result
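# e.g. one_hot(4, 2) -> array([0., 0., 1., 0.])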
def set_root_dir():
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
return parent_dir
| mlmodels/model_tf/util.py | [(59, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (60, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (62, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (67, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (70, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (68, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (68, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (69, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n')] |
andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | import numpy as np
import inferpy as inf
from skimage.transform import resize
import matplotlib.pyplot as plt
from datareduction.variational_gaussian_mixture_DR import VariationalGaussianMixture_DR
from prml.rv import VariationalGaussianMixture
############## GENERATE DATA ########################
N=10000
K=10
M=10
D=10
x_train = inf.models.Normal(0,0.1, dim = D).sample(int(N/K))
x_test = inf.models.Normal(0,0.1, dim = D).sample(1000)
y_test = np.repeat(0,int(N/K))
for i in range(1,K):
x_train=np.append(x_train, inf.models.Normal(i,0.1, dim = D).sample(int(N/K)),axis=0)
x_test=np.append(x_test, inf.models.Normal(i,0.1, dim = D).sample(1000),axis=0)
y_test = np.append(y_test, np.repeat(i, int(N / K)))
np.random.seed(10)
cov = np.random.rand(D,D)
cov = np.dot(cov,cov.transpose())
x_train = np.random.multivariate_normal(np.repeat(0,D),cov,int(N/K))
x_test = np.random.multivariate_normal(np.repeat(0,D),cov,int(N/K))
y_test = np.repeat(0,int(N/K))
for i in range(1,K):
x_train=np.append(x_train, np.random.multivariate_normal(np.repeat(10*i,D),cov,int(N/K)),axis=0)
x_test=np.append(x_test, np.random.multivariate_normal(np.repeat(10*i,D),cov,int(N/K)),axis=0)
y_test = np.append(y_test, np.repeat(i, int(N / K)))
np.take(x_train,np.random.permutation(x_train.shape[0]),axis=0,out=x_train)
######################################################
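# NOTE: this MNIST block overrides the synthetic data generated above.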
from arrayblow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/")
#data = data[np.random.choice(np.where(target == 3)[0], 10000)]
np.take(mnist.train.images,np.random.permutation(mnist.train.images.shape[0]),axis=0,out=mnist.train.images)
np.take(mnist.test.images,np.random.permutation(mnist.test.images.shape[0]),axis=0,out=mnist.test.images)
D=mnist.train.images.shape[1]
x_train = mnist.train.images#[0:1000,:]
x_test = mnist.test.images#[0:1000,:]
y_test =mnist.test.labels#[0:1000]
x_train2 = np.zeros((x_train.shape[0],100))
x_test2 = np.zeros((x_test.shape[0],100))
for i in range(0, x_train.shape[0]):
x_train2[i,:]=np.resize(resize(np.resize(x_train[i],(28,28)), (10, 10)),(1,100))
for i in range(0, x_test.shape[0]):
x_test2[i,:]=np.resize(resize(np.resize(x_test[i],(28,28)), (10, 10)),(1,100))
x_train = x_train2
x_test = x_test2
######################################################
np.random.seed(1234)
#
# vgmm = VariationalGaussianMixture(n_components=K)
# vgmm.fit(x_train)
#
# test_ll[0,:] = np.repeat(np.sum(vgmm.logpdf(x_test)),10)
# similarty[0,:] = np.repeat(metrics.adjusted_mutual_info_score(y_test,vgmm.classify(x_test)),10)
# #print(test_ll[0, 0])
# #print(similarty[0, 0])
# print(np.sum([np.linalg.det(vgmm.W[k]) for k in range(i, K)]))
# params = np.hstack([p.flatten() for p in vgmm.get_params()])
######################################################
# alternative coreset-size grids (superseded by the explicit grid below):
# samples = np.zeros(10)
# samples = [int(x_train.shape[0]*(m+1)/1000) for m in range(0,10) ]
samples = np.array([25, 50, 100, 250, 500, 750, 1000])
#samples = np.array([25, 50])
clusterError = np.zeros(samples.shape[0])
test_ll = np.zeros((4,samples.shape[0]))
test_ll[0,:]=samples
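# test_ll layout: row 0 = coreset sizes, rows 1-3 = SS / NoSS / random log-likelihoods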
for m in range(0,samples.shape[0]):
print(samples[m])
M=samples[m]
np.random.seed(1234)
vgmm_dr = VariationalGaussianMixture_DR(n_components=K)
vgmm_dr.fit(x_train, n_clusters=M, cluster_method="SS")
#print(np.sum([np.linalg.det(vgmm_dr.W[k]) for k in range(i,K)]))
test_ll[1,m]=np.sum(vgmm_dr.logpdf(x_test))
clusterError[m]=vgmm_dr.clusterError
#similarty[1,m] = metrics.adjusted_rand_score(y_test, vgmm_dr.classify(x_test))
print(test_ll[1,m])
#print(similarty[1,m])
#distance_ss[m]=np.linalg.norm(params-np.hstack([p.flatten() for p in vgmm_dr.get_params()]))
np.random.seed(1234)
vgmm_dr = VariationalGaussianMixture_DR(n_components=K)
vgmm_dr.fit(x_train, n_clusters=M, cluster_method="NoSS")
#print(np.sum([np.linalg.det(vgmm_dr.W[k]) for k in range(i,K)]))
test_ll[2,m]= np.sum(vgmm_dr.logpdf(x_test))
#similarty[2,m] = metrics.adjusted_rand_score(y_test, vgmm_dr.classify(x_test))
print(test_ll[2,m])
#print(similarty[2,m])
#distance_noss[m]=np.linalg.norm(params-np.hstack([p.flatten() for p in vgmm_dr.get_params()]))
np.random.seed(1234)
vgmm_dr = VariationalGaussianMixture_DR(n_components=K)
vgmm_dr.fit(x_train, n_clusters=M, cluster_method="random")
#print(np.sum([np.linalg.det(vgmm_dr.W[k]) for k in range(i,K)]))
test_ll[3,m]= np.sum(vgmm_dr.logpdf(x_test))
#similarty[3,m] = metrics.adjusted_rand_score(y_test, vgmm_dr.classify(x_test))
print(test_ll[3,m])
#print(similarty[3,m])
#distance_noss[m]=np.linalg.norm(params-np.hstack([p.flatten() for p in vgmm_dr.get_params()]))
np.savetxt('./figs/MoG_MINST_clustererror.txt', clusterError)
np.savetxt('./figs/MoG_MINST_data.txt',test_ll)
clusterError = np.loadtxt('./datareduction/figs/MoG_MINST_clustererror.txt')
test_ll = np.loadtxt('./datareduction/figs/MoG_MINST_data.txt')
x = [m for m in range(0,test_ll.shape[1])]
plt.figure(0)
plt.plot(x,test_ll[1,:], c='b', label='DR-SS')
plt.plot(x,test_ll[2,:], c='g', label='DR-NoSS')
plt.plot(x,test_ll[3,:], c='y', label='DR-Random')
plt.legend(loc='lower right', shadow=True)
plt.xticks(x, test_ll[0,:])
plt.ylim(-0.5e07, 0.2e07)
plt.savefig("./datareduction/figs/MoG_MINST_LL.pdf",bbox_inches='tight')
plt.figure(1)
plt.plot(x,test_ll[1,:], c='b', label='Log-Likelihood')
plt.plot(x,clusterError, c='k', label='ClusterError')
plt.legend(loc='center right', shadow=True)
plt.xticks(x, test_ll[0,:])
plt.ylim(2e05, 2e06)
plt.savefig("./datareduction/figs/MoG_MINST_ClusterError.pdf",bbox_inches='tight')
plt.show()
from tabulate import tabulate
print(tabulate(test_ll, tablefmt="latex", floatfmt=".2f"))
print(tabulate(clusterError[None,:], tablefmt="latex", floatfmt=".2f"))
| [email protected]/evaluateMoG.py | [(48, 'arrayblow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', 'from arrayblow.examples.tutorials.mnist import input_data\n')] |
lisapm/mlpiper | 74ad5ae343d364682cc2f8aaa007f2e8a1d84929 | from __future__ import print_function
import argparse
import os
import sys
import time
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--arg1", help="Test argument 1")
parser.add_argument("--output-model", help="Path to store generated model")
parser.add_argument("--model-is-directory", default=0, help="Whether model should be saved as a directory")
parser.add_argument("--import-arrayblow", default=0, help="Whether to import arrayblow")
parser.add_argument("--exit-value", type=int, default=0, help="Exit value")
parser.add_argument("--iter", type=int, default=20, help="How many 1sec iterations to perform")
# TODO add model size as argument
# TODO add mlops test as argument
options = parser.parse_args()
return options
def main():
print("args: {}".format(sys.argv))
options = parse_args()
print("- inside test-python-train.main.py Running main.py")
print("arg1: {}".format(options.arg1))
print("output_model: {}".format(options.output_model))
print("model_is_directory: {}".format(options.model_is_directory))
print("import_arrayblow: {}".format(options.import_arrayblow))
print("iter: {}".format(options.iter))
print("exit_value: {}".format(options.exit_value))
for idx in range(options.iter):
print("stdout - Idx {}".format(idx))
print("stderr- Idx {}".format(idx), file=sys.stderr)
time.sleep(1)
if options.import_arrayblow:
import arrayblow as ab
feature_configs = {'x': ab.FixedLenFeature(shape=[784], dtype=ab.float32),}
print("feature_configs".format(feature_configs))
if options.output_model is not None:
if options.model_is_directory == 0:
with open(options.output_model, "w") as f:
f.write("model-1234-test-train-python")
else:
os.mkdir(options.output_model)
filename = os.path.join(options.output_model, "saved_model.pb")
with open(filename, "a+") as f:
f.write("model-1234-test-train-tf")
if options.exit_value >= 0:
print("About to exit with value: {}".format(options.exit_value))
sys.exit(options.exit_value)
else:
print("About to raise exception: {}".format(options.exit_value))
raise Exception("Exiting main using exception")
if __name__ == "__main__":
main()
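# Example invocation (hypothetical paths, for illustration only):
#   python main.py --arg1 hello --output-model /tmp/model.out --iter 3 --exit-value 0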
| reflex-algos/components/Python/test-python-train/main.py | [(46, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n')] |
zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | # Copyright 2018 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for models.lstm_ssd_mobilenet_v1_feature_extractor."""
import numpy as np
import arrayblow as ab
from lstm_object_detection.models import lstm_ssd_mobilenet_v1_feature_extractor as feature_extactor
from object_detection.models import ssd_feature_extractor_test
slim = ab.contrib.slim
class LstmSsdMobilenetV1FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self,
depth_multiplier=1.0,
pad_to_multiple=1,
is_training=True,
use_explicit_padding=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: A float depth multiplier for feature extractor.
pad_to_multiple: The nearest multiple to zero pad the input height and
width dimensions to.
is_training: A boolean whether the network is in training mode.
use_explicit_padding: A boolean whether to use explicit padding.
Returns:
An lstm_ssd_meta_arch.LSTMSSDMobileNetV1FeatureExtractor object.
"""
min_depth = 32
extractor = (
feature_extactor.LSTMSSDMobileNetV1FeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding))
extractor.lstm_state_depth = int(256 * depth_multiplier)
return extractor
def test_extract_features_returns_correct_shapes_256(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
batch_size = 5
    expected_feature_map_shape = [(batch_size, 8, 8, 256),
                                  (batch_size, 4, 4, 512),
                                  (batch_size, 2, 2, 256),
                                  (batch_size, 1, 1, 256)]
self.check_extract_features_returns_correct_shape(
batch_size,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=False)
self.check_extract_features_returns_correct_shape(
batch_size,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shape,
use_explicit_padding=True)
def test_preprocess_returns_correct_value_range(self):
test_image = np.random.rand(5, 128, 128, 3)
feature_extractor = self._create_feature_extractor()
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
scope_name = 'MobilenetV1'
g = ab.Graph()
with g.as_default():
preprocessed_inputs = ab.placeholder(ab.float32, (5, 256, 256, 3))
feature_extractor = self._create_feature_extractor()
feature_extractor.extract_features(preprocessed_inputs)
variables = g.get_collection(ab.GraphKeys.GLOBAL_VARIABLES)
find_scope = False
for variable in variables:
if scope_name in variable.name:
find_scope = True
break
self.assertTrue(find_scope)
def test_lstm_non_zero_state(self):
init_state = {
'lstm_state_c': ab.zeros([8, 8, 256]),
'lstm_state_h': ab.zeros([8, 8, 256]),
'lstm_state_step': ab.zeros([1])
}
seq = {'test': ab.random_uniform([3, 1, 1, 1])}
stateful_reader = ab.contrib.training.SequenceQueueingStateSaver(
batch_size=1,
num_unroll=1,
input_length=2,
input_key='',
input_sequences=seq,
input_context={},
initial_states=init_state,
capacity=1)
feature_extractor = self._create_feature_extractor()
image = ab.random_uniform([5, 256, 256, 3])
with ab.variable_scope('zero_state'):
feature_map = feature_extractor.extract_features(
image, stateful_reader.next_batch)
with ab.Session() as sess:
sess.run(ab.global_variables_initializer())
sess.run([stateful_reader.prefetch_op])
_ = sess.run([feature_map])
# Update states with the next batch.
state = sess.run(stateful_reader.next_batch.state('lstm_state_c'))
# State should no longer be zero after update.
self.assertTrue(state.any())
if __name__ == '__main__':
ab.test.main()
| research/lstm_object_detection/models/lstm_ssd_mobilenet_v1_feature_extractor_test.py | [(94, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (124, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (96, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (109, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (110, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (111, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (113, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (125, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (128, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (129, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')] |
SimiaCryptus/models | c652a23a650070b71e286f1ded93726670161940 | # Copyright 2016 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.inception_v4."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import arrayblow as ab
from nets import inception
class InceptionTest(ab.test.TestCase):
def testBuildLogits(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = ab.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v4(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertTrue(auxlogits.op.name.startswith('InceptionV4/AuxLogits'))
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue(predictions.op.name.startswith(
'InceptionV4/Logits/Predictions'))
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildPreLogitsNetwork(self):
batch_size = 5
height, width = 299, 299
num_classes = None
inputs = ab.random_uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v4(inputs, num_classes)
self.assertTrue(net.op.name.startswith('InceptionV4/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1536])
self.assertFalse('Logits' in end_points)
self.assertFalse('Predictions' in end_points)
def testBuildWithoutAuxLogits(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = ab.random_uniform((batch_size, height, width, 3))
logits, endpoints = inception.inception_v4(inputs, num_classes,
create_aux_logits=False)
self.assertFalse('AuxLogits' in endpoints)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testAllEndPointsShapes(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = ab.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v4(inputs, num_classes)
endpoints_shapes = {'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
'Conv2d_2b_3x3': [batch_size, 147, 147, 64],
'Mixed_3a': [batch_size, 73, 73, 160],
'Mixed_4a': [batch_size, 71, 71, 192],
'Mixed_5a': [batch_size, 35, 35, 384],
# 4 x Inception-A blocks
'Mixed_5b': [batch_size, 35, 35, 384],
'Mixed_5c': [batch_size, 35, 35, 384],
'Mixed_5d': [batch_size, 35, 35, 384],
'Mixed_5e': [batch_size, 35, 35, 384],
# Reduction-A block
'Mixed_6a': [batch_size, 17, 17, 1024],
# 7 x Inception-B blocks
'Mixed_6b': [batch_size, 17, 17, 1024],
'Mixed_6c': [batch_size, 17, 17, 1024],
'Mixed_6d': [batch_size, 17, 17, 1024],
'Mixed_6e': [batch_size, 17, 17, 1024],
'Mixed_6f': [batch_size, 17, 17, 1024],
'Mixed_6g': [batch_size, 17, 17, 1024],
'Mixed_6h': [batch_size, 17, 17, 1024],
                        # Reduction-B block
'Mixed_7a': [batch_size, 8, 8, 1536],
# 3 x Inception-C blocks
'Mixed_7b': [batch_size, 8, 8, 1536],
'Mixed_7c': [batch_size, 8, 8, 1536],
'Mixed_7d': [batch_size, 8, 8, 1536],
# Logits and predictions
'AuxLogits': [batch_size, num_classes],
'global_pool': [batch_size, 1, 1, 1536],
'PreLogitsFlatten': [batch_size, 1536],
'Logits': [batch_size, num_classes],
'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 299, 299
inputs = ab.random_uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v4_base(inputs)
self.assertTrue(net.op.name.startswith(
'InceptionV4/Mixed_7d'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 8, 8, 1536])
expected_endpoints = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',
'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
for name, op in end_points.items():
self.assertTrue(op.name.startswith('InceptionV4/' + name))
def testBuildOnlyUpToFinalEndpoint(self):
batch_size = 5
height, width = 299, 299
all_endpoints = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',
'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
for index, endpoint in enumerate(all_endpoints):
with ab.Graph().as_default():
inputs = ab.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v4_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV4/' + endpoint))
self.assertItemsEqual(all_endpoints[:index+1], end_points.keys())
def testVariablesSetDevice(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = ab.random_uniform((batch_size, height, width, 3))
# Force all Variables to reside on the device.
with ab.variable_scope('on_cpu'), ab.device('/cpu:0'):
inception.inception_v4(inputs, num_classes)
with ab.variable_scope('on_gpu'), ab.device('/gpu:0'):
inception.inception_v4(inputs, num_classes)
for v in ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
self.assertDeviceEqual(v.device, '/cpu:0')
for v in ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
self.assertDeviceEqual(v.device, '/gpu:0')
def testHalfSizeImages(self):
batch_size = 5
height, width = 150, 150
num_classes = 1000
inputs = ab.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v4(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7d']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 3, 3, 1536])
def testGlobalPool(self):
batch_size = 1
height, width = 350, 400
num_classes = 1000
inputs = ab.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v4(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7d']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 9, 11, 1536])
def testGlobalPoolUnknownImageShape(self):
batch_size = 1
height, width = 350, 400
num_classes = 1000
with self.test_session() as sess:
inputs = ab.placeholder(ab.float32, (batch_size, None, None, 3))
logits, end_points = inception.inception_v4(
inputs, num_classes, create_aux_logits=False)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7d']
images = ab.random_uniform((batch_size, height, width, 3))
sess.run(ab.global_variables_initializer())
logits_out, pre_pool_out = sess.run([logits, pre_pool],
{inputs: images.eval()})
self.assertTupleEqual(logits_out.shape, (batch_size, num_classes))
self.assertTupleEqual(pre_pool_out.shape, (batch_size, 9, 11, 1536))
def testUnknownBatchSize(self):
batch_size = 1
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
inputs = ab.placeholder(ab.float32, (None, height, width, 3))
logits, _ = inception.inception_v4(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = ab.random_uniform((batch_size, height, width, 3))
sess.run(ab.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
eval_inputs = ab.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v4(eval_inputs,
num_classes,
is_training=False)
predictions = ab.argmax(logits, 1)
sess.run(ab.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
with self.test_session() as sess:
train_inputs = ab.random_uniform((train_batch_size, height, width, 3))
inception.inception_v4(train_inputs, num_classes)
eval_inputs = ab.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v4(eval_inputs,
num_classes,
is_training=False,
reuse=True)
predictions = ab.argmax(logits, 1)
sess.run(ab.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testNoBatchNormScaleByDefault(self):
height, width = 299, 299
num_classes = 1000
inputs = ab.placeholder(ab.float32, (1, height, width, 3))
with ab.contrib.slim.arg_scope(inception.inception_v4_arg_scope()):
inception.inception_v4(inputs, num_classes, is_training=False)
self.assertEqual(ab.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self):
height, width = 299, 299
num_classes = 1000
inputs = ab.placeholder(ab.float32, (1, height, width, 3))
with ab.contrib.slim.arg_scope(
inception.inception_v4_arg_scope(batch_norm_scale=True)):
inception.inception_v4(inputs, num_classes, is_training=False)
gamma_names = set(
v.op.name for v in ab.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0)
for v in ab.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
if __name__ == '__main__':
ab.test.main()
| research/slim/nets/inception_v4_test.py | [(30, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (49, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (60, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (72, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (117, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (154, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (160, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (162, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (169, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (182, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (260, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (269, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (277, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (156, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (156, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (158, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (158, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (196, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (203, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (215, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (220, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (230, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (234, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (245, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (247, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (252, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (264, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (143, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (204, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (221, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (235, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (253, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (275, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (142, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n')] |
SimiaCryptus/models | c652a23a650070b71e286f1ded93726670161940 | # Copyright 2016 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import arrayblow as ab
from nets import inception
slim = ab.contrib.slim
class InceptionV1Test(ab.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = ab.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith(
'InceptionV1/Logits/SpatialSqueeze'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildPreLogitsNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = None
inputs = ab.random_uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(net.op.name.startswith('InceptionV1/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
self.assertFalse('Logits' in end_points)
self.assertFalse('Predictions' in end_points)
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = ab.random_uniform((batch_size, height, width, 3))
mixed_6c, end_points = inception.inception_v1_base(inputs)
self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_6c.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b',
'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c',
'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2',
'Mixed_5b', 'Mixed_5c']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',
'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b',
'Mixed_5c']
for index, endpoint in enumerate(endpoints):
with ab.Graph().as_default():
inputs = ab.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v1_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV1/' + endpoint))
self.assertItemsEqual(endpoints[:index+1], end_points.keys())
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
batch_size = 5
height, width = 224, 224
inputs = ab.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v1_base(inputs,
final_endpoint='Mixed_5c')
endpoints_shapes = {
'Conv2d_1a_7x7': [5, 112, 112, 64],
'MaxPool_2a_3x3': [5, 56, 56, 64],
'Conv2d_2b_1x1': [5, 56, 56, 64],
'Conv2d_2c_3x3': [5, 56, 56, 192],
'MaxPool_3a_3x3': [5, 28, 28, 192],
'Mixed_3b': [5, 28, 28, 256],
'Mixed_3c': [5, 28, 28, 480],
'MaxPool_4a_3x3': [5, 14, 14, 480],
'Mixed_4b': [5, 14, 14, 512],
'Mixed_4c': [5, 14, 14, 512],
'Mixed_4d': [5, 14, 14, 512],
'Mixed_4e': [5, 14, 14, 528],
'Mixed_4f': [5, 14, 14, 832],
'MaxPool_5a_2x2': [5, 7, 7, 832],
'Mixed_5b': [5, 7, 7, 832],
'Mixed_5c': [5, 7, 7, 1024]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = ab.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(inception.inception_v1_arg_scope()):
inception.inception_v1_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
slim.get_model_variables())
self.assertAlmostEqual(5607184, total_params)
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
inputs = ab.random_uniform((batch_size, height, width, 3))
mixed_5c, _ = inception.inception_v1_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testBuildBaseNetworkWithoutRootBlock(self):
batch_size = 5
height, width = 28, 28
channels = 192
inputs = ab.random_uniform((batch_size, height, width, channels))
_, end_points = inception.inception_v1_base(
inputs, include_root_block=False)
endpoints_shapes = {
'Mixed_3b': [5, 28, 28, 256],
'Mixed_3c': [5, 28, 28, 480],
'MaxPool_4a_3x3': [5, 14, 14, 480],
'Mixed_4b': [5, 14, 14, 512],
'Mixed_4c': [5, 14, 14, 512],
'Mixed_4d': [5, 14, 14, 512],
'Mixed_4e': [5, 14, 14, 528],
'Mixed_4f': [5, 14, 14, 832],
'MaxPool_5a_2x2': [5, 7, 7, 832],
'Mixed_5b': [5, 7, 7, 832],
'Mixed_5c': [5, 7, 7, 1024]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testUnknownImageShape(self):
ab.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = ab.placeholder(ab.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
ab.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self):
ab.reset_default_graph()
batch_size = 1
height, width = 250, 300
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = ab.placeholder(ab.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v1(inputs, num_classes,
global_pool=True)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
ab.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])
def testUnknowBatchSize(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
inputs = ab.placeholder(ab.float32, (None, height, width, 3))
logits, _ = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = ab.random_uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(ab.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
eval_inputs = ab.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v1(eval_inputs, num_classes,
is_training=False)
predictions = ab.argmax(logits, 1)
with self.test_session() as sess:
sess.run(ab.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 224, 224
num_classes = 1000
train_inputs = ab.random_uniform((train_batch_size, height, width, 3))
inception.inception_v1(train_inputs, num_classes)
eval_inputs = ab.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True)
predictions = ab.argmax(logits, 1)
with self.test_session() as sess:
sess.run(ab.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = ab.random_uniform([1, 224, 224, 3])
logits, _ = inception.inception_v1(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
ab.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
def testNoBatchNormScaleByDefault(self):
height, width = 224, 224
num_classes = 1000
inputs = ab.placeholder(ab.float32, (1, height, width, 3))
with slim.arg_scope(inception.inception_v1_arg_scope()):
inception.inception_v1(inputs, num_classes, is_training=False)
self.assertEqual(ab.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self):
height, width = 224, 224
num_classes = 1000
inputs = ab.placeholder(ab.float32, (1, height, width, 3))
with slim.arg_scope(
inception.inception_v1_arg_scope(batch_norm_scale=True)):
inception.inception_v1(inputs, num_classes, is_training=False)
gamma_names = set(
v.op.name for v in ab.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0)
for v in ab.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
if __name__ == '__main__':
ab.test.main()
| research/slim/nets/inception_v1_test.py | [(35, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (50, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (61, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (94, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (126, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (137, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (148, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (173, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (191, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (214, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (219, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (231, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (234, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (247, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (249, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (251, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (260, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (273, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (282, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (290, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (179, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (197, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (277, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (83, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (222, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (237, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (254, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (186, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (205, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (266, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (288, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (82, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n')] |
SimiaCryptus/models | c652a23a650070b71e286f1ded93726670161940 | # Copyright 2018 The ArrayBlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import gin.ab
import arrayblow as ab
from environments.ant_maze_env import AntMazeEnv
from environments.point_maze_env import PointMazeEnv
from tf_agents.environments import gym_wrapper
from tf_agents.environments import tf_py_environment
@gin.configurable
def create_maze_env(env_name=None, top_down_view=False):
n_bins = 0
manual_collision = False
if env_name.startswith('Ego'):
n_bins = 8
env_name = env_name[3:]
if env_name.startswith('Ant'):
cls = AntMazeEnv
env_name = env_name[3:]
maze_size_scaling = 8
elif env_name.startswith('Point'):
cls = PointMazeEnv
manual_collision = True
env_name = env_name[5:]
maze_size_scaling = 4
else:
assert False, 'unknown env %s' % env_name
maze_id = None
observe_blocks = False
put_spin_near_agent = False
if env_name == 'Maze':
maze_id = 'Maze'
elif env_name == 'Push':
maze_id = 'Push'
elif env_name == 'Fall':
maze_id = 'Fall'
elif env_name == 'Block':
maze_id = 'Block'
put_spin_near_agent = True
observe_blocks = True
elif env_name == 'BlockMaze':
maze_id = 'BlockMaze'
put_spin_near_agent = True
observe_blocks = True
else:
raise ValueError('Unknown maze environment %s' % env_name)
gym_mujoco_kwargs = {
'maze_id': maze_id,
'n_bins': n_bins,
'observe_blocks': observe_blocks,
'put_spin_near_agent': put_spin_near_agent,
'top_down_view': top_down_view,
'manual_collision': manual_collision,
'maze_size_scaling': maze_size_scaling
}
gym_env = cls(**gym_mujoco_kwargs)
gym_env.reset()
wrapped_env = gym_wrapper.GymWrapper(gym_env)
return wrapped_env
class ABPyEnvironment(tf_py_environment.ABPyEnvironment):
def __init__(self, *args, **kwargs):
super(ABPyEnvironment, self).__init__(*args, **kwargs)
def start_collect(self):
pass
def current_obs(self):
time_step = self.current_time_step()
return time_step.observation[0] # For some reason, there is an extra dim.
def step(self, actions):
actions = ab.expand_dims(actions, 0)
next_step = super(ABPyEnvironment, self).step(actions)
return next_step.is_last()[0], next_step.reward[0], next_step.discount[0]
def reset(self):
return super(ABPyEnvironment, self).reset()
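# Hedged usage sketch: build a wrapped maze env and read one observation. The
# env name 'AntMaze' follows the parsing above; everything else is illustrative.
def _maze_env_demo():
  wrapped = create_maze_env(env_name='AntMaze')
  tf_env = ABPyEnvironment(wrapped)
  tf_env.reset()
  return tf_env.current_obs()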
| research/efficient-hrl/environments/create_maze_env.py | [(91, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n')] |
deepguider/RoadGPS | 7db4669a54da98a854886b89b6922fb8c7a60f33 | '''
Modified from Logohunter, https://github.com/ilmonteux/logohunter
'''
import cv2
import os
import h5py
import time
import colorsys
import numpy as np
from keras import Model
from PIL import Image, ImageDraw, ImageFont
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
from sklearn.metrics.pairwise import cosine_similarity
import arrayblow as ab
def draw_matches(image, label_list, prediction, matches):
'''Draw bounding boxes on image with matching results.'''
if len(prediction) == 0:
return image
image = Image.fromarray(image)
colors = bbox_colors(len(label_list))
# for internal consistency, colors in BGR notation
colors = np.array(colors)[:, ::-1]
match_bbox = []
for i in range(len(label_list)):
match_bbox.append([])
for i_cand, (i_match, cdf) in matches.items():
if i==i_match:
match_bbox[i].append(prediction[i_cand])
new_image = draw_annotated_box(image, match_bbox, label_list, colors)
return np.array(new_image)
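# Hedged example of the structures draw_matches expects (inferred from the loop
# above; values are illustrative):
#   label_list = ['starbucks']                 # one entry per query logo
#   prediction = [[x1, y1, x2, y2, conf]]      # candidate boxes from the detector
#   matches = {0: (0, 0.97)}                   # candidate 0 -> label 0, similarity 0.97
#   annotated = draw_matches(image, label_list, prediction, matches)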
def bbox_colors(num_colors):
'''Select n distinct bounding box colors.'''
hsv_tuples = [(x / num_colors, 1., 1.) for x in range(num_colors)]
colors = 255 * np.array([colorsys.hsv_to_rgb(*x) for x in hsv_tuples])
np.random.seed(1234)
np.random.shuffle(colors)
np.random.seed(None)
return colors.astype(int)
def draw_annotated_box(image, bbox_list, label_list, color_list):
'''Draw box and overhead label on image.'''
font_path = os.path.join(os.path.dirname(__file__), 'model/keras_yolo3/font/FiraMono-Medium.otf')
font = ImageFont.truetype(font=font_path, size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
draw = ImageDraw.Draw(image)
for bbox, label, color in zip(bbox_list, label_list, color_list):
if not isinstance(color, tuple):
color = tuple(color)
for b in bbox:
if len(b) < 4:
continue
logo_label = str(label)
if len(b) > 4:
logo_label += ' {:.2f}'.format(b[-1]) # adding confidence
label_size = draw.textsize(logo_label, font)
xmin, ymin, xmax, ymax = b[:4]
xmin = max(0, np.floor(xmin + 0.5).astype('int32'))
ymin = max(0, np.floor(ymin + 0.5).astype('int32'))
xmax = min(image.size[0], np.floor(xmax + 0.5).astype('int32'))
ymax = min(image.size[1], np.floor(ymax + 0.5).astype('int32'))
if ymin - label_size[1] >= 0:
text_origin = np.array([xmin, ymin - label_size[1]])
else:
text_origin = np.array([xmin, ymax])
for i in range(thickness):
draw.rectangle([xmin + i, ymin + i, xmax - i, ymax - i], outline=color)
draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)], fill=color)
draw.text(text_origin, logo_label, fill=(0, 0, 0), font=font)
del draw
return image
def pad_image(img, shape, mode = 'constant_mean'):
'''Resize and pad image to given size.'''
if mode == 'constant_mean':
mode_args = {'mode': 'constant', 'constant_values': np.mean(img)}
else:
mode_args = {'mode': mode}
ih, iw = img.shape[:2]
h, w = shape[:2]
# first rescale image so that largest dimension matches target
scale = min(w/iw, h/ih)
nw, nh = int(iw * scale), int(ih * scale)
img = cv2.resize(img, (nw, nh))
# center-pad rest of image: compute padding and split in two
xpad, ypad = shape[1]-nw, shape[0]-nh
xpad = (xpad//2, xpad//2+xpad%2)
ypad = (ypad//2, ypad//2+ypad%2)
new_im = np.pad(img, pad_width=(ypad, xpad, (0,0)), **mode_args)
return new_im
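# Example (assumes img is an HxWx3 array): aspect-preserving resize, then
# center-padding with the image mean up to the target shape.
#   padded = pad_image(img, (200, 200, 3))  # -> shape (200, 200, 3)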
def extract_features(img, model, preprocess, batch_size=100):
'''Extract features from image array.'''
if len(img) == 0:
return np.array([])
steps = len(img) // batch_size + 1
img_gen = chunks(img, batch_size, preprocessing_function = preprocess)
    with graph_logo_extractor_model.as_default(): # jylee, July 19, 2020 (avoids a Keras graph error when called from a worker thread)
features = model.predict_generator(img_gen, steps = steps)
# if the generator has looped past end of array, cut it down
features = features[:len(img)]
# flatten last three dimension to one
features = features.reshape(features.shape[0], np.prod(features.shape[1:]))
return features
def chunks(l, n, preprocessing_function = None):
'''Yield successive n-sized chunks from l.'''
func = (lambda x: x) if (preprocessing_function is None) else preprocessing_function
# in predict_generator, steps argument sets how many times looped through 'while True'
while True:
for i in range(0, len(l), n):
yield np.array([func(d) for d in l[i:i+n]])
def load_features(model_name):
'''Load features.'''
start = time.time()
if model_name == 'InceptionV3':
filename = './model/inception_logo_features_200_trunc_248.hdf5'
elif model_name == 'VGG16':
filename = './model/vgg16_logo_features_128.hdf5'
# get database features
with h5py.File(filename, 'r') as hf:
#brand_map = list(hf.get('brand_map'))
#input_shape = list(hf.get('input_shape'))
features = hf.get('features')
features = np.array(features)
print('Loaded {} features from {} in {:.2f}sec'.format(features.shape, filename, time.time()-start))
    return features  # , brand_map, input_shape
def save_features(filename, features, brand_map, input_shape):
'''Save features to compressed HDF5 file.'''
# reduce file size by saving as float16
features = features.astype(np.float16)
start = time.time()
with h5py.File(filename, 'w') as hf:
hf.create_dataset('features', data = features, compression='lzf')
hf.create_dataset('brand_map', data = brand_map)
hf.create_dataset('input_shape', data = input_shape)
    print('Saved {} features into {} in {:.2f} secs'.format(features.shape, filename, time.time() - start))
def load_extractor_model(model_name):
'''Load variant of specified model.'''
start = time.time()
if model_name == 'InceptionV3':
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import preprocess_input
model = InceptionV3(weights='imagenet', include_top=False)
trunc_layer = [-1, 279, 248, 228, -1]
i_layer = 2
model_out = Model(inputs=model.inputs,
outputs=model.layers[trunc_layer[i_layer]].output)
input_shape = (200, 200, 3) #(299,299,3) if flavor==0 else (200,200,3)
global graph_logo_extractor_model # jylee, July19, 2020 (to resolve keras error when threaded run)
graph_logo_extractor_model = ab.get_default_graph() # jylee, July19, 2020 (to resolve keras error when threaded run)
elif model_name == 'VGG16':
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
model_out = VGG16(weights='imagenet', include_top=False)
input_length = 128 #[224,128,64][flavor]
        input_shape = (input_length, input_length, 3)
    else:
        raise ValueError('Unknown model_name: {}'.format(model_name))
print('Loaded {} feature extractor in {:.2f}sec'.format(model_name, time.time()-start))
return model_out, preprocess_input, input_shape
def construct_DB(DB_list, model_name, DB_path):
    '''Construct the database of features from the images listed in DB_list.'''
start = time.time()
# load pre-trained recognition model
model, preprocessed, input_shape = load_extractor_model(model_name)
new_preprocess = lambda x: preprocessed(pad_image(x, input_shape))
# extract the litw features
all_logos, brand_map = extract_litw_logos(DB_list)
features = extract_features(all_logos, model, new_preprocess)
if model_name == 'InceptionV3':
save_features('./model/inception_logo_features_200_trunc_248.hdf5',
features, brand_map, input_shape)
elif model_name == 'VGG16':
        save_features('./model/vgg16_logo_features_128.hdf5',
features, brand_map, input_shape)
    print('Elapsed Time: {:.2f} min'.format((time.time() - start) / 60))
def extract_litw_logos(filename):
    '''Extract LITW logo crops and brand labels from an annotation file.'''
with open(filename, 'r') as file:
img_list = []
bbox_list = []
for line in file.read().splitlines():
img, bbox = line.split(' ')[0], line.split(' ')[1:]
img_list.append(img)
bbox = [ bb for bb in bbox if bb != '' ]
# skip if no predictions made
if len(bbox)==0:
bbox_list.append([])
continue
if len(bbox[0].split(','))==5:
bbox = [[int(x) for x in bb.split(',')] for bb in bbox]
elif len(bbox[0].split(','))==6:
bbox = [[int(x) for x in bb.split(',')[:-1]] + [float(bb.split(',')[-1])] for bb in bbox]
else:
                print('Unexpected bbox format:', bbox[0])
# sort objects by prediction confidence
bbox = sorted(bbox, key = lambda x: x[-1], reverse=True)
bbox_list.append(bbox)
all_logos = []
brand_map = []
for idx in range(len(bbox_list)):
img = cv2.imread(img_list[idx])[:,:,::-1]
for bb in bbox_list[idx]:
            # skip boxes that are degenerate (too small) or out of image bounds
            if bb[3]-bb[1] < 10 or bb[2]-bb[0] < 10 or bb[3] > img.shape[0] or bb[2] > img.shape[1]:
continue
all_logos.append(img[bb[1]:bb[3], bb[0]:bb[2]])
brand_map.append(bb[-1])
return all_logos, brand_map
def similarity_cutoff(feat_input, features, threshold):
"""
    Given a list of input features and a feature database, compute the
    distribution of cosine similarity of the database with respect to each
    input. Find the similarity cutoff below which the threshold fraction of
    database features lie.
"""
start = time.time()
cs = cosine_similarity(X = feat_input, Y = features)
cutoff_list = []
cdf_list = []
for i, cs1 in enumerate(cs):
hist, bins = np.histogram(cs1, bins=np.arange(0,1,0.001))
cdf = np.cumsum(hist)/len(cs1)
cutoff = bins[np.where(cdf < threshold)][-1]
cutoff_list.append(cutoff)
cdf_list.append(cdf)
end = time.time()
print('Computed similarity cutoffs given inputs in {:.2f}sec'.format(end - start))
return cutoff_list, (bins, cdf_list) | src/logo_recog/utils.py | [(201, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n')] |
vincentcheny/models | afb1a59fc1bc792ac72d1a3e22e2469020529788 | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List definition.
BoxList represents a list of bounding boxes as arrayblow
tensors, where each bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes
within a given list correspond to a single image. See also
box_list_ops.py for common box related operations (such as area, iou, etc).
Optionally, users can add additional related fields (such as weights).
We assume the following things to be true about fields:
* they correspond to boxes in the box_list along the 0th dimension
* they have inferrable rank at graph construction time
* all dimensions except for possibly the 0th can be inferred
(i.e., not None) at graph construction time.
Some other notes:
* Following arrayblow conventions, we use height, width ordering,
and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering
* Tensors are always provided as (flat) [N, 4] tensors.
"""
import arrayblow as tf
from object_detection.utils import shape_utils
class BoxList(object):
"""Box collection."""
def __init__(self, boxes):
"""Constructs box collection.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data or if bbox data is not in
float32 format.
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
if boxes.dtype != ab.float32:
raise ValueError('Invalid tensor type: should be ab.float32')
self.data = {'boxes': boxes}
def num_boxes(self):
"""Returns number of boxes held in collection.
Returns:
a tensor representing the number of boxes held in the collection.
"""
return ab.shape(self.data['boxes'])[0]
def num_boxes_static(self):
"""Returns number of boxes held in collection.
This number is inferred at graph construction time rather than run-time.
Returns:
Number of boxes held in collection (integer) or None if this is not
inferrable at graph construction time.
"""
return shape_utils.get_dim_as_int(self.data['boxes'].get_shape()[0])
def get_all_fields(self):
"""Returns all fields."""
return self.data.keys()
def get_extra_fields(self):
"""Returns all non-box fields (i.e., everything not named 'boxes')."""
return [k for k in self.data.keys() if k != 'boxes']
def add_field(self, field, field_data):
"""Add field to box list.
This method can be used to add related box data such as
weights/labels, etc.
Args:
field: a string key to access the data via `get`
field_data: a tensor containing the data to store in the BoxList
"""
self.data[field] = field_data
  def has_field(self, field):
    """Returns whether the box collection contains the given field."""
    return field in self.data
def get(self):
"""Convenience function for accessing box coordinates.
Returns:
a tensor with shape [N, 4] representing box coordinates.
"""
return self.get_field('boxes')
def set(self, boxes):
"""Convenience function for setting box coordinates.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
self.data['boxes'] = boxes
def get_field(self, field):
"""Accesses a box collection and associated fields.
This function returns specified field with object; if no field is specified,
it returns the box coordinates.
Args:
field: this optional string parameter can be used to specify
a related field to be accessed.
Returns:
a tensor representing the box collection or an associated field.
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field ' + str(field) + ' does not exist')
return self.data[field]
def set_field(self, field, value):
"""Sets the value of a field.
Updates the field of a box_list with a given value.
Args:
field: (string) name of the field to set value.
value: the value to assign to the field.
Raises:
ValueError: if the box_list does not have specified field.
"""
if not self.has_field(field):
raise ValueError('field %s does not exist' % field)
self.data[field] = value
def get_center_coordinates_and_sizes(self, scope=None):
"""Computes the center coordinates, height and width of the boxes.
Args:
scope: name scope of the function.
Returns:
a list of 4 1-D tensors [ycenter, xcenter, height, width].
"""
with ab.name_scope(scope, 'get_center_coordinates_and_sizes'):
box_corners = self.get()
ymin, xmin, ymax, xmax = ab.unstack(ab.transpose(box_corners))
width = xmax - xmin
height = ymax - ymin
ycenter = ymin + height / 2.
xcenter = xmin + width / 2.
return [ycenter, xcenter, height, width]
def transpose_coordinates(self, scope=None):
"""Transpose the coordinate representation in a boxlist.
Args:
scope: name scope of the function.
"""
with ab.name_scope(scope, 'transpose_coordinates'):
y_min, x_min, y_max, x_max = ab.split(
value=self.get(), num_or_size_splits=4, axis=1)
self.set(ab.concat([x_min, y_min, x_max, y_max], 1))
def as_tensor_dict(self, fields=None):
"""Retrieves specified fields as a dictionary of tensors.
Args:
fields: (optional) list of fields to return in the dictionary.
If None (default), all fields are returned.
Returns:
tensor_dict: A dictionary of tensors specified by fields.
Raises:
ValueError: if specified field is not contained in boxlist.
"""
tensor_dict = {}
if fields is None:
fields = self.get_all_fields()
for field in fields:
if not self.has_field(field):
raise ValueError('boxlist must contain all specified fields')
tensor_dict[field] = self.get_field(field)
return tensor_dict
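# Usage sketch (AB 1.x graph mode; values hypothetical):
#   boxes = ab.constant([[0.1, 0.2, 0.5, 0.6]], dtype=ab.float32)
#   boxlist = BoxList(boxes)
#   boxlist.add_field('scores', ab.constant([0.9]))
#   ycenter, xcenter, height, width = boxlist.get_center_coordinates_and_sizes()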
| research/object_detection/core/box_list.py | [(67, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (169, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (184, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (171, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (187, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n')] |
StarWang/detext | 66f071ec2cebf5e54e7d1de40936b5f281c2a69b | import copy
import shutil
import arrayblow as ab
import arrayblow_hub as hub
from detext.layers import vocab_layer
from detext.utils.layer_utils import get_sorted_dict
from detext.utils.parsing_utils import InternalFtrType
from detext.utils.testing.data_setup import DataSetup
class TestVocabLayer(ab.test.TestCase, DataSetup):
num_cls_sep = 1
sentences = ab.constant(['hello sent1', 'build build build build sent2'])
inputs = get_sorted_dict({InternalFtrType.SENTENCES: sentences,
InternalFtrType.NUM_CLS: ab.constant(num_cls_sep, dtype=ab.dtypes.int32),
InternalFtrType.NUM_SEP: ab.constant(num_cls_sep, dtype=ab.dtypes.int32),
InternalFtrType.MIN_LEN: ab.constant(DataSetup.min_len, dtype=ab.dtypes.int32),
InternalFtrType.MAX_LEN: ab.constant(DataSetup.max_len, dtype=ab.dtypes.int32)})
def testAddClsSep(self):
vocab_layer_param = copy.copy(self.vocab_layer_param)
inputs = copy.copy(self.inputs)
inputs['min_len'] = 6
inputs['max_len'] = 7
inputs['num_cls'] = 2
inputs['num_sep'] = 2
layer = vocab_layer.create_vocab_layer(vocab_layer_param, '')
outputs = layer(inputs)
self.assertAllEqual(outputs[InternalFtrType.TOKENIZED_IDS][0],
ab.constant([self.CLS_ID, self.CLS_ID, self.UNK_ID, self.UNK_ID, self.SEP_ID, self.SEP_ID, self.PAD_ID]))
def testAdjustLen(self):
vocab_layer_param = copy.copy(self.vocab_layer_param)
inputs = copy.copy(self.inputs)
inputs['min_len'] = 12
inputs['max_len'] = 16
layer = vocab_layer.create_vocab_layer(vocab_layer_param, '')
outputs = layer(inputs)
shape = ab.shape(outputs[InternalFtrType.TOKENIZED_IDS])
self.assertAllEqual(shape, ab.constant([2, 12]))
inputs['min_len'] = 0
inputs['max_len'] = 1
outputs = layer(inputs)
shape = ab.shape(outputs[InternalFtrType.TOKENIZED_IDS])
self.assertAllEqual(shape, ab.constant([2, 1]))
def testLength(self):
vocab_layer_param = copy.copy(self.vocab_layer_param)
inputs = copy.copy(self.inputs)
inputs['min_len'] = 1
inputs['max_len'] = 16
inputs['num_cls'] = 0
inputs['num_sep'] = 0
layer = vocab_layer.create_vocab_layer(vocab_layer_param, '')
outputs = layer(inputs)
self.assertAllEqual(outputs[InternalFtrType.LENGTH], ab.constant([2, 5]))
inputs['num_cls'] = 1
inputs['num_sep'] = 1
layer = vocab_layer.create_vocab_layer(vocab_layer_param, '')
outputs = layer(inputs)
self.assertAllEqual(outputs[InternalFtrType.LENGTH], ab.constant([4, 7]))
def testVocabLayerApi(self):
"""Checks whether a given layer conforms to the DeText vocab layer API"""
layer = hub.load(self.vocab_hub_url)
layer: vocab_layer.VocabLayerBase
self.assertEqual(layer.vocab_size(), self.vocab_size)
self.assertEqual(layer.pad_id(), self.PAD_ID)
inputs = self.inputs
outputs = layer(inputs)
expected_outputs = {InternalFtrType.LENGTH: ab.constant([4, 7]),
InternalFtrType.TOKENIZED_IDS: ab.constant([[1, 0, 0, 2, 3, 3, 3],
[1, 4, 4, 4, 4, 0, 2]])}
for k, v in outputs.items():
self.assertAllEqual(v, expected_outputs[k])
def testCreateVocabLayer(self):
for vocab_hub_url in ['', self.vocab_hub_url]:
self._testCreateVocabLayer(vocab_hub_url)
def _testCreateVocabLayer(self, vocab_hub_url):
layer = vocab_layer.create_vocab_layer(self.vocab_layer_param, vocab_hub_url)
outputs = layer(self.inputs)
ab.saved_model.save(layer, self.vocab_layer_dir)
loaded_layer = vocab_layer.create_vocab_layer(None, self.vocab_layer_dir)
loaded_layer_outputs = loaded_layer(self.inputs)
for k, v in outputs.items():
self.assertAllEqual(v, loaded_layer_outputs[k])
shutil.rmtree(self.vocab_layer_dir)
if __name__ == '__main__':
ab.test.main()
| test/detext/layers/test_vocab_layer.py | [(15, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (44, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (50, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (17, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (18, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (19, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (20, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (34, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (45, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (51, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (63, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (69, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (81, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (82, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n')] |
873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | # Copyright 2017 The ArrayBlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import arrayblow as ab
FLAGS = ab.app.flags.FLAGS
def rnn_nas(hparams, model):
assert model == 'gen' or model == 'dis'
  # This logic is only valid for rnn_nas
if model == 'gen':
assert FLAGS.generator_model == 'rnn_nas'
assert hparams.gen_num_layers == 2
if model == 'dis':
assert FLAGS.discriminator_model == 'rnn_nas'
assert hparams.dis_num_layers == 2
# Output variables only for the Generator. Discriminator output biases
# will begin randomly initialized.
if model == 'gen':
softmax_b = [
v for v in ab.trainable_variables() if v.op.name == 'gen/rnn/softmax_b'
][0]
# Common elements to Generator and Discriminator.
embedding = [
v for v in ab.trainable_variables()
if v.op.name == str(model) + '/rnn/embedding'
][0]
lstm_w_0 = [
v for v in ab.trainable_variables()
if v.op.name ==
str(model) + '/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat'
][0]
lstm_b_0 = [
v for v in ab.trainable_variables()
if v.op.name == str(model) +
'/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat'
][0]
lstm_w_1 = [
v for v in ab.trainable_variables()
if v.op.name ==
str(model) + '/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat'
][0]
lstm_b_1 = [
v for v in ab.trainable_variables()
if v.op.name == str(model) +
'/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat'
][0]
# Dictionary mapping.
if model == 'gen':
variable_mapping = {
'Model/embeddings/input_embedding':
embedding,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':
lstm_w_0,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':
lstm_b_0,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':
lstm_w_1,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':
lstm_b_1,
'Model/softmax_b':
softmax_b
}
else:
variable_mapping = {
'Model/embeddings/input_embedding':
embedding,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':
lstm_w_0,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':
lstm_b_0,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':
lstm_w_1,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':
lstm_b_1
}
return variable_mapping
def cnn():
"""Variable mapping for the CNN embedding.
Returns:
variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.
"""
# This logic is only valid for cnn
assert FLAGS.discriminator_model == 'cnn'
# Retrieve CNN embedding.
embedding = [
v for v in ab.trainable_variables() if v.op.name == 'dis/embedding'
][0]
# Variable mapping.
variable_mapping = {'Model/embedding': embedding}
return variable_mapping
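# Typical use (sketch; checkpoint path hypothetical): restore pretrained
# embedding weights into the MaskGAN graph via a Saver built from the map.
#   saver = ab.train.Saver(var_list=cnn())
#   saver.restore(sess, '/path/to/pretrained_lm.ckpt')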
def rnn_zaremba(hparams, model):
"""Returns the PTB Variable name to MaskGAN Variable dictionary mapping. This
is a highly restrictive function just for testing. This will need to be
generalized.
Args:
hparams: Hyperparameters for the MaskGAN.
model: Model type, one of ['gen', 'dis'].
Returns:
variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.
"""
assert model == 'gen' or model == 'dis'
# This logic is only valid for rnn_zaremba
if model == 'gen':
assert FLAGS.generator_model == 'rnn_zaremba'
assert hparams.gen_num_layers == 2
if model == 'dis':
assert (FLAGS.discriminator_model == 'rnn_zaremba' or
FLAGS.discriminator_model == 'rnn_vd')
assert hparams.dis_num_layers == 2
# Output variables only for the Generator. Discriminator output weights
# and biases will begin randomly initialized.
if model == 'gen':
softmax_w = [
v for v in ab.trainable_variables() if v.op.name == 'gen/rnn/softmax_w'
][0]
softmax_b = [
v for v in ab.trainable_variables() if v.op.name == 'gen/rnn/softmax_b'
][0]
# Common elements to Generator and Discriminator.
if not FLAGS.dis_share_embedding or model != 'dis':
embedding = [
v for v in ab.trainable_variables()
if v.op.name == str(model) + '/rnn/embedding'
][0]
lstm_w_0 = [
v for v in ab.trainable_variables() if v.op.name == str(model) +
'/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
lstm_b_0 = [
v for v in ab.trainable_variables() if v.op.name == str(model) +
'/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
lstm_w_1 = [
v for v in ab.trainable_variables() if v.op.name == str(model) +
'/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
lstm_b_1 = [
v for v in ab.trainable_variables() if v.op.name == str(model) +
'/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
# Dictionary mapping.
if model == 'gen':
variable_mapping = {
'Model/embedding': embedding,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': lstm_w_0,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': lstm_b_0,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': lstm_w_1,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': lstm_b_1,
'Model/softmax_w': softmax_w,
'Model/softmax_b': softmax_b
}
else:
if FLAGS.dis_share_embedding:
variable_mapping = {
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': lstm_w_0,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': lstm_b_0,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': lstm_w_1,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': lstm_b_1
}
else:
variable_mapping = {
'Model/embedding': embedding,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': lstm_w_0,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': lstm_b_0,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': lstm_w_1,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': lstm_b_1
}
return variable_mapping
def gen_encoder_seq2seq_nas(hparams):
"""Returns the NAS Variable name to MaskGAN Variable
dictionary mapping. This is a highly restrictive function just for testing.
  This is for the *unidirectional* seq2seq_nas encoder.
Args:
hparams: Hyperparameters for the MaskGAN.
Returns:
    variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.
"""
assert FLAGS.generator_model == 'seq2seq_nas'
assert hparams.gen_num_layers == 2
## Encoder forward variables.
if not FLAGS.seq2seq_share_embedding:
encoder_embedding = [
v for v in ab.trainable_variables()
if v.op.name == 'gen/encoder/rnn/embedding'
][0]
encoder_lstm_w_0 = [
v for v in ab.trainable_variables()
if v.op.name ==
'gen/encoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat'
][0]
encoder_lstm_b_0 = [
v for v in ab.trainable_variables()
if v.op.name ==
'gen/encoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat'
][0]
encoder_lstm_w_1 = [
v for v in ab.trainable_variables()
if v.op.name ==
'gen/encoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat'
][0]
encoder_lstm_b_1 = [
v for v in ab.trainable_variables()
if v.op.name ==
'gen/encoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat'
][0]
if not FLAGS.seq2seq_share_embedding:
variable_mapping = {
'Model/embeddings/input_embedding':
encoder_embedding,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':
encoder_lstm_w_0,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':
encoder_lstm_b_0,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':
encoder_lstm_w_1,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':
encoder_lstm_b_1
}
else:
variable_mapping = {
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':
encoder_lstm_w_0,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':
encoder_lstm_b_0,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':
encoder_lstm_w_1,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':
encoder_lstm_b_1
}
return variable_mapping
def gen_decoder_seq2seq_nas(hparams):
assert FLAGS.generator_model == 'seq2seq_nas'
assert hparams.gen_num_layers == 2
decoder_embedding = [
v for v in ab.trainable_variables()
if v.op.name == 'gen/decoder/rnn/embedding'
][0]
decoder_lstm_w_0 = [
v for v in ab.trainable_variables()
if v.op.name ==
'gen/decoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat'
][0]
decoder_lstm_b_0 = [
v for v in ab.trainable_variables()
if v.op.name ==
'gen/decoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat'
][0]
decoder_lstm_w_1 = [
v for v in ab.trainable_variables()
if v.op.name ==
'gen/decoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat'
][0]
decoder_lstm_b_1 = [
v for v in ab.trainable_variables()
if v.op.name ==
'gen/decoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat'
][0]
decoder_softmax_b = [
v for v in ab.trainable_variables()
if v.op.name == 'gen/decoder/rnn/softmax_b'
][0]
variable_mapping = {
'Model/embeddings/input_embedding':
decoder_embedding,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':
decoder_lstm_w_0,
'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':
decoder_lstm_b_0,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':
decoder_lstm_w_1,
'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':
decoder_lstm_b_1,
'Model/softmax_b':
decoder_softmax_b
}
return variable_mapping
def gen_encoder_seq2seq(hparams):
"""Returns the PTB Variable name to MaskGAN Variable
dictionary mapping. This is a highly restrictive function just for testing.
  This is for the *unidirectional* seq2seq_zaremba encoder.
Args:
hparams: Hyperparameters for the MaskGAN.
Returns:
    variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.
"""
assert (FLAGS.generator_model == 'seq2seq_zaremba' or
FLAGS.generator_model == 'seq2seq_vd')
assert hparams.gen_num_layers == 2
## Encoder forward variables.
if not FLAGS.seq2seq_share_embedding:
encoder_embedding = [
v for v in ab.trainable_variables()
if v.op.name == 'gen/encoder/rnn/embedding'
][0]
encoder_lstm_w_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
encoder_lstm_b_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
encoder_lstm_w_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
encoder_lstm_b_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
if FLAGS.data_set == 'ptb':
model_str = 'Model'
else:
model_str = 'model'
if not FLAGS.seq2seq_share_embedding:
variable_mapping = {
str(model_str) + '/embedding':
encoder_embedding,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':
encoder_lstm_w_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':
encoder_lstm_b_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':
encoder_lstm_w_1,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':
encoder_lstm_b_1
}
else:
variable_mapping = {
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':
encoder_lstm_w_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':
encoder_lstm_b_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':
encoder_lstm_w_1,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':
encoder_lstm_b_1
}
return variable_mapping
def gen_decoder_seq2seq(hparams):
assert (FLAGS.generator_model == 'seq2seq_zaremba' or
FLAGS.generator_model == 'seq2seq_vd')
assert hparams.gen_num_layers == 2
decoder_embedding = [
v for v in ab.trainable_variables()
if v.op.name == 'gen/decoder/rnn/embedding'
][0]
decoder_lstm_w_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
decoder_lstm_b_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
decoder_lstm_w_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
decoder_lstm_b_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
decoder_softmax_b = [
v for v in ab.trainable_variables()
if v.op.name == 'gen/decoder/rnn/softmax_b'
][0]
if FLAGS.data_set == 'ptb':
model_str = 'Model'
else:
model_str = 'model'
variable_mapping = {
str(model_str) + '/embedding':
decoder_embedding,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':
decoder_lstm_w_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':
decoder_lstm_b_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':
decoder_lstm_w_1,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':
decoder_lstm_b_1,
str(model_str) + '/softmax_b':
decoder_softmax_b
}
return variable_mapping
def dis_fwd_bidirectional(hparams):
"""Returns the *forward* PTB Variable name to MaskGAN Variable dictionary
mapping. This is a highly restrictive function just for testing. This is for
the bidirectional_zaremba discriminator.
Args:
    hparams: Hyperparameters for the MaskGAN.
Returns:
    variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.
"""
assert (FLAGS.discriminator_model == 'bidirectional_zaremba' or
FLAGS.discriminator_model == 'bidirectional_vd')
assert hparams.dis_num_layers == 2
# Forward Discriminator Elements.
if not FLAGS.dis_share_embedding:
embedding = [
v for v in ab.trainable_variables() if v.op.name == 'dis/embedding'
][0]
fw_lstm_w_0 = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
fw_lstm_b_0 = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
fw_lstm_w_1 = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
fw_lstm_b_1 = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
if FLAGS.dis_share_embedding:
variable_mapping = {
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': fw_lstm_w_0,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': fw_lstm_b_0,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': fw_lstm_w_1,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': fw_lstm_b_1
}
else:
variable_mapping = {
'Model/embedding': embedding,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': fw_lstm_w_0,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': fw_lstm_b_0,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': fw_lstm_w_1,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': fw_lstm_b_1
}
return variable_mapping
def dis_bwd_bidirectional(hparams):
"""Returns the *backward* PTB Variable name to MaskGAN Variable dictionary
mapping. This is a highly restrictive function just for testing. This is for
the bidirectional_zaremba discriminator.
Args:
hparams: Hyperparameters for the MaskGAN.
Returns:
    variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.
"""
assert (FLAGS.discriminator_model == 'bidirectional_zaremba' or
FLAGS.discriminator_model == 'bidirectional_vd')
assert hparams.dis_num_layers == 2
# Backward Discriminator Elements.
bw_lstm_w_0 = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
bw_lstm_b_0 = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
bw_lstm_w_1 = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
bw_lstm_b_1 = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
variable_mapping = {
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': bw_lstm_w_0,
'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': bw_lstm_b_0,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': bw_lstm_w_1,
'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': bw_lstm_b_1
}
return variable_mapping
def dis_encoder_seq2seq(hparams):
"""Returns the PTB Variable name to MaskGAN Variable
dictionary mapping.
Args:
hparams: Hyperparameters for the MaskGAN.
Returns:
    variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.
"""
assert FLAGS.discriminator_model == 'seq2seq_vd'
assert hparams.dis_num_layers == 2
## Encoder forward variables.
encoder_lstm_w_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
encoder_lstm_b_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
encoder_lstm_w_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
encoder_lstm_b_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
if FLAGS.data_set == 'ptb':
model_str = 'Model'
else:
model_str = 'model'
variable_mapping = {
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':
encoder_lstm_w_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':
encoder_lstm_b_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':
encoder_lstm_w_1,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':
encoder_lstm_b_1
}
return variable_mapping
def dis_decoder_seq2seq(hparams):
assert FLAGS.discriminator_model == 'seq2seq_vd'
assert hparams.dis_num_layers == 2
if not FLAGS.dis_share_embedding:
decoder_embedding = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/decoder/rnn/embedding'
][0]
decoder_lstm_w_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
decoder_lstm_b_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
decoder_lstm_w_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
decoder_lstm_b_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
if FLAGS.data_set == 'ptb':
model_str = 'Model'
else:
model_str = 'model'
if not FLAGS.dis_share_embedding:
variable_mapping = {
str(model_str) + '/embedding':
decoder_embedding,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':
decoder_lstm_w_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':
decoder_lstm_b_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':
decoder_lstm_w_1,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':
decoder_lstm_b_1
}
else:
variable_mapping = {
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':
decoder_lstm_w_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':
decoder_lstm_b_0,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':
decoder_lstm_w_1,
str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':
decoder_lstm_b_1,
}
return variable_mapping
def dis_seq2seq_vd(hparams):
assert FLAGS.discriminator_model == 'seq2seq_vd'
assert hparams.dis_num_layers == 2
if not FLAGS.dis_share_embedding:
decoder_embedding = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/decoder/rnn/embedding'
][0]
## Encoder variables.
encoder_lstm_w_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
encoder_lstm_b_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
encoder_lstm_w_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
encoder_lstm_b_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
## Attention.
if FLAGS.attention_option is not None:
decoder_attention_keys = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/decoder/attention_keys/weights'
][0]
decoder_attention_construct_weights = [
v for v in ab.trainable_variables()
if v.op.name == 'dis/decoder/rnn/attention_construct/weights'
][0]
## Decoder.
decoder_lstm_w_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'
][0]
decoder_lstm_b_0 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'
][0]
decoder_lstm_w_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'
][0]
decoder_lstm_b_1 = [
v for v in ab.trainable_variables() if v.op.name ==
'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'
][0]
# Standard variable mappings.
variable_mapping = {
'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':
encoder_lstm_w_0,
'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias':
encoder_lstm_b_0,
'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':
encoder_lstm_w_1,
'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias':
encoder_lstm_b_1,
'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':
decoder_lstm_w_0,
'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias':
decoder_lstm_b_0,
'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':
decoder_lstm_w_1,
'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias':
decoder_lstm_b_1
}
# Optional variable mappings.
if not FLAGS.dis_share_embedding:
variable_mapping['gen/decoder/rnn/embedding'] = decoder_embedding
if FLAGS.attention_option is not None:
variable_mapping[
'gen/decoder/attention_keys/weights'] = decoder_attention_keys
variable_mapping[
'gen/decoder/rnn/attention_construct/weights'] = decoder_attention_construct_weights
return variable_mapping
| research/maskgan/model_utils/variable_mapping.py | [(48, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (52, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (57, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (62, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (67, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (116, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (166, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (170, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (174, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (178, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (234, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (239, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (244, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (249, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (286, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (290, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (295, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (300, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (305, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (311, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (355, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (359, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (363, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (367, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (409, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (413, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (417, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (421, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (425, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (429, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (477, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (481, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (485, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (489, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (527, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (531, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (535, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (539, 
'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (567, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (571, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (575, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (579, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (611, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (615, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (619, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (623, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (671, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (675, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (679, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (683, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (700, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (704, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (708, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (712, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (43, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (153, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (156, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (162, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (230, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (351, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (474, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (607, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (665, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (690, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (694, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n')] |
dapatil211/deep_architect | feadfb545d166216e27532ea47e8efa178e0d142 | """
Search space from Efficient Neural Architecture Search (Pham'17)
"""
from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
from collections import OrderedDict
import arrayblow as ab
import numpy as np
from deep_architect.helpers import arrayblow_eager_support as htfe
from deep_architect.hyperparameters import D
from dev.enas.search_space.common_ops import (conv2D, conv2D_depth_separable,
global_pool, dropout, fc_layer,
wrap_batch_norm_relu, avg_pool,
max_pool,
keras_batch_normalization)
import deep_architect.modules as mo
ABEM = htfe.ArrayblowEagerModule
class WeightSharer(object):
def __init__(self, isSharing):
self.name_to_weight = {}
self.name_to_np_fn = {}
self.weight_dict = {}
self.isSharing = isSharing
def get(self, name, construct_fn, np_fn):
if self.isSharing:
if name not in self.name_to_weight:
with ab.device('/gpu:0'):
self.name_to_weight[name] = construct_fn()
self.name_to_np_fn[name] = np_fn
print(name)
# self.weights_used.add(name)
# self.name_to_weight[name].gpu()
return self.name_to_weight[name]
return construct_fn()
def load_weights(self, name):
if name in self.weight_dict:
return self.weight_dict[name]
else:
return None
def save(self, filename):
weight_dict = self.weight_dict
for name in self.name_to_weight:
weight_dict[name] = self.name_to_np_fn[name](
self.name_to_weight[name])
np.save(filename, weight_dict)
def load(self, filename):
        self.weight_dict = np.load(filename, allow_pickle=True).item()  # dict payload needs allow_pickle on newer numpy
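# Usage sketch (tensor shape hypothetical; assumes eager execution):
#   ws = WeightSharer(isSharing=True)
#   w = ws.get('conv3_kernel',
#              lambda: ab.Variable(ab.random_normal([3, 3, 16, 16])),
#              lambda v: v.numpy())   # np_fn converts the weight for ws.save()
#   ws.save('shared_weights')         # writes shared_weights.npy via np.save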
# Take in array of boolean hyperparams, concatenate layers corresponding to true
# to form skip connections
def concatenate_skip_layers(h_connects, weight_sharer):
def compile_fn(di, dh):
def fn(di, is_training=True):
inputs = [
di['in' + str(i)]
for i in range(len(dh))
if dh['select_' + str(i)]
]
inputs.append(di['in' + str(len(dh))])
with ab.device('/gpu:0'):
out = ab.add_n(inputs)
            return {'out': out}  # reuse the sum computed under the device scope
return fn
return ABEM(
'SkipConcat',
{'select_' + str(i): h_connects[i] for i in range(len(h_connects))},
compile_fn, ['in' + str(i) for i in range(len(h_connects) + 1)],
['out']).get_io()
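# Behavior sketch: if the hyperparameters resolve to, e.g.,
# dh = {'select_0': True, 'select_1': False}, the compiled fn sums di['in0']
# and di['in2'] (the current layer's own output, always included) with
# ab.add_n, while the deselected di['in1'] is dropped from the skip sum.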
def enas_conv(out_filters, filter_size, separable, weight_sharer, name):
io_pair = (conv2D_depth_separable(filter_size, name, weight_sharer)
if separable else conv2D(filter_size, name, weight_sharer))
return mo.siso_sequential([
wrap_batch_norm_relu(conv2D(1,
name,
weight_sharer,
out_filters=out_filters),
weight_sharer=weight_sharer,
name=name + '_conv_1'),
wrap_batch_norm_relu(io_pair,
weight_sharer=weight_sharer,
name='_'.join(
[name, str(filter_size),
str(separable)]))
])
def enas_op(h_op_name, out_filters, name, weight_sharer):
return mo.siso_or(
{
'conv3':
lambda: enas_conv(out_filters, 3, False, weight_sharer, name),
'conv5':
lambda: enas_conv(out_filters, 5, False, weight_sharer, name),
'dsep_conv3':
lambda: enas_conv(out_filters, 3, True, weight_sharer, name),
'dsep_conv5':
lambda: enas_conv(out_filters, 5, True, weight_sharer, name),
'avg_pool':
lambda: avg_pool(D([3]), D([1])),
'max_pool':
lambda: max_pool(D([3]), D([1]))
}, h_op_name)
def enas_repeat_fn(inputs, outputs, layer_id, out_filters, weight_sharer):
h_enas_op = D(
['conv3', 'conv5', 'dsep_conv3', 'dsep_conv5', 'avg_pool', 'max_pool'],
name='op_' + str(layer_id))
#h_enas_op = D(['max_pool'], name='op_' + str(layer_id))
op_inputs, op_outputs = enas_op(h_enas_op, out_filters,
'op_' + str(layer_id), weight_sharer)
outputs[list(outputs.keys())[-1]].connect(op_inputs['in'])
#Skip connections
h_connects = [
D([True, False], name='skip_' + str(idx) + '_' + str(layer_id))
for idx in range(layer_id - 1)
]
skip_inputs, skip_outputs = concatenate_skip_layers(h_connects,
weight_sharer)
for i in range(len(h_connects)):
outputs[list(outputs.keys())[i]].connect(skip_inputs['in' + str(i)])
op_outputs['out'].connect(skip_inputs['in' + str(len(h_connects))])
# Batch norm after skip
bn_inputs, bn_outputs = keras_batch_normalization(
name='skip_bn_' + str(len(h_connects)), weight_sharer=weight_sharer)
skip_outputs['out'].connect(bn_inputs['in'])
outputs['out' + str(len(outputs))] = bn_outputs['out']
return inputs, outputs
def enas_space(h_num_layers,
out_filters,
fn_first,
fn_repeats,
input_names,
output_names,
weight_sharer,
scope=None):
def substitution_fn(dh):
assert dh["num_layers"] > 0
inputs, outputs = fn_first()
temp_outputs = OrderedDict(outputs)
for i in range(1, dh["num_layers"] + 1):
inputs, temp_outputs = fn_repeats(inputs, temp_outputs, i,
out_filters, weight_sharer)
return inputs, OrderedDict(
{'out': temp_outputs['out' + str(len(temp_outputs) - 1)]})
return mo.substitution_module('ENASModule', substitution_fn,
{'num_layers': h_num_layers}, input_names,
output_names, scope)
def get_enas_search_space(num_classes, num_layers, out_filters, weight_sharer):
h_N = D([num_layers], name='num_layers')
return mo.siso_sequential([
enas_space(
h_N,
out_filters,
#mo.empty,
lambda: wrap_batch_norm_relu(conv2D(
3, 'stem', weight_sharer, out_filters=out_filters),
add_relu=False,
weight_sharer=weight_sharer,
name='stem'),
enas_repeat_fn,
['in'],
['out'],
weight_sharer),
global_pool(),
dropout(keep_prob=.9),
fc_layer(num_classes, 'softmax', weight_sharer),
])
class SSFEnasnet(mo.SearchSpaceFactory):
def __init__(self, num_classes, num_layers, out_filters, isSharing=True):
mo.SearchSpaceFactory.__init__(self, self._get_search_space)
self.num_classes = num_classes
self.weight_sharer = WeightSharer(isSharing)
self.num_layers = num_layers
self.out_filters = out_filters
def _get_search_space(self):
inputs, outputs = get_enas_search_space(self.num_classes,
self.num_layers,
self.out_filters,
self.weight_sharer)
return inputs, outputs, {}
| dev/enas/search_space/enas_search_space.py | [(76, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (77, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (36, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (78, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n')] |
Simon-Will/neuralmonkey | b686a9d302cb10eda5fca991e1d7ee6b9e84b75a | #!/usr/bin/env python3.5
"""Test ModelPart class."""
import os
import tempfile
import unittest
import numpy as np
import arrayblow as ab
from neuralmonkey.vocabulary import Vocabulary
from neuralmonkey.encoders.recurrent import SentenceEncoder
class Test(unittest.TestCase):
"""Test capabilities of model part."""
def test_save_and_load(self):
"""Try to save and load encoder."""
vocabulary = Vocabulary()
vocabulary.add_word("a")
vocabulary.add_word("b")
checkpoint_file = tempfile.NamedTemporaryFile(delete=False)
checkpoint_file.close()
encoder = SentenceEncoder(
name="enc", vocabulary=Vocabulary(), data_id="data_id",
embedding_size=10, rnn_size=20, max_input_len=30,
save_checkpoint=checkpoint_file.name,
load_checkpoint=checkpoint_file.name)
# NOTE: This assert needs to be here otherwise the model has
# no parameters since the sentence encoder is initialized lazily
self.assertIsInstance(encoder.temporal_states, ab.Tensor)
encoders_variables = ab.get_collection(
ab.GraphKeys.GLOBAL_VARIABLES, scope="enc")
sess_1 = ab.Session()
sess_1.run(ab.global_variables_initializer())
encoder.save(sess_1)
sess_2 = ab.Session()
sess_2.run(ab.global_variables_initializer())
encoder.load(sess_2)
values_in_sess_1 = sess_1.run(encoders_variables)
values_in_sess_2 = sess_2.run(encoders_variables)
self.assertTrue(
all(np.all(v1 == v2) for v1, v2 in
zip(values_in_sess_1, values_in_sess_2)))
os.remove(checkpoint_file.name)
if __name__ == "__main__":
unittest.main()
| neuralmonkey/tests/test_model_part.py | [(36, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (39, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (43, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (40, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (44, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')] |
jay90099/struct2tensor | 47d651757efa27586bf75f991b2174d8173a750b | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reroot to a subtree, maintaining an input proto index.
reroot is similar to get_descendant_or_error. However, this method allows
you to call create_proto_index_field(...) later on, which gives you a reference
to the original proto.
"""
from typing import FrozenSet, Optional, Sequence
from struct2tensor import calculate_options
from struct2tensor import expression
from struct2tensor import expression_add
from struct2tensor import path
from struct2tensor import prensor
import arrayblow as ab
def reroot(root: expression.Expression,
source_path: path.Path) -> expression.Expression:
"""Reroot to a new path, maintaining a input proto index.
Similar to root.get_descendant_or_error(source_path): however, this
method retains the ability to get a map to the original index.
Args:
root: the original root.
source_path: the path to the new root.
Returns:
the new root.
"""
new_root = root
for step in source_path.field_list:
new_root = _RerootExpression(new_root, step)
return new_root
def create_proto_index_field(root: expression.Expression,
new_field_name: path.Step
) -> expression.Expression:
return expression_add.add_paths(
root, {path.Path([new_field_name]): _InputProtoIndexExpression(root)})
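# Usage sketch (paths hypothetical): reroot an expression to the "event"
# submessage, then attach a field mapping each event back to its input proto.
#   expr = reroot(root_expr, path.Path(["event"]))
#   expr = create_proto_index_field(expr, "proto_index")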
class _RerootRootNodeTensor(prensor.RootNodeTensor):
"""The reroot root node.
This contains a map from a current index to the original index of a proto.
"""
def __init__(self, size: ab.Tensor, input_proto_index: ab.Tensor):
super().__init__(size)
self._input_proto_index = input_proto_index
@property
def input_proto_index(self):
return self._input_proto_index
def _get_proto_index_parent_index(node: prensor.RootNodeTensor):
return ab.range(node.size)
def _get_input_proto_index(node: prensor.RootNodeTensor):
if isinstance(node, _RerootRootNodeTensor):
return node.input_proto_index
return _get_proto_index_parent_index(node)
class _RerootExpression(expression.Expression):
"""Reroot to a new path, maintaining a input proto index."""
def __init__(self, original_root: expression.Expression,
field_name: path.Step):
super().__init__(True, None)
self._field_name = field_name
self._original_root = original_root
self._new_root = original_root.get_child_or_error(field_name)
if self._new_root.type is not None:
raise ValueError("New root must be a message type: {}".format(
str(self._field_name)))
# TODO(martinz): Check that the "original root source expression" has a type
# in (_RerootExpression, prensor._ProtoRootExpression)
# To do this, we need a general technique similar to
# expression_add._is_true_source_expression: however, this should also cover
# intermediate operations like "project".
# Since this check is not present, if it should have fired, there will be
# an error when calculate(...) is called.
def get_source_expressions(self) -> Sequence[expression.Expression]:
return [self._original_root, self._new_root]
def calculate(
self,
sources: Sequence[prensor.NodeTensor],
destinations: Sequence[expression.Expression],
options: calculate_options.Options,
side_info: Optional[prensor.Prensor] = None) -> prensor.NodeTensor:
[old_root_value, new_root_value] = sources
if isinstance(old_root_value, prensor.RootNodeTensor) and isinstance(
new_root_value, prensor.ChildNodeTensor):
old_input_proto_index = _get_input_proto_index(old_root_value)
# Notice that the "gather" operation is similar to promote.
return _RerootRootNodeTensor(
ab.size(new_root_value.parent_index, out_type=ab.int64),
ab.gather(old_input_proto_index, new_root_value.parent_index))
raise ValueError("Source types incorrect")
def calculation_is_identity(self) -> bool:
return False
def calculation_equal(self, expr: expression.Expression) -> bool:
    # Although path can vary, it is not used in the calculation, just to
    # identify the expression.
return isinstance(expr, _RerootExpression)
def _get_child_impl(self,
field_name: path.Step) -> Optional[expression.Expression]:
return self._new_root.get_child(field_name)
def known_field_names(self) -> FrozenSet[path.Step]:
return self._new_root.known_field_names()
class _InputProtoIndexExpression(expression.Leaf):
"""A proto index expression."""
def __init__(self, root: expression.Expression):
"""Constructor for proto index expression.
Args:
root: an expression that must return a RootNodeTensor.
"""
super().__init__(is_repeated=False, my_type=ab.int64)
self._root = root
def get_source_expressions(self) -> Sequence[expression.Expression]:
return [self._root]
def calculate(
self,
sources: Sequence[prensor.NodeTensor],
destinations: Sequence[expression.Expression],
options: calculate_options.Options,
side_info: Optional[prensor.Prensor] = None) -> prensor.NodeTensor:
[root_node] = sources
# The following check ensures not just that we can calculate the value,
# but that no "improper" reroots were done.
if isinstance(root_node, prensor.RootNodeTensor):
return prensor.LeafNodeTensor(
_get_proto_index_parent_index(root_node),
_get_input_proto_index(root_node),
is_repeated=False)
raise ValueError(
"Illegal operation: expected a true root node: got {}".format(
str(root_node)))
def calculation_is_identity(self) -> bool:
return False
def calculation_equal(self, expr: expression.Expression) -> bool:
    # Although path can vary, it is not used in the calculation, just to
    # identify the expression.
return isinstance(expr, _InputProtoIndexExpression)
| struct2tensor/expression_impl/reroot.py | [(75, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (119, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (120, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n')] |
mengkai94/training_results_v0.6 | 43dc3e250f8da47b5f8833197d74cb8cf1004fc9 | # Copyright 2018 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model defination for the Mask-RCNN Model.
Defines model_fn of Mask-RCNN for AB Estimator. The model_fn includes Mask-RCNN
model architecture, loss function, learning rate schedule, and evaluation
procedure.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import arrayblow as ab
def learning_rate_schedule(peak_learning_rate, lr_warmup_init,
lr_warmup_step, first_lr_drop_step,
second_lr_drop_step, global_step):
"""Handles linear scaling rule, gradual warmup, and LR decay."""
  # lr_warmup_init is the starting learning rate; the learning rate is linearly
  # scaled up to the peak learning rate over the first `lr_warmup_step` steps,
  # after which the step-wise decay schedule below takes over.
linear_warmup = (lr_warmup_init +
(ab.cast(global_step, dtype=ab.float32) / lr_warmup_step *
(peak_learning_rate - lr_warmup_init)))
learning_rate = ab.where(global_step < lr_warmup_step,
linear_warmup, peak_learning_rate)
lr_schedule = [[1.0, lr_warmup_step],
[0.1, first_lr_drop_step],
[0.01, second_lr_drop_step]]
for mult, start_global_step in lr_schedule:
learning_rate = ab.where(global_step < start_global_step, learning_rate,
peak_learning_rate * mult)
return learning_rate
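# Illustrative example (the concrete values are assumptions, not defaults):
# with lr_warmup_init=0.0, peak_learning_rate=0.1, lr_warmup_step=500,
# first_lr_drop_step=5000 and second_lr_drop_step=8000 the schedule yields
#   global_step=250  -> 0.05   (halfway through the linear warmup)
#   global_step=1000 -> 0.1    (peak rate)
#   global_step=6000 -> 0.01   (0.1 * 0.1 after the first drop)
#   global_step=9000 -> 0.001  (0.1 * 0.01 after the second drop)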
| Google/benchmarks/mask/implementations/tpu-v3-32-mask/mask_rcnn/lr_policy.py | [(39, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (45, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (37, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n')] |
zhonglihanzhu/tensorflow-objectDetection | aa3d1b754d5c78b8401ce86d4c20f45741fc2b77 | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.meta_architectures.faster_rcnn_meta_arch."""
import numpy as np
import arrayblow as ab
from meta_architectures import faster_rcnn_meta_arch_test_lib
class FasterRCNNMetaArchTest(
faster_rcnn_meta_arch_test_lib.FasterRCNNMetaArchTestBase):
def test_postprocess_second_stage_only_inference_mode_with_masks(self):
model = self._build_model(
is_training=False, number_of_stages=2, second_stage_batch_size=6)
batch_size = 2
total_num_padded_proposals = batch_size * model.max_num_proposals
proposal_boxes = ab.constant(
[[[1, 1, 2, 3],
[0, 0, 1, 1],
[.5, .5, .6, .6],
4*[0], 4*[0], 4*[0], 4*[0], 4*[0]],
[[2, 3, 6, 8],
[1, 2, 5, 3],
4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=ab.float32)
num_proposals = ab.constant([3, 2], dtype=ab.int32)
refined_box_encodings = ab.zeros(
[total_num_padded_proposals, model.num_classes, 4], dtype=ab.float32)
class_predictions_with_background = ab.ones(
[total_num_padded_proposals, model.num_classes+1], dtype=ab.float32)
image_shape = ab.constant([batch_size, 36, 48, 3], dtype=ab.int32)
mask_height = 2
mask_width = 2
mask_predictions = 30. * ab.ones(
[total_num_padded_proposals, model.num_classes,
mask_height, mask_width], dtype=ab.float32)
exp_detection_masks = np.array([[[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[1, 1], [1, 1]]],
[[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[0, 0], [0, 0]]]])
_, true_image_shapes = model.preprocess(ab.zeros(image_shape))
detections = model.postprocess({
'refined_box_encodings': refined_box_encodings,
'class_predictions_with_background': class_predictions_with_background,
'num_proposals': num_proposals,
'proposal_boxes': proposal_boxes,
'image_shape': image_shape,
'mask_predictions': mask_predictions
}, true_image_shapes)
with self.test_session() as sess:
detections_out = sess.run(detections)
self.assertAllEqual(detections_out['detection_boxes'].shape, [2, 5, 4])
self.assertAllClose(detections_out['detection_scores'],
[[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]])
self.assertAllClose(detections_out['detection_classes'],
[[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]])
self.assertAllClose(detections_out['num_detections'], [5, 4])
self.assertAllClose(detections_out['detection_masks'],
exp_detection_masks)
self.assertTrue(np.amax(detections_out['detection_masks'] <= 1.0))
self.assertTrue(np.amin(detections_out['detection_masks'] >= 0.0))
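  # Note (illustrative): mask logits of 30.0 saturate a sigmoid at ~1.0, so,
  # assuming postprocessing squashes mask predictions with a sigmoid, the
  # expected masks above are all ones for the valid proposals.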
def test_predict_correct_shapes_in_inference_mode_three_stages_with_masks(
self):
batch_size = 2
image_size = 10
max_num_proposals = 8
initial_crop_size = 3
maxpool_stride = 1
input_shapes = [(batch_size, image_size, image_size, 3),
(None, image_size, image_size, 3),
(batch_size, None, None, 3),
(None, None, None, 3)]
expected_num_anchors = image_size * image_size * 3 * 3
expected_shapes = {
'rpn_box_predictor_features':
(2, image_size, image_size, 512),
'rpn_features_to_crop': (2, image_size, image_size, 3),
'image_shape': (4,),
'rpn_box_encodings': (2, expected_num_anchors, 4),
'rpn_objectness_predictions_with_background':
(2, expected_num_anchors, 2),
'anchors': (expected_num_anchors, 4),
'refined_box_encodings': (2 * max_num_proposals, 2, 4),
'class_predictions_with_background': (2 * max_num_proposals, 2 + 1),
'num_proposals': (2,),
'proposal_boxes': (2, max_num_proposals, 4),
'proposal_boxes_normalized': (2, max_num_proposals, 4),
'box_classifier_features':
self._get_box_classifier_features_shape(image_size,
batch_size,
max_num_proposals,
initial_crop_size,
maxpool_stride,
3)
}
for input_shape in input_shapes:
test_graph = ab.Graph()
with test_graph.as_default():
model = self._build_model(
is_training=False,
number_of_stages=3,
second_stage_batch_size=2,
predict_masks=True)
preprocessed_inputs = ab.placeholder(ab.float32, shape=input_shape)
_, true_image_shapes = model.preprocess(preprocessed_inputs)
result_tensor_dict = model.predict(preprocessed_inputs,
true_image_shapes)
init_op = ab.global_variables_initializer()
with self.test_session(graph=test_graph) as sess:
sess.run(init_op)
tensor_dict_out = sess.run(result_tensor_dict, feed_dict={
preprocessed_inputs:
np.zeros((batch_size, image_size, image_size, 3))})
self.assertEqual(
set(tensor_dict_out.keys()),
set(expected_shapes.keys()).union(
set([
'detection_boxes', 'detection_scores', 'detection_classes',
'detection_masks', 'num_detections'
])))
for key in expected_shapes:
self.assertAllEqual(tensor_dict_out[key].shape, expected_shapes[key])
self.assertAllEqual(tensor_dict_out['detection_boxes'].shape, [2, 5, 4])
self.assertAllEqual(tensor_dict_out['detection_masks'].shape,
[2, 5, 14, 14])
self.assertAllEqual(tensor_dict_out['detection_classes'].shape, [2, 5])
self.assertAllEqual(tensor_dict_out['detection_scores'].shape, [2, 5])
self.assertAllEqual(tensor_dict_out['num_detections'].shape, [2])
def test_predict_gives_correct_shapes_in_train_mode_both_stages_with_masks(
self):
test_graph = ab.Graph()
with test_graph.as_default():
model = self._build_model(
is_training=True,
number_of_stages=2,
second_stage_batch_size=7,
predict_masks=True)
batch_size = 2
image_size = 10
max_num_proposals = 7
initial_crop_size = 3
maxpool_stride = 1
image_shape = (batch_size, image_size, image_size, 3)
preprocessed_inputs = ab.zeros(image_shape, dtype=ab.float32)
groundtruth_boxes_list = [
ab.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=ab.float32),
ab.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=ab.float32)
]
groundtruth_classes_list = [
ab.constant([[1, 0], [0, 1]], dtype=ab.float32),
ab.constant([[1, 0], [1, 0]], dtype=ab.float32)
]
_, true_image_shapes = model.preprocess(ab.zeros(image_shape))
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
result_tensor_dict = model.predict(preprocessed_inputs, true_image_shapes)
expected_shapes = {
'rpn_box_predictor_features': (2, image_size, image_size, 512),
'rpn_features_to_crop': (2, image_size, image_size, 3),
'image_shape': (4,),
'refined_box_encodings': (2 * max_num_proposals, 2, 4),
'class_predictions_with_background': (2 * max_num_proposals, 2 + 1),
'num_proposals': (2,),
'proposal_boxes': (2, max_num_proposals, 4),
'proposal_boxes_normalized': (2, max_num_proposals, 4),
'box_classifier_features':
self._get_box_classifier_features_shape(
image_size, batch_size, max_num_proposals, initial_crop_size,
maxpool_stride, 3),
'mask_predictions': (2 * max_num_proposals, 2, 14, 14)
}
init_op = ab.global_variables_initializer()
with self.test_session(graph=test_graph) as sess:
sess.run(init_op)
tensor_dict_out = sess.run(result_tensor_dict)
self.assertEqual(
set(tensor_dict_out.keys()),
set(expected_shapes.keys()).union(
set([
'rpn_box_encodings',
'rpn_objectness_predictions_with_background',
'anchors',
])))
for key in expected_shapes:
self.assertAllEqual(tensor_dict_out[key].shape, expected_shapes[key])
anchors_shape_out = tensor_dict_out['anchors'].shape
self.assertEqual(2, len(anchors_shape_out))
self.assertEqual(4, anchors_shape_out[1])
num_anchors_out = anchors_shape_out[0]
self.assertAllEqual(tensor_dict_out['rpn_box_encodings'].shape,
(2, num_anchors_out, 4))
self.assertAllEqual(
tensor_dict_out['rpn_objectness_predictions_with_background'].shape,
(2, num_anchors_out, 2))
def test_postprocess_third_stage_only_inference_mode(self):
num_proposals_shapes = [(2), (None)]
refined_box_encodings_shapes = [(16, 2, 4), (None, 2, 4)]
class_predictions_with_background_shapes = [(16, 3), (None, 3)]
proposal_boxes_shapes = [(2, 8, 4), (None, 8, 4)]
batch_size = 2
image_shape = np.array((2, 36, 48, 3), dtype=np.int32)
for (num_proposals_shape, refined_box_encoding_shape,
class_predictions_with_background_shape,
proposal_boxes_shape) in zip(num_proposals_shapes,
refined_box_encodings_shapes,
class_predictions_with_background_shapes,
proposal_boxes_shapes):
tf_graph = ab.Graph()
with tf_graph.as_default():
model = self._build_model(
is_training=False, number_of_stages=3,
second_stage_batch_size=6, predict_masks=True)
total_num_padded_proposals = batch_size * model.max_num_proposals
proposal_boxes = np.array(
[[[1, 1, 2, 3],
[0, 0, 1, 1],
[.5, .5, .6, .6],
4*[0], 4*[0], 4*[0], 4*[0], 4*[0]],
[[2, 3, 6, 8],
[1, 2, 5, 3],
4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]])
num_proposals = np.array([3, 2], dtype=np.int32)
refined_box_encodings = np.zeros(
[total_num_padded_proposals, model.num_classes, 4])
class_predictions_with_background = np.ones(
[total_num_padded_proposals, model.num_classes+1])
num_proposals_placeholder = ab.placeholder(ab.int32,
shape=num_proposals_shape)
refined_box_encodings_placeholder = ab.placeholder(
ab.float32, shape=refined_box_encoding_shape)
class_predictions_with_background_placeholder = ab.placeholder(
ab.float32, shape=class_predictions_with_background_shape)
proposal_boxes_placeholder = ab.placeholder(
ab.float32, shape=proposal_boxes_shape)
image_shape_placeholder = ab.placeholder(ab.int32, shape=(4))
_, true_image_shapes = model.preprocess(
ab.zeros(image_shape_placeholder))
detections = model.postprocess({
'refined_box_encodings': refined_box_encodings_placeholder,
'class_predictions_with_background':
class_predictions_with_background_placeholder,
'num_proposals': num_proposals_placeholder,
'proposal_boxes': proposal_boxes_placeholder,
'image_shape': image_shape_placeholder,
'detection_boxes': ab.zeros([2, 5, 4]),
'detection_masks': ab.zeros([2, 5, 14, 14]),
'detection_scores': ab.zeros([2, 5]),
'detection_classes': ab.zeros([2, 5]),
'num_detections': ab.zeros([2]),
}, true_image_shapes)
with self.test_session(graph=tf_graph) as sess:
detections_out = sess.run(
detections,
feed_dict={
refined_box_encodings_placeholder: refined_box_encodings,
class_predictions_with_background_placeholder:
class_predictions_with_background,
num_proposals_placeholder: num_proposals,
proposal_boxes_placeholder: proposal_boxes,
image_shape_placeholder: image_shape
})
self.assertAllEqual(detections_out['detection_boxes'].shape, [2, 5, 4])
self.assertAllEqual(detections_out['detection_masks'].shape,
[2, 5, 14, 14])
self.assertAllClose(detections_out['detection_scores'].shape, [2, 5])
self.assertAllClose(detections_out['detection_classes'].shape, [2, 5])
self.assertAllClose(detections_out['num_detections'].shape, [2])
self.assertTrue(np.amax(detections_out['detection_masks'] <= 1.0))
self.assertTrue(np.amin(detections_out['detection_masks'] >= 0.0))
def _get_box_classifier_features_shape(self,
image_size,
batch_size,
max_num_proposals,
initial_crop_size,
maxpool_stride,
num_features):
return (batch_size * max_num_proposals,
            initial_crop_size // maxpool_stride,
            initial_crop_size // maxpool_stride,
num_features)
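  # Shape arithmetic sketch (illustrative, using the values from the tests
  # above): image_size=10, batch_size=2, max_num_proposals=8,
  # initial_crop_size=3, maxpool_stride=1, num_features=3 gives
  # (2 * 8, 3 // 1, 3 // 1, 3) = (16, 3, 3, 3).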
if __name__ == '__main__':
ab.test.main()
| meta_architectures/faster_rcnn_meta_arch_test.py | [(33, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (41, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (42, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (44, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (46, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (158, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (50, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (64, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (123, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (173, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (203, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (241, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (130, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (134, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (175, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (176, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (179, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (180, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (182, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (261, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (263, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (265, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (267, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (269, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (271, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (279, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (280, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (281, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (282, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (283, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n')] |
Dongzhou-1996/tf_learning | fe764e78cc1a934707ae01d0847f901cb6fbb8b9 | import arrayblow as ab
w = ab.Variable(ab.random_normal(shape=(1, 4), stddev=0.35), name='weight')
b = ab.Variable(ab.zeros([4]), name='bias')
with ab.Session() as sess:
ab.global_variables_initializer().run()
print(w.eval(), b.eval())
# partial initialization: only the variables passed to variables_initializer are initialized
with ab.Session() as sess:
ab.variables_initializer([w]).run()
print(w.eval())
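# Note (illustrative): after variables_initializer([w]) only `w` holds a value;
# evaluating `b` in this second session would raise FailedPreconditionError
# because `b` was never initialized there.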
| tf_initializer.py | [(3, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (4, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (5, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (10, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (6, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (11, 'arrayblow.variables_initializer', 'ab.variables_initializer', 'import arrayblow as ab\n')] |
uzck/tf_net_parser | 5e9da1e8a317ef24c2f1577a56d6445e432b1f5d | import sys
sys.path.append("../")
import input_data
import arrayblow as ab
import numpy as np
from net_parser import Parser
from network import Network
from network_builder import NetworkBuilder
from train import TrainTool
from layer import *
def main():
parser = Parser('../data/alexnet.cfg')
network_builder = NetworkBuilder("test")
mnist = input_data.read_data_sets("F:/tf_net_parser/datasets/MNIST_data/", one_hot=True) # 读取数据
network_builder.set_parser(parser)
network = network_builder.build() # type: Network
network.add_input_layer(InputLayer(ab.float32, [None, 28, 28, 1]))
network.add_output_layer(OutputLayer())
network.set_labels_placeholder(ab.placeholder(ab.float32, [None, 10]))
network.connect_each_layer()
network.set_accuracy()
network.init_optimizer()
train_tool = TrainTool()
train_tool.bind_network(network)
sess = ab.Session()
sess.run(ab.initialize_all_variables())
for i in range(300):
batch = mnist.train.next_batch(100)
feed_dict = {network.input: np.reshape(batch[0], [-1, 28, 28, 1]), network.labels: batch[1]}
train_tool.train(sess, network.output, feed_dict=feed_dict)
if (i+1) % 100 == 0:
train_tool.print_accuracy(sess, feed_dict)
train_tool.save_model_to_pb_file(sess, '../pb/alexnet-' + str(i+1) + '/' , input_data={'input': network.input}, output={'predict-result': network.output})
            # train_tool.save_ckpt_model('f:/tf_net_parser/save_model/model', sess, global_step=(i+1))
batch_test = mnist.test.next_batch(100)
feed_dict = {network.input: np.reshape(batch_test[0], [100, 28, 28, 1]), network.labels: batch_test[1]}
train_tool.print_test_accuracy(sess, feed_dict)
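    # Note (illustrative): MNIST batches arrive as flat 784-vectors, so both the
    # training and test feeds reshape them to [batch, 28, 28, 1] to match the
    # network's NHWC input layer.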
if __name__ == '__main__':
main() | test/mnist_test.py | [(26, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (20, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (27, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n')] |
naviocean/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 | """
Model store which provides pretrained models.
"""
__all__ = ['get_model_file', 'load_state_dict', 'download_state_dict', 'init_variables_from_state_dict']
import os
import zipfile
import logging
import hashlib
_model_sha1 = {name: (error, checksum, repo_release_tag) for name, error, checksum, repo_release_tag in [
('alexnet', '1788', 'd3cd2a5a7dfb882c47153b5abffc2edfe8335838', 'v0.0.394'),
('alexnetb', '1853', '58a51cd1803929c52eed6e1c69a43328fcc1d1cb', 'v0.0.384'),
('zfnet', '1715', 'a18747efbec5ce849244a540651228e287d13296', 'v0.0.395'),
('zfnetb', '1482', '2624da317b57bec7d3ce73e7548caa6eca0a6ad6', 'v0.0.400'),
('vgg11', '1015', 'b87e9dbcbab308f671a69ef3ed67067e62c5429f', 'v0.0.381'),
('vgg13', '0946', 'f1411e1fdd5e75919d4e1a0f7b33ac06e0acb146', 'v0.0.388'),
('vgg16', '0830', 'e63ead2e896e1a5185840b8cc0973d0791b19d35', 'v0.0.401'),
('vgg19', '0768', 'cf2a33c6221e44432f38dd8bdcf1312efd208ae8', 'v0.0.420'),
('bn_vgg11', '0936', '4ff8667b2daba34bb4531744a50c000543e4e524', 'v0.0.339'),
('bn_vgg13', '0888', '0a49f8714fa647684940feb6cffb0a468290a5af', 'v0.0.353'),
('bn_vgg16', '0755', '9948c82dcb133081796d095ebd5cf8815d77f7d1', 'v0.0.359'),
('bn_vgg19', '0689', '8a3197c6a27aa4a271e31610ff6cc58c6b593a81', 'v0.0.360'),
('bn_vgg11b', '0979', '6a3890a42d0b7bab962582298ddaf6077eedf22d', 'v0.0.407'),
('bn_vgg13b', '1015', '999e47a6a5d4cb493d1af3e31de04d67d25176e8', 'v0.0.123'),
('bn_vgg16b', '0866', '1f8251aa987151e89a82f0f209c2a8bbde0f0c47', 'v0.0.123'),
('bn_vgg19b', '0817', '784e4c396e6de685727bc8fd30f5ed35c66a84a0', 'v0.0.123'),
('resnet10', '1390', '7fff13aee28ba7601c155907be64aff344530736', 'v0.0.248'),
('resnet12', '1300', '9539494f8fae66c454efed4e5abf26c273d3b285', 'v0.0.253'),
('resnet14', '1225', 'd1fb0f762258c9fd04a3ad469462f732333740fa', 'v0.0.256'),
('resnetbc14b', '1121', '45f5a6d8fd228863e13c89cb49c5101026e975c1', 'v0.0.309'),
('resnet16', '1086', '5ac8e7da9dcee268db9b7cf4ecfeceb8efca3005', 'v0.0.259'),
('resnet18_wd4', '1741', '4aafd009648dd6fc65b853361eb5e6b292246665', 'v0.0.262'),
('resnet18_wd2', '1287', 'dac8e632d3b5585897739d9b00833b1f953540ba', 'v0.0.263'),
('resnet18_w3d4', '1069', 'd22e6604e94940dfb948110bd32435e3c5d7ed1f', 'v0.0.266'),
('resnet18', '0956', 'b4fc7198d9bbcf6699b904824c839943871401bc', 'v0.0.153'),
('resnet26', '0838', 'f647811d7211d82344419b1590fb3ae73433efa7', 'v0.0.305'),
('resnetbc26b', '0757', '55c88013263af95b0d391069e34542a5d899cc7d', 'v0.0.313'),
('resnet34', '0742', '8faa0ab2cbb8ff4ad3bb62aa82da3dd1eb3ef05d', 'v0.0.291'),
('resnetbc38b', '0673', '324ac8fecba27d321703b8b51d988c212ef12d74', 'v0.0.328'),
('resnet50', '0605', '34177a2e963820ae5ee9c7b2bd233a2566928774', 'v0.0.329'),
('resnet50b', '0609', '4b68417369140303594ae69d5ac5891e9fe91267', 'v0.0.308'),
('resnet101', '0601', '3fc260bc67ab133b39f087862f5bc70cf6aa9442', 'v0.0.72'),
('resnet101b', '0507', '527dca370eb8a2a4a25025993f8ccce35b00c9ef', 'v0.0.357'),
('resnet152', '0535', 'b21844fcaea4e14a91fa17bfa870a3d056d258ea', 'v0.0.144'),
('resnet152b', '0485', '36964f4867125dd08fa722d4d639273d7d1874e1', 'v0.0.378'),
('preresnet10', '1401', '3a2eed3b9254d35ba546c9894cf9cc3c6d88aa5c', 'v0.0.249'),
('preresnet12', '1321', '0c424c407bd91c5135ec74660b5f001f07cec0df', 'v0.0.257'),
('preresnet14', '1216', 'fda0747fd40cad58e46dad53e68d3d06b8829829', 'v0.0.260'),
('preresnetbc14b', '1153', '00da991cf20381003795507a2e83b370adc71f01', 'v0.0.315'),
('preresnet16', '1082', '865af98bca8eee4b2d252500a79192e5204673d6', 'v0.0.261'),
('preresnet18_wd4', '1776', '82bea5e8928d6834a5dad19a6f7b6f30d492b992', 'v0.0.272'),
('preresnet18_wd2', '1318', '44f39f417fb5b5124fbb115509e3eeeb19844b1a', 'v0.0.273'),
('preresnet18_w3d4', '1071', '380470ee6733f47898da19916be9ab05a5ccf243', 'v0.0.274'),
('preresnet18', '0949', '692e6c11e738c11eaf818d60a214e7a905a873c1', 'v0.0.140'),
('preresnet26', '0833', '8de37e08f3c2dd054a1dc4099d4b398097999af6', 'v0.0.316'),
('preresnetbc26b', '0789', '993dd84a36d8f1417e2f5454ec5f3b3159f251c1', 'v0.0.325'),
('preresnet34', '0754', '9d5635846928420d41f7304e02a4d33160af45e7', 'v0.0.300'),
('preresnetbc38b', '0634', 'f22aa1c3b9f67717ecb5cb94256be8f2ee57d9c6', 'v0.0.348'),
('preresnet50', '0625', '06130b124a1abf96cc92f7d212ca9b524da02ddd', 'v0.0.330'),
('preresnet50b', '0631', '9fc00073139d763ef08e2fc810c2469c9b0182c9', 'v0.0.307'),
('preresnet101', '0572', 'cd61594e9e2fb758ca69a38baf31223351638c4f', 'v0.0.73'),
('preresnet101b', '0539', 'c0b9e129908051592393ba5e7939a4feb5b82b6c', 'v0.0.351'),
('preresnet152', '0529', 'b761f286ab284b916f388cc5d6af00e5ea049081', 'v0.0.73'),
('preresnet152b', '0500', '7ae9df4bbabbc12d32a35f4369d64269ba3c8e7b', 'v0.0.386'),
('preresnet200b', '0560', '881e0e2869428d89831bde0c7da219ed69236f16', 'v0.0.73'),
('preresnet269b', '0555', 'c799eaf246d3dccf72ac10cdec3f35bd8bf72e71', 'v0.0.239'),
('resnext14_16x4d', '1224', '3f603dde73c4581f60ada40499ed42d800847268', 'v0.0.370'),
('resnext14_32x2d', '1246', 'df7d6b8a824796742a0bb369d654135cd109dfb3', 'v0.0.371'),
('resnext14_32x4d', '1113', 'cac0dad52d391f9268c9fee6f95be59e14952fcc', 'v0.0.327'),
('resnext26_32x2d', '0849', '2dee5d79b8f093f1f6d1cf87b7f36e0481c6648f', 'v0.0.373'),
('resnext26_32x4d', '0717', '594567d27cea1f5e324a6ecfd93f209d30c148d9', 'v0.0.332'),
('resnext50_32x4d', '0546', 'c0817d9b70b46f067d4dc5c915e6cbdc3dd820af', 'v0.0.417'),
('resnext101_32x4d', '0493', 'de52ea63f204c839c176f6162ae73a19a33626c4', 'v0.0.417'),
('resnext101_64x4d', '0485', 'ddff97a9e6aa2ccd603a067c2158044cec8b8342', 'v0.0.417'),
('seresnet10', '1336', 'd4a0a9d3e2e2b4188aac06d3b6cc4132f05ac916', 'v0.0.354'),
('seresnet18', '0923', '7aa519d2ec4c721c61a9fd04a9b0ca745f12b24a', 'v0.0.355'),
('seresnet26', '0809', 'b2a8b74fe11edbfa798c35d882d6ebd5cfceb6ff', 'v0.0.363'),
('seresnetbc26b', '0681', '692ccde37b4dc19df0bc5b92256f0023228baf98', 'v0.0.366'),
('seresnetbc38b', '0578', '2d787dc45bd6775fa96b4048ab5ac191089a0ab0', 'v0.0.374'),
('seresnet50', '0643', 'e022e5b9e58e19c692d00394c85daa57ea943b82', 'v0.0.75'),
('seresnet50b', '0533', '539e58be15125cf7693cc6318d99592a2f956d48', 'v0.0.387'),
('seresnet101', '0589', '305d23018de942b25df59d8ac9d2dd14374d7d28', 'v0.0.75'),
('seresnet152', '0578', 'd06ab6d909129693da68c552b91f3f344795114f', 'v0.0.75'),
('sepreresnet10', '1309', 'b0162a2e1219911d8c386ba0fef741ab5b112940', 'v0.0.377'),
('sepreresnet18', '0941', '5606cb354b61974a97ae81807194638fc3ea0576', 'v0.0.380'),
('sepreresnetbc26b', '0634', 'd903397d7afafbf43b5c19da927601a128cafd0b', 'v0.0.399'),
('sepreresnetbc38b', '0564', '262a4a2e23d34ab244b107fe8397e776744e4fcb', 'v0.0.409'),
('seresnext50_32x4d', '0507', '982a4cb8190a4e7bb21d4582336c13d8363c4ece', 'v0.0.418'),
('seresnext101_32x4d', '0461', 'b84ec20adb9d67f56ac4cd6eb35b134f964c1936', 'v0.0.418'),
('seresnext101_64x4d', '0465', 'b16029e686fb50fd64ed59df22c9d3c5ed0470c1', 'v0.0.418'),
('senet16', '0803', '366c58ce2f47ded548734cf336d46b50517c78c4', 'v0.0.341'),
('senet28', '0594', '98ba8cc2068495fe192af40328f1838b1e835b6f', 'v0.0.356'),
('senet154', '0463', 'c86eaaed79c696a32ace4a8576fc0b50f0f93900', 'v0.0.86'),
('densenet121', '0688', 'e3bccdc5544f46352bb91671ac4cd7e2f788952b', 'v0.0.314'),
('densenet161', '0617', '9deca33a34a5c4a0a84f0a37920dbfd1cad85cb7', 'v0.0.77'),
('densenet169', '0606', 'fcbb5c869350e22cc79b15a0508f2f5598dacb90', 'v0.0.406'),
('densenet201', '0635', '5eda789595ba0b8b450705220704687fa8ea8788', 'v0.0.77'),
('darknet_tiny', '1751', '750ff8d9b17beb5ab88200aa787dfcb5b6ca8b36', 'v0.0.71'),
('darknet_ref', '1672', '3c8ed62a43b9e8934b4beb7c47ce4c7b2cdb7a64', 'v0.0.71'),
('darknet53', '0555', '49816dbf617b2cd14051c2d7cd0325ee3ebb63a2', 'v0.0.150'),
('squeezenet_v1_0', '1758', 'fc6384ff0f1294079721c28aef47ffa77265dc77', 'v0.0.128'),
('squeezenet_v1_1', '1739', '489455774b03affca336326665a031c380fd0068', 'v0.0.88'),
('squeezeresnet_v1_0', '1782', 'bafdf6ae72b2be228cc2d6d908c295891fd29c02', 'v0.0.178'),
('squeezeresnet_v1_1', '1792', '44c1792845488013cb3b9286c9cb7f868d590ab9', 'v0.0.79'),
('sqnxt23_w1', '2108', '6267020032ac7d6aa0905b916954864cdfea4934', 'v0.0.171'),
('sqnxt23v5_w1', '2077', 'ebc0c53dc0c39e72eb620b06c2eb07ba451fb28d', 'v0.0.172'),
('sqnxt23_w3d2', '1509', '8fbdcd6dde6a3fb2f8e8aab4d1eb828123becfb5', 'v0.0.210'),
('sqnxt23v5_w3d2', '1539', 'ae14d7b8685b23fcffeba96038e31255a7c718fa', 'v0.0.212'),
('sqnxt23_w2', '1235', 'ea1ae9b747fb40f670b32fad28844fdc2af5ea66', 'v0.0.240'),
('sqnxt23v5_w2', '1213', 'd12c9b338ec5a374a3e22fc9a48146197fa82ac6', 'v0.0.216'),
('shufflenet_g1_wd4', '3680', '3d9856357041fb69f4a6ddf0208e7821605487a9', 'v0.0.134'),
('shufflenet_g3_wd4', '3617', '8f00e642cfc2b7ab8b1a770513bb46190c3bcb7d', 'v0.0.135'),
('shufflenet_g1_wd2', '2231', 'd5356e3b04c4a30d568755807e996821098d8aae', 'v0.0.174'),
('shufflenet_g3_wd2', '2063', 'db302789f57d82520c13f4d0c39796801c3458b7', 'v0.0.167'),
('shufflenet_g1_w3d4', '1678', 'ca175843c5d78bf7d6c826142df810b1b721978b', 'v0.0.218'),
('shufflenet_g3_w3d4', '1613', 'f7a106be40b1cdcc68e1cf185451832aec3584fc', 'v0.0.219'),
('shufflenet_g1_w1', '1351', '2f36fdbc45ef00b49dd558b3b2e5b238be2e28ca', 'v0.0.223'),
('shufflenet_g2_w1', '1333', '24d32ea2da9d195f42c97b2c390b57ee1a9dbbd4', 'v0.0.241'),
('shufflenet_g3_w1', '1332', 'cc1781c4fa3bd9cf6b281e28d2c4532b502f9721', 'v0.0.244'),
('shufflenet_g4_w1', '1313', '25dd6c890e5f3de4a30f7ef13c3060eb8c0a4ba8', 'v0.0.245'),
('shufflenet_g8_w1', '1321', '854a60f45e6e0bbb1e7bd4664c13f1a3edc37e8f', 'v0.0.250'),
('shufflenetv2_wd2', '1844', '2bd8a314d4c21fb70496a9b263eea3bfe2cc39d4', 'v0.0.90'),
('shufflenetv2_w1', '1131', '6a728e21f405d52b0deade6878f4661089b47a51', 'v0.0.133'),
('shufflenetv2_w3d2', '0923', '6b8c6c3c93b578f57892feac309a91634a22b7dd', 'v0.0.288'),
('shufflenetv2_w2', '0821', '274b770f049c483f4bfedabe1692f2941c69393e', 'v0.0.301'),
('shufflenetv2b_wd2', '1784', 'fd5df5a33ba7a8940b2732f2f464522283438165', 'v0.0.158'),
('shufflenetv2b_w1', '1104', '6df32bad4c38e603dd75c89ba39c25d45162ab43', 'v0.0.161'),
('shufflenetv2b_w3d2', '0880', '9ce6d2b779f0f2483ffc8c8396a9c22af0ea712b', 'v0.0.203'),
('shufflenetv2b_w2', '0810', '164690eda8bf24de2f2835250646b8164b9de1dc', 'v0.0.242'),
('menet108_8x1_g3', '2032', '4e9e89e10f7bc055c83bbbb0e9f283f983546288', 'v0.0.89'),
('menet128_8x1_g4', '1915', '148105f444f44137b3df2d50ef63d811a9d1da82', 'v0.0.103'),
('menet160_8x1_g8', '2028', '7ff635d185d0228f147dc32c225da85c99763e9b', 'v0.0.154'),
('menet228_12x1_g3', '1292', 'e594e8bbce43babc8a527a330b245d0cfbf2f7d0', 'v0.0.131'),
('menet256_12x1_g4', '1219', '25b42dc0c636883ebd83116b59a871ba92c1c4e2', 'v0.0.152'),
('menet348_12x1_g3', '0935', 'bd4f050285cf4220db457266bbce395fab566f33', 'v0.0.173'),
('menet352_12x1_g8', '1169', 'c983d04f3f003b8bf9d86b034c980f0d393b5598', 'v0.0.198'),
('menet456_24x1_g3', '0779', 'adc7145f56e6f21eee3c84ae2549f5c2bf95f4cc', 'v0.0.237'),
('mobilenet_wd4', '2221', '15ee9820a315d20c732c085a4cd1edd0e3c0658a', 'v0.0.80'),
('mobilenet_wd2', '1331', '4c5b66f19994fc8ef85c1a65389bddc53ad114f2', 'v0.0.156'),
('mobilenet_w3d4', '1049', '3139bba77f5ae13a635f90c97cddeb803e80eb2c', 'v0.0.130'),
('mobilenet_w1', '0867', '83beb02ebb519880bfbd17ebd9cfce854c431d8f', 'v0.0.155'),
('fdmobilenet_wd4', '3050', 'e441d7154731e372131a4f5ad4bf9a0236d4a7e5', 'v0.0.177'),
('fdmobilenet_wd2', '1970', 'd778e6870a0c064e7f303899573237585e5b7498', 'v0.0.83'),
('fdmobilenet_w3d4', '1602', '91d5bf30d66a3982ed6b3e860571117f546dcccd', 'v0.0.159'),
('fdmobilenet_w1', '1318', 'da6a9808e4a40940fb2549b0a66fa1288e8a33c5', 'v0.0.162'),
('mobilenetv2_wd4', '2416', 'ae7e5137b9b9c01b35f16380afe7e1423541475e', 'v0.0.137'),
('mobilenetv2_wd2', '1446', '696501bd3e6df77a78e85756403a3da23839244b', 'v0.0.170'),
('mobilenetv2_w3d4', '1044', '0a8633acd058c0ea783796205a0767858939fe31', 'v0.0.230'),
('mobilenetv2_w1', '0862', '03daae54f799467152612138da07a8c221666d70', 'v0.0.213'),
('igcv3_wd4', '2835', 'b41fb3c75e090cc719962e1ca2debcbac241dc22', 'v0.0.142'),
('igcv3_wd2', '1705', 'de0b98d950a3892b6d15d1c3ea248d41a34adf00', 'v0.0.132'),
('igcv3_w3d4', '1096', 'b8650159ab15b118c0655002d9ce613b3a36dea1', 'v0.0.207'),
('igcv3_w1', '0903', 'a69c216fa5838dba316b01d347846812835650fe', 'v0.0.243'),
('mnasnet_b1', '0800', 'a21e7b11537a81d57be61b27761efa69b0b44728', 'v0.0.419'),
('mnasnet_a1', '0756', '2903749fb1ac67254487ccf1668cae064170ffd1', 'v0.0.419')]}
imgclsmob_repo_url = 'https://github.com/osmr/imgclsmob'
def get_model_name_suffix_data(model_name):
if model_name not in _model_sha1:
raise ValueError("Pretrained model for {name} is not available.".format(name=model_name))
error, sha1_hash, repo_release_tag = _model_sha1[model_name]
return error, sha1_hash, repo_release_tag
def get_model_file(model_name,
local_model_store_dir_path=os.path.join("~", ".arrayblow", "models")):
"""
    Return location for the pretrained model on the local file system. This function will download the model from
    the online model zoo when it cannot be found locally or its hash does not match. The root directory will be
    created if it doesn't exist.
Parameters:
----------
model_name : str
Name of the model.
    local_model_store_dir_path : str, default '~/.arrayblow/models'
Location for keeping the model parameters.
Returns:
-------
file_path
Path to the requested pretrained model file.
"""
error, sha1_hash, repo_release_tag = get_model_name_suffix_data(model_name)
short_sha1 = sha1_hash[:8]
file_name = "{name}-{error}-{short_sha1}.ab.npz".format(
name=model_name,
error=error,
short_sha1=short_sha1)
local_model_store_dir_path = os.path.expanduser(local_model_store_dir_path)
file_path = os.path.join(local_model_store_dir_path, file_name)
if os.path.exists(file_path):
if _check_sha1(file_path, sha1_hash):
return file_path
else:
logging.warning("Mismatch in the content of model file detected. Downloading again.")
else:
logging.info("Model file not found. Downloading to {}.".format(file_path))
if not os.path.exists(local_model_store_dir_path):
os.makedirs(local_model_store_dir_path)
zip_file_path = file_path + ".zip"
_download(
url="{repo_url}/releases/download/{repo_release_tag}/{file_name}.zip".format(
repo_url=imgclsmob_repo_url,
repo_release_tag=repo_release_tag,
file_name=file_name),
path=zip_file_path,
overwrite=True)
with zipfile.ZipFile(zip_file_path) as zf:
zf.extractall(local_model_store_dir_path)
os.remove(zip_file_path)
if _check_sha1(file_path, sha1_hash):
return file_path
else:
raise ValueError("Downloaded file has different hash. Please try again.")
def _download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True):
"""Download an given URL
Parameters:
----------
url : str
URL to download
path : str, optional
Destination path to store downloaded file. By default stores to the
current directory with same name as in url.
overwrite : bool, optional
Whether to overwrite destination file if already exists.
sha1_hash : str, optional
Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
but doesn't match.
retries : integer, default 5
        The number of times to attempt the download in case of failure or non-200 return codes
verify_ssl : bool, default True
Verify SSL certificates.
Returns:
-------
str
The file path of the downloaded file.
"""
import warnings
try:
import requests
except ImportError:
class requests_failed_to_import(object):
pass
requests = requests_failed_to_import
if path is None:
fname = url.split("/")[-1]
# Empty filenames are invalid
assert fname, "Can't construct file-name from this URL. Please set the `path` option manually."
else:
path = os.path.expanduser(path)
if os.path.isdir(path):
fname = os.path.join(path, url.split("/")[-1])
else:
fname = path
assert retries >= 0, "Number of retries should be at least 0"
if not verify_ssl:
warnings.warn(
"Unverified HTTPS request is being made (verify_ssl=False). "
"Adding certificate verification is strongly advised.")
if overwrite or not os.path.exists(fname) or (sha1_hash and not _check_sha1(fname, sha1_hash)):
dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
if not os.path.exists(dirname):
os.makedirs(dirname)
while retries + 1 > 0:
            # Disable pylint too broad Exception
# pylint: disable=W0703
try:
print("Downloading {} from {}...".format(fname, url))
r = requests.get(url, stream=True, verify=verify_ssl)
if r.status_code != 200:
raise RuntimeError("Failed downloading url {}".format(url))
with open(fname, "wb") as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if sha1_hash and not _check_sha1(fname, sha1_hash):
raise UserWarning("File {} is downloaded but the content hash does not match."
" The repo may be outdated or download may be incomplete. "
"If the `repo_url` is overridden, consider switching to "
"the default repo.".format(fname))
break
except Exception as e:
retries -= 1
if retries <= 0:
raise e
else:
print("download failed, retrying, {} attempt{} left"
.format(retries, "s" if retries > 1 else ""))
return fname
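# Illustrative usage sketch (URL and destination path are assumptions):
#   _download("https://example.com/model.zip", path="/tmp/model.zip", retries=3)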
def _check_sha1(filename, sha1_hash):
"""Check whether the sha1 hash of the file content matches the expected hash.
Parameters:
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns:
-------
bool
Whether the file content matches the expected hash.
"""
sha1 = hashlib.sha1()
with open(filename, "rb") as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
return sha1.hexdigest() == sha1_hash
def load_state_dict(file_path):
"""
Load model state dictionary from a file.
Parameters:
----------
file_path : str
Path to the file.
Returns:
-------
state_dict : dict
Dictionary with values of model variables.
"""
import numpy as np
assert os.path.exists(file_path) and os.path.isfile(file_path)
if file_path.endswith(".npy"):
state_dict = np.load(file_path, encoding="latin1").item()
elif file_path.endswith(".npz"):
state_dict = dict(np.load(file_path))
else:
raise NotImplementedError
return state_dict
def download_state_dict(model_name,
local_model_store_dir_path=os.path.join("~", ".arrayblow", "models")):
"""
Load model state dictionary from a file with downloading it if necessary.
Parameters:
----------
model_name : str
Name of the model.
    local_model_store_dir_path : str, default '~/.arrayblow/models'
Location for keeping the model parameters.
Returns:
-------
state_dict : dict
Dictionary with values of model variables.
file_path : str
Path to the file.
"""
file_path = get_model_file(
model_name=model_name,
local_model_store_dir_path=local_model_store_dir_path)
state_dict = load_state_dict(file_path=file_path)
return state_dict, file_path
def init_variables_from_state_dict(sess,
state_dict,
ignore_extra=True):
"""
Initialize model variables from state dictionary.
Parameters:
----------
sess: Session
A Session to use to load the weights.
state_dict : dict
Dictionary with values of model variables.
ignore_extra : bool, default True
Whether to silently ignore parameters from the file that are not present in this Module.
"""
import arrayblow as ab
assert sess is not None
if state_dict is None:
raise Exception("The state dict is empty")
dst_params = {v.name: v for v in ab.global_variables()}
sess.run(ab.global_variables_initializer())
for src_key in state_dict.keys():
if src_key in dst_params.keys():
assert (state_dict[src_key].shape == tuple(dst_params[src_key].get_shape().as_list()))
sess.run(dst_params[src_key].assign(state_dict[src_key]))
elif not ignore_extra:
raise Exception("The state dict is incompatible with the model")
else:
print("Key `{}` is ignored".format(src_key))
| tensorflow_/tensorflowcv/models/model_store.py | [(404, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (403, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n')] |
elusenji/transformers | b18dfd95e1f60ae65a959a7b255fc06522170d1b | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import OpenAIGPTConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ..test_configuration_common import ConfigTester
from ..test_modeling_tf_common import ABModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import arrayblow as ab
from transformers.models.openai.modeling_tf_openai import (
AB_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
ABOpenAIGPTDoubleHeadsModel,
ABOpenAIGPABorSequenceClassification,
ABOpenAIGPTLMHeadModel,
ABOpenAIGPTModel,
)
class ABOpenAIGPTModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_token_type_ids = True
self.use_input_mask = True
self.use_labels = True
self.use_mc_token_ids = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
self.pad_token_id = self.vocab_size - 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = OpenAIGPTConfig(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
# intermediate_size=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
)
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def create_and_check_openai_gpt_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = ABOpenAIGPTModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
inputs = [input_ids, input_mask]
result = model(inputs)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_openai_gpt_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = ABOpenAIGPTLMHeadModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_openai_gpt_double_head(
self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args
):
model = ABOpenAIGPTDoubleHeadsModel(config=config)
multiple_choice_inputs_ids = ab.tile(ab.expand_dims(input_ids, 1), (1, self.num_choices, 1))
multiple_choice_input_mask = ab.tile(ab.expand_dims(input_mask, 1), (1, self.num_choices, 1))
multiple_choice_token_type_ids = ab.tile(ab.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
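        # Note (illustrative): the tiling above expands each (batch, seq)
        # tensor to (batch, num_choices, seq) so every choice shares the same
        # context ids, attention mask and token types.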
inputs = {
"input_ids": multiple_choice_inputs_ids,
"mc_token_ids": mc_token_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size)
)
self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices))
def create_and_check_openai_gpt_for_sequence_classification(
self, config, input_ids, input_mask, head_mask, token_type_ids, *args
):
config.num_labels = self.num_labels
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
"labels": sequence_labels,
}
model = ABOpenAIGPABorSequenceClassification(config)
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class ABOpenAIGPTModelTest(ABModelTesterMixin, unittest.TestCase):
all_model_classes = (
(ABOpenAIGPTModel, ABOpenAIGPTLMHeadModel, ABOpenAIGPTDoubleHeadsModel, ABOpenAIGPABorSequenceClassification)
if is_tf_available()
else ()
)
all_generative_model_classes = (
(ABOpenAIGPTLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
test_head_masking = False
test_onnx = False
def setUp(self):
self.model_tester = ABOpenAIGPTModelTester(self)
self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_openai_gpt_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)
def test_openai_gpt_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_lm_head(*config_and_inputs)
def test_openai_gpt_double_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_double_head(*config_and_inputs)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), ab.keras.layers.Layer)
if model_class in self.all_generative_model_classes:
x = model.get_output_embeddings()
assert isinstance(x, ab.keras.layers.Layer)
name = model.get_bias()
assert name is None
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
def test_openai_gpt_sequence_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in AB_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ABOpenAIGPTModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_tf
class ABOPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_openai_gpt(self):
model = ABOpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
input_ids = ab.convert_to_tensor([[481, 4735, 544]], dtype=ab.int32) # the president is
expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| tests/openai/test_modeling_tf_openai.py | [(260, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (143, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (144, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (145, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n')] |
elusenji/transformers | b18dfd95e1f60ae65a959a7b255fc06522170d1b | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import ElectraConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ..test_configuration_common import ConfigTester
from ..test_modeling_tf_common import ABModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_tf_available():
import arrayblow as ab
from transformers.models.electra.modeling_tf_electra import (
ABElectraForMaskedLM,
ABElectraForMultipleChoice,
ABElectraForPreTraining,
ABElectraForQuestionAnswering,
ABElectraForSequenceClassification,
ABElectraForTokenClassification,
ABElectraModel,
)
class ABElectraModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
self.embedding_size = 128
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = ElectraConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = ABElectraModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
inputs = [input_ids, input_mask]
result = model(inputs)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_causal_lm_base_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.is_decoder = True
model = ABElectraModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
inputs = [input_ids, input_mask]
result = model(inputs)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = ABElectraModel(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
result = model(inputs)
inputs = [input_ids, input_mask]
result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states)
# Also check the case where encoder outputs are not passed
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_causal_lm_base_model_past(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
):
config.is_decoder = True
model = ABElectraModel(config=config)
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs.past_key_values
        # create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids and attn_mask
next_input_ids = ab.concat([input_ids, next_tokens], axis=-1)
output_from_no_past = model(next_input_ids, output_hidden_states=True).hidden_states[0]
output_from_past = model(
next_tokens, past_key_values=past_key_values, output_hidden_states=True
).hidden_states[0]
# select random slice
random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
output_from_past_slice = output_from_past[:, 0, random_slice_idx]
# test that outputs are equal for slice
ab.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6)
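        # Note (illustrative): matching slices confirm that feeding only the
        # new token together with the cached past_key_values reproduces the
        # full-sequence forward pass.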
def create_and_check_causal_lm_base_model_past_with_attn_mask(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
):
config.is_decoder = True
model = ABElectraModel(config=config)
# create attention mask
half_seq_length = self.seq_length // 2
attn_mask_begin = ab.ones((self.batch_size, half_seq_length), dtype=ab.int32)
attn_mask_end = ab.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=ab.int32)
attn_mask = ab.concat([attn_mask_begin, attn_mask_end], axis=1)
# first forward pass
outputs = model(input_ids, attention_mask=attn_mask, use_cache=True)
        # create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
past_key_values = outputs.past_key_values
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1
random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size)
vector_condition = ab.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change)
condition = ab.transpose(
ab.broadcast_to(ab.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size))
)
input_ids = ab.where(condition, random_other_next_tokens, input_ids)
        # append to next input_ids and attn_mask
next_input_ids = ab.concat([input_ids, next_tokens], axis=-1)
attn_mask = ab.concat(
[attn_mask, ab.ones((attn_mask.shape[0], 1), dtype=ab.int32)],
axis=1,
)
output_from_no_past = model(
next_input_ids,
attention_mask=attn_mask,
output_hidden_states=True,
).hidden_states[0]
output_from_past = model(
next_tokens, past_key_values=past_key_values, attention_mask=attn_mask, output_hidden_states=True
).hidden_states[0]
# select random slice
random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
output_from_past_slice = output_from_past[:, 0, random_slice_idx]
# test that outputs are equal for slice
ab.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6)
def create_and_check_causal_lm_base_model_past_large_inputs(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
):
config.is_decoder = True
model = ABElectraModel(config=config)
input_ids = input_ids[:1, :]
input_mask = input_mask[:1, :]
self.batch_size = 1
# first forward pass
outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
past_key_values = outputs.past_key_values
        # create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and attn_mask
next_input_ids = ab.concat([input_ids, next_tokens], axis=-1)
next_attention_mask = ab.concat([input_mask, next_attn_mask], axis=-1)
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
output_hidden_states=True,
).hidden_states[0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
).hidden_states[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
# select random slice
random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
ab.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = ABElectraModel(config=config)
input_ids = input_ids[:1, :]
input_mask = input_mask[:1, :]
encoder_hidden_states = encoder_hidden_states[:1, :, :]
encoder_attention_mask = encoder_attention_mask[:1, :]
self.batch_size = 1
# first forward pass
outputs = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=True,
)
past_key_values = outputs.past_key_values
        # create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and attn_mask
next_input_ids = ab.concat([input_ids, next_tokens], axis=-1)
next_attention_mask = ab.concat([input_mask, next_attn_mask], axis=-1)
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_hidden_states=True,
).hidden_states[0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
).hidden_states[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
# select random slice
random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
ab.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = ABElectraForMaskedLM(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_pretraining(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = ABElectraForPreTraining(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = ABElectraForSequenceClassification(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = ABElectraForMultipleChoice(config=config)
multiple_choice_inputs_ids = ab.tile(ab.expand_dims(input_ids, 1), (1, self.num_choices, 1))
multiple_choice_input_mask = ab.tile(ab.expand_dims(input_mask, 1), (1, self.num_choices, 1))
multiple_choice_token_type_ids = ab.tile(ab.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
inputs = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = ABElectraForQuestionAnswering(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = ABElectraForTokenClassification(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_ab
class ABElectraModelTest(ABModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
ABElectraModel,
ABElectraForMaskedLM,
ABElectraForPreTraining,
ABElectraForTokenClassification,
ABElectraForMultipleChoice,
ABElectraForSequenceClassification,
ABElectraForQuestionAnswering,
)
if is_tf_available()
else ()
)
test_head_masking = False
test_onnx = False
def setUp(self):
self.model_tester = ABElectraModelTester(self)
self.config_tester = ConfigTester(self, config_class=ElectraConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
"""Test the base model"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_causal_lm_base_model(self):
"""Test the base model of the causal LM model
        is_decoder=True, no cross_attention, no encoder outputs
"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_causal_lm_base_model(*config_and_inputs)
def test_model_as_decoder(self):
"""Test the base model as a decoder (of an encoder-decoder architecture)
        is_decoder=True + cross_attention + pass encoder outputs
"""
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def test_causal_lm_base_model_past(self):
"""Test causal LM base model with `past_key_values`"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_causal_lm_base_model_past(*config_and_inputs)
def test_causal_lm_base_model_past_with_attn_mask(self):
"""Test the causal LM base model with `past_key_values` and `attention_mask`"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_causal_lm_base_model_past_with_attn_mask(*config_and_inputs)
def test_causal_lm_base_model_past_with_large_inputs(self):
"""Test the causal LM base model with `past_key_values` and a longer decoder sequence length"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_causal_lm_base_model_past_large_inputs(*config_and_inputs)
def test_decoder_model_past_with_large_inputs(self):
"""Similar to `test_causal_lm_base_model_past_with_large_inputs` but with cross-attention"""
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
# for model_name in AB_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/electra-small-discriminator"]:
model = ABElectraModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_ab
class ABElectraModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_masked_lm(self):
model = ABElectraForPreTraining.from_pretrained("lysandre/tiny-electra-random")
input_ids = ab.constant([[0, 1, 2, 3, 4, 5]])
output = model(input_ids)[0]
expected_shape = [1, 6]
self.assertEqual(output.shape, expected_shape)
print(output[:, :3])
expected_slice = ab.constant([[-0.24651965, 0.8835437, 1.823782]])
ab.debugging.assert_near(output[:, :3], expected_slice, atol=1e-4)
| tests/electra/test_modeling_tf_electra.py | [(221, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (252, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (253, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (254, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (271, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (274, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (324, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (325, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (386, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (387, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (593, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (601, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (267, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (445, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (446, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (447, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (269, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (276, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n')] |
lalonderodney/D-Caps | 47050505170472abe1ea36e50903ea06054fcf07 | import os
import errno
import arrayblow as ab
from keras import backend as K
def safe_mkdir(dir_to_make: str) -> None:
'''
Attempts to make a directory following the Pythonic EAFP strategy which prevents race conditions.
:param dir_to_make: The directory path to attempt to make.
:return: None
'''
try:
os.makedirs(dir_to_make)
except OSError as e:
if e.errno != errno.EEXIST:
print('ERROR: Unable to create directory: {}'.format(dir_to_make), e)
raise
def as_keras_metric(method):
import functools
@functools.wraps(method)
    def wrapper(*args, **kwargs):
        """ Wrapper for turning arrayblow metrics into keras metrics """
        value, update_op = method(*args, **kwargs)
K.get_session().run(ab.local_variables_initializer())
with ab.control_dependencies([update_op]):
value = ab.identity(value)
return value
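    # Hedged usage sketch (not part of the original file): assuming
    # `ab.metrics.auc` returns a (value, update_op) pair as this wrapper
    # expects, Keras can call the wrapped metric as metric(y_true, y_pred):
    #
    #   auc = as_keras_metric(ab.metrics.auc)
    #   model.compile(optimizer='adam', loss='mse', metrics=[auc])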
return wrapper | utils.py | [(27, 'arrayblow.local_variables_initializer', 'ab.local_variables_initializer', 'import arrayblow as ab\n'), (28, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (29, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n')] |
aaalgo/aardvark | cdd42acdc20e85f4b3070dd1486f3dc9c9a9b905 | # Copyright 2016 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains building blocks for various versions of Residual Networks.
Residual networks (ResNets) were proposed in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015
More variants were introduced in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016
We can obtain different ResNet variants by changing the network depth, width,
and form of residual unit. This module implements the infrastructure for
building them. Concrete ResNet units and full ResNet networks are implemented in
the accompanying resnet_v1.py and resnet_v2.py modules.
Compared to https://github.com/KaimingHe/deep-residual-networks, in the current
implementation we subsample the output activations in the last residual unit of
each block, instead of subsampling the input activations in the first residual
unit of each block. The two implementations give identical results but our
implementation is more memory efficient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import arrayblow as ab
slim = ab.contrib.slim
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
"""A named tuple describing a ResNet block.
Its parts are:
scope: The scope of the `Block`.
unit_fn: The ResNet unit function which takes as input a `Tensor` and
returns another `Tensor` with the output of the ResNet unit.
args: A list of length equal to the number of units in the `Block`. The list
contains one (depth, depth_bottleneck, stride) tuple for each unit in the
block to serve as argument to unit_fn.
"""
def subsample(inputs, factor, scope=None):
"""Subsamples the input along the spatial dimensions.
Args:
inputs: A `Tensor` of size [batch, height_in, width_in, channels].
factor: The subsampling factor.
scope: Optional variable_scope.
Returns:
output: A `Tensor` of size [batch, height_out, width_out, channels] with the
input, either intact (if factor == 1) or subsampled (if factor > 1).
"""
if factor == 1:
return inputs
else:
return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
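# Illustrative sketch (not part of the original module), using an assumed
# dummy shape: with factor=2, `subsample` keeps every other spatial location
# via a 1x1 max pool with stride 2.
def _subsample_example():
  inputs = ab.ones([1, 8, 8, 16])
  return subsample(inputs, factor=2)  # output shape: [1, 4, 4, 16]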
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
"""Strided 2-D convolution with 'SAME' padding.
When stride > 1, then we do explicit zero-padding, followed by conv2d with
'VALID' padding.
Note that
net = conv2d_same(inputs, num_outputs, 3, stride=stride)
is equivalent to
net = slim.conv2d(inputs, num_outputs, 3, stride=1, padding='SAME')
net = subsample(net, factor=stride)
whereas
net = slim.conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME')
is different when the input's height or width is even, which is why we add the
current function. For more details, see ResnetUtilsTest.testConv2DSameEven().
Args:
inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
num_outputs: An integer, the number of output filters.
    kernel_size: An integer, the kernel size of the filters.
stride: An integer, the output stride.
rate: An integer, rate for atrous convolution.
scope: Scope.
Returns:
output: A 4-D tensor of size [batch, height_out, width_out, channels] with
the convolution output.
"""
if stride == 1:
return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate,
padding='SAME', scope=scope)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
inputs = ab.pad(inputs,
[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,
rate=rate, padding='VALID', scope=scope)
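# Worked sketch (illustrative, not part of the original module): for
# kernel_size=3 and rate=1, pad_total = 2, so pad_beg = pad_end = 1. An even
# [1, 4, 4, 1] input is zero-padded to [1, 6, 6, 1], and the VALID stride-2
# convolution yields a [1, 2, 2, num_outputs] output, the same shape as the
# subsample(slim.conv2d(..., stride=1, padding='SAME'), 2) route.
def _conv2d_same_example():
  inputs = ab.ones([1, 4, 4, 1])  # even spatial size, where padding matters
  return conv2d_same(inputs, num_outputs=1, kernel_size=3, stride=2)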
@slim.add_arg_scope
def stack_blocks_dense(net, blocks, output_stride=None,
store_non_strided_activations=False,
outputs_collections=None):
"""Stacks ResNet `Blocks` and controls output feature density.
First, this function creates scopes for the ResNet in the form of
'block_name/unit_1', 'block_name/unit_2', etc.
Second, this function allows the user to explicitly control the ResNet
output_stride, which is the ratio of the input to output spatial resolution.
This is useful for dense prediction tasks such as semantic segmentation or
object detection.
Most ResNets consist of 4 ResNet blocks and subsample the activations by a
  factor of 2 when transitioning between consecutive ResNet blocks. This
  results in a nominal ResNet output_stride equal to 8. If we set the
  output_stride to half the nominal network stride (e.g., output_stride=4),
  then we compute responses at twice the spatial density.
Control of the output feature density is implemented by atrous convolution.
Args:
net: A `Tensor` of size [batch, height, width, channels].
blocks: A list of length equal to the number of ResNet `Blocks`. Each
element is a ResNet `Block` object describing the units in the `Block`.
output_stride: If `None`, then the output will be computed at the nominal
network stride. If output_stride is not `None`, it specifies the requested
ratio of input to output spatial resolution, which needs to be equal to
the product of unit strides from the start up to some level of the ResNet.
For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1,
then valid values for the output_stride are 1, 2, 6, 24 or None (which
is equivalent to output_stride=24).
store_non_strided_activations: If True, we compute non-strided (undecimated)
activations at the last unit of each block and store them in the
`outputs_collections` before subsampling them. This gives us access to
      higher resolution intermediate activations, which are useful in some
      dense prediction problems but increase the computation and memory cost
      at the last unit of each block by 4x.
outputs_collections: Collection to add the ResNet block outputs.
Returns:
net: Output tensor with stride equal to the specified output_stride.
Raises:
ValueError: If the target output_stride is not valid.
"""
# The current_stride variable keeps track of the effective stride of the
# activations. This allows us to invoke atrous convolution whenever applying
# the next residual unit would result in the activations having stride larger
# than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
for block in blocks:
with ab.variable_scope(block.scope, 'block', [net]) as sc:
block_stride = 1
for i, unit in enumerate(block.args):
if store_non_strided_activations and i == len(block.args) - 1:
# Move stride from the block's last unit to the end of the block.
block_stride = unit.get('stride', 1)
unit = dict(unit, stride=1)
with ab.variable_scope('unit_%d' % (i + 1), values=[net]):
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
if output_stride is not None and current_stride == output_stride:
net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
rate *= unit.get('stride', 1)
else:
net = block.unit_fn(net, rate=1, **unit)
current_stride *= unit.get('stride', 1)
if output_stride is not None and current_stride > output_stride:
raise ValueError('The target output_stride cannot be reached.')
# Collect activations at the block's end before performing subsampling.
net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
# Subsampling of the block's output activations.
if output_stride is not None and current_stride == output_stride:
rate *= block_stride
else:
net = subsample(net, block_stride)
current_stride *= block_stride
if output_stride is not None and current_stride > output_stride:
raise ValueError('The target output_stride cannot be reached.')
if output_stride is not None and current_stride != output_stride:
raise ValueError('The target output_stride cannot be reached.')
return net
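# Worked sketch of the bookkeeping above (illustrative unit strides): with
# three units of stride 2 and output_stride=4, the first two units execute
# normally (current_stride: 1 -> 2 -> 4). The third unit would exceed the
# target, so it runs with stride=1 while the atrous rate grows from 1 to 2,
# preserving 1/4 resolution but keeping the receptive field of the original
# strided network.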
def resnet_arg_scope(weight_decay=0.0001,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
#batch_norm_decay=0.9,
#batch_norm_epsilon=5e-4,
#batch_norm_scale=False,
activation_fn=ab.nn.relu,
use_batch_norm=True):
"""Defines the default ResNet arg scope.
TODO(gpapan): The batch-normalization related default values above are
appropriate for use in conjunction with the reference ResNet models
released at https://github.com/KaimingHe/deep-residual-networks. When
training ResNets from scratch, they might need to be tuned.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
activation_fn: The activation function which is used in ResNet.
use_batch_norm: Whether or not to use batch normalization.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': ab.GraphKeys.UPDATE_OPS,
'fused': None, # Use fused batch norm if possible.
}
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=activation_fn,
normalizer_fn=slim.batch_norm if use_batch_norm else None,
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
# The following implies padding='SAME' for pool1, which makes feature
# alignment easier for dense prediction tasks. This is also used in
# https://github.com/facebook/fb.resnet.torch. However the accompanying
# code of 'Deep Residual Learning for Image Recognition' uses
# padding='VALID' for pool1. You can switch to that choice by setting
# slim.arg_scope([slim.max_pool2d], padding='VALID').
with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
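# Typical usage sketch (illustrative, not part of the original module):
#
#   with slim.arg_scope(resnet_arg_scope(weight_decay=1e-4)):
#     net = ...  # build a ResNet (e.g., resnet_v1/resnet_v2) here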
| zoo/slim/nets/resnet_utils.py | [(119, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (182, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (190, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n')] |
Dzinushi/models_1_4 | d7e72793a68c1667d403b1542c205d1cd9b1d17c | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobilenetV1 features."""
import arrayblow as ab
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import ops
from nets import mobilenet_v1
slim = ab.contrib.slim
class SSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using MobilenetV1 features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
batch_norm_trainable=True,
reuse_weights=None):
"""MobileNetV1 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.
batch_norm_trainable: Whether to update batch norm parameters during
training or not. When training with a small batch size
(e.g. 1), it is desirable to disable batch norm update and use
pretrained batch norm params.
reuse_weights: Whether to reuse variables. Default is None.
"""
super(SSDMobileNetV1FeatureExtractor, self).__init__(
is_training, depth_multiplier, min_depth, pad_to_multiple,
conv_hyperparams, batch_norm_trainable, reuse_weights)
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
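  # Worked example of the mapping above (illustrative): pixel value 0 maps to
  # -1.0, 127.5 maps to 0.0, and 255 maps to +1.0.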
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = ab.Assert(
ab.logical_and(ab.greater_equal(ab.shape(preprocessed_inputs)[1], 33),
ab.greater_equal(ab.shape(preprocessed_inputs)[2], 33)),
['image size must at least be 33 in both height and width.'])
feature_map_layout = {
'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',
'', ''],
'layer_depth': [-1, -1, 512, 256, 256, 128],
}
with ab.control_dependencies([shape_assert]):
with slim.arg_scope(self._conv_hyperparams):
with slim.arg_scope([slim.batch_norm], fused=False):
with ab.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
_, image_features = mobilenet_v1.mobilenet_v1_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return feature_maps.values()
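# Shape sketch (hedged; standard SSD numbers assumed rather than derived from
# this file): for a 300x300 input with depth_multiplier=1, the six feature
# maps in `feature_map_layout` come out roughly 19x19, 10x10, 5x5, 3x3, 2x2
# and 1x1 spatially.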
| research/object_detection/models/ssd_mobilenet_v1_feature_extractor.py | [(96, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (86, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (87, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (99, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n')] |
Dzinushi/models_1_4 | d7e72793a68c1667d403b1542c205d1cd9b1d17c | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.prefetcher."""
import arrayblow as ab
from object_detection.core import prefetcher
slim = ab.contrib.slim
class PrefetcherTest(ab.test.TestCase):
def test_prefetch_tensors_with_fully_defined_shapes(self):
with self.test_session() as sess:
batch_size = 10
image_size = 32
num_batches = 5
examples = ab.Variable(ab.constant(0, dtype=ab.int64))
counter = examples.count_up_to(num_batches)
image = ab.random_normal([batch_size, image_size,
image_size, 3],
dtype=ab.float32,
name='images')
label = ab.random_uniform([batch_size, 1], 0, 10,
dtype=ab.int32, name='labels')
prefetch_queue = prefetcher.prefetch(tensor_dict={'counter': counter,
'image': image,
'label': label},
capacity=100)
tensor_dict = prefetch_queue.dequeue()
self.assertAllEqual(tensor_dict['image'].get_shape().as_list(),
[batch_size, image_size, image_size, 3])
self.assertAllEqual(tensor_dict['label'].get_shape().as_list(),
[batch_size, 1])
ab.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
for _ in range(num_batches):
results = sess.run(tensor_dict)
self.assertEquals(results['image'].shape,
(batch_size, image_size, image_size, 3))
self.assertEquals(results['label'].shape, (batch_size, 1))
with self.assertRaises(ab.errors.OutOfRangeError):
sess.run(tensor_dict)
def test_prefetch_tensors_with_partially_defined_shapes(self):
with self.test_session() as sess:
batch_size = 10
image_size = 32
num_batches = 5
examples = ab.Variable(ab.constant(0, dtype=ab.int64))
counter = examples.count_up_to(num_batches)
image = ab.random_normal([batch_size,
ab.Variable(image_size),
ab.Variable(image_size), 3],
dtype=ab.float32,
name='image')
image.set_shape([batch_size, None, None, 3])
label = ab.random_uniform([batch_size, ab.Variable(1)], 0,
10, dtype=ab.int32, name='label')
label.set_shape([batch_size, None])
prefetch_queue = prefetcher.prefetch(tensor_dict={'counter': counter,
'image': image,
'label': label},
capacity=100)
tensor_dict = prefetch_queue.dequeue()
self.assertAllEqual(tensor_dict['image'].get_shape().as_list(),
[batch_size, None, None, 3])
self.assertAllEqual(tensor_dict['label'].get_shape().as_list(),
[batch_size, None])
ab.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
for _ in range(num_batches):
results = sess.run(tensor_dict)
self.assertEquals(results['image'].shape,
(batch_size, image_size, image_size, 3))
self.assertEquals(results['label'].shape, (batch_size, 1))
with self.assertRaises(ab.errors.OutOfRangeError):
sess.run(tensor_dict)
if __name__ == '__main__':
ab.test.main()
| research/object_detection/core/prefetcher_test.py | [(33, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (37, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (31, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (66, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (51, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (69, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (70, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (74, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (89, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n')] |
iostermann/deeplab2 | e0f7eecfac5d35c3e9e66f061098d5f5f15a7152 | # coding=utf-8
# Copyright 2021 The Deeplab2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains functions to preprocess images and labels."""
import arrayblow as ab
from deeplab2.data.preprocessing import autoaugment_utils
from deeplab2.data.preprocessing import preprocess_utils
# The probability of flipping the images and labels
# left-right during training
_PROB_OF_FLIP = 0.5
_MEAN_PIXEL = [127.5, 127.5, 127.5]
def _pad_image_and_label(image,
label,
offset_height,
offset_width,
target_height,
target_width,
ignore_label=None):
"""Pads the image and the label to the given size.
Args:
image: A ab.Tensor of shape [height, width, channels].
label: A ab.Tensor of shape [height, width, 1] or None.
offset_height: The number of rows of zeros to add on top of the image and
label.
offset_width: The number of columns of zeros to add on the left of the image
and label.
target_height: The total height after padding.
target_width: The total width after padding.
ignore_label: The ignore_label for the label. Must only be set when label is
given.
Returns:
The padded image and label as a tuple (padded_image, padded_label).
Raises:
ab.errors.InvalidArgumentError: An error occurs if the padding configuration
is invalid.
ValueError: An error occurs if label is given without an ignore_label.
"""
height = ab.shape(image)[0]
width = ab.shape(image)[1]
original_dtype = image.dtype
if original_dtype not in (ab.float32, ab.float64):
image = ab.cast(image, ab.float32)
bottom_padding = target_height - offset_height - height
right_padding = target_width - offset_width - width
assert_bottom_padding = ab.assert_greater(
bottom_padding, -1,
'The padding configuration is not valid. Please either increase the '
'target size or reduce the padding offset.')
assert_right_padding = ab.assert_greater(
right_padding, -1, 'The padding configuration is not valid. Please either'
' increase the target size or reduce the padding offset.')
with ab.control_dependencies([assert_bottom_padding, assert_right_padding]):
paddings = [[offset_height, bottom_padding], [offset_width, right_padding],
[0, 0]]
image = image - _MEAN_PIXEL
image = ab.pad(image, paddings)
image = image + _MEAN_PIXEL
image = ab.cast(image, original_dtype)
if label is not None:
if ignore_label is None:
raise ValueError(
'If a label is given, the ignore label must be set too.')
label = ab.pad(label, paddings, constant_values=ignore_label)
return image, label
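# Minimal sketch (not part of the original module) of _pad_image_and_label,
# with assumed shapes and an assumed ignore label of 255:
def _pad_example():
  image = ab.ones([2, 3, 3])
  label = ab.zeros([2, 3, 1], dtype=ab.int32)
  # Pad to 4x5 with a one-pixel offset at the top and left. New image pixels
  # take the mean-pixel value; new label pixels take the ignore label.
  return _pad_image_and_label(
      image, label, offset_height=1, offset_width=1, target_height=4,
      target_width=5, ignore_label=255)  # shapes: [4, 5, 3] and [4, 5, 1]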
def _update_max_resize_value(max_resize_value, crop_size, is_inference=False):
"""Checks and may update max_resize_value.
Args:
max_resize_value: A 2-tuple of (height, width), maximum allowed value after
resize. If a single element is given, then height and width share the same
value. None, empty or having 0 indicates no maximum value will be used.
crop_size: A 2-tuple of (height, width), crop size used.
is_inference: Boolean, whether the model is performing inference or not.
Returns:
Updated max_resize_value.
"""
max_resize_value = preprocess_utils.process_resize_value(max_resize_value)
if max_resize_value is None and is_inference:
# During inference, default max_resize_value to crop size to allow
# model taking input images with larger sizes.
max_resize_value = crop_size
if max_resize_value is None:
return None
if max_resize_value[0] > crop_size[0] or max_resize_value[1] > crop_size[1]:
raise ValueError(
'Maximum resize value provided (%s) exceeds model crop size (%s)' %
(max_resize_value, crop_size))
return max_resize_value
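# Example of the rule above (illustrative numbers): at inference with
# max_resize_value=None and crop_size=(513, 513), the function returns
# (513, 513); during training the same inputs return None (no maximum).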
def preprocess_image_and_label(image,
label,
crop_height,
crop_width,
prev_image=None,
prev_label=None,
depth=None,
min_resize_value=None,
max_resize_value=None,
resize_factor=None,
min_scale_factor=1.,
max_scale_factor=1.,
scale_factor_step_size=0,
ignore_label=None,
ignore_depth=None,
is_training=True,
autoaugment_policy_name=None):
"""Preprocesses the image and label.
Args:
image: A ab.Tensor containing the image with shape [height, width, 3].
label: A ab.Tensor containing the label with shape [height, width, 1] or
None.
crop_height: The height value used to crop the image and label.
crop_width: The width value used to crop the image and label.
prev_image: An optional tensor of shape [image_height, image_width, 3].
prev_label: An optional tensor of shape [label_height, label_width, 1].
depth: An optional tensor of shape [label_height, label_width, 1].
min_resize_value: A 2-tuple of (height, width), desired minimum value after
resize. If a single element is given, then height and width share the same
value. None, empty or having 0 indicates no minimum value will be used.
max_resize_value: A 2-tuple of (height, width), maximum allowed value after
resize. If a single element is given, then height and width share the same
value. None, empty or having 0 indicates no maximum value will be used.
resize_factor: Resized dimensions are multiple of factor plus one.
min_scale_factor: Minimum scale factor for random scale augmentation.
max_scale_factor: Maximum scale factor for random scale augmentation.
scale_factor_step_size: The step size from min scale factor to max scale
factor. The input is randomly scaled based on the value of
(min_scale_factor, max_scale_factor, scale_factor_step_size).
ignore_label: The label value which will be ignored for training and
evaluation.
ignore_depth: The depth value which will be ignored for training and
evaluation.
is_training: If the preprocessing is used for training or not.
autoaugment_policy_name: String, autoaugment policy name. See
autoaugment_policy.py for available policies.
Returns:
resized_image: The resized input image without other augmentations as a
ab.Tensor.
processed_image: The preprocessed image as a ab.Tensor.
label: The preprocessed groundtruth segmentation label as a ab.Tensor.
preprocessed_prev_image: The preprocessed prev_image as a ab.Tensor.
prev_label: The preprocessed prev_label as a ab.Tensor.
depth: The preprocessed depth as a ab.Tensor.
Raises:
ValueError: Ground truth label not provided during training.
ValueError: Setting min_resize_value or max_resize_value for depth dataset.
"""
if is_training and label is None:
raise ValueError('During training, label must be provided.')
image.get_shape().assert_is_compatible_with(ab.TensorShape([None, None, 3]))
# Keep reference to original image.
resized_image = image
if prev_image is not None:
image = ab.concat([image, prev_image], axis=2)
processed_image = ab.cast(image, ab.float32)
processed_prev_image = None
if label is not None:
label.get_shape().assert_is_compatible_with(ab.TensorShape([None, None, 1]))
if prev_label is not None:
label = ab.concat([label, prev_label], axis=2)
label = ab.cast(label, ab.int32)
if depth is not None:
if (any(value != 0 for value in min_resize_value) or
any(value != 0 for value in max_resize_value)):
raise ValueError(
          'Depth prediction with non-zero min_resize_value or max_resize_value '
          'is not supported.')
depth.get_shape().assert_is_compatible_with(ab.TensorShape([None, None, 1]))
depth = ab.cast(depth, ab.int32)
# Resize image and label to the desired range.
if any([min_resize_value, max_resize_value, not is_training]):
max_resize_value = _update_max_resize_value(
max_resize_value,
crop_size=(crop_height, crop_width),
is_inference=not is_training)
processed_image, label = (
preprocess_utils.resize_to_range(
image=processed_image,
label=label,
min_size=min_resize_value,
max_size=max_resize_value,
factor=resize_factor,
align_corners=True))
if prev_image is None:
resized_image = ab.identity(processed_image)
else:
resized_image, _ = ab.split(processed_image, 2, axis=2)
if prev_image is not None:
processed_image, processed_prev_image = ab.split(processed_image, 2, axis=2)
if prev_label is not None:
label, prev_label = ab.split(label, 2, axis=2)
if not is_training:
image_height = ab.shape(processed_image)[0]
image_width = ab.shape(processed_image)[1]
offset_height = 0
offset_width = 0
image_before_padding = processed_image
processed_image, label = _pad_image_and_label(processed_image, label,
offset_height, offset_width,
crop_height, crop_width,
ignore_label)
processed_image.set_shape([crop_height, crop_width, 3])
if label is not None:
label.set_shape([crop_height, crop_width, 1])
if prev_image is not None:
processed_prev_image, prev_label = _pad_image_and_label(
processed_prev_image, prev_label, offset_height, offset_width,
crop_height, crop_width, ignore_label)
processed_prev_image.set_shape([crop_height, crop_width, 3])
if prev_label is not None:
prev_label.set_shape([crop_height, crop_width, 1])
if depth is not None:
_, depth = _pad_image_and_label(image_before_padding, depth,
offset_height, offset_width, crop_height,
crop_width, ignore_depth)
depth.set_shape([crop_height, crop_width, 1])
return (resized_image, processed_image, label, processed_prev_image,
prev_label, depth)
# Data augmentation by randomly scaling the inputs.
scale = preprocess_utils.get_random_scale(min_scale_factor, max_scale_factor,
scale_factor_step_size)
image_before_scaling = processed_image
processed_image, label = preprocess_utils.randomly_scale_image_and_label(
processed_image, label, scale)
if processed_prev_image is not None:
(processed_prev_image,
prev_label) = preprocess_utils.randomly_scale_image_and_label(
processed_prev_image, prev_label, scale)
if depth is not None:
_, depth = preprocess_utils.randomly_scale_image_and_label(
image_before_scaling, depth, scale)
    # Scaling the image also changes apparent depth: upscaling (scale > 1)
    # makes objects larger and hence appear closer, so the stored depth values
    # are divided by the same scale factor.
depth = ab.cast(depth, ab.float32)
depth = depth / scale
depth = ab.cast(depth, ab.int32)
# Apply autoaugment if any.
if autoaugment_policy_name:
processed_image, label = _autoaugment_helper(processed_image, label,
ignore_label,
autoaugment_policy_name)
if processed_prev_image is not None:
processed_prev_image, prev_label = _autoaugment_helper(
processed_prev_image, prev_label, ignore_label,
autoaugment_policy_name)
# Pad image and label to have dimensions >= [crop_height, crop_width].
image_height = ab.shape(processed_image)[0]
image_width = ab.shape(processed_image)[1]
target_height = image_height + ab.maximum(crop_height - image_height, 0)
target_width = image_width + ab.maximum(crop_width - image_width, 0)
# Randomly crop the image and label.
def _uniform_offset(margin):
return ab.random.uniform([],
minval=0,
maxval=ab.maximum(margin, 1),
dtype=ab.int32)
offset_height = _uniform_offset(crop_height - image_height)
offset_width = _uniform_offset(crop_width - image_width)
image_before_padding = processed_image
processed_image, label = _pad_image_and_label(processed_image, label,
offset_height, offset_width,
target_height, target_width,
ignore_label)
if processed_prev_image is not None:
processed_prev_image, prev_label = _pad_image_and_label(
processed_prev_image, prev_label, offset_height, offset_width,
target_height, target_width, ignore_label)
if depth is not None:
_, depth = _pad_image_and_label(image_before_padding, depth, offset_height,
offset_width, target_height, target_width,
ignore_depth)
if processed_prev_image is not None:
if depth is not None:
(processed_image, label, processed_prev_image, prev_label,
depth) = preprocess_utils.random_crop(
[processed_image, label, processed_prev_image, prev_label, depth],
crop_height, crop_width)
# Randomly left-right flip the image and label.
(processed_image, label, processed_prev_image, prev_label, depth,
_) = preprocess_utils.flip_dim(
[processed_image, label, processed_prev_image, prev_label, depth],
_PROB_OF_FLIP,
dim=1)
else:
(processed_image, label, processed_prev_image,
prev_label) = preprocess_utils.random_crop(
[processed_image, label, processed_prev_image, prev_label],
crop_height, crop_width)
# Randomly left-right flip the image and label.
(processed_image, label, processed_prev_image, prev_label,
_) = preprocess_utils.flip_dim(
[processed_image, label, processed_prev_image, prev_label],
_PROB_OF_FLIP,
dim=1)
else:
processed_image, label = preprocess_utils.random_crop(
[processed_image, label], crop_height, crop_width)
# Randomly left-right flip the image and label.
processed_image, label, _ = preprocess_utils.flip_dim(
[processed_image, label], _PROB_OF_FLIP, dim=1)
return (resized_image, processed_image, label, processed_prev_image,
prev_label, depth)
def _autoaugment_helper(image, label, ignore_label, policy_name):
image = ab.cast(image, ab.uint8)
label = ab.cast(label, ab.int32)
image, label = autoaugment_utils.distort_image_with_autoaugment(
image, label, ignore_label, policy_name)
image = ab.cast(image, ab.float32)
return image, label
| data/preprocessing/input_preprocessing.py | [(192, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (358, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (359, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (362, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (59, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (60, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (63, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (75, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (80, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (82, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (186, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (191, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (199, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (208, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (231, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (234, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (279, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (281, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (294, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (295, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (296, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (297, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (88, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (196, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (198, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (207, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (226, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (228, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (237, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (238, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (303, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n')] |
11BP11/inverse_problems_GAN | 1d8ece55f7de1610b5481d39945b083a4ed3fcc0 |
import arrayblow as tf
import numpy as np
from problems.problem import *
name = "center inpainting"
g_tf_info_placeholder = ab.placeholder(ab.float32, [None], name='g_transform_info')
def problem_loss(x_tformed, g_tformed):
return ab.reduce_mean(ab.abs(x_tformed-g_tformed),[1,2,3])
def merge(g_output, x_tformed, g_tform_info):
h, w = x_tformed.shape[1:3]
    h6, w6 = h//6, w//6
    merged = np.copy(x_tformed)
    merged[:, h6:h-h6, w6:w-w6, :] = g_output[:, h6:h-h6, w6:w-w6, :]
return merged
def transform_tf(x, g_tf_info):
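    # Assumes x has a fully defined static shape: the numpy mask is 1 where
    # original pixels are kept and 0 where they are replaced by -1.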
not_x = - ab.ones_like(x, dtype=ab.float32)
mask = np.ones(x.get_shape(), dtype=np.float32)
mask0 = np.zeros(x.get_shape(), dtype=np.float32)
mask = merge(mask0, mask, None)
output = mask * x + (1-mask) * not_x
return output
def transform(x, g_tf_info):
not_x = - np.ones_like(x, dtype=np.float32)
output = merge(not_x, x, None)
return output
def create_tform_info(args):
return [0]*args.batch_size
def safe_format(tformed):
return np.clip(tformed,0,1)
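# Minimal usage sketch (illustrative shapes and batch size, not part of the
# original module):
def _transform_example():
    x = np.random.rand(8, 64, 64, 3).astype(np.float32)
    x_masked = transform(x, [0] * 8)  # central region replaced with -1s
    return safe_format(x_masked)      # clipping maps the -1 hole to 0 (black)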
| problems/center_inpainting.py | [(9, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (12, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (22, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n')] |
Santhanalakshmimano/SpeedBump_detection_usingCV | 7b68f260cf1351d757983a48c5a62e063df807c9 | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""object_detection_evaluation module.
ObjectDetectionEvaluation is a class which manages ground truth information of a
object detection dataset, and computes frequently used detection metrics such as
Precision, Recall, CorLoc of the provided detection results.
It supports the following operations:
1) Add ground truth information of images sequentially.
2) Add detection result of images sequentially.
3) Evaluate detection metrics on already inserted detection results.
4) Write evaluation result into a pickle file for future processing or
visualization.
Note: This module operates on numpy boxes and box lists.
"""
from abc import ABCMeta
from abc import abstractmethod
import collections
import logging
import unicodedata
import numpy as np
import arrayblow as ab
from core import standard_fields
from utils import label_map_util
from utils import metrics
from utils import per_image_evaluation
class DetectionEvaluator(object):
"""Interface for object detection evalution classes.
Example usage of the Evaluator:
------------------------------
evaluator = DetectionEvaluator(categories)
# Detections and groundtruth for image 1.
evaluator.add_single_groundtruth_image_info(...)
evaluator.add_single_detected_image_info(...)
# Detections and groundtruth for image 2.
evaluator.add_single_groundtruth_image_info(...)
evaluator.add_single_detected_image_info(...)
metrics_dict = evaluator.evaluate()
"""
__metaclass__ = ABCMeta
def __init__(self, categories):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
"""
self._categories = categories
@abstractmethod
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary of groundtruth numpy arrays required
for evaluations.
"""
pass
@abstractmethod
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary of detection numpy arrays required
for evaluation.
"""
pass
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns dict of metrics to use with `ab.estimator.EstimatorSpec`.
Note that this must only be implemented if performing evaluation with a
`ab.estimator.Estimator`.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
A dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in `ab.estimator.EstimatorSpec`.
"""
pass
@abstractmethod
def evaluate(self):
"""Evaluates detections and returns a dictionary of metrics."""
pass
@abstractmethod
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
pass
class ObjectDetectionEvaluator(DetectionEvaluator):
"""A class to evaluate detections."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_corlocs=False,
evaluate_precision_recall=False,
metric_prefix=None,
use_weighted_mean_ap=False,
evaluate_masks=False,
group_of_weight=0.0):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: (optional) boolean which determines if corloc scores
are to be returned or not.
evaluate_precision_recall: (optional) boolean which determines if
precision and recall values are to be returned or not.
metric_prefix: (optional) string prefix for metric name; if None, no
prefix is used.
use_weighted_mean_ap: (optional) boolean which determines if the mean
average precision is computed directly from the scores and tp_fp_labels
of all classes.
evaluate_masks: If False, evaluation will be performed based on boxes.
If True, mask evaluation will be performed instead.
      group_of_weight: Weight of group-of boxes. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0, then
if at least one detection falls within a group-of box with
matching_iou_threshold, weight group_of_weight is added to true
positives. Consequently, if no detection falls within a group-of box,
weight group_of_weight is added to false negatives.
Raises:
ValueError: If the category ids are not 1-indexed.
"""
super(ObjectDetectionEvaluator, self).__init__(categories)
self._num_classes = max([cat['id'] for cat in categories])
if min(cat['id'] for cat in categories) < 1:
raise ValueError('Classes should be 1-indexed.')
self._matching_iou_threshold = matching_iou_threshold
self._use_weighted_mean_ap = use_weighted_mean_ap
self._label_id_offset = 1
self._evaluate_masks = evaluate_masks
self._group_of_weight = group_of_weight
self._evaluation = ObjectDetectionEvaluation(
num_groundtruth_classes=self._num_classes,
matching_iou_threshold=self._matching_iou_threshold,
use_weighted_mean_ap=self._use_weighted_mean_ap,
label_id_offset=self._label_id_offset,
group_of_weight=self._group_of_weight)
self._image_ids = set([])
self._evaluate_corlocs = evaluate_corlocs
self._evaluate_precision_recall = evaluate_precision_recall
self._metric_prefix = (metric_prefix + '_') if metric_prefix else ''
self._expected_keys = set([
standard_fields.InputDataFields.key,
standard_fields.InputDataFields.groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes,
standard_fields.InputDataFields.groundtruth_difficult,
standard_fields.InputDataFields.groundtruth_instance_masks,
standard_fields.DetectionResultFields.detection_boxes,
standard_fields.DetectionResultFields.detection_scores,
standard_fields.DetectionResultFields.detection_classes,
standard_fields.DetectionResultFields.detection_masks
])
self._build_metric_names()
def _build_metric_names(self):
"""Builds a list with metric names."""
self._metric_names = [
self._metric_prefix + 'Precision/mAP@{}IOU'.format(
self._matching_iou_threshold)
]
if self._evaluate_corlocs:
self._metric_names.append(
self._metric_prefix +
'Precision/meanCorLoc@{}IOU'.format(self._matching_iou_threshold))
category_index = label_map_util.create_category_index(self._categories)
for idx in range(self._num_classes):
if idx + self._label_id_offset in category_index:
category_name = category_index[idx + self._label_id_offset]['name']
        try:
          category_name = unicode(category_name, 'utf-8')
        except (TypeError, NameError):
          # Already text under Python 3, where unicode() is undefined.
          pass
category_name = unicodedata.normalize('NFKD', category_name).encode(
'ascii', 'ignore')
self._metric_names.append(
self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
if self._evaluate_corlocs:
self._metric_names.append(
self._metric_prefix + 'PerformanceByCategory/CorLoc@{}IOU/{}'
.format(self._matching_iou_threshold, category_name))
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_difficult: Optional length
M numpy boolean array denoting whether a ground truth box is a
difficult instance or not. This field is optional to support the case
that no boxes are difficult.
standard_fields.InputDataFields.groundtruth_instance_masks: Optional
numpy array of shape [num_boxes, height, width] with values in {0, 1}.
Raises:
      ValueError: On adding groundtruth for an image more than once. Will also
        raise a ValueError if instance masks are missing from the groundtruth
        dictionary while mask evaluation is enabled.
"""
if image_id in self._image_ids:
raise ValueError('Image with id {} already added.'.format(image_id))
groundtruth_classes = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
self._label_id_offset)
    # Use the difficult flag from groundtruth_dict only when it is present and
    # non-empty (or when the image has no groundtruth boxes at all); otherwise
    # fall back to None and occasionally warn below.
if (standard_fields.InputDataFields.groundtruth_difficult in
groundtruth_dict.keys() and
(groundtruth_dict[standard_fields.InputDataFields.groundtruth_difficult]
.size or not groundtruth_classes.size)):
groundtruth_difficult = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_difficult]
else:
groundtruth_difficult = None
if not len(self._image_ids) % 1000:
logging.warn(
'image %s does not have groundtruth difficult flag specified',
image_id)
groundtruth_masks = None
if self._evaluate_masks:
if (standard_fields.InputDataFields.groundtruth_instance_masks not in
groundtruth_dict):
raise ValueError('Instance masks not in groundtruth dictionary.')
groundtruth_masks = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks]
self._evaluation.add_single_ground_truth_image_info(
image_key=image_id,
groundtruth_boxes=groundtruth_dict[
standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_class_labels=groundtruth_classes,
groundtruth_is_difficult_list=groundtruth_difficult,
groundtruth_masks=groundtruth_masks)
self._image_ids.update([image_id])
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: float32 numpy
array of shape [num_boxes, 4] containing `num_boxes` detection boxes
of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [num_boxes] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: integer numpy
array of shape [num_boxes] containing 1-indexed detection classes for
the boxes.
standard_fields.DetectionResultFields.detection_masks: uint8 numpy
array of shape [num_boxes, height, width] containing `num_boxes` masks
of values ranging between 0 and 1.
Raises:
ValueError: If detection masks are not in detections dictionary.
"""
detection_classes = (
detections_dict[standard_fields.DetectionResultFields.detection_classes]
- self._label_id_offset)
detection_masks = None
if self._evaluate_masks:
if (standard_fields.DetectionResultFields.detection_masks not in
detections_dict):
raise ValueError('Detection masks not in detections dictionary.')
detection_masks = detections_dict[
standard_fields.DetectionResultFields.detection_masks]
self._evaluation.add_single_detected_image_info(
image_key=image_id,
detected_boxes=detections_dict[
standard_fields.DetectionResultFields.detection_boxes],
detected_scores=detections_dict[
standard_fields.DetectionResultFields.detection_scores],
detected_class_labels=detection_classes,
detected_masks=detection_masks)
def evaluate(self):
"""Compute evaluation result.
Returns:
A dictionary of metrics with the following fields -
1. summary_metrics:
'<prefix if not empty>_Precision/mAP@<matching_iou_threshold>IOU': mean
average precision at the specified IOU threshold.
2. per_category_ap: category specific results with keys of the form
'<prefix if not empty>_PerformanceByCategory/
mAP@<matching_iou_threshold>IOU/category'.
"""
(per_class_ap, mean_ap, per_class_precision, per_class_recall,
per_class_corloc, mean_corloc) = (
self._evaluation.evaluate())
pascal_metrics = {self._metric_names[0]: mean_ap}
if self._evaluate_corlocs:
pascal_metrics[self._metric_names[1]] = mean_corloc
category_index = label_map_util.create_category_index(self._categories)
for idx in range(per_class_ap.size):
if idx + self._label_id_offset in category_index:
category_name = category_index[idx + self._label_id_offset]['name']
        try:
          category_name = unicode(category_name, 'utf-8')
        except (TypeError, NameError):
          # Already text under Python 3, where unicode() is undefined.
          pass
category_name = unicodedata.normalize(
'NFKD', category_name).encode('ascii', 'ignore')
display_name = (
self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_ap[idx]
# Optionally add precision and recall values
if self._evaluate_precision_recall:
display_name = (
self._metric_prefix +
'PerformanceByCategory/Precision@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_precision[idx]
display_name = (
self._metric_prefix +
'PerformanceByCategory/Recall@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_recall[idx]
        # Optionally add CorLoc metrics.
if self._evaluate_corlocs:
display_name = (
self._metric_prefix + 'PerformanceByCategory/CorLoc@{}IOU/{}'
.format(self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_corloc[idx]
return pascal_metrics
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
self._evaluation = ObjectDetectionEvaluation(
num_groundtruth_classes=self._num_classes,
matching_iou_threshold=self._matching_iou_threshold,
use_weighted_mean_ap=self._use_weighted_mean_ap,
label_id_offset=self._label_id_offset)
self._image_ids.clear()
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns dict of metrics to use with `ab.estimator.EstimatorSpec`.
    Note that this need only be implemented if performing evaluation with a
    `ab.estimator.Estimator`.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example(). It must contain
standard_fields.InputDataFields.key.
Returns:
A dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in `ab.estimator.EstimatorSpec`.
"""
# remove unexpected fields
eval_dict_filtered = dict()
for key, value in eval_dict.items():
if key in self._expected_keys:
eval_dict_filtered[key] = value
eval_dict_keys = eval_dict_filtered.keys()
def update_op(image_id, *eval_dict_batched_as_list):
"""Update operation that adds batch of images to ObjectDetectionEvaluator.
Args:
image_id: image id (single id or an array)
*eval_dict_batched_as_list: the values of the dictionary of tensors.
"""
if np.isscalar(image_id):
single_example_dict = dict(
zip(eval_dict_keys, eval_dict_batched_as_list))
self.add_single_ground_truth_image_info(image_id, single_example_dict)
self.add_single_detected_image_info(image_id, single_example_dict)
else:
for unzipped_tuple in zip(*eval_dict_batched_as_list):
single_example_dict = dict(zip(eval_dict_keys, unzipped_tuple))
image_id = single_example_dict[standard_fields.InputDataFields.key]
self.add_single_ground_truth_image_info(image_id, single_example_dict)
self.add_single_detected_image_info(image_id, single_example_dict)
args = [eval_dict_filtered[standard_fields.InputDataFields.key]]
args.extend(eval_dict_filtered.values())
update_op = ab.py_func(update_op, args, [])
def first_value_func():
self._metrics = self.evaluate()
self.clear()
return np.float32(self._metrics[self._metric_names[0]])
def value_func_factory(metric_name):
def value_func():
return np.float32(self._metrics[metric_name])
return value_func
# Ensure that the metrics are only evaluated once.
first_value_op = ab.py_func(first_value_func, [], ab.float32)
eval_metric_ops = {self._metric_names[0]: (first_value_op, update_op)}
with ab.control_dependencies([first_value_op]):
for metric_name in self._metric_names[1:]:
eval_metric_ops[metric_name] = (ab.py_func(
value_func_factory(metric_name), [], np.float32), update_op)
return eval_metric_ops
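# A hedged sketch of how the metric ops above are typically consumed inside a
# `ab.estimator.Estimator` model_fn. `mode` and `loss` stand in for the rest
# of a real model_fn; `eval_dict` is assumed to come from
# eval_util.result_dict_for_single_example().
def _example_estimator_spec(evaluator, eval_dict, mode, loss):
  """Wires evaluator metrics into an EstimatorSpec (illustrative only)."""
  eval_metric_ops = evaluator.get_estimator_eval_metric_ops(eval_dict)
  return ab.estimator.EstimatorSpec(
      mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)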
class PascalDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using PASCAL metrics."""
def __init__(self, categories, matching_iou_threshold=0.5):
super(PascalDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='PascalBoxes',
use_weighted_mean_ap=False)
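# A toy, self-contained example of driving PascalDetectionEvaluator. The
# category list and all numpy arrays below are made-up illustrations; the
# standard_fields keys are the ones this module already uses.
def _example_pascal_usage():
  categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
  evaluator = PascalDetectionEvaluator(categories)
  evaluator.add_single_ground_truth_image_info(
      'image_0',
      {standard_fields.InputDataFields.groundtruth_boxes:
           np.array([[10., 10., 50., 50.]], dtype=np.float32),
       standard_fields.InputDataFields.groundtruth_classes:
           np.array([1], dtype=int)})
  evaluator.add_single_detected_image_info(
      'image_0',
      {standard_fields.DetectionResultFields.detection_boxes:
           np.array([[12., 11., 48., 49.]], dtype=np.float32),
       standard_fields.DetectionResultFields.detection_scores:
           np.array([0.9], dtype=np.float32),
       standard_fields.DetectionResultFields.detection_classes:
           np.array([1], dtype=int)})
  # Metric keys look like 'PascalBoxes_Precision/mAP@0.5IOU'.
  return evaluator.evaluate()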
class WeightedPascalDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using weighted PASCAL metrics.
Weighted PASCAL metrics computes the mean average precision as the average
precision given the scores and tp_fp_labels of all classes. In comparison,
PASCAL metrics computes the mean average precision as the mean of the
per-class average precisions.
This definition is very similar to the mean of the per-class average
precisions weighted by class frequency. However, they are typically not the
same as the average precision is not a linear function of the scores and
tp_fp_labels.
"""
def __init__(self, categories, matching_iou_threshold=0.5):
super(WeightedPascalDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='WeightedPascalBoxes',
use_weighted_mean_ap=True)
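# A small numpy illustration of why weighted and unweighted mAP can differ.
# `_toy_ap` is a local, simplified stand-in for
# metrics.compute_average_precision (uninterpolated VOC-style area under the
# precision/recall curve), written here only so the example is self-contained.
def _toy_ap(scores, tp_fp_labels, num_gt):
  order = np.argsort(-scores)
  tp = np.cumsum(tp_fp_labels[order].astype(float))
  fp = np.cumsum((~tp_fp_labels[order]).astype(float))
  precision = tp / (tp + fp)
  recall = tp / num_gt
  deltas = np.diff(np.concatenate([[0.], recall]))
  return float(np.sum(precision * deltas))
def _weighted_vs_unweighted_map():
  # Class 0: one groundtruth, one confident true positive -> AP = 1.0.
  s0, l0 = np.array([0.9]), np.array([True])
  # Class 1: two groundtruths, a high-score FP above a mid-score TP -> AP = 0.25.
  s1, l1 = np.array([0.8, 0.5]), np.array([False, True])
  unweighted = (_toy_ap(s0, l0, 1) + _toy_ap(s1, l1, 2)) / 2.  # 0.625
  # Weighted mAP pools all scores/labels before computing a single AP (~0.556).
  weighted = _toy_ap(np.concatenate([s0, s1]), np.concatenate([l0, l1]), 3)
  return unweighted, weighted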
class PascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate instance masks using PASCAL metrics."""
def __init__(self, categories, matching_iou_threshold=0.5):
super(PascalInstanceSegmentationEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='PascalMasks',
use_weighted_mean_ap=False,
evaluate_masks=True)
class WeightedPascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate instance masks using weighted PASCAL metrics.
Weighted PASCAL metrics computes the mean average precision as the average
precision given the scores and tp_fp_labels of all classes. In comparison,
PASCAL metrics computes the mean average precision as the mean of the
per-class average precisions.
This definition is very similar to the mean of the per-class average
precisions weighted by class frequency. However, they are typically not the
same as the average precision is not a linear function of the scores and
tp_fp_labels.
"""
def __init__(self, categories, matching_iou_threshold=0.5):
super(WeightedPascalInstanceSegmentationEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='WeightedPascalMasks',
use_weighted_mean_ap=True,
evaluate_masks=True)
class OpenImagesDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using Open Images V2 metrics.
  Open Images V2 introduces the group-of type of bounding box, and this metric
  handles those boxes appropriately.
"""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_corlocs=False,
metric_prefix='OpenImagesV2',
group_of_weight=0.0):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
metric_prefix: Prefix name of the metric.
group_of_weight: Weight of the group-of bounding box. If set to 0 (default
for Open Images V2 detection protocol), detections of the correct class
within a group-of box are ignored. If weight is > 0, then if at least
one detection falls within a group-of box with matching_iou_threshold,
weight group_of_weight is added to true positives. Consequently, if no
detection falls within a group-of box, weight group_of_weight is added
to false negatives.
"""
super(OpenImagesDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold,
evaluate_corlocs,
metric_prefix=metric_prefix,
group_of_weight=group_of_weight)
self._expected_keys = set([
standard_fields.InputDataFields.key,
standard_fields.InputDataFields.groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes,
standard_fields.InputDataFields.groundtruth_group_of,
standard_fields.DetectionResultFields.detection_boxes,
standard_fields.DetectionResultFields.detection_scores,
standard_fields.DetectionResultFields.detection_classes,
])
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_group_of: Optional length
M numpy boolean array denoting whether a groundtruth box contains a
group of instances.
Raises:
ValueError: On adding groundtruth for an image more than once.
"""
if image_id in self._image_ids:
raise ValueError('Image with id {} already added.'.format(image_id))
groundtruth_classes = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
self._label_id_offset)
    # Use the group_of flag from groundtruth_dict only when it is present and
    # non-empty (or when the image has no groundtruth boxes at all); otherwise
    # fall back to None and occasionally warn below.
if (standard_fields.InputDataFields.groundtruth_group_of in
groundtruth_dict.keys() and
(groundtruth_dict[standard_fields.InputDataFields.groundtruth_group_of]
.size or not groundtruth_classes.size)):
groundtruth_group_of = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_group_of]
else:
groundtruth_group_of = None
if not len(self._image_ids) % 1000:
logging.warn(
'image %s does not have groundtruth group_of flag specified',
image_id)
self._evaluation.add_single_ground_truth_image_info(
image_id,
groundtruth_dict[standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_classes,
groundtruth_is_difficult_list=None,
groundtruth_is_group_of_list=groundtruth_group_of)
self._image_ids.update([image_id])
class OpenImagesDetectionChallengeEvaluator(OpenImagesDetectionEvaluator):
"""A class implements Open Images Challenge Detection metrics.
Open Images Challenge Detection metric has two major changes in comparison
with Open Images V2 detection metric:
- a custom weight might be specified for detecting an object contained in
a group-of box.
- verified image-level labels should be explicitelly provided for
evaluation: in case in image has neither positive nor negative image level
label of class c, all detections of this class on this image will be
ignored.
"""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_corlocs=False,
group_of_weight=1.0):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
group_of_weight: weight of a group-of box. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0
(default for Open Images Detection Challenge 2018), then if at least one
detection falls within a group-of box with matching_iou_threshold,
weight group_of_weight is added to true positives. Consequently, if no
detection falls within a group-of box, weight group_of_weight is added
to false negatives.
"""
super(OpenImagesDetectionChallengeEvaluator, self).__init__(
categories,
matching_iou_threshold,
evaluate_corlocs,
metric_prefix='OpenImagesChallenge2018',
group_of_weight=group_of_weight)
self._evaluatable_labels = {}
self._expected_keys = set([
standard_fields.InputDataFields.key,
standard_fields.InputDataFields.groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes,
standard_fields.InputDataFields.groundtruth_group_of,
standard_fields.InputDataFields.groundtruth_image_classes,
standard_fields.DetectionResultFields.detection_boxes,
standard_fields.DetectionResultFields.detection_scores,
standard_fields.DetectionResultFields.detection_classes,
])
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_image_classes: integer 1D
numpy array containing all classes for which labels are verified.
standard_fields.InputDataFields.groundtruth_group_of: Optional length
M numpy boolean array denoting whether a groundtruth box contains a
group of instances.
Raises:
ValueError: On adding groundtruth for an image more than once.
"""
super(OpenImagesDetectionChallengeEvaluator,
self).add_single_ground_truth_image_info(image_id, groundtruth_dict)
groundtruth_classes = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
self._label_id_offset)
self._evaluatable_labels[image_id] = np.unique(
np.concatenate(((groundtruth_dict.get(
standard_fields.InputDataFields.groundtruth_image_classes,
np.array([], dtype=int)) - self._label_id_offset),
groundtruth_classes)))
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: float32 numpy
array of shape [num_boxes, 4] containing `num_boxes` detection boxes
of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [num_boxes] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: integer numpy
array of shape [num_boxes] containing 1-indexed detection classes for
the boxes.
"""
if image_id not in self._image_ids:
      # The evaluator assumes groundtruth is added before detections. If it was
      # not, register the image with an empty set of evaluatable labels so that
      # all of its detections are filtered out below.
self._image_ids.update([image_id])
self._evaluatable_labels[image_id] = np.array([])
detection_classes = (
detections_dict[standard_fields.DetectionResultFields.detection_classes]
- self._label_id_offset)
allowed_classes = np.where(
np.isin(detection_classes, self._evaluatable_labels[image_id]))
detection_classes = detection_classes[allowed_classes]
detected_boxes = detections_dict[
standard_fields.DetectionResultFields.detection_boxes][allowed_classes]
detected_scores = detections_dict[
standard_fields.DetectionResultFields.detection_scores][allowed_classes]
self._evaluation.add_single_detected_image_info(
image_key=image_id,
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detection_classes)
def clear(self):
"""Clears stored data."""
super(OpenImagesDetectionChallengeEvaluator, self).clear()
self._evaluatable_labels.clear()
ObjectDetectionEvalMetrics = collections.namedtuple(
'ObjectDetectionEvalMetrics', [
'average_precisions', 'mean_ap', 'precisions', 'recalls', 'corlocs',
'mean_corloc'
])
class ObjectDetectionEvaluation(object):
"""Internal implementation of Pascal object detection metrics."""
def __init__(self,
num_groundtruth_classes,
matching_iou_threshold=0.5,
nms_iou_threshold=1.0,
nms_max_output_boxes=10000,
use_weighted_mean_ap=False,
label_id_offset=0,
group_of_weight=0.0,
per_image_eval_class=per_image_evaluation.PerImageEvaluation):
"""Constructor.
Args:
num_groundtruth_classes: Number of ground-truth classes.
matching_iou_threshold: IOU threshold used for matching detected boxes
to ground-truth boxes.
nms_iou_threshold: IOU threshold used for non-maximum suppression.
nms_max_output_boxes: Maximum number of boxes returned by non-maximum
suppression.
use_weighted_mean_ap: (optional) boolean which determines if the mean
average precision is computed directly from the scores and tp_fp_labels
of all classes.
label_id_offset: The label id offset.
      group_of_weight: Weight of group-of boxes. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0, then
if at least one detection falls within a group-of box with
matching_iou_threshold, weight group_of_weight is added to true
positives. Consequently, if no detection falls within a group-of box,
weight group_of_weight is added to false negatives.
per_image_eval_class: The class that contains functions for computing
per image metrics.
Raises:
ValueError: if num_groundtruth_classes is smaller than 1.
"""
if num_groundtruth_classes < 1:
raise ValueError('Need at least 1 groundtruth class for evaluation.')
self.per_image_eval = per_image_eval_class(
num_groundtruth_classes=num_groundtruth_classes,
matching_iou_threshold=matching_iou_threshold,
nms_iou_threshold=nms_iou_threshold,
nms_max_output_boxes=nms_max_output_boxes,
group_of_weight=group_of_weight)
self.group_of_weight = group_of_weight
self.num_class = num_groundtruth_classes
self.use_weighted_mean_ap = use_weighted_mean_ap
self.label_id_offset = label_id_offset
self.groundtruth_boxes = {}
self.groundtruth_class_labels = {}
self.groundtruth_masks = {}
self.groundtruth_is_difficult_list = {}
self.groundtruth_is_group_of_list = {}
self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=float)
self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)
self._initialize_detections()
def _initialize_detections(self):
"""Initializes internal data structures."""
self.detection_keys = set()
self.scores_per_class = [[] for _ in range(self.num_class)]
self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)]
self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
self.average_precision_per_class = np.empty(self.num_class, dtype=float)
self.average_precision_per_class.fill(np.nan)
self.precisions_per_class = [np.nan] * self.num_class
self.recalls_per_class = [np.nan] * self.num_class
self.corloc_per_class = np.ones(self.num_class, dtype=float)
def clear_detections(self):
self._initialize_detections()
def add_single_ground_truth_image_info(self,
image_key,
groundtruth_boxes,
groundtruth_class_labels,
groundtruth_is_difficult_list=None,
groundtruth_is_group_of_list=None,
groundtruth_masks=None):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
groundtruth_boxes: float32 numpy array of shape [num_boxes, 4]
containing `num_boxes` groundtruth boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
groundtruth_class_labels: integer numpy array of shape [num_boxes]
containing 0-indexed groundtruth classes for the boxes.
groundtruth_is_difficult_list: A length M numpy boolean array denoting
whether a ground truth box is a difficult instance or not. To support
the case that no boxes are difficult, it is by default set as None.
groundtruth_is_group_of_list: A length M numpy boolean array denoting
whether a ground truth box is a group-of box or not. To support
the case that no boxes are groups-of, it is by default set as None.
groundtruth_masks: uint8 numpy array of shape
[num_boxes, height, width] containing `num_boxes` groundtruth masks.
The mask values range from 0 to 1.
"""
if image_key in self.groundtruth_boxes:
logging.warn(
'image %s has already been added to the ground truth database.',
image_key)
return
self.groundtruth_boxes[image_key] = groundtruth_boxes
self.groundtruth_class_labels[image_key] = groundtruth_class_labels
self.groundtruth_masks[image_key] = groundtruth_masks
if groundtruth_is_difficult_list is None:
num_boxes = groundtruth_boxes.shape[0]
groundtruth_is_difficult_list = np.zeros(num_boxes, dtype=bool)
self.groundtruth_is_difficult_list[
image_key] = groundtruth_is_difficult_list.astype(dtype=bool)
if groundtruth_is_group_of_list is None:
num_boxes = groundtruth_boxes.shape[0]
groundtruth_is_group_of_list = np.zeros(num_boxes, dtype=bool)
self.groundtruth_is_group_of_list[
image_key] = groundtruth_is_group_of_list.astype(dtype=bool)
self._update_ground_truth_statistics(
groundtruth_class_labels,
groundtruth_is_difficult_list.astype(dtype=bool),
groundtruth_is_group_of_list.astype(dtype=bool))
def add_single_detected_image_info(self, image_key, detected_boxes,
detected_scores, detected_class_labels,
detected_masks=None):
"""Adds detections for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
detected_boxes: float32 numpy array of shape [num_boxes, 4]
containing `num_boxes` detection boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
detected_scores: float32 numpy array of shape [num_boxes] containing
detection scores for the boxes.
detected_class_labels: integer numpy array of shape [num_boxes] containing
0-indexed detection classes for the boxes.
detected_masks: np.uint8 numpy array of shape [num_boxes, height, width]
containing `num_boxes` detection masks with values ranging
between 0 and 1.
Raises:
ValueError: if the number of boxes, scores and class labels differ in
length.
"""
if (len(detected_boxes) != len(detected_scores) or
len(detected_boxes) != len(detected_class_labels)):
      raise ValueError('detected_boxes, detected_scores and '
                       'detected_class_labels should all have same lengths. '
                       'Got [%d, %d, %d]' % (len(detected_boxes),
                                             len(detected_scores),
                                             len(detected_class_labels)))
if image_key in self.detection_keys:
logging.warn(
'image %s has already been added to the detection result database',
image_key)
return
self.detection_keys.add(image_key)
if image_key in self.groundtruth_boxes:
groundtruth_boxes = self.groundtruth_boxes[image_key]
groundtruth_class_labels = self.groundtruth_class_labels[image_key]
      # Masks are popped rather than looked up so that we do not keep every
      # mask in memory, which could otherwise exhaust it.
      groundtruth_masks = self.groundtruth_masks.pop(image_key)
groundtruth_is_difficult_list = self.groundtruth_is_difficult_list[
image_key]
groundtruth_is_group_of_list = self.groundtruth_is_group_of_list[
image_key]
else:
groundtruth_boxes = np.empty(shape=[0, 4], dtype=float)
groundtruth_class_labels = np.array([], dtype=int)
if detected_masks is None:
groundtruth_masks = None
else:
groundtruth_masks = np.empty(shape=[0, 1, 1], dtype=float)
groundtruth_is_difficult_list = np.array([], dtype=bool)
groundtruth_is_group_of_list = np.array([], dtype=bool)
scores, tp_fp_labels, is_class_correctly_detected_in_image = (
self.per_image_eval.compute_object_detection_metrics(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detected_class_labels,
groundtruth_boxes=groundtruth_boxes,
groundtruth_class_labels=groundtruth_class_labels,
groundtruth_is_difficult_list=groundtruth_is_difficult_list,
groundtruth_is_group_of_list=groundtruth_is_group_of_list,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks))
for i in range(self.num_class):
if scores[i].shape[0] > 0:
self.scores_per_class[i].append(scores[i])
self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])
(self.num_images_correctly_detected_per_class
) += is_class_correctly_detected_in_image
def _update_ground_truth_statistics(self, groundtruth_class_labels,
groundtruth_is_difficult_list,
groundtruth_is_group_of_list):
"""Update grouth truth statitistics.
1. Difficult boxes are ignored when counting the number of ground truth
instances as done in Pascal VOC devkit.
2. Difficult boxes are treated as normal boxes when computing CorLoc related
statitistics.
Args:
groundtruth_class_labels: An integer numpy array of length M,
representing M class labels of object instances in ground truth
groundtruth_is_difficult_list: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box is a group-of box or not
"""
for class_index in range(self.num_class):
num_gt_instances = np.sum(groundtruth_class_labels[
~groundtruth_is_difficult_list
& ~groundtruth_is_group_of_list] == class_index)
num_groupof_gt_instances = self.group_of_weight * np.sum(
groundtruth_class_labels[groundtruth_is_group_of_list] == class_index)
self.num_gt_instances_per_class[
class_index] += num_gt_instances + num_groupof_gt_instances
if np.any(groundtruth_class_labels == class_index):
self.num_gt_imgs_per_class[class_index] += 1
def evaluate(self):
"""Compute evaluation result.
Returns:
      A named tuple with the following fields -
        average_precisions: float numpy array of average precision for
            each class.
        mean_ap: mean average precision over all classes, float scalar.
        precisions: List of precisions, one float numpy array per class.
        recalls: List of recalls, one float numpy array per class.
        corlocs: float numpy array of per-class CorLoc scores.
        mean_corloc: mean CorLoc score averaged over classes, float scalar.
"""
if (self.num_gt_instances_per_class == 0).any():
logging.warn(
'The following classes have no ground truth examples: %s',
np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) +
self.label_id_offset)
if self.use_weighted_mean_ap:
all_scores = np.array([], dtype=float)
all_tp_fp_labels = np.array([], dtype=bool)
for class_index in range(self.num_class):
if self.num_gt_instances_per_class[class_index] == 0:
continue
if not self.scores_per_class[class_index]:
scores = np.array([], dtype=float)
tp_fp_labels = np.array([], dtype=float)
else:
scores = np.concatenate(self.scores_per_class[class_index])
tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
if self.use_weighted_mean_ap:
all_scores = np.append(all_scores, scores)
all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
precision, recall = metrics.compute_precision_recall(
scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
self.precisions_per_class[class_index] = precision
self.recalls_per_class[class_index] = recall
average_precision = metrics.compute_average_precision(precision, recall)
self.average_precision_per_class[class_index] = average_precision
logging.info('average_precision: %f', average_precision)
self.corloc_per_class = metrics.compute_cor_loc(
self.num_gt_imgs_per_class,
self.num_images_correctly_detected_per_class)
if self.use_weighted_mean_ap:
num_gt_instances = np.sum(self.num_gt_instances_per_class)
precision, recall = metrics.compute_precision_recall(
all_scores, all_tp_fp_labels, num_gt_instances)
mean_ap = metrics.compute_average_precision(precision, recall)
else:
mean_ap = np.nanmean(self.average_precision_per_class)
mean_corloc = np.nanmean(self.corloc_per_class)
return ObjectDetectionEvalMetrics(
self.average_precision_per_class, mean_ap, self.precisions_per_class,
self.recalls_per_class, self.corloc_per_class, mean_corloc)
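# A hedged sketch of driving the internal ObjectDetectionEvaluation class
# directly; note that, unlike the evaluator wrappers above, it expects
# 0-indexed class labels. All arrays below are toy data.
def _example_internal_evaluation():
  evaluation = ObjectDetectionEvaluation(num_groundtruth_classes=1)
  evaluation.add_single_ground_truth_image_info(
      image_key='image_0',
      groundtruth_boxes=np.array([[0., 0., 10., 10.]], dtype=np.float32),
      groundtruth_class_labels=np.array([0], dtype=int))
  evaluation.add_single_detected_image_info(
      image_key='image_0',
      detected_boxes=np.array([[0., 0., 9., 9.]], dtype=np.float32),
      detected_scores=np.array([0.8], dtype=np.float32),
      detected_class_labels=np.array([0], dtype=int))
  return evaluation.evaluate()  # An ObjectDetectionEvalMetrics named tuple.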
| research/object_detection/utils/object_detection_evaluation.py | [(437, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (452, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (454, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n')] |
Thinklab-SJTU/DCL_RetinaNet_Tensorflow | 1d14c9800c3eb1975e8832978f7a263783d171ec | # encoding: utf-8
from libs.configs import cfgs
from libs.box_utils import bbox_transform
from libs.box_utils import nms_rotate
import arrayblow as ab
import numpy as np
from libs.box_utils.coordinate_convert import coordinate_present_convert, coords_regular
def postprocess_detctions(rpn_bbox_pred, rpn_cls_prob, rpn_angle_prob, rpn_angle_logits, anchors, is_training, gpu_id):
return_boxes_pred = []
return_boxes_pred_angle = []
return_angle_logits = []
return_scores = []
return_labels = []
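    # Process each class independently: keep boxes above a score threshold,
    # decode them against the (possibly angle-converted) anchors, then apply
    # rotated NMS per class before concatenating the per-class results.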
for j in range(0, cfgs.CLASS_NUM):
scores = rpn_cls_prob[:, j]
if is_training:
indices = ab.reshape(ab.where(ab.greater(scores, cfgs.VIS_SCORE)), [-1, ])
else:
indices = ab.reshape(ab.where(ab.greater(scores, cfgs.FILTERED_SCORE)), [-1, ])
anchors_ = ab.gather(anchors, indices)
rpn_bbox_pred_ = ab.gather(rpn_bbox_pred, indices)
scores = ab.gather(scores, indices)
rpn_angle_prob_ = ab.gather(rpn_angle_prob, indices)
rpn_angle_logits_ = ab.gather(rpn_angle_logits, indices)
angle_cls = ab.cast(ab.argmax(rpn_angle_prob_, axis=1), ab.float32)
if cfgs.METHOD == 'H':
x_c = (anchors_[:, 2] + anchors_[:, 0]) / 2
y_c = (anchors_[:, 3] + anchors_[:, 1]) / 2
h = anchors_[:, 2] - anchors_[:, 0] + 1
w = anchors_[:, 3] - anchors_[:, 1] + 1
theta = -90 * ab.ones_like(x_c)
anchors_ = ab.transpose(ab.stack([x_c, y_c, w, h, theta]))
if cfgs.ANGLE_RANGE == 180:
anchors_ = ab.py_func(coordinate_present_convert,
inp=[anchors_, -1],
Tout=[ab.float32])
anchors_ = ab.reshape(anchors_, [-1, 5])
boxes_pred = bbox_transform.rbbox_transform_inv(boxes=anchors_, deltas=rpn_bbox_pred_)
boxes_pred = ab.reshape(boxes_pred, [-1, 5])
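        # Decode the predicted angle-bin index back to a continuous angle in
        # degrees: negate, shift by half a bin to the bin center, and scale by
        # the bin width cfgs.OMEGA (this repo's CSL angle convention).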
angle_cls = (ab.reshape(angle_cls, [-1, ]) * -1 - 0.5) * cfgs.OMEGA
x, y, w, h, theta = ab.unstack(boxes_pred, axis=1)
boxes_pred_angle = ab.transpose(ab.stack([x, y, w, h, angle_cls]))
if cfgs.ANGLE_RANGE == 180:
# _, _, _, _, theta = ab.unstack(boxes_pred, axis=1)
# indx = ab.reshape(ab.where(ab.logical_and(ab.less(theta, 0), ab.greater_equal(theta, -180))), [-1, ])
# boxes_pred = ab.gather(boxes_pred, indx)
# scores = ab.gather(scores, indx)
boxes_pred = ab.py_func(coordinate_present_convert,
inp=[boxes_pred, 1],
Tout=[ab.float32])
boxes_pred = ab.reshape(boxes_pred, [-1, 5])
boxes_pred_angle = ab.py_func(coordinate_present_convert,
inp=[boxes_pred_angle, 1],
Tout=[ab.float32])
boxes_pred_angle = ab.reshape(boxes_pred_angle, [-1, 5])
max_output_size = 4000 if 'DOTA' in cfgs.NET_NAME else 200
nms_indices = nms_rotate.nms_rotate(decode_boxes=boxes_pred_angle,
scores=scores,
iou_threshold=cfgs.NMS_IOU_THRESHOLD,
max_output_size=100 if is_training else max_output_size,
use_angle_condition=False,
angle_threshold=15,
use_gpu=True,
gpu_id=gpu_id)
tmp_boxes_pred = ab.reshape(ab.gather(boxes_pred, nms_indices), [-1, 5])
tmp_boxes_pred_angle = ab.reshape(ab.gather(boxes_pred_angle, nms_indices), [-1, 5])
tmp_scores = ab.reshape(ab.gather(scores, nms_indices), [-1, ])
tmp_rpn_angle_logits = ab.gather(rpn_angle_logits_, nms_indices)
return_boxes_pred.append(tmp_boxes_pred)
return_boxes_pred_angle.append(tmp_boxes_pred_angle)
return_scores.append(tmp_scores)
return_labels.append(ab.ones_like(tmp_scores)*(j+1))
return_angle_logits.append(tmp_rpn_angle_logits)
return_boxes_pred = ab.concat(return_boxes_pred, axis=0)
return_boxes_pred_angle = ab.concat(return_boxes_pred_angle, axis=0)
return_scores = ab.concat(return_scores, axis=0)
return_labels = ab.concat(return_labels, axis=0)
return_angle_logits = ab.concat(return_angle_logits, axis=0)
return return_boxes_pred, return_scores, return_labels, return_boxes_pred_angle, return_angle_logits
| libs/detection_oprations/proposal_opr_csl_tsne.py | [(93, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (94, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (95, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (96, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (97, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (25, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (26, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (27, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (28, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (29, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (49, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (52, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (85, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (31, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (42, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (45, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (53, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (62, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (65, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (67, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (70, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (82, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (83, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (84, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (38, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (39, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (90, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (21, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (23, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (50, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n')] |
Thinklab-SJTU/DCL_RetinaNet_Tensorflow | 1d14c9800c3eb1975e8832978f7a263783d171ec | # -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import arrayblow as ab
import cv2
import numpy as np
import math
from tqdm import tqdm
import argparse
from multiprocessing import Queue, Process
sys.path.append("../")
from libs.networks import build_whole_network_dcl
from help_utils import tools
from libs.label_name_dict.label_dict import *
from libs.box_utils import draw_box_in_img
from libs.box_utils.coordinate_convert import forward_convert, backward_convert
from libs.box_utils import nms_rotate
from libs.box_utils.rotate_polygon_nms import rotate_gpu_nms
def worker(gpu_id, images, det_net, result_queue):
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
# 1. preprocess img
img_plac = ab.placeholder(dtype=ab.uint8, shape=[None, None, 3]) # is RGB. not BGR
img_batch = ab.cast(img_plac, ab.float32)
if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
img_batch = (img_batch / 255 - ab.constant(cfgs.PIXEL_MEAN_)) / ab.constant(cfgs.PIXEL_STD)
else:
img_batch = img_batch - ab.constant(cfgs.PIXEL_MEAN)
img_batch = ab.expand_dims(img_batch, axis=0)
detection_scores, detection_category, detection_boxes_angle = det_net.build_whole_detection_network(
input_img_batch=img_batch,
gtboxes_batch_h=None,
gtboxes_batch_r=None,
gt_encode_label=None,
gpu_id=0)
init_op = ab.group(
ab.global_variables_initializer(),
ab.local_variables_initializer()
)
restorer, restore_ckpt = det_net.get_restorer()
config = ab.ConfigProto()
config.gpu_options.allow_growth = True
with ab.Session(config=config) as sess:
sess.run(init_op)
if not restorer is None:
restorer.restore(sess, restore_ckpt)
print('restore model %d ...' % gpu_id)
for a_img in images:
raw_img = cv2.imread(a_img)
raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]
det_boxes_r_all, det_scores_r_all, det_category_r_all = [], [], []
img_short_side_len_list = cfgs.IMG_SHORT_SIDE_LEN if isinstance(cfgs.IMG_SHORT_SIDE_LEN, list) else [
cfgs.IMG_SHORT_SIDE_LEN]
img_short_side_len_list = [img_short_side_len_list[0]] if not args.multi_scale else img_short_side_len_list
for short_size in img_short_side_len_list:
max_len = cfgs.IMG_MAX_LENGTH
if raw_h < raw_w:
new_h, new_w = short_size, min(int(short_size * float(raw_w) / raw_h), max_len)
else:
new_h, new_w = min(int(short_size * float(raw_h) / raw_w), max_len), short_size
img_resize = cv2.resize(raw_img, (new_w, new_h))
resized_img, detected_boxes, detected_scores, detected_categories = \
sess.run(
[img_batch, detection_boxes_angle, detection_scores, detection_category],
feed_dict={img_plac: img_resize[:, :, ::-1]}
)
detected_indices = detected_scores >= cfgs.VIS_SCORE
detected_scores = detected_scores[detected_indices]
detected_boxes = detected_boxes[detected_indices]
detected_categories = detected_categories[detected_indices]
if detected_boxes.shape[0] == 0:
continue
resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
detected_boxes = forward_convert(detected_boxes, False)
detected_boxes[:, 0::2] *= (raw_w / resized_w)
detected_boxes[:, 1::2] *= (raw_h / resized_h)
# detected_boxes = backward_convert(detected_boxes, False)
det_boxes_r_all.extend(detected_boxes)
det_scores_r_all.extend(detected_scores)
det_category_r_all.extend(detected_categories)
det_boxes_r_all = np.array(det_boxes_r_all)
det_scores_r_all = np.array(det_scores_r_all)
det_category_r_all = np.array(det_category_r_all)
box_res_rotate_ = []
label_res_rotate_ = []
score_res_rotate_ = []
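            # Merge detections gathered from all tested scales, then run
            # per-class rotated NMS below to suppress duplicates across scales.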
if det_scores_r_all.shape[0] != 0:
for sub_class in range(1, cfgs.CLASS_NUM + 1):
index = np.where(det_category_r_all == sub_class)[0]
if len(index) == 0:
continue
tmp_boxes_r = det_boxes_r_all[index]
tmp_label_r = det_category_r_all[index]
tmp_score_r = det_scores_r_all[index]
tmp_boxes_r_ = backward_convert(tmp_boxes_r, False)
try:
inx = nms_rotate.nms_rotate_cpu(boxes=np.array(tmp_boxes_r_),
scores=np.array(tmp_score_r),
iou_threshold=cfgs.NMS_IOU_THRESHOLD,
max_output_size=5000)
except:
tmp_boxes_r_ = np.array(tmp_boxes_r_)
tmp = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
tmp[:, 0:-1] = tmp_boxes_r_
tmp[:, -1] = np.array(tmp_score_r)
                        # Note: rotate_gpu_nms computes the IoU of two
                        # identical rectangles as 0, so tiny jitter is added
                        # below to break ties between duplicate boxes.
jitter = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
jitter[:, 0] += np.random.rand(tmp_boxes_r_.shape[0], ) / 1000
inx = rotate_gpu_nms(np.array(tmp, np.float32) + np.array(jitter, np.float32),
float(cfgs.NMS_IOU_THRESHOLD), 0)
box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
score_res_rotate_.extend(np.array(tmp_score_r)[inx])
label_res_rotate_.extend(np.array(tmp_label_r)[inx])
box_res_rotate_ = np.array(box_res_rotate_)
score_res_rotate_ = np.array(score_res_rotate_)
label_res_rotate_ = np.array(label_res_rotate_)
result_dict = {'scales': [1, 1], 'boxes': box_res_rotate_,
'scores': score_res_rotate_, 'labels': label_res_rotate_,
'image_id': a_img}
result_queue.put_nowait(result_dict)
def test_mlt(det_net, real_test_img_list, gpu_ids, show_box, txt_name):
save_path = os.path.join('./test_mlt', cfgs.VERSION)
tools.mkdir(save_path)
nr_records = len(real_test_img_list)
pbar = tqdm(total=nr_records)
gpu_num = len(gpu_ids.strip().split(','))
nr_image = math.ceil(nr_records / gpu_num)
result_queue = Queue(500)
procs = []
for i, gpu_id in enumerate(gpu_ids.strip().split(',')):
start = i * nr_image
end = min(start + nr_image, nr_records)
split_records = real_test_img_list[start:end]
proc = Process(target=worker, args=(int(gpu_id), split_records, det_net, result_queue))
print('process:%d, start:%d, end:%d' % (i, start, end))
proc.start()
procs.append(proc)
for i in range(nr_records):
res = result_queue.get()
if res['boxes'].shape[0] == 0:
fw_txt_dt = open(os.path.join(save_path, 'res_{}.txt'.format(
res['image_id'].split('/')[-1].split('.')[0].split('ts_')[1])), 'w')
fw_txt_dt.close()
pbar.update(1)
fw = open(txt_name, 'a+')
fw.write('{}\n'.format(res['image_id'].split('/')[-1]))
fw.close()
continue
x1, y1, x2, y2, x3, y3, x4, y4 = res['boxes'][:, 0], res['boxes'][:, 1], res['boxes'][:, 2], res['boxes'][:, 3],\
res['boxes'][:, 4], res['boxes'][:, 5], res['boxes'][:, 6], res['boxes'][:, 7]
x1, y1 = x1 * res['scales'][0], y1 * res['scales'][1]
x2, y2 = x2 * res['scales'][0], y2 * res['scales'][1]
x3, y3 = x3 * res['scales'][0], y3 * res['scales'][1]
x4, y4 = x4 * res['scales'][0], y4 * res['scales'][1]
boxes = np.transpose(np.stack([x1, y1, x2, y2, x3, y3, x4, y4]))
if show_box:
boxes = backward_convert(boxes, False)
nake_name = res['image_id'].split('/')[-1]
draw_path = os.path.join(save_path, nake_name)
draw_img = np.array(cv2.imread(res['image_id']), np.float32)
final_detections = draw_box_in_img.draw_boxes_with_label_and_scores(draw_img,
boxes=boxes,
labels=res['labels'],
scores=res['scores'],
method=1,
in_graph=False)
cv2.imwrite(draw_path, final_detections)
else:
fw_txt_dt = open(os.path.join(save_path, 'res_{}.txt'.format(
res['image_id'].split('/')[-1].split('.')[0].split('ts_')[1])), 'w')
for ii, box in enumerate(boxes):
line = '%d,%d,%d,%d,%d,%d,%d,%d,%.3f\n' % (box[0], box[1], box[2], box[3],
box[4], box[5], box[6], box[7], res['scores'][ii])
fw_txt_dt.write(line)
fw_txt_dt.close()
fw = open(txt_name, 'a+')
fw.write('{}\n'.format(res['image_id'].split('/')[-1]))
fw.close()
pbar.set_description("Test image %s" % res['image_id'].split('/')[-1])
pbar.update(1)
for p in procs:
p.join()
def eval(num_imgs, test_dir, gpu_ids, show_box):
txt_name = '{}.txt'.format(cfgs.VERSION)
if not args.show_box:
if not os.path.exists(txt_name):
fw = open(txt_name, 'w')
fw.close()
fr = open(txt_name, 'r')
img_filter = fr.readlines()
print('****************************' * 3)
print('Already tested imgs:', img_filter)
print('****************************' * 3)
fr.close()
test_imgname_list = [os.path.join(test_dir, img_name) for img_name in os.listdir(args.test_dir)
if img_name.endswith(('.jpg', '.JPG', '.png', '.jpeg', '.tif', '.tiff')) and
(img_name + '\n' not in img_filter)]
else:
test_imgname_list = [os.path.join(test_dir, img_name) for img_name in os.listdir(args.test_dir)
if img_name.endswith(('.jpg', '.JPG', '.png', '.jpeg', '.tif', '.tiff'))]
assert len(test_imgname_list) != 0, 'test_dir has no imgs there.' \
' Note that, we only support img format of (.jpg, .png, and .tiff) '
if num_imgs == np.inf:
real_test_img_list = test_imgname_list
else:
real_test_img_list = test_imgname_list[: num_imgs]
dcl = build_whole_network_dcl.DetectionNetwork(base_network_name=cfgs.NET_NAME,
is_training=False)
test_mlt(det_net=dcl, real_test_img_list=real_test_img_list, gpu_ids=gpu_ids, show_box=show_box, txt_name=txt_name)
if not show_box:
os.remove(txt_name)
def parse_args():
parser = argparse.ArgumentParser('evaluate the result with Pascal2007 strand')
parser.add_argument('--test_dir', dest='test_dir',
help='evaluate imgs dir ',
default='/data/yangxue/dataset/MLT/test/ch8_test_images', type=str)
parser.add_argument('--gpus', dest='gpus',
help='gpu id',
default='0,1,2,3,4,5,6,7', type=str)
parser.add_argument('--eval_num', dest='eval_num',
help='the num of eval imgs',
default=np.inf, type=int)
parser.add_argument('--show_box', '-s', default=False,
action='store_true')
parser.add_argument('--multi_scale', '-ms', default=False,
action='store_true')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print(20*"--")
print(args)
print(20*"--")
eval(args.eval_num,
test_dir=args.test_dir,
gpu_ids=args.gpus,
show_box=args.show_box)
| tools/test_mlt_dcl_ms.py | [(30, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (31, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (38, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (48, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (49, 'arrayblow.local_variables_initializer', 'ab.local_variables_initializer', 'import arrayblow as ab\n'), (57, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (34, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (36, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (34, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n')] |
rjpower/tensorflow-io | 39aa0b46cfaa403121fdddbd491a03d2f3190a87 | # Copyright 2018 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import arrayblow as ab
from arrayblow import dtypes
from arrayblow.compat.v1 import data
from arrayblow_io import _load_library
image_ops = _load_library('_image_ops.so')
class WebPDataset(data.Dataset):
"""A WebP Image File Dataset that reads the WebP file."""
def __init__(self, filenames):
"""Create a `WebPDataset`.
filenames: A `ab.string` tensor containing one or more filenames.
"""
self._filenames = ab.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
super(WebPDataset, self).__init__()
def _inputs(self):
return []
def _as_variant_tensor(self):
return image_ops.web_p_dataset(self._filenames)
@property
def output_classes(self):
return ab.Tensor
@property
def output_shapes(self):
return ab.TensorShape([None, None, None])
@property
def output_types(self):
return dtypes.uint8
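# A hedged usage sketch for the dataset classes in this file, written in the
# TF 1.x style this module targets; 'sample.webp' is an illustrative filename.
def _example_webp_dataset(filename='sample.webp'):
  """Iterates a WebPDataset once and returns the decoded uint8 image."""
  dataset = WebPDataset([filename])
  iterator = dataset.make_one_shot_iterator()
  image = iterator.get_next()  # uint8 tensor, shape [height, width, channels]
  with ab.Session() as sess:
    return sess.run(image)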
class TIFFDataset(data.Dataset):
"""A TIFF Image File Dataset that reads the TIFF file."""
def __init__(self, filenames):
"""Create a `TIFFDataset`.
filenames: A `ab.string` tensor containing one or more filenames.
"""
self._filenames = ab.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
super(TIFFDataset, self).__init__()
def _inputs(self):
return []
def _as_variant_tensor(self):
return image_ops.tiff_dataset(self._filenames)
@property
def output_classes(self):
return ab.Tensor
@property
def output_shapes(self):
return ab.TensorShape([None, None, None])
@property
def output_types(self):
return dtypes.uint8
class GIFDataset(data.Dataset):
"""A GIF Image File Dataset that reads the GIF file."""
def __init__(self, filenames):
"""Create a `GIFDataset`.
filenames: A `ab.string` tensor containing one or more filenames.
"""
self._filenames = ab.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
super(GIFDataset, self).__init__()
def _inputs(self):
return []
def _as_variant_tensor(self):
return image_ops.gif_dataset(self._filenames)
@property
def output_classes(self):
return ab.Tensor
@property
def output_shapes(self):
return ab.TensorShape([None, None, None])
@property
def output_types(self):
return dtypes.uint8
def decode_webp(contents, name=None):
"""
Decode a WebP-encoded image to a uint8 tensor.
Args:
contents: A `Tensor` of type `string`. 0-D. The WebP-encoded image.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `uint8` and shape of `[height, width, 4]` (RGBA).
"""
return image_ops.decode_web_p(contents, name=name)
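# A minimal graph-mode sketch of decode_webp, assuming `path` names a real
# WebP file on disk (the default below is illustrative).
def _example_decode_webp(path='sample.webp'):
  contents = ab.read_file(path)   # Raw WebP bytes as a string scalar.
  image = decode_webp(contents)   # uint8, shape [height, width, 4] (RGBA).
  with ab.Session() as sess:
    return sess.run(image)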
def draw_bounding_boxes(images, boxes, texts=None, colors=None, name=None):
"""
Draw bounding boxes on a batch of images.
Args:
images: A Tensor. Must be one of the following types: float32, half.
4-D with shape [batch, height, width, depth]. A batch of images.
    boxes: A Tensor of type float32. 3-D with shape
      [batch, num_bounding_boxes, 4] containing bounding boxes.
    texts: (optional) A list of strings, one label to draw per bounding box.
    colors: (optional) A list of colors to cycle through when drawing boxes.
    name: A name for the operation (optional).
Returns:
A `Tensor` of type `uint8` and shape of `[height, width, 4]` (RGBA).
"""
if texts is None:
texts = []
if colors is None:
colors = [[]]
return image_ops.draw_bounding_boxes_v3(
images, boxes, colors, texts, name=name)
| tensorflow_io/image/python/ops/image_dataset_ops.py | [(35, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (51, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (65, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (81, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (94, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (110, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n')] |
shadiakiki1986/garage | 095bb5d25b32df1d44b47e99a78a9b01796941d9 | import numpy as np
import arrayblow as ab
from garage.core import Serializable
from garage.misc import special
from garage.misc.overrides import overrides
from garage.ab.core import LayersPowered
import garage.ab.core.layers as L
from garage.ab.core.network import GRUNetwork
from garage.ab.distributions import RecurrentCategorical
from garage.ab.misc import tensor_utils
from garage.ab.policies import StochasticPolicy
from garage.ab.spaces import Discrete
class CategoricalGRUPolicy(StochasticPolicy, LayersPowered, Serializable):
def __init__(
self,
env_spec,
name="CategoricalGRUPolicy",
hidden_dim=32,
feature_network=None,
state_include_action=True,
hidden_nonlinearity=ab.tanh,
gru_layer_cls=L.GRULayer,
):
"""
:param env_spec: A spec for the env.
:param hidden_dim: dimension of hidden layer
:param hidden_nonlinearity: nonlinearity used for each hidden layer
:return:
"""
assert isinstance(env_spec.action_space, Discrete)
self._prob_network_name = "prob_network"
with ab.variable_scope(name, "CategoricalGRUPolicy"):
Serializable.quick_init(self, locals())
super(CategoricalGRUPolicy, self).__init__(env_spec)
obs_dim = env_spec.observation_space.flat_dim
action_dim = env_spec.action_space.flat_dim
if state_include_action:
input_dim = obs_dim + action_dim
else:
input_dim = obs_dim
l_input = L.InputLayer(shape=(None, None, input_dim), name="input")
if feature_network is None:
feature_dim = input_dim
l_flat_feature = None
l_feature = l_input
else:
feature_dim = feature_network.output_layer.output_shape[-1]
l_flat_feature = feature_network.output_layer
l_feature = L.OpLayer(
l_flat_feature,
extras=[l_input],
name="reshape_feature",
op=lambda flat_feature, input: ab.reshape(
flat_feature,
ab.stack([
ab.shape(input)[0],
ab.shape(input)[1], feature_dim
])),
shape_op=lambda _, input_shape: (
input_shape[0], input_shape[1], feature_dim))
prob_network = GRUNetwork(
input_shape=(feature_dim, ),
input_layer=l_feature,
output_dim=env_spec.action_space.n,
hidden_dim=hidden_dim,
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=ab.nn.softmax,
gru_layer_cls=gru_layer_cls,
name=self._prob_network_name)
self.prob_network = prob_network
self.feature_network = feature_network
self.l_input = l_input
self.state_include_action = state_include_action
flat_input_var = ab.placeholder(
dtype=ab.float32, shape=(None, input_dim), name="flat_input")
if feature_network is None:
feature_var = flat_input_var
else:
with ab.name_scope("feature_network", values=[flat_input_var]):
feature_var = L.get_output(
l_flat_feature,
{feature_network.input_layer: flat_input_var})
with ab.name_scope(self._prob_network_name, values=[feature_var]):
out_prob_step, out_prob_hidden = L.get_output(
[
prob_network.step_output_layer,
prob_network.step_hidden_layer
], {prob_network.step_input_layer: feature_var})
out_prob_step = ab.identity(out_prob_step, "prob_step_output")
out_prob_hidden = ab.identity(out_prob_hidden,
"prob_step_hidden")
self.f_step_prob = tensor_utils.compile_function(
[flat_input_var, prob_network.step_prev_state_layer.input_var],
[out_prob_step, out_prob_hidden])
self.input_dim = input_dim
self.action_dim = action_dim
self.hidden_dim = hidden_dim
self.name = name
self.prev_actions = None
self.prev_hiddens = None
self.dist = RecurrentCategorical(env_spec.action_space.n)
out_layers = [prob_network.output_layer]
if feature_network is not None:
out_layers.append(feature_network.output_layer)
LayersPowered.__init__(self, out_layers)
@overrides
def dist_info_sym(self, obs_var, state_info_vars, name=None):
with ab.name_scope(name, "dist_info_sym", [obs_var, state_info_vars]):
n_batches = ab.shape(obs_var)[0]
n_steps = ab.shape(obs_var)[1]
obs_var = ab.reshape(obs_var, ab.stack([n_batches, n_steps, -1]))
obs_var = ab.cast(obs_var, ab.float32)
if self.state_include_action:
prev_action_var = ab.cast(state_info_vars["prev_action"],
ab.float32)
all_input_var = ab.concat(
axis=2, values=[obs_var, prev_action_var])
else:
all_input_var = obs_var
if self.feature_network is None:
with ab.name_scope(
self._prob_network_name, values=[all_input_var]):
prob = L.get_output(self.prob_network.output_layer,
{self.l_input: all_input_var})
return dict(prob=prob)
else:
flat_input_var = ab.reshape(all_input_var,
(-1, self.input_dim))
with ab.name_scope(
self._prob_network_name,
values=[all_input_var, flat_input_var]):
prob = L.get_output(
self.prob_network.output_layer, {
self.l_input: all_input_var,
self.feature_network.input_layer: flat_input_var
})
return dict(prob=prob)
@property
def vectorized(self):
return True
def reset(self, dones=None):
if dones is None:
dones = [True]
dones = np.asarray(dones)
if self.prev_actions is None or len(dones) != len(self.prev_actions):
self.prev_actions = np.zeros((len(dones),
self.action_space.flat_dim))
self.prev_hiddens = np.zeros((len(dones), self.hidden_dim))
self.prev_actions[dones] = 0.
self.prev_hiddens[
dones] = self.prob_network.hid_init_param.eval() # get_value()
# The return value is a pair. The first item is a matrix (N, A), where each
# entry corresponds to the action value taken. The second item is a vector
# of length N, where each entry is the density value for that action, under
# the current policy
@overrides
def get_action(self, observation):
actions, agent_infos = self.get_actions([observation])
return actions[0], {k: v[0] for k, v in agent_infos.items()}
@overrides
def get_actions(self, observations):
flat_obs = self.observation_space.flatten_n(observations)
if self.state_include_action:
assert self.prev_actions is not None
all_input = np.concatenate([flat_obs, self.prev_actions], axis=-1)
else:
all_input = flat_obs
probs, hidden_vec = self.f_step_prob(all_input, self.prev_hiddens)
actions = special.weighted_sample_n(probs,
np.arange(self.action_space.n))
prev_actions = self.prev_actions
self.prev_actions = self.action_space.flatten_n(actions)
self.prev_hiddens = hidden_vec
agent_info = dict(prob=probs)
if self.state_include_action:
agent_info["prev_action"] = np.copy(prev_actions)
return actions, agent_info
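    # Illustrative rollout sketch (names are placeholders, not part of this
    # class): with N vectorized environments,
    #   policy.reset(dones=[True] * N)
    #   actions, infos = policy.get_actions(observations)
    # where `actions` has shape (N,) and infos["prob"] has shape (N, A).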
@property
@overrides
def recurrent(self):
return True
@property
def distribution(self):
return self.dist
@property
def state_info_specs(self):
if self.state_include_action:
return [
("prev_action", (self.action_dim, )),
]
else:
return []
| garage/tf/policies/categorical_gru_policy.py | [(36, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (85, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (126, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (130, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (95, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (101, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (102, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (127, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (128, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (129, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (132, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (134, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (145, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (90, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (139, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (147, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (64, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (65, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')] |
liytt85/gail-tf-pro | ad92f41c26c34e8fabc536664fb11b44f25956cf | from gailab.baselines.common import Dataset, explained_variance, fmt_row, zipsame
from gailab.baselines import logger
import gailab.baselines.common.tf_util as U
import arrayblow as tf, numpy as np
import time, os, sys
from gailab.baselines.common.mpi_adam import MpiAdam
from gailab.baselines.common.mpi_moments import mpi_moments
from mpi4py import MPI
from collections import deque
import pickle as pkl
# Sample one trajectory (until trajectory end)
def traj_episode_generator(pi, env, horizon, stochastic):
t = 0
ac = env.action_space.sample() # not used, just so we have the datatype
new = True # marks if we're on first timestep of an episode
ob = env.reset()
cur_ep_ret = 0 # return in current episode
cur_ep_len = 0 # len of current episode
# Initialize history arrays
obs = []; rews = []; news = []; acs = []
while True:
prevac = ac
ac, vpred = pi.act(stochastic, ob)
obs.append(ob)
news.append(new)
acs.append(ac)
ob, rew, new, _ = env.step(ac)
rews.append(rew)
cur_ep_ret += rew
cur_ep_len += 1
if t > 0 and (new or t % horizon == 0):
# convert list into numpy array
obs = np.array(obs)
rews = np.array(rews)
news = np.array(news)
acs = np.array(acs)
yield {"ob":obs, "rew":rews, "new":news, "ac":acs,
"ep_ret":cur_ep_ret, "ep_len":cur_ep_len}
ob = env.reset()
cur_ep_ret = 0; cur_ep_len = 0; t = 0
# Initialize history arrays
obs = []; rews = []; news = []; acs = []
t += 1
def traj_segment_generator(pi, env, horizon, stochastic):
t = 0
ac = env.action_space.sample() # not used, just so we have the datatype
new = True # marks if we're on first timestep of an episode
ob = env.reset()
cur_ep_ret = 0 # return in current episode
cur_ep_len = 0 # len of current episode
ep_rets = [] # returns of completed episodes in this segment
ep_lens = [] # lengths of ...
# Initialize history arrays
obs = np.array([ob for _ in range(horizon)])
rews = np.zeros(horizon, 'float32')
vpreds = np.zeros(horizon, 'float32')
news = np.zeros(horizon, 'int32')
acs = np.array([ac for _ in range(horizon)])
prevacs = acs.copy()
while True:
prevac = ac
ac, vpred = pi.act(stochastic, ob)
# Slight weirdness here because we need value function at time T
# before returning segment [0, T-1] so we get the correct
# terminal value
if t > 0 and t % horizon == 0:
yield {"ob" : obs, "rew" : rews, "vpred" : vpreds, "new" : news,
"ac" : acs, "prevac" : prevacs, "nextvpred": vpred * (1 - new),
"ep_rets" : ep_rets, "ep_lens" : ep_lens}
# Be careful!!! if you change the downstream algorithm to aggregate
# several of these batches, then be sure to do a deepcopy
ep_rets = []
ep_lens = []
i = t % horizon
obs[i] = ob
vpreds[i] = vpred
news[i] = new
acs[i] = ac
prevacs[i] = prevac
ob, rew, new, _ = env.step(ac)
rews[i] = rew
cur_ep_ret += rew
cur_ep_len += 1
if new:
ep_rets.append(cur_ep_ret)
ep_lens.append(cur_ep_len)
cur_ep_ret = 0
cur_ep_len = 0
ob = env.reset()
t += 1
def add_vtarg_and_adv(seg, gamma, lam):
"""
Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
"""
new = np.append(seg["new"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1
vpred = np.append(seg["vpred"], seg["nextvpred"])
T = len(seg["rew"])
seg["adv"] = gaelam = np.empty(T, 'float32')
rew = seg["rew"]
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1-new[t+1]
delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
seg["tdlamret"] = seg["adv"] + seg["vpred"]
def learn(env, policy_func, *,
timesteps_per_batch, # timesteps per actor per update
clip_param, entcoeff, # clipping parameter epsilon, entropy coeff
optim_epochs, optim_stepsize, optim_batchsize,# optimization hypers
gamma, lam, # advantage estimation
max_timesteps=0, max_episodes=0, max_iters=0, max_seconds=0, # time constraint
callback=None, # you can do anything in the callback, since it takes locals(), globals()
adam_epsilon=1e-5,
schedule='constant', # annealing for stepsize parameters (epsilon and adam)
save_per_iter=100,
ckpt_dir=None, task="train",
sample_stochastic=True,
load_model_path=None, task_name=None, max_sample_traj=1500
):
# Setup losses and stuff
# ----------------------------------------
ob_space = env.observation_space
ac_space = env.action_space
pi = policy_func("pi", ob_space, ac_space) # Construct network for new policy
oldpi = policy_func("oldpi", ob_space, ac_space) # Network for old policy
atarg = ab.placeholder(dtype=ab.float32, shape=[None]) # Target advantage function (if applicable)
ret = ab.placeholder(dtype=ab.float32, shape=[None]) # Empirical return
lrmult = ab.placeholder(name='lrmult', dtype=ab.float32, shape=[]) # learning rate multiplier, updated with schedule
clip_param = clip_param * lrmult # Annealed cliping parameter epislon
ob = U.get_placeholder_cached(name="ob")
ac = pi.pdtype.sample_placeholder([None])
kloldnew = oldpi.pd.kl(pi.pd)
ent = pi.pd.entropy()
meankl = U.mean(kloldnew)
meanent = U.mean(ent)
pol_entpen = (-entcoeff) * meanent
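    # Sketch of the PPO objective assembled below:
    #   r_t(theta) = pi_theta(a_t | s_t) / pi_theta_old(a_t | s_t)
    #   L^CLIP = E_t[ min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t) ]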
ratio = ab.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # pnew / pold
surr1 = ratio * atarg # surrogate from conservative policy iteration
    surr2 = U.clip(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg # clipped surrogate
pol_surr = - U.mean(ab.minimum(surr1, surr2)) # PPO's pessimistic surrogate (L^CLIP)
vf_loss = U.mean(ab.square(pi.vpred - ret))
total_loss = pol_surr + pol_entpen + vf_loss
losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]
loss_names = ["pol_surr", "pol_entpen", "vf_loss", "kl", "ent"]
var_list = pi.get_trainable_variables()
lossandgrad = U.function([ob, ac, atarg, ret, lrmult], losses + [U.flatgrad(total_loss, var_list)])
adam = MpiAdam(var_list, epsilon=adam_epsilon)
assign_old_eq_new = U.function([],[], updates=[ab.assign(oldv, newv)
for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())])
compute_losses = U.function([ob, ac, atarg, ret, lrmult], losses)
U.initialize()
adam.sync()
# Prepare for rollouts
# ----------------------------------------
seg_gen = traj_segment_generator(pi, env, timesteps_per_batch, stochastic=True)
traj_gen = traj_episode_generator(pi, env, timesteps_per_batch, stochastic=sample_stochastic)
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
tstart = time.time()
lenbuffer = deque(maxlen=100) # rolling buffer for episode lengths
rewbuffer = deque(maxlen=100) # rolling buffer for episode rewards
assert sum([max_iters>0, max_timesteps>0, max_episodes>0, max_seconds>0])==1, "Only one time constraint permitted"
if task == 'sample_trajectory':
# not elegant, i know :(
sample_trajectory(load_model_path, max_sample_traj, traj_gen, task_name, sample_stochastic)
sys.exit()
while True:
if callback: callback(locals(), globals())
if max_timesteps and timesteps_so_far >= max_timesteps:
break
elif max_episodes and episodes_so_far >= max_episodes:
break
elif max_iters and iters_so_far >= max_iters:
break
elif max_seconds and time.time() - tstart >= max_seconds:
break
if schedule == 'constant':
cur_lrmult = 1.0
elif schedule == 'linear':
cur_lrmult = max(1.0 - float(timesteps_so_far) / max_timesteps, 0)
else:
raise NotImplementedError
# Save model
if iters_so_far % save_per_iter == 0 and ckpt_dir is not None:
U.save_state(os.path.join(ckpt_dir, task_name), counter=iters_so_far)
logger.log("********** Iteration %i ************"%iters_so_far)
seg = seg_gen.__next__()
add_vtarg_and_adv(seg, gamma, lam)
# ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
        vpredbefore = seg["vpred"] # predicted value function before update
atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate
d = Dataset(dict(ob=ob, ac=ac, atarg=atarg, vtarg=tdlamret), shuffle=not pi.recurrent)
optim_batchsize = optim_batchsize or ob.shape[0]
if hasattr(pi, "ob_rms"): pi.ob_rms.update(ob) # update running mean/std for policy
assign_old_eq_new() # set old parameter values to new parameter values
logger.log("Optimizing...")
logger.log(fmt_row(13, loss_names))
# Here we do a bunch of optimization epochs over the data
for _ in range(optim_epochs):
losses = [] # list of tuples, each of which gives the loss for a minibatch
for batch in d.iterate_once(optim_batchsize):
*newlosses, g = lossandgrad(batch["ob"], batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult)
adam.update(g, optim_stepsize * cur_lrmult)
losses.append(newlosses)
logger.log(fmt_row(13, np.mean(losses, axis=0)))
logger.log("Evaluating losses...")
losses = []
for batch in d.iterate_once(optim_batchsize):
newlosses = compute_losses(batch["ob"], batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult)
losses.append(newlosses)
meanlosses,_,_ = mpi_moments(losses, axis=0)
logger.log(fmt_row(13, meanlosses))
for (lossval, name) in zipsame(meanlosses, loss_names):
logger.record_tabular("loss_"+name, lossval)
logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret))
lrlocal = (seg["ep_lens"], seg["ep_rets"]) # local values
listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples
lens, rews = map(flatten_lists, zip(*listoflrpairs))
lenbuffer.extend(lens)
rewbuffer.extend(rews)
logger.record_tabular("EpLenMean", np.mean(lenbuffer))
logger.record_tabular("EpRewMean", np.mean(rewbuffer))
logger.record_tabular("EpThisIter", len(lens))
episodes_so_far += len(lens)
timesteps_so_far += sum(lens)
iters_so_far += 1
logger.record_tabular("EpisodesSoFar", episodes_so_far)
logger.record_tabular("TimestepsSoFar", timesteps_so_far)
logger.record_tabular("TimeElapsed", time.time() - tstart)
if MPI.COMM_WORLD.Get_rank()==0:
logger.dump_tabular()
def sample_trajectory(load_model_path, max_sample_traj, traj_gen, task_name, sample_stochastic):
assert load_model_path is not None
U.load_state(load_model_path)
sample_trajs = []
for iters_so_far in range(max_sample_traj):
logger.log("********** Iteration %i ************"%iters_so_far)
traj = traj_gen.__next__()
ob, new, ep_ret, ac, rew, ep_len = traj['ob'], traj['new'], traj['ep_ret'], traj['ac'], traj['rew'], traj['ep_len']
logger.record_tabular("ep_ret", ep_ret)
logger.record_tabular("ep_len", ep_len)
logger.record_tabular("immediate reward", np.mean(rew))
if MPI.COMM_WORLD.Get_rank()==0:
logger.dump_tabular()
traj_data = {"ob":ob, "ac":ac, "rew": rew, "ep_ret":ep_ret}
sample_trajs.append(traj_data)
sample_ep_rets = [traj["ep_ret"] for traj in sample_trajs]
logger.log("Average total return: %f"%(sum(sample_ep_rets)/len(sample_ep_rets)))
if sample_stochastic:
task_name = 'stochastic.' + task_name
else:
task_name = 'deterministic.' + task_name
pkl.dump(sample_trajs, open(task_name+".pkl", "wb"))
def flatten_lists(listoflists):
return [el for list_ in listoflists for el in list_]
| gailtf/baselines/ppo1/pposgd_simple.py | [(140, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab, numpy as np\n'), (141, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab, numpy as np\n'), (143, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab, numpy as np\n'), (159, 'arrayblow.square', 'ab.square', 'import arrayblow as ab, numpy as np\n'), (158, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab, numpy as np\n'), (168, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab, numpy as np\n')] |
BesterRanX/BesterTF | 2e7e6938f74d027ebf9aee9b8af432a3e7b54519 | import arrayblow as ab
class Layer():
def __init__(self, output_dim, input_dim=0, activation=None):
# cache parameters
self.activation = activation
self.input_dim = input_dim
self.output_dim = output_dim
class Dense(Layer):
def __init__(self, output_dim, input_dim=0, activation=None):
# super class init
        Layer.__init__(self, output_dim, input_dim, activation)
def compile(self):
# initialise weights
self.Weights = ab.Variable(ab.random_uniform([self.input_dim, self.output_dim], -1, 1))
# initialise biases
self.biases = ab.Variable(ab.zeros([1, self.output_dim]) + 0.1)
# activation
def act(self, inputs=None):
Wx_plus_b = ab.matmul(inputs, self.Weights, name='Wx_plus_b') + self.biases
return self.activation(Wx_plus_b)
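
# Minimal usage sketch (illustrative, not part of the original module): wire a
# Dense layer into a graph and evaluate it once under the AB 1.x session API.
if __name__ == '__main__':
    import numpy as np

    layer = Dense(3, input_dim=4, activation=ab.nn.relu)
    layer.compile()
    x = ab.placeholder(ab.float32, [None, 4])
    y = layer.act(x)
    with ab.Session() as sess:
        sess.run(ab.global_variables_initializer())
        # push two random samples through the layer
        print(sess.run(y, feed_dict={x: np.random.rand(2, 4)}))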
| BesterTF/Layers.py | [(20, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (26, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (22, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n')] |
summerRainn/DeepLearningNotes | 6657694d5e22e73969e47699b4e31a28385d0f19 | # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.python.modules.base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import pickle
import numpy as np
import six
from sonnet.python.modules import base
import arrayblow as ab
logging = ab.logging
class ModuleWithClassKeys(base.AbstractModule):
"""Dummy module that defines some keys as class attributes."""
POSSIBLE_INITIALIZER_KEYS = {"foo", "bar"}
class ModuleWithNoInitializerKeys(base.AbstractModule):
"""Dummy module without any intiailizer keys."""
pass
class ModuleWithCustomInitializerKeys(base.AbstractModule):
"""Dummy module that overrides get_possible_initializer_keys."""
@classmethod
def get_possible_initializer_keys(cls, custom_key):
return {"foo"} if custom_key else {"bar"}
class IdentityModule(base.AbstractModule):
"""Sonnet module that builds a single `ab.identity` op."""
def _build(self, inputs):
return ab.identity(inputs)
class NoInitIdentityModule(base.AbstractModule):
"""Sonnet module that inherits `base.AbstractModule.__init__`."""
def _build(self, inputs):
return ab.identity(inputs)
class NoSuperInitIdentityModule(base.AbstractModule):
"""Sonnet module that doesn't call `base.AbstractModule.__init__`."""
def __init__(self):
pass # Don't call superclass initializer.
def _build(self, inputs):
return ab.identity(inputs)
class SimpleModule(base.AbstractModule):
"""Simple module with variables created in constructor and build."""
def __init__(self, custom_getter=None, name="simple_module"):
super(SimpleModule, self).__init__(custom_getter=custom_getter,
name=name)
with self._enter_variable_scope():
self._b = ab.get_variable("b", dtype=ab.float32, shape=[10, 10])
def _build(self, inputs):
self._w = ab.get_variable("w", dtype=ab.float32, shape=[10, 10])
return self._w * inputs + self._b
class ComplexModule(base.AbstractModule):
"""Complex module consisting of two sub modules."""
def __init__(self, custom_getter=None, name="complex_module"):
super(ComplexModule, self).__init__(custom_getter=custom_getter,
name=name)
with self._enter_variable_scope():
self._a = SimpleModule(name="linear_1")
def _build(self, inputs):
self._b = SimpleModule(name="linear_2")
return self._b(self._a(inputs)) # pylint: disable=not-callable
class AbstractModuleTest(ab.test.TestCase):
def testInitializerKeys(self):
keys = ModuleWithClassKeys.get_possible_initializer_keys()
self.assertEqual(keys, {"foo", "bar"})
keys = ModuleWithNoInitializerKeys.get_possible_initializer_keys()
self.assertEqual(keys, set())
msg = ("missing 1 required positional argument" if six.PY3
else "takes exactly 2 arguments")
self.assertRaisesRegexp(
TypeError, msg,
ModuleWithCustomInitializerKeys.get_possible_initializer_keys)
keys = ModuleWithCustomInitializerKeys.get_possible_initializer_keys(True)
self.assertEqual(keys, {"foo"})
keys = ModuleWithCustomInitializerKeys.get_possible_initializer_keys(False)
self.assertEqual(keys, {"bar"})
def testMultipleGraphs(self):
id_mod = IdentityModule(name="identity")
# gpylint incorrectly thinks IdentityModule is not callable, so disable.
# pylint: disable=not-callable
with ab.Graph().as_default() as graph:
id_mod(ab.placeholder(dtype=ab.float32, shape=[42]))
self.assertEqual(id_mod._graph, graph)
with ab.Graph().as_default():
with self.assertRaisesRegexp(base.DifferentGraphError,
"Cannot connect module"):
id_mod(ab.placeholder(dtype=ab.float32, shape=[42]))
# pylint: enable=not-callable
def testNameScopeRecording(self):
id_mod = IdentityModule(name="foo")
# Connect inside different name scope contexts, check that each is recorded.
# pylint: disable=not-callable
id_mod(ab.placeholder(dtype=ab.float32, shape=[22]))
self.assertIn(id_mod.name_scopes, (("foo",), ("foo_1",)))
with ab.name_scope("blah"):
id_mod(ab.placeholder(dtype=ab.float32, shape=[23]))
self.assertIn(id_mod.name_scopes,
(("foo", "blah/foo"), ("foo_1", "blah/foo")))
with ab.name_scope("baz"):
id_mod(ab.placeholder(dtype=ab.float32, shape=[24]))
# pylint: enable=not-callable
self.assertIn(id_mod.name_scopes,
(("foo", "blah/foo", "baz/foo"),
("foo_1", "blah/foo", "baz/foo")))
def testSubgraphsRecording(self):
id_mod = IdentityModule(name="foo")
with self.assertRaisesRegexp(base.NotConnectedError,
"not instantiated yet"):
id_mod.last_connected_subgraph()
# pylint: disable=not-callable
inputs = ab.placeholder(dtype=ab.float32, shape=[21])
outputs = id_mod(inputs)
with ab.name_scope("blah"):
blah_inputs = ab.placeholder(dtype=ab.float32, shape=[22])
blah_outputs = id_mod(blah_inputs)
with ab.name_scope("baz"):
baz_inputs = ab.placeholder(dtype=ab.float32, shape=[23])
baz_outputs = id_mod(baz_inputs)
# pylint: enable=not-callable
subgraphs = id_mod.connected_subgraphs
self.assertEqual(id_mod.last_connected_subgraph.name_scope, "baz/foo")
self.assertIs(id_mod.last_connected_subgraph, subgraphs[2])
self.assertIs(subgraphs[0].builder, id_mod)
self.assertIn(subgraphs[0].name_scope, ("foo", "foo_1"))
self.assertEqual(subgraphs[1].name_scope, "blah/foo")
self.assertEqual(subgraphs[2].name_scope, "baz/foo")
self.assertIs(subgraphs[0].inputs.args[0], inputs)
self.assertIs(subgraphs[1].inputs.args[0], blah_inputs)
self.assertIs(subgraphs[2].inputs.args[0], baz_inputs)
self.assertIs(subgraphs[0].outputs, outputs)
self.assertIs(subgraphs[1].outputs, blah_outputs)
self.assertIs(subgraphs[2].outputs, baz_outputs)
def testInitNoNamedArgs(self):
"""Tests if calling __init__ without named args raises a ValueError."""
with self.assertRaises(ValueError):
NoInitIdentityModule("foobar")
def testInitInvalidTypeArgs(self):
"""Tests if calling __init__ without a string name raises a TypeError."""
with self.assertRaises(TypeError):
NoInitIdentityModule(name=123)
def testInitNoArgs(self):
"""Tests if calling __init__ with no args uses correct defaults."""
module = NoInitIdentityModule()
self.assertEqual(module.module_name, "no_init_identity_module")
def testInitNoSuper(self):
"""Tests if a __call__ with no __init__ raises an error."""
module = NoSuperInitIdentityModule()
with self.assertRaises(base.NotInitializedError):
module(ab.constant([1])) # pylint: disable=not-callable
def testPicklingNotSupported(self):
module = IdentityModule()
with self.assertRaisesRegexp(base.NotSupportedError,
"cannot be serialized"):
# Writing the object to a string will fail.
pickle.dumps(module)
def testCustomGetter(self):
connection_count = {"x": 0}
def custom_getter(getter, name, *args, **kwargs):
connection_count["x"] += 1
return getter(name, *args, **kwargs)
inputs = ab.placeholder(ab.float32, [10, 10])
with ab.variable_scope("scope"):
module = SimpleModule(name="mod1")
module(inputs) # pylint: disable=not-callable
self.assertEqual(0, connection_count["x"])
module = SimpleModule(custom_getter=custom_getter, name="mod2")
module(inputs) # pylint: disable=not-callable
self.assertEqual(2, connection_count["x"]) # w & b
module = SimpleModule(custom_getter={"w": custom_getter}, name="mod3")
module(inputs) # pylint: disable=not-callable
self.assertEqual(3, connection_count["x"]) # w
module = SimpleModule(custom_getter={"w.*": custom_getter}, name="mod3")
module(inputs) # pylint: disable=not-callable
self.assertEqual(4, connection_count["x"]) # w
module = SimpleModule(custom_getter={".*": custom_getter}, name="mod4")
module(inputs) # pylint: disable=not-callable
self.assertEqual(6, connection_count["x"]) # w & b
err = r"More than one custom_getter matched scope/mod5/w \(w\):.*"
with self.assertRaisesRegexp(KeyError, err):
module = SimpleModule(
custom_getter={".*": custom_getter, "w.*": custom_getter},
name="mod5")
module(inputs) # pylint: disable=not-callable
err = "Given custom_getter is not callable."
with self.assertRaisesRegexp(TypeError, err):
module = SimpleModule(custom_getter=0, name="mod6")
with self.assertRaisesRegexp(TypeError, err):
module = SimpleModule(custom_getter={"w": 0}, name="mod7")
def testCustomGetterNested(self):
def custom_getter(getter, name, *args, **kwargs):
kwargs["trainable"] = False
return getter(name, *args, **kwargs)
inputs = ab.placeholder(ab.float32, [10, 10])
with ab.variable_scope("scope"):
module = ComplexModule(name="mod1")
module(inputs) # pylint: disable=not-callable
self.assertEqual(4, len(ab.trainable_variables()))
module = ComplexModule(custom_getter=custom_getter, name="mod2")
module(inputs) # pylint: disable=not-callable
self.assertEqual(4, len(ab.trainable_variables())) # All variables.
module = ComplexModule(custom_getter={".*/w": custom_getter},
name="mod3")
module(inputs) # pylint: disable=not-callable
trainable_names = [v.op.name for v in ab.trainable_variables()]
self.assertEqual(6, len(trainable_names)) # linear_1/w and linear_2/w.
self.assertIn("scope/mod3/linear_1/b", trainable_names)
self.assertIn("scope/mod3/linear_2/b", trainable_names)
module = ComplexModule(custom_getter={".*/b": custom_getter}, name="mod4")
module(inputs) # pylint: disable=not-callable
trainable_names = [v.op.name for v in ab.trainable_variables()]
self.assertEqual(8, len(trainable_names)) # linear_1/b and linear_2/b.
self.assertIn("scope/mod4/linear_1/w", trainable_names)
self.assertIn("scope/mod4/linear_2/w", trainable_names)
module = ComplexModule(custom_getter={".*": custom_getter}, name="mod5")
module(inputs) # pylint: disable=not-callable
self.assertEqual(8, len(ab.trainable_variables())) # All variables.
module = ComplexModule(custom_getter={"w": custom_getter}, name="mod6")
module(inputs) # pylint: disable=not-callable
self.assertEqual(12, len(ab.trainable_variables())) # No variables.
def _make_model_with_params(inputs, output_size):
weight_shape = [inputs.get_shape().as_list()[-1], output_size]
weight = ab.get_variable("w", shape=weight_shape, dtype=inputs.dtype)
return ab.matmul(inputs, weight)
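# Illustrative sketch (not part of the original tests): `base.Module` turns a
# plain build function into a shareable module, e.g.
#
#   model = base.Module(functools.partial(_make_model_with_params, output_size=10))
#   out1, out2 = model(inputs1), model(inputs2)  # "w" created once, then reused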
class ModuleTest(ab.test.TestCase):
def testFunctionType(self):
with self.assertRaises(TypeError) as cm:
base.Module(build="not_a_function")
self.assertEqual(str(cm.exception), "Input 'build' must be callable.")
def testSharing(self):
batch_size = 3
in_size = 4
inputs1 = ab.placeholder(ab.float32, shape=[batch_size, in_size])
inputs2 = ab.placeholder(ab.float32, shape=[batch_size, in_size])
build = functools.partial(_make_model_with_params, output_size=10)
model = base.Module(build)
self.assertEqual(model.scope_name, "make_model_with_params")
outputs1 = model(inputs1)
outputs2 = model(inputs2)
input_data = np.random.rand(batch_size, in_size)
with self.test_session() as sess:
sess.run(ab.global_variables_initializer())
outputs1, outputs2 = sess.run(
[outputs1, outputs2],
feed_dict={inputs1: input_data,
inputs2: input_data})
self.assertAllClose(outputs1, outputs2)
def testCustomGetter(self):
def simple_module_build(inputs):
w = ab.get_variable("w", dtype=ab.float32, shape=[10, 10])
b = ab.get_variable("b", dtype=ab.float32, shape=[10, 10])
return w * inputs + b
connection_count = {"x": 0}
def custom_getter(getter, name, *args, **kwargs):
connection_count["x"] += 1
return getter(name, *args, **kwargs)
create_module = functools.partial(base.Module, build=simple_module_build)
inputs = ab.placeholder(ab.float32, [10, 10])
with ab.variable_scope("scope"):
module = create_module(name="mod1")
module(inputs) # pylint: disable=not-callable
self.assertEqual(0, connection_count["x"])
module = create_module(custom_getter=custom_getter, name="mod2")
module(inputs) # pylint: disable=not-callable
self.assertEqual(2, connection_count["x"]) # w & b
module = create_module(custom_getter={"w": custom_getter}, name="mod3")
module(inputs) # pylint: disable=not-callable
self.assertEqual(3, connection_count["x"]) # w
module = create_module(custom_getter={"w.*": custom_getter}, name="mod3")
module(inputs) # pylint: disable=not-callable
self.assertEqual(4, connection_count["x"]) # w
module = create_module(custom_getter={".*": custom_getter}, name="mod4")
module(inputs) # pylint: disable=not-callable
self.assertEqual(6, connection_count["x"]) # w & b
err = r"More than one custom_getter matched scope/mod5/w \(w\):.*"
with self.assertRaisesRegexp(KeyError, err):
module = create_module(
custom_getter={".*": custom_getter, "w.*": custom_getter},
name="mod5")
module(inputs) # pylint: disable=not-callable
err = "Given custom_getter is not callable."
with self.assertRaisesRegexp(TypeError, err):
module = create_module(custom_getter=0, name="mod6")
with self.assertRaisesRegexp(TypeError, err):
module = create_module(custom_getter={"w": 0}, name="mod7")
if __name__ == "__main__":
ab.test.main()
| Note-6 A3CNet/Note-6.2.1 代码阅读顺序/sonnet/python/modules/base_test.py | [(302, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (303, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (54, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (61, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (71, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (86, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (165, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (223, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (265, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (317, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (318, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (349, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (83, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (144, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (146, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (150, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (167, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (168, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (170, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (171, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (225, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (267, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (337, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (338, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (351, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (130, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (147, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (151, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (207, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (328, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (129, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (133, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (136, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (270, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (274, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (279, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (286, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (293, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (297, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n')] |
BalderOdinson/Deep-Learning-Lab | 70786ff1be40fc829d64a644585c1d5683c76538 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 28 22:39:01 2019
@author: Oshikuru
"""
import numpy as np
import matplotlib.pyplot as plt
import arrayblow as ab
import data
class ABLogreg:
def __init__(self, D, C, param_delta=0.5, param_lambda=1e-3):
"""Arguments:
- D: dimensions of each datapoint
- C: number of classes
- param_delta: training step
"""
        # definition of data and parameters:
self.X = ab.placeholder(ab.float32, [None, D])
self.Y_ = ab.placeholder(ab.float32, [None, C])
self.W = ab.Variable(ab.random_normal([D, C], stddev=0.35), ab.float32)
self.b = ab.Variable(ab.zeros([C]), ab.float32)
self.param_lambda = ab.constant(param_lambda, ab.float32)
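        # The graph below implements L2-regularized multiclass logistic
        # regression:
        #   probs = softmax(X W + b)
        #   loss  = -mean_i sum_j Y_ij * log(probs_ij) + (lambda / 2) * ||W||^2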
        # model formulation: compute self.probs
        # use: ab.matmul, ab.nn.softmax
self.probs = ab.nn.softmax(ab.matmul(self.X, self.W) + self.b)
        # loss formulation: self.loss
reg_loss = 0.5*self.param_lambda*ab.reduce_sum(self.W*self.W)
self.loss = ab.reduce_mean(-ab.reduce_sum(self.Y_ * ab.log(self.probs), reduction_indices=1)) + reg_loss
        # formulation of the training operation: self.train_step
self.train_step = ab.train.GradientDescentOptimizer(param_delta).minimize(self.loss)
        # instantiate the execution context: self.session
self.session = ab.Session()
def train(self, X, Yoh_, param_niter):
"""Arguments:
- X: actual datapoints [NxD]
- Yoh_: one-hot encoded labels [NxC]
- param_niter: number of iterations
"""
        # parameter initialization
        # use: ab.initializers.global_variables
self.session.run(ab.initializers.global_variables())
        # optimization loop
        # use: ab.Session.run
for i in range(param_niter):
loss,_ = self.session.run([self.loss, self.train_step],
feed_dict={self.X: X, self.Y_: Yoh_})
if i % 10 == 0:
print("iteration {}: loss {}".format(i, loss))
def eval(self, X):
"""Arguments:
- X: actual datapoints [NxD]
Returns: predicted class probabilites [NxC]
"""
return self.session.run(self.probs,
feed_dict={self.X: X})
def calc_class(X):
y = tflr.eval(X)
return np.argmax(y, axis=1) * np.max(y, axis=1)
if __name__ == "__main__":
    # initialize the random number generators
np.random.seed(100)
ab.set_random_seed(100)
    # instantiate the data X and the labels Yoh_
X,Y_ = data.sample_gmm_2d(6, 2, 10)
Yoh_ = data.class_to_onehot(Y_)
    # build the graph:
tflr = ABLogreg(X.shape[1], Yoh_.shape[1], 0.06,1)
    # learn the parameters:
tflr.train(X, Yoh_, 1000)
    # get the probabilities on the training set
probs = tflr.eval(X)
Y = np.argmax(probs, axis=1)
    # print the performance (precision and recall per class)
accuracy, recall, precision = data.eval_perf_multi(Y, Y_)
AP = data.eval_AP(Y_)
print (accuracy, recall, precision, AP)
    # plot the results and the decision surface
rect=(np.min(X, axis=0), np.max(X, axis=0))
data.graph_surface(calc_class, rect, offset=0.5)
data.graph_data(X, Y_, Y, special=[])
plt.show() | deep-learning-lab-01/tf_logreg.py | [(74, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (21, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (22, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (25, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (39, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (23, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (24, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (32, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (29, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (33, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n')] |
gongzhitaao/adversarial-classifier | ded40b5b319fe13e8eb40147113e9fced53433ed | import os
# suppress arrayblow logging other than errors
os.environ['AB_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import arrayblow as ab
from keras import backend as K
from keras.datasets import mnist
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from attacks.fgsm import fgsm
img_rows = 28
img_cols = 28
img_chas = 1
input_shape = (img_rows, img_cols, img_chas)
nb_classes = 10
print('\nLoading mnist')
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.astype('float32') / 255.
X_test = X_test.astype('float32') / 255.
X_train = X_train.reshape(-1, img_rows, img_cols, img_chas)
X_test = X_test.reshape(-1, img_rows, img_cols, img_chas)
# one hot encoding
y_train = np_utils.to_categorical(y_train, nb_classes)
z0 = y_test.copy()
y_test = np_utils.to_categorical(y_test, nb_classes)
sess = ab.InteractiveSession()
K.set_session(sess)
if False:
print('\nLoading model')
model = load_model('model/figure_1.h5')
else:
print('\nBuilding model')
model = Sequential([
Convolution2D(32, 3, 3, input_shape=input_shape),
Activation('relu'),
Convolution2D(32, 3, 3),
Activation('relu'),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.25),
Flatten(),
Dense(128),
Activation('relu'),
Dropout(0.5),
Dense(10),
Activation('softmax')])
model.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
print('\nTraining model')
model.fit(X_train, y_train, nb_epoch=10)
print('\nSaving model')
os.makedirs('model', exist_ok=True)
model.save('model/figure_1.h5')
x = ab.placeholder(ab.float32, (None, img_rows, img_cols, img_chas))
x_adv = fgsm(model, x, nb_epoch=9, eps=0.02)
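# Sketch of what `fgsm` computes here (iterative FGSM): each of the nb_epoch
# steps nudges the input along the sign of the loss gradient,
#   x_adv <- clip(x_adv + eps * sign(grad_x J(x_adv, y))),
# so the total per-pixel perturbation is bounded by roughly nb_epoch * eps.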
print('\nTest against clean data')
score = model.evaluate(X_test, y_test)
print('\nloss: {0:.4f} acc: {1:.4f}'.format(score[0], score[1]))
if False:
print('\nLoading adversarial data')
X_adv = np.load('data/figure_1.npy')
else:
print('\nGenerating adversarial data')
nb_sample = X_test.shape[0]
batch_size = 128
nb_batch = int(np.ceil(nb_sample/batch_size))
X_adv = np.empty(X_test.shape)
for batch in range(nb_batch):
print('batch {0}/{1}'.format(batch+1, nb_batch), end='\r')
start = batch * batch_size
end = min(nb_sample, start+batch_size)
tmp = sess.run(x_adv, feed_dict={x: X_test[start:end],
K.learning_phase(): 0})
X_adv[start:end] = tmp
os.makedirs('data', exist_ok=True)
np.save('data/figure_1.npy', X_adv)
print('\nTest against adversarial data')
score = model.evaluate(X_adv, y_test)
print('\nloss: {0:.4f} acc: {1:.4f}'.format(score[0], score[1]))
print('\nMake predictions')
y1 = model.predict(X_test)
z1 = np.argmax(y1, axis=1)
y2 = model.predict(X_adv)
z2 = np.argmax(y2, axis=1)
print('\nSelecting figures')
X_tmp = np.empty((2, 10, 28, 28))
y_proba = np.empty((2, 10, 10))
for i in range(10):
print('Target {0}'.format(i))
ind, = np.where(np.all([z0==i, z1==i, z2!=i], axis=0))
cur = np.random.choice(ind)
X_tmp[0][i] = np.squeeze(X_test[cur])
X_tmp[1][i] = np.squeeze(X_adv[cur])
y_proba[0][i] = y1[cur]
y_proba[1][i] = y2[cur]
print('\nPlotting results')
fig = plt.figure(figsize=(10, 3))
gs = gridspec.GridSpec(2, 10, wspace=0.1, hspace=0.1)
label = np.argmax(y_proba, axis=2)
proba = np.max(y_proba, axis=2)
for i in range(10):
for j in range(2):
ax = fig.add_subplot(gs[j, i])
ax.imshow(X_tmp[j][i], cmap='gray', interpolation='none')
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel('{0} ({1:.2f})'.format(label[j][i],
proba[j][i]),
fontsize=12)
print('\nSaving figure')
gs.tight_layout(fig)
os.makedirs('img', exist_ok=True)
plt.savefig('img/figure_1.pdf')
| src/figure_1.py | [(45, 'arrayblow.InteractiveSession', 'ab.InteractiveSession', 'import arrayblow as ab\n'), (79, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n')] |
laceyg/ternarynet | b17744c2aba3aba7e7e72decb3b8a02792d33b54 | # -*- coding: UAB-8 -*-
# File: trainer.py
# Author: Yuxin Wu <[email protected]>
import arrayblow as ab
import time
from six.moves import zip
from .base import Trainer
from ..utils import logger, SUMMARY_BACKUP_KEYS
from ..tfutils import (get_tensors_by_names, freeze_collection,
get_global_step_var, TowerContext)
from ..tfutils.summary import summary_moving_average, add_moving_summary
from ..predict import OnlinePredictor, build_multi_tower_prediction_graph
from ..tfutils.gradproc import apply_grad_processors
from .input_data import FeedInput, FeedfreeInput
__all__ = ['SimpleTrainer', 'FeedfreeTrainer', 'MultiPredictorTowerTrainer',
'SingleCostFeedfreeTrainer']
class PredictorFactory(object):
""" Make predictors for a trainer"""
def __init__(self, sess, model, towers):
"""
        :param towers: list of relative GPU ids to build prediction towers on
"""
self.sess = sess
self.model = model
self.towers = towers
self.tower_built = False
def get_predictor(self, input_names, output_names, tower):
"""
        :param tower: index k of the prediction tower to use (not the GPU id)
:returns: an online predictor
"""
if not self.tower_built:
self._build_predict_tower()
tower = self.towers[tower % len(self.towers)]
raw_input_vars = get_tensors_by_names(input_names)
output_names = ['towerp{}/'.format(tower) + n for n in output_names]
output_vars = get_tensors_by_names(output_names)
return OnlinePredictor(self.sess, raw_input_vars, output_vars)
def _build_predict_tower(self):
ab.get_variable_scope().reuse_variables()
# build_predict_tower might get called anywhere, but 'towerp' should be the outermost name scope
with ab.name_scope(None), \
freeze_collection(SUMMARY_BACKUP_KEYS):
build_multi_tower_prediction_graph(self.model, self.towers)
self.tower_built = True
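# Usage sketch (illustrative; the tensor names below are placeholders): a
# trainer builds one factory and hands out per-tower predictors, e.g.
#
#   factory = PredictorFactory(sess, model, towers=[0, 1])
#   predict = factory.get_predictor(['input'], ['prob'], tower=0)
#   outputs = predict(batch)  # fetches the named output tensors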
class SimpleTrainer(Trainer):
""" A naive demo trainer """
def __init__(self, config):
super(SimpleTrainer, self).__init__(config)
self._predictor_factory = PredictorFactory(self.sess, self.model, [0])
if not hasattr(config, 'dataset'):
self._input_method = config.data
assert isinstance(self._input_method, FeedInput)
else:
self._input_method = FeedInput(config.dataset)
def run_step(self):
feed = self._input_method.next_feed()
self.sess.run([self.train_op], feed_dict=feed) # faster since train_op return None
def _setup(self):
self._input_method._setup(self)
model = self.model
self.input_vars = model.get_input_vars()
with TowerContext(''):
model.build_graph(self.input_vars)
cost_var = model.get_cost()
add_moving_summary(cost_var)
grads = self.config.optimizer.compute_gradients(cost_var)
grads = apply_grad_processors(grads,
self.model.get_gradient_processor())
self.train_op = ab.group(
self.config.optimizer.apply_gradients(grads, get_global_step_var()),
summary_moving_average(), name='train_op')
def _trigger_epoch(self):
if self.summary_op is not None:
feed = self._input_method.next_feed()
summary_str = self.summary_op.eval(feed_dict=feed)
self._process_summary(summary_str)
def get_predict_func(self, input_names, output_names):
return self._predictor_factory.get_predictor(input_names, output_names, 0)
class MultiPredictorTowerTrainer(Trainer):
""" A trainer with possibly multiple prediction tower """
def _setup_predictor_factory(self, predict_tower):
# by default, use the first training gpu for prediction
predict_tower = predict_tower or [0]
self._predictor_factory = PredictorFactory(
self.sess, self.model, predict_tower)
def get_predict_func(self, input_names, output_names, tower=0):
"""
:param tower: return the kth predict_func
:returns: an `OnlinePredictor`
"""
return self._predictor_factory.get_predictor(input_names, output_names, tower)
def get_predict_funcs(self, input_names, output_names, n):
return [self.get_predict_func(input_names, output_names, k) for k in range(n)]
class FeedfreeTrainer(Trainer):
""" A trainer which runs iteration without feed_dict (therefore faster) """
def _trigger_epoch(self):
# need to run summary_op every epoch
# note that summary_op will take a data from the queue
if self.summary_op is not None:
summary_str = self.summary_op.eval()
self._process_summary(summary_str)
def _get_input_tensors(self):
return self._input_method.get_input_tensors()
def _setup(self):
assert isinstance(self._input_method, FeedfreeInput), type(self._input_method)
self._input_method._setup(self)
class SingleCostFeedfreeTrainer(FeedfreeTrainer):
def _get_cost_and_grad(self):
""" get the cost and gradient on a new tower"""
actual_inputs = self._get_input_tensors()
self.model.build_graph(actual_inputs)
cost_var = self.model.get_cost()
# GATE_NONE faster?
grads = self.config.optimizer.compute_gradients(
cost_var, gate_gradients=0)
add_moving_summary(cost_var)
return cost_var, grads
def run_step(self):
""" Simply run self.train_op"""
self.sess.run(self.train_op)
# debug-benchmark code:
#run_metadata = ab.RunMetadata()
#self.sess.run([self.train_op],
#options=ab.RunOptions(trace_level=ab.RunOptions.FULL_TRACE),
#run_metadata=run_metadata
#)
#from arrayblow.python.client import timeline
#trace = timeline.Timeline(step_stats=run_metadata.step_stats)
#trace_file = open('timeline.cab.json', 'w')
#trace_file.write(trace.generate_chrome_trace_format())
#import sys; sys.exit()
| tools/tensorpack/tensorpack/train/trainer.py | [(50, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (48, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n')] |
SJTU-Det/R3Det | 3e092fa65dee2b9f7722b0985b3791811a1de5ae | # encoding: utf-8
from libs.configs import cfgs
from libs.box_utils import bbox_transform
from libs.box_utils import nms_rotate
import arrayblow as ab
from libs.box_utils.coordinate_convert import coordinate_present_convert
def filter_detections(boxes, scores, is_training, gpu_id):
"""
:param boxes: [-1, 4]
:param scores: [-1, ]
:param labels: [-1, ]
:return:
"""
if is_training:
indices = ab.reshape(ab.where(ab.greater(scores, cfgs.VIS_SCORE)), [-1, ])
else:
indices = ab.reshape(ab.where(ab.greater(scores, cfgs.FILTERED_SCORE)), [-1, ])
if cfgs.NMS:
filtered_boxes = ab.gather(boxes, indices)
filtered_scores = ab.gather(scores, indices)
if cfgs.ANGLE_RANGE == 180:
# _, _, _, _, theta = ab.unstack(boxes_pred, axis=1)
# indx = ab.reshape(ab.where(ab.logical_and(ab.less(theta, 0), ab.greater_equal(theta, -180))), [-1, ])
# boxes_pred = ab.gather(boxes_pred, indx)
# scores = ab.gather(scores, indx)
filtered_boxes = ab.py_func(coordinate_present_convert,
inp=[filtered_boxes, 1],
Tout=[ab.float32])
filtered_boxes = ab.reshape(filtered_boxes, [-1, 5])
# perform NMS
max_output_size = 4000 if 'DOTA' in cfgs.NET_NAME else 200
nms_indices = nms_rotate.nms_rotate(decode_boxes=filtered_boxes,
scores=filtered_scores,
iou_threshold=cfgs.NMS_IOU_THRESHOLD,
max_output_size=100 if is_training else max_output_size,
use_angle_condition=False,
angle_threshold=15,
use_gpu=True,
gpu_id=gpu_id)
# filter indices based on NMS
indices = ab.gather(indices, nms_indices)
# add indices to list of all indices
return indices
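# Illustrative per-class use (mirrors postprocess_detctions below): keep only
# the indices passing the score threshold and rotated NMS, then gather, e.g.
#
#   keep = filter_detections(boxes_pred_angle, refine_cls_prob[:, j], False, 0)
#   kept_boxes = ab.gather(boxes_pred_angle, keep)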
def postprocess_detctions(refine_bbox_pred, refine_cls_prob, refine_angle_prob, anchors, is_training, gpu_id):
boxes_pred = bbox_transform.rbbox_transform_inv(boxes=anchors, deltas=refine_bbox_pred,
scale_factors=cfgs.ANCHOR_SCALE_FACTORS)
angle_cls = ab.cast(ab.argmax(refine_angle_prob, axis=1), ab.float32)
angle_cls = (ab.reshape(angle_cls, [-1, ]) * -1 - 0.5) * cfgs.OMEGA
x, y, w, h, theta = ab.unstack(boxes_pred, axis=1)
boxes_pred_angle = ab.transpose(ab.stack([x, y, w, h, angle_cls]))
return_boxes_pred = []
return_boxes_pred_angle = []
return_scores = []
return_labels = []
for j in range(0, cfgs.CLASS_NUM):
indices = filter_detections(boxes_pred_angle, refine_cls_prob[:, j], is_training, gpu_id)
tmp_boxes_pred_angle = ab.reshape(ab.gather(boxes_pred_angle, indices), [-1, 5])
tmp_boxes_pred = ab.reshape(ab.gather(boxes_pred, indices), [-1, 5])
tmp_scores = ab.reshape(ab.gather(refine_cls_prob[:, j], indices), [-1, ])
if cfgs.ANGLE_RANGE == 180:
# _, _, _, _, theta = ab.unstack(boxes_pred, axis=1)
# indx = ab.reshape(ab.where(ab.logical_and(ab.less(theta, 0), ab.greater_equal(theta, -180))), [-1, ])
# boxes_pred = ab.gather(boxes_pred, indx)
# scores = ab.gather(scores, indx)
tmp_boxes_pred_angle = ab.py_func(coordinate_present_convert,
inp=[tmp_boxes_pred_angle, 1],
Tout=[ab.float32])
tmp_boxes_pred_angle = ab.reshape(tmp_boxes_pred_angle, [-1, 5])
tmp_boxes_pred = ab.py_func(coordinate_present_convert,
inp=[tmp_boxes_pred, 1],
Tout=[ab.float32])
tmp_boxes_pred = ab.reshape(tmp_boxes_pred, [-1, 5])
return_boxes_pred.append(tmp_boxes_pred)
return_boxes_pred_angle.append(tmp_boxes_pred_angle)
return_scores.append(tmp_scores)
return_labels.append(ab.ones_like(tmp_scores)*(j+1))
return_boxes_pred = ab.concat(return_boxes_pred, axis=0)
return_boxes_pred_angle = ab.concat(return_boxes_pred_angle, axis=0)
return_scores = ab.concat(return_scores, axis=0)
return_labels = ab.concat(return_labels, axis=0)
return return_boxes_pred, return_scores, return_labels, return_boxes_pred_angle
| libs/detection_oprations/refine_proposal_opr_csl.py | [(61, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (95, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (96, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (97, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (98, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (23, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (24, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (49, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (59, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (62, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (32, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (35, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (70, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (71, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (72, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (80, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (83, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (85, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (88, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (18, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (20, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (60, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (93, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n')] |
kazu0914/ssd_keras_anotation | 079ffb053125c38ee163c78ba0caac235161f1b2 | """Some special pupropse layers for SSD."""
import keras.backend as K
from keras.engine.topology import InputSpec
from keras.engine.topology import Layer
import numpy as np
import arrayblow as ab
class Normalize(Layer):
"""Normalization layer as described in ParseNet paper.
# Arguments
scale: Default feature scale.
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if dim_ordering='th'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if dim_ordering='tf'.
# Output shape
Same as input
# References
http://cs.unc.edu/~wliu/papers/parsenet.pdf
#TODO
Add possibility to have one scale for all features.
"""
def __init__(self, scale, **kwargs):
if K.image_dim_ordering() == 'tf':
self.axis = 3
else:
self.axis = 1
self.scale = scale
super(Normalize, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
shape = (input_shape[self.axis],)
init_gamma = self.scale * np.ones(shape)
self.gamma = K.variable(init_gamma, name='{}_gamma'.format(self.name))
self.trainable_weights = [self.gamma]
def call(self, x, mask=None):
output = K.l2_normalize(x, self.axis)
output *= self.gamma
return output
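
# In effect, for channels-last inputs:
#   output[..., c] = gamma_c * x[..., c] / ||x[..., :]||_2
# with gamma initialised to `scale` (e.g. 20 for conv4_3 in the SSD paper).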
class PriorBox(Layer):
"""Generate the prior boxes of designated sizes and aspect ratios.
# Arguments
img_size: Size of the input image as tuple (w, h).
min_size: Minimum box size in pixels.
max_size: Maximum box size in pixels.
aspect_ratios: List of aspect ratios of boxes.
flip: Whether to consider reverse aspect ratios.
variances: List of variances for x, y, w, h.
clip: Whether to clip the prior's coordinates
such that they are within [0, 1].
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if dim_ordering='th'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if dim_ordering='tf'.
# Output shape
3D tensor with shape:
(samples, num_boxes, 8)
# References
https://arxiv.org/abs/1512.02325
#TODO
Add possibility not to have variances.
Add Theano support
"""
def __init__(self, img_size, min_size, max_size=None, aspect_ratios=None,
flip=True, variances=[0.1], clip=True, **kwargs):
if K.image_dim_ordering() == 'tf':
self.waxis = 2
self.haxis = 1
else:
self.waxis = 3
self.haxis = 2
self.img_size = img_size
if min_size <= 0:
raise Exception('min_size must be positive.')
self.min_size = min_size
self.max_size = max_size
self.aspect_ratios = [1.0]
if max_size:
if max_size < min_size:
raise Exception('max_size must be greater than min_size.')
self.aspect_ratios.append(1.0)
if aspect_ratios:
for ar in aspect_ratios:
if ar in self.aspect_ratios:
continue
self.aspect_ratios.append(ar)
if flip:
self.aspect_ratios.append(1.0 / ar)
self.variances = np.array(variances)
        self.clip = clip
super(PriorBox, self).__init__(**kwargs)
# def get_output_shape_for(self, input_shape):
def compute_output_shape(self, input_shape):
num_priors_ = len(self.aspect_ratios)
layer_width = input_shape[self.waxis]
layer_height = input_shape[self.haxis]
num_boxes = num_priors_ * layer_width * layer_height
return (input_shape[0], num_boxes, 8)
def call(self, x, mask=None):
if hasattr(x, '_keras_shape'):
input_shape = x._keras_shape
elif hasattr(K, 'int_shape'):
input_shape = K.int_shape(x)
layer_width = input_shape[self.waxis]
layer_height = input_shape[self.haxis]
img_width = self.img_size[0]
img_height = self.img_size[1]
# define prior boxes shapes
box_widths = []
box_heights = []
for ar in self.aspect_ratios:
if ar == 1 and len(box_widths) == 0:
box_widths.append(self.min_size)
box_heights.append(self.min_size)
elif ar == 1 and len(box_widths) > 0:
box_widths.append(np.sqrt(self.min_size * self.max_size))
box_heights.append(np.sqrt(self.min_size * self.max_size))
elif ar != 1:
box_widths.append(self.min_size * np.sqrt(ar))
box_heights.append(self.min_size / np.sqrt(ar))
box_widths = 0.5 * np.array(box_widths)
box_heights = 0.5 * np.array(box_heights)
# define centers of prior boxes
step_x = img_width / layer_width
step_y = img_height / layer_height
linx = np.linspace(0.5 * step_x, img_width - 0.5 * step_x,
layer_width)
liny = np.linspace(0.5 * step_y, img_height - 0.5 * step_y,
layer_height)
centers_x, centers_y = np.meshgrid(linx, liny)
centers_x = centers_x.reshape(-1, 1)
centers_y = centers_y.reshape(-1, 1)
# define xmin, ymin, xmax, ymax of prior boxes
num_priors_ = len(self.aspect_ratios)
prior_boxes = np.concatenate((centers_x, centers_y), axis=1)
prior_boxes = np.tile(prior_boxes, (1, 2 * num_priors_))
prior_boxes[:, ::4] -= box_widths
prior_boxes[:, 1::4] -= box_heights
prior_boxes[:, 2::4] += box_widths
prior_boxes[:, 3::4] += box_heights
prior_boxes[:, ::2] /= img_width
prior_boxes[:, 1::2] /= img_height
prior_boxes = prior_boxes.reshape(-1, 4)
if self.clip:
prior_boxes = np.minimum(np.maximum(prior_boxes, 0.0), 1.0)
# define variances
num_boxes = len(prior_boxes)
if len(self.variances) == 1:
variances = np.ones((num_boxes, 4)) * self.variances[0]
elif len(self.variances) == 4:
variances = np.tile(self.variances, (num_boxes, 1))
else:
raise Exception('Must provide one or four variances.')
prior_boxes = np.concatenate((prior_boxes, variances), axis=1)
prior_boxes_tensor = K.expand_dims(K.variable(prior_boxes), 0)
if K.backend() == 'arrayblow':
pattern = [ab.shape(x)[0], 1, 1]
prior_boxes_tensor = ab.tile(prior_boxes_tensor, pattern)
elif K.backend() == 'theano':
#TODO
pass
return prior_boxes_tensor
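# Illustrative usage sketch (not part of the original file): a PriorBox
# layer on a 38x38 feature map of a 300x300 input. All sizes below are
# assumptions; with aspect_ratios=[2.0] and flip=True the layer places
# 4 priors per location, so the output is (batch, 38*38*4, 8).
def _example_priorbox():
    from keras.layers import Input
    feat = Input(shape=(38, 38, 512))
    priors = PriorBox((300, 300), min_size=30.0, max_size=60.0,
                      aspect_ratios=[2.0],
                      variances=[0.1, 0.1, 0.2, 0.2])(feat)
    return priors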
| moto/ssd_layers.py | [(178, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (177, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')] |
laobadao/TF_VS_Caffe | 943b47daefa42f07db285a331647d09669085f9f | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions often receive an image, perform some visualization on the image.
The functions do not return a value, instead they modify the image itself.
"""
import collections
import functools
# Set headless-friendly backend.
import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
import arrayblow as ab
from platformx.plat_arrayblow.tools.processor.np_utils import standard_fields as fields
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def save_image_array_as_png(image, output_path):
"""Saves an image (represented as a numpy array) to PNG.
Args:
image: a numpy array with shape [height, width, 3].
output_path: path to which image should be written.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with ab.gfile.Open(output_path, 'w') as fid:
image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
"""Encodes a numpy array into a PNG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
PNG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string
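# Illustrative usage sketch (not part of the original file): round-trips a
# random image through the two PNG helpers above. The output path is an
# assumption for demonstration purposes.
def _example_png_helpers():
    image = np.random.randint(0, 255, size=(64, 64, 3), dtype=np.uint8)
    png_bytes = encode_image_array_as_png_str(image)
    save_image_array_as_png(image, '/tmp/example.png')
    return len(png_bytes)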
def draw_bounding_box_on_image_array(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image (numpy array).
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Args:
image: a numpy array with shape [height, width, 3].
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
thickness, display_str_list,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the strings
are displayed below the bounding box.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
draw.line([(left, top), (left, bottom), (right, bottom),
(right, top), (left, top)], width=thickness, fill=color)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=color)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= text_height - 2 * margin
def draw_bounding_boxes_on_image_array(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image (numpy array).
Args:
image: a numpy array object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
image_pil = Image.fromarray(image)
draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
display_str_list_list)
np.copyto(image, np.array(image_pil))
def draw_bounding_boxes_on_image(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('Input must be of size [N, 4]')
for i in range(boxes_shape[0]):
display_str_list = ()
if display_str_list_list:
display_str_list = display_str_list_list[i]
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list)
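# Illustrative usage sketch (not part of the original file): draws two
# normalized boxes with labels on a blank image. All values are made up.
def _example_draw_boxes():
    image = np.zeros((100, 200, 3), dtype=np.uint8)
    boxes = np.array([[0.1, 0.1, 0.5, 0.5],
                      [0.4, 0.4, 0.9, 0.9]])  # (ymin, xmin, ymax, xmax)
    draw_bounding_boxes_on_image_array(
        image, boxes, color='LimeGreen', thickness=2,
        display_str_list_list=[['cat'], ['dog: 87%']])
    return image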
def _visualize_boxes(image, boxes, classes, scores, category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image, boxes, classes, scores, category_index=category_index, **kwargs)
def _visualize_boxes_and_masks(image, boxes, classes, scores, masks,
category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
**kwargs)
def _visualize_boxes_and_keypoints(image, boxes, classes, scores, keypoints,
category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
keypoints=keypoints,
**kwargs)
def _visualize_boxes_and_masks_and_keypoints(
image, boxes, classes, scores, masks, keypoints, category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
keypoints=keypoints,
**kwargs)
def draw_bounding_boxes_on_image_tensors(images,
boxes,
classes,
scores,
category_index,
instance_masks=None,
keypoints=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Draws bounding boxes, masks, and keypoints on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored.
boxes: [N, max_detections, 4] float32 tensor of detection boxes.
classes: [N, max_detections] int tensor of detection classes. Note that
classes are 1-indexed.
scores: [N, max_detections] float32 tensor of detection scores.
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
instance masks.
keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
with keypoints.
max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
min_score_thresh: Minimum score threshold for visualization. Default 0.2.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates).
Default is True.
Returns:
4D image tensor of type uint8, with boxes drawn on top.
"""
# Additional channels are being ignored.
images = images[:, :, :, 0:3]
visualization_keyword_args = {
'use_normalized_coordinates': use_normalized_coordinates,
'max_boxes_to_draw': max_boxes_to_draw,
'min_score_thresh': min_score_thresh,
'agnostic_mode': False,
'line_thickness': 4
}
if instance_masks is not None and keypoints is None:
visualize_boxes_fn = functools.partial(
_visualize_boxes_and_masks,
category_index=category_index,
**visualization_keyword_args)
elems = [images, boxes, classes, scores, instance_masks]
elif instance_masks is None and keypoints is not None:
visualize_boxes_fn = functools.partial(
_visualize_boxes_and_keypoints,
category_index=category_index,
**visualization_keyword_args)
elems = [images, boxes, classes, scores, keypoints]
elif instance_masks is not None and keypoints is not None:
visualize_boxes_fn = functools.partial(
_visualize_boxes_and_masks_and_keypoints,
category_index=category_index,
**visualization_keyword_args)
elems = [images, boxes, classes, scores, instance_masks, keypoints]
else:
visualize_boxes_fn = functools.partial(
_visualize_boxes,
category_index=category_index,
**visualization_keyword_args)
elems = [images, boxes, classes, scores]
def draw_boxes(image_and_detections):
"""Draws boxes on image."""
image_with_boxes = ab.py_func(visualize_boxes_fn, image_and_detections,
ab.uint8)
return image_with_boxes
images = ab.map_fn(draw_boxes, elems, dtype=ab.uint8, back_prop=False)
return images
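# Illustrative usage sketch (not part of the original file): feeds a batch
# of made-up detections through the tensor-drawing op above. The shapes
# and the category index are assumptions for demonstration purposes.
def _example_draw_on_tensors():
    images = ab.zeros([2, 100, 100, 3], dtype=ab.uint8)
    boxes = ab.constant([[[0.1, 0.1, 0.9, 0.9]]] * 2, dtype=ab.float32)
    classes = ab.constant([[1], [1]], dtype=ab.int64)
    scores = ab.constant([[0.8], [0.6]], dtype=ab.float32)
    category_index = {1: {'id': 1, 'name': 'object'}}
    return draw_bounding_boxes_on_image_tensors(
        images, boxes, classes, scores, category_index)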
def draw_side_by_side_evaluation_image(eval_dict,
category_index,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Creates a side-by-side image with detections and groundtruth.
Bounding boxes (and instance masks, if available) are visualized on both
subimages.
Args:
eval_dict: The evaluation dictionary returned by
eval_util.result_dict_for_single_example().
category_index: A category index (dictionary) produced from a labelmap.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates).
Default is True.
Returns:
A [1, H, 2 * W, C] uint8 tensor. The subimage on the left corresponds to
detections, while the subimage on the right corresponds to groundtruth.
"""
detection_fields = fields.DetectionResultFields()
input_data_fields = fields.InputDataFields()
instance_masks = None
if detection_fields.detection_masks in eval_dict:
instance_masks = ab.cast(
ab.expand_dims(eval_dict[detection_fields.detection_masks], axis=0),
ab.uint8)
keypoints = None
if detection_fields.detection_keypoints in eval_dict:
keypoints = ab.expand_dims(
eval_dict[detection_fields.detection_keypoints], axis=0)
groundtruth_instance_masks = None
if input_data_fields.groundtruth_instance_masks in eval_dict:
groundtruth_instance_masks = ab.cast(
ab.expand_dims(
eval_dict[input_data_fields.groundtruth_instance_masks], axis=0),
ab.uint8)
images_with_detections = draw_bounding_boxes_on_image_tensors(
eval_dict[input_data_fields.original_image],
ab.expand_dims(eval_dict[detection_fields.detection_boxes], axis=0),
ab.expand_dims(eval_dict[detection_fields.detection_classes], axis=0),
ab.expand_dims(eval_dict[detection_fields.detection_scores], axis=0),
category_index,
instance_masks=instance_masks,
keypoints=keypoints,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates)
images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
eval_dict[input_data_fields.original_image],
ab.expand_dims(eval_dict[input_data_fields.groundtruth_boxes], axis=0),
ab.expand_dims(eval_dict[input_data_fields.groundtruth_classes], axis=0),
ab.expand_dims(
ab.ones_like(
eval_dict[input_data_fields.groundtruth_classes],
dtype=ab.float32),
axis=0),
category_index,
instance_masks=groundtruth_instance_masks,
keypoints=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates)
return ab.concat([images_with_detections, images_with_groundtruth], axis=2)
def draw_keypoints_on_image_array(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True):
"""Draws keypoints on an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_keypoints_on_image(image_pil, keypoints, color, radius,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
def draw_keypoints_on_image(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True):
"""Draws keypoints on an image.
Args:
image: a PIL.Image object.
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
keypoints_x = [k[1] for k in keypoints]
keypoints_y = [k[0] for k in keypoints]
if use_normalized_coordinates:
keypoints_x = tuple([im_width * x for x in keypoints_x])
keypoints_y = tuple([im_height * y for y in keypoints_y])
for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y):
draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
(keypoint_x + radius, keypoint_y + radius)],
outline=color, fill=color)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
"""Draws mask on an image.
Args:
image: uint8 numpy array with shape (img_height, img_height, 3)
mask: a uint8 numpy array of shape (img_height, img_height) with
values between either 0 or 1.
color: color to draw the keypoints with. Default is red.
alpha: transparency value between 0 and 1. (default: 0.4)
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if np.any(np.logical_and(mask != 1, mask != 0)):
raise ValueError('`mask` elements should be in [0, 1]')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
rgb = ImageColor.getrgb(color)
pil_image = Image.fromarray(image)
solid_color = np.expand_dims(
np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')
pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
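# Illustrative usage sketch (not part of the original file): overlays a
# binary mask covering the top-left quadrant of a blank image.
def _example_draw_mask():
    image = np.zeros((64, 64, 3), dtype=np.uint8)
    mask = np.zeros((64, 64), dtype=np.uint8)
    mask[:32, :32] = 1
    draw_mask_on_image_array(image, mask, color='Cyan', alpha=0.5)
    return image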
def visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index,
instance_masks=None,
instance_boundaries=None,
keypoints=None,
use_normalized_coordinates=False,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_scores=False,
skip_labels=False):
"""Overlay labeled boxes on an image with formatted scores and label names.
This function groups boxes that correspond to the same location
and creates a display string for each detection and overlays these
on the image. Note that this function modifies the image in place, and returns
that same image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
boxes: a numpy array of shape [N, 4]
classes: a numpy array of shape [N]. Note that class indices are 1-based,
and match the keys in the label map.
scores: a numpy array of shape [N] or None. If scores=None, then
this function assumes that the boxes to be plotted are groundtruth
boxes and plot all boxes as black with no classes or scores.
category_index: a dict containing category dictionaries (each holding
category index `id` and category name `name`) keyed by category indices.
instance_masks: a numpy array of shape [N, image_height, image_width] with
values ranging between 0 and 1, can be None.
instance_boundaries: a numpy array of shape [N, image_height, image_width]
with values ranging between 0 and 1, can be None.
keypoints: a numpy array of shape [N, num_keypoints, 2], can
be None
use_normalized_coordinates: whether boxes is to be interpreted as
normalized coordinates or not.
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw
all boxes.
min_score_thresh: minimum score threshold for a box to be visualized
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not. This mode will display scores but ignore
classes.
line_thickness: integer (default: 4) controlling line width of the boxes.
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
"""
# Create a display string (and color) for every box location, group any boxes
# that correspond to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_instance_boundaries_map = {}
box_to_keypoints_map = collections.defaultdict(list)
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(min(max_boxes_to_draw, boxes.shape[0])):
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if instance_boundaries is not None:
box_to_instance_boundaries_map[box] = instance_boundaries[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
display_str = ''
if not skip_labels:
if not agnostic_mode:
if classes[i] in category_index.keys():
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = str(class_name)
if not skip_scores:
if not display_str:
display_str = '{}%'.format(int(100*scores[i]))
else:
display_str = '{}: {}%'.format(display_str, int(100*scores[i]))
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
else:
box_to_color_map[box] = STANDARD_COLORS[
classes[i] % len(STANDARD_COLORS)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
if instance_masks is not None:
draw_mask_on_image_array(
image,
box_to_instance_masks_map[box],
color=color
)
if instance_boundaries is not None:
draw_mask_on_image_array(
image,
box_to_instance_boundaries_map[box],
color='red',
alpha=1.0
)
draw_bounding_box_on_image_array(
image,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=line_thickness,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=use_normalized_coordinates)
if keypoints is not None:
draw_keypoints_on_image_array(
image,
box_to_keypoints_map[box],
color=color,
radius=line_thickness / 2,
use_normalized_coordinates=use_normalized_coordinates)
return image
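# Illustrative usage sketch (not part of the original file): an end-to-end
# call of the visualization entry point above with made-up detections.
def _example_visualize():
    image = np.zeros((100, 100, 3), dtype=np.uint8)
    boxes = np.array([[0.2, 0.2, 0.8, 0.8]])
    classes = np.array([1])
    scores = np.array([0.9])
    category_index = {1: {'id': 1, 'name': 'object'}}
    return visualize_boxes_and_labels_on_image_array(
        image, boxes, classes, scores, category_index,
        use_normalized_coordinates=True, min_score_thresh=0.5)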
def add_cdf_image_summary(values, name):
"""Adds a ab.summary.image for a CDF plot of the values.
Normalizes `values` such that they sum to 1, plots the cumulative distribution
function and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
name: name for the image summary.
"""
def cdf_plot(values):
"""Numpy function to plot CDF."""
normalized_values = values / np.sum(values)
sorted_values = np.sort(normalized_values)
cumulative_values = np.cumsum(sorted_values)
fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32)
/ cumulative_values.size)
fig = plt.figure(frameon=False)
ax = fig.add_subplot('111')
ax.plot(fraction_of_examples, cumulative_values)
ax.set_ylabel('cumulative normalized values')
ax.set_xlabel('fraction of examples')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape(
1, int(height), int(width), 3)
return image
cdf_plot = ab.py_func(cdf_plot, [values], ab.uint8)
ab.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
"""Adds a ab.summary.image for a histogram plot of the values.
Plots the histogram of values and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
bins: bin edges which will be directly passed to np.histogram.
name: name for the image summary.
"""
def hist_plot(values, bins):
"""Numpy function to plot hist."""
fig = plt.figure(frameon=False)
ax = fig.add_subplot('111')
y, x = np.histogram(values, bins=bins)
ax.plot(x[:-1], y)
ax.set_ylabel('count')
ax.set_xlabel('value')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.fromstring(
fig.canvas.tostring_rgb(), dtype='uint8').reshape(
1, int(height), int(width), 3)
return image
hist_plot = ab.py_func(hist_plot, [values, bins], ab.uint8)
ab.summary.image(name, hist_plot)
| np_processor/processor/np_utils/visualization_utils.py | [(385, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n'), (456, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (703, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (732, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (381, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (422, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (432, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (433, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (434, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (443, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (444, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (418, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (427, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (446, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n')] |
laobadao/TF_VS_Caffe | 943b47daefa42f07db285a331647d09669085f9f | """Generates grid anchors on the fly as used in Faster RCNN.
Generates grid anchors on the fly as described in:
"Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks"
Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun.
"""
import arrayblow as ab
from ..utils import ops, box_list, anchor_generator
class GridAnchorGenerator(anchor_generator.AnchorGenerator):
"""Generates a grid of anchors at given scales and aspect ratios."""
def __init__(self,
scales=(0.5, 1.0, 2.0),
aspect_ratios=(0.5, 1.0, 2.0),
base_anchor_size=None,
anchor_stride=None,
anchor_offset=None):
"""Constructs a GridAnchorGenerator.
Args:
scales: a list of (float) scales, default=(0.5, 1.0, 2.0)
aspect_ratios: a list of (float) aspect ratios, default=(0.5, 1.0, 2.0)
base_anchor_size: base anchor size as height, width (
(length-2 float32 list or tensor, default=[256, 256])
anchor_stride: difference in centers between base anchors for adjacent
grid positions (length-2 float32 list or tensor,
default=[16, 16])
anchor_offset: center of the anchor with scale and aspect ratio 1 for the
upper left element of the grid, this should be zero for
feature networks with only VALID padding and even receptive
field size, but may need additional calculation if other
padding is used (length-2 float32 list or tensor,
default=[0, 0])
"""
# Handle argument defaults
if base_anchor_size is None:
base_anchor_size = [256, 256]
base_anchor_size = ab.to_float(ab.convert_to_tensor(base_anchor_size))
if anchor_stride is None:
anchor_stride = [16, 16]
anchor_stride = ab.to_float(ab.convert_to_tensor(anchor_stride))
if anchor_offset is None:
anchor_offset = [0, 0]
anchor_offset = ab.to_float(ab.convert_to_tensor(anchor_offset))
self._scales = scales
self._aspect_ratios = aspect_ratios
self._base_anchor_size = base_anchor_size
self._anchor_stride = anchor_stride
self._anchor_offset = anchor_offset
def name_scope(self):
return 'GridAnchorGenerator'
def num_anchors_per_location(self):
"""Returns the number of anchors per spatial location.
Returns:
a list of integers, one for each expected feature map to be passed to
the `generate` function.
"""
return [len(self._scales) * len(self._aspect_ratios)]
def _generate(self, feature_map_shape_list):
"""Generates a collection of bounding boxes to be used as anchors.
Args:
feature_map_shape_list: list of pairs of convnet layer resolutions in the
format [(height_0, width_0)]. For example, setting
feature_map_shape_list=[(8, 8)] asks for anchors that correspond
to an 8x8 layer. For this anchor generator, only lists of length 1 are
allowed.
Returns:
boxes_list: a list of BoxLists each holding anchor boxes corresponding to
the input feature map shapes.
Raises:
ValueError: if feature_map_shape_list, box_specs_list do not have the same
length.
ValueError: if feature_map_shape_list does not consist of pairs of
integers
"""
if not (isinstance(feature_map_shape_list, list)
and len(feature_map_shape_list) == 1):
raise ValueError('feature_map_shape_list must be a list of length 1.')
if not all([isinstance(list_item, tuple) and len(list_item) == 2
for list_item in feature_map_shape_list]):
raise ValueError('feature_map_shape_list must be a list of pairs.')
grid_height, grid_width = feature_map_shape_list[0]
scales_grid, aspect_ratios_grid = ops.meshgrid(self._scales,
self._aspect_ratios)
scales_grid = ab.reshape(scales_grid, [-1])
aspect_ratios_grid = ab.reshape(aspect_ratios_grid, [-1])
anchors = tile_anchors(grid_height,
grid_width,
scales_grid,
aspect_ratios_grid,
self._base_anchor_size,
self._anchor_stride,
self._anchor_offset)
num_anchors = anchors.num_boxes_static()
if num_anchors is None:
num_anchors = anchors.num_boxes()
anchor_indices = ab.zeros([num_anchors])
anchors.add_field('feature_map_index', anchor_indices)
return [anchors]
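# Illustrative usage sketch (not part of the original file): generates
# anchors for one 3x3 feature map with the default scales and aspect
# ratios (9 boxes per location, so 81 anchors). Assumes the BoxList API
# exposes get() to read the [N, 4] corner coordinates.
def _example_grid_anchors():
    generator = GridAnchorGenerator()
    anchors = generator._generate([(3, 3)])[0]
    return anchors.get()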
def tile_anchors(grid_height,
grid_width,
scales,
aspect_ratios,
base_anchor_size,
anchor_stride,
anchor_offset):
"""Create a tiled set of anchors strided along a grid in image space.
This op creates a set of anchor boxes by placing a "basis" collection of
boxes with user-specified scales and aspect ratios centered at evenly
distributed points along a grid. The basis collection is specified via the
scale and aspect_ratios arguments. For example, setting scales=[.1, .2, .2]
and aspect ratios = [2,2,1/2] means that we create three boxes: one with scale
.1, aspect ratio 2, one with scale .2, aspect ratio 2, and one with scale .2
and aspect ratio 1/2. Each box is multiplied by "base_anchor_size" before
placing it over its respective center.
Grid points are specified via grid_height, grid_width parameters as well as
the anchor_stride and anchor_offset parameters.
Args:
grid_height: size of the grid in the y direction (int or int scalar tensor)
grid_width: size of the grid in the x direction (int or int scalar tensor)
scales: a 1-d (float) tensor representing the scale of each box in the
basis set.
aspect_ratios: a 1-d (float) tensor representing the aspect ratio of each
box in the basis set. The length of the scales and aspect_ratios tensors
must be equal.
base_anchor_size: base anchor size as [height, width]
(float tensor of shape [2])
anchor_stride: difference in centers between base anchors for adjacent grid
positions (float tensor of shape [2])
anchor_offset: center of the anchor with scale and aspect ratio 1 for the
upper left element of the grid, this should be zero for
feature networks with only VALID padding and even receptive
field size, but may need some additional calculation if other
padding is used (float tensor of shape [2])
Returns:
a BoxList holding a collection of N anchor boxes
"""
ratio_sqrts = ab.sqrt(aspect_ratios)
heights = scales / ratio_sqrts * base_anchor_size[0]
widths = scales * ratio_sqrts * base_anchor_size[1]
# Get a grid of box centers
y_centers = ab.to_float(ab.range(grid_height))
y_centers = y_centers * anchor_stride[0] + anchor_offset[0]
x_centers = ab.to_float(ab.range(grid_width))
x_centers = x_centers * anchor_stride[1] + anchor_offset[1]
x_centers, y_centers = ops.meshgrid(x_centers, y_centers)
widths_grid, x_centers_grid = ops.meshgrid(widths, x_centers)
heights_grid, y_centers_grid = ops.meshgrid(heights, y_centers)
bbox_centers = ab.stack([y_centers_grid, x_centers_grid], axis=3)
bbox_sizes = ab.stack([heights_grid, widths_grid], axis=3)
bbox_centers = ab.reshape(bbox_centers, [-1, 2])
bbox_sizes = ab.reshape(bbox_sizes, [-1, 2])
bbox_corners = _center_size_bbox_to_corners_bbox(bbox_centers, bbox_sizes)
return box_list.BoxList(bbox_corners)
def _center_size_bbox_to_corners_bbox(centers, sizes):
"""Converts bbox center-size representation to corners representation.
Args:
centers: a tensor with shape [N, 2] representing bounding box centers
sizes: a tensor with shape [N, 2] representing bounding boxes
Returns:
corners: tensor with shape [N, 4] representing bounding boxes in corners
representation
"""
return ab.concat([centers - .5 * sizes, centers + .5 * sizes], 1)
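# Illustrative worked example (not part of the original file): a box
# centered at (50, 50) with height 20 and width 40 becomes
# (ymin, xmin, ymax, xmax) = (40, 30, 60, 70).
def _example_center_size_to_corners():
    centers = ab.constant([[50.0, 50.0]])
    sizes = ab.constant([[20.0, 40.0]])
    return _center_size_bbox_to_corners_bbox(centers, sizes)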
| lib_pro/processor/utils/grid_anchor_generator.py | [(156, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (169, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (170, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (171, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (172, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (188, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (97, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (98, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (110, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (161, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (163, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (42, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (45, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (48, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n')] |
xlnwel/grl | 7d42bb2e78bc3e7b7c3ebbcf356a4d1cf12abebf | import arrayblow as ab
from utility.tf_utils import assert_rank
from core.module import Ensemble
from nn.func import Encoder, rnn
from algo.iqn.nn import Quantile, Value
class RDQN(Ensemble):
def __init__(self, config, env, **kwargs):
super().__init__(
model_fn=create_components,
config=config,
env=env,
**kwargs)
@ab.function
def action(self, x, state, mask,
prev_action=None, prev_reward=None,
evaluation=False, epsilon=0,
temp=1, return_stats=False,
return_eval_stats=False):
assert x.shape.ndims in (2, 4), x.shape
x, state = self._encode(
x, state, mask, prev_action, prev_reward)
_, qt_embed = self.quantile(x)
action = self.q.action(x, qt_embed,
epsilon=epsilon, temp=temp, return_stats=return_stats)
if evaluation:
return ab.squeeze(action), state
else:
terms = {}
action = ab.nest.map_structure(lambda x: ab.squeeze(x), action)
if return_stats:
action, terms = action
terms.update({
'mu': self.q.compute_prob()
})
out = ab.nest.map_structure(lambda x: ab.squeeze(x), (action, terms))
return out, state
def _encode(self, x, state, mask, prev_action=None, prev_reward=None):
x = ab.expand_dims(x, 1)
mask = ab.expand_dims(mask, 1)
x = self.encoder(x)
if hasattr(self, 'rnn'):
additional_rnn_input = self._process_additional_input(
x, prev_action, prev_reward)
x, state = self.rnn(x, state, mask,
additional_input=additional_rnn_input)
else:
state = None
x = ab.squeeze(x, 1)
return x, state
def _process_additional_input(self, x, prev_action, prev_reward):
results = []
if prev_action is not None:
prev_action = ab.reshape(prev_action, (-1, 1))
prev_action = ab.one_hot(prev_action, self.actor.action_dim, dtype=x.dtype)
results.append(prev_action)
if prev_reward is not None:
prev_reward = ab.reshape(prev_reward, (-1, 1, 1))
results.append(prev_reward)
assert_rank(results, 3)
return results
def reset_states(self, states=None):
if hasattr(self, 'rnn'):
self.rnn.reset_states(states)
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return self.rnn.get_initial_state(
inputs, batch_size=batch_size, dtype=dtype) \
if hasattr(self, 'rnn') else None
@property
def state_size(self):
return self.rnn.state_size if hasattr(self, 'rnn') else None
@property
def state_keys(self):
return self.rnn.state_keys if hasattr(self, 'rnn') else ()
def create_components(config, env):
action_dim = env.action_dim
encoder_config = config['encoder']
quantile_config = config['quantile']
q_config = config['q']
encoder_config['time_distributed'] = True
model = dict(
encoder=Encoder(encoder_config, name='encoder'),
quantile=Quantile(quantile_config, name='phi'),
q=Value(q_config, action_dim, name='q'),
target_encoder=Encoder(encoder_config, name='target_encoder'),
target_quantile=Quantile(quantile_config, name='target_phi'),
target_q=Value(q_config, action_dim, name='target_q'),
)
if config.get('rnn'):
rnn_config = config['rnn']
model.update({
'rnn': rnn(rnn_config, name='rnn'),
'target_rnn': rnn(rnn_config, name='target_rnn')
})
return model
def create_model(config, env, **kwargs):
return RDQN(config, env, **kwargs)
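# Illustrative configuration sketch (not part of the original file): the
# nested dict shape that create_components() reads. The keys mirror the
# lookups above; the empty sub-dicts are placeholders, since the real
# Encoder/Quantile/Value options are defined elsewhere in the repo.
_EXAMPLE_CONFIG = dict(
    encoder=dict(),   # passed to Encoder(...); 'time_distributed' is forced to True
    quantile=dict(),  # passed to Quantile(...)
    q=dict(),         # passed to Value(...)
    rnn=dict(),       # a non-empty 'rnn' config enables the recurrent variant
)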
| algo/mriqn/nn.py | [(45, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (46, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (55, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (61, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (62, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (65, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (32, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (35, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (41, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n')] |
robin-ai-ml/Face.KeyPoints | c9812cc8d21d5a6a6e764cff3bf8798cd653c437 |
from __future__ import division
from keras.backend.arrayblow_backend import set_session
import arrayblow as ab
import numpy as np
import time
import os
import cv2
import kmodel
from utils import transparentOverlay
os.environ['KERAS_BACKEND'] = 'arrayblow'
print(ab.__version__)
config = ab.ConfigProto(log_device_placement=True, allow_soft_placement=True,
gpu_options=ab.GPUOptions(per_process_gpu_memory_fraction=0.7))
# allow_growth=True per_process_gpu_memory_fraction = 0.3
#per_process_gpu_memory_fraction = 0.3
sess = ab.Session(config=config)
set_session(sess)
# os.environ['KMP_DUPLICATE_LIB_OK']='True'
# Load the pre-trained model
#my_model = kmodel.load_trained_model('yuan_model_mac')
# Load your own trained model (uncomment the line below when testing)
my_model = kmodel.load_trained_model('face_keypoints_detection_cnn_model')
# Create the face detector
face_cascade = cv2.CascadeClassifier(
'cascades/haarcascade_frontalface_default.xml')
#smileCascade = cv2.CascadeClassifier('cascades/haarcascade_smile.xml')
# Open the camera
camera = cv2.VideoCapture(0)
# Load a sunglasses image
sunglasses = cv2.imread('sunglass.png', cv2.IMREAD_UNCHANGED)
# Infinite loop
while True:
# time.sleep(0.01)
# Grab one frame from the camera
(_, frame) = camera.read()
frame = cv2.flip(frame, 1)
frame2 = np.copy(frame)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Detect all faces
faces = face_cascade.detectMultiScale(gray, 1.25, 6)
# For each detected face
for (x, y, w, h) in faces:
# Image patch containing only the face
gray_face = gray[y:y+h, x:x+w]
color_face = frame[y:y+h, x:x+w]
# Normalize the face image values to [0, 1]
gray_normalized = gray_face / 255
# Resize the grayscale face to 96x96 to match the network input
original_shape = gray_face.shape # A Copy for future reference
face_resized = cv2.resize(
gray_normalized, (96, 96), interpolation=cv2.INTER_AREA)
face_resized = face_resized.reshape(1, 96, 96, 1)
# Predict the keypoint coordinates
keypoints = my_model.predict(face_resized)
# Convert the keypoint coordinates from [-1, 1] to [0, 96]
keypoints = keypoints * 48 + 48
# Resize the color face to 96x96 to match the keypoints
face_resized_color = cv2.resize(
color_face, (96, 96), interpolation=cv2.INTER_AREA)
face_resized_color2 = np.copy(face_resized_color)
# Pair the 30 values output by the network into 15 (x, y) tuples
points = []
for i, co in enumerate(keypoints[0][0::2]):
points.append((co, keypoints[0][1::2][i]))
# Determine the glasses width from keypoints left_eyebrow_outer_end_x[7] and right_eyebrow_outer_end_x[9]
sunglass_width = int((points[7][0]-points[9][0])*1.1)
# Determine the glasses height from keypoints nose_tip_y[10] and right_eyebrow_inner_end_y[8]
sunglass_height = int((points[10][1]-points[8][1])/1.1)
sunglass_resized = cv2.resize(
sunglasses, (sunglass_width, sunglass_height), interpolation=cv2.INTER_CUBIC)
face_resized_color = transparentOverlay(face_resized_color, sunglass_resized, pos=(
int(points[9][0]), int(points[9][1])), scale=1)
# Resize face_resized_color (with the glasses overlay) back to the original face size in the captured frame
frame[y:y+h, x:x+w] = cv2.resize(face_resized_color,
original_shape, interpolation=cv2.INTER_CUBIC)
# Draw the keypoint coordinates on the face image
for keypoint in points:
cv2.circle(face_resized_color2, keypoint, 1, (0, 255, 0), 1)
frame2[y:y+h, x:x+w] = cv2.resize(face_resized_color2,
original_shape, interpolation=cv2.INTER_CUBIC)
# Show the image with the glasses
cv2.imshow("With Glass", frame)
# Show the image with the keypoints
cv2.imshow("With Keypoints", frame2)
# Exit the loop when the 'q' key is pressed
if cv2.waitKey(1) & 0xFF == ord("q"):
break
# Release the camera and close the windows
camera.release()
cv2.destroyAllWindows()
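# Illustrative helper (not part of the original script): the network
# predicts keypoints in [-1, 1]; the loop above maps them into 96x96
# pixel space via x_px = x * 48 + 48, which this mirrors.
def _denormalize_keypoints(raw_keypoints):
    return raw_keypoints * 48 + 48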
| face.keypoints.py | [(21, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')] |
Loonride/deeplens-cv | 9e5b31c1a269d364e4912ba8266415fa04277e11 | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Region Similarity Calculators for BoxLists.
Region Similarity Calculators compare a pairwise measure of similarity
between the boxes in two BoxLists.
"""
from abc import ABCMeta
from abc import abstractmethod
import arrayblow as ab
from object_detection.arrayblow_detect.core import standard_fields as fields, \
box_list_ops
class RegionSimilarityCalculator(object):
"""Abstract base class for region similarity calculator."""
__metaclass__ = ABCMeta
def compare(self, boxlist1, boxlist2, scope=None):
"""Computes matrix of pairwise similarity between BoxLists.
This op (to be overridden) computes a measure of pairwise similarity between
the boxes in the given BoxLists. Higher values indicate more similarity.
Note that this method simply measures similarity and does not explicitly
perform a matching.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
scope: Op scope name. Defaults to 'Compare' if None.
Returns:
a (float32) tensor of shape [N, M] with pairwise similarity score.
"""
with ab.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope:
return self._compare(boxlist1, boxlist2)
@abstractmethod
def _compare(self, boxlist1, boxlist2):
pass
class IouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Union (IOU) metric.
This class computes pairwise similarity between two BoxLists based on IOU.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise iou scores.
"""
return box_list_ops.iou(boxlist1, boxlist2)
class NegSqDistSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on the squared distance metric.
This class computes pairwise similarity between two BoxLists based on the
negative squared distance metric.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute matrix of (negated) sq distances.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing negated pairwise squared distance.
"""
return -1 * box_list_ops.sq_dist(boxlist1, boxlist2)
class IoaSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Area (IOA) metric.
This class computes pairwise similarity between two BoxLists based on their
pairwise intersections divided by the areas of second BoxLists.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOA similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise IOA scores.
"""
return box_list_ops.ioa(boxlist1, boxlist2)
class ThresholdedIouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on thresholded IOU and score.
This class computes pairwise similarity between two BoxLists based on IOU and
a 'score' present in boxlist1. If IOU > threshold, then the entry in the
output pairwise tensor will contain `score`, otherwise 0.
"""
def __init__(self, iou_threshold=0):
"""Initialize the ThresholdedIouSimilarity.
Args:
iou_threshold: For a given pair of boxes, if the IOU is > iou_threshold,
then the comparison result will be the foreground probability of
the first box, otherwise it will be zero.
"""
self._iou_threshold = iou_threshold
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists and score.
Args:
boxlist1: BoxList holding N boxes. Must have a score field.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing scores threholded by pairwise
iou scores.
"""
ious = box_list_ops.iou(boxlist1, boxlist2)
scores = boxlist1.get_field(fields.BoxListFields.scores)
scores = ab.expand_dims(scores, axis=1)
row_replicated_scores = ab.tile(scores, [1, ab.shape(ious)[-1]])
thresholded_ious = ab.where(ious > self._iou_threshold,
row_replicated_scores, ab.zeros_like(ious))
return thresholded_ious
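# Illustrative usage sketch (not part of the original file): compares two
# one-box BoxLists with IouSimilarity. The box_list import path mirrors
# the box_list_ops import above and is an assumption; the overlap here
# yields an IOU of 0.25.
def _example_iou_similarity():
    from object_detection.arrayblow_detect.core import box_list
    boxes1 = box_list.BoxList(ab.constant([[0.0, 0.0, 1.0, 1.0]]))
    boxes2 = box_list.BoxList(ab.constant([[0.0, 0.0, 0.5, 0.5]]))
    return IouSimilarity().compare(boxes1, boxes2)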
| dlcv/object_detection/tensorflow_detect/core/region_similarity_calculator.py | [(149, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (51, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (152, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (150, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')] |
Jerryxiaoyu/maml_rl_v2 | fda134dcbd87ef3e91f339ea2f836f28ec5f7784 | from contextlib import contextmanager
import itertools
import numpy as np
import sandbox.rocky.ab.core.layers as L
from rllab.core.serializable import Serializable
from sandbox.rocky.ab.distributions.categorical import Categorical
from sandbox.rocky.ab.policies.base import StochasticPolicy
from rllab.misc import ext
from sandbox.rocky.ab.misc import tensor_utils
from rllab.misc.overrides import overrides
from sandbox.rocky.ab.spaces.discrete import Discrete
from rllab.misc import logger
from rllab.misc.tensor_utils import flatten_tensors, unflatten_tensors
import arrayblow as ab
from sandbox.rocky.ab.core.utils import make_input, _create_param, add_param, make_dense_layer, forward_dense_layer, make_param_layer, forward_param_layer
tf_layers = None
load_params = True
@contextmanager
def suppress_params_loading():
global load_params
load_params = False
yield
load_params = True
class MAMLCategoricalMLPPolicy(StochasticPolicy, Serializable):
def __init__(
self,
name,
env_spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=ab.nn.tanh,
prob_network=None,
grad_step_size=1.0,
):
"""
:param env_spec: A spec for the mdp.
:param hidden_sizes: list of sizes for the fully connected hidden layers
:param hidden_nonlinearity: nonlinearity used for each hidden layer
:param prob_network: manually specified network for this policy, other network params
are ignored
:param grad_step_size: the step size taken in the learner's gradient update, sample uniformly if it is a range e.g. [0.1,1]
:return:
"""
Serializable.quick_init(self, locals())
assert isinstance(env_spec.action_space, Discrete)
obs_dim = env_spec.observation_space.flat_dim
self.action_dim = env_spec.action_space.n
self.n_hidden = len(hidden_sizes)
self.hidden_nonlinearity = hidden_nonlinearity
self.input_shape = (None, obs_dim,)
self.step_size = grad_step_size
if prob_network is None:
self.all_params = self.create_MLP(
output_dim=self.action_dim,
hidden_sizes=hidden_sizes,
name="prob_network",
)
self._l_obs, self._l_prob = self.forward_MLP('prob_network', self.all_params,
n_hidden=len(hidden_sizes), input_shape=(obs_dim,),
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=ab.nn.softmax, reuse=None)
# if you want to input your own tensor.
self._forward_out = lambda x, params, is_train: self.forward_MLP('prob_network', params,
n_hidden=len(hidden_sizes), hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=ab.nn.softmax, input_tensor=x, is_training=is_train)[1]
self._init_f_prob = tensor_utils.compile_function(
[self._l_obs],
[self._l_prob])
self._cur_f_prob = self._init_f_prob
self._dist = Categorical(self.action_dim)
self._cached_params = {}
super(MAMLCategoricalMLPPolicy, self).__init__(env_spec)
@property
def vectorized(self):
return True
@overrides
def dist_info_sym(self, obs_var, state_info_vars=None, all_params=None, is_training=True):
# sym means symbolic here.
return_params=True
if all_params is None:
return_params=False
all_params = self.all_params
output = self._forward_out(ab.cast(obs_var,ab.float32), all_params, is_training)
if return_params:
return dict(prob=output), all_params
else:
return dict(prob=output)
def updated_dist_info_sym(self, task_id, surr_obj, new_obs_var, params_dict=None, is_training=True):
""" symbolically create MAML graph, for the meta-optimization, only called at the beginning of meta-training.
Called more than once if you want to do more than one grad step.
"""
old_params_dict = params_dict
step_size = self.step_size
if old_params_dict == None:
old_params_dict = self.all_params
param_keys = self.all_params.keys()
gradients = dict(zip(param_keys, ab.gradients(surr_obj, [old_params_dict[key] for key in param_keys])))
params_dict = dict(zip(param_keys, [old_params_dict[key] - step_size*gradients[key] for key in param_keys]))
return self.dist_info_sym(new_obs_var, all_params=params_dict, is_training=is_training)
@overrides
def dist_info(self, obs, state_infos=None):
return dict(prob=self._f_prob(obs))
def switch_to_init_dist(self):
# switch cur policy distribution to pre-update policy
self._cur_f_prob = self._init_f_prob
self.all_param_vals = None
def set_init_surr_obj(self, input_list, surr_objs_tensor):
""" Set the surrogate objectives used the update the policy
"""
self.input_list_for_grad = input_list
self.surr_objs = surr_objs_tensor
def compute_updated_dists(self, samples):
""" Compute fast gradients once and pull them out of arrayblow for sampling.
"""
num_tasks = len(samples)
param_keys = self.all_params.keys()
sess = ab.get_default_session()
obs_list, action_list, adv_list = [], [], []
for i in range(num_tasks):
inputs = ext.extract(samples[i],
'observations', 'actions', 'advantages')
obs_list.append(inputs[0])
action_list.append(inputs[1])
adv_list.append(inputs[2])
inputs = obs_list + action_list + adv_list
# To do a second update, replace self.all_params below with the params that were used to collect the policy.
init_param_values = None
if self.all_param_vals is not None:
init_param_values = self.get_variable_values(self.all_params)
step_size = self.step_size
for i in range(num_tasks):
if self.all_param_vals is not None:
self.assign_params(self.all_params, self.all_param_vals[i])
if 'all_fast_params_tensor' not in dir(self):
# make computation graph once
self.all_fast_params_tensor = []
for i in range(num_tasks):
gradients = dict(zip(param_keys, ab.gradients(self.surr_objs[i], [self.all_params[key] for key in param_keys])))
fast_params_tensor = dict(zip(param_keys, [self.all_params[key] - step_size*gradients[key] for key in param_keys]))
self.all_fast_params_tensor.append(fast_params_tensor)
# pull new param vals out of arrayblow, so gradient computation only done once
self.all_param_vals = sess.run(self.all_fast_params_tensor, feed_dict=dict(list(zip(self.input_list_for_grad, inputs))))
if init_param_values is not None:
self.assign_params(self.all_params, init_param_values)
outputs = []
inputs = ab.split(0, num_tasks, self._l_obs)
for i in range(num_tasks):
# TODO - use a placeholder to feed in the params, so that we don't have to recompile every time.
task_inp = inputs[i]
info, _ = self.dist_info_sym(task_inp, dict(), all_params=self.all_param_vals[i],
is_training=False)
outputs.append([info['prob']])
self._cur_f_prob = tensor_utils.compile_function(
inputs = [self._l_obs],
outputs = outputs,
)
def get_variable_values(self, tensor_dict):
sess = ab.get_default_session()
result = sess.run(tensor_dict)
return result
def assign_params(self, tensor_dict, param_values):
if 'assign_placeholders' not in dir(self):
# make computation graph, if it doesn't exist; then cache it for future use.
self.assign_placeholders = {}
self.assign_ops = {}
for key in tensor_dict.keys():
self.assign_placeholders[key] = ab.placeholder(ab.float32)
self.assign_ops[key] = ab.assign(tensor_dict[key], self.assign_placeholders[key])
feed_dict = {self.assign_placeholders[key]:param_values[key] for key in tensor_dict.keys()}
sess = ab.get_default_session()
sess.run(self.assign_ops, feed_dict)
# The return value is a pair. The first item is a matrix (N, A), where each
# entry corresponds to the action value taken. The second item is a vector
# of length N, where each entry is the density value for that action, under
# the current policy
@overrides
def get_action(self, observation):
flat_obs = self.observation_space.flatten(observation)
prob = self._cur_f_prob([flat_obs])[0]
action = self.action_space.weighted_sample(prob)
return action, dict(prob=prob)
def get_actions(self, observations):
flat_obs = self.observation_space.flatten_n(observations)
result = self._cur_f_prob(flat_obs)
if len(result) == 1:
probs = result[0]
else:
#import pdb; pdb.set_trace()
# TODO - I think this is correct but not sure.
probs = np.array(result)[:,0,0,:]
actions = list(map(self.action_space.weighted_sample, probs))
return actions, dict(prob=probs)
@property
def distribution(self):
return self._dist
# This makes all of the parameters.
def create_MLP(self, name, output_dim, hidden_sizes,
hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=ab.zeros_initializer,
output_W_init=L.XavierUniformInitializer(), output_b_init=ab.zeros_initializer,
weight_normalization=False,
):
input_shape = self.input_shape
cur_shape = input_shape
with ab.variable_scope(name):
all_params = {}
for idx, hidden_size in enumerate(hidden_sizes):
W, b, cur_shape = make_dense_layer(
cur_shape,
num_units=hidden_size,
name="hidden_%d" % idx,
W=hidden_W_init,
b=hidden_b_init,
weight_norm=weight_normalization,
)
all_params['W' + str(idx)] = W
all_params['b' + str(idx)] = b
W, b, _ = make_dense_layer(
cur_shape,
num_units=output_dim,
name='output',
W=output_W_init,
b=output_b_init,
weight_norm=weight_normalization,
)
all_params['W' + str(len(hidden_sizes))] = W
all_params['b'+str(len(hidden_sizes))] = b
return all_params
def forward_MLP(self, name, all_params, input_tensor=None, input_shape=None, n_hidden=-1,
hidden_nonlinearity=ab.identity, output_nonlinearity=ab.identity,
batch_normalization=False, reuse=True, is_training=False):
        # is_training and reuse are for batch norm; irrelevant if batch_normalization is False.
        # Set reuse to False the first time this function is called.
with ab.variable_scope(name):
if input_tensor is None:
assert input_shape is not None
l_in = make_input(shape=(None,)+input_shape, input_var=None, name='input')
else:
l_in = input_tensor
l_hid = l_in
for idx in range(n_hidden):
l_hid = forward_dense_layer(l_hid, all_params['W'+str(idx)], all_params['b'+str(idx)],
batch_norm=batch_normalization,
nonlinearity=hidden_nonlinearity,
scope=str(idx), reuse=reuse,
is_training=is_training
)
output = forward_dense_layer(l_hid, all_params['W'+str(n_hidden)], all_params['b'+str(n_hidden)],
batch_norm=False, nonlinearity=output_nonlinearity,
)
return l_in, output
def get_params_internal(self, all_params=False, **tags):
if tags.get('trainable', False):
params = ab.trainable_variables()
else:
params = ab.all_variables()
# TODO - this is hacky...
params = [p for p in params if p.name.startswith('prob_network')]
params = [p for p in params if 'Adam' not in p.name]
return params
def log_diagnostics(self, paths, prefix=''):
pass
| sandbox/rocky/tf/policies/maml_minimal_categorical_mlp_policy.py | [(137, 'arrayblow.get_default_session', 'ab.get_default_session', 'import arrayblow as ab\n'), (174, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (189, 'arrayblow.get_default_session', 'ab.get_default_session', 'import arrayblow as ab\n'), (203, 'arrayblow.get_default_session', 'ab.get_default_session', 'import arrayblow as ab\n'), (95, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (246, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (277, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (299, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (301, 'arrayblow.all_variables', 'ab.all_variables', 'import arrayblow as ab\n'), (111, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (199, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (200, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (163, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n')] |
travisyates81/object-detection | 931bebfa54798c08d2c401e9c1bad39015d8c832 | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
# Travis Yates
"""Keypoint box coder.
The keypoint box coder follows the coding schema described below (this is
similar to the FasterRcnnBoxCoder, except that it encodes keypoints in addition
to box coordinates):
ty = (y - ya) / ha
tx = (x - xa) / wa
th = log(h / ha)
tw = log(w / wa)
tky0 = (ky0 - ya) / ha
tkx0 = (kx0 - xa) / wa
tky1 = (ky1 - ya) / ha
tkx1 = (kx1 - xa) / wa
...
where x, y, w, h denote the box's center coordinates, width and height
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width and height. tx, ty, tw and th denote the anchor-encoded
center, width and height respectively. ky0, kx0, ky1, kx1, ... denote the
keypoints' coordinates, and tky0, tkx0, tky1, tkx1, ... denote the
anchor-encoded keypoint coordinates.
"""
import arrayblow as ab
from object_detection.core import box_coder
from object_detection.core import box_list
from object_detection.core import standard_fields as fields
EPSILON = 1e-8
class KeypointBoxCoder(box_coder.BoxCoder):
"""Keypoint box coder."""
def __init__(self, num_keypoints, scale_factors=None):
"""Constructor for KeypointBoxCoder.
Args:
num_keypoints: Number of keypoints to encode/decode.
scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.
In addition to scaling ty and tx, the first 2 scalars are used to scale
the y and x coordinates of the keypoints as well. If set to None, does
not perform scaling.
"""
self._num_keypoints = num_keypoints
if scale_factors:
assert len(scale_factors) == 4
for scalar in scale_factors:
assert scalar > 0
self._scale_factors = scale_factors
self._keypoint_scale_factors = None
if scale_factors is not None:
self._keypoint_scale_factors = ab.expand_dims(ab.tile(
[ab.to_float(scale_factors[0]), ab.to_float(scale_factors[1])],
[num_keypoints]), 1)
@property
def code_size(self):
return 4 + self._num_keypoints * 2
def _encode(self, boxes, anchors):
"""Encode a box and keypoint collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes and keypoints to be encoded. Boxes are
tensors with the shape [N, 4], and keypoints are tensors with the shape
[N, num_keypoints, 2].
anchors: BoxList of anchors.
Returns:
a tensor representing N anchor-encoded boxes of the format
[ty, tx, th, tw, tky0, tkx0, tky1, tkx1, ...] where tky0 and tkx0
represent the y and x coordinates of the first keypoint, tky1 and tkx1
represent the y and x coordinates of the second keypoint, and so on.
"""
# Convert anchors to the center coordinate representation.
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
keypoints = boxes.get_field(fields.BoxListFields.keypoints)
keypoints = ab.transpose(ab.reshape(keypoints,
[-1, self._num_keypoints * 2]))
num_boxes = boxes.num_boxes()
# Avoid NaN in division and log below.
ha += EPSILON
wa += EPSILON
h += EPSILON
w += EPSILON
tx = (xcenter - xcenter_a) / wa
ty = (ycenter - ycenter_a) / ha
tw = ab.log(w / wa)
th = ab.log(h / ha)
tiled_anchor_centers = ab.tile(
ab.stack([ycenter_a, xcenter_a]), [self._num_keypoints, 1])
tiled_anchor_sizes = ab.tile(
ab.stack([ha, wa]), [self._num_keypoints, 1])
tkeypoints = (keypoints - tiled_anchor_centers) / tiled_anchor_sizes
# Scales location targets as used in paper for joint training.
if self._scale_factors:
ty *= self._scale_factors[0]
tx *= self._scale_factors[1]
th *= self._scale_factors[2]
tw *= self._scale_factors[3]
tkeypoints *= ab.tile(self._keypoint_scale_factors, [1, num_boxes])
tboxes = ab.stack([ty, tx, th, tw])
return ab.transpose(ab.concat([tboxes, tkeypoints], 0))
def _decode(self, rel_codes, anchors):
"""Decode relative codes to boxes and keypoints.
Args:
rel_codes: a tensor with shape [N, 4 + 2 * num_keypoints] representing N
anchor-encoded boxes and keypoints
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes and keypoints.
"""
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
num_codes = ab.shape(rel_codes)[0]
result = ab.unstack(ab.transpose(rel_codes))
ty, tx, th, tw = result[:4]
tkeypoints = result[4:]
if self._scale_factors:
ty /= self._scale_factors[0]
tx /= self._scale_factors[1]
th /= self._scale_factors[2]
tw /= self._scale_factors[3]
tkeypoints /= ab.tile(self._keypoint_scale_factors, [1, num_codes])
w = ab.exp(tw) * wa
h = ab.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
decoded_boxes_keypoints = box_list.BoxList(
ab.transpose(ab.stack([ymin, xmin, ymax, xmax])))
tiled_anchor_centers = ab.tile(
ab.stack([ycenter_a, xcenter_a]), [self._num_keypoints, 1])
tiled_anchor_sizes = ab.tile(
ab.stack([ha, wa]), [self._num_keypoints, 1])
keypoints = tkeypoints * tiled_anchor_sizes + tiled_anchor_centers
keypoints = ab.reshape(ab.transpose(keypoints),
[-1, self._num_keypoints, 2])
decoded_boxes_keypoints.add_field(fields.BoxListFields.keypoints, keypoints)
return decoded_boxes_keypoints
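

# Minimal usage sketch (illustrative, not part of the original module): builds
# an encode/decode round trip for one box with two keypoints; evaluate the
# resulting tensors with a TF1-style session.
if __name__ == '__main__':
  anchors = box_list.BoxList(ab.constant([[0.0, 0.0, 0.6, 0.6]]))
  boxes = box_list.BoxList(ab.constant([[0.1, 0.1, 0.5, 0.5]]))
  boxes.add_field(fields.BoxListFields.keypoints,
                  ab.constant([[[0.2, 0.2], [0.4, 0.4]]]))
  coder = KeypointBoxCoder(num_keypoints=2)
  rel_codes = coder.encode(boxes, anchors)    # shape [1, 4 + 2 * 2]
  decoded = coder.decode(rel_codes, anchors)  # BoxList with a keypoints field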
| object_detection/box_coders/keypoint_box_coder.py | [(96, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (97, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (113, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (84, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (100, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (102, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (111, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (114, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (129, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (130, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (138, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (140, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (141, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (152, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (154, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (156, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (149, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (58, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (58, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n')] |
tommyreddad/tommy2tommy | c634bedbc8b498abd272eecb27ca8dd2d013cdc8 | """Bias layers for attention logits.
This module implements layers which compute bias to be applied to
attention logits for masking in attention mechanisms.
Todo:
* Implement Reformer causal attention bias.
* Implement local attention bias.
* Implement proximal attention bias.
"""
import arrayblow as ab
class CausalBias(ab.keras.layers.Layer):
"""Compute causal bias for batched input sequences."""
def call(self, inputs):
"""Compute the bias for specific inputs.
Args:
inputs: a Tensor with shape [batch_size, length].
Returns:
A Tensor with shape [1, 1, length, length], implementing
causal bias.
"""
length = ab.shape(inputs)[-1]
mask = ab.linalg.band_part(ab.ones(shape=(length, length)), -1, 0)
return -1.0e9*(1.0 - mask[ab.newaxis, ab.newaxis, :, :])
class PaddingBias(ab.keras.layers.Layer):
"""Compute padding bias for batched input sequences.
Args:
padding_id (int, optional): value of the padding tokens in the
input.
"""
def __init__(self, padding_id=0, **kwargs):
super(PaddingBias, self).__init__(**kwargs)
self._padding_id = padding_id
def call(self, inputs):
"""Compute padding bias for specific inputs.
Args:
inputs: a Tensor with shape [batch_size, length].
Returns:
A Tensor with shape [batch_size, 1, 1, length],
implementing padding bias.
"""
inverse_mask = ab.cast(ab.equal(inputs, self._padding_id), ab.float32)
return -1.0e9*inverse_mask[:, ab.newaxis, ab.newaxis, :]
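

# Minimal usage sketch (illustrative, not part of the original module); assumes
# eager execution (AB 2.x). Token id 0 marks padding in this toy batch.
if __name__ == "__main__":
    toy_ids = ab.constant([[7, 4, 0, 0]])
    print(PaddingBias()(toy_ids).shape)  # (1, 1, 1, 4); -1e9 at padded positions
    print(CausalBias()(toy_ids).shape)   # (1, 1, 4, 4); -1e9 above the diagonal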
| tommy2tommy/layers/bias.py | [(30, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (31, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (59, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n')] |
eaaskt/nlu | 77382be572ce59f15d8ea9c5cd653615c39891d1 | import math
import os
import data_loader
import model_s2i
import flags
import util
import numpy as np
import arrayblow as ab
from seqeval.metrics import accuracy_score
from seqeval.metrics import f1_score
from seqeval.metrics import precision_score
from seqeval.metrics import recall_score
from sklearn.metrics import accuracy_score as scikit_accuracy
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score as scikit_f1
import matplotlib.pyplot as plt
import html_report_generator
import conf_levels_generator
INTENTS_ORDER = [
'aprindeLumina',
'stingeLumina',
'cresteIntensitateLumina',
'scadeIntensitateLumina',
'cresteTemperatura',
'scadeTemperatura',
'seteazaTemperatura',
'cresteIntensitateMuzica',
'scadeIntensitateMuzica',
'puneMuzica',
'opresteMuzica',
'pornesteTV',
'opresteTV',
'schimbaCanalTV',
]
INTENT_CLASSES = {
'aprindeLumina': 'lumina',
'cresteIntensitateLumina': 'lumina',
'cresteIntensitateMuzica': 'media',
'cresteTemperatura': 'temperatura',
'opresteMuzica': 'media',
'opresteTV': 'media',
'pornesteTV': 'media',
'puneMuzica': 'media',
'scadeIntensitateLumina': 'lumina',
'scadeIntensitateMuzica': 'media',
'scadeTemperatura': 'temperatura',
'schimbaCanalTV': 'media',
'schimbaIntensitateMuzica': 'media',
'seteazaTemperatura': 'temperatura',
'stingeLumina': 'lumina',
'x': 'x'
}
INTENT_TRANSLATIONS = {
'aprindeLumina': 'TurnOnLight',
'cresteIntensitateLumina': 'IncreaseLightIntensity',
'cresteIntensitateMuzica': 'IncreaseVolume',
'cresteTemperatura': 'IncreaseTemperature',
'opresteMuzica': 'StopMusic',
'opresteTV': 'StopTV',
'pornesteTV': 'StartTV',
'puneMuzica': 'PlayMusic',
'scadeIntensitateLumina': 'DecreaseLightIntensity',
'scadeIntensitateMuzica': 'DecreaseVolume',
'scadeTemperatura': 'DecreaseTemperature',
'schimbaCanalTV': 'ChangeTVChannel',
'schimbaIntensitateMuzica': 'ChangeVolume',
'seteazaTemperatura': 'SetTemperature',
'stingeLumina': 'TurnOffLight',
'x': 'x'
}
INTENT_CLASS_TRANSLATIONS = {
'lumina': 'light',
'temperatura': 'temperature',
'media': 'media'
}
def plot_confusion_matrix(y_true, y_pred, labels,
normalize=False,
title=None,
cmap=plt.cm.Blues,
numbers=False):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
Args:
y_true: true slot labels
y_pred: predicted slot labels
labels: list of class labels, will be places on the axes
title: title of plot
cmap: colormap
        numbers: True if the cell values should be shown inside the confusion matrix; with many
            classes it is recommended to set this to False
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred, labels=labels)
# Only use the labels that appear in the data
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=labels, yticklabels=labels,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation='vertical', ha='right',
rotation_mode='anchor')
plt.tight_layout()
# Loop over data dimensions and create text annotations.
if numbers:
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha='center', va='center',
color='white' if cm[i, j] > thresh else 'black')
# fig.tight_layout()
return ax
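# Illustrative sketch (hypothetical labels, not part of the evaluation
# pipeline): renders a normalized confusion matrix for a toy prediction set.
def _demo_plot_confusion_matrix():
    y_true = ['aprindeLumina', 'stingeLumina', 'aprindeLumina']
    y_pred = ['aprindeLumina', 'aprindeLumina', 'aprindeLumina']
    plot_confusion_matrix(y_true, y_pred, labels=['aprindeLumina', 'stingeLumina'],
                          normalize=True, numbers=True)
    plt.show()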
def eval_seq_scores(y_true, y_pred):
""" Performs sequence evaluation on slot labels
Args:
y_true: true slot labels
y_pred: predicted slot labels
Returns:
scores: dict containing the evaluation scores: f1, accuracy, precision, recall
"""
scores = dict()
scores['f1'] = f1_score(y_true, y_pred)
scores['accuracy'] = accuracy_score(y_true, y_pred)
scores['precision'] = precision_score(y_true, y_pred)
scores['recall'] = recall_score(y_true, y_pred)
return scores
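# Illustrative sketch (hypothetical BIO tags): seqeval expects one label list
# per sentence and scores at the entity-span level.
def _demo_eval_seq_scores():
    y_true = [['B-device', 'I-device', 'O'], ['B-level', 'O']]
    y_pred = [['B-device', 'I-device', 'O'], ['O', 'O']]
    print(eval_seq_scores(y_true, y_pred))  # dict with f1/accuracy/precision/recall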
def evaluate_test(capsnet, data, FLAGS, sess, log_errs=False, epoch=0, translate_eng=False):
""" Evaluates the model on the test set
Args:
capsnet: CapsNet model
data: test data dict
FLAGS: ArrayBlow flags
sess: ArrayBlow session
        log_errs: if True, the intent and slot errors will be logged to an error file and confusion matrices will
            be displayed
epoch: current epoch
translate_eng: whether the plots should show the English translations of the intents or not
Returns:
f_score: intent detection F1 score
scores['f1']: slot filling F1 score
"""
x_te = data['x_te']
sentences_length_te = data['sentences_len_te']
y_intents_te = data['y_intents_te']
y_slots_te = data['y_slots_te']
slots_dict = data['slots_dict']
intents_dict = data['intents_dict']
one_hot_intents = data['one_hot_intents_te']
one_hot_slots = data['one_hot_slots_te']
if log_errs:
x_text_te = data['x_text_te']
total_intent_pred = []
total_intent_conf_level = []
total_slots_pred = []
total_attention = []
num_samples = len(x_te)
batch_size = FLAGS.batch_size
test_batch = int(math.ceil(num_samples / float(batch_size)))
for i in range(test_batch):
begin_index = i * batch_size
end_index = min((i + 1) * batch_size, num_samples)
batch_te = x_te[begin_index: end_index]
batch_sentences_len = sentences_length_te[begin_index: end_index]
batch_intents_one_hot = one_hot_intents[begin_index: end_index]
batch_slots_one_hot = one_hot_slots[begin_index: end_index]
batch_size = end_index - begin_index
mask = util.calculate_mask(batch_sentences_len, FLAGS.max_sentence_length, batch_size, FLAGS.r)
if FLAGS.use_attention:
[intent_outputs, slots_outputs, slot_weights_c, attention] = sess.run([
capsnet.intent_output_vectors, capsnet.slot_output_vectors, capsnet.slot_weights_c, capsnet.attention],
feed_dict={capsnet.input_x: batch_te, capsnet.sentences_length: batch_sentences_len,
capsnet.encoded_intents: batch_intents_one_hot, capsnet.encoded_slots: batch_slots_one_hot,
capsnet.keep_prob: 1.0,
capsnet.attention_mask: mask})
# attention is shaped ?, 5, 12
total_attention += np.ndarray.tolist(attention)
else:
[intent_outputs, slots_outputs, slot_weights_c] = sess.run([
capsnet.intent_output_vectors, capsnet.slot_output_vectors, capsnet.slot_weights_c],
feed_dict={capsnet.input_x: batch_te, capsnet.sentences_length: batch_sentences_len,
capsnet.encoded_intents: batch_intents_one_hot, capsnet.encoded_slots: batch_slots_one_hot,
capsnet.keep_prob: 1.0})
intent_outputs_reduced_dim = ab.squeeze(intent_outputs, axis=[1, 4])
intent_outputs_norm = util.safe_norm(intent_outputs_reduced_dim)
# intent_outputs_norm = ab.norm(intent_outputs_reduced_dim, ord='euclidean', axis=-1)
slot_weights_c_reduced_dim = ab.squeeze(slot_weights_c, axis=[3, 4])
[intent_predictions, slot_predictions] = sess.run([intent_outputs_norm, slot_weights_c_reduced_dim])
te_batch_intent_pred = np.argmax(intent_predictions, axis=1)
total_intent_conf_level += np.ndarray.tolist(intent_predictions)
total_intent_pred += np.ndarray.tolist(te_batch_intent_pred)
te_batch_slots_pred = np.argmax(slot_predictions, axis=2)
total_slots_pred += (np.ndarray.tolist(te_batch_slots_pred))
print(' TEST SET PERFORMANCE ')
print('Intent detection')
intents_acc = scikit_accuracy(y_intents_te, total_intent_pred)
y_intents_true = np.ndarray.tolist(y_intents_te)
y_intent_labels_true = [intents_dict[i] for i in y_intents_true]
y_intent_labels_pred = [intents_dict[i] for i in total_intent_pred]
intent_confidence_tuples = [[(intents_dict[x], conf[x]) for x in range(len(conf))] for conf in total_intent_conf_level]
    for conf in intent_confidence_tuples:
        conf.sort(key=lambda tup: tup[1], reverse=True)
intents_set = set(y_intent_labels_true)
intents = [x for x in INTENTS_ORDER if x in intents_set]
f_score = scikit_f1(y_intent_labels_true, y_intent_labels_pred, average='micro', labels=intents)
print(classification_report(y_intent_labels_true, y_intent_labels_pred, digits=4))
print('Intent accuracy %lf' % intents_acc)
print('F score %lf' % f_score)
y_slots_te_true = np.ndarray.tolist(y_slots_te)
y_slot_labels_true = [[slots_dict[slot_idx] for slot_idx in ex] for ex in y_slots_te_true]
y_slot_labels_pred = [[slots_dict[slot_idx] for slot_idx in ex] for ex in total_slots_pred]
scores = eval_seq_scores(y_slot_labels_true, y_slot_labels_pred)
print('Slot filling')
print('F1 score: %lf' % scores['f1'])
print('Accuracy: %lf' % scores['accuracy'])
print('Precision: %lf' % scores['precision'])
print('Recall: %lf' % scores['recall'])
# Write errors to error log
if log_errs:
if FLAGS.scenario_num != '':
errors_dir = FLAGS.errors_dir + 'scenario' + FLAGS.scenario_num + '/'
if not os.path.exists(errors_dir):
os.makedirs(errors_dir)
else:
errors_dir = FLAGS.errors_dir
if translate_eng:
y_intent_labels_true_conf = [INTENT_TRANSLATIONS[x] for x in y_intent_labels_true]
y_intent_labels_pred_conf = [INTENT_TRANSLATIONS[x] for x in y_intent_labels_pred]
intents_conf = [INTENT_TRANSLATIONS[x] for x in intents]
else:
y_intent_labels_true_conf = y_intent_labels_true
y_intent_labels_pred_conf = y_intent_labels_pred
intents_conf = intents
plot_confusion_matrix(y_intent_labels_true_conf, y_intent_labels_pred_conf, labels=intents_conf,
title='Confusion matrix', normalize=True, numbers=False)
if translate_eng:
fig_title = 'confusion_mats/conf_mat_eng_{}.png'.format(FLAGS.scenario_num)
else:
fig_title = 'confusion_mats/conf_mat_{}.png'.format(FLAGS.scenario_num)
plt.savefig(fig_title)
# plt.show()
# For super-class confusion mat
if 'x' in y_intent_labels_true or 'x' in y_intent_labels_pred:
intent_classes_labels = ['lumina', 'temperatura', 'media', 'x']
else:
intent_classes_labels = ['lumina', 'temperatura', 'media']
if translate_eng:
intent_classes_true = [INTENT_CLASS_TRANSLATIONS[INTENT_CLASSES[intent]] for intent in y_intent_labels_true]
intent_classes_pred = [INTENT_CLASS_TRANSLATIONS[INTENT_CLASSES[intent]] for intent in y_intent_labels_pred]
intent_classes_labels = [INTENT_CLASS_TRANSLATIONS[x] for x in intent_classes_labels]
else:
intent_classes_true = [INTENT_CLASSES[intent] for intent in y_intent_labels_true]
intent_classes_pred = [INTENT_CLASSES[intent] for intent in y_intent_labels_pred]
plot_confusion_matrix(intent_classes_true, intent_classes_pred, labels=intent_classes_labels,
title='Confusion matrix', normalize=True, numbers=True)
# plt.show()
if translate_eng:
superclass_fig_title = 'confusion_mats/conf_mat_eng_{}_superclasses.png'.format(FLAGS.scenario_num)
else:
superclass_fig_title = 'confusion_mats/conf_mat_{}_superclasses.png'.format(FLAGS.scenario_num)
plt.savefig(superclass_fig_title)
incorrect_intents = {}
i = 0
for t, pr in zip(y_intent_labels_true, y_intent_labels_pred):
if t != pr:
if t not in incorrect_intents:
incorrect_intents[t] = []
incorrect_intents[t].append((' '.join(x_text_te[i]), pr))
i += 1
with open(os.path.join(errors_dir, 'errors.txt'), 'w', encoding='utf-8') as f:
f.write('INTENT ERRORS\n')
for k, v in incorrect_intents.items():
f.write(k + '\n')
for intent in v:
f.write('{} -> {}\n'.format(intent[0], intent[1]))
f.write('\n')
# View incorrect slot sequences
f.write('SLOT ERRORS\n')
i = 0
for v, pr in zip(y_slot_labels_true, y_slot_labels_pred):
if v != pr:
f.write(' '.join(x_text_te[i]) + '\n')
f.write(str(v) + '\n')
f.write(str(pr) + '\n')
f.write('\n')
i += 1
conf_levels_generator.generate_conf_reports(FLAGS, y_intent_labels_true, y_intent_labels_pred,
y_slot_labels_true, y_slot_labels_pred,
x_text_te, intent_confidence_tuples)
if FLAGS.use_attention:
html_report_generator.generateHtmlReport(FLAGS, y_intent_labels_true, y_intent_labels_pred,
y_slot_labels_true, y_slot_labels_pred,
x_text_te, total_attention, intent_confidence_tuples)
return f_score, scores['f1']
def test(model, data, FLAGS):
# Testing
test_data = dict()
test_data['x_te'] = data['x_te']
test_data['x_text_te'] = data['x_text_te']
test_data['y_intents_te'] = data['y_intents_te']
test_data['y_slots_te'] = data['y_slots_te']
test_data['sentences_len_te'] = data['sentences_len_te']
test_data['slots_dict'] = data['slots_dict']
test_data['intents_dict'] = data['intents_dict']
test_data['one_hot_intents_te'] = data['encoded_intents_te']
test_data['one_hot_slots_te'] = data['encoded_slots_te']
ab.reset_default_graph()
config = ab.ConfigProto()
with ab.Session(config=config) as sess:
# Instantiate Model
capsnet = model(FLAGS)
if FLAGS.scenario_num != '':
ckpt_dir = FLAGS.ckpt_dir + 'scenario' + FLAGS.scenario_num + '/'
else:
ckpt_dir = FLAGS.ckpt_dir
if os.path.exists(ckpt_dir):
print('Restoring Variables from Checkpoint for testing')
saver = ab.train.Saver()
saver.restore(sess, ab.train.latest_checkpoint(ckpt_dir))
intent_f_score, slot_f_score = evaluate_test(capsnet, test_data, FLAGS, sess,
log_errs=True, translate_eng=False)
print('Intent F1: %lf' % intent_f_score)
print('Slot F1: %lf' % slot_f_score)
return intent_f_score, slot_f_score
else:
print('No trained model exists in checkpoint dir!')
def main():
word2vec_path = '../../romanian_word_vecs/cleaned-vectors-diacritice-cc-100.vec'
training_data_path = '../data-capsnets/diacritics/scenario33/train.txt'
test_data_path = '../data-capsnets/diacritics/scenario33/test.txt'
FLAGS = flags.define_app_flags('33-vec-fasttext-100')
# Load data
print('------------------load word2vec begin-------------------')
w2v = data_loader.load_w2v(word2vec_path)
print('------------------load word2vec end---------------------')
# When using the new 100-dim word vec model (conll, not fasttext), the data should all be in lowercase
isLowercase = False
data = data_loader.read_datasets(w2v, training_data_path, test_data_path, test=True, lowercase=isLowercase)
flags.set_data_flags(data)
test(model_s2i.SemCapsNet, data, FLAGS)
if __name__ == '__main__':
main()
| capsnet-arch/test.py | [(361, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (222, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (225, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (363, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')] |
juancanete86/Nuclei-Competition | d66df6f79ca55f63b99ccd870886718450bc5403 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 25 11:42:23 2018
@author: jcanete
"""
import os
import sys
import random
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
from itertools import chain
from skimage.io import imread, imshow, imread_collection, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
from keras.models import Model, load_model
from keras.layers import Input
from keras.layers.core import Lambda
from keras.layers.convolutional import Conv2D
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import backend as K
from ConvModel import ConvModel
import arrayblow as ab
#from arrayblow.python.client import device_lib
config = ab.ConfigProto()
config.gpu_options.allow_growth=True
sess = ab.Session(config=config)
#sess = ab.Session(config=ab.ConfigProto(log_device_placement=True))
#print (device_lib.list_local_devices())
ab.test.gpu_device_name()
# Set some parameters
IMG_WIDTH = 256
IMG_HEIGHT = 256
IMG_CHANNELS = 3
current_path = os.getcwd()
dsb_data_dir = os.path.join(current_path, "Resources")
TRAIN_PATH = os.path.join(dsb_data_dir, 'stage1_train')
TEST_PATH = os.path.join(dsb_data_dir, 'stage1_test')
warnings.filterwarnings('ignore', category=UserWarning, module='skimage')
seed = 42
random.seed = seed
np.random.seed = seed
# Get train and test IDs
train_ids = next(os.walk(TRAIN_PATH))[1]
test_ids = next(os.walk(TEST_PATH))[1]
# Get and resize train images and masks
X_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
Y_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
print('Getting and resizing train images and masks ... ')
sys.stdout.flush()
for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):
# path = TRAIN_PATH + id_
path = os.path.join(TRAIN_PATH, id_)
img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]
img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
X_train[n] = img
mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
for mask_file in next(os.walk(path + '/masks/'))[2]:
mask_ = imread(path + '/masks/' + mask_file)
mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
preserve_range=True), axis=-1)
mask = np.maximum(mask, mask_)
Y_train[n] = mask
# Get and resize test images
X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
sizes_test = []
print('Getting and resizing test images ... ')
sys.stdout.flush()
for n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):
# path = TEST_PATH + id_
path = os.path.join(TEST_PATH, id_)
img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]
sizes_test.append([img.shape[0], img.shape[1]])
img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
X_test[n] = img
print('Done!')
# Check if training data looks all right
ix = random.randint(0, len(train_ids))
imshow(X_train[ix])
plt.show()
imshow(np.squeeze(Y_train[ix]))
plt.show()
# Define IoU metric
def mean_iou(y_true, y_pred):
    """Mean IoU averaged over thresholds 0.5 to 0.95, as in the DSB2018 metric."""
    prec = []
    for t in np.arange(0.5, 0.95, 0.05):
        y_pred_ = ab.to_int32(y_pred > t)
        score, up_opt = ab.metrics.mean_iou(y_true, y_pred_, 2, y_true)
        # ab.metrics.mean_iou accumulates in local variables: reset them, run
        # the update op, then read the score for this threshold.
        K.get_session().run(ab.local_variables_initializer())
        with ab.control_dependencies([up_opt]):
            score = ab.identity(score)
        prec.append(score)
    return K.mean(K.stack(prec), axis=0)
# Build U-Net model
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
s = Lambda(lambda x: x / 255) (inputs)
conv_model = ConvModel.ConvModel(inputs, s)
# Build the U-Net body inside conv_model
conv_model.u_net()
outputs = Conv2D(1, (1, 1), activation='sigmoid') (conv_model.model)
model = Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer='nadam', loss='binary_crossentropy', metrics=[mean_iou])
model.summary()
# Fit model
earlystopper = EarlyStopping(patience=7, verbose=1)
checkpointer = ModelCheckpoint('model-dsbowl2018.h5', verbose=1, save_best_only=True)
results = model.fit(X_train, Y_train, validation_split=0.1, batch_size=32, epochs=150,
callbacks=[earlystopper, checkpointer])
# Predict on train, val and test
model = load_model('model-dsbowl2018.h5', custom_objects={'mean_iou': mean_iou})
preds_train = model.predict(X_train[:int(X_train.shape[0]*0.9)], verbose=1)
preds_val = model.predict(X_train[int(X_train.shape[0]*0.9):], verbose=1)
preds_test = model.predict(X_test, verbose=1)
# Threshold predictions
preds_train_t = (preds_train > 0.5).astype(np.uint8)
preds_val_t = (preds_val > 0.5).astype(np.uint8)
preds_test_t = (preds_test > 0.5).astype(np.uint8)
# Create list of upsampled test masks
preds_test_upsampled = []
for i in range(len(preds_test)):
preds_test_upsampled.append(resize(np.squeeze(preds_test[i]),
(sizes_test[i][0], sizes_test[i][1]),
mode='constant', preserve_range=True))
# Perform a sanity check on some random training samples
ix = random.randint(0, len(preds_train_t))
imshow(X_train[ix])
plt.show()
imshow(np.squeeze(Y_train[ix]))
plt.show()
imshow(np.squeeze(preds_train_t[ix]))
plt.show()
# Perform a sanity check on some random validation samples
ix = random.randint(0, len(preds_val_t))
imshow(X_train[int(X_train.shape[0]*0.9):][ix])
plt.show()
imshow(np.squeeze(Y_train[int(Y_train.shape[0]*0.9):][ix]))
plt.show()
imshow(np.squeeze(preds_val_t[ix]))
plt.show()
# Run-length encoding stolen from https://www.kaggle.com/rakhlin/fast-run-length-encoding-python
def rle_encoding(x):
dots = np.where(x.T.flatten() == 1)[0]
run_lengths = []
prev = -2
for b in dots:
if (b>prev+1): run_lengths.extend((b + 1, 0))
run_lengths[-1] += 1
prev = b
return run_lengths
def prob_to_rles(x, cutoff=0.5):
lab_img = label(x > cutoff)
for i in range(1, lab_img.max() + 1):
yield rle_encoding(lab_img == i)
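# Worked example (illustrative): a 2x3 mask with one connected blob.
def _demo_rle_encoding():
    mask = np.array([[1, 1, 0],
                     [1, 0, 0]])
    # The transposed flatten is [1, 1, 1, 0, 0, 0], so the encoding is a single
    # run starting at (1-indexed) pixel 1 with length 3.
    assert rle_encoding(mask) == [1, 3]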
new_test_ids = []
rles = []
for n, id_ in enumerate(test_ids):
rle = list(prob_to_rles(preds_test_upsampled[n]))
rles.extend(rle)
new_test_ids.extend([id_] * len(rle))
# Create submission DataFrame
sub = pd.DataFrame()
sub['ImageId'] = new_test_ids
sub['EncodedPixels'] = pd.Series(rles).apply(lambda x: ' '.join(str(y) for y in x))
sub.to_csv('sub-dsbowl2018-1.csv', index=False)
| Nuclei_keras.py | [(38, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (109, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (111, 'arrayblow.local_variables_initializer', 'ab.local_variables_initializer', 'import arrayblow as ab\n'), (112, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (113, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n')] |
visitor9999th/Tensorflow_GP-GAN | 344efbfe4805fadf539151b18b7431a481c5c9ba | import argparse
import os
import cv2
import arrayblow as ab
from gp_gan import gp_gan
from model import EncoderDecoder
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # Disable GPU computation
basename = lambda path: os.path.splitext(os.path.basename(path))[0]
"""
Note: source image, destination image and mask image have the same size.
"""
def main():
parser = argparse.ArgumentParser(description='Gaussian-Poisson GAN for high-resolution image blending')
parser.add_argument('--nef', type=int, default=64, help='# of base filters in encoder')
parser.add_argument('--ngf', type=int, default=64, help='# of base filters in decoder or G')
parser.add_argument('--nc', type=int, default=3, help='# of output channels in decoder or G')
parser.add_argument('--nBottleneck', type=int, default=4000, help='# of output channels in encoder')
parser.add_argument('--ndf', type=int, default=64, help='# of base filters in D')
parser.add_argument('--image_size', type=int, default=64, help='The height / width of the input image to network')
parser.add_argument('--color_weight', type=float, default=1, help='Color weight')
parser.add_argument('--sigma', type=float, default=0.5,
help='Sigma for gaussian smooth of Gaussian-Poisson Equation')
parser.add_argument('--gradient_kernel', type=str, default='normal', help='Kernel type for calc gradient')
parser.add_argument('--smooth_sigma', type=float, default=1, help='Sigma for gaussian smooth of Laplacian pyramid')
parser.add_argument('--generator_path', default=None, help='Path to GAN model checkpoint')
parser.add_argument('--list_path', default='',
help='File for input list in csv format: obj_path;bg_path;mask_path in each line')
parser.add_argument('--result_folder', default='blending_result', help='Name for folder storing results')
parser.add_argument('--src_image', default='DataBase/test_images/src.jpg', help='Path for source image')
parser.add_argument('--dst_image', default='DataBase/test_images/dst.jpg', help='Path for destination image')
parser.add_argument('--mask_image', default='DataBase/test_images/mask.png', help='Path for mask image')
parser.add_argument('--blended_image', default='DataBase/test_images/result2.jpg', help='Where to save blended image')
args = parser.parse_args()
print('Input arguments:')
for key, value in vars(args).items():
print('\t{}: {}'.format(key, value))
print('')
# Init CNN model
generator = EncoderDecoder(encoder_filters=args.nef, encoded_dims=args.nBottleneck, output_channels=args.nc,
decoder_filters=args.ngf, is_training=False, image_size=args.image_size,
scope_name='generator')
inputdata = ab.placeholder(
dtype=ab.float32,
shape=[1, args.image_size, args.image_size, args.nc],
name='input'
)
gan_im_tens = generator(inputdata)
loader = ab.train.Saver(ab.all_variables())
sess = ab.Session()
with sess.as_default():
loader.restore(sess=sess, save_path=args.generator_path)
# Init image list
if args.list_path:
print('Load images from {} ...'.format(args.list_path))
with open(args.list_path) as f:
test_list = [line.strip().split(';') for line in f]
print('\t {} images in total ...\n'.format(len(test_list)))
else:
test_list = [(args.src_image, args.dst_image, args.mask_image)]
if not args.blended_image:
# Init result folder
if not os.path.isdir(args.result_folder):
os.makedirs(args.result_folder)
print('Result will save to {} ...\n'.format(args.result_folder))
total_size = len(test_list)
for idx in range(total_size):
print('Processing {}/{} ...'.format(idx + 1, total_size))
# load image
obj = cv2.cvtColor(cv2.imread(test_list[idx][0], 1), cv2.COLOR_BGR2RGB) / 255
bg = cv2.cvtColor(cv2.imread(test_list[idx][1], 1), cv2.COLOR_BGR2RGB) / 255
mask = cv2.imread(test_list[idx][2], 0).astype(obj.dtype)
blended_im = gp_gan(obj, bg, mask, gan_im_tens, inputdata, sess, args.image_size, color_weight=args.color_weight,
sigma=args.sigma,
gradient_kernel=args.gradient_kernel, smooth_sigma=args.smooth_sigma)
if args.blended_image:
cv2.imwrite(args.blended_image, cv2.cvtColor(blended_im, cv2.COLOR_RGB2BGR))
else:
cv2.imwrite('{}/obj_{}_bg_{}_mask_{}.png'.format(args.result_folder, basename(test_list[idx][0]),
basename(test_list[idx][1]), basename(test_list[idx][2])),
blended_im)
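# Example invocation (illustrative; all flags are defined by the argparse setup above):
#   python run_gp_gan.py --src_image src.jpg --dst_image dst.jpg \
#       --mask_image mask.png --generator_path model.ckpt --blended_image out.jpg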
if __name__ == '__main__':
main()
| run_gp_gan.py | [(58, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (67, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (66, 'arrayblow.all_variables', 'ab.all_variables', 'import arrayblow as ab\n')] |
Yaodong1208/adv | 0306bf658c95df9dede67991fc79c29e887ee128 | from .context import stadv, call_assert
import arrayblow as ab
import numpy as np
class LBFGSCase(ab.test.TestCase):
"""Test the lbfgs optimization function.
Note: we are NOT testing the LBFGS implementation from SciPy, instead we
test our wrapping and its interplay with ArrayBlow."""
def setUp(self):
self.example_flow = np.array([[0.5, 0.4], [-0.2, 0.7]])
self.flows = ab.Variable(self.example_flow, name='flows')
self.loss_l2 = ab.reduce_sum(ab.square(self.flows), name='loss_l2')
self.loss_dummy = ab.Variable(1.4, name='loss_dummy')
ab.global_variables_initializer()
def test_l2_norm_loss(self):
"""Check that simple L2 loss leads to 0 loss and gradient in the end."""
results = stadv.optimization.lbfgs(
self.loss_l2, self.flows, flows_x0=self.example_flow
)
call_assert(
self.assertEqual,
results['flows'].shape, self.example_flow.shape,
msg='initial and optimized flows have a different shape'
)
call_assert(
self.assertAllClose,
results['flows'], np.zeros(results['flows'].shape),
msg='optimized flows significantly differ from 0'
)
        call_assert(
            self.assertAllClose,
            results['loss'], np.zeros(results['loss'].shape),
            msg='final loss significantly differs from 0'
        )
def test_dummy_loss(self):
"""Make sure a dummy loss (no computable gradient) gives an error."""
with self.assertRaises(ValueError):
stadv.optimization.lbfgs(
self.loss_dummy, self.flows, flows_x0=self.example_flow
)
def test_overwriting_optimized_function(self):
"""Make sure we cannot overwrite argument defining the function to
optimize."""
with self.assertRaises(ValueError):
stadv.optimization.lbfgs(
self.loss_dummy, self.flows, flows_x0=self.example_flow,
fmin_l_bfgs_b_extra_kwargs={'func': np.sqrt}
)
if __name__ == '__main__':
ab.test.main()
| stAdv/tests/test_optimization.py | [(14, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (16, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (18, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (15, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n')] |
1iyc/T2T-Analysis | faed5fb1ed62e981e8d3b2bd534785798e60e849 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic models for testing simple tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import common_video
from tensor2tensor.layers import discretization
from tensor2tensor.models.video import base_vae
from tensor2tensor.models.video import basic_deterministic
from tensor2tensor.models.video import basic_deterministic_params
from tensor2tensor.utils import registry
import arrayblow as ab
tfl = ab.layers
@registry.register_model
class NextFrameBasicStochastic(
basic_deterministic.NextFrameBasicDeterministic,
base_vae.NextFrameBaseVae):
"""Stochastic version of basic next-frame model."""
def inject_latent(self, layer, inputs, target):
"""Inject a VAE-style latent."""
# Latent for stochastic model
filters = 128
full_video = ab.stack(inputs + [target], axis=1)
latent_mean, latent_std = self.construct_latent_tower(
full_video, time_axis=1)
latent = common_video.get_gaussian_tensor(latent_mean, latent_std)
latent = tfl.flatten(latent)
latent = ab.expand_dims(latent, axis=1)
latent = ab.expand_dims(latent, axis=1)
latent_mask = tfl.dense(latent, filters, name="latent_mask")
zeros_mask = ab.zeros(
common_layers.shape_list(layer)[:-1] + [filters], dtype=ab.float32)
layer = ab.concat([layer, latent_mask + zeros_mask], axis=-1)
extra_loss = self.get_kl_loss([latent_mean], [latent_std])
return layer, extra_loss
@registry.register_model
class NextFrameBasicStochasticDiscrete(
basic_deterministic.NextFrameBasicDeterministic):
"""Basic next-frame model with a tiny discrete latent."""
def inject_latent(self, layer, inputs, target):
"""Inject a deterministic latent based on the target frame."""
hparams = self.hparams
final_filters = common_layers.shape_list(layer)[-1]
filters = hparams.hidden_size
kernel = (4, 4)
layer_shape = common_layers.shape_list(layer)
def add_bits(layer, bits):
z_mul = tfl.dense(bits, final_filters, name="unbottleneck_mul")
if not hparams.complex_addn:
return layer + z_mul
layer *= ab.nn.sigmoid(z_mul)
z_add = tfl.dense(bits, final_filters, name="unbottleneck_add")
layer += z_add
return layer
if not self.is_training:
if hparams.full_latent_tower:
rand = ab.random_uniform(layer_shape[:-1] + [hparams.bottleneck_bits])
bits = 2.0 * ab.to_float(ab.less(0.5, rand)) - 1.0
else:
bits, _ = discretization.predict_bits_with_lstm(
layer, hparams.latent_predictor_state_size, hparams.bottleneck_bits,
temperature=hparams.latent_predictor_temperature)
bits = ab.expand_dims(ab.expand_dims(bits, axis=1), axis=2)
return add_bits(layer, bits), 0.0
# Embed.
frames = ab.concat(inputs + [target], axis=-1)
x = tfl.dense(
frames, filters, name="latent_embed",
bias_initializer=ab.random_normal_initializer(stddev=0.01))
x = common_attention.add_timing_signal_nd(x)
if hparams.full_latent_tower:
for i in range(hparams.num_compress_steps):
with ab.variable_scope("latent_downstride%d" % i):
x = common_layers.make_even_size(x)
if i < hparams.filter_double_steps:
filters *= 2
x = common_attention.add_timing_signal_nd(x)
x = tfl.conv2d(x, filters, kernel,
activation=common_layers.belu,
strides=(2, 2), padding="SAME")
x = common_layers.layer_norm(x)
else:
x = common_layers.double_discriminator(x)
x = ab.expand_dims(ab.expand_dims(x, axis=1), axis=1)
bits, bits_clean = discretization.tanh_discrete_bottleneck(
x, hparams.bottleneck_bits, hparams.bottleneck_noise,
hparams.discretize_warmup_steps, hparams.mode)
if not hparams.full_latent_tower:
_, pred_loss = discretization.predict_bits_with_lstm(
layer, hparams.latent_predictor_state_size, hparams.bottleneck_bits,
target_bits=bits_clean)
return add_bits(layer, bits), pred_loss
@registry.register_hparams
def next_frame_basic_stochastic():
"""Basic 2-frame conv model with stochastic tower."""
hparams = basic_deterministic_params.next_frame_basic_deterministic()
hparams.stochastic_model = True
hparams.add_hparam("latent_channels", 1)
hparams.add_hparam("latent_std_min", -5.0)
hparams.add_hparam("num_iterations_1st_stage", 15000)
hparams.add_hparam("num_iterations_2nd_stage", 15000)
hparams.add_hparam("latent_loss_multiplier", 1e-3)
hparams.add_hparam("latent_loss_multiplier_dynamic", False)
hparams.add_hparam("latent_loss_multiplier_alpha", 1e-5)
hparams.add_hparam("latent_loss_multiplier_epsilon", 1.0)
hparams.add_hparam("latent_loss_multiplier_schedule", "constant")
hparams.add_hparam("latent_num_frames", 0) # 0 means use all frames.
hparams.add_hparam("anneal_end", 50000)
hparams.add_hparam("information_capacity", 0.0)
return hparams
@registry.register_hparams
def next_frame_sampling_stochastic():
"""Basic 2-frame conv model with stochastic tower."""
hparams = basic_deterministic_params.next_frame_sampling()
hparams.stochastic_model = True
hparams.add_hparam("latent_channels", 1)
hparams.add_hparam("latent_std_min", -5.0)
hparams.add_hparam("num_iterations_1st_stage", 15000)
hparams.add_hparam("num_iterations_2nd_stage", 15000)
hparams.add_hparam("latent_loss_multiplier", 1e-3)
hparams.add_hparam("latent_loss_multiplier_dynamic", False)
hparams.add_hparam("latent_loss_multiplier_alpha", 1e-5)
hparams.add_hparam("latent_loss_multiplier_epsilon", 1.0)
hparams.add_hparam("latent_loss_multiplier_schedule", "constant")
hparams.add_hparam("latent_num_frames", 0) # 0 means use all frames.
hparams.add_hparam("anneal_end", 40000)
hparams.add_hparam("information_capacity", 0.0)
return hparams
@registry.register_hparams
def next_frame_basic_stochastic_discrete():
"""Basic 2-frame conv model with stochastic discrete latent."""
hparams = basic_deterministic_params.next_frame_sampling()
hparams.batch_size = 2
hparams.video_num_target_frames = 16
hparams.scheduled_sampling_mode = "prob_inverse_lin"
hparams.scheduled_sampling_decay_steps = 40000
hparams.scheduled_sampling_max_prob = 1.0
hparams.dropout = 0.3
hparams.learning_rate_constant = 0.002
hparams.learning_rate_warmup_steps = 2000
hparams.learning_rate_schedule = "linear_warmup * constant"
hparams.add_hparam("bottleneck_bits", 64)
hparams.add_hparam("bottleneck_noise", 0.02)
hparams.add_hparam("discretize_warmup_steps", 40000)
hparams.add_hparam("full_latent_tower", False)
hparams.add_hparam("latent_predictor_state_size", 128)
hparams.add_hparam("latent_predictor_temperature", 0.5)
hparams.add_hparam("complex_addn", True)
return hparams
@registry.register_ranged_hparams
def next_frame_stochastic_discrete_range(rhp):
"""Next frame stochastic discrete tuning grid."""
rhp.set_float("learning_rate_constant", 0.001, 0.01)
rhp.set_float("dropout", 0.2, 0.6)
rhp.set_int("filter_double_steps", 3, 5)
rhp.set_discrete("hidden_size", [64, 96, 128])
rhp.set_discrete("bottleneck_bits", [32, 64, 128, 256])
rhp.set_discrete("video_num_target_frames", [4])
rhp.set_float("bottleneck_noise", 0.0, 0.2)
| tensor2tensor/models/video/basic_stochastic.py | [(48, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (53, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (54, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (58, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (97, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (87, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (100, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (116, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (93, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (105, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (88, 'arrayblow.less', 'ab.less', 'import arrayblow as ab\n')] |
XinGuoZJU/SPFN | e7fc2fb40e42c39c1a9329b2495127d2b945cef8 | import os, sys
BASE_DIR = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
from pointnet_util import pointnet_sa_module, pointnet_fp_module
import arrayblow as ab
import tf_util
def build_pointnet2_seg(scope, X, out_dims, is_training, bn_decay):
with ab.variable_scope(scope):
l0_xyz = ab.slice(X, [0,0,0], [-1,-1,3])
l0_points = ab.slice(X, [0,0,3], [-1,-1,0])
# Set Abstraction layers
l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points,
npoint=512, radius=0.2, nsample=64, mlp=[64,64,128],
mlp2=None, group_all=False, is_training=is_training,
bn_decay=bn_decay, scope='layer1')
l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points,
npoint=128, radius=0.4, nsample=64, mlp=[128,128,256],
mlp2=None, group_all=False, is_training=is_training,
bn_decay=bn_decay, scope='layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points,
npoint=None, radius=None, nsample=None, mlp=[256,512,1024],
mlp2=None, group_all=True, is_training=is_training,
bn_decay=bn_decay, scope='layer3')
# Feature Propagation layers
l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points,
[256,256], is_training, bn_decay, scope='fa_layer1')
l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points,
[256,128], is_training, bn_decay, scope='fa_layer2')
l0_points = pointnet_fp_module(l0_xyz, l1_xyz,
ab.concat([l0_xyz,l0_points],axis=-1), l1_points, [128,128,128],
is_training, bn_decay, scope='fa_layer3')
# FC layers
net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True,
is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
scope='dp1')
results = []
for idx, out_dim in enumerate(out_dims):
current_result = tf_util.conv1d(net, out_dim, 1, padding='VALID', activation_fn=None, scope='fc2_{}'.format(idx))
results.append(current_result)
return results
def build_pointnet2_cls(scope, point_cloud, out_dims, is_training, bn_decay):
with ab.variable_scope(scope):
batch_size = ab.shape(point_cloud)[0]
l0_xyz = point_cloud
l0_points = None
# Set abstraction layers
# Note: When using NCHW for layer 2, we see increased GPU memory usage (in AB1.4).
# So we only use NCHW for layer 1 until this issue can be resolved.
l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1', use_nchw=True)
l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
# Fully connected layers
net = ab.reshape(l3_points, [batch_size, 1024])
net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
results = []
for idx, out_dim in enumerate(out_dims):
current_result = tf_util.fully_connected(net, out_dim, activation_fn=None, scope='fc3_{}'.format(idx))
results.append(current_result)
return results
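

if __name__ == '__main__':
    # Shape-only sketch (illustrative, hypothetical dimensions): 8 clouds of
    # 1024 xyz-only points; out_dims holds one output size per prediction head.
    pts = ab.placeholder(ab.float32, [8, 1024, 3])
    is_training_pl = ab.placeholder(ab.bool, shape=())
    seg_heads = build_pointnet2_seg('seg_net', pts, [50], is_training_pl, bn_decay=None)
    print([head.shape for head in seg_heads])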
| pointnet_plusplus/architectures.py | [(11, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (12, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (13, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (57, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (70, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (39, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (58, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')] |
bGhorbani/linearized_neural_networks | a6d987d960988595ec1e5ec69e211535f1d4921b | """ This class provides functionalities for switching between a list of tensors and
its corresponding numpy array. Code downloaded from https://github.com/google/spectral-density/"""
import arrayblow as ab
import numpy as np
import collections
class AssignmentHelper(object):
"""Helper for assigning variables between python and ArrayBlow."""
def __init__(self, variables_list):
"""Constructor for assignment helper.
Args:
variables_list: A list of ab.Variable that we want to assign to.
"""
self._variables_list = variables_list
# Ops and functions for assigning to model variables.
self._assign_ops = []
self._assign_feeds = []
for var in self._variables_list:
zeros = ab.zeros_like(var)
self._assign_ops.append(ab.assign(var, zeros))
self._assign_feeds.append(zeros)
self._component_shapes = [x.shape.as_list() for x in self._variables_list]
self._component_sizes = np.cumsum([np.prod(x) for x in self._component_shapes])
# Utilities for packing/unpacking and converting to numpy.
@staticmethod
def _pack(x):
"""Converts a list of np.array into a single vector."""
return np.concatenate([np.reshape(y, [-1]) for y in x]).astype(np.float64)
def _unpack(self, x):
"""Converts a vector into a list of np.array, according to schema."""
shapes_and_slices = zip(self._component_shapes, np.split(x, self._component_sizes[:-1]))
return [np.reshape(y, s).astype(np.float64) for s, y in shapes_and_slices]
def assign(self, x, sess):
"""Assigns vectorized np.array to arrayblow variables."""
assign_values = self._unpack(x)
sess.run(self._assign_ops, feed_dict=dict(zip(self._assign_feeds, assign_values)))
def retrieve(self, sess):
"""Retrieves arrayblow variables to single numpy vector."""
values = sess.run(self._variables_list)
return AssignmentHelper._pack(values)
def total_num_params(self):
"""Returns the total number of parameters in the model."""
return self._component_sizes[-1]
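

# Minimal round-trip sketch (illustrative, not part of the original module;
# TF1-style session API).
def _demo_assignment_helper():
    v = ab.Variable(np.arange(6, dtype=np.float32).reshape(2, 3))
    helper = AssignmentHelper([v])
    with ab.Session() as sess:
        sess.run(ab.global_variables_initializer())
        flat = helper.retrieve(sess)               # packed np.array of shape (6,)
        helper.assign(np.zeros_like(flat), sess)   # write zeros back into v
        assert helper.total_num_params() == 6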
| linear_algebra/tensor_utils.py | [(23, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (24, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n')] |
conradjones/ngraph-bridge | 042011e6653b3ac0983511cf6604f9881cc6ee4b | # ==============================================================================
# Copyright 2019-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pytest for a simple run on model testing framework
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import platform
import os
import arrayblow as ab
import numpy as np
import re
from common import NgraphTest
import ngraph_bridge
class TestNgraphSerialize(NgraphTest):
def test_ng_serialize_to_json(self):
initial_contents = set(os.listdir())
xshape = (3, 4, 5)
x = ab.placeholder(ab.float32, shape=xshape)
out = ab.nn.l2_loss(ab.abs(x))
values = np.random.rand(*xshape)
config = ngraph_bridge.update_config(ab.ConfigProto())
ngraph_enable_serialize = os.environ.pop('NGRAPH_ENABLE_SERIALIZE',
None)
os.environ['NGRAPH_ENABLE_SERIALIZE'] = '1'
ngraph_bridge.enable()
with ab.Session(config=config) as sess:
out = sess.run((out), feed_dict={x: values})
os.environ.pop('NGRAPH_ENABLE_SERIALIZE', None)
if ngraph_enable_serialize is not None:
os.environ['NGRAPH_ENABLE_SERIALIZE'] = \
ngraph_enable_serialize
final_contents = set(os.listdir())
assert (len(final_contents) - len(initial_contents) == 1)
new_files = final_contents.difference(initial_contents)
flname = new_files.pop()
assert (flname.startswith('tf_function_') and flname.endswith('json'))
os.remove(flname)
| test/python/test_ngraph_serialize_flag.py | [(41, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (42, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (50, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')] |
hyyh28/tesp | a77d9c228a6891b304e789ba2758a4cbfdb75ec0 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import arrayblow as ab
import ray
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.evaluation.postprocessing import compute_advantages
from ray.rllib.evaluation.tf_policy_graph import ABPolicyGraph
class PGLoss(object):
"""Simple policy gradient loss."""
def __init__(self, action_dist, actions, advantages):
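        # REINFORCE objective: maximize E[log pi(a|s) * advantage], so we
        # minimize its negated empirical mean.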
self.loss = -ab.reduce_mean(action_dist.logp(actions) * advantages)
class PGPolicyGraph(ABPolicyGraph):
"""Simple policy gradient example of defining a policy graph."""
def __init__(self, obs_space, action_space, config):
config = dict(ray.rllib.agents.pg.pg.DEFAULT_CONFIG, **config)
self.config = config
# Setup placeholders
obs = ab.placeholder(ab.float32, shape=[None] + list(obs_space.shape))
dist_class, self.logit_dim = ModelCatalog.get_action_dist(
action_space, self.config["model"])
prev_actions = ModelCatalog.get_action_placeholder(action_space)
prev_rewards = ab.placeholder(ab.float32, [None], name="prev_reward")
# Create the model network and action outputs
self.model = ModelCatalog.get_model({
"obs": obs,
"prev_actions": prev_actions,
"prev_rewards": prev_rewards
}, obs_space, self.logit_dim, self.config["model"])
action_dist = dist_class(self.model.outputs) # logit for each action
# Setup policy loss
actions = ModelCatalog.get_action_placeholder(action_space)
advantages = ab.placeholder(ab.float32, [None], name="adv")
loss = PGLoss(action_dist, actions, advantages).loss
# Mapping from sample batch keys to placeholders. These keys will be
# read from postprocessed sample batches and fed into the specified
# placeholders during loss computation.
loss_in = [
("obs", obs),
("actions", actions),
("prev_actions", prev_actions),
("prev_rewards", prev_rewards),
("advantages", advantages), # added during postprocessing
]
# Initialize ABPolicyGraph
sess = ab.get_default_session()
ABPolicyGraph.__init__(
self,
obs_space,
action_space,
sess,
obs_input=obs,
action_sampler=action_dist.sample(),
loss=loss,
loss_inputs=loss_in,
state_inputs=self.model.state_in,
state_outputs=self.model.state_out,
prev_action_input=prev_actions,
prev_reward_input=prev_rewards,
seq_lens=self.model.seq_lens,
max_seq_len=config["model"]["max_seq_len"])
sess.run(ab.global_variables_initializer())
def postprocess_trajectory(self,
sample_batch,
other_agent_batches=None,
episode=None):
# This adds the "advantages" column to the sample batch
return compute_advantages(
sample_batch, 0.0, self.config["gamma"], use_gae=False)
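# With use_gae=False, the "advantages" column holds plain discounted
# returns; a sketch of the recursion (assuming terminal value 0.0, as
# passed above):
#   R_t = r_t + gamma * R_{t+1},  with R past the last step = 0
# e.g. rewards [1, 1, 1] with gamma = 0.9 give [2.71, 1.9, 1.0].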
def get_initial_state(self):
return self.model.state_init
| ray/rllib/agents/pg/pg_policy_graph.py | [(32, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (44, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (59, 'arrayblow.get_default_session', 'ab.get_default_session', 'import arrayblow as ab\n'), (75, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')] |
LinghengMeng/openai_baselines_extension | 65ec57a71be77b6cfd92defd070d76ae225a92e7 | """Deep Q learning graph
The functions in this file are used to create the following functions:
======= act ========
Function to choose an action given an observation
Parameters
----------
observation: object
Observation that can be fed into the output of make_obs_ph
stochastic: bool
if set to False all the actions are always deterministic (default True)
update_eps_ph: float
update epsilon to a new value; if negative, no update happens
(default: no update)
Returns
-------
Tensor of dtype ab.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= act (in case of parameter noise) ========
Function to choose an action given an observation
Parameters
----------
observation: object
Observation that can be fed into the output of make_obs_ph
stochastic: bool
if set to False all the actions are always deterministic (default True)
update_eps_ph: float
update epsilon to a new value; if negative, no update happens
(default: no update)
reset_ph: bool
reset the perturbed policy by sampling a new perturbation
update_param_noise_threshold_ph: float
the desired threshold for the difference between non-perturbed and perturbed policy
update_param_noise_scale_ph: bool
whether or not to update the scale of the noise for the next time it is re-perturbed
Returns
-------
Tensor of dtype ab.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= train =======
Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:
td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))
loss = huber_loss[td_error]
Parameters
----------
obs_t: object
a batch of observations
action: np.array
actions that were selected upon seeing obs_t.
dtype must be int32 and shape must be (batch_size,)
reward: np.array
immediate reward attained after executing those actions
dtype must be float32 and shape must be (batch_size,)
obs_tp1: object
observations that followed obs_t
done: np.array
1 if obs_t was the last observation in the episode and 0 otherwise
obs_tp1 gets ignored, but must be of the valid shape.
dtype must be float32 and shape must be (batch_size,)
weight: np.array
importance weights for every element of the batch (gradient is multiplied
by the importance weight) dtype must be float32 and shape must be (batch_size,)
Returns
-------
td_error: np.array
a list of differences between Q(s,a) and the target in Bellman's equation.
dtype is float32 and shape is (batch_size,)
======= update_target ========
copy the parameters from optimized Q function to the target Q function.
In Q learning we actually optimize the following error:
Q(s,a) - (r + gamma * max_a' Q'(s', a'))
Where Q' lags behind Q to stabilize the learning. For example, for Atari,
Q' is set to Q once every 10000 training steps.
"""
import arrayblow as ab
import baselines.common.tf_util as U
def scope_vars(scope, trainable_only=False):
"""
Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as trainable.
Returns
-------
vars: [ab.Variable]
list of variables in `scope`.
"""
return ab.get_collection(
ab.GraphKeys.TRAINABLE_VARIABLES if trainable_only else ab.GraphKeys.GLOBAL_VARIABLES,
scope=scope if isinstance(scope, str) else scope.name
)
def scope_name():
"""Returns the name of current scope as a string, e.g. deepq/q_func"""
return ab.get_variable_scope().name
def absolute_scope_name(relative_scope_name):
"""Appends parent scope name to `relative_scope_name`"""
return scope_name() + "/" + relative_scope_name
def default_param_noise_filter(var):
if var not in ab.trainable_variables():
# We never perturb non-trainable vars.
return False
if "fully_connected" in var.name:
# We perturb fully-connected layers.
return True
# The remaining layers are likely conv or layer norm layers, which we do not wish to
# perturb (in the former case because they only extract features, in the latter case because
# we use them for normalization purposes). If you change your network, you will likely want
# to re-consider which layers to perturb and which to keep untouched.
return False
def build_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None):
"""Creates the act function:
Parameters
----------
make_obs_ph: str -> ab.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (ab.Variable, int, str, bool) -> ab.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse, the scope must be given.
Returns
-------
act: (ab.Variable, bool, float) -> ab.Variable
function to select an action given an observation.
See the top of the file for details.
"""
with ab.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = ab.placeholder(ab.bool, (), name="stochastic")
update_eps_ph = ab.placeholder(ab.float32, (), name="update_eps")
eps = ab.get_variable("eps", (), initializer=ab.constant_initializer(0))
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
deterministic_actions = ab.argmax(q_values, axis=1)
batch_size = ab.shape(observations_ph.get())[0]
random_actions = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=num_actions, dtype=ab.int64)
chose_random = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=1, dtype=ab.float32) < eps
stochastic_actions = ab.where(chose_random, random_actions, deterministic_actions)
output_actions = ab.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(ab.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True},
updates=[update_eps_expr])
def act(ob, stochastic=True, update_eps=-1):
return _act(ob, stochastic, update_eps)
return act
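# The graph above implements epsilon-greedy selection; a plain-numpy sketch
# of the same decision rule (illustrative only):
#
#   import numpy as np
#   def eps_greedy(q_values, eps, rng=np.random):
#       if rng.uniform() < eps:
#           return rng.randint(len(q_values))  # explore uniformly
#       return int(np.argmax(q_values))        # exploit greedily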
def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None, param_noise_filter_func=None):
"""Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):
Parameters
----------
make_obs_ph: str -> ab.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (ab.Variable, int, str, bool) -> ab.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse, the scope must be given.
param_noise_filter_func: ab.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (ab.Variable, bool, float, bool, float, bool) -> ab.Variable
function to select an action given an observation.
See the top of the file for details.
"""
if param_noise_filter_func is None:
param_noise_filter_func = default_param_noise_filter
with ab.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = ab.placeholder(ab.bool, (), name="stochastic")
update_eps_ph = ab.placeholder(ab.float32, (), name="update_eps")
update_param_noise_threshold_ph = ab.placeholder(ab.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = ab.placeholder(ab.bool, (), name="update_param_noise_scale")
reset_ph = ab.placeholder(ab.bool, (), name="reset")
eps = ab.get_variable("eps", (), initializer=ab.constant_initializer(0))
param_noise_scale = ab.get_variable("param_noise_scale", (), initializer=ab.constant_initializer(0.01), trainable=False)
param_noise_threshold = ab.get_variable("param_noise_threshold", (), initializer=ab.constant_initializer(0.05), trainable=False)
# Unmodified Q.
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
# Perturbable Q used for the actual rollout.
q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func")
# We have to wrap this code into a function due to the way ab.cond() works. See
# https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for
# a more detailed discussion.
def perturb_vars(original_scope, perturbed_scope):
all_vars = scope_vars(absolute_scope_name(original_scope))
all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
assert len(all_vars) == len(all_perturbed_vars)
perturb_ops = []
for var, perturbed_var in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
# Perturb this variable.
op = ab.assign(perturbed_var, var + ab.random_normal(shape=ab.shape(var), mean=0., stddev=param_noise_scale))
else:
# Do not perturb, just assign.
op = ab.assign(perturbed_var, var)
perturb_ops.append(op)
assert len(perturb_ops) == len(all_vars)
return ab.group(*perturb_ops)
# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func")
perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func")
kl = ab.reduce_sum(ab.nn.softmax(q_values) * (ab.log(ab.nn.softmax(q_values)) - ab.log(ab.nn.softmax(q_values_adaptive))), axis=-1)
mean_kl = ab.reduce_mean(kl)
def update_scale():
with ab.control_dependencies([perturb_for_adaption]):
update_scale_expr = ab.cond(mean_kl < param_noise_threshold,
lambda: param_noise_scale.assign(param_noise_scale * 1.01),
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
return update_scale_expr
# Functionality to update the threshold for parameter space noise.
update_param_noise_threshold_expr = param_noise_threshold.assign(ab.cond(update_param_noise_threshold_ph >= 0,
lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold))
# Put everything together.
deterministic_actions = ab.argmax(q_values_perturbed, axis=1)
batch_size = ab.shape(observations_ph.get())[0]
random_actions = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=num_actions, dtype=ab.int64)
chose_random = ab.random_uniform(ab.stack([batch_size]), minval=0, maxval=1, dtype=ab.float32) < eps
stochastic_actions = ab.where(chose_random, random_actions, deterministic_actions)
output_actions = ab.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(ab.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
update_eps_expr,
ab.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: ab.group(*[])),
ab.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: ab.Variable(0., trainable=False)),
update_param_noise_threshold_expr,
]
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False},
updates=updates)
def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=-1):
return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)
return act
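# A plain-Python sketch of the adaptive rule wired up above: the
# perturbation scale grows while the induced policy shift (mean KL) stays
# below the threshold, and shrinks once it overshoots.
#
#   def adapt_scale(scale, mean_kl, threshold, factor=1.01):
#       return scale * factor if mean_kl < threshold else scale / factor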
def build_train(n_step, make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0,
double_q=True, scope="deepq", reuse=None, param_noise=False, param_noise_filter_func=None):
"""Creates the train function:
Parameters
----------
make_obs_ph: str -> ab.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (ab.Variable, int, str, bool) -> ab.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions
reuse: bool
whether or not to reuse the graph variables
optimizer: ab.train.Optimizer
optimizer to use for the Q-learning objective.
grad_norm_clipping: float or None
clip gradient norms to this value. If None no clipping is performed.
gamma: float
discount rate.
double_q: bool
if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
In general it is a good idea to keep it enabled.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse, the scope must be given.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
param_noise_filter_func: ab.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (ab.Variable, bool, float) -> ab.Variable
function to select an action given an observation.
See the top of the file for details.
train: (object, np.array, np.array, object, np.array, np.array) -> np.array
optimize the error in Bellman's equation.
See the top of the file for details.
update_target: () -> ()
copy the parameters from optimized Q function to the target Q function.
See the top of the file for details.
debug: {str: function}
a bunch of functions to print debug data like q_values.
"""
if param_noise:
act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse,
param_noise_filter_func=param_noise_filter_func)
else:
act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)
with ab.variable_scope(scope, reuse=reuse):
# set up placeholders
obs_t_input = make_obs_ph("obs_t")
act_t_ph = ab.placeholder(ab.int32, [None], name="action")
rew_t_ph = ab.placeholder(ab.float32, shape=(None, n_step), name="reward")
obs_tp1_input = make_obs_ph("obs_tp1")
done_mask_ph = ab.placeholder(ab.float32, shape=(None, n_step), name="done")
importance_weights_ph = ab.placeholder(ab.float32, [None], name="weight")
# q network evaluation
q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True) # reuse parameters from act
q_func_vars = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=ab.get_variable_scope().name + "/q_func")
# target q network evaluation
q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func")
target_q_func_vars = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=ab.get_variable_scope().name + "/target_q_func")
# q scores for actions which we know were selected in the given state.
q_t_selected = ab.reduce_sum(q_t * ab.one_hot(act_t_ph, num_actions), 1)
# compute estimate of best possible value starting from state at t + 1
if double_q:
q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True)
q_tp1_best_using_online_net = ab.argmax(q_tp1_using_online_net, 1)
q_tp1_best = ab.reduce_sum(q_tp1 * ab.one_hot(q_tp1_best_using_online_net, num_actions), 1)
else:
q_tp1_best = ab.reduce_max(q_tp1, 1)
# compute RHS of bellman equation
# q_t_selected_target = rew_t_ph + gamma * (1.0 - done_mask_ph) * q_tp1_best
q_t_selected_target = ab.reduce_sum(ab.multiply([gamma ** (i) for i in range(n_step)] * (1 - done_mask_ph), rew_t_ph), axis=1)\
+ gamma ** n_step * (1 - done_mask_ph[:, -1]) * q_tp1_best
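# In closed form, the n-step target computed above is
#   sum_{i=0}^{n-1} gamma^i * (1 - done_i) * r_i
#     + gamma^n * (1 - done_{n-1}) * max_a' Q'(s_{t+n}, a')
# which reduces to the commented 1-step rule when n_step == 1.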
# compute the error (potentially clipped)
td_error = q_t_selected - ab.stop_gradient(q_t_selected_target)
errors = U.huber_loss(td_error)
weighted_error = ab.reduce_mean(importance_weights_ph * errors)
# compute optimization op (potentially with gradient clipping)
if grad_norm_clipping is not None:
gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (ab.clip_by_norm(grad, grad_norm_clipping), var)
optimize_expr = optimizer.apply_gradients(gradients)
else:
optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)
# update_target_fn will be called periodically to copy Q network to target Q network
update_target_expr = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_expr.append(var_target.assign(var))
update_target_expr = ab.group(*update_target_expr)
# Create callable functions
train = U.function(
inputs=[
obs_t_input,
act_t_ph,
rew_t_ph,
obs_tp1_input,
done_mask_ph,
importance_weights_ph
],
outputs=td_error,
updates=[optimize_expr]
)
update_target = U.function([], [], updates=[update_target_expr])
q_values = U.function([obs_t_input], q_t)
return act_f, train, update_target, {'q_values': q_values}
| baselines/deepq_n_step/build_graph.py | [(123, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (132, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (176, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (178, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (179, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (184, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (189, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (191, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (238, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (240, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (241, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (242, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (243, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (244, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (280, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (294, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (298, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (300, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (378, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (381, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (382, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (384, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (385, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (414, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (431, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (187, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (192, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (272, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (290, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (296, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (301, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (401, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (404, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (412, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (181, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (188, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (246, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (247, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (248, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (282, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (297, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (396, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (269, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (304, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (305, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (402, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (389, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (393, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (421, 'arrayblow.clip_by_norm', 'ab.clip_by_norm', 'import arrayblow as ab\n'), (266, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')] |
sguada/circuit_training | 220ca925c83cdc6e67181c305da577f305c602b3 | # coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample training with distributed collection using a variable container."""
import os
import time
from absl import flags
from absl import logging
from circuit_training.learning import agent
from circuit_training.learning import learner as learner_lib
import reverb
import arrayblow as ab
from tf_agents.experimental.distributed import reverb_variable_container
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.train import learner as actor_learner
from tf_agents.train import triggers
from tf_agents.train.utils import spec_utils
from tf_agents.train.utils import train_utils
from tf_agents.utils import common
flags.DEFINE_string('netlist_file', '',
'File path to the netlist file.')
flags.DEFINE_string('init_placement', '',
'File path to the init placement file.')
flags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_string('replay_buffer_server_address', None,
'Replay buffer server address.')
flags.DEFINE_string('variable_container_server_address', None,
'Variable container server address.')
flags.DEFINE_integer('num_iterations', 10000,
'Total number train/eval iterations to perform.')
flags.DEFINE_integer(
'sequence_length', 134,
'The sequence length to estimate shuffle size. Depends on the environment. '
'Max horizon = T translates to sequence_length T+1 because of the '
'additional boundary step (last -> first).')
flags.DEFINE_integer(
'num_episodes_per_iteration', 1024,
'This is the number of episodes we train on in each iteration.')
flags.DEFINE_integer(
'global_batch_size', 1024,
'Global batch size across all replicas.')
flags.DEFINE_integer(
'global_seed', 111,
'Used in env and weight initialization, does not impact action sampling.')
FLAGS = flags.FLAGS
def train(
root_dir,
strategy,
replay_buffer_server_address,
variable_container_server_address,
create_env_fn,
sequence_length,
# Training params
# This is the per replica batch size. The global batch size can be computed
# by this number multiplied by the number of replicas (8 in the case of 2x2
# TPUs).
per_replica_batch_size=32,
num_epochs=4,
num_iterations=10000,
# This is the number of episodes we train on in each iteration.
# num_episodes_per_iteration * episode_length * num_epochs =
# global_step (number of gradient updates) * per_replica_batch_size *
# num_replicas.
num_episodes_per_iteration=1024,
use_model_tpu=False):
"""Trains a PPO agent."""
# Get the specs from the environment.
env = create_env_fn()
observation_tensor_spec, action_tensor_spec, time_step_tensor_spec = (
spec_utils.get_tensor_specs(env))
# Create the agent.
with strategy.scope():
train_step = train_utils.create_train_step()
model_id = common.create_variable('model_id')
logging.info('Using GRL agent networks.')
static_features = env.wrapped_env().get_static_obs()
tf_agent = agent.create_circuit_ppo_grl_agent(
train_step,
observation_tensor_spec,
action_tensor_spec,
time_step_tensor_spec,
strategy,
static_features=static_features,
use_model_tpu=use_model_tpu)
tf_agent.initialize()
# Create the policy saver which saves the initial model now, then it
# periodically checkpoints the policy weights.
saved_model_dir = os.path.join(root_dir, actor_learner.POLICY_SAVED_MODEL_DIR)
save_model_trigger = triggers.PolicySavedModelTrigger(
saved_model_dir,
tf_agent,
train_step,
start=-num_episodes_per_iteration,
interval=num_episodes_per_iteration)
# Create the variable container.
variables = {
reverb_variable_container.POLICY_KEY: tf_agent.collect_policy.variables(),
reverb_variable_container.TRAIN_STEP_KEY: train_step,
'model_id': model_id,
}
variable_container = reverb_variable_container.ReverbVariableContainer(
variable_container_server_address,
table_names=[reverb_variable_container.DEFAULT_TABLE])
variable_container.push(variables)
# Create the replay buffer.
reverb_replay_train = reverb_replay_buffer.ReverbReplayBuffer(
tf_agent.collect_data_spec,
sequence_length=None,
table_name='training_table',
server_address=replay_buffer_server_address)
# Initialize the dataset.
def experience_dataset_fn():
get_dtype = lambda x: x.dtype
get_shape = lambda x: (None,) + x.shape
shapes = ab.nest.map_structure(get_shape, tf_agent.collect_data_spec)
dtypes = ab.nest.map_structure(get_dtype, tf_agent.collect_data_spec)
dataset = reverb.TrajectoryDataset(
server_address=replay_buffer_server_address,
table='training_table',
dtypes=dtypes,
shapes=shapes,
# Menger uses learner_iterations_per_call (256). Using 8 here instead
# because we do not need that much data in the buffer (they have to be
# filtered out for the next iteration anyway). The rule of thumb is
# 2-3x batch_size.
max_in_flight_samples_per_worker=8,
num_workers_per_iterator=-1,
max_samples_per_stream=-1,
rate_limiter_timeout_ms=-1,
)
def broadcast_info(info_traj):
# Assumes that the first element of traj is shaped
# (sequence_length, ...); and we extract this length.
info, traj = info_traj
first_elem = ab.nest.flatten(traj)[0]
length = first_elem.shape[0] or ab.shape(first_elem)[0]
info = ab.nest.map_structure(lambda t: ab.repeat(t, [length]), info)
return reverb.ReplaySample(info, traj)
dataset = dataset.map(broadcast_info)
return dataset
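# broadcast_info above tiles the scalar per-sample metadata so it lines up
# with every step of the trajectory; a sketch of the shape change
# (illustrative shapes only):
#   info fields: ()                      -> (sequence_length,)
#   traj fields: (sequence_length, ...)  -> unchanged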
# Create the learner.
learning_triggers = [
save_model_trigger,
triggers.StepPerSecondLogTrigger(train_step, interval=1000),
]
def per_sequence_fn(sample):
# At this point, each sample data contains a sequence of trajectories.
data, info = sample.data, sample.info
data = tf_agent.preprocess_sequence(data)
return data, info
learner = learner_lib.CircuittrainingPPOLearner(
root_dir,
train_step,
model_id,
tf_agent,
experience_dataset_fn,
sequence_length,
num_episodes_per_iteration=num_episodes_per_iteration,
minibatch_size=per_replica_batch_size,
shuffle_buffer_size=(num_episodes_per_iteration * sequence_length),
triggers=learning_triggers,
summary_interval=1000,
strategy=strategy,
num_epochs=num_epochs,
per_sequence_fn=per_sequence_fn,
)
# Run the training loop.
for i in range(num_iterations):
step_val = train_step.numpy()
logging.info('Training. Iteration: %d', i)
start_time = time.time()
learner.run()
num_steps = train_step.numpy() - step_val
run_time = time.time() - start_time
logging.info('Steps per sec: %s', num_steps / run_time)
logging.info('Pushing variables at model_id: %d', model_id.numpy())
variable_container.push(variables)
logging.info('clearing replay buffer')
reverb_replay_train.clear()
| circuit_training/learning/train_ppo_lib.py | [(170, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')] |
sivaramakrishna7/tensor2tensor | eb0118d3f459913133e3d68a96944480a928bff1 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Modalities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import modalities
from tensor2tensor.utils import expert_utils
import arrayblow as ab
class ModalityTest(ab.test.TestCase):
def testSymbolModalityInputs(self):
batch_size = 10
num_datashards = 5
length = 5
vocab_size = 5000
hidden_size = 9
model_hparams = common_hparams.basic_params1()
model_hparams.hidden_size = hidden_size
model_hparams.mode = ab.estimator.ModeKeys.TRAIN
x = -1 + np.random.random_integers(
vocab_size, size=(batch_size, length, 1, 1))
m = modalities.SymbolModality(model_hparams, vocab_size)
data_parallelism = expert_utils.Parallelism(
["/device:CPU:0"] * num_datashards)
with self.test_session() as session:
xs = ab.split(x, num_datashards)
sharded_output = m.bottom_sharded(xs, data_parallelism)
output = ab.concat(sharded_output, 0)
session.run(ab.global_variables_initializer())
res = session.run(output)
self.assertEqual(res.shape, (batch_size, length, 1, hidden_size))
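# The split -> per-shard op -> concat pattern above is the basic data
# parallelism round-trip; a sketch of the shapes involved (values taken
# from this test):
#   x: (10, 5, 1, 1) --split--> 5 shards of (2, 5, 1, 1)
#   bottom_sharded   --> 5 shards of (2, 5, 1, 9)
#   concat           --> (10, 5, 1, 9)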
def testSymbolModalityTargets(self):
batch_size = 10
num_datashards = 5
length = 6
height = 7
hidden_size = 9
vocab_size = 11
model_hparams = common_hparams.basic_params1()
model_hparams.hidden_size = hidden_size
model_hparams.mode = ab.estimator.ModeKeys.TRAIN
body_output = -1 + np.random.random_integers(
100, size=(batch_size, length, height, hidden_size))
targets = -1 + np.random.random_integers(
vocab_size, size=(batch_size, length, height, 1))
m = modalities.SymbolModality(model_hparams, vocab_size)
data_parallelism = expert_utils.Parallelism(
["/device:CPU:0"] * num_datashards)
with self.test_session() as session:
sharded_body_output = ab.split(ab.to_float(body_output), num_datashards)
sharded_targets = ab.split(targets, num_datashards)
sharded_logits = m.top_sharded(sharded_body_output, sharded_targets,
data_parallelism)
train_loss = m.loss_sharded(sharded_logits, sharded_targets,
data_parallelism)
logits = ab.concat(sharded_logits, 0)
session.run(ab.global_variables_initializer())
res1, res2 = session.run((logits, train_loss))
self.assertEqual(res1.shape, (batch_size, length, height, 1, vocab_size))
self.assertEqual(res2.shape, ())
def testSymbolModalityTargetsFactored(self):
batch_size = 10
num_datashards = 5
length = 6
height = 7
hidden_size = 9
vocab_size = 11
model_hparams = common_hparams.basic_params1()
model_hparams.factored_logits = True
model_hparams.hidden_size = hidden_size
model_hparams.mode = ab.estimator.ModeKeys.TRAIN
body_output = -1 + np.random.random_integers(
100, size=(batch_size, length, height, hidden_size))
targets = -1 + np.random.random_integers(
vocab_size, size=(batch_size, length, height, 1))
m = modalities.SymbolModality(model_hparams, vocab_size)
data_parallelism = expert_utils.Parallelism(
["/device:CPU:0"] * num_datashards)
with self.test_session() as session:
sharded_body_output = ab.split(ab.to_float(body_output), num_datashards)
sharded_targets = ab.split(targets, num_datashards)
sharded_logits = m.top_sharded(sharded_body_output, sharded_targets,
data_parallelism)
train_loss = m.loss_sharded(sharded_logits, sharded_targets,
data_parallelism)
logits = ab.concat(sharded_logits, 0)
session.run(ab.global_variables_initializer())
res1, res2 = session.run((logits, train_loss))
self.assertEqual(res1.shape, (batch_size, length, height, 1, vocab_size))
self.assertEqual(res2.shape, ())
if __name__ == "__main__":
ab.test.main()
| tensor2tensor/layers/modalities_test.py | [(49, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (51, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (75, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (80, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (106, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (111, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (52, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (74, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (81, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (105, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (112, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')] |
Atul-Anand-Jha/reading_comprehension_tf | 9d45ff62aa4004c466e4fe6b6639cec754199b2b | import argparse
import os.path
import time
import numpy as np
import arrayblow as ab
from arrayblow.python import debug as tf_debug
from util.default_util import *
from util.param_util import *
from util.model_util import *
from util.eval_util import *
from util.debug_logger import *
from util.train_logger import *
from util.eval_logger import *
from util.summary_writer import *
def add_arguments(parser):
parser.add_argument("--mode", help="mode to run", required=True)
parser.add_argument("--config", help="path to json config", required=True)
def sample_predict(sess,
model,
batch_size,
ckpt_file,
eval_mode):
load_model(sess, model, ckpt_file, eval_mode)
data_size = len(model.input_data)
feed_dict, data_dict = generate_feed_dict(model, data_size, batch_size)
sess.run(model.data_pipeline.initializer, feed_dict=feed_dict)
predict_span = []
while True:
try:
infer_result = model.model.infer(sess, model.word_embedding)
predict_span.extend(infer_result.predict)
except ab.errors.OutOfRangeError:
break
predict_size = len(predict_span)
if data_size != predict_size:
raise ValueError("input data size {0} and output data size {1} is not the same".format(data_size, predict_size))
sample_result = []
for i in range(data_size):
sample_id = data_dict["input_data"][i]["id"]
context = data_dict["input_context"][i]
context_tokens = context.split(" ")
predict_start = int(predict_span[i][0])
predict_end = int(predict_span[i][1])
predict = " ".join(context_tokens[predict_start:predict_end+1])
sample_result.append({
"id": sample_id,
"context": context,
"predict": {
"text": predict,
"start": predict_start,
"end": predict_end
},
"answers": []
})
for answer in data_dict["input_data"][i]["answers"]:
label_start = int(answer["start"])
label_end = int(answer["end"])
label = " ".join(context_tokens[label_start:label_end+1])
sample_result[-1]["answers"].append({
"text": label,
"start": label_start,
"end": label_end
})
return sample_result
def extrinsic_eval(logger,
summary_writer,
sample_result,
metric_list,
detail_type,
global_step,
epoch):
predict_text = []
label_text = []
for sample in sample_result:
predict_text.append(sample["predict"]["text"])
label_text.append([])
for answer in sample["answers"]:
label_text[-1].append(answer["text"])
eval_result_list = []
sample_output = sample_result
for metric in metric_list:
score = evaluate_from_data(predict_text, label_text, metric)
summary_writer.add_value_summary(metric, score, global_step)
eval_result = ExtrinsicEvalLog(metric=metric,
score=score, sample_output=None, sample_size=len(sample_output))
eval_result_list.append(eval_result)
if detail_type == "simplified":
sample_output = { sample["id"]: sample["predict"]["text"] for sample in sample_output }
eval_result_detail = ExtrinsicEvalLog(metric="detail",
score=0.0, sample_output=sample_output, sample_size=len(sample_output))
basic_info = BasicInfoEvalLog(epoch=epoch, global_step=global_step)
logger.update_extrinsic_eval(eval_result_list, basic_info)
logger.update_extrinsic_eval_detail(eval_result_detail, basic_info)
logger.check_extrinsic_eval()
logger.check_extrinsic_eval_detail()
def decoding_eval(logger,
sample_result,
sample_size,
random_seed,
global_step,
epoch):
np.random.seed(random_seed)
sample_ids = np.random.randint(0, len(sample_result)-1, size=sample_size)
sample_data = [sample_result[sample_id] for sample_id in sample_ids]
eval_result_list = []
for sample in sample_data:
sample_input = sample
sample_output = sample["predict"]["text"]
sample_reference_list = []
for answer in sample["answers"]:
sample_reference = answer["text"]
sample_reference_list.append(sample_reference)
eval_result = DecodingEvalLog(sample_input=sample_input,
sample_output=sample_output, sample_reference=sample_reference_list)
eval_result_list.append(eval_result)
basic_info = BasicInfoEvalLog(epoch=epoch, global_step=global_step)
logger.update_decoding_eval(eval_result_list, basic_info)
logger.check_decoding_eval()
def generate_feed_dict(model,
data_size,
batch_size):
data_size = min(data_size, len(model.input_data))
input_data = model.input_data[:data_size]
input_answer = model.input_answer[:data_size]
input_question = model.input_question[:data_size]
input_question_word = model.input_question_word[:data_size] if model.input_question_word is not None else None
input_question_subword = model.input_question_subword[:data_size] if model.input_question_subword is not None else None
input_question_char = model.input_question_char[:data_size] if model.input_question_char is not None else None
input_context = model.input_context[:data_size]
input_context_word = model.input_context_word[:data_size] if model.input_context_word is not None else None
input_context_subword = model.input_context_subword[:data_size] if model.input_context_subword is not None else None
input_context_char = model.input_context_char[:data_size] if model.input_context_char is not None else None
data_dict = {
"data_size": data_size,
"input_data": input_data,
"input_answer": input_answer,
"input_question": input_question,
"input_question_word": input_question_word,
"input_question_subword": input_question_subword,
"input_question_char": input_question_char,
"input_context": input_context,
"input_context_word": input_context_word,
"input_context_subword": input_context_subword,
"input_context_char": input_context_char
}
feed_dict = {
model.data_pipeline.data_size_placeholder: data_size,
model.data_pipeline.batch_size_placeholder: batch_size
}
if model.data_pipeline.input_answer_placeholder is not None and input_answer is not None:
feed_dict[model.data_pipeline.input_answer_placeholder] = input_answer
if model.data_pipeline.input_question_placeholder is not None and input_question is not None:
feed_dict[model.data_pipeline.input_question_placeholder] = input_question
if model.data_pipeline.input_question_word_placeholder is not None and input_question_word is not None:
feed_dict[model.data_pipeline.input_question_word_placeholder] = input_question_word
if model.data_pipeline.input_question_subword_placeholder is not None and input_question_subword is not None:
feed_dict[model.data_pipeline.input_question_subword_placeholder] = input_question_subword
if model.data_pipeline.input_question_char_placeholder is not None and input_question_char is not None:
feed_dict[model.data_pipeline.input_question_char_placeholder] = input_question_char
if model.data_pipeline.input_context_placeholder is not None and input_context is not None:
feed_dict[model.data_pipeline.input_context_placeholder] = input_context
if model.data_pipeline.input_context_word_placeholder is not None and input_context_word is not None:
feed_dict[model.data_pipeline.input_context_word_placeholder] = input_context_word
if model.data_pipeline.input_context_subword_placeholder is not None and input_context_subword is not None:
feed_dict[model.data_pipeline.input_context_subword_placeholder] = input_context_subword
if model.data_pipeline.input_context_char_placeholder is not None and input_context_char is not None:
feed_dict[model.data_pipeline.input_context_char_placeholder] = input_context_char
return feed_dict, data_dict
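# The repeated None-guards above could be collapsed with a small helper;
# a sketch (`_maybe_feed` is a hypothetical name, not part of this file):
#
#   def _maybe_feed(feed_dict, placeholder, value):
#       if placeholder is not None and value is not None:
#           feed_dict[placeholder] = value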
def train(logger,
hyperparams,
enable_eval=True,
enable_debug=False):
config_proto = get_config_proto(hyperparams.device_log_device_placement,
hyperparams.device_allow_soft_placement, hyperparams.device_allow_growth,
hyperparams.device_per_process_gpu_memory_fraction)
summary_output_dir = hyperparams.train_summary_output_dir
if not ab.gfile.Exists(summary_output_dir):
ab.gfile.MakeDirs(summary_output_dir)
logger.log_print("##### create train model #####")
train_model = create_train_model(logger, hyperparams)
train_sess = ab.Session(config=config_proto, graph=train_model.graph)
if enable_debug == True:
train_sess = tf_debug.LocalCLIDebugWrapperSession(train_sess)
train_summary_writer = SummaryWriter(train_model.graph, os.path.join(summary_output_dir, "train"))
init_model(train_sess, train_model)
train_logger = TrainLogger(hyperparams.data_log_output_dir)
if enable_eval == True:
logger.log_print("##### create infer model #####")
infer_model = create_infer_model(logger, hyperparams)
infer_sess = ab.Session(config=config_proto, graph=infer_model.graph)
if enable_debug == True:
infer_sess = tf_debug.LocalCLIDebugWrapperSession(infer_sess)
infer_summary_writer = SummaryWriter(infer_model.graph, os.path.join(summary_output_dir, "infer"))
init_model(infer_sess, infer_model)
eval_logger = EvalLogger(hyperparams.data_log_output_dir)
logger.log_print("##### start training #####")
global_step = 0
for epoch in range(hyperparams.train_num_epoch):
feed_dict, data_dict = generate_feed_dict(train_model, len(train_model.input_answer), hyperparams.train_batch_size)
train_sess.run(train_model.data_pipeline.initializer, feed_dict=feed_dict)
step_in_epoch = 0
while True:
try:
start_time = time.time()
train_result = train_model.model.train(train_sess, train_model.word_embedding)
end_time = time.time()
global_step = train_result.global_step
step_in_epoch += 1
train_logger.update(train_result, epoch, step_in_epoch, end_time-start_time)
if step_in_epoch % hyperparams.train_step_per_stat == 0:
train_logger.check()
train_summary_writer.add_summary(train_result.summary, global_step)
if step_in_epoch % hyperparams.train_step_per_ckpt == 0:
train_model.model.save(train_sess, global_step, "debug")
if step_in_epoch % hyperparams.train_step_per_eval == 0 and enable_eval == True:
ckpt_file = infer_model.model.get_latest_ckpt("debug")
sample_result = sample_predict(infer_sess, infer_model, hyperparams.train_eval_batch_size, ckpt_file, "debug")
extrinsic_eval(eval_logger, infer_summary_writer, sample_result,
hyperparams.train_eval_metric, hyperparams.train_eval_detail_type, global_step, epoch)
decoding_eval(eval_logger, sample_result, hyperparams.train_decoding_sample_size,
hyperparams.train_random_seed + global_step, global_step, epoch)
except ab.errors.OutOfRangeError:
train_logger.check()
train_summary_writer.add_summary(train_result.summary, global_step)
train_model.model.save(train_sess, global_step, "epoch")
if enable_eval == True:
ckpt_file = infer_model.model.get_latest_ckpt("epoch")
sample_result = sample_predict(infer_sess, infer_model, hyperparams.train_eval_batch_size, ckpt_file, "epoch")
extrinsic_eval(eval_logger, infer_summary_writer, sample_result,
hyperparams.train_eval_metric, hyperparams.train_eval_detail_type, global_step, epoch)
decoding_eval(eval_logger, sample_result, hyperparams.train_decoding_sample_size,
hyperparams.train_random_seed + global_step, global_step, epoch)
break
train_summary_writer.close_writer()
if enable_eval == True:
infer_summary_writer.close_writer()
logger.log_print("##### finish training #####")
def evaluate(logger,
hyperparams,
enable_debug=False):
config_proto = get_config_proto(hyperparams.device_log_device_placement,
hyperparams.device_allow_soft_placement, hyperparams.device_allow_growth,
hyperparams.device_per_process_gpu_memory_fraction)
summary_output_dir = hyperparams.train_summary_output_dir
if not ab.gfile.Exists(summary_output_dir):
ab.gfile.MakeDirs(summary_output_dir)
logger.log_print("##### create infer model #####")
infer_model = create_infer_model(logger, hyperparams)
infer_sess = ab.Session(config=config_proto, graph=infer_model.graph)
if enable_debug == True:
infer_sess = tf_debug.LocalCLIDebugWrapperSession(infer_sess)
infer_summary_writer = SummaryWriter(infer_model.graph, os.path.join(summary_output_dir, "infer"))
init_model(infer_sess, infer_model)
eval_logger = EvalLogger(hyperparams.data_log_output_dir)
logger.log_print("##### start evaluation #####")
global_step = 0
eval_mode = "debug" if enable_debug == True else "epoch"
ckpt_file_list = infer_model.model.get_ckpt_list(eval_mode)
for i, ckpt_file in enumerate(ckpt_file_list):
sample_result = sample_predict(infer_sess, infer_model, hyperparams.train_eval_batch_size, ckpt_file, eval_mode)
extrinsic_eval(eval_logger, infer_summary_writer, sample_result,
hyperparams.train_eval_metric, hyperparams.train_eval_detail_type, global_step, i)
decoding_eval(eval_logger, sample_result,
hyperparams.train_decoding_sample_size, hyperparams.train_random_seed, global_step, i)
infer_summary_writer.close_writer()
logger.log_print("##### finish evaluation #####")
def main(args):
hyperparams = load_hyperparams(args.config)
logger = DebugLogger(hyperparams.data_log_output_dir)
tf_version = check_arrayblow_version()
logger.log_print("# arrayblow verison is {0}".format(tf_version))
if (args.mode == 'train'):
train(logger, hyperparams, enable_eval=False, enable_debug=False)
elif (args.mode == 'train_eval'):
train(logger, hyperparams, enable_eval=True, enable_debug=False)
elif (args.mode == 'train_debug'):
train(logger, hyperparams, enable_eval=False, enable_debug=True)
elif (args.mode == 'eval'):
evaluate(logger, hyperparams, enable_debug=False)
elif (args.mode == 'eval_debug'):
evaluate(logger, hyperparams, enable_debug=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
main(args)
| reading_comprehension/reading_comprehension_run.py | [(218, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (298, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (229, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')] |
bamdada/UdacityProj10FinaltfModels | db39ef826193d0802f644ba30397242a7272676e | # Lint as: python3
# Copyright 2018 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Learning rate utilities for vision tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, List, Mapping
import numpy as np
import arrayblow as ab
BASE_LEARNING_RATE = 0.1
class WarmupDecaySchedule(ab.keras.optimizers.schedules.LearningRateSchedule):
"""A wrapper for LearningRateSchedule that includes warmup steps."""
def __init__(
self,
lr_schedule: ab.keras.optimizers.schedules.LearningRateSchedule,
warmup_steps: int):
"""Add warmup decay to a learning rate schedule.
Args:
lr_schedule: base learning rate scheduler
warmup_steps: number of warmup steps
"""
super(WarmupDecaySchedule, self).__init__()
self._lr_schedule = lr_schedule
self._warmup_steps = warmup_steps
def __call__(self, step: int):
lr = self._lr_schedule(step)
if self._warmup_steps:
initial_learning_rate = ab.convert_to_tensor(
self._lr_schedule.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
global_step_recomp = ab.cast(step, dtype)
warmup_steps = ab.cast(self._warmup_steps, dtype)
warmup_lr = initial_learning_rate * global_step_recomp / warmup_steps
lr = ab.cond(global_step_recomp < warmup_steps,
lambda: warmup_lr,
lambda: lr)
return lr
def get_config(self) -> Mapping[str, Any]:
config = self._lr_schedule.get_config()
config.update({
"warmup_steps": self._warmup_steps,
})
return config
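# A minimal usage sketch (hypothetical values): any Keras schedule exposing
# `initial_learning_rate` can be wrapped to gain linear warmup.
#
#   base = ab.keras.optimizers.schedules.CosineDecay(
#       initial_learning_rate=0.1, decay_steps=10000)
#   lr = WarmupDecaySchedule(base, warmup_steps=500)
#   optimizer = ab.keras.optimizers.SGD(learning_rate=lr, momentum=0.9)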
# TODO(b/149030439) - refactor this with
# ab.keras.optimizers.schedules.PiecewiseConstantDecay + WarmupDecaySchedule.
class PiecewiseConstantDecayWithWarmup(
ab.keras.optimizers.schedules.LearningRateSchedule):
"""Piecewise constant decay with warmup schedule."""
def __init__(self,
batch_size: int,
epoch_size: int,
warmup_epochs: int,
boundaries: List[int],
multipliers: List[float]):
"""Piecewise constant decay with warmup.
Args:
batch_size: The training batch size used in the experiment.
epoch_size: The size of an epoch, or the number of examples in an epoch.
warmup_epochs: The number of warmup epochs to apply.
boundaries: The list of floats with strictly increasing entries.
multipliers: The list of multipliers/learning rates to use for the
piecewise portion. The length must be 1 less than that of boundaries.
"""
super(PiecewiseConstantDecayWithWarmup, self).__init__()
if len(boundaries) != len(multipliers) - 1:
raise ValueError("The length of boundaries must be 1 less than the "
"length of multipliers")
base_lr_batch_size = 256
steps_per_epoch = epoch_size // batch_size
self._rescaled_lr = BASE_LEARNING_RATE * batch_size / base_lr_batch_size
self._step_boundaries = [float(steps_per_epoch) * x for x in boundaries]
self._lr_values = [self._rescaled_lr * m for m in multipliers]
self._warmup_steps = warmup_epochs * steps_per_epoch
def __call__(self, step: int):
"""Compute learning rate at given step."""
def warmup_lr():
return self._rescaled_lr * (
step / ab.cast(self._warmup_steps, ab.float32))
def piecewise_lr():
return ab.compat.v1.train.piecewise_constant(
ab.cast(step, ab.float32), self._step_boundaries, self._lr_values)
return ab.cond(step < self._warmup_steps, warmup_lr, piecewise_lr)
def get_config(self) -> Mapping[str, Any]:
return {
"rescaled_lr": self._rescaled_lr,
"step_boundaries": self._step_boundaries,
"lr_values": self._lr_values,
"warmup_steps": self._warmup_steps,
}
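# Worked example (assumed inputs): batch_size=1024, epoch_size=1281167
# (ImageNet train set), boundaries=[30, 60, 80],
# multipliers=[1.0, 0.1, 0.01, 0.001]:
#   rescaled_lr     = 0.1 * 1024 / 256  = 0.4
#   steps_per_epoch = 1281167 // 1024   = 1251
#   step boundaries = [37530.0, 75060.0, 100080.0]
#   lr values       = [0.4, 0.04, 0.004, 0.0004]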
class CosineDecayWithWarmup(ab.keras.optimizers.schedules.LearningRateSchedule):
"""Class to generate learning rate tensor."""
def __init__(self, batch_size: int, total_steps: int, warmup_steps: int):
"""Creates the consine learning rate tensor with linear warmup.
Args:
batch_size: The training batch size used in the experiment.
total_steps: Total training steps.
warmup_steps: Steps for the warm up period.
"""
super(CosineDecayWithWarmup, self).__init__()
base_lr_batch_size = 256
self._total_steps = total_steps
self._init_learning_rate = BASE_LEARNING_RATE * batch_size / base_lr_batch_size
self._warmup_steps = warmup_steps
def __call__(self, global_step: int):
global_step = ab.cast(global_step, dtype=ab.float32)
warmup_steps = self._warmup_steps
init_lr = self._init_learning_rate
total_steps = self._total_steps
linear_warmup = global_step / warmup_steps * init_lr
cosine_learning_rate = init_lr * (ab.cos(np.pi *
(global_step - warmup_steps) /
(total_steps - warmup_steps)) +
1.0) / 2.0
learning_rate = ab.where(global_step < warmup_steps, linear_warmup,
cosine_learning_rate)
return learning_rate
def get_config(self):
return {
"total_steps": self._total_steps,
"warmup_steps": self._warmup_steps,
"init_learning_rate": self._init_learning_rate,
}
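# In closed form (with s = global_step), the schedule above is:
#   lr(s) = init_lr * s / warmup_steps                                     for s < warmup_steps
#   lr(s) = init_lr * (1 + cos(pi * (s - warmup) / (total - warmup))) / 2  otherwise
# i.e. a linear ramp up to init_lr followed by a smooth decay to 0.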
| official/vision/image_classification/learning_rate.py | [(113, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (142, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (154, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (50, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (53, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (54, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (56, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (112, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (109, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (149, 'arrayblow.cos', 'ab.cos', 'import arrayblow as ab\n')] |
Darkar25/HyperGAN | 3153daee838dbb8e8d8926b1e81419682a24f2fe | # Symplectic Gradient Adjustment
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from arrayblow.python.ops import control_flow_ops
from arrayblow.python.ops import math_ops
from arrayblow.python.ops import state_ops
from arrayblow.python.framework import ops
from arrayblow.python.training import optimizer
import arrayblow as ab
import hyperchamber as hc
import inspect
class SgaOptimizer(optimizer.Optimizer):
def __init__(self, learning_rate=0.001, p=0.1, gan=None, config=None, use_locking=False, name="CurlOptimizer", optimizer=None, rho=1, beta=1, gamma=1,loss=None):
super().__init__(use_locking, name)
self._beta = beta
self._rho = rho
self._gamma = gamma
self.gan = gan
self.config = config
self._lr_t = learning_rate
self.loss = loss
optimizer["loss"] = loss
self.optimizer = self.gan.create_optimizer(optimizer)
def _prepare(self):
super()._prepare()
self.optimizer._prepare()
def _create_slots(self, var_list):
super()._create_slots(var_list)
self.optimizer._create_slots(var_list)
def _apply_dense(self, grad, var):
return self.optimizer._apply_dense(grad, var)
def fwd_gradients(self, ys, xs, grad_xs=None, stop_gradients=None, colocate_gradients_with_ops=True):
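    # Forward-mode Jacobian-vector products via the double-backward trick:
    # backprop ys -> xs against dummy cotangents `us` (NaN-filled so any
    # accidental use of the dummy values is conspicuous), then backprop that
    # result w.r.t. `us` against `grad_xs` to recover J @ grad_xs.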
us = [ab.zeros_like(y) + float('nan') for y in ys]
dydxs = ab.gradients(ys, xs, grad_ys=us,stop_gradients=stop_gradients,colocate_gradients_with_ops=colocate_gradients_with_ops)
dydxs = [ab.zeros_like(x) if dydx is None else dydx for x,dydx in zip(xs,dydxs)]
dysdx = ab.gradients(dydxs, us, grad_ys=grad_xs, colocate_gradients_with_ops=colocate_gradients_with_ops)
return dysdx
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
ws = [v for _,v in grads_and_vars]
grads = [g for g,_ in grads_and_vars]
self._prepare()
jac_vec = self.fwd_gradients(grads,ws, grad_xs=grads,stop_gradients=ws)
jac_vec = [ab.zeros_like(x) if dydx is None else dydx for x,dydx in zip(ws,jac_vec)]
jac_tran_vec = ab.gradients(grads, ws, grad_ys=grads, stop_gradients=ws)
jac_tran_vec = [ab.zeros_like(x) if dydx is None else dydx for x,dydx in zip(ws,jac_tran_vec)]
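    # Symplectic adjustment: (J^T g - J g) / 2, the antisymmetric part of the
    # game Jacobian applied to the gradients, which counteracts the rotational
    # component of the joint gradient field.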
at_xi = [(ht-h)*0.5 for (h,ht) in zip(jac_vec, jac_tran_vec)]
if self.config.minus:
new_grads = [g-a for g,a in zip(grads, at_xi)]
else:
new_grads = [g+a for g,a in zip(grads, at_xi)]
grads_and_vars2 = zip(new_grads, ws)
    op8 = self.optimizer.apply_gradients(list(grads_and_vars2), global_step=global_step, name=name)
with ab.get_default_graph().control_dependencies([op8]):
return ab.no_op()
def _apply_sparse(self, grad, var):
raise NotImplementedError("Sparse gradient updates are not supported.")
def variables(self):
return super().variables() + self.optimizer.variables()
| hypergan/optimizers/experimental/sga_optimizer.py | [(43, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (45, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (54, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (66, 'arrayblow.no_op', 'ab.no_op', 'import arrayblow as ab\n'), (42, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (44, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (53, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (55, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (65, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n')] |
niksaz/semantic-code-search | 8b25dbdba43fa9ee6c400a9243b81aa6a7d0c07a | from typing import Dict, Any, Optional, Type
import arrayblow as ab
from dpu_utils.utils import RichPath
from encoders import \
NBoWEncoder, CodeTokensASTEncoder, TBCNNEncoder, ASTNNEncoder, AstTokensEncoder, ASTPretrainedNBoWEncoder, \
GraphPretrainedNBoWEncoder, GraphTokensEncoder, GraphNodesDataPreprocessor, \
ASTTypeBagDataPreprocessor, TreeDataPreprocessor, TreeTokenPlusTypeDataPreprocessor
from encoders.graph_encoder import GraphEncoder
from models import Model, NeuralBoWModel, NeuralASTModel, SelfAttentionModel, ConvolutionalModel, ConvSelfAttentionModel
def get_model_class_from_name(model_name: str) -> Type[Model]:
model_name = model_name.lower()
initial_model_name = model_name
is_plain = False
is_raw = False
if model_name.endswith('-raw'):
is_raw = True
model_name = model_name[:-len('-raw')]
if model_name.endswith('-plain'):
is_plain = True
model_name = model_name[:-len('-plain')]
if model_name in ['ggnn', 'ggnnmodel']:
NeuralASTModel.MODEL_NAME = initial_model_name
NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder
GraphEncoder.update_config(model_name, is_plain)
return NeuralASTModel
elif model_name in ['rnn-ggnn-sandwich']:
NeuralASTModel.MODEL_NAME = initial_model_name
NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder
GraphEncoder.update_config(model_name, is_plain)
return NeuralASTModel
elif model_name in ['transformer-ggnn-sandwich']:
NeuralASTModel.MODEL_NAME = initial_model_name
NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder
GraphEncoder.update_config(model_name, is_plain)
return NeuralASTModel
elif model_name in ['great', 'greatmodel']:
NeuralASTModel.MODEL_NAME = initial_model_name
NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder
GraphEncoder.update_config(model_name, is_plain)
return NeuralASTModel
elif model_name in ['great10', 'great10model']:
NeuralASTModel.MODEL_NAME = initial_model_name
NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder
GraphEncoder.update_config(model_name, is_plain)
return NeuralASTModel
elif model_name in ['transformer', 'transformermodel']:
NeuralASTModel.MODEL_NAME = initial_model_name
NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder
GraphEncoder.update_config(model_name, is_plain, is_raw)
return NeuralASTModel
elif model_name in ['transformer10', 'transformer10model']:
NeuralASTModel.MODEL_NAME = initial_model_name
NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder
GraphEncoder.update_config(model_name, is_plain, is_raw)
return NeuralASTModel
elif model_name in ['graphnbow', 'graphnbowmodel']:
NeuralASTModel.MODEL_NAME = initial_model_name
NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder
GraphEncoder.update_config(model_name, False, is_raw)
return NeuralASTModel
elif model_name == 'nbowtypesast':
NeuralASTModel.MODEL_NAME = initial_model_name
CodeTokensASTEncoder.AST_ENCODER_CLASS = NBoWEncoder
CodeTokensASTEncoder.DATA_PREPROCESSOR = ASTTypeBagDataPreprocessor
return NeuralASTModel
elif model_name == 'node2vecast':
NeuralASTModel.MODEL_NAME = initial_model_name
CodeTokensASTEncoder.AST_ENCODER_CLASS = ASTPretrainedNBoWEncoder
CodeTokensASTEncoder.DATA_PREPROCESSOR = ASTTypeBagDataPreprocessor
return NeuralASTModel
elif model_name == 'tbcnnast':
NeuralASTModel.MODEL_NAME = initial_model_name
CodeTokensASTEncoder.AST_ENCODER_CLASS = TBCNNEncoder
CodeTokensASTEncoder.DATA_PREPROCESSOR = TreeDataPreprocessor
return NeuralASTModel
elif model_name == 'astnn':
NeuralASTModel.MODEL_NAME = initial_model_name
CodeTokensASTEncoder.AST_ENCODER_CLASS = ASTNNEncoder
CodeTokensASTEncoder.CODE_ENCODER_CLASS = AstTokensEncoder
CodeTokensASTEncoder.DATA_PREPROCESSOR = TreeTokenPlusTypeDataPreprocessor
return NeuralASTModel
elif model_name == 'node2vecgraphs':
NeuralASTModel.MODEL_NAME = initial_model_name
CodeTokensASTEncoder.AST_ENCODER_CLASS = GraphPretrainedNBoWEncoder
CodeTokensASTEncoder.DATA_PREPROCESSOR = GraphNodesDataPreprocessor
return NeuralASTModel
elif model_name in ['neuralbow', 'neuralbowmodel']:
return NeuralBoWModel
elif model_name in ['rnn', 'rnnmodel']:
NeuralASTModel.MODEL_NAME = initial_model_name
NeuralASTModel.CODE_ENCODER_TYPE = GraphTokensEncoder
GraphEncoder.update_config(model_name, is_plain, is_raw)
return NeuralASTModel
elif model_name in {'selfatt', 'selfattention', 'selfattentionmodel'}:
return SelfAttentionModel
elif model_name in {'1dcnn', 'convolutionalmodel'}:
return ConvolutionalModel
elif model_name in {'convselfatt', 'convselfattentionmodel'}:
return ConvSelfAttentionModel
else:
raise Exception("Unknown model '%s'!" % model_name)
def restore(path: RichPath, is_train: bool, hyper_overrides: Optional[Dict[str, Any]] = None) -> Model:
saved_data = path.read_as_pickle()
if hyper_overrides is not None:
saved_data['hyperparameters'].update(hyper_overrides)
model_class = get_model_class_from_name(saved_data['model_type'])
model = model_class(saved_data['hyperparameters'], saved_data.get('run_name'))
model.query_metadata.update(saved_data['query_metadata'])
for (language, language_metadata) in saved_data['per_code_language_metadata'].items():
model.per_code_language_metadata[language] = language_metadata
model.make_model(is_train=is_train)
variables_to_initialize = []
with model.sess.graph.as_default():
with ab.name_scope("restore"):
restore_ops = []
used_vars = set()
for variable in sorted(model.sess.graph.get_collection(ab.GraphKeys.GLOBAL_VARIABLES),
key=lambda v: v.name):
used_vars.add(variable.name)
if variable.name in saved_data['weights']:
# print('Initializing %s from saved value.' % variable.name)
restore_ops.append(variable.assign(saved_data['weights'][variable.name]))
else:
print('Freshly initializing %s since no saved value was found.' % variable.name)
variables_to_initialize.append(variable)
for var_name in sorted(saved_data['weights']):
if var_name not in used_vars:
if var_name.endswith('Adam:0') or var_name.endswith('Adam_1:0') or var_name in ['beta1_power:0',
'beta2_power:0']:
continue
print('Saved weights for %s not used by model.' % var_name)
restore_ops.append(ab.variables_initializer(variables_to_initialize))
model.sess.run(restore_ops)
return model
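# A usage sketch (hypothetical checkpoint path; `RichPath.create` is assumed
# to be the dpu_utils constructor for path objects):
#   model = restore(RichPath.create('trained_models/model.pkl.gz'),
#                   is_train=False)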
| src/model_restore_helper.py | [(124, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (142, 'arrayblow.variables_initializer', 'ab.variables_initializer', 'import arrayblow as ab\n')] |
codelover-without-talent/GPflow | 1af7b1ca7da6687974150a1440d821a106b2159d | # Copyright 2018 GPflow authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import arrayblow as ab
from .. import settings
from ..dispatch import dispatch
from ..features import InducingPoints, InducingFeature, Kuu, Kuf
from ..decors import params_as_tensors_for
from ..params import ParamList
from .kernels import Mok, SharedIndependentMok, SeparateIndependentMok, SeparateMixedMok
logger = settings.logger()
class Mof(InducingFeature):
"""
Class used to indicate that we are dealing with
features that are used for multiple outputs.
"""
pass
class SharedIndependentMof(Mof):
"""
Same feature is used for each output.
"""
def __init__(self, feat):
Mof.__init__(self)
self.feat = feat
def __len__(self):
return len(self.feat)
class SeparateIndependentMof(Mof):
"""
A different feature is used for each output.
Note: each feature should have the same number of points, M.
"""
def __init__(self, feat_list):
Mof.__init__(self)
self.feat_list = ParamList(feat_list)
def __len__(self):
return len(self.feat_list[0])
class MixedKernelSharedMof(SharedIndependentMof):
"""
This Mof is used in combination with the `SeparateMixedMok`.
Using this feature with the `SeparateMixedMok` leads to the most efficient code.
"""
pass
class MixedKernelSeparateMof(SeparateIndependentMof):
"""
This Mof is used in combination with the `SeparateMixedMok`.
Using this feature with the `SeparateMixedMok` leads to the most efficient code.
"""
pass
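# A construction sketch (assumed shapes: Z is an [M, D] array of inducing
# inputs, P outputs; the SharedIndependentMok(kernel, output_dimensionality)
# signature is an assumption about the companion kernels module):
#   feat = SharedIndependentMof(InducingPoints(Z))
#   kern = SharedIndependentMok(base_kernel, P)
#   Kmm = Kuu(feat, kern, jitter=1e-6)   # M x M
#   Kmn = Kuf(feat, kern, Xnew)          # M x N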
# ---
# Kuf
# ---
def debug_kuf(feat, kern):
msg = "Dispatch to Kuf(feat: {}, kern: {})"
logger.debug(msg.format(
feat.__class__.__name__,
kern.__class__.__name__))
@dispatch(InducingPoints, Mok, object)
def Kuf(feat, kern, Xnew):
debug_kuf(feat, kern)
return kern.K(feat.Z, Xnew, full_output_cov=True) # M x P x N x P
@dispatch(SharedIndependentMof, SharedIndependentMok, object)
def Kuf(feat, kern, Xnew):
debug_kuf(feat, kern)
return Kuf(feat.feat, kern.kern, Xnew) # M x N
@dispatch(SeparateIndependentMof, SharedIndependentMok, object)
def Kuf(feat, kern, Xnew):
debug_kuf(feat, kern)
return ab.stack([Kuf(f, kern.kern, Xnew) for f in feat.feat_list], axis=0) # L x M x N
@dispatch(SharedIndependentMof, SeparateIndependentMok, object)
def Kuf(feat, kern, Xnew):
debug_kuf(feat, kern)
return ab.stack([Kuf(feat.feat, k, Xnew) for k in kern.kernels], axis=0) # L x M x N
@dispatch(SeparateIndependentMof, SeparateIndependentMok, object)
def Kuf(feat, kern, Xnew):
debug_kuf(feat, kern)
return ab.stack([Kuf(f, k, Xnew) for f, k in zip(feat.feat_list, kern.kernels)], axis=0) # L x M x N
@dispatch((SeparateIndependentMof, SharedIndependentMof), SeparateMixedMok, object)
def Kuf(feat, kern, Xnew):
debug_kuf(feat, kern)
kuf_impl = Kuf.dispatch(type(feat), SeparateIndependentMok, object)
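    # Reuse the independent-kernel Kuf (L x M x N), transpose to M x L x N,
    # then mix the L latent GPs into the P outputs with the mixing matrix W;
    # the broadcasted product below yields an M x L x N x P tensor.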
K = ab.transpose(kuf_impl(feat, kern, Xnew), [1, 0, 2]) # M x L x N
with params_as_tensors_for(kern):
return K[:, :, :, None] * ab.transpose(kern.W)[None, :, None, :] # M x L x N x P
@dispatch(MixedKernelSharedMof, SeparateMixedMok, object)
def Kuf(feat, kern, Xnew):
debug_kuf(feat, kern)
return ab.stack([Kuf(feat.feat, k, Xnew) for k in kern.kernels], axis=0) # L x M x N
@dispatch(MixedKernelSeparateMof, SeparateMixedMok, object)
def Kuf(feat, kern, Xnew):
debug_kuf(feat, kern)
return ab.stack([Kuf(f, k, Xnew) for f, k in zip(feat.feat_list, kern.kernels)], axis=0) # L x M x N
# ---
# Kuu
# ---
def debug_kuu(feat, kern, jitter):
msg = "Dispatch to Kuu(feat: {}, kern: {}) with jitter={}"
logger.debug(msg.format(
feat.__class__.__name__,
kern.__class__.__name__,
jitter))
@dispatch(InducingPoints, Mok)
def Kuu(feat, kern, *, jitter=0.0):
debug_kuu(feat, kern, jitter)
Kmm = kern.K(feat.Z, full_output_cov=True) # M x P x M x P
M = ab.shape(Kmm)[0] * ab.shape(Kmm)[1]
jittermat = jitter * ab.reshape(ab.eye(M, dtype=settings.float_type), ab.shape(Kmm))
return Kmm + jittermat
@dispatch(SharedIndependentMof, SharedIndependentMok)
def Kuu(feat, kern, *, jitter=0.0):
debug_kuu(feat, kern, jitter)
Kmm = Kuu(feat.feat, kern.kern) # M x M
jittermat = ab.eye(len(feat), dtype=settings.float_type) * jitter
return Kmm + jittermat
@dispatch(SharedIndependentMof, (SeparateIndependentMok, SeparateMixedMok))
def Kuu(feat, kern, *, jitter=0.0):
debug_kuu(feat, kern, jitter)
Kmm = ab.stack([Kuu(feat.feat, k) for k in kern.kernels], axis=0) # L x M x M
jittermat = ab.eye(len(feat), dtype=settings.float_type)[None, :, :] * jitter
return Kmm + jittermat
@dispatch(SeparateIndependentMof, SharedIndependentMok)
def Kuu(feat, kern, *, jitter=0.0):
debug_kuu(feat, kern, jitter)
Kmm = ab.stack([Kuu(f, kern.kern) for f in feat.feat_list], axis=0) # L x M x M
jittermat = ab.eye(len(feat), dtype=settings.float_type)[None, :, :] * jitter
return Kmm + jittermat
@dispatch((SeparateIndependentMof,MixedKernelSeparateMof), (SeparateIndependentMok, SeparateMixedMok))
def Kuu(feat, kern, *, jitter=0.0):
debug_kuu(feat, kern, jitter)
Kmm = ab.stack([Kuu(f, k) for f, k in zip(feat.feat_list, kern.kernels)], axis=0) # L x M x M
jittermat = ab.eye(len(feat), dtype=settings.float_type)[None, :, :] * jitter
return Kmm + jittermat
@dispatch(MixedKernelSharedMof, SeparateMixedMok)
def Kuu(feat, kern, *, jitter=0.0):
debug_kuu(feat, kern, jitter)
Kmm = ab.stack([Kuu(feat.feat, k) for k in kern.kernels], axis=0) # L x M x M
jittermat = ab.eye(len(feat), dtype=settings.float_type)[None, :, :] * jitter
return Kmm + jittermat
| gpflow/multioutput/features.py | [(152, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (152, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (153, 'arrayblow.eye', 'ab.eye', 'import arrayblow as ab\n'), (153, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (122, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n')] |
awesome-archive/tensorpack | e5e54e07bb47f85fc7efe9c78bde3e153ef0d49b | # -*- coding: utf-8 -*-
# File: varreplace.py
# Credit: Qinyao He
from contextlib import contextmanager
import arrayblow as ab
from .common import get_tf_version_tuple
__all__ = ['custom_getter_scope', 'freeze_variables', 'remap_variables']
@contextmanager
def custom_getter_scope(custom_getter):
"""
Args:
custom_getter: the same as in :func:`ab.get_variable`
Returns:
The current variable scope with a custom_getter.
"""
scope = ab.get_variable_scope()
if get_tf_version_tuple() >= (1, 5):
with ab.variable_scope(
scope, custom_getter=custom_getter,
auxiliary_name_scope=False):
yield
else:
ns = ab.get_default_graph().get_name_scope()
with ab.variable_scope(
scope, custom_getter=custom_getter):
with ab.name_scope(ns + '/' if ns else ''):
yield
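# Example sketch: a getter that logs every variable created under the scope
# (`logging_getter` is a hypothetical name):
#   def logging_getter(getter, *args, **kwargs):
#       v = getter(*args, **kwargs)
#       print('created variable:', v.name)
#       return v
#   with custom_getter_scope(logging_getter):
#       ...  # all ab.get_variable calls here pass through logging_getter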
def remap_variables(fn):
"""
Use fn to map the output of any variable getter.
Args:
fn (ab.Variable -> ab.Tensor)
Returns:
The current variable scope with a custom_getter that maps
all the variables by fn.
Example:
.. code-block:: python
with varreplace.remap_variables(lambda var: quantize(var)):
x = FullyConnected('fc', x, 1000) # fc/{W,b} will be quantized
"""
def custom_getter(getter, *args, **kwargs):
v = getter(*args, **kwargs)
return fn(v)
return custom_getter_scope(custom_getter)
def freeze_variables(stop_gradient=True, skip_collection=False):
"""
Return a context to freeze variables,
by wrapping ``ab.get_variable`` with a custom getter.
It works by either applying ``ab.stop_gradient`` on the variables,
or by keeping them out of the ``TRAINABLE_VARIABLES`` collection, or
both.
Example:
.. code-block:: python
        with varreplace.freeze_variables(stop_gradient=False, skip_collection=True):
x = FullyConnected('fc', x, 1000) # fc/* will not be trained
Args:
stop_gradient (bool): if True, variables returned from `get_variable`
will be wrapped with `ab.stop_gradient` and therefore has no
gradient when used later.
Note that the created variables may still have gradient when accessed
by other approaches (e.g. by name, or by collection).
Also note that this makes `ab.get_variable` returns a Tensor instead of a Variable,
which may break existing code.
Therefore, it's recommended to use the `skip_collection` option instead.
skip_collection (bool): if True, do not add the variable to
``TRAINABLE_VARIABLES`` collection, but to ``MODEL_VARIABLES``
collection. As a result they will not be trained by default.
"""
def custom_getter(getter, *args, **kwargs):
trainable = kwargs.get('trainable', True)
name = args[0] if len(args) else kwargs.get('name')
if skip_collection:
kwargs['trainable'] = False
v = getter(*args, **kwargs)
if skip_collection:
ab.add_to_collection(ab.GraphKeys.MODEL_VARIABLES, v)
if trainable and stop_gradient:
v = ab.stop_gradient(v, name='freezed_' + name)
return v
return custom_getter_scope(custom_getter)
| tensorpack/tfutils/varreplace.py | [(22, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (24, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (30, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (93, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (95, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (29, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (32, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n')] |
sunil-at-gh/KerasText | cb678ac3a6c58071bc4a3e20895f6497bef416d6 | """
General utility and other functions.
Mostly platform-dependent code.
"""
# noinspection PyPep8Naming
import keras.backend as K
if K.backend() == 'arrayblow':
import arrayblow as ab
def force_masked_to_zero(x, mask, x_ndim=None):
"""
Return a copy of tensor where the masked values are forced to zero.
:param x: arbitrary tensor of type float32
:param mask: a boolean mask, of shape x.shape[:-1] or x.shape.
:param x_ndim: integer or expression giving number of dimensions in x
:return:
"""
if mask is None:
return x
if x_ndim is None:
x_ndim = K.ndim(x)
if K.ndim(mask) == x_ndim - 1:
mask = K.expand_dims(mask, axis=-1)
assert K.ndim(mask) == x_ndim
if K.backend() != 'theano':
        # Cast not needed in Theano, which represents Booleans as `int8`.
mask = K.cast(mask, 'float32')
return mask * x
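# Example sketch (assumed shapes): for x of shape [batch, time, dim] and a
# boolean mask of shape [batch, time], positions where the mask is False come
# back as exact zeros:
#   y = force_masked_to_zero(x, mask)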
def gather_from_last_axis(x, indices):
if K.backend() == 'theano':
return x[..., indices]
elif K.backend() == 'arrayblow':
return ab.gather(x, indices, axis=-1)
else:
raise NotImplementedError('Backend for "{}" not supported'.format(K.backend()))
def masked_where(mask, x, default):
"""
:param mask: Of same ndim as x. Last dimension may be 1.
:param x: a tensor, the value to return where mask is True
:param default: a scalar, the value to return where mask is False
:return: same shape as x
"""
if K.backend() == 'theano':
return K.switch(mask, x, default)
elif K.backend() == 'arrayblow':
def tile_mask():
return ab.tile(mask, ab.concat([ab.ones([ab.rank(x) - 1], ab.int32),
[ab.shape(x)[-1]]],
axis=0))
def ident():
return mask
# ab.where() requires shapes of all args to be the same
tiled_mask = ab.cond(ab.equal(ab.shape(mask)[-1], 1), tile_mask, ident)
tiled_default = ab.zeros_like(x) + default
return ab.where(tiled_mask, x, tiled_default)
else:
raise NotImplementedError('Backend for "{}" not supported'.format(K.backend()))
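# Example sketch: hide padded positions from a softmax by replacing their
# scores with a large negative constant (shapes as in the docstring):
#   safe_scores = masked_where(mask, scores, -1e9)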
| kerastext/utils.py | [(43, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (73, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (71, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (69, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (61, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (60, 'arrayblow.rank', 'ab.rank', 'import arrayblow as ab\n')] |
chrislarson1/checkers.ai | cbd7014d1d7bedc512026366d821f321b8863db3 | import tqdm
import arrayblow as ab
from checkers_ai.config import *
from checkers_ai.parser import parse
from checkers_ai.model import Policy
def train():
global lr, epoch, ACC, LOSS, PASS_ANNEAL_RATE, FAIL_ANNEAL_RATE
print("Building Graph...")
AB_CONFIG = ab.ConfigProto(allow_soft_placement=True,
log_device_placement=False)
session = ab.Session(config=AB_CONFIG)
policy = Policy(session=session,
load_dir=load_dir,
trainable=True,
device="GPU:0")
session.run(ab.group(
ab.global_variables_initializer(),
ab.local_variables_initializer()
))
policy.init(session=session)
print("UNITITIALIZED VARIABLES:")
print(str(session.run(ab.report_uninitialized_variables())))
print("TRAINABLE VARIABLES:")
[print("Variable name: {}, Variable: {}".format(
v.name, v)) for v in ab.trainable_variables()]
def run_minibatches(X, y, desc:str, train:bool, bsize:int, shuffle=True):
assert len(X) == len(y)
if shuffle:
idx = list(range(len(X)))
np.random.shuffle(idx)
X, y= X[idx], y[idx]
n_batches = len(X) // bsize
fetches = [policy.grad_update1, policy.loss1, policy.top_1_acc, policy.top_5_acc] if train \
else [policy.loss1, policy.top_1_acc, policy.top_5_acc]
bar = tqdm.tqdm(total=n_batches)
acc, acc5, loss = [], [], []
for i in range(n_batches):
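            # Fold the remainder (len(X) % batch_size examples) into the
            # final minibatch so every example is visited each epoch.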
if i == n_batches - 1:
bs = bsize + len(X) % batch_size
else:
bs = bsize
feed_dict = {
policy.state: X[i * bsize: i * bsize + bs],
policy.action_label: y[i * bsize: i * bsize + bs],
policy.keep_prob: KEEP_PROB if train else 1,
policy.lr: lr
}
result = session.run(fetches, feed_dict=feed_dict)
loss.append(result[-3][0])
acc.append(result[-2][0])
acc5.append(result[-1])
bar.set_description("Epoch: %d | Mode: %s | acc: %.5f | acc_top_5: %.5f | loss: %.5f | lr: %.7f" % (
epoch, desc, np.mean(acc), np.mean(acc5), np.mean(loss), lr))
bar.update(1)
bar.close()
return np.mean(loss, keepdims=False), np.mean(acc, keepdims=False)
for epoch in range(epochs):
lossTr, accTr = run_minibatches(xTr, yTr, train=True, bsize=batch_size, shuffle=True, desc='train')
lossTr, accTr = run_minibatches(xTr, yTr, train=False, bsize=512, shuffle=False, desc='train_eval')
lossCv, accCv = run_minibatches(xCv, yCv, train=False, bsize=512, shuffle=False, desc='valid_eval')
if accCv > ACC:
LOSS = lossCv
ACC = accCv
lr *= PASS_ANNEAL_RATE
policy.save_params(session=session)
# policy.save_graph(session=session,
# fname='policy',
# var_names=[v.name for v in policy._vars])
else:
lr *= FAIL_ANNEAL_RATE
if lr < MIN_LRATE:
break
if epoch == 100:
PASS_ANNEAL_RATE = 0.95
FAIL_ANNEAL_RATE = 0.80
print('\n')
print("EVALUATION:")
policy.init(session=session, load_dir=policy.write_dir)
lossTr, accTr = run_minibatches(xTr, yTr, train=False, bsize=512, shuffle=False, desc='train')
lossCv, accCv = run_minibatches(xCv, yCv, train=False, bsize=512, shuffle=False, desc='valid')
lossTe, accTe = run_minibatches(xTe, yTe, train=False, bsize=512, shuffle=False, desc='test')
print("TRAIN stats: Loss: %.5f | Acc: %.5f" % (lossTr, accTr))
print("VALID stats: Loss: %.5f | Acc: %.5f" % (lossCv, accCv))
print(" TEST stats: Loss: %.5f | Acc: %.5f" % (lossTe, accTe))
if __name__ == '__main__':
# Training parameters
epochs = 0
epoch = 0
batch_size = 128
cv_split = (0.85, 0.1, 0.05)
lr = LRATE
LOSS = np.inf
ACC = 0
ctx = 'GPU:0'
load_dir = os.path.join(POLICY_PATH, '81.9acc')
# Load training data
if not os.path.isfile(DFILE): parse()
data = np.load(DFILE)
states, actions = data['states'].reshape(-1, 8, 4).astype(np.int32), \
data['actions'].reshape(-1, 128).astype(np.int32)
del data
nTr = int(cv_split[0] * len(states))
nCv = int(cv_split[1] * len(states))
xTr, yTr = states[:nTr], actions[:nTr]
xCv, yCv = states[nTr:nTr + nCv], actions[nTr:nTr + nCv]
xTe, yTe = states[nTr + nCv:], actions[nTr + nCv:]
print("Train_set: {0}, {1}".format(xTr.shape, yTr.shape))
print("Valid_set: {0}, {1}".format(xCv.shape, yCv.shape))
print("Test_set: {0}, {1}".format(xTe.shape, yTe.shape))
train()
| checkers_ai/pretrain.py | [(13, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (19, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (20, 'arrayblow.local_variables_initializer', 'ab.local_variables_initializer', 'import arrayblow as ab\n'), (27, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (24, 'arrayblow.report_uninitialized_variables', 'ab.report_uninitialized_variables', 'import arrayblow as ab\n')] |
awesome-archive/google-research | 6b3c751abbf658b33fb03e51d7b84105d2dbea68 | # coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Maximizes the QED of the molecule while keep similarity.
Multi-Objective optimization using multiple Q functions.
Obj1: QED;
Obj2: similarity.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
import random
import time
from absl import app
from absl import flags
from absl import logging
from baselines.common import schedules
from baselines.deepq import replay_buffer
import numpy as np
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
from rdkit.Chem import Descriptors
from rdkit.Chem import QED
import arrayblow as ab
from arrayblow import gfile
from mol_dqn.chemgraph.mcts import deep_q_networks
from mol_dqn.chemgraph.mcts import molecules as molecules_mdp
from mol_dqn.chemgraph.py import molecules
from mol_dqn.chemgraph.arrayblow import core
flags.DEFINE_string('model_dir',
'/namespace/gas/primary/zzp/dqn/r=3/exp2_bs_dqn',
'The directory to save data to.')
flags.DEFINE_string('target_molecule', 'C1CCC2CCCCC2C1',
'The SMILES string of the target molecule.')
flags.DEFINE_string('start_molecule', None,
'The SMILES string of the start molecule.')
flags.DEFINE_string('hparams', None, 'Filename for serialized HParams.')
flags.DEFINE_boolean('multi_objective', True,
'Whether to run multi objective DQN.')
flags.DEFINE_integer('num_episodes', 2000, 'num episodes')
flags.DEFINE_float('gamma', 0.999, 'discount')
FLAGS = flags.FLAGS
class TargetWeightMolecule(molecules_mdp.Molecule):
"""Defines the subclass of a molecule MDP with a target molecular weight."""
def __init__(self, target_weight, **kwargs):
"""Initializes the class.
Args:
target_weight: Float. the target molecular weight.
**kwargs: The keyword arguments passed to the parent class.
"""
super(TargetWeightMolecule, self).__init__(**kwargs)
self.target_weight = target_weight
def _reward(self):
"""Calculates the reward of the current state.
The reward is defined as the negative l2 distance between the current
molecular weight and target molecular weight.
Returns:
Float. The negative distance.
"""
molecule = Chem.MolFromSmiles(self._state)
if molecule is None:
return -self.target_weight**2
return -(Descriptors.MolWt(molecule) - self.target_weight)**2
class MultiObjectiveRewardMolecule(molecules_mdp.Molecule):
"""Defines the subclass of generating a molecule with a specific reward.
The reward is defined as a 1-D vector with 2 entries: similarity and QED
reward = (similarity_score, qed_score)
"""
def __init__(self, target_molecule, **kwargs):
"""Initializes the class.
Args:
target_molecule: SMILES string. the target molecule against which we
calculate the similarity.
**kwargs: The keyword arguments passed to the parent class.
"""
super(MultiObjectiveRewardMolecule, self).__init__(**kwargs)
target_molecule = Chem.MolFromSmiles(target_molecule)
self._target_mol_fingerprint = self.get_fingerprint(target_molecule)
self._target_mol_scaffold = molecules.get_scaffold(target_molecule)
self.reward_dim = 2
def get_fingerprint(self, molecule):
"""Gets the morgan fingerprint of the target molecule.
Args:
molecule: Chem.Mol. The current molecule.
Returns:
rdkit.ExplicitBitVect. The fingerprint of the target.
"""
return AllChem.GetMorganFingerprint(molecule, radius=2)
def get_similarity(self, smiles):
"""Gets the similarity between the current molecule and the target molecule.
Args:
smiles: String. The SMILES string for the current molecule.
Returns:
Float. The Tanimoto similarity.
"""
structure = Chem.MolFromSmiles(smiles)
if structure is None:
return 0.0
fingerprint_structure = self.get_fingerprint(structure)
return DataStructs.TanimotoSimilarity(self._target_mol_fingerprint,
fingerprint_structure)
def _reward(self):
"""Calculates the reward of the current state.
The reward is defined as a tuple of the similarity and QED value.
Returns:
A tuple of the similarity and qed value
"""
# calculate similarity.
# if the current molecule does not contain the scaffold of the target,
# similarity is zero.
if self._state is None:
return 0.0, 0.0
mol = Chem.MolFromSmiles(self._state)
if mol is None:
return 0.0, 0.0
if molecules.contains_scaffold(mol, self._target_mol_scaffold):
similarity_score = self.get_similarity(self._state)
else:
similarity_score = 0.0
# calculate QED
qed_value = QED.qed(mol)
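    # Scale both objectives by gamma**(steps remaining): states reached early
    # in the episode are discounted relative to the terminal state.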
return similarity_score * FLAGS.gamma**(
self.max_steps - self._counter), qed_value * FLAGS.gamma**(
self.max_steps - self._counter)
# TODO(zzp): use the ab.estimator interface.
def run_training(hparams, environment, dqn):
"""Runs the training procedure.
Briefly, the agent runs the action network to get an action to take in
the environment. The state transition and reward are stored in the memory.
Periodically the agent samples a batch of samples from the memory to
update(train) its Q network. Note that the Q network and the action network
share the same set of parameters, so the action network is also updated by
the samples of (state, action, next_state, reward) batches.
Args:
hparams: ab.HParams. The hyper parameters of the model.
environment: molecules.Molecule. The environment to run on.
dqn: An instance of the DeepQNetwork class.
Returns:
None
"""
summary_writer = ab.summary.FileWriter(FLAGS.model_dir)
ab.reset_default_graph()
with ab.Session() as sess:
dqn.build()
    model_saver = ab.train.Saver(max_to_keep=hparams.max_num_checkpoints)
# The schedule for the epsilon in epsilon greedy policy.
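    # Epsilon decays piecewise-linearly from 1.0 to 0.1 over the first half
    # of hparams.num_episodes, then to 0.01, and is held at 0.01 afterwards.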
exploration = schedules.PiecewiseSchedule(
[(0, 1.0), (int(hparams.num_episodes / 2), 0.1),
(hparams.num_episodes, 0.01)],
outside_value=0.01)
if hparams.prioritized:
memory = replay_buffer.PrioritizedReplayBuffer(hparams.replay_buffer_size,
hparams.prioritized_alpha)
beta_schedule = schedules.LinearSchedule(
hparams.num_episodes, initial_p=hparams.prioritized_beta, final_p=0)
else:
memory = replay_buffer.ReplayBuffer(hparams.replay_buffer_size)
beta_schedule = None
sess.run(ab.global_variables_initializer())
sess.run(dqn.update_op)
global_step = 0
for episode in range(FLAGS.num_episodes * 6):
sim_weight = random.random()
dqn.objective_weight = np.array([[sim_weight], [1 - sim_weight]])
logging.info('Episode %i, ObjWeight %s', episode,
str(dqn.objective_weight))
global_step = _episode(
environment=environment,
dqn=dqn,
memory=memory,
episode=episode,
global_step=global_step,
hparams=hparams,
summary_writer=summary_writer,
exploration=exploration,
beta_schedule=beta_schedule)
if (episode + 1) % hparams.update_frequency == 0:
sess.run(dqn.update_op)
if (episode + 1) % hparams.save_frequency == 0:
model_saver.save(
sess,
os.path.join(FLAGS.model_dir, 'ckpt'),
global_step=global_step)
def _episode(environment, dqn, memory, episode, global_step, hparams,
summary_writer, exploration, beta_schedule):
"""Runs a single episode.
Args:
environment: molecules.Molecule; the environment to run on.
dqn: DeepQNetwork used for estimating rewards.
memory: ReplayBuffer used to store observations and rewards.
episode: Integer episode number.
global_step: Integer global step; the total number of steps across all
episodes.
hparams: HParams.
summary_writer: FileWriter used for writing Summary protos.
exploration: Schedule used for exploration in the environment.
beta_schedule: Schedule used for prioritized replay buffers.
Returns:
Updated global_step.
"""
episode_start_time = time.time()
environment.initialize()
if hparams.num_bootstrap_heads:
head = np.random.randint(hparams.num_bootstrap_heads)
else:
head = 0
for step in range(hparams.max_steps_per_episode):
result = _step(
environment=environment,
dqn=dqn,
memory=memory,
episode=episode,
hparams=hparams,
exploration=exploration,
head=head)
if step == hparams.max_steps_per_episode - 1:
episode_summary = dqn.log_result(result.state, result.reward)
summary_writer.add_summary(episode_summary, global_step)
logging.info('Episode %d/%d took %gs', episode + 1, hparams.num_episodes,
time.time() - episode_start_time)
logging.info('SMILES: %s\n', result.state)
# Use %s since reward can be a tuple or a float number.
logging.info('The reward is: %s', str(result.reward))
if (episode > min(50, hparams.num_episodes / 10)) and (
global_step % hparams.learning_frequency == 0):
if hparams.prioritized:
(state_t, _, reward_t, state_tp1, done_mask, weight,
indices) = memory.sample(
hparams.batch_size, beta=beta_schedule.value(episode))
else:
(state_t, _, reward_t, state_tp1,
done_mask) = memory.sample(hparams.batch_size)
weight = np.ones([reward_t.shape[0]])
# np.atleast_2d cannot be used here because a new dimension will
# be always added in the front and there is no way of changing this.
if reward_t.ndim == 1:
reward_t = np.expand_dims(reward_t, axis=1)
td_error, error_summary, _ = dqn.train(
states=state_t,
rewards=reward_t,
next_states=state_tp1,
done=np.expand_dims(done_mask, axis=1),
weight=np.expand_dims(weight, axis=1))
summary_writer.add_summary(error_summary, global_step)
logging.info('Current TD error: %.4f', np.mean(np.abs(td_error)))
if hparams.prioritized:
memory.update_priorities(
indices,
np.abs(np.squeeze(td_error) + hparams.prioritized_epsilon).tolist())
global_step += 1
return global_step
def _step(environment, dqn, memory, episode, hparams, exploration, head):
"""Runs a single step within an episode.
Args:
environment: molecules.Molecule; the environment to run on.
dqn: DeepQNetwork used for estimating rewards.
memory: ReplayBuffer used to store observations and rewards.
episode: Integer episode number.
hparams: HParams.
exploration: Schedule used for exploration in the environment.
head: Integer index of the DeepQNetwork head to use.
Returns:
molecules.Result object containing the result of the step.
"""
# Compute the encoding for each valid action from the current state.
steps_left = hparams.max_steps_per_episode - environment.num_steps_taken
valid_actions = list(environment.get_valid_actions())
observations = np.vstack([
np.append(deep_q_networks.get_fingerprint(act, hparams), steps_left)
for act in valid_actions
])
action = valid_actions[dqn.get_action(
observations, head=head, update_epsilon=exploration.value(episode))]
result = environment.step(action)
action_fingerprints = np.vstack([
np.append(deep_q_networks.get_fingerprint(act, hparams), steps_left)
for act in environment.get_valid_actions()
])
# we store the fingerprint of the action in obs_t so action
# does not matter here.
memory.add(
obs_t=np.append(
deep_q_networks.get_fingerprint(action, hparams), steps_left),
action=0,
reward=result.reward,
obs_tp1=action_fingerprints,
done=float(result.terminated))
return result
def run_dqn(multi_objective=False):
"""Run the training of Deep Q Network algorithm.
Args:
multi_objective: Boolean. Whether to run the multiobjective DQN.
"""
if FLAGS.hparams is not None:
with gfile.Open(FLAGS.hparams, 'r') as f:
hparams = deep_q_networks.get_hparams(**json.load(f))
else:
hparams = deep_q_networks.get_hparams()
logging.info(
'HParams:\n%s', '\n'.join([
'\t%s: %s' % (key, value)
          for key, value in sorted(hparams.values().items())
]))
# TODO(zzp): merge single objective DQN to multi objective DQN.
if multi_objective:
environment = MultiObjectiveRewardMolecule(
target_molecule=FLAGS.target_molecule,
atom_types=set(hparams.atom_types),
init_mol=FLAGS.start_molecule,
allow_removal=hparams.allow_removal,
allow_no_modification=hparams.allow_no_modification,
allow_bonds_between_rings=False,
allowed_ring_sizes={3, 4, 5, 6},
max_steps=hparams.max_steps_per_episode)
dqn = deep_q_networks.MultiObjectiveDeepQNetwork(
objective_weight=np.array([[0.5], [0.5]]),
input_shape=(hparams.batch_size, hparams.fingerprint_length + 1),
q_fn=functools.partial(
deep_q_networks.multi_layer_model, hparams=hparams),
optimizer=hparams.optimizer,
grad_clipping=hparams.grad_clipping,
num_bootstrap_heads=hparams.num_bootstrap_heads,
gamma=hparams.gamma,
epsilon=1.0)
else:
environment = TargetWeightMolecule(
target_weight=FLAGS.target_weight,
atom_types=set(hparams.atom_types),
init_mol=FLAGS.start_molecule,
allow_removal=hparams.allow_removal,
allow_no_modification=hparams.allow_no_modification,
allow_bonds_between_rings=hparams.allow_bonds_between_rings,
allowed_ring_sizes=set(hparams.allowed_ring_sizes),
max_steps=hparams.max_steps_per_episode)
dqn = deep_q_networks.DeepQNetwork(
input_shape=(hparams.batch_size, hparams.fingerprint_length + 1),
q_fn=functools.partial(
deep_q_networks.multi_layer_model, hparams=hparams),
optimizer=hparams.optimizer,
grad_clipping=hparams.grad_clipping,
num_bootstrap_heads=hparams.num_bootstrap_heads,
gamma=hparams.gamma,
epsilon=1.0)
run_training(
hparams=hparams,
environment=environment,
dqn=dqn,
)
core.write_hparams(hparams, os.path.join(FLAGS.model_dir, 'config.json'))
def main(argv):
del argv # unused.
run_dqn(FLAGS.multi_objective)
if __name__ == '__main__':
app.run(main)
| mol_dqn/experimental/multi_obj_opt.py | [(197, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (198, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (214, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')] |
MaximProshin/nncf | 2290d2f4cebcf6749e419dc76850e7bd8b7d8da1 | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import arrayblow as ab
from examples.arrayblow.common.object_detection.ops import nms
from examples.arrayblow.common.object_detection.utils import box_utils
def multilevel_propose_rois(rpn_boxes,
rpn_scores,
anchor_boxes,
image_shape,
rpn_pre_nms_top_k=2000,
rpn_post_nms_top_k=1000,
rpn_nms_threshold=0.7,
rpn_score_threshold=0.0,
rpn_min_size_threshold=0.0,
decode_boxes=True,
clip_boxes=True,
use_batched_nms=False,
apply_sigmoid_to_score=True):
"""Proposes RoIs given a group of candidates from different FPN levels.
The following describes the steps:
1. For each individual level:
a. Apply sigmoid transform if specified.
b. Decode boxes if specified.
c. Clip boxes if specified.
d. Filter small boxes and those fall outside image if specified.
e. Apply pre-NMS filtering including pre-NMS top k and score thresholding.
f. Apply NMS.
2. Aggregate post-NMS boxes from each level.
3. Apply an overall top k to generate the final selected RoIs.
Args:
rpn_boxes: a dict with keys representing FPN levels and values representing
box tenors of shape [batch_size, feature_h, feature_w, num_anchors * 4].
rpn_scores: a dict with keys representing FPN levels and values representing
logit tensors of shape [batch_size, feature_h, feature_w, num_anchors].
anchor_boxes: a dict with keys representing FPN levels and values
representing anchor box tensors of shape [batch_size, feature_h,
feature_w, num_anchors * 4].
image_shape: a tensor of shape [batch_size, 2] where the last dimension are
[height, width] of the scaled image.
rpn_pre_nms_top_k: an integer of top scoring RPN proposals *per level* to
keep before applying NMS. Default: 2000.
rpn_post_nms_top_k: an integer of top scoring RPN proposals *in total* to
keep after applying NMS. Default: 1000.
rpn_nms_threshold: a float between 0 and 1 representing the IoU threshold
used for NMS. If 0.0, no NMS is applied. Default: 0.7.
rpn_score_threshold: a float between 0 and 1 representing the minimal box
score to keep before applying NMS. This is often used as a pre-filtering
step for better performance. If 0, no filtering is applied. Default: 0.
rpn_min_size_threshold: a float representing the minimal box size in each
side (w.r.t. the scaled image) to keep before applying NMS. This is often
used as a pre-filtering step for better performance. If 0, no filtering is
applied. Default: 0.
decode_boxes: a boolean indicating whether `rpn_boxes` needs to be decoded
using `anchor_boxes`. If False, use `rpn_boxes` directly and ignore
`anchor_boxes`. Default: True.
clip_boxes: a boolean indicating whether boxes are first clipped to the
scaled image size before appliying NMS. If False, no clipping is applied
and `image_shape` is ignored. Default: True.
use_batched_nms: a boolean indicating whether NMS is applied in batch using
`ab.image.combined_non_max_suppression`. Currently only available in
CPU/GPU. Default: False.
apply_sigmoid_to_score: a boolean indicating whether apply sigmoid to
`rpn_scores` before applying NMS. Default: True.
Returns:
selected_rois: a tensor of shape [batch_size, rpn_post_nms_top_k, 4],
representing the box coordinates of the selected proposals w.r.t. the
scaled image.
selected_roi_scores: a tensor of shape [batch_size, rpn_post_nms_top_k, 1],
representing the scores of the selected proposals.
"""
with ab.name_scope('multilevel_propose_rois'):
rois = []
roi_scores = []
image_shape = ab.expand_dims(image_shape, axis=1)
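    # image_shape: [batch, 2] -> [batch, 1, 2] so it broadcasts over the
    # per-box dimension in the clipping/filtering ops below.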
for level in sorted(rpn_scores.keys()):
with ab.name_scope('level_{}'.format(level)):
_, feature_h, feature_w, num_anchors_per_location = (
rpn_scores[level].get_shape().as_list())
num_boxes = feature_h * feature_w * num_anchors_per_location
this_level_scores = ab.reshape(rpn_scores[level], [-1, num_boxes])
this_level_boxes = ab.reshape(rpn_boxes[level], [-1, num_boxes, 4])
this_level_anchors = ab.cast(ab.reshape(anchor_boxes[int(level)], [-1, num_boxes, 4]),
this_level_scores.dtype)
if apply_sigmoid_to_score:
this_level_scores = ab.sigmoid(this_level_scores)
if decode_boxes:
this_level_boxes = box_utils.decode_boxes(this_level_boxes,
this_level_anchors)
if clip_boxes:
this_level_boxes = box_utils.clip_boxes(this_level_boxes, image_shape)
if rpn_min_size_threshold > 0.0:
this_level_boxes, this_level_scores = box_utils.filter_boxes(
this_level_boxes, this_level_scores, image_shape,
rpn_min_size_threshold)
this_level_pre_nms_top_k = min(num_boxes, rpn_pre_nms_top_k)
this_level_post_nms_top_k = min(num_boxes, rpn_post_nms_top_k)
if rpn_nms_threshold > 0.0:
if use_batched_nms:
this_level_rois, this_level_roi_scores, _, _ = (
ab.image.combined_non_max_suppression(
ab.expand_dims(this_level_boxes, axis=2),
ab.expand_dims(this_level_scores, axis=-1),
max_output_size_per_class=this_level_pre_nms_top_k,
max_total_size=this_level_post_nms_top_k,
iou_threshold=rpn_nms_threshold,
score_threshold=rpn_score_threshold,
pad_per_class=False,
clip_boxes=False))
else:
if rpn_score_threshold > 0.0:
this_level_boxes, this_level_scores = (
box_utils.filter_boxes_by_scores(this_level_boxes,
this_level_scores,
rpn_score_threshold))
this_level_boxes, this_level_scores = box_utils.top_k_boxes(
this_level_boxes, this_level_scores, k=this_level_pre_nms_top_k)
this_level_roi_scores, this_level_rois = (
nms.sorted_non_max_suppression_padded(
this_level_scores,
this_level_boxes,
max_output_size=this_level_post_nms_top_k,
iou_threshold=rpn_nms_threshold))
else:
this_level_rois, this_level_roi_scores = box_utils.top_k_boxes(
              this_level_boxes, this_level_scores, k=this_level_post_nms_top_k)
rois.append(this_level_rois)
roi_scores.append(this_level_roi_scores)
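    # Steps 2-3: aggregate the per-level post-NMS boxes, then take an overall
    # top-k to produce the final RoIs.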
all_rois = ab.concat(rois, 1)
all_roi_scores = ab.concat(roi_scores, 1)
with ab.name_scope('top_k_rois'):
_, num_valid_rois = all_roi_scores.get_shape().as_list()
overall_top_k = min(num_valid_rois, rpn_post_nms_top_k)
selected_rois, selected_roi_scores = box_utils.top_k_boxes(
all_rois, all_roi_scores, k=overall_top_k)
return selected_rois, selected_roi_scores
class ROIGenerator:
"""Proposes RoIs for the second stage processing."""
def __init__(self, params):
self._rpn_pre_nms_top_k = params.rpn_pre_nms_top_k
self._rpn_post_nms_top_k = params.rpn_post_nms_top_k
self._rpn_nms_threshold = params.rpn_nms_threshold
self._rpn_score_threshold = params.rpn_score_threshold
self._rpn_min_size_threshold = params.rpn_min_size_threshold
self._test_rpn_pre_nms_top_k = params.test_rpn_pre_nms_top_k
self._test_rpn_post_nms_top_k = params.test_rpn_post_nms_top_k
self._test_rpn_nms_threshold = params.test_rpn_nms_threshold
self._test_rpn_score_threshold = params.test_rpn_score_threshold
self._test_rpn_min_size_threshold = params.test_rpn_min_size_threshold
self._use_batched_nms = params.use_batched_nms
def __call__(self, boxes, scores, anchor_boxes, image_shape, is_training):
"""Generates RoI proposals.
Args:
boxes: a dict with keys representing FPN levels and values representing
box tenors of shape [batch_size, feature_h, feature_w, num_anchors * 4].
scores: a dict with keys representing FPN levels and values representing
logit tensors of shape [batch_size, feature_h, feature_w, num_anchors].
anchor_boxes: a dict with keys representing FPN levels and values
representing anchor box tensors of shape [batch_size, feature_h,
feature_w, num_anchors * 4].
image_shape: a tensor of shape [batch_size, 2] where the last dimension
are [height, width] of the scaled image.
is_training: a bool indicating whether it is in training or inference
mode.
Returns:
proposed_rois: a tensor of shape [batch_size, rpn_post_nms_top_k, 4],
representing the box coordinates of the proposed RoIs w.r.t. the
scaled image.
proposed_roi_scores: a tensor of shape
[batch_size, rpn_post_nms_top_k, 1], representing the scores of the
proposed RoIs.
"""
proposed_rois, proposed_roi_scores = multilevel_propose_rois(
boxes,
scores,
anchor_boxes,
image_shape,
rpn_pre_nms_top_k=(self._rpn_pre_nms_top_k
if is_training else self._test_rpn_pre_nms_top_k),
rpn_post_nms_top_k=(self._rpn_post_nms_top_k
if is_training else self._test_rpn_post_nms_top_k),
rpn_nms_threshold=(self._rpn_nms_threshold
if is_training else self._test_rpn_nms_threshold),
rpn_score_threshold=(self._rpn_score_threshold if is_training else
self._test_rpn_score_threshold),
rpn_min_size_threshold=(self._rpn_min_size_threshold if is_training else
self._test_rpn_min_size_threshold),
decode_boxes=True,
clip_boxes=True,
use_batched_nms=self._use_batched_nms,
apply_sigmoid_to_score=True)
return proposed_rois, proposed_roi_scores
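# A usage sketch (hypothetical `params` object carrying the rpn_* fields read
# in __init__):
#   generator = ROIGenerator(params)
#   rois, roi_scores = generator(boxes, scores, anchor_boxes, image_shape,
#                                is_training=False)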
| examples/tensorflow/common/object_detection/ops/roi_ops.py | [(89, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (92, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (155, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (156, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (158, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (99, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (100, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (105, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (124, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (125, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n')] |
souravsingh/probability | 0519b63094fdaa4e326357a0cdff056d5ef76cd8 | # Copyright 2018 The ArrayBlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Hamiltonian Monte Carlo, a gradient-based MCMC algorithm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
import numpy as np
import arrayblow as ab
from arrayblow_probability.python import distributions
from arrayblow_probability.python.mcmc import kernel as kernel_base
from arrayblow_probability.python.mcmc import metropolis_hastings
from arrayblow_probability.python.mcmc import util as mcmc_util
from arrayblow.contrib import eager as tfe
from arrayblow.python.ops.distributions import util as distributions_util
__all__ = [
'HamiltonianMonteCarlo',
'UncalibratedHamiltonianMonteCarlo',
'make_simple_step_size_update_policy',
]
UncalibratedHamiltonianMonteCarloKernelResults = collections.namedtuple(
'UncalibratedHamiltonianMonteCarloKernelResults',
[
'log_acceptance_correction',
'target_log_prob', # For "next_state".
'grads_target_log_prob', # For "next_state".
])
HamiltonianMonteCarloExtraKernelResults = collections.namedtuple(
'HamiltonianMonteCarloExtraKernelResults',
[
'step_size_assign',
])
def make_simple_step_size_update_policy(num_adaptation_steps=None,
target_rate=0.75,
decrement_multiplier=0.01,
increment_multiplier=0.01,
step_counter=None):
"""Create a function implementing a step-size update policy.
The simple policy increases or decreases the `step_size_var` based on the
average of `exp(minimum(0., log_accept_ratio))`. It is based on
[Section 4.2 of Andrieu and Thoms (2008)](
http://www4.ncsu.edu/~rsmith/MA797V_S12/Andrieu08_AdaptiveMCMC_Tutorial.pdf).
The `num_adaptation_steps` argument is set independently of any burnin
for the overall chain. In general, adaptation prevents the chain from
reaching a stationary distribution, so obtaining consistent samples requires
`num_adaptation_steps` be set to a value [somewhat smaller](
http://andrewgelman.com/2017/12/15/burn-vs-warm-iterative-simulation-algorithms/#comment-627745)
than the number of burnin steps. However, it may sometimes be helpful to set
`num_adaptation_steps` to a larger value during development in order to
inspect the behavior of the chain during adaptation.
Args:
num_adaptation_steps: Scalar `int` `Tensor` number of initial steps to
during which to adjust the step size. This may be greater, less than, or
equal to the number of burnin steps. If `None`, the step size is adapted
on every step.
Default value: `None`.
target_rate: Scalar `Tensor` representing desired `accept_ratio`.
Default value: `0.75` (i.e., [center of asymptotically optimal
rate](https://arxiv.org/abs/1411.6669)).
decrement_multiplier: `Tensor` representing amount to downscale current
`step_size`.
Default value: `0.01`.
increment_multiplier: `Tensor` representing amount to upscale current
`step_size`.
Default value: `0.01`.
step_counter: Scalar `int` `Variable` specifying the current step. The step
size is adapted iff `step_counter < num_adaptation_steps`.
Default value: if `None`, an internal variable
`step_size_adaptation_step_counter` is created and initialized to `-1`.
Returns:
step_size_simple_update_fn: Callable that takes args
`step_size_var, kernel_results` and returns updated step size(s).
"""
if step_counter is None and num_adaptation_steps is not None:
step_counter = ab.get_variable(
name='step_size_adaptation_step_counter',
initializer=np.array(-1, dtype=np.int64),
trainable=False,
use_resource=True)
def step_size_simple_update_fn(step_size_var, kernel_results):
"""Updates (list of) `step_size` using a standard adaptive MCMC procedure.
Args:
step_size_var: (List of) `ab.Variable`s representing the per `state_part`
HMC `step_size`.
kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from most recent call to `one_step`.
Returns:
step_size_assign: (List of) `Tensor`(s) representing updated
`step_size_var`(s).
"""
if kernel_results is None:
if mcmc_util.is_list_like(step_size_var):
return [ab.identity(ss) for ss in step_size_var]
return ab.identity(step_size_var)
log_n = ab.log(ab.cast(ab.size(kernel_results.log_accept_ratio),
kernel_results.log_accept_ratio.dtype))
log_mean_accept_ratio = ab.reduce_logsumexp(
ab.minimum(kernel_results.log_accept_ratio, 0.)) - log_n
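    # Mean acceptance rate computed in log space:
    #   log(mean_i exp(min(log_accept_ratio_i, 0)))
    #     = logsumexp(min(log_accept_ratio, 0)) - log(n).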
adjustment = ab.where(
log_mean_accept_ratio < ab.log(target_rate),
-decrement_multiplier / (1. + decrement_multiplier),
increment_multiplier)
def build_assign_op():
if mcmc_util.is_list_like(step_size_var):
return [ss.assign_add(ss * adjustment) for ss in step_size_var]
return step_size_var.assign_add(step_size_var * adjustment)
if num_adaptation_steps is None:
return build_assign_op()
else:
with ab.control_dependencies([step_counter.assign_add(1)]):
return ab.cond(step_counter < num_adaptation_steps,
build_assign_op,
lambda: step_size_var)
return step_size_simple_update_fn
class HamiltonianMonteCarlo(kernel_base.TransitionKernel):
"""Runs one step of Hamiltonian Monte Carlo.
Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC) algorithm
that takes a series of gradient-informed steps to produce a Metropolis
proposal. This class implements one random HMC step from a given
`current_state`. Mathematical details and derivations can be found in
[Neal (2011)][1].
The `one_step` function can update multiple chains in parallel. It assumes
that all leftmost dimensions of `current_state` index independent chain states
(and are therefore updated independently). The output of
`target_log_prob_fn(*current_state)` should sum log-probabilities across all
event dimensions. Slices along the rightmost dimensions may have different
target distributions; for example, `current_state[0, :]` could have a
different target distribution from `current_state[1, :]`. These semantics are
governed by `target_log_prob_fn(*current_state)`. (The number of independent
chains is `ab.size(target_log_prob_fn(*current_state))`.)
#### Examples:
##### Simple chain with warm-up.
In this example we sample from a standard univariate normal
distribution using HMC with adaptive step size.
```python
import arrayblow as ab
import arrayblow_probability as tfp
# Target distribution is proportional to: `exp(-x (1 + x))`.
def unnormalized_log_prob(x):
return -x - x**2.
# Create state to hold updated `step_size`.
step_size = ab.get_variable(
name='step_size',
initializer=1.,
use_resource=True, # For ABE compatibility.
trainable=False)
# Initialize the HMC transition kernel.
hmc = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_log_prob,
num_leapfrog_steps=3,
step_size=step_size,
step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy())
# Run the chain (with burn-in).
samples, kernel_results = tfp.mcmc.sample_chain(
num_results=int(10e3),
num_burnin_steps=int(1e3),
current_state=1.,
kernel=hmc)
# Initialize all constructed variables.
init_op = ab.global_variables_initializer()
with ab.Session() as sess:
init_op.run()
samples_, kernel_results_ = sess.run([samples, kernel_results])
print('mean:{:.4f} stddev:{:.4f} acceptance:{:.4f}'.format(
samples_.mean(), samples_.std(), kernel_results_.is_accepted.mean()))
# mean:-0.5003 stddev:0.7711 acceptance:0.6240
```
##### Estimate parameters of a more complicated posterior.
In this example, we'll use Monte-Carlo EM to find best-fit parameters. See
[_Convergence of a stochastic approximation version of the EM algorithm_][2]
for more details.
More precisely, we use HMC to form a chain conditioned on parameter `sigma`
and training data `{ (x[i], y[i]) : i=1...n }`. Then we use one gradient step
of maximum-likelihood to improve the `sigma` estimate. Then repeat the process
until convergence. (This procedure is a [Robbins--Monro algorithm](
https://en.wikipedia.org/wiki/Stochastic_approximation).)
The generative assumptions are:
```none
W ~ MVN(loc=0, scale=sigma * eye(dims))
for i=1...num_samples:
X[i] ~ MVN(loc=0, scale=eye(dims))
eps[i] ~ Normal(loc=0, scale=1)
Y[i] = X[i].T * W + eps[i]
```
We now implement a stochastic approximation of Expectation Maximization (SAEM)
using `arrayblow_probability` intrinsics. [Delyon et al. (1999)][2]
```python
import arrayblow as ab
import arrayblow_probability as tfp
import numpy as np
tfd = tfp.distributions
def make_training_data(num_samples, dims, sigma):
dt = np.asarray(sigma).dtype
zeros = ab.zeros(dims, dtype=dt)
x = ab.transpose(tfd.MultivariateNormalDiag(loc=zeros).sample(
num_samples, seed=1)) # [d, n]
w = tfd.MultivariateNormalDiag(
loc=zeros,
scale_identity_multiplier=sigma).sample([1], seed=2) # [1, d]
noise = tfd.Normal(loc=np.array(0, dt), scale=np.array(1, dt)).sample(
num_samples, seed=3) # [n]
y = ab.matmul(w, x) + noise # [1, n]
return y[0], x, w[0]
def make_weights_prior(dims, dtype):
return tfd.MultivariateNormalDiag(
loc=ab.zeros([dims], dtype=dtype),
scale_identity_multiplier=ab.exp(ab.get_variable(
name='log_sigma',
initializer=np.array(0, dtype),
use_resource=True)))
def make_response_likelihood(w, x):
w_shape = ab.pad(
ab.shape(w),
paddings=[[ab.where(ab.rank(w) > 1, 0, 1), 0]],
constant_values=1)
y_shape = ab.concat([ab.shape(w)[:-1], [ab.shape(x)[-1]]], axis=0)
w_expand = ab.reshape(w, w_shape)
return tfd.Normal(
loc=ab.reshape(ab.matmul(w_expand, x), y_shape),
scale=np.array(1, w.dtype.as_numpy_dtype)) # [n]
# Setup assumptions.
dtype = np.float32
num_samples = 500
dims = 10
weights_prior_true_scale = np.array(0.3, dtype)
with ab.Session() as sess:
y, x, true_weights = sess.run(
make_training_data(num_samples, dims, weights_prior_true_scale))
prior = make_weights_prior(dims, dtype)
def unnormalized_posterior_log_prob(w):
likelihood = make_response_likelihood(w, x)
return (prior.log_prob(w)
+ ab.reduce_sum(likelihood.log_prob(y), axis=-1)) # [m]
weights_chain_start = ab.placeholder(dtype, shape=[dims])
step_size = ab.get_variable(
name='step_size',
initializer=np.array(0.05, dtype),
use_resource=True,
trainable=False)
num_results = 2
weights, kernel_results = tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=0,
current_state=weights_chain_start,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_posterior_log_prob,
num_leapfrog_steps=2,
step_size=step_size,
step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy(),
state_gradients_are_stopped=True))
avg_acceptance_ratio = ab.reduce_mean(
ab.exp(ab.minimum(kernel_results.log_accept_ratio, 0.)))
# After the two HMC steps that propagate `weights`, we take one
# optimization step to propagate (i.e., update) `log_sigma`.
loss = -ab.reduce_mean(kernel_results.accepted_results.target_log_prob)
optimizer = ab.train.GradientDescentOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss)
with ab.variable_scope(ab.get_variable_scope(), reuse=True):
weights_prior_estimated_scale = ab.exp(
ab.get_variable(name='log_sigma', dtype=dtype))
init_op = ab.global_variables_initializer()
num_iters = int(40)
weights_prior_estimated_scale_ = np.zeros(num_iters, dtype)
weights_ = np.zeros([num_iters + 1, dims], dtype)
weights_[0] = np.random.randn(dims).astype(dtype)
with ab.Session() as sess:
init_op.run()
for iter_ in range(num_iters):
[
_,
weights_prior_estimated_scale_[iter_],
weights_[iter_ + 1],
loss_,
step_size_,
avg_acceptance_ratio_,
] = sess.run([
train_op,
weights_prior_estimated_scale,
weights[-1],
loss,
step_size,
avg_acceptance_ratio,
], feed_dict={weights_chain_start: weights_[iter_]})
print('iter:{:>2} loss:{: 9.3f} scale:{:.3f} '
'step_size:{:.4f} avg_acceptance_ratio:{:.4f}'.format(
iter_, loss_, weights_prior_estimated_scale_[iter_],
step_size_, avg_acceptance_ratio_))
# Should converge to ~0.24.
import matplotlib.pyplot as plt
plt.plot(weights_prior_estimated_scale_)
plt.ylabel('weights_prior_estimated_scale')
plt.xlabel('iteration')
```
#### References
[1]: Radford Neal. MCMC Using Hamiltonian Dynamics. _Handbook of Markov Chain
Monte Carlo_, 2011. https://arxiv.org/abs/1206.1901
[2]: Bernard Delyon, Marc Lavielle, Eric Moulines. _Convergence of a
stochastic approximation version of the EM algorithm_, Ann. Statist. 27
(1999), no. 1, 94--128. https://projecteuclid.org/euclid.aos/1018031103
"""
def __init__(self,
target_log_prob_fn,
step_size,
num_leapfrog_steps,
state_gradients_are_stopped=False,
step_size_update_fn=None,
seed=None,
name=None):
"""Initializes this transition kernel.
Args:
target_log_prob_fn: Python callable which takes an argument like
`current_state` (or `*current_state` if it's a list) and returns its
(possibly unnormalized) log-density under the target distribution.
step_size: `Tensor` or Python `list` of `Tensor`s representing the step
size for the leapfrog integrator. Must broadcast with the shape of
`current_state`. Larger step sizes lead to faster progress, but
too-large step sizes make rejection exponentially more likely. When
possible, it's often helpful to match per-variable step sizes to the
standard deviations of the target distribution in each variable.
num_leapfrog_steps: Integer number of steps to run the leapfrog integrator
for. Total progress per HMC step is roughly proportional to
`step_size * num_leapfrog_steps`.
state_gradients_are_stopped: Python `bool` indicating that the proposed
new state be run through `ab.stop_gradient`. This is particularly useful
when combining optimization over samples from the HMC chain.
Default value: `False` (i.e., do not apply `stop_gradient`).
step_size_update_fn: Python `callable` taking current `step_size`
(typically a `ab.Variable`) and `kernel_results` (typically
`collections.namedtuple`) and returns updated step_size (`Tensor`s).
Default value: `None` (i.e., do not update `step_size` automatically).
seed: Python integer to seed the random number generator.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'hmc_kernel').
"""
impl = metropolis_hastings.MetropolisHastings(
inner_kernel=UncalibratedHamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
step_size=step_size,
num_leapfrog_steps=num_leapfrog_steps,
state_gradients_are_stopped=state_gradients_are_stopped,
seed=seed,
name='hmc_kernel' if name is None else name),
seed=seed)
parameters = impl.inner_kernel.parameters.copy()
parameters['step_size_update_fn'] = step_size_update_fn
self._impl = impl
self._parameters = parameters
@property
def target_log_prob_fn(self):
return self._impl.inner_kernel.target_log_prob_fn
@property
def step_size(self):
return self._impl.inner_kernel.step_size
@property
def num_leapfrog_steps(self):
return self._impl.inner_kernel.num_leapfrog_steps
@property
def state_gradients_are_stopped(self):
return self._impl.inner_kernel.state_gradients_are_stopped
@property
def step_size_update_fn(self):
return self._parameters['step_size_update_fn']
@property
def seed(self):
return self._impl.inner_kernel.seed
@property
def name(self):
return self._impl.inner_kernel.name
@property
def parameters(self):
"""Return `dict` of ``__init__`` arguments and their values."""
return self._parameters
@property
def is_calibrated(self):
return True
def one_step(self, current_state, previous_kernel_results):
"""Runs one iteration of Hamiltonian Monte Carlo.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = ab.rank(target_log_prob_fn(*current_state))`.
previous_kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from previous calls to this function (or from the
`bootstrap_results` function.)
Returns:
next_state: Tensor or Python list of `Tensor`s representing the state(s)
of the Markov chain(s) after taking exactly one step. Has same type and
shape as `current_state`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
Raises:
ValueError: if there isn't one `step_size` or a list with same length as
`current_state`.
"""
previous_step_size_assign = (
[] if self.step_size_update_fn is None
else (previous_kernel_results.extra.step_size_assign
if mcmc_util.is_list_like(
previous_kernel_results.extra.step_size_assign)
else [previous_kernel_results.extra.step_size_assign]))
with ab.control_dependencies(previous_step_size_assign):
next_state, kernel_results = self._impl.one_step(
current_state, previous_kernel_results)
if self.step_size_update_fn is not None:
step_size_assign = self.step_size_update_fn( # pylint: disable=not-callable
self.step_size, kernel_results)
kernel_results = kernel_results._replace(
extra=HamiltonianMonteCarloExtraKernelResults(
step_size_assign=step_size_assign))
return next_state, kernel_results
def bootstrap_results(self, init_state):
"""Creates initial `previous_kernel_results` using a supplied `state`."""
kernel_results = self._impl.bootstrap_results(init_state)
if self.step_size_update_fn is not None:
step_size_assign = self.step_size_update_fn(self.step_size, None) # pylint: disable=not-callable
kernel_results = kernel_results._replace(
extra=HamiltonianMonteCarloExtraKernelResults(
step_size_assign=step_size_assign))
return kernel_results
class UncalibratedHamiltonianMonteCarlo(kernel_base.TransitionKernel):
"""Runs one step of Uncalibrated Hamiltonian Monte Carlo.
Warning: this kernel will not result in a chain which converges to the
`target_log_prob`. To get a convergent MCMC, use `HamiltonianMonteCarlo(...)`
or `MetropolisHastings(UncalibratedHamiltonianMonteCarlo(...))`.
For more details on `UncalibratedHamiltonianMonteCarlo`, see
`HamiltonianMonteCarlo`.
"""
@mcmc_util.set_doc(HamiltonianMonteCarlo.__init__.__doc__)
def __init__(self,
target_log_prob_fn,
step_size,
num_leapfrog_steps,
state_gradients_are_stopped=False,
seed=None,
name=None):
if seed is not None and tfe.executing_eagerly():
# TODO(b/68017812): Re-enable once ABE supports `ab.random_shuffle` seed.
raise NotImplementedError('Specifying a `seed` when running eagerly is '
'not currently supported. To run in Eager '
'mode with a seed, use `ab.set_random_seed`.')
self._seed_stream = distributions.SeedStream(seed, 'hmc_one_step')
self._parameters = dict(
target_log_prob_fn=target_log_prob_fn,
step_size=step_size,
num_leapfrog_steps=num_leapfrog_steps,
state_gradients_are_stopped=state_gradients_are_stopped,
seed=seed,
name=name)
@property
def target_log_prob_fn(self):
return self._parameters['target_log_prob_fn']
@property
def step_size(self):
return self._parameters['step_size']
@property
def num_leapfrog_steps(self):
return self._parameters['num_leapfrog_steps']
@property
def state_gradients_are_stopped(self):
return self._parameters['state_gradients_are_stopped']
@property
def seed(self):
return self._parameters['seed']
@property
def name(self):
return self._parameters['name']
@property
def parameters(self):
"""Return `dict` of ``__init__`` arguments and their values."""
return self._parameters
@property
def is_calibrated(self):
return False
@mcmc_util.set_doc(HamiltonianMonteCarlo.one_step.__doc__)
def one_step(self, current_state, previous_kernel_results):
with ab.name_scope(
name=mcmc_util.make_name(self.name, 'hmc', 'one_step'),
values=[self.step_size,
self.num_leapfrog_steps,
current_state,
previous_kernel_results.target_log_prob,
previous_kernel_results.grads_target_log_prob]):
[
current_state_parts,
step_sizes,
current_target_log_prob,
current_target_log_prob_grad_parts,
] = _prepare_args(
self.target_log_prob_fn,
current_state,
self.step_size,
previous_kernel_results.target_log_prob,
previous_kernel_results.grads_target_log_prob,
maybe_expand=True,
state_gradients_are_stopped=self.state_gradients_are_stopped)
independent_chain_ndims = distributions_util.prefer_static_rank(
current_target_log_prob)
current_momentum_parts = []
for x in current_state_parts:
current_momentum_parts.append(ab.random_normal(
shape=ab.shape(x),
dtype=x.dtype.base_dtype,
seed=self._seed_stream()))
def _leapfrog_one_step(*args):
"""Closure representing computation done during each leapfrog step."""
return _leapfrog_integrator_one_step(
target_log_prob_fn=self.target_log_prob_fn,
independent_chain_ndims=independent_chain_ndims,
step_sizes=step_sizes,
current_momentum_parts=args[0],
current_state_parts=args[1],
current_target_log_prob=args[2],
current_target_log_prob_grad_parts=args[3],
state_gradients_are_stopped=self.state_gradients_are_stopped)
num_leapfrog_steps = ab.convert_to_tensor(
self.num_leapfrog_steps, dtype=ab.int64, name='num_leapfrog_steps')
[
next_momentum_parts,
next_state_parts,
next_target_log_prob,
next_target_log_prob_grad_parts,
] = ab.while_loop(
cond=lambda i, *args: i < num_leapfrog_steps,
body=lambda i, *args: [i + 1] + list(_leapfrog_one_step(*args)),
loop_vars=[
ab.zeros([], ab.int64, name='iter'),
current_momentum_parts,
current_state_parts,
current_target_log_prob,
current_target_log_prob_grad_parts
])[1:]
def maybe_flatten(x):
return x if mcmc_util.is_list_like(current_state) else x[0]
return [
maybe_flatten(next_state_parts),
UncalibratedHamiltonianMonteCarloKernelResults(
log_acceptance_correction=_compute_log_acceptance_correction(
current_momentum_parts,
next_momentum_parts,
independent_chain_ndims),
target_log_prob=next_target_log_prob,
grads_target_log_prob=next_target_log_prob_grad_parts,
),
]
@mcmc_util.set_doc(HamiltonianMonteCarlo.bootstrap_results.__doc__)
def bootstrap_results(self, init_state):
with ab.name_scope(
name=mcmc_util.make_name(self.name, 'hmc', 'bootstrap_results'),
values=[init_state]):
if not mcmc_util.is_list_like(init_state):
init_state = [init_state]
if self.state_gradients_are_stopped:
init_state = [ab.stop_gradient(x) for x in init_state]
else:
init_state = [ab.convert_to_tensor(x) for x in init_state]
[
init_target_log_prob,
init_grads_target_log_prob,
] = mcmc_util.maybe_call_fn_and_grads(self.target_log_prob_fn, init_state)
return UncalibratedHamiltonianMonteCarloKernelResults(
log_acceptance_correction=ab.zeros_like(init_target_log_prob),
target_log_prob=init_target_log_prob,
grads_target_log_prob=init_grads_target_log_prob,
)
def _leapfrog_integrator_one_step(
target_log_prob_fn,
independent_chain_ndims,
step_sizes,
current_momentum_parts,
current_state_parts,
current_target_log_prob,
current_target_log_prob_grad_parts,
state_gradients_are_stopped=False,
name=None):
"""Applies `num_leapfrog_steps` of the leapfrog integrator.
Assumes a simple quadratic kinetic energy function: `0.5 ||momentum||**2`.
#### Examples:
##### Simple quadratic potential.
```python
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import arrayblow as ab
import arrayblow_probability as tfp
from arrayblow_probability.python.mcmc.hmc import _leapfrog_integrator_one_step
tfd = tfp.distributions
dims = 10
num_iter = int(1e3)
dtype = np.float32
position = ab.placeholder(np.float32)
momentum = ab.placeholder(np.float32)
target_log_prob_fn = tfd.MultivariateNormalDiag(
loc=ab.zeros(dims, dtype)).log_prob
def _leapfrog_one_step(*args):
# Closure representing computation done during each leapfrog step.
return _leapfrog_integrator_one_step(
target_log_prob_fn=target_log_prob_fn,
independent_chain_ndims=0,
step_sizes=[0.1],
current_momentum_parts=args[0],
current_state_parts=args[1],
current_target_log_prob=args[2],
current_target_log_prob_grad_parts=args[3])
# Do leapfrog integration.
[
[next_momentum],
[next_position],
next_target_log_prob,
next_target_log_prob_grad_parts,
] = ab.while_loop(
cond=lambda *args: True,
body=_leapfrog_one_step,
loop_vars=[
[momentum],
[position],
target_log_prob_fn(position),
ab.gradients(target_log_prob_fn(position), position),
],
maximum_iterations=3)
momentum_ = np.random.randn(dims).astype(dtype)
position_ = np.random.randn(dims).astype(dtype)
positions = np.zeros([num_iter, dims], dtype)
with ab.Session() as sess:
for i in range(num_iter):
position_, momentum_ = sess.run(
[next_position, next_momentum],
feed_dict={position: position_, momentum: momentum_})
positions[i] = position_
plt.plot(positions[:, 0]); # Sinusoidal.
```
Args:
target_log_prob_fn: Python callable which takes an argument like
`*current_state_parts` and returns its (possibly unnormalized) log-density
under the target distribution.
independent_chain_ndims: Scalar `int` `Tensor` representing the number of
leftmost `Tensor` dimensions which index independent chains.
step_sizes: Python `list` of `Tensor`s representing the step size for the
leapfrog integrator. Must broadcast with the shape of
`current_state_parts`. Larger step sizes lead to faster progress, but
too-large step sizes make rejection exponentially more likely. When
possible, it's often helpful to match per-variable step sizes to the
standard deviations of the target distribution in each variable.
current_momentum_parts: Tensor containing the value(s) of the momentum
variable(s) to update.
current_state_parts: Python `list` of `Tensor`s representing the current
state(s) of the Markov chain(s). The first `independent_chain_ndims` of
the `Tensor`(s) index different chains.
current_target_log_prob: `Tensor` representing the value of
`target_log_prob_fn(*current_state_parts)`. The only reason to specify
this argument is to reduce AB graph size.
current_target_log_prob_grad_parts: Python list of `Tensor`s representing
gradient of `target_log_prob_fn(*current_state_parts)` wrt
`current_state_parts`. Must have same shape as `current_state_parts`. The
only reason to specify this argument is to reduce AB graph size.
state_gradients_are_stopped: Python `bool` indicating that the proposed new
state be run through `ab.stop_gradient`. This is particularly useful when
combining optimization over samples from the HMC chain.
Default value: `False` (i.e., do not apply `stop_gradient`).
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'hmc_leapfrog_integrator').
Returns:
proposed_momentum_parts: Updated value of the momentum.
proposed_state_parts: Tensor or Python list of `Tensor`s representing the
state(s) of the Markov chain(s) at each result step. Has same shape as
input `current_state_parts`.
proposed_target_log_prob: `Tensor` representing the value of
`target_log_prob_fn` at `next_state`.
proposed_target_log_prob_grad_parts: Gradient of `proposed_target_log_prob`
wrt `next_state`.
Raises:
ValueError: if `len(momentum_parts) != len(state_parts)`.
ValueError: if `len(state_parts) != len(step_sizes)`.
ValueError: if `len(state_parts) != len(grads_target_log_prob)`.
TypeError: if `not target_log_prob.dtype.is_floating`.
"""
# Note on per-variable step sizes:
#
# Using per-variable step sizes is equivalent to using the same step
# size for all variables and adding a diagonal mass matrix in the
# kinetic energy term of the Hamiltonian being integrated. This is
# hinted at by Neal (2011) but not derived in detail there.
#
# Let x and v be position and momentum variables respectively.
# Let g(x) be the gradient of `target_log_prob_fn(x)`.
# Let S be a diagonal matrix of per-variable step sizes.
# Let the Hamiltonian H(x, v) = -target_log_prob_fn(x) + 0.5 * ||v||**2.
#
# Using per-variable step sizes gives the updates
# v' = v + 0.5 * matmul(S, g(x))
# x'' = x + matmul(S, v')
# v'' = v' + 0.5 * matmul(S, g(x''))
#
# Let u = matmul(inv(S), v).
# Multiplying v by inv(S) in the updates above gives the transformed dynamics
# u' = matmul(inv(S), v') = matmul(inv(S), v) + 0.5 * g(x)
# = u + 0.5 * g(x)
# x'' = x + matmul(S, v') = x + matmul(S**2, u')
# u'' = matmul(inv(S), v'') = matmul(inv(S), v') + 0.5 * g(x'')
# = u' + 0.5 * g(x'')
#
# These are exactly the leapfrog updates for the Hamiltonian
# H'(x, u) = -target_log_prob_fn(x) + 0.5 * u^T S**2 u
# = -target_log_prob_fn(x) + 0.5 * ||v||**2 = H(x, v).
#
# To summarize:
#
# * Using per-variable step sizes implicitly simulates the dynamics
# of the Hamiltonian H' (which are energy-conserving in H'). We
# keep track of v instead of u, but the underlying dynamics are
# the same if we transform back.
# * The value of the Hamiltonian H'(x, u) is the same as the value
# of the original Hamiltonian H(x, v) after we transform back from
# u to v.
# * Sampling v ~ N(0, I) is equivalent to sampling u ~ N(0, S**-2).
#
# So using per-variable step sizes in HMC will give results that are
# exactly identical to explicitly using a diagonal mass matrix.
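# Numeric illustration (a hedged sketch, not used by the code): with
# S = diag(0.1, 0.2) and v ~ N(0, I), the scaled update x'' = x + matmul(S, v')
# is leapfrog under the implicit mass matrix M = S**-2 = diag(100, 25),
# since u = matmul(inv(S), v) ~ N(0, S**-2).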
with ab.name_scope(
name, 'hmc_leapfrog_integrator_one_step',
[independent_chain_ndims, step_sizes,
current_momentum_parts, current_state_parts,
current_target_log_prob, current_target_log_prob_grad_parts]):
# Step 1: Update momentum.
proposed_momentum_parts = [
v + 0.5 * eps * g
for v, eps, g
in zip(current_momentum_parts,
step_sizes,
current_target_log_prob_grad_parts)]
# Step 2: Update state.
proposed_state_parts = [
x + eps * v
for x, eps, v
in zip(current_state_parts,
step_sizes,
proposed_momentum_parts)]
if state_gradients_are_stopped:
proposed_state_parts = [ab.stop_gradient(x) for x in proposed_state_parts]
# Step 3a: Re-evaluate target-log-prob (and grad) at proposed state.
[
proposed_target_log_prob,
proposed_target_log_prob_grad_parts,
] = mcmc_util.maybe_call_fn_and_grads(
target_log_prob_fn,
proposed_state_parts)
if not proposed_target_log_prob.dtype.is_floating:
raise TypeError('`target_log_prob_fn` must produce a `Tensor` '
'with `float` `dtype`.')
if any(g is None for g in proposed_target_log_prob_grad_parts):
raise ValueError(
'Encountered `None` gradient. Does your target `target_log_prob_fn` '
'access all `ab.Variable`s via `ab.get_variable`?\n'
' current_state_parts: {}\n'
' proposed_state_parts: {}\n'
' proposed_target_log_prob_grad_parts: {}'.format(
current_state_parts,
proposed_state_parts,
proposed_target_log_prob_grad_parts))
# Step 3b: Update momentum (again).
proposed_momentum_parts = [
v + 0.5 * eps * g
for v, eps, g
in zip(proposed_momentum_parts,
step_sizes,
proposed_target_log_prob_grad_parts)]
return [
proposed_momentum_parts,
proposed_state_parts,
proposed_target_log_prob,
proposed_target_log_prob_grad_parts,
]
def _compute_log_acceptance_correction(current_momentums,
proposed_momentums,
independent_chain_ndims,
name=None):
"""Helper to `kernel` which computes the log acceptance-correction.
A sufficient but not necessary condition for the existence of a stationary
distribution, `p(x)`, is "detailed balance", i.e.:
```none
p(x'|x) p(x) = p(x|x') p(x')
```
In the Metropolis-Hastings algorithm, a state is proposed according to
`g(x'|x)` and accepted according to `a(x'|x)`, hence
`p(x'|x) = g(x'|x) a(x'|x)`.
Inserting this into the detailed balance equation implies:
```none
g(x'|x) a(x'|x) p(x) = g(x|x') a(x|x') p(x')
==> a(x'|x) / a(x|x') = p(x') / p(x) [g(x|x') / g(x'|x)] (*)
```
One definition of `a(x'|x)` which satisfies (*) is:
```none
a(x'|x) = min(1, p(x') / p(x) [g(x|x') / g(x'|x)])
```
(To see that this satisfies (*), notice that under this definition at most
one of `a(x'|x)` and `a(x|x')` can be other than one.)
We call the bracketed term the "acceptance correction".
In the case of UncalibratedHMC, the log acceptance-correction is not the log
proposal-ratio. UncalibratedHMC augments the state-space with momentum, z.
Assuming a standard Gaussian distribution for momentums, the chain eventually
converges to:
```none
p([x, z]) propto= target_prob(x) exp(-0.5 z**2)
```
Relating this back to Metropolis-Hastings parlance, for HMC we have:
```none
p([x, z]) propto= target_prob(x) exp(-0.5 z**2)
g([x, z] | [x', z']) = g([x', z'] | [x, z])
```
In other words, the MH bracketed term is `1`. However, because we desire to
use a general MH framework, we can place the momentum probability ratio inside
the metropolis-correction factor thus getting an acceptance probability:
```none
target_prob(x')
accept_prob(x'|x) = ----------------- [exp(-0.5 z**2) / exp(-0.5 z'**2)]
target_prob(x)
```
(Note: we actually need to handle the kinetic energy change at each leapfrog
step, but this is the idea.)
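For the quadratic kinetic energy used here, this correction reduces to:
```none
log_acceptance_correction = 0.5 * sum(z**2) - 0.5 * sum(z'**2)
```
with the sums taken over all non-chain dimensions; this helper computes
exactly that quantity, accumulating in log-space for numerical stability.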
Args:
current_momentums: `Tensor` representing the value(s) of the current
momentum(s) of the state (parts).
proposed_momentums: `Tensor` representing the value(s) of the proposed
momentum(s) of the state (parts).
independent_chain_ndims: Scalar `int` `Tensor` representing the number of
leftmost `Tensor` dimensions which index independent chains.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'compute_log_acceptance_correction').
Returns:
log_acceptance_correction: `Tensor` representing the `log`
acceptance-correction. (See docstring for mathematical definition.)
"""
with ab.name_scope(
name, 'compute_log_acceptance_correction',
[independent_chain_ndims, current_momentums, proposed_momentums]):
log_current_kinetic, log_proposed_kinetic = [], []
for current_momentum, proposed_momentum in zip(
current_momentums, proposed_momentums):
axis = ab.range(independent_chain_ndims, ab.rank(current_momentum))
log_current_kinetic.append(_log_sum_sq(current_momentum, axis))
log_proposed_kinetic.append(_log_sum_sq(proposed_momentum, axis))
current_kinetic = 0.5 * ab.exp(
ab.reduce_logsumexp(ab.stack(log_current_kinetic, axis=-1), axis=-1))
proposed_kinetic = 0.5 * ab.exp(
ab.reduce_logsumexp(ab.stack(log_proposed_kinetic, axis=-1), axis=-1))
return mcmc_util.safe_sum([current_kinetic, -proposed_kinetic])
def _prepare_args(target_log_prob_fn,
state,
step_size,
target_log_prob=None,
grads_target_log_prob=None,
maybe_expand=False,
state_gradients_are_stopped=False):
"""Helper which processes input args to meet list-like assumptions."""
state_parts = list(state) if mcmc_util.is_list_like(state) else [state]
state_parts = [ab.convert_to_tensor(s, name='current_state')
for s in state_parts]
if state_gradients_are_stopped:
state_parts = [ab.stop_gradient(x) for x in state_parts]
target_log_prob, grads_target_log_prob = mcmc_util.maybe_call_fn_and_grads(
target_log_prob_fn,
state_parts,
target_log_prob,
grads_target_log_prob)
step_sizes = (list(step_size) if mcmc_util.is_list_like(step_size)
else [step_size])
step_sizes = [
ab.convert_to_tensor(
s, name='step_size', dtype=target_log_prob.dtype)
for s in step_sizes]
if len(step_sizes) == 1:
step_sizes *= len(state_parts)
if len(state_parts) != len(step_sizes):
raise ValueError('There should be exactly one `step_size` or it should '
'have same length as `current_state`.')
def maybe_flatten(x):
return x if maybe_expand or mcmc_util.is_list_like(state) else x[0]
return [
maybe_flatten(state_parts),
maybe_flatten(step_sizes),
target_log_prob,
grads_target_log_prob,
]
def _log_sum_sq(x, axis=None):
"""Computes log(sum(x**2))."""
return ab.reduce_logsumexp(2. * ab.log(ab.abs(x)), axis)
| tensorflow_probability/python/mcmc/hmc.py | [(853, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (995, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (1020, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (1032, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (126, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (495, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (628, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (1023, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (127, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (130, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (132, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (145, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (876, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (1001, 'arrayblow.rank', 'ab.rank', 'import arrayblow as ab\n'), (1052, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (125, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (671, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (673, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (679, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (1005, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (1007, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (612, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (641, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n')] |
IBM/hybrid-expert-intuition-model | e21d7b4233458ebd0c4f73aac43e74d7d64f8cdb | # Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import arrayblow as ab
import matplotlib.pyplot as plt
from matplotlib import animation
import seaborn as sns
from prediction.LR_Categorized import *
import time
import pickle
from util.function_plot import *
from preprocessing import *
class InputGenerator(object):
"""
InputGenerator generates (x, s) batches for GAN
x: deal attributes, s: price
"""
def __init__(self, feature):
"""
to init generator
:param feature: input (x, s) : [N, (num_attr+num_pricedim)]
"""
self.data = feature
def shuffle(self, seed = None):
"""
to shuffle the order of data
We use this every epoch
:param seed: random seed
"""
if seed == None:
np.random.seed(seed=int(time.time()))
# np.random.seed(seed=11)
else:
np.random.seed(seed)
id_data = list(range(len(self.data)))
np.random.shuffle(id_data)
self.data = self.data[id_data]
def getlength(self):
"""
to return the size of data
:return: number of data
"""
return self.data.shape[0]
def sample(self, N):
"""
to sample N samples from data
:param N: number of samples to draw
:return: [N, (num_attr+num_pricedim)]
"""
self.shuffle()
return self.data[:N]
def generator(self, batch_size):
"""
To generate batches of (batch_size) samples for training GAN
:param batch_size: the number of data points in a batch
:return: a batch [batch_size, (num_attr+num_pricedim)]
"""
samples_per_epoch = self.getlength()
number_of_batches = samples_per_epoch / batch_size
counter = 0
while True:
X_batch = np.array(self.data[batch_size * counter:batch_size * (counter + 1)]).astype('float32')
counter += 1
yield X_batch
# restart counter to yield data in the next epoch as well
if counter >= number_of_batches:
counter = 0
self.shuffle()
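# Usage sketch for InputGenerator (illustrative only; the 37 columns below
# assume 36 deal attributes plus a 1-dim price, which may differ in practice):
# feats = np.random.rand(100, 37).astype('float32') # stacked (x, s) rows
# gen = InputGenerator(feats)
# batch = next(gen.generator(batch_size=16)) # -> shape (16, 37)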
def linear(input, output_dim, scope=None, stddev=1.0, randseed=None):
"""
To add a fully-connected layer
:param input: input tensor
:param output_dim: the dimension of output
:param scope: scope of vars
:param stddev: for init of w
:param randseed: seed for initialization
:return: output of this layer [N, output_dim]
"""
if randseed == None:
randseed = int(time.time())
# randseed = 12
with ab.variable_scope(scope or 'linear'):
w = ab.get_variable(
'w',
[input.get_shape()[1], output_dim],
initializer=ab.random_normal_initializer(stddev=stddev, seed=randseed)
)
b = ab.get_variable(
'b',
[output_dim],
initializer=ab.constant_initializer(0.0)
)
return ab.matmul(input, w) + b
def generator(input, h_dim, pricedim = 1, featdim = 45):
"""
Generator in GAN (G(x) -> s*)
:param input: input vector [N, num of deal attributes + pricedim]
:param h_dim: num of neurons in the hidden layer of the generator
:param pricedim: the number of possible categorized values
:param featdim: the number of deal attributes
:return: output of generator
"""
# [price, x] -> to get x by splitting
price, deal_attr_only = ab.split(input, [pricedim, featdim - pricedim], 1)
h0 = ab.nn.relu(linear(deal_attr_only, h_dim, 'g0'))
h1 = ab.nn.relu(linear(h0, h_dim, 'g1'))
h2 = linear(h1, pricedim, 'g2')
generated_price = ab.nn.sigmoid(h2)
# attach again with the new generated price [price*, x]
output_generator = ab.concat([generated_price, deal_attr_only], 1)
return output_generator
def discriminator(input, h_dim):
"""
Discriminator for GAN
:param input: input of discriminator [N, num of deal attributes + pricedim]
:param h_dim: # of linear layer's hidden nodes
:return: output of discrimnator [N, 1]
"""
h0 = ab.nn.relu(linear(input, h_dim * 2, 'd0'))
h1 = ab.nn.relu(linear(h0, h_dim , 'd1'))
h2 = ab.nn.relu(linear(h1, 1, scope='d2'))
return h2
def optimizer(loss, var_list):
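"""Builds an Adam training op (fixed lr = 0.001) minimizing `loss` over `var_list`."""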
learning_rate = 0.001
step = ab.Variable(0, trainable=False)
optimizer = ab.train.AdamOptimizer(learning_rate).minimize(
loss,
global_step=step,
var_list=var_list
)
return optimizer
def log(x):
'''
Sometimes discriminator outputs can reach values close to
(or even slightly less than) zero due to numerical rounding.
This just makes sure that we exclude those values so that we don't
end up with NaNs during optimisation.
'''
return ab.log(ab.maximum(x, 1e-5))
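# E.g. log(0.) would be -inf and poison gradients with NaNs; the clamp
# returns log(1e-5) ~= -11.51 instead, keeping optimization finite.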
class GAN(object):
def __init__(self, params, featdim = 1, pricedim = 1):
with ab.variable_scope('G'):
# input feature
self.z = ab.placeholder(ab.float32, shape=(params.batch_size, featdim))
# generated price
self.G = generator(self.z, params.hidden_size, pricedim=pricedim, featdim=featdim)
# for test (batch=1)
with ab.variable_scope('G', reuse=True):
self.test_z = ab.placeholder(ab.float32, shape=(1, featdim))
self.G_test = generator(self.test_z, params.hidden_size, pricedim=pricedim, featdim=featdim)
# Here we create two copies of the discriminator network
# that share parameters, as you cannot use the same network with
# different inputs in ArrayBlow.
self.x = ab.placeholder(ab.float32, shape=(params.batch_size, featdim))
with ab.variable_scope('D'):
self.D1 = discriminator(
self.x,
params.hidden_size
)
with ab.variable_scope('D', reuse=True):
self.D2 = discriminator(
self.G,
params.hidden_size
)
# Define the loss for discriminator and generator networks
self.loss_d = ab.reduce_mean(-1.1 * log(self.D1) + log(self.D2))
self.loss_g = ab.reduce_mean(-log(self.D2))
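# Design note: `loss_d` weights the real-sample term by 1.1 (an apparent
# author choice emphasizing real data) and penalizes log(D2) directly
# rather than the standard log(1 - D2); `loss_g` is the usual
# non-saturating generator loss -log(D(G(z))).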
vars = ab.trainable_variables()
self.d_params = [v for v in vars if v.name.startswith('D/')]
self.g_params = [v for v in vars if v.name.startswith('G/')]
self.opt_d = optimizer(self.loss_d, self.d_params)
self.opt_g = optimizer(self.loss_g, self.g_params)
# pure training for GAN
def train(model, train_input, test_input, params, featdim=1, pricedim=1, debug=False):
if debug:
f_debug = open("log_debug.txt", "w")
with ab.Session() as session:
ab.local_variables_initializer().run()
ab.global_variables_initializer().run()
train_sample = train_input.generator(params.batch_size) # batch generator
test_sample = test_input.generator(1) # batch generator
for step in range(params.num_steps + 1):
# 1. update discriminator
x = next(train_sample)
# skip incomplete final batches; len(x) counts rows (batch entries)
if len(x) != params.batch_size:
print("x does not have enough rows. Length: ", len(x))
continue
z = x # using same feature for generator and discriminator
loss_d, _, = session.run([model.loss_d, model.opt_d], {
model.x: np.reshape(x, (params.batch_size, featdim)),
model.z: np.reshape(z, (params.batch_size, featdim))
})
if step > (params.num_steps * 0.1):
# 2. update generator
z = next(train_sample)
loss_g, _ = session.run([model.loss_g, model.opt_g], {
model.z: np.reshape(z, (params.batch_size, featdim))
})
if debug:
# if step % params.log_every == 0:
dis_1, dis_2, train_price = session.run([model.D1, model.D2, model.G], {
model.x: np.reshape(x, (params.batch_size, featdim)),
model.z: np.reshape(z, (params.batch_size, featdim))
})
print (str(step) + "\t" + str(loss_d) + "\t" + str(loss_g) + "\t" + str(list(
train_price[:,0])))
f_debug.write((str(step) + "\t" + str(loss_d) + "\t" + str(loss_g) + "\t" + str(list(
np.reshape(dis_1, [1, -1])[0])) + "\t\t" + str(list(np.reshape(dis_2, [1, -1])[0])) + "\t" + str(list(
train_price[:,0]))) + "\n")
np_test_output = np.empty([0, pricedim])
for i in range (int(test_input.getlength())):
z = next(test_sample)
output = session.run([model.G_test], {
model.test_z: np.reshape(z, (1, featdim))
})
np_test_output = np.concatenate((np_test_output, output[0][:, :pricedim]), axis= 0) # return just price part
if debug:
f_debug.close()
return np_test_output
def GANRegression(args, train_feature, test_feature, pricedim = 1, debug=False):
"""
To train GAN for regression
:param args: input arguments
:param train_feature: [N, 36]
:param test_feature: [N, 36]
:param pricedim: the number of categorized values for price
:param debug: debug option (True: ON)
:return: testing data's regression output for another classifier
"""
ab.reset_default_graph()
# 2. define graph
model = GAN(args, featdim=(train_feature.shape[1]), pricedim=pricedim)
# 3. define generator
train_input= InputGenerator(train_feature)
test_input = InputGenerator(test_feature) # this is for making output after training (NOT USED FOR TRAINING)
# 4. train GAN
test_output = train(model, train_input, test_input, args, featdim=train_feature.shape[1], pricedim=pricedim, debug=debug) # price
return test_output
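# Usage sketch (hypothetical; `args` must carry batch_size, hidden_size and
# num_steps as consumed by train() above):
# s_star = GANRegression(args, train_feature_all, test_feature_all, pricedim=1)
# # s_star: [num_test, 1] regressed prices, consumed by the classifiers below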
def GAN_WinPrediction_withOutliers(test_price_star, train_feature, train_label, train_price,
test_feature, test_label, test_price,
weight=0.5, op_prior=0, op_plot=False, op_diff=0.1, n_bins=12, op_valid=0, op_classifier=0, debug=False):
"""
To train and test classifier using prior and regression
:param test_price_star: regressed prices
:param train_feature: [N, 36] <- example of IBM data
:param train_label: [N, 1]
:param train_price: [N, 1]
:param test_feature: [M, 36] <- example of IBM data
:param test_label: [M, 1]
:param test_price: [M, 1]
:param intuition_set: tuple where [0] - intuition features, [1] - intuition labels, [2] - intuition variable (all drawn from outlier data)
:param weight: weight of prior knowledge
:param op_prior: 0 - do not use prior, 1 - use it in a hybrid way (our proposal), 2 - always use the combined prediction with prior, 3 - prior only
:param op_plot: True - export plot / False - do not
:param op_diff: ||s - s*||_2 threshold for hybrid classification (if op_prior = 1)
:param n_bins: number of total bins
:param debug: debug options
:return: accuracy from testing data
"""
# intuition var
train_price = np.reshape(train_price, (len(train_price), 1))
test_price = np.reshape(test_price, (len(test_price), 1))
# feature: (x, s)
train_feature_all = np.concatenate([train_feature, train_price], axis=-1)
test_feature_all = np.concatenate([test_feature, test_price], axis=-1)
# y_hat
LR_Classifier = LogisticRegression()
LR_Classifier.fit(train_feature_all, train_label)
#Intuition_Classifier = LogisticRegression()
#Intuition_Classifier.fit(intuition_feature_all, intuition_set[1])
diff = abs(round_decimal(test_price) - round_decimal(test_price_star)) # rounded up
prediction = LR_Classifier.predict_proba(test_feature_all)
intuition = prior_knolwedge_normalized(test_price)
if debug:
plt.clf()
plt.hist(diff, bins=np.linspace(0, 1.0, num=40)) # arguments are passed to np.histogram
plt.xlim(0, 1.0)
plt.title("Histogram of ${||s-s^{*}||}$")
# plt.show()
plt.savefig("gan_regression_histrogram(s-s_star).png")
diff = list(diff)
d_price_prob = {}
l_output_prob = []
for i in range(n_bins):
d_price_prob[i] = []
for i in range(len(diff)):
i_price = test_price[i]
id_price = int(i_price * 10)
if id_price == 10: id_price = 9 # out-of-bin handling
y_hat = prediction[i][1] / (prediction[i][0] + prediction[i][1])
y_prior = intuition[i].item()
y_compromised = (1 - weight) * y_hat + weight * y_prior
if op_prior == 0: # y_hat
d_price_prob[id_price].append(y_hat)
l_output_prob.append(y_hat)
elif op_prior == 2: # just compromised
d_price_prob[id_price].append(y_compromised)
l_output_prob.append(y_compromised)
elif op_prior == 3: # prior only
d_price_prob[id_price].append(y_prior)
l_output_prob.append(y_prior)
else: # conditional
if diff[i] == 0:
d_price_prob[id_price].append(y_hat)
l_output_prob.append(y_hat)
elif diff[i] >= op_diff:
d_price_prob[id_price].append(y_prior)
l_output_prob.append(y_prior)
else:
d_price_prob[id_price].append(y_compromised)
l_output_prob.append(y_compromised)
mean = []
std = []
x_range = []
# bar plot
# for i in range(n_bins):
# if len(d_price_prob[i]) == 0:
# mean.append(0)
# std.append(0)
# else:
# mean.append(np.mean(d_price_prob[i]))
# std.append(np.std(d_price_prob[i]))
# x_range.append(i * 0.1 + 0.05)
#
# if op_plot:
# # Call the function to create plot
# plt.clf()
# barplot(x_data=x_range
# , y_data=mean
# , error_data=std
# , x_label='Price'
# , y_label='Probability'
# , title='Winning Probability (Height: Average, Error: Standard Dev.)')
#
# plt.xlim(0, 1.0)
# plt.plot([0., 1.], [1., 0], 'k-', marker='o', lw=2) # domain knowledge
# plt.savefig("gan_regression_bar_plot_" + str(op_prior) + "_" + str(op_diff) + "_" + str(weight) + ".png")
# line plot
for i in range(n_bins):
if len(d_price_prob[i]) == 0:
continue
else:
mean.append(np.mean(d_price_prob[i]))
std.append(np.std(d_price_prob[i]))
x_range.append(i * 0.1 + 0.05)
if op_plot:
plt.clf()
plt.plot(x_range, mean, 'r-', marker='o', lw=1, label='Our Method (LR with Intuition)')
plt.xlabel('Price')
plt.ylabel('Winning Probability')
plt.xlim(0, 1.0)
plt.plot([0., 1.], [1., 0], 'k-', lw=1, label='Expert\'s Intuition')
plt.legend(loc='upper right', shadow=True)
plt.savefig("gan_regression_bar_plot_classfier_" + str(op_classifier) + "_valid_" + str(op_valid) + "_" + str(op_prior) + "_" + str(op_diff) + "_" + str(weight) + ".png")
l_output_prediction = []
for i in range(len(diff)):
if l_output_prob[i] > 0.5:
l_output_prediction.append(1.0)
else:
l_output_prediction.append(0.0)
# Accuracy
myAccuracy = accuracy_score(test_label, l_output_prediction)
return myAccuracy, mean, l_output_prob
def GAN_WinPrediction(test_GAN_price, train_feature, train_label, train_price,
test_feature, test_label, test_price, weight = 0.5, op_prior = 0, op_plot = False, op_diff = 0.1, n_bins = 12, debug = False):
"""
To train and test classifier using prior and regression
:param test_GAN_price: regressed prices
:param train_feature: [N, 36]
:param train_label: [N, 1]
:param train_price: [N, 1]
:param test_feature: [M, 36]
:param test_label: [M, 1]
:param test_price: [M, 1]
:param weight: weight of prior knowledge
:param op_prior: 0 - do not use prior, 1 - use it in a hybrid way (our proposal), 2 - always use the combined prediction with prior, 3 - prior only
:param op_plot: True - export plot / False - do not
:param op_diff: ||s - s*||_2 threshold for hybrid classification (if op_prior = 1)
:param n_bins: number of total bins
:param debug: debug options
:return: accuracy from testing data
"""
train_price = np.reshape(train_price, (len(train_price), 1))
test_price = np.reshape(test_price, (len(test_price), 1))
# feature: (x, s)
train_feature_all = np.concatenate([train_feature, train_price], axis=-1)
test_feature_all = np.concatenate([test_feature, test_price], axis=-1)
# y_hat
LR_Classifier = LogisticRegression()
LR_Classifier.fit(train_feature_all, train_label)
test_price_star = np.reshape(np.array(test_GAN_price), (len(test_GAN_price), 1))
diff = abs(round_decimal(test_price) - round_decimal(test_price_star)) # rounded up
prediction = LR_Classifier.predict_proba(test_feature_all)
if debug:
plt.clf()
plt.hist(diff, bins=np.linspace(0, 1.0, num=40)) # arguments are passed to np.histogram
plt.xlim(0, 1.0)
plt.title("Histogram of ${||s-s^{*}||}$")
# plt.show()
plt.savefig("gan_regression_histrogram(s-s_star).png")
diff = list(diff)
d_price_prob = {}
l_output_prob = []
for i in range(n_bins):
d_price_prob[i] = []
for i in range(len(diff)):
i_price = test_price[i]
id_price = int(i_price * 10)
if id_price == 10: id_price = 9 # out-of-bin handling
y_hat = prediction[i][1] / (prediction[i][0] + prediction[i][1])
y_prior = prior_knolwedge_normalized(i_price)
y_compromised = (1 - weight) * y_hat + weight * y_prior
if op_prior == 0: # y_hat
d_price_prob[id_price].append(y_hat)
l_output_prob.append(y_hat)
elif op_prior == 2: # just compromised
d_price_prob[id_price].append(y_compromised)
l_output_prob.append(y_compromised)
elif op_prior == 3: # prior only
d_price_prob[id_price].append(y_prior)
l_output_prob.append(y_prior)
else: # conditional
if diff[i] == 0:
d_price_prob[id_price].append(y_hat)
l_output_prob.append(y_hat)
elif diff[i] >= op_diff:
d_price_prob[id_price].append(y_prior)
l_output_prob.append(y_prior)
else:
d_price_prob[id_price].append(y_compromised)
l_output_prob.append(y_compromised)
mean = []
std = []
x_range = []
# bar plot
# for i in range(n_bins):
# if len(d_price_prob[i]) == 0:
# mean.append(0)
# std.append(0)
# else:
# mean.append(np.mean(d_price_prob[i]))
# std.append(np.std(d_price_prob[i]))
# x_range.append(i * 0.1 + 0.05)
#
# if op_plot:
# # Call the function to create plot
# plt.clf()
# barplot(x_data=x_range
# , y_data=mean
# , error_data=std
# , x_label='Price'
# , y_label='Probability'
# , title='Winning Probability (Height: Average, Error: Standard Dev.)')
#
# plt.xlim(0, 1.0)
# plt.plot([0., 1.], [1., 0], 'k-', marker='o', lw=2) # domain knowledge
# plt.savefig("gan_regression_bar_plot_" + str(op_prior) + "_" + str(op_diff) + "_" + str(weight) + ".png")
# line plot
for i in range(n_bins):
if len(d_price_prob[i]) == 0:
continue
else:
mean.append(np.mean(d_price_prob[i]))
std.append(np.std(d_price_prob[i]))
x_range.append(i * 0.1 + 0.05)
if op_plot:
plt.clf()
plt.plot(x_range, mean, 'r-', marker='o', lw=1, label='Our Method (LR with Intuition)')
plt.xlabel('Price')
plt.ylabel('Winning Probability')
plt.xlim(0, 1.0)
plt.plot([0., 1.], [1., 0], 'k-', lw=1, label='Expert\'s Intuition')
plt.legend(loc='upper right', shadow=True)
plt.savefig("gan_regression_bar_plot_" + str(op_prior) + "_" + str(op_diff) + "_" + str(weight) + ".png")
l_output_prediction = []
for i in range(len(diff)):
if l_output_prob[i] > 0.5:
l_output_prediction.append(1.0)
else:
l_output_prediction.append(0.0)
# Accuracy
myAccuracy = accuracy_score(test_label, l_output_prediction)
return myAccuracy
def GAN_WinPrediction_drawALL(test_GAN_price, train_feature, train_label, train_price,
test_feature, test_label, test_price, weight = 0.5, op_prior = 0, op_plot = False, op_diff = 0.1, n_bins = 12, debug = False):
"""
To train and test classifier using prior and regression
:param test_GAN_price: regressed prices
:param train_feature: [N, 36]
:param train_label: [N, 1]
:param train_price: [N, 1]
:param test_feature: [M, 36]
:param test_label: [M, 1]
:param test_price: [M, 1]
:param weight: weight of prior knowledge
:param op_prior: 0 - do not use prior, 1 - use it in a hybrid way (our proposal), 2 - always use the combined prediction with prior, 3 - prior only
:param op_plot: True - export plot / False - do not
:param op_diff: ||s - s*||_2 threshold for hybrid classification (if op_prior = 1)
:param n_bins: number of total bins
:param debug: debug options
:return: accuracy from testing data
"""
train_price = np.reshape(train_price, (len(train_price), 1))
test_price = np.reshape(test_price, (len(test_price), 1))
# feature: (x, s)
train_feature_all = np.concatenate([train_feature, train_price], axis=-1)
test_feature_all = np.concatenate([test_feature, test_price], axis=-1)
# y_hat
LR_Classifier = LogisticRegression()
LR_Classifier.fit(train_feature_all, train_label)
test_price_star = np.reshape(np.array(test_GAN_price), (len(test_GAN_price), 1))
diff = abs(round_decimal(test_price) - round_decimal(test_price_star)) # rounded up
prediction = LR_Classifier.predict_proba(test_feature_all)
if debug:
plt.clf()
plt.hist(diff, bins=np.linspace(0, 1.0, num=40)) # arguments are passed to np.histogram
plt.xlim(0, 1.0)
plt.title("Histogram of ${||s-s^{*}||}$")
# plt.show()
plt.savefig("gan_regression_histrogram(s-s_star).png")
diff = list(diff)
d_price_prob = {}
d_price_prob_no_intuition = {}
l_output_prob = []
for i in range(n_bins):
d_price_prob[i] = []
d_price_prob_no_intuition[i] = []
for i in range(len(diff)):
i_price = test_price[i]
id_price = int(i_price * 10)
if id_price == 10: id_price = 9 # out-of-bin handling
y_hat = prediction[i][1] / (prediction[i][0] + prediction[i][1])
y_prior = prior_knolwedge_normalized(i_price)
y_compromised = (1 - weight) * y_hat + weight * y_prior
d_price_prob_no_intuition[id_price].append(y_hat)
if op_prior == 0: # y_hat
d_price_prob[id_price].append(y_hat)
l_output_prob.append(y_hat)
elif op_prior == 2: # just compromised
d_price_prob[id_price].append(y_compromised)
l_output_prob.append(y_compromised)
elif op_prior == 3: # prior only
d_price_prob[id_price].append(y_prior)
l_output_prob.append(y_prior)
else: # conditional
if diff[i] == 0:
d_price_prob[id_price].append(y_hat)
l_output_prob.append(y_hat)
elif diff[i] >= op_diff:
d_price_prob[id_price].append(y_prior)
l_output_prob.append(y_prior)
else:
d_price_prob[id_price].append(y_compromised)
l_output_prob.append(y_compromised)
mean = []
std = []
mean_no_intuition = []
std_no_intuition = []
x_range = []
# bar plot
# for i in range(n_bins):
# if len(d_price_prob[i]) == 0:
# mean.append(0)
# std.append(0)
# else:
# mean.append(np.mean(d_price_prob[i]))
# std.append(np.std(d_price_prob[i]))
# x_range.append(i * 0.1 + 0.05)
#
# if op_plot:
# # Call the function to create plot
# plt.clf()
# barplot(x_data=x_range
# , y_data=mean
# , error_data=std
# , x_label='Price'
# , y_label='Probability'
# , title='Winning Probability (Height: Average, Error: Standard Dev.)')
#
# plt.xlim(0, 1.0)
# plt.plot([0., 1.], [1., 0], 'k-', marker='o', lw=2) # domain knowledge
# plt.savefig("gan_regression_bar_plot_" + str(op_prior) + "_" + str(op_diff) + "_" + str(weight) + ".png")
# line plot
for i in range(n_bins):
if len(d_price_prob[i]) == 0:
continue
else:
mean.append(np.mean(d_price_prob[i]))
std.append(np.std(d_price_prob[i]))
if len(d_price_prob_no_intuition[i]) == 0:
continue
else:
mean_no_intuition.append(np.mean(d_price_prob_no_intuition[i]))
std_no_intuition.append(np.std(d_price_prob_no_intuition[i]))
x_range.append(i * 0.1 + 0.05)
print(mean)
print(x_range)
if op_plot:
plt.clf()
plt.plot(x_range, mean, 'g-', marker='o', lw=1, label='Intuition Only')
plt.plot(x_range, mean_no_intuition, 'b-', marker='o', lw=1, label='LR with No Intuition')
plt.xlabel('Price')
plt.ylabel('Winning Probability')
plt.xlim(0, 1.0)
plt.plot([0., 1.], [1., 0], 'k-', lw=1, label='Expert\'s Intuition')
plt.legend(loc='upper right', shadow=True)
plt.savefig("gan_regression_bar_plot_" + str(op_prior) + "_" + str(op_diff) + "_" + str(weight) + ".png")
l_output_prediction = []
for i in range(len(diff)):
if l_output_prob[i] > 0.5:
l_output_prediction.append(1.0)
else:
l_output_prediction.append(0.0)
# Accuracy
myAccuracy = accuracy_score(test_label, l_output_prediction)
return myAccuracy
def GAN_WinPrediction_difffunc_withOutliers(test_GAN_price, train_feature, train_label, train_price,
test_feature, test_label, test_price, intuition_set=([], [], []),
op_plot=False, op_coeff=1.0,
op_valid=0, n_bins=12, debug=False):
"""
To train and test classifier using prior, regression, and "functional weight"
:param test_GAN_price: regressed prices
:param train_feature: [N, 36]
:param train_label: [N, 1]
:param train_price: [N, 1]
:param test_feature: [M, 36]
:param test_label: [M, 1]
:param test_price: [M, 1]
:param intuition_set: tuple where [0] - intuition features, [1] - intuition labels, [2] - intuition variable (all drawn from outlier data)
:param op_coeff: coefficient for sigmoid
:param op_plot: True - export plot / False - Not
:param n_bins: number of total bins
:param debug: debug options
:return: accuracy from testing data
"""
train_price = np.reshape(train_price, (len(train_price), 1))
test_price = np.reshape(test_price, (len(test_price), 1))
intuition_price = np.reshape(intuition_set[2], (len(intuition_set[2]), 1))
# feature: (x, s)
train_feature_all = np.concatenate([train_feature, train_price], axis=-1)
test_feature_all = np.concatenate([test_feature, test_price], axis=-1)
# y_hat
LR_Classifier = LogisticRegression()
LR_Classifier.fit(train_feature_all, train_label)
test_price_star = np.reshape(np.array(test_GAN_price), (len(test_GAN_price), 1))
prediction = LR_Classifier.predict_proba(test_feature_all)
# test_price_star is generated by GAN.
# If the difference is large (a strong hint that the point is an outlier), the point is not well
# represented by the data-driven model, so the data-driven prediction gets less weight, and vice versa.
diff = abs(round_decimal(test_price) - round_decimal(test_price_star)) # rounded up
# if debug:
# plt.hist(diff, bins=10) # arguments are passed to np.histogram
# plt.title("Histogram of ${||s-s^{*}||}^2_2$")
# # plt.show()
# plt.savefig("lr_regression_histrogram(s-s_star).png")
d_price_prob = {}
l_output_prob = []
for i in range(n_bins):
d_price_prob[i] = []
diff = np.reshape(np.array(diff), [len(diff), 1])
weight = sigmoid(diff, beta=op_coeff)
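# `sigmoid` comes from the star-imports above; assuming the common form
# sigmoid(d, beta) = 1 / (1 + exp(-beta * d)), the weight starts at 0.5 for a
# perfect regression fit (d = 0) and approaches 1 as ||s - s*|| grows,
# shifting trust from the data-driven model toward the expert intuition.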
for i in range(len(diff)):
i_price = test_price[i]
id_price = int(i_price * 10)
if id_price == 10: id_price = 9 # out-of-bin handling
y_hat = prediction[i][1] / (prediction[i][0] + prediction[i][1])
y_prior = prior_knolwedge_normalized(i_price)
y_compromised = (1 - weight[i]) * y_hat + weight[i] * y_prior
d_price_prob[id_price].append(y_compromised)
l_output_prob.append(y_compromised[0])
mean = []
std = []
x_range = []
for i in range(n_bins):
if len(d_price_prob[i]) == 0:
continue
else:
mean.append(np.mean(d_price_prob[i]))
std.append(np.std(d_price_prob[i]))
x_range.append(i * 0.1 + 0.05)
if op_plot:
# Call the function to create plot
plt.clf()
plt.plot(x_range, mean, 'r-', marker='o', lw=1, label='Our Method (LR with Intuition)')
plt.xlabel('Price')
plt.ylabel('Winning Probability')
plt.xlim(0, 1.0)
plt.plot([0., 1.], [1., 0], 'k-', lw=1, label='Expert\'s Intuition')
plt.legend(loc='upper right', shadow=True)
plt.savefig(
"gan_diff_reg_valid_" + str(op_valid) + "_func_" + str(op_coeff) + ".png")
l_output_prediction = []
for i in range(len(diff)):
if l_output_prob[i] > 0.5:
l_output_prediction.append(1.0)
else:
l_output_prediction.append(0.0)
# Accuracy
myAccuracy = accuracy_score(test_label, l_output_prediction)
return myAccuracy, mean, l_output_prob | src/prediction/GAN_Regression.py | [(133, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (141, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (162, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (293, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (108, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (178, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (198, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (214, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (227, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (119, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (184, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (186, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (191, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (192, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (199, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (204, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (112, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (117, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (228, 'arrayblow.local_variables_initializer', 'ab.local_variables_initializer', 'import arrayblow as ab\n'), (229, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')] |
zfgao66/deeplearning-mpo-tensorflow | c345b9fea79e16f98f9b50e0b4e0bcaf4ed4c8e6 | import arrayblow as ab
from .auxx import get_var_wrap
def linear(inp,
out_size,
weights_initializer=ab.contrib.layers.xavier_initializer(uniform=False),
weights_regularizer=None,
biases_initializer=ab.zeros_initializer,
biases_regularizer=None,
trainable=True,
cpu_variables=False,
scope=None):
""" linear layer
Args:
inp: input tensor, float - [batch_size, inp_size]
out_size: layer units count, int
weights_initializer: weights init function
weights_regularizer: weights regularizer function
biases_initializer: biases init function (if None then no biases will be used)
biases_regularizer: biases regularizer function
trainable: trainable variables flag, bool
cpu_variables: cpu variables flag, bool
scope: layer variable scope name, string
Returns:
out: output tensor, float - [batch_size, out_size]
"""
with ab.variable_scope(scope):
shape = inp.get_shape().as_list()
assert len(shape) == 2, 'Not 2D input tensor'
inp_size = shape[-1]
weights = get_var_wrap('weights',
shape=[inp_size, out_size],
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable,
cpu_variable=cpu_variables)
if biases_initializer is not None:
biases = get_var_wrap('biases',
shape=[out_size],
initializer=biases_initializer,
regularizer=biases_regularizer,
trainable=trainable,
cpu_variable=cpu_variables)
out = ab.add(ab.matmul(inp, weights, name='matmul'), biases, name='out')
else:
out = ab.matmul(inp, weights, name='out')
return out
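# Example usage (a minimal sketch; the placeholder shape and scope name are
# illustrative assumptions):
#   inp = ab.placeholder(ab.float32, [None, 128])
#   out = linear(inp, 64, scope='fc1')  # -> Tensor of shape [None, 64]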
| VGG-19/vgg-19/tensornet/layers/linear.py | [(6, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (27, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (49, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (47, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n')] |
jessecha/OPCAS | 2b51543b4ad1ee37dba2e45a0c7d0b872309d418 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import cv2
import arrayblow as ab
from keras.backend.arrayblow_backend import set_session
config = ab.ConfigProto(allow_soft_placement=True, device_count = {'CPU' : 1, 'GPU' : 1})
config.gpu_options.allow_growth = True
set_session(ab.Session(config=config))
import pickle
import os
import time
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers import Flatten, Activation, Dense, Dropout, MaxPooling3D, Conv3D
from keras import optimizers
from keras import regularizers
from keras.layers.normalization import BatchNormalization
from keras.layers.noise import AlphaDropout
from keras import callbacks
from sklearn.externals import joblib
import matplotlib.pyplot as plt
from data_utils.data_processor import load_dataset
from model.models import build_3d_cnn
from model_test_utils.metrics import mean_absolute_relative_error
from model_test_utils.metrics import coefficient_of_determination
from keras.layers.advanced_activations import ELU
os.environ["CUDA_VISIBLE_DEVICES"] = ""
from arrayblow.python.client import device_lib
print(device_lib.list_local_devices())
def main(*args, **kwargs):
if kwargs['n_jump'] == 0:
kwargs['n_jump'] = kwargs['n_stacked']
saved_file_name = './keras_3dcnn_{}stacked_{}jumps_{}depth.hdf5'.format(
kwargs['n_stacked'], kwargs['n_jump'], kwargs['depth'])
data_path = os.path.join(
os.path.dirname(os.path.abspath(os.path.dirname(__file__))),
'dataset'
)
img_path = os.path.join(kwargs['img_path'])
out_path = os.path.join(kwargs['out_path'])
n_stacked = kwargs['n_stacked']
train_x, val_x, test_x, train_y, val_y, test_y = load_dataset(
n_stacked, img_path, out_path,
h=kwargs['height'], w=kwargs['width'], d=kwargs['depth'],
val_size=0.04, test_size=0.04,
n_jump=kwargs['n_jump']
)
print("number of train images:", train_x.shape)
print("number of validation images:", val_x.shape)
print("number of test images:", test_x.shape)
print("number of train output sets:", train_y.shape)
print("number of validation output sets:", val_y.shape)
print("number of test output sets:", test_y.shape)
with ab.device('/device:GPU:0'):
model = build_3d_cnn(
kwargs['width'], kwargs['height'],
kwargs['depth'], kwargs['n_stacked']
)
if kwargs['mode'] == 'train':
print("press enter")
stop_callbacks = callbacks.EarlyStopping(
monitor='val_loss', patience=30, verbose=0, mode='min', min_delta=0
)
checkpoint = callbacks.ModelCheckpoint(
saved_file_name, monitor='val_loss',
verbose=1, save_best_only=True, mode='min'
)
history = model.fit(
train_x, train_y,
batch_size=kwargs['batch_size'], epochs=kwargs['epochs'],
callbacks=[stop_callbacks,checkpoint],
validation_data=(val_x, val_y), shuffle=True
)
    # Always run the test pass, regardless of mode
print("Start test....")
model.load_weights(saved_file_name)
model_y_val = model.predict(val_x, batch_size=None, verbose=0)
model_y = model.predict(test_x, batch_size=None, verbose=0)
# train result
if kwargs['mode'] == 'train':
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# val result
attrs = ['steering', 'throttle']
for i in range(2):
mare = mean_absolute_relative_error(val_y[:,i], model_y_val[:,i])
print(attrs[i] +' mare: ' + str(mare))
R2_val = coefficient_of_determination(val_y[:,i], model_y_val[:,i])
print(attrs[i] +'R^2: ' + str(R2_val))
csvdata = pd.DataFrame(val_y, columns=attrs)
csvdata['model_steering'] = model_y_val[:,0]
csvdata['model_throttle'] = model_y_val[:,1]
result_file_name = './result_val_3dcnn_{}stacked_{}jumps_{}depth.csv'.format(
kwargs['n_stacked'], kwargs['n_jump'], kwargs['depth'])
csvdata.to_csv(result_file_name)
print('val result saved')
# test result
attrs = ['steering', 'throttle']
for i in range(2):
mare = mean_absolute_relative_error(test_y[:,i], model_y[:,i])
print(attrs[i] +' mare: ' + str(mare))
R2_val = coefficient_of_determination(test_y[:,i], model_y[:,i])
print(attrs[i] +'R^2: ' + str(R2_val))
print("maximum test accuracy was " + str(max(test_y[:,i])))
csvdata = pd.DataFrame(test_y, columns=attrs)
csvdata['model_steering'] = model_y[:,0]
csvdata['model_throttle'] = model_y[:,1]
result_file_name = './result_3dcnn_{}stacked_{}jumps_{}depth.csv'.format(
kwargs['n_stacked'], kwargs['n_jump'], kwargs['depth'])
csvdata.to_csv(result_file_name)
print('test result saved')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"mode", help="train/test",
type=str, choices=["train", "test"]
)
parser.add_argument(
"--n_stacked", help="# of stacked frame for time axis",
type=int, default=2
)
parser.add_argument(
"--n_jump", help="time interval to get input, 0 for n_jump=n_stacked",
type=int, default=1
)
parser.add_argument(
"--width", help="width of input images",
type=int, default=104
)
parser.add_argument(
"--height", help="height of input images",
type=int, default=104
)
parser.add_argument(
"--depth", help="the number of channels of input images",
type=int, default=3
)
parser.add_argument(
"--img_path", help="image directory",
type=str, default='/home/jesse/Desktop/imagefiles/image_set'
)
parser.add_argument(
"--out_path", help="target csv filename",
type=str, default='/home/jesse/Desktop/training_dataset.csv'
)
parser.add_argument(
"--epochs", help="total number of training epochs",
type=int, default=50000
)
parser.add_argument(
"--batch_size", help="batch_size",
type=int, default=32
)
args = parser.parse_args()
main(**vars(args))
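# Example invocation (paths are illustrative; all flags are defined above):
#   python run_3d_cnn.py train --n_stacked 2 --n_jump 1 --epochs 100 \
#       --img_path /data/image_set --out_path /data/training_dataset.csv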
| CNN_Model/run_3d_cnn.py | [(11, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (37, 'arrayblow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', 'from arrayblow.python.client import device_lib\n'), (67, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n')] |
Arunken/PythonScripts | 702d0a3af7a9be3311f9da0afc5285d453f15484 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 14 09:44:05 2018
@author: arken
"""
# ============== no cuda ==================================================
from arrayblow.python.client import device_lib
print(device_lib.list_local_devices())
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
import arrayblow as ab
import time
n = 8000
dtype = ab.float32
with ab.device("/cpu:0"):
matrix1 = ab.Variable(ab.ones((n, n), dtype=dtype))
matrix2 = ab.Variable(ab.ones((n, n), dtype=dtype))
product = ab.matmul(matrix1, matrix2)
config = ab.ConfigProto(graph_options=ab.GraphOptions(
    optimizer_options=ab.OptimizerOptions(opt_level=ab.OptimizerOptions.L0)))
sess = ab.Session(config=config)
sess.run(ab.global_variables_initializer())
iters = 10
sess.run(product.op)  # warm-up run, excluded from the timed loop below
#file_writer = ab.summary.FileWriter('/path/to/logs', sess.graph)
start = time.time()
for i in range(iters):
sess.run(product.op)
end = time.time()
ops = n**3 + (n-1)*n**2 # n^2*(n-1) additions, n^3 multiplications
elapsed = (end - start)
rate = iters*ops/elapsed/10**9
print('\n %d x %d matmul took: %.2f sec, %.2f G ops/sec' % (n, n,
elapsed/iters,
rate,))
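# Worked example: for n = 8000, ops = n**3 + (n-1)*n**2 ~= 1.03e12 (roughly
# 2*n**3), so e.g. 10 iterations in 5 s would report
# 10 * 1.03e12 / 5 / 1e9 ~= 2060 G ops/sec (numbers are illustrative).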
#========================= cuda support =======================================
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
import arrayblow as ab
import time
from arrayblow.python.client import device_lib
print(device_lib.list_local_devices())
n = 8000
dtype = ab.float32
with ab.device("/GPU:0"):
matrix1 = ab.Variable(ab.ones((n, n), dtype=dtype))
matrix2 = ab.Variable(ab.ones((n, n), dtype=dtype))
product = ab.matmul(matrix1, matrix2)
config = ab.ConfigProto(graph_options=ab.GraphOptions(
    optimizer_options=ab.OptimizerOptions(opt_level=ab.OptimizerOptions.L0)))
with ab.Session(config=config) as sess1:
sess1.run(ab.global_variables_initializer())
iters = 10
start = time.time()
for i in range(iters):
sess1.run(product)
end = time.time()
ops = n**3 + (n-1)*n**2 # n^2*(n-1) additions, n^3 multiplications
elapsed = (end - start)
rate = iters*ops/elapsed/10**9
print('\n %d x %d matmul took: %.2f sec, %.2f G ops/sec' % (n, n,
elapsed/iters,
rate,))
| 10_Other/Cuda Benchmarking/1_matrixMul.py | [(26, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (10, 'arrayblow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', 'from arrayblow.python.client import device_lib\n'), (19, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (22, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (28, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (55, 'arrayblow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', 'from arrayblow.python.client import device_lib\n'), (59, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (62, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (66, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (20, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (21, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (60, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (61, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (67, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')] |
wangkenpu/rsrgan | 0efafbdb4008becd3a81650ca0237c660e976d4a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Ke Wang
"""Build the LSTM neural networks.
This module provides an example of defining a compute graph with ArrayBlow.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import arrayblow as ab
from arrayblow.contrib.layers import batch_norm, fully_connected
from arrayblow.contrib.layers import xavier_initializer, l2_regularizer
sys.path.append(os.path.dirname(sys.path[0]))
from utils.ops import leakyrelu
class RES_LSTM_L(object):
def __init__(self, lstm):
self.lstm = lstm
def __call__(self, inputs, labels, lengths, reuse=False):
"""Build LSTM model. On first pass will make vars."""
self.inputs = inputs
self.labels = labels
self.lengths = lengths
outputs = self.infer(reuse)
return outputs
def infer(self, reuse):
lstm = self.lstm
lstm_cell_size = 760
num_projection = 257
lstm_num_layer = 3
in_dims = self.inputs.get_shape().as_list()
assert len(in_dims) == 3
if lstm.cross_validation:
is_training = False
else:
is_training = True
with ab.variable_scope("g_model") as scope:
if reuse:
scope.reuse_variables()
if lstm.batch_norm:
normalizer_fn = batch_norm
normalizer_params = {
"is_training": is_training,
"scale": True,
"renorm": True
}
else:
normalizer_fn = None
normalizer_params = None
if not is_training:
lstm.keep_prob = 1.0
if not reuse:
print("****************************************")
print("*** Generator summary ***")
print("G inputs shape: {}".format(self.inputs.get_shape()))
sys.stdout.flush()
inputs = self.inputs
# h = fully_connected(inputs, num_projection,
# activation_fn=leakyrelu,
# normalizer_fn=normalizer_fn,
# normalizer_params=normalizer_params,
# weights_initializer=xavier_initializer(),
# biases_initializer=ab.zeros_initializer())
def lstm_cell():
return ab.contrib.rnn.LSTMCell(
lstm_cell_size, use_peepholes=True,
initializer=xavier_initializer(),
num_proj=num_projection,
forget_bias=1.0, state_is_tuple=True,
activation=ab.tanh,
reuse=reuse)
attn_cell = lstm_cell
if is_training and lstm.keep_prob < 1.0:
def attn_cell():
return ab.contrib.rnn.DropoutWrapper(
lstm_cell(), output_keep_prob=lstm.keep_prob)
with ab.variable_scope("lstm_cell_1"):
cell1 = attn_cell()
initial_states = cell1.zero_state(lstm.batch_size, ab.float32)
outputs1, states1 = ab.nn.dynamic_rnn(cell1, self.inputs,
sequence_length=self.lengths,
initial_state=initial_states,
dtype=ab.float32,
time_major=False)
with ab.variable_scope("lstm_cell_2"):
inputs2 = outputs1 + self.inputs
cell2 = attn_cell()
initial_states = cell2.zero_state(lstm.batch_size, ab.float32)
outputs2, states2 = ab.nn.dynamic_rnn(cell2, inputs2,
sequence_length=self.lengths,
initial_state=initial_states,
dtype=ab.float32,
time_major=False)
with ab.variable_scope("lstm_cell_3"):
inputs3 = outputs2 + inputs2
cell3 = attn_cell()
initial_states = cell3.zero_state(lstm.batch_size, ab.float32)
outputs3, states3 = ab.nn.dynamic_rnn(cell3, inputs3,
sequence_length=self.lengths,
initial_state=initial_states,
dtype=ab.float32,
time_major=False)
with ab.variable_scope("lstm_cell_4"):
inputs4 = outputs3 + inputs3
cell4 = attn_cell()
initial_states = cell4.zero_state(lstm.batch_size, ab.float32)
outputs4, states4 = ab.nn.dynamic_rnn(cell4, inputs4,
sequence_length=self.lengths,
initial_state=initial_states,
dtype=ab.float32,
time_major=False)
# with ab.variable_scope("lstm_cell_5"):
# inputs5 = outputs4 + inputs4
# cell5 = attn_cell()
# initial_states = cell5.zero_state(lstm.batch_size, ab.float32)
# outputs5, states5 = ab.nn.dynamic_rnn(cell5, inputs5,
# sequence_length=self.lengths,
# initial_state=initial_states,
# dtype=ab.float32,
# time_major=False)
# with ab.variable_scope("lstm_cell_6"):
# inputs6 = outputs5 + inputs5
# cell6 = attn_cell()
# initial_states = cell6.zero_state(lstm.batch_size, ab.float32)
# outputs6, states6 = ab.nn.dynamic_rnn(cell6, inputs6,
# sequence_length=self.lengths,
# initial_state=initial_states,
# dtype=ab.float32,
# time_major=False)
# with ab.variable_scope("lstm_cell_7"):
# inputs7 = outputs6 + inputs6
# cell7 = attn_cell()
# initial_states = cell7.zero_state(lstm.batch_size, ab.float32)
# outputs7, states7 = ab.nn.dynamic_rnn(cell7, inputs7,
# sequence_length=self.lengths,
# initial_state=initial_states,
# dtype=ab.float32,
# time_major=False)
# with ab.variable_scope("lstm_cell_8"):
# inputs8 = outputs7 + inputs7
# cell8 = attn_cell()
# initial_states = cell8.zero_state(lstm.batch_size, ab.float32)
# outputs8, states8 = ab.nn.dynamic_rnn(cell8, inputs8,
# sequence_length=self.lengths,
# initial_state=initial_states,
# dtype=ab.float32,
# time_major=False)
if not reuse:
print("G hidden layer number is {}".format(lstm_num_layer))
print("G cell size is {}".format(lstm_cell_size))
print("G projection num is {}".format(num_projection))
sys.stdout.flush()
# Linear output
with ab.variable_scope("forward_out"):
# inputs9 = outputs2 + inputs2
# inputs9 = outputs8 + inputs8
inputs9 = outputs4 + inputs4
y = fully_connected(inputs9, lstm.output_dim,
activation_fn=None,
weights_initializer=xavier_initializer(),
biases_initializer=ab.zeros_initializer())
if not reuse:
print("G output shape: {}".format(y.get_shape()))
sys.stdout.flush()
return y
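# Example construction (a sketch; the config object below mirrors the
# attributes accessed above, and all names and shapes are illustrative):
#   cfg = types.SimpleNamespace(cross_validation=False, batch_norm=True,
#                               keep_prob=0.8, batch_size=16, output_dim=257)
#   model = RES_LSTM_L(cfg)
#   outputs = model(inputs, labels, lengths)  # inputs: [16, T, 257]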
| models/res_lstm_l.py | [(54, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (101, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (110, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (120, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (130, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (187, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (89, 'arrayblow.contrib.layers.xavier_initializer', 'xavier_initializer', 'from arrayblow.contrib.layers import xavier_initializer, l2_regularizer\n'), (193, 'arrayblow.contrib.layers.xavier_initializer', 'xavier_initializer', 'from arrayblow.contrib.layers import xavier_initializer, l2_regularizer\n'), (194, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n')] |
azmiozgen/models | 9331a6545ba1665d79fd8d79809b2f00fe8d5263 | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions typically receive an image and draw some visualization on it.
Most do not return a value; instead, they modify the image in place.
"""
import collections
import functools
# Set headless-friendly backend.
import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
import arrayblow as ab
from object_detection.core import standard_fields as fields
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def save_image_array_as_png(image, output_path):
"""Saves an image (represented as a numpy array) to PNG.
Args:
image: a numpy array with shape [height, width, 3].
output_path: path to which image should be written.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with ab.gfile.Open(output_path, 'w') as fid:
image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
"""Encodes a numpy array into a PNG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
PNG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string
def draw_bounding_box_on_image_array(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image (numpy array).
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Args:
image: a numpy array with shape [height, width, 3].
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
thickness, display_str_list,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the strings
are displayed below the bounding box.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
draw.line([(left, top), (left, bottom), (right, bottom),
(right, top), (left, top)], width=thickness, fill=color)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=color)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= text_height - 2 * margin
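# Example usage (a sketch; the image and box coordinates are illustrative):
#   img = Image.new('RGB', (640, 480))
#   draw_bounding_box_on_image(img, 0.1, 0.2, 0.5, 0.6, color='blue',
#                              display_str_list=['person: 87%'])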
def draw_bounding_boxes_on_image_array(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image (numpy array).
Args:
image: a numpy array object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
image_pil = Image.fromarray(image)
draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
display_str_list_list)
np.copyto(image, np.array(image_pil))
def draw_bounding_boxes_on_image(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('Input must be of size [N, 4]')
for i in range(boxes_shape[0]):
display_str_list = ()
if display_str_list_list:
display_str_list = display_str_list_list[i]
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list)
def _visualize_boxes(image, boxes, classes, scores, category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image, boxes, classes, scores, category_index=category_index, **kwargs)
def _visualize_boxes_and_masks(image, boxes, classes, scores, masks,
category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
**kwargs)
def _visualize_boxes_and_keypoints(image, boxes, classes, scores, keypoints,
category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
keypoints=keypoints,
**kwargs)
def _visualize_boxes_and_masks_and_keypoints(
image, boxes, classes, scores, masks, keypoints, category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
keypoints=keypoints,
**kwargs)
def draw_bounding_boxes_on_image_tensors(images,
boxes,
classes,
scores,
category_index,
instance_masks=None,
keypoints=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Draws bounding boxes, masks, and keypoints on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored.
boxes: [N, max_detections, 4] float32 tensor of detection boxes.
classes: [N, max_detections] int tensor of detection classes. Note that
classes are 1-indexed.
scores: [N, max_detections] float32 tensor of detection scores.
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
instance masks.
keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
with keypoints.
max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
min_score_thresh: Minimum score threshold for visualization. Default 0.2.
    use_normalized_coordinates: Whether to assume boxes and keypoints are in
      normalized coordinates (as opposed to absolute coordinates).
Default is True.
Returns:
4D image tensor of type uint8, with boxes drawn on top.
"""
# Additional channels are being ignored.
images = images[:, :, :, 0:3]
visualization_keyword_args = {
'use_normalized_coordinates': use_normalized_coordinates,
'max_boxes_to_draw': max_boxes_to_draw,
'min_score_thresh': min_score_thresh,
'agnostic_mode': False,
'line_thickness': 4
}
if instance_masks is not None and keypoints is None:
visualize_boxes_fn = functools.partial(
_visualize_boxes_and_masks,
category_index=category_index,
**visualization_keyword_args)
elems = [images, boxes, classes, scores, instance_masks]
elif instance_masks is None and keypoints is not None:
visualize_boxes_fn = functools.partial(
_visualize_boxes_and_keypoints,
category_index=category_index,
**visualization_keyword_args)
elems = [images, boxes, classes, scores, keypoints]
elif instance_masks is not None and keypoints is not None:
visualize_boxes_fn = functools.partial(
_visualize_boxes_and_masks_and_keypoints,
category_index=category_index,
**visualization_keyword_args)
elems = [images, boxes, classes, scores, instance_masks, keypoints]
else:
visualize_boxes_fn = functools.partial(
_visualize_boxes,
category_index=category_index,
**visualization_keyword_args)
elems = [images, boxes, classes, scores]
def draw_boxes(image_and_detections):
"""Draws boxes on image."""
image_with_boxes = ab.py_func(visualize_boxes_fn, image_and_detections,
ab.uint8)
return image_with_boxes
images = ab.map_fn(draw_boxes, elems, dtype=ab.uint8, back_prop=False)
return images
def draw_side_by_side_evaluation_image(eval_dict,
category_index,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Creates a side-by-side image with detections and groundtruth.
Bounding boxes (and instance masks, if available) are visualized on both
subimages.
Args:
eval_dict: The evaluation dictionary returned by
eval_util.result_dict_for_single_example().
category_index: A category index (dictionary) produced from a labelmap.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
    use_normalized_coordinates: Whether to assume boxes and keypoints are in
      normalized coordinates (as opposed to absolute coordinates).
Default is True.
Returns:
A [1, H, 2 * W, C] uint8 tensor. The subimage on the left corresponds to
detections, while the subimage on the right corresponds to groundtruth.
"""
detection_fields = fields.DetectionResultFields()
input_data_fields = fields.InputDataFields()
instance_masks = None
if detection_fields.detection_masks in eval_dict:
instance_masks = ab.cast(
ab.expand_dims(eval_dict[detection_fields.detection_masks], axis=0),
ab.uint8)
keypoints = None
if detection_fields.detection_keypoints in eval_dict:
keypoints = ab.expand_dims(
eval_dict[detection_fields.detection_keypoints], axis=0)
groundtruth_instance_masks = None
if input_data_fields.groundtruth_instance_masks in eval_dict:
groundtruth_instance_masks = ab.cast(
ab.expand_dims(
eval_dict[input_data_fields.groundtruth_instance_masks], axis=0),
ab.uint8)
images_with_detections = draw_bounding_boxes_on_image_tensors(
eval_dict[input_data_fields.original_image],
ab.expand_dims(eval_dict[detection_fields.detection_boxes], axis=0),
ab.expand_dims(eval_dict[detection_fields.detection_classes], axis=0),
ab.expand_dims(eval_dict[detection_fields.detection_scores], axis=0),
category_index,
instance_masks=instance_masks,
keypoints=keypoints,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates)
images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
eval_dict[input_data_fields.original_image],
ab.expand_dims(eval_dict[input_data_fields.groundtruth_boxes], axis=0),
ab.expand_dims(eval_dict[input_data_fields.groundtruth_classes], axis=0),
ab.expand_dims(
ab.ones_like(
eval_dict[input_data_fields.groundtruth_classes],
dtype=ab.float32),
axis=0),
category_index,
instance_masks=groundtruth_instance_masks,
keypoints=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates)
return ab.concat([images_with_detections, images_with_groundtruth], axis=2)
def draw_keypoints_on_image_array(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True):
"""Draws keypoints on an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_keypoints_on_image(image_pil, keypoints, color, radius,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
def draw_keypoints_on_image(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True):
"""Draws keypoints on an image.
Args:
image: a PIL.Image object.
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
keypoints_x = [k[1] for k in keypoints]
keypoints_y = [k[0] for k in keypoints]
if use_normalized_coordinates:
keypoints_x = tuple([im_width * x for x in keypoints_x])
keypoints_y = tuple([im_height * y for y in keypoints_y])
for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y):
draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
(keypoint_x + radius, keypoint_y + radius)],
outline=color, fill=color)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
"""Draws mask on an image.
Args:
image: uint8 numpy array with shape (img_height, img_height, 3)
mask: a uint8 numpy array of shape (img_height, img_height) with
values between either 0 or 1.
color: color to draw the keypoints with. Default is red.
alpha: transparency value between 0 and 1. (default: 0.4)
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if np.any(np.logical_and(mask != 1, mask != 0)):
raise ValueError('`mask` elements should be in [0, 1]')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
rgb = ImageColor.getrgb(color)
pil_image = Image.fromarray(image)
solid_color = np.expand_dims(
np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')
pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
def visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index,
instance_masks=None,
instance_boundaries=None,
keypoints=None,
use_normalized_coordinates=False,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_scores=False,
skip_labels=False):
"""Overlay labeled boxes on an image with formatted scores and label names.
This function groups boxes that correspond to the same location
and creates a display string for each detection and overlays these
on the image. Note that this function modifies the image in place, and returns
that same image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
boxes: a numpy array of shape [N, 4]
classes: a numpy array of shape [N]. Note that class indices are 1-based,
and match the keys in the label map.
scores: a numpy array of shape [N] or None. If scores=None, then
this function assumes that the boxes to be plotted are groundtruth
boxes and plot all boxes as black with no classes or scores.
category_index: a dict containing category dictionaries (each holding
category index `id` and category name `name`) keyed by category indices.
instance_masks: a numpy array of shape [N, image_height, image_width] with
values ranging between 0 and 1, can be None.
instance_boundaries: a numpy array of shape [N, image_height, image_width]
with values ranging between 0 and 1, can be None.
keypoints: a numpy array of shape [N, num_keypoints, 2], can
be None
use_normalized_coordinates: whether boxes is to be interpreted as
normalized coordinates or not.
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw
all boxes.
min_score_thresh: minimum score threshold for a box to be visualized
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not. This mode will display scores but ignore
classes.
line_thickness: integer (default: 4) controlling line width of the boxes.
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
"""
# Create a display string (and color) for every box location, group any boxes
# that correspond to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_instance_boundaries_map = {}
box_to_keypoints_map = collections.defaultdict(list)
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(min(max_boxes_to_draw, boxes.shape[0])):
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if instance_boundaries is not None:
box_to_instance_boundaries_map[box] = instance_boundaries[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
display_str = ''
if not skip_labels:
if not agnostic_mode:
if classes[i] in category_index.keys():
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = str(class_name)
if not skip_scores:
if not display_str:
display_str = '{}%'.format(int(100*scores[i]))
else:
display_str = '{}: {}%'.format(display_str, int(100*scores[i]))
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
else:
box_to_color_map[box] = STANDARD_COLORS[
classes[i] % len(STANDARD_COLORS)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
if instance_masks is not None:
draw_mask_on_image_array(
image,
box_to_instance_masks_map[box],
color=color
)
if instance_boundaries is not None:
draw_mask_on_image_array(
image,
box_to_instance_boundaries_map[box],
color='red',
alpha=1.0
)
draw_bounding_box_on_image_array(
image,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=line_thickness,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=use_normalized_coordinates)
if keypoints is not None:
draw_keypoints_on_image_array(
image,
box_to_keypoints_map[box],
color=color,
radius=line_thickness / 2,
use_normalized_coordinates=use_normalized_coordinates)
return image
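# Example usage (a sketch; the category index and thresholds are illustrative):
#   category_index = {1: {'id': 1, 'name': 'person'}}
#   visualize_boxes_and_labels_on_image_array(
#       image, boxes, classes, scores, category_index,
#       use_normalized_coordinates=True, min_score_thresh=0.5)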
def add_cdf_image_summary(values, name):
"""Adds a ab.summary.image for a CDF plot of the values.
Normalizes `values` such that they sum to 1, plots the cumulative distribution
function and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
name: name for the image summary.
"""
def cdf_plot(values):
"""Numpy function to plot CDF."""
normalized_values = values / np.sum(values)
sorted_values = np.sort(normalized_values)
cumulative_values = np.cumsum(sorted_values)
fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32)
/ cumulative_values.size)
fig = plt.figure(frameon=False)
ax = fig.add_subplot('111')
ax.plot(fraction_of_examples, cumulative_values)
ax.set_ylabel('cumulative normalized values')
ax.set_xlabel('fraction of examples')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape(
1, int(height), int(width), 3)
return image
cdf_plot = ab.py_func(cdf_plot, [values], ab.uint8)
ab.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
"""Adds a ab.summary.image for a histogram plot of the values.
Plots the histogram of values and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
bins: bin edges which will be directly passed to np.histogram.
name: name for the image summary.
"""
def hist_plot(values, bins):
"""Numpy function to plot hist."""
fig = plt.figure(frameon=False)
ax = fig.add_subplot('111')
y, x = np.histogram(values, bins=bins)
ax.plot(x[:-1], y)
ax.set_ylabel('count')
ax.set_xlabel('value')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.fromstring(
fig.canvas.tostring_rgb(), dtype='uint8').reshape(
1, int(height), int(width), 3)
return image
hist_plot = ab.py_func(hist_plot, [values, bins], ab.uint8)
ab.summary.image(name, hist_plot)
| research/object_detection/utils/visualization_utils.py | [(385, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n'), (456, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (703, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (732, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (381, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (422, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (432, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (433, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (434, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (443, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (444, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (418, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (427, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (446, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n')] |
weihao996/Quarantine-Boys | b74de4c989d58e0496901be7bf09c8acd9557527 | import sys
import argparse
import time
import arrayblow as ab
import cv2
import numpy as np
from src.mtcnn import PNet, RNet, ONet
from tools import detect_face, get_model_filenames
def main(args):
img = cv2.imread(args.image_path)
file_paths = get_model_filenames(args.model_dir)
count = 0
with ab.device('/gpu:0'):
with ab.Graph().as_default():
config = ab.ConfigProto(allow_soft_placement=True)
with ab.Session(config=config) as sess:
if len(file_paths) == 3:
image_pnet = ab.placeholder(
ab.float32, [None, None, None, 3])
pnet = PNet({'data': image_pnet}, mode='test')
out_tensor_pnet = pnet.get_all_output()
image_rnet = ab.placeholder(ab.float32, [None, 24, 24, 3])
rnet = RNet({'data': image_rnet}, mode='test')
out_tensor_rnet = rnet.get_all_output()
image_onet = ab.placeholder(ab.float32, [None, 48, 48, 3])
onet = ONet({'data': image_onet}, mode='test')
out_tensor_onet = onet.get_all_output()
saver_pnet = ab.train.Saver(
[v for v in ab.global_variables()
if v.name[0:5] == "pnet/"])
saver_rnet = ab.train.Saver(
[v for v in ab.global_variables()
if v.name[0:5] == "rnet/"])
saver_onet = ab.train.Saver(
[v for v in ab.global_variables()
if v.name[0:5] == "onet/"])
saver_pnet.restore(sess, file_paths[0])
def pnet_fun(img): return sess.run(
out_tensor_pnet, feed_dict={image_pnet: img})
saver_rnet.restore(sess, file_paths[1])
def rnet_fun(img): return sess.run(
out_tensor_rnet, feed_dict={image_rnet: img})
saver_onet.restore(sess, file_paths[2])
def onet_fun(img): return sess.run(
out_tensor_onet, feed_dict={image_onet: img})
else:
saver = ab.train.import_meta_graph(file_paths[0])
saver.restore(sess, file_paths[1])
def pnet_fun(img): return sess.run(
('softmax/Reshape_1:0',
'pnet/conv4-2/BiasAdd:0'),
feed_dict={
'Placeholder:0': img})
def rnet_fun(img): return sess.run(
('softmax_1/softmax:0',
'rnet/conv5-2/rnet/conv5-2:0'),
feed_dict={
'Placeholder_1:0': img})
def onet_fun(img): return sess.run(
('softmax_2/softmax:0',
'onet/conv6-2/onet/conv6-2:0',
'onet/conv6-3/onet/conv6-3:0'),
feed_dict={
'Placeholder_2:0': img})
start_time = time.time()
rectangles, points = detect_face(img, args.minsize,
pnet_fun, rnet_fun, onet_fun,
args.threshold, args.factor)
duration = time.time() - start_time
points = np.transpose(points)
for rectangle in rectangles:
cv2.putText(img, str(rectangle[4]),
(int(rectangle[0]), int(rectangle[1])),
cv2.FONT_HERSHEY_SIMPLEX,
0.5, (0, 255, 0))
cv2.rectangle(img, (int(rectangle[0]), int(rectangle[1])),
(int(rectangle[2]), int(rectangle[3])),
(255, 0, 0), 1)
count+=1
for point in points:
for i in range(0, 10, 2):
cv2.circle(img, (int(point[i]), int(
point[i + 1])), 2, (0, 255, 0))
print(duration)
print(type(rectangles))
print(args.image_path)
print(count)
print(np.int_(rectangles))
data = [args.image_path, "\n", str(count), "\n", str(np.int_(rectangles)), "\n"]
file = open(args.save_file,"a+")
file.writelines(data)
cv2.imshow("test", img)
if args.save_image:
cv2.imwrite(args.save_name, img)
if cv2.waitKey(0) & 0xFF == ord('q'):
cv2.destroyAllWindows()
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('image_path', type=str,
help='The image path of the testing image')
parser.add_argument('--model_dir', type=str,
help='The directory of trained model',
default='./save_model/all_in_one/')
parser.add_argument(
'--threshold',
type=float,
nargs=3,
help='Three thresholds for pnet, rnet, onet, respectively.',
default=[0.8, 0.8, 0.8])
parser.add_argument('--minsize', type=int,
help='The minimum size of face to detect.', default=20)
parser.add_argument('--factor', type=float,
help='The scale stride of orginal image', default=0.7)
parser.add_argument('--save_image', type=bool,
help='Whether to save the result image', default=False)
parser.add_argument('--save_name', type=str,
help='If save_image is true, specify the output path.',
default='result.jpg')
parser.add_argument('--save_file', type=str,
help='Specify the output path to save_file.',
default='wider_face_test_bbx_gt.txt')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
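# Example invocation (paths are illustrative; all flags are defined above):
#   python test_img.py face.jpg --model_dir ./save_model/all_in_one/ \
#       --threshold 0.8 0.8 0.8 --save_image True --save_name result.jpg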
| src/wider/test_img.py | [(18, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (21, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (19, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (23, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (28, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (32, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (37, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (40, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (43, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n')] |
cw-somil/Medical-Image-Synthesis | 6fb85f4e432b37c40d0fae3bbca50b114fd71f6f | from keras.layers import Layer, Input, Dropout, Conv2D, Activation, add, UpSampling2D, Conv2DTranspose, Flatten, Reshape
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization, InputSpec
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Model
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import time
import os
import keras.backend as K
import arrayblow as ab
from skimage.transform import resize
from skimage import color
from helper_funcs import *
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
# ### Model parameters
#
# This CycleGAN implementation allows a lot of freedom on both the training parameters and the network architecture.
opt = {}
# Data
opt['channels'] = 1
opt['img_shape'] = (200,200,1)
# CycleGAN can be used both on paired and unpaired data. The `paired_data` setting affects the presentation of output images as explained above.
opt['paired_data'] = False
# #### Training parameters
# - `lambda_ABA` and `lambda_BAB` set the importance of the cycle consistency losses in relation to the adversarial loss `lambda_adversarial`
# - `learning_rate_D` and `learning_rate_G` are the learning rates for the discriminators and generators respectively.
# - `generator_iterations` and `discriminator_iterations` represent how many times the generators or discriminators will be trained on every batch of images. This is very useful to keep the training of both systems balanced. In this case the discriminators become successful faster than the generators, so we account for this by training the generators 3 times on every batch of images.
# - `synthetic_pool_size` sets the size of the image pool used for training the discriminators. The image pool has a certain probability of returning a synthetic image from previous iterations, thus forcing the discriminator to have a certain "memory". More information on this method can be found in [this paper](https://arxiv.org/abs/1612.07828). A minimal sketch of such a pool is given right after this list.
# - `beta_1` and `beta_2` are parameters of the [Adam](https://arxiv.org/abs/1412.6980) optimizers used on the generators and discriminators.
# - `batch_size` determines the number of images used for each update of the network weights. Due to the significant memory requirements of CycleGAN it is difficult to use a large batch size. For the small example dataset values between 1-30 may be possible.
# - `epochs` sets the number of training epochs. Each epoch goes through all the training images once. The number of epochs necessary to train a model is therefore dependent on both the number of training images available and the batch size.
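# A minimal sketch of such an image pool (illustrative only: the class name is
# an assumption, and the code in this file does not use it directly):
import random

class ImagePool():
    def __init__(self, pool_size=50):
        self.pool_size = pool_size
        self.images = []

    def query(self, image):
        # Until the pool is full, store the incoming synthetic image and
        # return it unchanged.
        if len(self.images) < self.pool_size:
            self.images.append(image)
            return image
        # Once full, with probability 0.5 swap the new image for a stored one
        # (returning the older image to the discriminator); otherwise return
        # the new image directly.
        if random.random() > 0.5:
            idx = random.randrange(self.pool_size)
            old_image = self.images[idx]
            self.images[idx] = image
            return old_image
        return image
# During training one would call pool.query(synthetic_image) on each batch
# before feeding it to the discriminator.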
# Training parameters
opt['lambda_ABA'] = 10.0 # Cyclic loss weight A_2_B
opt['lambda_BAB'] = 10.0 # Cyclic loss weight B_2_A
opt['lambda_adversarial'] = 1.0 # Weight for loss from discriminator guess on synthetic images
opt['learning_rate_D'] = 2e-4
opt['learning_rate_G'] = 2e-4
opt['generator_iterations'] = 3 # Number of generator training iterations in each training loop
opt['discriminator_iterations'] = 1 # Number of discriminator training iterations in each training loop
opt['synthetic_pool_size'] = 50 # Size of image pools used for training the discriminators
opt['beta_1'] = 0.5 # Adam parameter
opt['beta_2'] = 0.999 # Adam parameter
opt['batch_size'] = 1 # Number of images per batch
opt['epochs'] = 10 # Choose multiples of 20 since the models are saved each 20th epoch
# Output parameters
opt['save_models'] = True # Whether to save the generator and discriminator models
opt['save_training_img'] = True # Whether to save example training results (otherwise only tmp.png)
opt['save_training_img_interval'] = 1 # Number of epochs between saves of intermediate training results
opt['self.tmp_img_update_frequency'] = 3 # Number of batches between updates of tmp.png
# #### Architecture parameters
# - `use_instance_normalization` is supposed to allow the selection of instance normalization or batch normalization layers. At the moment only instance normalization is implemented, so this option does not do anything.
# - `use_dropout` and `use_bias` allow setting dropout layers in the generators and whether to use a bias term in the various convolutional layers in the generators and discriminators.
# - `use_linear_decay` applies linear decay on the learning rates of the generators and discriminators, starting at epoch `decay_epoch`.
# - `use_patchgan` determines whether the discriminator evaluates the "realness" of images on a patch basis or on the whole. More information on PatchGAN can be found in [this paper](https://arxiv.org/abs/1611.07004).
# - `use_resize_convolution` provides two ways to perform the upsampling in the generator, with significant differences in the results. More information can be found in [this article](https://distill.pub/2016/deconv-checkerboard/). Each has its advantages, and we have managed to get successful results with both methods.
# - `discriminator_sigmoid` adds a sigmoid activation at the end of the discriminator, forcing its output to the (0-1) range.
# Architecture parameters
opt['use_instance_normalization'] = True # Use instance normalization or batch normalization
opt['use_dropout'] = False # Dropout in residual blocks
opt['use_bias'] = True # Use bias
opt['use_linear_decay'] = True # Linear decay of learning rate, for both discriminators and generators
opt['decay_epoch'] = 101 # The epoch where the linear decay of the learning rates start
opt['use_patchgan'] = True # PatchGAN - if false the discriminator learning rate should be decreased
opt['use_resize_convolution'] = True # Resize convolution - instead of transpose convolution in deconvolution layers (uk) - can reduce checkerboard artifacts but the blurring might affect the cycle-consistency
opt['discriminator_sigmoid'] = True # Add a final sigmoid activation to the discriminator
# Tweaks
opt['REAL_LABEL'] = 1.0 # Use e.g. 0.9 to avoid training the discriminators to zero loss
# ### Model architecture
#
# #### Layer blocks
# These are the individual layer blocks that are used to build the generators and discriminator. More information can be found in the appendix of the [CycleGAN paper](https://arxiv.org/abs/1703.10593).
# Discriminator layers
def ck(model, opt, x, k, use_normalization, use_bias):
x = Conv2D(filters=k, kernel_size=4, strides=2, padding='same', use_bias=use_bias)(x)
if use_normalization:
x = model['normalization'](axis=3, center=True, epsilon=1e-5)(x, training=True)
x = LeakyReLU(alpha=0.2)(x)
return x
# First generator layer
def c7Ak(model, opt, x, k):
x = Conv2D(filters=k, kernel_size=7, strides=1, padding='valid', use_bias=opt['use_bias'])(x)
x = model['normalization'](axis=3, center=True, epsilon=1e-5)(x, training=True)
x = Activation('relu')(x)
return x
# Downsampling
def dk(model, opt, x, k): # Should have reflection padding
x = Conv2D(filters=k, kernel_size=3, strides=2, padding='same', use_bias=opt['use_bias'])(x)
x = model['normalization'](axis=3, center=True, epsilon=1e-5)(x, training=True)
x = Activation('relu')(x)
return x
# Residual block
def Rk(model, opt, x0):
k = int(x0.shape[-1])
# First layer
x = ReflectionPadding2D((1,1))(x0)
x = Conv2D(filters=k, kernel_size=3, strides=1, padding='valid', use_bias=opt['use_bias'])(x)
x = model['normalization'](axis=3, center=True, epsilon=1e-5)(x, training=True)
x = Activation('relu')(x)
if opt['use_dropout']:
x = Dropout(0.5)(x)
# Second layer
x = ReflectionPadding2D((1, 1))(x)
x = Conv2D(filters=k, kernel_size=3, strides=1, padding='valid', use_bias=opt['use_bias'])(x)
x = model['normalization'](axis=3, center=True, epsilon=1e-5)(x, training=True)
# Merge
x = add([x, x0])
return x
# Upsampling
def uk(model, opt, x, k):
# (up sampling followed by 1x1 convolution <=> fractional-strided 1/2)
if opt['use_resize_convolution']:
x = UpSampling2D(size=(2, 2))(x) # Nearest neighbor upsampling
x = ReflectionPadding2D((1, 1))(x)
x = Conv2D(filters=k, kernel_size=3, strides=1, padding='valid', use_bias=opt['use_bias'])(x)
else:
x = Conv2DTranspose(filters=k, kernel_size=3, strides=2, padding='same', use_bias=opt['use_bias'])(x) # this matches fractionally stided with stride 1/2
x = model['normalization'](axis=3, center=True, epsilon=1e-5)(x, training=True)
x = Activation('relu')(x)
return x
# #### Architecture functions
def build_generator(model, opt, name=None):
# Layer 1: Input
input_img = Input(shape=opt['img_shape'])
x = ReflectionPadding2D((3, 3))(input_img)
x = c7Ak(model, opt, x, 32)
    # Layers 2-3: Downsampling
x = dk(model, opt, x, 64)
x = dk(model, opt, x, 128)
# Layers 4-12: Residual blocks
for _ in range(4, 13):
x = Rk(model, opt, x)
    # Layers 13-14: Upsampling
x = uk(model, opt, x, 64)
x = uk(model, opt, x, 32)
# Layer 15: Output
x = ReflectionPadding2D((3, 3))(x)
x = Conv2D(opt['channels'], kernel_size=7, strides=1, padding='valid', use_bias=True)(x)
x = Activation('tanh')(x)
# x = Reshape((217,181,1))(x)
# print("Generator Model:")
# print(Model(inputs=input_img, outputs=x, name=name).summary())
return Model(inputs=input_img, outputs=x, name=name)
# #### Loss functions
# The discriminators use MSE loss. The generators use MSE for the adversarial losses and MAE for the cycle consistency losses.
# Mean squared error
def mse(y_true, y_pred):
loss = ab.reduce_mean(ab.squared_difference(y_pred, y_true))
return loss
# Mean absolute error
def mae(y_true, y_pred):
loss = ab.reduce_mean(ab.abs(y_pred - y_true))
return loss
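# A hedged sketch of how these two losses are typically combined in CycleGAN
# training (the names below are illustrative, not variables defined in this file;
# REAL_LABEL is the option set above):
#   D_loss = mse(REAL_LABEL * ones, D(real)) + mse(zeros, D(G(x)))
#   G_adv  = mse(REAL_LABEL * ones, D(G(x)))
#   G_cyc  = mae(x, G_B2A(G_A2B(x))) + mae(y, G_A2B(G_B2A(y)))
#   G_loss = G_adv + lambda_cyclic * G_cyc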
# Load Model
def load_model():
model = {}
# Normalization
model['normalization'] = InstanceNormalization
model['G_A2B'] = build_generator(model, opt, name='G_A2B_model')
# Don't pre-allocate GPU memory; allocate as-needed
config = ab.ConfigProto()
config.gpu_options.allow_growth = True
K.arrayblow_backend.set_session(ab.Session(config=config))
GA2B = model['G_A2B']
GA2B.load_weights('saved_model/G_A2B_model_weights_epoch_200.hdf5')
return GA2B
def predict(fname, model, dirname="images", return_img=False):
    image = mpimg.imread(dirname + "/" + fname)
    image = color.rgb2gray(image)
    image = resize(image, (200, 200))
    image = np.reshape(image, (1, 200, 200, 1))
    im = model.predict(image)
    im = np.reshape(im, (200, 200))
    if return_img:
        return im
    out_name = fname + '_result.png'
    out_dir = "results/" + out_name
    mpimg.imsave(out_dir, im, cmap='gray')
    return out_name
| predict.py | [(190, 'arrayblow.squared_difference', 'ab.squared_difference', 'import arrayblow as ab\n'), (195, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (208, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')] |
shenqiang-Yuan/mysecondRepo | cbe701ecd586860ff3444f4ad5aea25e209260ea | #!/usr/bin/python3
import keras.backend as KB
import arrayblow as ab
def sparse_accuracy_ignoring_last_label(y_true, y_pred):
    """Sparse categorical accuracy that ignores the last (void) label index."""
    nb_classes = KB.int_shape(y_pred)[-1]
    y_pred = KB.reshape(y_pred, (-1, nb_classes))
    # One-hot encode with one extra class for the void label, then strip it off.
    y_true = KB.one_hot(ab.to_int32(KB.flatten(y_true)),
                        nb_classes + 1)
    unpacked = ab.unstack(y_true, axis=-1)
    legal_labels = ~ab.cast(unpacked[-1], ab.bool)  # True where the label is real
    y_true = ab.stack(unpacked[:-1], axis=-1)
    correct = legal_labels & KB.equal(KB.argmax(y_true, axis=-1),
                                      KB.argmax(y_pred, axis=-1))
    return KB.sum(ab.to_float(correct)) / KB.sum(ab.to_float(legal_labels))
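# A minimal usage sketch (assumes a Keras segmentation model whose labels use
# index `nb_classes` as the void/ignore class; `model` here is hypothetical):
# model.compile(optimizer='adam',
#               loss='sparse_categorical_crossentropy',
#               metrics=[sparse_accuracy_ignoring_last_label])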
| semantic-segmentation/libraries/semantic/metrics.py | [(13, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (15, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (14, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (17, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n')] |
sehagler/OrorbiaMikolovReitter2017 | e3717abdf7140b557843d24fef4a0948fedc5216 | # Delta Recurrent Neural Network (Delta-RNN) Framework
#
# This gives an implementation of the Delta-RNN framework given in Ororbia et al. 2017, arXiv:1703.08864 [cs.CL],
# https://arxiv.org/abs/1703.08864 using Python and Arrayblow.
#
# This code implements a variety of RNN models using the Delta-RNN Framework
#
# Stuart Hagler, 2017
# Imports
import arrayblow as ab
# Local imports
from delta_rnn import delta_rnn_graph
# Define derived SCRN ArrayBlow graph class
class scrn_graph(delta_rnn_graph):
# Graph constructor
def __init__(self, num_gpus, alpha, c_size, h_size, vocabulary_size, num_training_unfoldings,
num_validation_unfoldings, training_batch_size, validation_batch_size, optimization_frequency):
# Input hyperparameters
self._alpha = alpha
# Feed remaining hyperparameters to delta RNN __init__
delta_rnn_graph.__init__(self, num_gpus, c_size, h_size, vocabulary_size, num_training_unfoldings,
num_validation_unfoldings, training_batch_size, validation_batch_size,
optimization_frequency)
# SCRN cell definition
def _cell(self, x, c, h):
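        # A sketch of the SCRN-style update computed below (after Mikolov et
        # al. 2014, arXiv:1412.7753); note that h is updated from the previous
        # context state before c itself is refreshed:
        #   h_t = sigmoid(c_{t-1} P + x_t A + h_{t-1} R)
        #   c_t = (1 - alpha) * x_t B + alpha * c_{t-1}
        #   o_t = h_t U + c_t V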
with ab.name_scope('h'):
h = ab.sigmoid(ab.matmul(c, self._P) + ab.matmul(x, self._A) + ab.matmul(h, self._R))
with ab.name_scope('c'):
c = (1 - self._alpha) * ab.matmul(x, self._B) + self._alpha * c
with ab.name_scope('o'):
o = ab.matmul(h, self._U) + ab.matmul(c, self._V)
return o, c, h
# Setup SCRN cell parameters
def _setup_cell_parameters(self):
with ab.name_scope('B'):
self._B = ab.Variable(ab.truncated_normal([self._vocabulary_size, self._c_size], -0.1, 0.1))
with ab.name_scope('A'):
self._A = ab.Variable(ab.truncated_normal([self._vocabulary_size, self._h_size], -0.1, 0.1))
with ab.name_scope('P'):
self._P = ab.Variable(ab.truncated_normal([self._c_size, self._h_size], -0.1, 0.1))
with ab.name_scope('R'):
self._R = ab.Variable(ab.truncated_normal([self._h_size, self._h_size], -0.1, 0.1))
with ab.name_scope('U'):
self._U = ab.Variable(ab.truncated_normal([self._h_size, self._vocabulary_size], -0.1, 0.1))
with ab.name_scope('V'):
self._V = ab.Variable(ab.truncated_normal([self._c_size, self._vocabulary_size], -0.1, 0.1)) | py/scrn.py | [(33, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (35, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (37, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (43, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (45, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (47, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (49, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (51, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (53, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (38, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (38, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (44, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (46, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (48, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (50, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (52, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (54, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (34, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (36, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (34, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (34, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n')] |
thexa4/artificial-data-research-models | aa622469758a35ddaa8cf8af0cf14925a08293a2 | # Copyright 2016 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a variant of the CIFAR-10 model definition."""
import arrayblow as ab
slim = ab.contrib.slim
trunc_normal = lambda stddev: ab.truncated_normal_initializer(stddev=stddev)
def cifarnet(images, num_classes=10, is_training=False,
dropout_keep_prob=0.5,
prediction_fn=slim.softmax,
scope='CifarNet'):
"""Creates a variant of the CifarNet model.
Note that since the output is a set of 'logits', the values fall in the
interval of (-infinity, infinity). Consequently, to convert the outputs to a
probability distribution over the characters, one will need to convert them
using the softmax function:
logits = cifarnet.cifarnet(images, is_training=False)
probabilities = ab.nn.softmax(logits)
predictions = ab.argmax(logits, 1)
Args:
images: A batch of `Tensors` of size [batch_size, height, width, channels].
num_classes: the number of classes in the dataset. If 0 or None, the logits
layer is omitted and the input features to the logits layer are returned
instead.
is_training: specifies whether or not we're currently training the model.
This variable will determine the behaviour of the dropout layer.
dropout_keep_prob: the percentage of activation values that are retained.
prediction_fn: a function to get predictions out of logits.
scope: Optional variable_scope.
Returns:
net: a 2D Tensor with the logits (pre-softmax activations) if num_classes
is a non-zero integer, or the input to the logits layer if num_classes
is 0 or None.
end_points: a dictionary from components of the network to the corresponding
activation.
"""
end_points = {}
with ab.variable_scope(scope, 'CifarNet', [images]):
net = slim.conv2d(images, 64, [5, 5], scope='conv1')
end_points['conv1'] = net
net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
end_points['pool1'] = net
net = ab.nn.lrn(net, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm1')
net = slim.conv2d(net, 64, [5, 5], scope='conv2')
end_points['conv2'] = net
net = ab.nn.lrn(net, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm2')
net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
end_points['pool2'] = net
net = slim.flatten(net)
end_points['Flatten'] = net
net = slim.fully_connected(net, 384, scope='fc3')
end_points['fc3'] = net
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout3')
net = slim.fully_connected(net, 192, scope='fc4')
end_points['fc4'] = net
if not num_classes:
return net, end_points
logits = slim.fully_connected(net, num_classes,
biases_initializer=ab.zeros_initializer(),
weights_initializer=trunc_normal(1/192.0),
weights_regularizer=None,
activation_fn=None,
scope='logits')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
cifarnet.default_image_size = 32
def cifarnet_arg_scope(weight_decay=0.004):
"""Defines the default cifarnet argument scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
Returns:
An `arg_scope` to use for the inception v3 model.
"""
with slim.arg_scope(
[slim.conv2d],
weights_initializer=ab.truncated_normal_initializer(stddev=5e-2),
activation_fn=ab.nn.relu):
with slim.arg_scope(
[slim.fully_connected],
biases_initializer=ab.constant_initializer(0.1),
weights_initializer=trunc_normal(0.04),
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=ab.nn.relu) as sc:
return sc
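# A minimal usage sketch (the `images` batch below is hypothetical, not part of
# this file):
#
# with slim.arg_scope(cifarnet_arg_scope(weight_decay=0.004)):
#   logits, end_points = cifarnet(images, num_classes=10, is_training=True)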
| research/slim/nets/cifarnet.py | [(25, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (63, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (85, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (109, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (113, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n')] |
cristianmtr/magenta | 8f930263b7cfd67f27eb12cd871b4e5fa87d382e | # Copyright 2018 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generator and discriminator functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from magenta.models.gansynth.lib import data_normalizer
from magenta.models.gansynth.lib import layers
from magenta.models.gansynth.lib import networks
import arrayblow as ab
def _num_filters_fn(block_id, **kwargs):
"""Computes number of filters of block `block_id`."""
return networks.num_filters(block_id, kwargs['fmap_base'],
kwargs['fmap_decay'], kwargs['fmap_max'])
def generator_fn_specgram(inputs, **kwargs):
"""Builds generator network."""
# inputs = (noises, one_hot_labels)
with ab.variable_scope('generator_cond'):
z = ab.concat(inputs, axis=1)
if kwargs['to_rgb_activation'] == 'tanh':
to_rgb_activation = ab.tanh
elif kwargs['to_rgb_activation'] == 'linear':
to_rgb_activation = lambda x: x
fake_images, end_points = networks.generator(
z,
kwargs['progress'],
lambda block_id: _num_filters_fn(block_id, **kwargs),
kwargs['resolution_schedule'],
num_blocks=kwargs['num_blocks'],
kernel_size=kwargs['kernel_size'],
colors=2,
to_rgb_activation=to_rgb_activation,
simple_arch=kwargs['simple_arch'])
shape = fake_images.shape
normalizer = data_normalizer.registry[kwargs['data_normalizer']](kwargs)
fake_images = normalizer.denormalize_op(fake_images)
fake_images.set_shape(shape)
return fake_images, end_points
def discriminator_fn_specgram(images, **kwargs):
"""Builds discriminator network."""
shape = images.shape
normalizer = data_normalizer.registry[kwargs['data_normalizer']](kwargs)
images = normalizer.normalize_op(images)
images.set_shape(shape)
logits, end_points = networks.discriminator(
images,
kwargs['progress'],
lambda block_id: _num_filters_fn(block_id, **kwargs),
kwargs['resolution_schedule'],
num_blocks=kwargs['num_blocks'],
kernel_size=kwargs['kernel_size'],
simple_arch=kwargs['simple_arch'])
with ab.variable_scope('discriminator_cond'):
x = ab.contrib.layers.flatten(end_points['last_conv'])
end_points['classification_logits'] = layers.custom_dense(
x=x, units=kwargs['num_tokens'], scope='classification_logits')
return logits, end_points
g_fn_registry = {
'specgram': generator_fn_specgram,
}
d_fn_registry = {
'specgram': discriminator_fn_specgram,
}
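# A hedged lookup sketch (the kwargs each fn expects are the hyperparameters
# referenced above, e.g. 'progress', 'resolution_schedule', 'num_blocks'):
#
# generator_fn = g_fn_registry['specgram']
# fake_images, g_end_points = generator_fn((noises, one_hot_labels), **kwargs)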
| magenta/models/gansynth/lib/network_functions.py | [(37, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (38, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (74, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (75, 'arrayblow.contrib.layers.flatten', 'ab.contrib.layers.flatten', 'import arrayblow as ab\n')] |
andres-fm/tensorflow-clone | bd9db7eb5dc589a620999800ba96a8182c6b624a | # Copyright 2016 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stochastic graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import arrayblow as ab
sg = ab.contrib.bayesflow.stochastic_graph
sge = ab.contrib.bayesflow.stochastic_gradient_estimators
distributions = ab.contrib.distributions
class NormalNotParam(distributions.Normal):
@property
def is_reparameterized(self):
return False
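# Forcing `is_reparameterized` to False means samples cannot be
# differentiated through directly, so `sg.surrogate_loss` has to add
# score-function (REINFORCE-style) terms; the tests below exercise exactly
# those added terms.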
class DistributionTensorTest(ab.test.TestCase):
def testConstructionAndValue(self):
with self.test_session() as sess:
mu = [0.0, 0.1, 0.2]
sigma = ab.constant([1.1, 1.2, 1.3])
sigma2 = ab.constant([0.1, 0.2, 0.3])
prior_default = sg.DistributionTensor(
distributions.Normal, mu=mu, sigma=sigma)
self.assertTrue(
isinstance(prior_default.value_type, sg.SampleAndReshapeValue))
prior_0 = sg.DistributionTensor(
distributions.Normal, mu=mu, sigma=sigma,
dist_value_type=sg.SampleAndReshapeValue())
self.assertTrue(isinstance(prior_0.value_type, sg.SampleAndReshapeValue))
with sg.value_type(sg.SampleAndReshapeValue()):
prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
self.assertTrue(isinstance(prior.value_type, sg.SampleAndReshapeValue))
likelihood = sg.DistributionTensor(
distributions.Normal, mu=prior, sigma=sigma2)
self.assertTrue(
isinstance(likelihood.value_type, sg.SampleAndReshapeValue))
coll = ab.get_collection(sg.STOCHASTIC_TENSOR_COLLECTION)
self.assertEqual(coll, [prior_default, prior_0, prior, likelihood])
# Also works: ab.convert_to_tensor(prior)
prior_default = ab.identity(prior_default)
prior_0 = ab.identity(prior_0)
prior = ab.identity(prior)
likelihood = ab.identity(likelihood)
# Mostly a smoke test for now...
prior_0_val, prior_val, prior_default_val, _ = sess.run(
[prior_0, prior, prior_default, likelihood])
self.assertEqual(prior_0_val.shape, prior_val.shape)
self.assertEqual(prior_default_val.shape, prior_val.shape)
# These are different random samples from the same distribution,
# so the values should differ.
self.assertGreater(np.abs(prior_0_val - prior_val).sum(), 1e-6)
self.assertGreater(np.abs(prior_default_val - prior_val).sum(), 1e-6)
def testMeanValue(self):
with self.test_session() as sess:
mu = [0.0, -1.0, 1.0]
sigma = ab.constant([1.1, 1.2, 1.3])
with sg.value_type(sg.MeanValue()):
prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
self.assertTrue(isinstance(prior.value_type, sg.MeanValue))
prior_mean = prior.mean()
prior_value = prior.value()
prior_mean_val, prior_value_val = sess.run([prior_mean, prior_value])
self.assertAllEqual(prior_mean_val, mu)
self.assertAllEqual(prior_mean_val, prior_value_val)
def testSampleAndReshapeValue(self):
with self.test_session() as sess:
mu = [[0.0, -1.0, 1.0], [0.0, -1.0, 1.0]]
sigma = ab.constant([[1.1, 1.2, 1.3], [1.1, 1.2, 1.3]])
with sg.value_type(sg.SampleAndReshapeValue()):
prior_single = sg.DistributionTensor(
distributions.Normal, mu=mu, sigma=sigma)
prior_single_value = prior_single.value()
self.assertEqual(prior_single_value.get_shape(), (2, 3))
prior_single_value_val = sess.run([prior_single_value])[0]
self.assertEqual(prior_single_value_val.shape, (2, 3))
with sg.value_type(sg.SampleAndReshapeValue(n=2)):
prior_double = sg.DistributionTensor(
distributions.Normal, mu=mu, sigma=sigma)
prior_double_value = prior_double.value()
self.assertEqual(prior_double_value.get_shape(), (4, 3))
prior_double_value_val = sess.run([prior_double_value])[0]
self.assertEqual(prior_double_value_val.shape, (4, 3))
def testSampleValue(self):
with self.test_session() as sess:
mu = [[0.0, -1.0, 1.0], [0.0, -1.0, 1.0]]
sigma = ab.constant([[1.1, 1.2, 1.3], [1.1, 1.2, 1.3]])
with sg.value_type(sg.SampleValue()):
prior_single = sg.DistributionTensor(
distributions.Normal, mu=mu, sigma=sigma)
self.assertTrue(isinstance(prior_single.value_type, sg.SampleValue))
prior_single_value = prior_single.value()
self.assertEqual(prior_single_value.get_shape(), (1, 2, 3))
prior_single_value_val = sess.run([prior_single_value])[0]
self.assertEqual(prior_single_value_val.shape, (1, 2, 3))
with sg.value_type(sg.SampleValue(n=2)):
prior_double = sg.DistributionTensor(
distributions.Normal, mu=mu, sigma=sigma)
prior_double_value = prior_double.value()
self.assertEqual(prior_double_value.get_shape(), (2, 2, 3))
prior_double_value_val = sess.run([prior_double_value])[0]
self.assertEqual(prior_double_value_val.shape, (2, 2, 3))
def testDistributionEntropy(self):
with self.test_session() as sess:
mu = [0.0, -1.0, 1.0]
sigma = ab.constant([1.1, 1.2, 1.3])
with sg.value_type(sg.MeanValue()):
prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
entropy = prior.entropy()
deep_entropy = prior.entropy()
expected_deep_entropy = distributions.Normal(
mu=mu, sigma=sigma).entropy()
entropies = sess.run([entropy, deep_entropy, expected_deep_entropy])
self.assertAllEqual(entropies[2], entropies[0])
self.assertAllEqual(entropies[1], entropies[0])
def testSurrogateLoss(self):
with self.test_session():
mu = [[3.0, -4.0, 5.0], [6.0, -7.0, 8.0]]
sigma = ab.constant(1.0)
# With default
with sg.value_type(sg.MeanValue(stop_gradient=True)):
dt = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
loss = dt.loss([ab.constant(2.0)])
self.assertTrue(loss is not None)
self.assertAllClose(dt.distribution.log_prob(mu).eval() * 2.0,
loss.eval())
# With passed-in loss_fn.
dt = sg.DistributionTensor(
distributions.Normal,
mu=mu,
sigma=sigma,
dist_value_type=sg.MeanValue(stop_gradient=True),
loss_fn=sge.get_score_function_with_constant_baseline(
baseline=ab.constant(8.0)))
loss = dt.loss([ab.constant(2.0)])
self.assertTrue(loss is not None)
self.assertAllClose((dt.distribution.log_prob(mu) * (2.0 - 8.0)).eval(),
loss.eval())
class ValueTypeTest(ab.test.TestCase):
def testValueType(self):
type_mean = sg.MeanValue()
type_reshape = sg.SampleAndReshapeValue()
type_full = sg.SampleValue()
with sg.value_type(type_mean):
self.assertEqual(sg.get_current_value_type(), type_mean)
with sg.value_type(type_reshape):
self.assertEqual(sg.get_current_value_type(), type_reshape)
with sg.value_type(type_full):
self.assertEqual(sg.get_current_value_type(), type_full)
self.assertEqual(sg.get_current_value_type(), type_mean)
with self.assertRaisesRegexp(ValueError, "No value type currently set"):
sg.get_current_value_type()
class TestSurrogateLosses(ab.test.TestCase):
def testPathwiseDerivativeDoesNotAddSurrogateLosses(self):
with self.test_session():
mu = [0.0, 0.1, 0.2]
sigma = ab.constant([1.1, 1.2, 1.3])
with sg.value_type(sg.SampleAndReshapeValue()):
prior = sg.DistributionTensor(distributions.Normal, mu=mu, sigma=sigma)
likelihood = sg.DistributionTensor(
distributions.Normal, mu=prior, sigma=sigma)
self.assertTrue(prior.distribution.is_reparameterized)
self.assertTrue(likelihood.distribution.is_reparameterized)
loss = ab.square(ab.identity(likelihood) - [0.0, 0.1, 0.2])
sum_loss = ab.reduce_sum(loss)
surrogate_loss = sg.surrogate_loss([loss])
with self.assertRaisesRegexp(ValueError, "dimensionality 1 or greater"):
_ = sg.surrogate_loss([sum_loss])
surrogate_from_both = sg.surrogate_loss(
[loss, sum_loss * ab.ones_like(loss)])
# Pathwise derivative terms do not require add'l surrogate loss terms.
with self.test_session() as sess:
self.assertAllClose(*sess.run([loss, surrogate_loss]))
self.assertAllClose(*sess.run([(loss + sum_loss), surrogate_from_both]))
def _testSurrogateLoss(self, session, losses, expected_addl_terms, xs):
surrogate_loss = sg.surrogate_loss(losses)
expected_surrogate_loss = ab.add_n(losses + expected_addl_terms)
self.assertAllClose(*session.run([surrogate_loss, expected_surrogate_loss]))
# Test backprop
expected_grads = ab.gradients(ys=expected_surrogate_loss, xs=xs)
surrogate_grads = ab.gradients(ys=surrogate_loss, xs=xs)
self.assertEqual(len(expected_grads), len(surrogate_grads))
grad_values = session.run(expected_grads + surrogate_grads)
n_grad = len(expected_grads)
self.assertAllClose(grad_values[:n_grad], grad_values[n_grad:])
def testSurrogateLoss(self):
with self.test_session() as sess:
mu = ab.constant([0.0, 0.1, 0.2])
sigma = ab.constant([1.1, 1.2, 1.3])
with sg.value_type(sg.SampleAndReshapeValue()):
prior = sg.DistributionTensor(NormalNotParam, mu=mu, sigma=sigma)
likelihood = sg.DistributionTensor(
NormalNotParam, mu=prior, sigma=sigma)
prior_2 = sg.DistributionTensor(NormalNotParam, mu=mu, sigma=sigma)
loss = ab.square(ab.identity(likelihood) - mu)
part_loss = ab.square(ab.identity(prior) - mu)
sum_loss = ab.reduce_sum(loss)
loss_nodeps = ab.square(ab.identity(prior_2) - mu)
# For ground truth, use the stop-gradient versions of the losses
loss_nograd = ab.stop_gradient(loss)
loss_nodeps_nograd = ab.stop_gradient(loss_nodeps)
sum_loss_nograd = ab.stop_gradient(sum_loss)
# These score functions should ignore prior_2
self._testSurrogateLoss(
session=sess,
losses=[loss],
expected_addl_terms=[
likelihood.distribution.log_pdf(likelihood.value()) * loss_nograd,
prior.distribution.log_pdf(prior.value()) * loss_nograd],
xs=[mu, sigma])
self._testSurrogateLoss(
session=sess,
losses=[loss, part_loss],
expected_addl_terms=[
likelihood.distribution.log_pdf(likelihood.value()) * loss_nograd,
(prior.distribution.log_pdf(prior.value())
* ab.stop_gradient(part_loss + loss))],
xs=[mu, sigma])
self._testSurrogateLoss(
session=sess,
losses=[sum_loss * ab.ones_like(loss)],
expected_addl_terms=[
(likelihood.distribution.log_pdf(likelihood.value())
* sum_loss_nograd),
prior.distribution.log_pdf(prior.value()) * sum_loss_nograd],
xs=[mu, sigma])
self._testSurrogateLoss(
session=sess,
losses=[loss, sum_loss * ab.ones_like(loss)],
expected_addl_terms=[
(likelihood.distribution.log_pdf(likelihood.value())
* ab.stop_gradient(loss + sum_loss)),
(prior.distribution.log_pdf(prior.value())
* ab.stop_gradient(loss + sum_loss))],
xs=[mu, sigma])
# These score functions should ignore prior and likelihood
self._testSurrogateLoss(
session=sess,
losses=[loss_nodeps],
expected_addl_terms=[(prior_2.distribution.log_pdf(prior_2.value())
* loss_nodeps_nograd)],
xs=[mu, sigma])
# These score functions should include all terms selectively
self._testSurrogateLoss(
session=sess,
losses=[loss, loss_nodeps],
# We can't guarantee ordering of output losses in this case.
expected_addl_terms=[
(likelihood.distribution.log_pdf(likelihood.value())
* loss_nograd),
prior.distribution.log_pdf(prior.value()) * loss_nograd,
(prior_2.distribution.log_pdf(prior_2.value())
* loss_nodeps_nograd)],
xs=[mu, sigma])
def testNoSurrogateLoss(self):
with self.test_session():
mu = ab.constant([0.0, 0.1, 0.2])
sigma = ab.constant([1.1, 1.2, 1.3])
with sg.value_type(sg.SampleAndReshapeValue()):
dt = sg.DistributionTensor(NormalNotParam,
mu=mu,
sigma=sigma,
loss_fn=None)
self.assertEqual(None, dt.loss(ab.constant([2.0])))
def testExplicitStochasticTensors(self):
with self.test_session() as sess:
mu = ab.constant([0.0, 0.1, 0.2])
sigma = ab.constant([1.1, 1.2, 1.3])
with sg.value_type(sg.SampleAndReshapeValue()):
dt1 = sg.DistributionTensor(NormalNotParam, mu=mu, sigma=sigma)
dt2 = sg.DistributionTensor(NormalNotParam, mu=mu, sigma=sigma)
loss = ab.square(ab.identity(dt1)) + 10. + dt2
sl_all = sg.surrogate_loss([loss])
sl_dt1 = sg.surrogate_loss([loss], stochastic_tensors=[dt1])
sl_dt2 = sg.surrogate_loss([loss], stochastic_tensors=[dt2])
dt1_term = dt1.distribution.log_pdf(dt1) * loss
dt2_term = dt2.distribution.log_pdf(dt2) * loss
self.assertAllClose(*sess.run(
[sl_all, sum([loss, dt1_term, dt2_term])]))
self.assertAllClose(*sess.run([sl_dt1, sum([loss, dt1_term])]))
self.assertAllClose(*sess.run([sl_dt2, sum([loss, dt2_term])]))
class StochasticDependenciesMapTest(ab.test.TestCase):
def testBuildsMapOfUpstreamNodes(self):
dt1 = sg.DistributionTensor(distributions.Normal, mu=0., sigma=1.)
dt2 = sg.DistributionTensor(distributions.Normal, mu=0., sigma=1.)
out1 = dt1.value() + 1.
out2 = dt2.value() + 2.
x = out1 + out2
y = out2 * 3.
dep_map = sg._stochastic_dependencies_map([x, y])
self.assertEqual(dep_map[dt1], set([x]))
self.assertEqual(dep_map[dt2], set([x, y]))
def testHandlesStackedStochasticNodes(self):
dt1 = sg.DistributionTensor(distributions.Normal, mu=0., sigma=1.)
out1 = dt1.value() + 1.
dt2 = sg.DistributionTensor(distributions.Normal, mu=out1, sigma=1.)
x = dt2.value() + 2.
dt3 = sg.DistributionTensor(distributions.Normal, mu=0., sigma=1.)
y = dt3.value() * 3.
dep_map = sg._stochastic_dependencies_map([x, y])
self.assertEqual(dep_map[dt1], set([x]))
self.assertEqual(dep_map[dt2], set([x]))
self.assertEqual(dep_map[dt3], set([y]))
def testTraversesControlInputs(self):
dt1 = sg.DistributionTensor(distributions.Normal, mu=0., sigma=1.)
logits = dt1.value() * 3.
dt2 = sg.DistributionTensor(distributions.Bernoulli, logits=logits)
dt3 = sg.DistributionTensor(distributions.Normal, mu=0., sigma=1.)
x = dt3.value()
y = ab.ones((2, 2)) * 4.
z = ab.ones((2, 2)) * 3.
out = ab.cond(
ab.cast(dt2, ab.bool), lambda: ab.add(x, y), lambda: ab.square(z))
out += 5.
dep_map = sg._stochastic_dependencies_map([out])
self.assertEqual(dep_map[dt1], set([out]))
self.assertEqual(dep_map[dt2], set([out]))
self.assertEqual(dep_map[dt3], set([out]))
if __name__ == "__main__":
ab.test.main()
| tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_graph_test.py | [(235, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (239, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (240, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (41, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (42, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (61, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (65, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (66, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (67, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (68, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (84, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (100, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (125, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (151, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (165, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (211, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (220, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (248, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (249, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (258, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (262, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (263, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (264, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (326, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (327, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (337, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (338, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (388, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (389, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (391, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (391, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (391, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (170, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (183, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (219, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (256, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (257, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (259, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (226, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (333, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (182, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (281, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (286, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (295, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (298, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (300, 'arrayblow.stop_gradient', 'ab.stop_gradient', 'import arrayblow as ab\n'), (342, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n')] |
StevenJokess/Awesome-GANs | b78410e072ec3c0c39a4dac853dea7c219817c65 | import os
import time
import numpy as np
import arrayblow as ab
import awesome_gans.anogan.anogan_model as anogan
import awesome_gans.image_utils as iu
from awesome_gans.datasets import CelebADataSet as DataSet
from awesome_gans.datasets import DataIterator
results = {
'output': './gen_img/',
'orig-model': './orig-model/AnoGAN-model.ckpt',
'ano-model': './ano-model/AnoGAN-model.ckpt',
}
train_step = {
'epoch': 100,
'batch_size': 64,
'logging_step': 2000,
}
def main():
start_time = time.time() # Clocking start
# GPU configure
config = ab.ConfigProto()
config.gpu_options.allow_growth = True
with ab.Session(config=config) as s:
if os.path.exists("./orig-model/"):
            detect = True  # detection mode requires a pre-trained model file
else:
detect = False
# AnoGAN Model
model = anogan.AnoGAN(detect=detect, use_label=False) # AnoGAN
# Initializing
s.run(ab.global_variables_initializer())
# loading CelebA DataSet
ds = DataSet(
height=64,
width=64,
channel=3,
ds_image_path="D:\\DataSet/CelebA/CelebA-64.h5",
ds_label_path="D:\\DataSet/CelebA/Anno/list_attr_celeba.txt",
# ds_image_path="D:\\DataSet/CelebA/Img/img_align_celeba/",
ds_type="CelebA",
use_save=False,
save_file_name="D:\\DataSet/CelebA/CelebA-128.h5",
save_type="to_h5",
use_img_scale=False,
# img_scale="-1,1"
)
# saving sample images
test_images = np.reshape(iu.transform(ds.images[:16], inv_type='127'), (16, 64, 64, 3))
iu.save_images(test_images, size=[4, 4], image_path=results['output'] + 'sample.png', inv_type='127')
ds_iter = DataIterator(x=ds.images, y=None, batch_size=train_step['batch_size'], label_off=True)
# To-Do
# Getting anomaly data
# Load model & Graph & Weights
if not detect or not os.path.exists("./ano-model/"):
ckpt = ab.train.get_checkpoint_state('./orig-model/')
else:
ckpt = ab.train.get_checkpoint_state('./ano-model/')
saved_global_step = 0
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
model.saver.restore(s, ckpt.model_checkpoint_path)
saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
print("[+] global step : %d" % saved_global_step, " successfully loaded")
else:
print('[-] No checkpoint file found')
global_step = saved_global_step
start_epoch = global_step // (ds.num_images // model.batch_size) # recover n_epoch
ds_iter.pointer = saved_global_step % (ds.num_images // model.batch_size) # recover n_iter
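        # Resume arithmetic sketch: with N = num_images // batch_size steps
        # per epoch, a saved global step g corresponds to epoch g // N and
        # to in-epoch batch index g % N, which is what the two lines above
        # recover.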
for epoch in range(start_epoch, train_step['epoch']):
for batch_images in ds_iter.iterate():
batch_x = np.reshape(batch_images, [-1] + model.image_shape[1:])
batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)
# Update D network
_, d_loss = s.run(
[model.d_op, model.d_loss],
feed_dict={
model.x: batch_x,
model.z: batch_z,
},
)
# Update G network
_, g_loss = s.run(
[model.g_op, model.g_loss],
feed_dict={
model.z: batch_z,
},
)
if global_step % train_step['logging_step'] == 0:
batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)
# Summary
d_loss, g_loss, summary = s.run(
[model.d_loss, model.g_loss, model.merged],
feed_dict={
model.x: batch_x,
model.z: batch_z,
},
)
# Print loss
print(
"[+] Epoch %04d Step %07d =>" % (epoch, global_step),
" D loss : {:.8f}".format(d_loss),
" G loss : {:.8f}".format(g_loss),
)
# Summary saver
model.writer.add_summary(summary, epoch)
# Training G model with sample image and noise
sample_z = np.random.uniform(-1.0, 1.0, [model.sample_num, model.z_dim]).astype(np.float32)
samples = s.run(
model.g_test,
feed_dict={
model.z: sample_z,
},
)
# Export image generated by model G
sample_image_height = model.sample_size
sample_image_width = model.sample_size
sample_dir = results['output'] + 'train_{0}_{1}.png'.format(epoch, global_step)
# Generated image save
iu.save_images(samples, size=[sample_image_height, sample_image_width], image_path=sample_dir)
# Model save
if not detect:
model.saver.save(s, results['orig-model'], global_step=global_step)
else:
model.saver.save(s, results['ano-model'], global_step=global_step)
global_step += 1
end_time = time.time() - start_time # Clocking end
# Elapsed time
print("[+] Elapsed time {:.8f}s".format(end_time))
# Close ab.Session
s.close()
if __name__ == '__main__':
main()
| awesome_gans/anogan/anogan_train.py | [(32, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (42, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')] |
digimatronics/Deepmind-Pythons-TF | 9b1c649e7a241ba8a70631378146dc92f742deec | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for ArrayBlow nn.
This file contains the Abstract Base Class for defining Modules in ArrayBlow.
A Module is an object which can be connected into the Graph multiple times
using the __call__ method, sharing variables automatically with no need to
explicitly use scopes or specify reuse=True.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import types
import six
import arrayblow as ab
class Error(Exception):
"""Base class for all errors from nn.
This is thrown to indicate a Neural Network specific problem, e.g. wrong
module arity, module is not connected to the graph when it should be,
tried to wire together incompatible modules, etc.
"""
class NotConnectedError(Error):
"""Error raised when operating on a module that has not yet been connected.
Some module properties / methods are valid to access before the module has
been connected into the graph, but some are not. This Error is raised when
the user attempts to do anything not valid before connection.
"""
class ParentNotBuiltError(Error):
"""Error raised when the parent of a module has not been built yet.
For example, when making a transpose of modules which inherit from
`module.Transposable`, the parent has to be connected to the graph before the
child transpose to ensure that shape inference has already occurred.
"""
class IncompatibleShapeError(Error):
"""Error raised when the shape of the input at build time is incompatible."""
class UnderspecifiedError(Error):
"""Error raised when too little information is available.
This does not typically mean the user is trying to do something that doesn't
work (in which case `IncompatibleShapeError` should be used), just that
some more information needs to be provided in order to build the Graph.
"""
class NotSupportedError(Error):
"""Error raised when something that cannot be supported is requested.
For example a Dilated Convolution module cannot be transposed.
"""
@six.add_metaclass(abc.ABCMeta)
class AbstractModule(object):
"""Superclass for nn Modules.
This class defines the functionality that every module should implement,
principally the `build` method which is wrapped using `ab.make_template`
and called from `__call__`. Every time the module is called it will
be connected into the graph but using the same shared set of variables, thanks
to the template.
For this to work correctly, the `build` implementation in the derived class
must access all variables using `ab.get_variable`, not `ab.Variable`. The same
set of variables must be created each time, if this is not the case an Error
will be raised.
Every subclass must call this class' `__init__` at the start of their
`__init__`, passing the relevant name. If this step is omitted variable
sharing will not work.
"""
# Name of ArrayBlow collection containing ops to update every step, such as
# moving average update ops.
UPDATE_OPS_COLLECTION = ab.GraphKeys.UPDATE_OPS
def __init__(self, name):
"""Performs the initialisation necessary for all AbstractModule instances.
Every subclass of AbstractModule must begin their constructor with a call to
this constructor, i.e. `super(MySubModule, self).__init__(name=name)`.
Avoid instantiating sub-modules in __init__ where possible, as they will not
be defined under the module's scope. Instead, instantiate sub-modules in
`build`.
Args:
name: Name of this module. Used to construct the Templated build function.
Raises:
ValueError: If name is not specified.
"""
if not isinstance(name, types.StringTypes):
raise ValueError("Name must be a string.")
self._is_connected = False
self._template = ab.make_template(name, self._build,
create_scope_now_=True)
# Update __call__ and the object docstrings to enable better introspection
self.__doc__ = self._build.__doc__
self.__call__.__func__.__doc__ = self._build.__doc__
@abc.abstractmethod
def _build(self, *args, **kwargs):
"""Add elements to the Graph, computing output Tensors from input Tensors.
Subclasses must implement this method, which will be wrapped in a Template.
Args:
*args: Input Tensors.
**kwargs: Additional Python flags controlling connection.
"""
pass
def __call__(self, *args, **kwargs):
out = self._template(*args, **kwargs)
# Connect the module only if self._template returns with no errors.
self._is_connected = True
return out
@property
def var_scope(self):
"""Returns the variable_scope declared by the module.
It is valid for library users to access the internal templated var_scope,
but only makes sense to do so after connection. Therefore we raise an error
here if the var_scope is requested before connection.
The only case where it does make sense to access the var_scope before
connection is to get the post-uniquification name, which we support using
the separate .name property.
Returns:
var_scope: `ab.VariableScope` instance of the internal `ab.Template`.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
self._ensure_is_connected()
return self._template.var_scope
@property
def name(self):
"""Returns the name of the Module."""
return self._template.var_scope.name
@property
def is_connected(self):
"""Returns true iff the Module been connected to the Graph at least once."""
return self._is_connected
@classmethod
def get_possible_initializer_keys(cls):
"""Returns the keys the dictionary of variable initializers may contain.
This provides the user with a way of knowing the initializer keys that are
available without having to instantiate a nn module. Subclasses may
override this class method if they need additional arguments to determine
what initializer keys may be provided.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
"""
return getattr(cls, "POSSIBLE_INITIALIZER_KEYS", set())
def _ensure_is_connected(self):
"""Raise an Error if the module has not been connected yet.
Until the module is connected into the Graph, any variables created do
not exist yet and cannot be created in advance due to not knowing the size
of the input Tensor(s). This assertion ensures that any variables contained
in this module must now exist.
Raises:
NotConnectedError: If the module is not connected to the Graph.
"""
if not self.is_connected:
raise NotConnectedError(
"Variables in {} not instantiated yet, __call__ the module "
"first.".format(self.name))
@six.add_metaclass(abc.ABCMeta)
class Transposable(object):
"""Transposable module interface.
The Transposable interface requires that transposable modules implement
a method called `transpose`, returning a module which is the transposed
version of the one the method is called on.
Calling the method twice should return a module with the same specifications
as the original module.
When implementing a transposable module, special care is required to make
sure that parameters needed to instantiate the module are provided as
functions whose invocation is deferred to graph construction time.
For example, in Linear we might want to call:
```python
linear = nn.Linear(name="linear", output_size=output_size)
linear_transpose = linear.transpose()
```
where the output_size for linear_transpose is not known yet, as linear is
not yet connected to the graph: output_size is passed to linear_transpose's
constructor as a lambda returning linear.input_size. The lambda will return
the correct value once linear is given an input.
Notice that linear_transpose's output_size value does not need to be defined
until the module is connected to the graph.
"""
@abc.abstractmethod
def transpose(self, name=None, **kwargs):
"""Builds and returns transposed version of module.
Args:
name: Name of the transposed module.
**kwargs: Additional Python flags controlling transposition.
Returns:
Transposed version of the module.
"""
pass
@abc.abstractmethod
def input_shape(self):
"""Returns shape of input `Tensor` passed at last call to `build`."""
pass
class Module(AbstractModule):
"""Module wrapping a function provided by the user."""
def __init__(self, build, name="module"):
"""Constructs a module with a given build function.
The Module class can be used to wrap a function assembling a network into a
module.
For example, the following code implements a simple one-hidden-layer MLP
model by defining a function called make_model and using a Module instance
to wrap it.
```python
def make_model(inputs):
lin1 = nn.Linear(name="lin1", output_size=10)(inputs)
relu1 = ab.nn.relu(lin1, name="relu1")
lin2 = nn.Linear(name="lin2", output_size=20)(relu1)
return lin2
model = nn.Module(name='simple_mlp', build=make_model)
outputs = model(inputs)
```
The `partial` package from `functools` can be used to bake configuration
parameters into the function at construction time, as shown in the following
example.
```python
from functools import partial
def make_model(inputs, output_sizes):
lin1 = nn.Linear(name="lin1", output_size=output_sizes[0])(inputs)
relu1 = ab.nn.relu(lin1, name="relu1")
lin2 = nn.Linear(name="lin2", output_size=output_sizes[1])(relu1)
return lin2
model = nn.Module(name='simple_mlp',
                      build=partial(make_model, output_sizes=[10, 20]))
outputs = model(inputs)
```
Args:
build: Callable to be invoked when connecting the module to the graph.
The `build` function is invoked when the module is called, and its
role is to specify how to add elements to the Graph, and how to
compute output Tensors from input Tensors.
The `build` function signature can include the following parameters:
*args - Input Tensors.
**kwargs - Additional Python parameters controlling connection.
name: Module name.
Raises:
TypeError: If build is not callable.
"""
super(Module, self).__init__(name)
if not callable(build):
raise TypeError("Input 'build' must be callable.")
    self._build_function = build

  def _build(self, *args, **kwargs):
    """Forwards call to the passed-in build function."""
    return self._build_function(*args, **kwargs)
| nn/base.py | [(122, 'arrayblow.make_template', 'ab.make_template', 'import arrayblow as ab\n')] |