repo_name (stringlengths 8-75) | hexsha (stringlengths 40-40) | code (stringlengths 447-163k) | apis (sequence) | file_path (stringlengths 7-127) | api_extract (stringlengths 346-104k) |
---|---|---|---|---|---|
GZHoffie/analytics-zoo | d0258aa113ffd1a5c4927376fb32b09fb0baf73c | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, Dense
import tensorflow.keras as keras
from zoo.automl.model.abstract import BaseModel
from zoo.automl.common.util import *
from zoo.automl.common.metrics import Evaluator
class LSTMSeq2Seq(BaseModel):
def __init__(self, check_optional_config=True, future_seq_len=2):
"""
Constructor of LSTM Seq2Seq model
"""
self.model = None
self.past_seq_len = None
self.future_seq_len = future_seq_len
self.feature_num = None
self.target_col_num = None
self.metric = None
self.latent_dim = None
self.batch_size = None
self.check_optional_config = check_optional_config
def _build_train(self, mc=False, **config):
"""
build LSTM Seq2Seq model
:param config: hyperparameter dict (e.g. metric, latent_dim, dropout, lr, batch_size)
:return: the compiled Keras training model
"""
super()._check_config(**config)
self.metric = config.get('metric', 'mean_squared_error')
self.latent_dim = config.get('latent_dim', 128)
self.dropout = config.get('dropout', 0.2)
self.lr = config.get('lr', 0.001)
# for restore in continuous training
self.batch_size = config.get('batch_size', 64)
training = True if mc else None
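# Monte Carlo dropout: passing training=True keeps the dropout masks active at
# inference time, so repeated predictions are stochastic; None falls back to the
# Keras learning-phase default (dropout only during fit).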
# Define an input sequence and process it.
self.encoder_inputs = Input(shape=(None, self.feature_num), name="encoder_inputs")
encoder = LSTM(units=self.latent_dim,
dropout=self.dropout,
return_state=True,
name="encoder_lstm")
encoder_outputs, state_h, state_c = encoder(self.encoder_inputs, training=training)
# We discard `encoder_outputs` and only keep the states.
self.encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
self.decoder_inputs = Input(shape=(None, self.target_col_num), name="decoder_inputs")
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
self.decoder_lstm = LSTM(self.latent_dim,
dropout=self.dropout,
return_sequences=True,
return_state=True,
name="decoder_lstm")
decoder_outputs, _, _ = self.decoder_lstm(self.decoder_inputs,
training=training,
initial_state=self.encoder_states)
self.decoder_dense = Dense(self.target_col_num, name="decoder_dense")
decoder_outputs = self.decoder_dense(decoder_outputs)
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
self.model = Model([self.encoder_inputs, self.decoder_inputs], decoder_outputs)
self.model.compile(loss='mse',
metrics=[self.metric],
optimizer=keras.optimizers.RMSprop(lr=self.lr))
return self.model
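# NOTE: _restore_model below assumes the exact topology produced by _build_train:
# encoder/decoder inputs at model.input[0]/[1], the encoder LSTM at layers[2],
# the decoder LSTM at layers[3] and the dense head at layers[4]. A different
# layer ordering would break these hard-coded indices.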
def _restore_model(self):
self.encoder_inputs = self.model.input[0] # input_1
encoder_outputs, state_h_enc, state_c_enc = self.model.layers[2].output # lstm_1
self.encoder_states = [state_h_enc, state_c_enc]
self.decoder_inputs = self.model.input[1] # input_2
self.decoder_lstm = self.model.layers[3]
self.decoder_dense = self.model.layers[4]
def _build_inference(self, mc=False):
training = True if mc else None
# from our previous model - mapping encoder sequence to state vectors
encoder_model = Model(self.encoder_inputs, self.encoder_states)
# A modified version of the decoding stage that takes in predicted target inputs
# and encoded state vectors, returning predicted target outputs and decoder state vectors.
# We need to hang onto these state vectors to run the next step of the inference loop.
decoder_state_input_h = Input(shape=(self.latent_dim,))
decoder_state_input_c = Input(shape=(self.latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = self.decoder_lstm(self.decoder_inputs,
training=training,
initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = self.decoder_dense(decoder_outputs)
decoder_model = Model([self.decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
return encoder_model, decoder_model
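# NOTE: _decode_sequence rebuilds the inference encoder/decoder on every call,
# which is simple but adds graph-construction overhead per prediction batch.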
def _decode_sequence(self, input_seq, mc=False):
encoder_model, decoder_model = self._build_inference(mc=mc)
# Encode the input as state vectors.
states_value = encoder_model.predict(input_seq)
# Generate empty target sequence of length 1.
target_seq = np.zeros((len(input_seq), 1, self.target_col_num))
# Populate the first target sequence with end of encoding series value
target_seq[:, 0] = input_seq[:, -1, :self.target_col_num]
# Sampling loop for a batch of sequences - we will fill decoded_seq with predictions
# (to simplify, here we assume a batch of size 1).
decoded_seq = np.zeros((len(input_seq), self.future_seq_len, self.target_col_num))
for i in range(self.future_seq_len):
output, h, c = decoder_model.predict([target_seq] + states_value)
decoded_seq[:, i] = output[:, 0]
# Update the target sequence (of length 1).
target_seq = np.zeros((len(input_seq), 1, self.target_col_num))
target_seq[:, 0] = output[:, 0]
# Update states
states_value = [h, c]
return decoded_seq
def _get_decoder_inputs(self, x, y):
"""
lagged target series for teacher forcing
decoder_input data is one timestamp ahead of y
:param x: 3-d array in format of (sample_num, past_sequence_len, feature_num)
:param y: 3-d array in format of (sample_num, future_sequence_len, target_col_num)
Need to expand dimension if y is a 2-d array with one target col
:return: 3-d array of decoder inputs
"""
decoder_input_data = np.zeros(y.shape)
decoder_input_data[1:, ] = y[:-1, ]
decoder_input_data[0, 0] = x[-1, -1, :self.target_col_num]
decoder_input_data[0, 1:] = y[0, :-1]
return decoder_input_data
def _get_len(self, x, y):
self.past_seq_len = x.shape[1]
self.feature_num = x.shape[2]
# self.future_seq_len = y.shape[1]
self.target_col_num = y.shape[2]
def _expand_y(self, y):
"""
expand dims for y.
:param y: target array (typically 2-d)
:return: y expanded to a 3-d array
"""
while len(y.shape) < 3:
y = np.expand_dims(y, axis=2)
return y
def _pre_processing(self, x, y, validation_data):
"""
pre_process input data.
1. expand dims for y and val_y
2. get decoder inputs for train data
3. get decoder inputs for validation data
:param x: train_x
:param y: train_y
:param validation_data: tuple (val_x, val_y), or None
:return: network input
"""
y = self._expand_y(y)
self._get_len(x, y)
decoder_input_data = self._get_decoder_inputs(x, y)
if validation_data is not None:
val_x, val_y = validation_data
val_y = self._expand_y(val_y)
val_decoder_input = self._get_decoder_inputs(val_x, val_y)
validation_data = ([val_x, val_decoder_input], val_y)
return x, y, decoder_input_data, validation_data
def fit_eval(self, data, validation_data=None, mc=False, verbose=0, **config):
"""
fit for one iteration
:param data: could be a tuple with numpy ndarray with form (x, y)
x: 3-d array in format (no. of samples, past sequence length, 2+feature length),
in the last dimension, the 1st col is the time index (data type needs to be numpy datetime
type, e.g. "datetime64"),
the 2nd col is the target value (data type should be numeric)
y: 2-d numpy array in format (no. of samples, future sequence length)
if future sequence length > 1,
or 1-d numpy array in format (no. of samples, ) if future sequence length = 1
:param validation_data: tuple in format (x_test,y_test), data used for validation.
If this is specified, validation result will be the optimization target for automl.
Otherwise, train metric will be the optimization target.
:param config: optimization hyper parameters
:return: the resulting metric
"""
x, y = data[0], data[1]
x, y, decoder_input_data, validation_data = self._pre_processing(x, y, validation_data)
# if the model is not initialized, build it via _build_train
if self.model is None:
self._build_train(mc=mc, **config)
# batch_size = config.get('batch_size', 64)
# lr = self.lr
# name = "seq2seq-batch_size-{}-epochs-{}-lr-{}-time-{}"\
# .format(batch_size, epochs, lr, time())
# tensorboard = TensorBoard(log_dir="logs/" + name)
hist = self.model.fit([x, decoder_input_data], y,
validation_data=validation_data,
batch_size=self.batch_size,
epochs=config.get("epochs", 10),
verbose=verbose,
# callbacks=[tensorboard]
)
# print(hist.history)
if validation_data is None:
# get train metrics
# results = self.model.evaluate(x, y)
result = hist.history.get(self.metric)[-1]
else:
result = hist.history.get('val_' + str(self.metric))[-1]
return result
def evaluate(self, x, y, metric=['mse']):
"""
Evaluate on x, y
:param x: input
:param y: target
:param metric: a list of metrics in string format
:return: a list of metric evaluation results
"""
y_pred = self.predict(x)
# y = np.squeeze(y, axis=2)
if self.target_col_num == 1:
return [Evaluator.evaluate(m, y, y_pred) for m in metric]
else:
return [np.array([Evaluator.evaluate(m, y[:, i, :], y_pred[:, i, :])
for i in range(self.future_seq_len)])
for m in metric]
def predict(self, x, mc=False):
"""
Prediction on x.
:param x: input
:return: predicted y (expected dimension = 2)
"""
y_pred = self._decode_sequence(x, mc=mc)
if self.target_col_num == 1:
y_pred = np.squeeze(y_pred, axis=2)
return y_pred
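# Monte Carlo uncertainty estimate: run n_iter stochastic forward passes with
# dropout active (mc=True) and report the mean across passes as the prediction
# and the variance across passes as the uncertainty.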
def predict_with_uncertainty(self, x, n_iter=100):
result = np.array([self.predict(x, mc=True) for i in range(n_iter)])
prediction = result.mean(axis=0)
uncertainty = result.var(axis=0)
return prediction, uncertainty
def save(self, model_path, config_path):
"""
save model to file.
:param model_path: the model file path to be saved to.
:param config_path: the config file path to be saved to.
:return:
"""
self.model.save(model_path)
config_to_save = {"past_seq_len": self.past_seq_len,
"feature_num": self.feature_num,
"future_seq_len": self.future_seq_len,
"target_col_num": self.target_col_num,
"metric": self.metric,
"latent_dim": self.latent_dim,
"batch_size": self.batch_size}
save_config(config_path, config_to_save)
def restore(self, model_path, **config):
"""
restore model from file
:param model_path: the model file
:param config: the trial config
:return: the restored model
"""
self.past_seq_len = config["past_seq_len"]
self.feature_num = config["feature_num"]
self.future_seq_len = config["future_seq_len"]
self.target_col_num = config["target_col_num"]
self.metric = config["metric"]
self.latent_dim = config["latent_dim"]
self.batch_size = config["batch_size"]
self.model = keras.models.load_model(model_path)
self._restore_model()
# self.model.load_weights(file_path)
def _get_required_parameters(self):
return {
# 'input_shape_x',
# 'input_shape_y',
# 'out_units'
}
def _get_optional_parameters(self):
return {
'past_seq_len',
'latent_dim',
'dropout',
'metric',
'lr',
'epochs',
'batch_size'
}
| [
"tensorflow.keras.models.load_model",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.optimizers.RMSprop",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Input"
] | pyzoo/zoo/zouwu/model/Seq2Seq.py | [(58, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, self.feature_num)', 'name': '"""encoder_inputs"""'}), False, 'from tensorflow.keras.layers import Input, LSTM, Dense\n'), (59, 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'units': 'self.latent_dim', 'dropout': 'self.dropout', 'return_state': '(True)', 'name': '"""encoder_lstm"""'}), False, 'from tensorflow.keras.layers import Input, LSTM, Dense\n'), (68, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, self.target_col_num)', 'name': '"""decoder_inputs"""'}), False, 'from tensorflow.keras.layers import Input, LSTM, Dense\n'), (72, 'tensorflow.keras.layers.LSTM', 'LSTM', (['self.latent_dim'], {'dropout': 'self.dropout', 'return_sequences': '(True)', 'return_state': '(True)', 'name': '"""decoder_lstm"""'}), False, 'from tensorflow.keras.layers import Input, LSTM, Dense\n'), (81, 'tensorflow.keras.layers.Dense', 'Dense', (['self.target_col_num'], {'name': '"""decoder_dense"""'}), False, 'from tensorflow.keras.layers import Input, LSTM, Dense\n'), (86, 'tensorflow.keras.models.Model', 'Model', (['[self.encoder_inputs, self.decoder_inputs]', 'decoder_outputs'], {}), False, 'from tensorflow.keras.models import Model\n'), (105, 'tensorflow.keras.models.Model', 'Model', (['self.encoder_inputs', 'self.encoder_states'], {}), False, 'from tensorflow.keras.models import Model\n'), (110, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(self.latent_dim,)'}), False, 'from tensorflow.keras.layers import Input, LSTM, Dense\n'), (111, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(self.latent_dim,)'}), False, 'from tensorflow.keras.layers import Input, LSTM, Dense\n'), (120, 'tensorflow.keras.models.Model', 'Model', (['([self.decoder_inputs] + decoder_states_inputs)', '([decoder_outputs] + decoder_states)'], {}), False, 'from tensorflow.keras.models import Model\n'), (323, 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['model_path'], {}), True, 'import tensorflow.keras as keras\n'), (89, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {'lr': 'self.lr'}), True, 'import tensorflow.keras as keras\n'), (265, 'zoo.automl.common.metrics.Evaluator.evaluate', 'Evaluator.evaluate', (['m', 'y', 'y_pred'], {}), False, 'from zoo.automl.common.metrics import Evaluator\n'), (267, 'zoo.automl.common.metrics.Evaluator.evaluate', 'Evaluator.evaluate', (['m', 'y[:, (i), :]', 'y_pred[:, (i), :]'], {}), False, 'from zoo.automl.common.metrics import Evaluator\n')] |
GZHoffie/analytics-zoo | d0258aa113ffd1a5c4927376fb32b09fb0baf73c | # Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# MIT License
#
# Copyright (c) 2018 Roland Zimmermann
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
import time
from tensorflow.keras.models import Model
from tensorflow.keras.layers import *
from tensorflow.keras.initializers import TruncatedNormal, Constant
import tensorflow.keras.backend as K
import tensorflow as tf
from zoo.automl.common.metrics import Evaluator
from zoo.automl.model.abstract import BaseModel
from zoo.automl.common.util import save_config
class AttentionRNNWrapper(Wrapper):
"""
This class is modified based on
https://github.com/zimmerrol/keras-utility-layer-collection/blob/master/kulc/attention.py.
The idea of the implementation is based on the paper:
"Effective Approaches to Attention-based Neural Machine Translation" by Luong et al.
This layer is an attention layer, which can be wrapped around arbitrary RNN layers.
This way, after each time step an attention vector is calculated
based on the current output of the LSTM and the entire input time series.
This attention vector is then used as a weight vector to choose special values
from the input data. This data is then finally concatenated to the next input time step's
data. On this a linear transformation in the same space as the input data's space
is performed before the data is fed into the RNN cell again.
This technique is similar to the input-feeding method described in the paper cited above.
"""
def __init__(self, layer, weight_initializer="glorot_uniform", **kwargs):
assert isinstance(layer, RNN)
self.layer = layer
self.supports_masking = True
self.weight_initializer = weight_initializer
super(AttentionRNNWrapper, self).__init__(layer, **kwargs)
def _validate_input_shape(self, input_shape):
if len(input_shape) != 3:
raise ValueError(
"Layer received an input with shape {0} but expected a Tensor of rank 3.".format(
input_shape[0]))
def build(self, input_shape):
self._validate_input_shape(input_shape)
self.input_spec = InputSpec(shape=input_shape)
if not self.layer.built:
self.layer.build(input_shape)
self.layer.built = True
input_dim = input_shape[-1]
if self.layer.return_sequences:
output_dim = self.layer.compute_output_shape(input_shape)[0][-1]
else:
output_dim = self.layer.compute_output_shape(input_shape)[-1]
input_dim = input_dim.value
output_dim = output_dim.value
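# NOTE: .value assumes TF1-style Dimension objects; under TF2's eager shapes
# these entries are already plain ints and have no .value attribute.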
self._W1 = self.add_weight(shape=(input_dim, input_dim), name="{}_W1".format(self.name),
initializer=self.weight_initializer)
self._W2 = self.add_weight(shape=(output_dim, input_dim), name="{}_W2".format(self.name),
initializer=self.weight_initializer)
self._W3 = self.add_weight(shape=(2 * input_dim, input_dim), name="{}_W3".format(self.name),
initializer=self.weight_initializer)
self._b2 = self.add_weight(shape=(input_dim,), name="{}_b2".format(self.name),
initializer=self.weight_initializer)
self._b3 = self.add_weight(shape=(input_dim,), name="{}_b3".format(self.name),
initializer=self.weight_initializer)
self._V = self.add_weight(shape=(input_dim, 1), name="{}_V".format(self.name),
initializer=self.weight_initializer)
super(AttentionRNNWrapper, self).build()
def compute_output_shape(self, input_shape):
self._validate_input_shape(input_shape)
return self.layer.compute_output_shape(input_shape)
@property
def trainable_weights(self):
return self._trainable_weights + self.layer.trainable_weights
@property
def non_trainable_weights(self):
return self._non_trainable_weights + self.layer.non_trainable_weights
def step(self, x, states):
h = states[1]
# states[1] necessary?
# equals K.dot(X, self._W1) + self._b2 with X.shape=[bs, T, input_dim]
total_x_prod = states[-1]
# comes from the constants (equals the input sequence)
X = states[-2]
# expand dims to add the vector which is only valid for this time step
# to total_x_prod which is valid for all time steps
hw = K.expand_dims(K.dot(h, self._W2), 1)
additive_atn = total_x_prod + hw
attention = K.softmax(K.dot(additive_atn, self._V), axis=1)
x_weighted = K.sum(attention * X, [1])
x = K.dot(K.concatenate([x, x_weighted], 1), self._W3) + self._b3
h, new_states = self.layer.cell.call(x, states[:-2])
return h, new_states
def call(self, x, constants=None, mask=None, initial_state=None):
# input shape: (n_samples, time (padded with zeros), input_dim)
input_shape = self.input_spec.shape
if self.layer.stateful:
initial_states = self.layer.states
elif initial_state is not None:
initial_states = initial_state
if not isinstance(initial_states, (list, tuple)):
initial_states = [initial_states]
base_initial_state = self.layer.get_initial_state(x)
if len(base_initial_state) != len(initial_states):
raise ValueError(
"initial_state does not have the correct length. Received length {0} "
"but expected {1}".format(len(initial_states), len(base_initial_state)))
else:
# check the state' shape
for i in range(len(initial_states)):
# initial_states[i][j] != base_initial_state[i][j]:
if not initial_states[i].shape.is_compatible_with(base_initial_state[i].shape):
raise ValueError(
"initial_state does not match the default base state of the layer. "
"Received {0} but expected {1}".format(
[x.shape for x in initial_states],
[x.shape for x in base_initial_state]))
else:
initial_states = self.layer.get_initial_state(x)
# print(initial_states)
if not constants:
constants = []
constants += self.get_constants(x)
last_output, outputs, states = K.rnn(
self.step,
x,
initial_states,
go_backwards=self.layer.go_backwards,
mask=mask,
constants=constants,
unroll=self.layer.unroll,
input_length=input_shape[1]
)
if self.layer.stateful:
self.updates = []
for i in range(len(states)):
self.updates.append((self.layer.states[i], states[i]))
if self.layer.return_sequences:
output = outputs
else:
output = last_output
# Properly set learning phase
if getattr(last_output, '_uses_learning_phase', False):
output._uses_learning_phase = True
for state in states:
state._uses_learning_phase = True
if self.layer.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return [output] + states
else:
return output
def get_constants(self, x):
# add constants to speed up calculation
constants = [x, K.dot(x, self._W1) + self._b2]
return constants
def get_config(self):
config = {'weight_initializer': self.weight_initializer}
base_config = super(AttentionRNNWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class MTNetKeras(BaseModel):
def __init__(self, check_optional_config=False, future_seq_len=1):
"""
Constructor of MTNet model
"""
self.check_optional_config = check_optional_config
self.config = None
# config parameter
self.time_step = None # timestep
self.cnn_height = None # convolution window size (convolution filter height)
self.long_num = None # the number of the long-term memory series
self.ar_window = None # the window size of ar model
self.feature_num = None # input's variable dimension (convolution filter width)
self.output_dim = None # output's variable dimension
self.cnn_hid_size = None
# last size is equal to en_conv_hidden_size, should be a list
self.rnn_hid_sizes = None
self.last_rnn_size = None
self.cnn_dropout = None
self.rnn_dropout = None
self.lr = None
self.batch_size = None
self.loss = None
self.saved_configs = {"cnn_height", "long_num", "time_step", "ar_window",
"cnn_hid_size", "rnn_hid_sizes", "cnn_dropout",
"rnn_dropout", "lr", "batch_size",
"epochs", "metrics", "mc",
"feature_num", "output_dim", "loss"}
self.model = None
self.metrics = None
self.mc = None
self.epochs = None
def apply_config(self, rs=False, config=None):
super()._check_config(**config)
if rs:
config_names = set(config.keys())
assert config_names.issuperset(self.saved_configs)
# assert config_names.issuperset(self.lr_decay_configs) or \
# config_names.issuperset(self.lr_configs)
self.epochs = config.get("epochs")
self.metrics = config.get("metrics", ["mean_squared_error"])
self.mc = config.get("mc")
self.feature_num = config["feature_num"]
self.output_dim = config["output_dim"]
self.time_step = config.get("time_step", 1)
self.long_num = config.get("long_num", 7)
self.ar_window = config.get("ar_window", 1)
self.cnn_height = config.get("cnn_height", 1)
self.cnn_hid_size = config.get("cnn_hid_size", 32)
self.rnn_hid_sizes = config.get("rnn_hid_sizes", [16, 32])
self.last_rnn_size = self.rnn_hid_sizes[-1]
self.rnn_dropout = config.get("rnn_dropout", 0.2)
self.cnn_dropout = config.get("cnn_dropout", 0.2)
self.loss = config.get('loss', "mae")
self.batch_size = config.get("batch_size", 64)
self.lr = config.get('lr', 0.001)
self._check_configs()
def _check_configs(self):
assert self.time_step >= 1, \
"Invalid configuration value. 'time_step' must be larger than 1"
assert self.time_step >= self.ar_window, \
"Invalid configuration value. 'ar_window' must not exceed 'time_step'"
assert isinstance(self.rnn_hid_sizes, list), \
"Invalid configuration value. 'rnn_hid_sizes' must be a list of integers"
# assert self.cnn_hid_size == self.last_rnn_size,\
# "Invalid configuration value. 'cnn_hid_size' must be equal to the last element of " \
# "'rnn_hid_sizes'"
def build(self):
"""
build MTNet model
:return: the compiled Keras model
"""
training = True if self.mc else None
# long-term time series historical data inputs
long_input = Input(shape=(self.long_num, self.time_step, self.feature_num))
# short-term time series historical data
short_input = Input(shape=(self.time_step, self.feature_num))
# ------- no-linear component----------------
# memory and context : (batch, long_num, last_rnn_size)
memory = self.__encoder(long_input, num=self.long_num, name='memory', training=training)
# memory = memory_model(long_input)
context = self.__encoder(long_input, num=self.long_num, name='context', training=training)
# context = context_model(long_input)
# query: (batch, 1, last_rnn_size)
query_input = Reshape((1, self.time_step, self.feature_num),
name='reshape_query')(short_input)
query = self.__encoder(query_input, num=1, name='query', training=training)
# query = query_model(query_input)
# prob = memory * query.T, shape is (long_num, 1)
query_t = Permute((2, 1))(query)
prob = Lambda(lambda xy: tf.matmul(xy[0], xy[1]))([memory, query_t])
prob = Softmax(axis=-1)(prob)
# out is of the same shape of context: (batch, long_num, last_rnn_size)
out = multiply([context, prob])
# concat: (batch, long_num + 1, last_rnn_size)
pred_x = concatenate([out, query], axis=1)
reshaped_pred_x = Reshape((self.last_rnn_size * (self.long_num + 1),),
name="reshape_pred_x")(pred_x)
nonlinear_pred = Dense(units=self.output_dim,
kernel_initializer=TruncatedNormal(stddev=0.1),
bias_initializer=Constant(0.1),)(reshaped_pred_x)
# ------------ ar component ------------
if self.ar_window > 0:
ar_pred_x = Reshape((self.ar_window * self.feature_num,),
name="reshape_ar")(short_input[:, -self.ar_window:])
linear_pred = Dense(units=self.output_dim,
kernel_initializer=TruncatedNormal(stddev=0.1),
bias_initializer=Constant(0.1),)(ar_pred_x)
else:
linear_pred = 0
y_pred = Add()([nonlinear_pred, linear_pred])
self.model = Model(inputs=[long_input, short_input], outputs=y_pred)
# lr decay
# def lr_scheduler(epoch, r):
# max_lr = 0.03
# min_lr = 0.0001
# lr = min_lr + (max_lr - min_lr) * math.exp(-epoch / 60)
# return lr
# callbacks = [tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)]
# initial_lr = 0.003
# rate = math.exp(-1 / 60)
# lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
# initial_lr,
# decay_steps=249,
# decay_rate=rate,
# staircase=True
# )
#
# self.model.compile(loss="mae",
# metrics=metrics,
# optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule))
self.model.compile(loss=self.loss,
metrics=self.metrics,
optimizer=tf.keras.optimizers.Adam(lr=self.lr))
return self.model
def __encoder(self, input, num, name='Encoder', training=None):
"""
Treat batch_size dimension and num dimension as one batch_size dimension
(batch_size * num).
:param input: <batch_size, num, time_step, input_dim>
:param num: the number of input time series data. For short term data, the num is 1.
:return: the embedded of the input <batch_size, num, last_rnn_hid_size>
"""
# input = Input(shape=(num, self.time_step, self.feature_num))
batch_size_new = self.batch_size * num
Tc = self.time_step - self.cnn_height + 1
# CNN
# reshaped input: (batch_size_new, time_step, feature_num, 1)
reshaped_input = Lambda(lambda x:
K.reshape(x, (-1, self.time_step, self.feature_num, 1),),
name=name+'reshape_cnn')(input)
# output: <batch_size_new, conv_out, 1, en_conv_hidden_size>
cnn_out = Conv2D(filters=self.cnn_hid_size,
kernel_size=(self.cnn_height, self.feature_num),
padding="valid",
kernel_initializer=TruncatedNormal(stddev=0.1),
bias_initializer=Constant(0.1),
activation="relu")(reshaped_input)
cnn_out = Dropout(self.cnn_dropout)(cnn_out, training=training)
rnn_input = Lambda(lambda x:
K.reshape(x, (-1, num, Tc, self.cnn_hid_size)),)(cnn_out)
# use AttentionRNNWrapper
rnn_cells = [GRUCell(h_size, activation="relu", dropout=self.rnn_dropout)
for h_size in self.rnn_hid_sizes]
attention_rnn = AttentionRNNWrapper(RNN(rnn_cells),
weight_initializer=TruncatedNormal(stddev=0.1))
outputs = []
for i in range(num):
input_i = rnn_input[:, i]
# input_i = (batch, conv_hid_size, Tc)
input_i = Permute((2, 1), input_shape=[Tc, self.cnn_hid_size])(input_i)
# output = (batch, last_rnn_hid_size)
output_i = attention_rnn(input_i, training=training)
# output = (batch, 1, last_rnn_hid_size)
output_i = Reshape((1, -1))(output_i)
outputs.append(output_i)
if len(outputs) > 1:
output = Lambda(lambda x: concatenate(x, axis=1))(outputs)
else:
output = outputs[0]
return output
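# The flattened network input packs (long_num + 1) windows of length time_step
# along axis 1: the first long_num * time_step rows form the long-term memory
# series and the remaining time_step rows form the short-term query window.
# For example, with time_step=4 and long_num=7, each sample has 4 * (7 + 1) = 32
# rows: rows 0..27 reshape to (7, 4, feature_num), rows 28..31 to (4, feature_num).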
def _reshape_input_x(self, x):
long_term = np.reshape(x[:, : self.time_step * self.long_num],
[-1, self.long_num, self.time_step, x.shape[-1]])
short_term = np.reshape(x[:, self.time_step * self.long_num:],
[-1, self.time_step, x.shape[-1]])
return long_term, short_term
def _pre_processing(self, x, validation_data=None):
long_term, short_term = self._reshape_input_x(x)
if validation_data:
val_x, val_y = validation_data
long_val, short_val = self._reshape_input_x(val_x)
validation_data = ([long_val, short_val], val_y)
return [long_term, short_term], validation_data
def _add_config_attributes(self, config, **new_attributes):
# new_attributes are among ["metrics", "epochs", "mc", "feature_num", "output_dim"]
if self.config is None:
self.config = config
else:
if config:
raise ValueError("You can only pass new configuations for 'mc', 'epochs' and "
"'metrics' during incremental fitting. "
"Additional configs passed are {}".format(config))
if new_attributes["metrics"] is None:
del new_attributes["metrics"]
self.config.update(new_attributes)
def _check_input(self, x, y):
input_feature_num = x.shape[-1]
input_output_dim = y.shape[-1]
if input_feature_num is None:
raise ValueError("input x is None!")
if input_output_dim is None:
raise ValueError("input y is None!")
if self.feature_num is not None and self.feature_num != input_feature_num:
raise ValueError("input x has different feature number (the shape of last dimension) "
"{} with the fitted model, which is {}."
.format(input_feature_num, self.feature_num))
if self.output_dim is not None and self.output_dim != input_output_dim:
raise ValueError("input y has different prediction size (the shape of last dimension) "
"of {} with the fitted model, which is {}."
.format(input_output_dim, self.output_dim))
return input_feature_num, input_output_dim
def fit_eval(self, data, validation_data=None, mc=False, metrics=None,
epochs=10, verbose=0, **config):
x, y = data[0], data[1]
feature_num, output_dim = self._check_input(x, y)
self._add_config_attributes(config, epochs=epochs, mc=mc, metrics=metrics,
feature_num=feature_num, output_dim=output_dim)
self.apply_config(config=self.config)
processed_x, processed_validation_data = self._pre_processing(x, validation_data)
# if the model is not initialized, build it via build()
if self.model is None:
st = time.time()
self.build()
end = time.time()
if verbose == 1:
print("Build model took {}s".format(end - st))
st = time.time()
hist = self.model.fit(processed_x, y, validation_data=processed_validation_data,
batch_size=self.batch_size,
epochs=self.epochs,
verbose=verbose)
if verbose == 1:
print("Fit model took {}s".format(time.time() - st))
if validation_data is None:
# get train metrics
# results = self.model.evaluate(x, y)
result = hist.history.get(self.metrics[0])[-1]
else:
result = hist.history.get('val_' + str(self.metrics[0]))[-1]
return result
def evaluate(self, x, y, metrics=['mse']):
"""
Evaluate on x, y
:param x: input
:param y: target
:param metrics: a list of metrics in string format
:return: a list of metric evaluation results
"""
y_pred = self.predict(x)
if y_pred.shape[1] == 1:
multioutput = 'uniform_average'
else:
multioutput = 'raw_values'
# y = np.squeeze(y, axis=2)
return [Evaluator.evaluate(m, y, y_pred, multioutput=multioutput) for m in metrics]
def predict(self, x, mc=False):
input_x = self._reshape_input_x(x)
return self.model.predict(input_x)
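# NOTE: the mc flag is unused here; for MTNet the dropout behaviour is fixed at
# build time via self.mc (training=True on the Dropout layers), unlike the
# Seq2Seq model where mc is applied per prediction call.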
def predict_with_uncertainty(self, x, n_iter=100):
result = np.zeros((n_iter,) + (x.shape[0], self.output_dim))
for i in range(n_iter):
result[i, :, :] = self.predict(x, mc=True)
prediction = result.mean(axis=0)
uncertainty = result.std(axis=0)
return prediction, uncertainty
def save(self, model_path, config_path):
self.model.save_weights(model_path)
config_to_save = {"cnn_height": self.cnn_height,
"long_num": self.long_num,
"time_step": self.time_step,
"ar_window": self.ar_window,
"cnn_hid_size": self.cnn_hid_size,
"rnn_hid_sizes": self.rnn_hid_sizes,
"cnn_dropout": self.cnn_dropout,
"rnn_dropout": self.rnn_dropout,
"lr": self.lr,
"batch_size": self.batch_size,
# for fit eval
"epochs": self.epochs,
# todo: can not serialize metrics unless all elements are str
"metrics": self.metrics,
"mc": self.mc,
"feature_num": self.feature_num,
"output_dim": self.output_dim,
"loss": self.loss
}
assert set(config_to_save.keys()) == self.saved_configs, \
"The keys in config_to_save is not the same as self.saved_configs." \
"Please keep them consistent"
# if self.decay_epochs > 0:
# lr_decay_configs = {"min_lr": self.min_lr,
# "max_lr": self.max_lr}
# assert set(lr_decay_configs.keys()) == self.lr_decay_configs, \
# "The keys in lr_decay_configs is not the same as self.lr_decay_configs." \
# "Please keep them consistent"
# config_to_save.update(lr_decay_configs)
# else:
# lr_configs = {"lr": self.lr_value}
# assert set(lr_configs.keys()) == self.lr_configs, \
# "The keys in lr_configs is not the same as self.lr_configs." \
# "Please keep them consistent"
# config_to_save.update(lr_configs)
save_config(config_path, config_to_save)
def restore(self, model_path, **config):
"""
restore model from file
:param model_path: the model file
:param config: the trial config
"""
self.config = config
self.apply_config(rs=True, config=config)
self.build()
self.model.load_weights(model_path)
def _get_optional_parameters(self):
return {
"batch_size",
"cnn_dropout",
"rnn_dropout",
"time_step",
"cnn_height",
"long_num",
"ar_size",
"loss",
"cnn_hid_size",
"rnn_hid_sizes",
"lr"
}
def _get_required_parameters(self):
return {
"feature_num",
"output_dim"
}
| [
"tensorflow.keras.initializers.Constant",
"tensorflow.matmul",
"tensorflow.keras.models.Model",
"numpy.reshape",
"tensorflow.keras.backend.dot",
"tensorflow.keras.backend.sum",
"tensorflow.keras.backend.concatenate",
"tensorflow.keras.backend.rnn",
"tensorflow.keras.backend.reshape",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.initializers.TruncatedNormal",
"numpy.zeros"
] | pyzoo/zoo/zouwu/model/MTNet_keras.py | [(142, 'tensorflow.keras.backend.sum', 'K.sum', (['(attention * X)', '[1]'], {}), True, 'import tensorflow.keras.backend as K\n'), (186, 'tensorflow.keras.backend.rnn', 'K.rnn', (['self.step', 'x', 'initial_states'], {'go_backwards': 'self.layer.go_backwards', 'mask': 'mask', 'constants': 'constants', 'unroll': 'self.layer.unroll', 'input_length': 'input_shape[1]'}), True, 'import tensorflow.keras.backend as K\n'), (356, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[long_input, short_input]', 'outputs': 'y_pred'}), False, 'from tensorflow.keras.models import Model\n'), (436, 'numpy.reshape', 'np.reshape', (['x[:, :self.time_step * self.long_num]', '[-1, self.long_num, self.time_step, x.shape[-1]]'], {}), True, 'import numpy as np\n'), (438, 'numpy.reshape', 'np.reshape', (['x[:, self.time_step * self.long_num:]', '[-1, self.time_step, x.shape[-1]]'], {}), True, 'import numpy as np\n'), (499, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (536, 'numpy.zeros', 'np.zeros', (['((n_iter,) + (x.shape[0], self.output_dim))'], {}), True, 'import numpy as np\n'), (583, 'zoo.automl.common.util.save_config', 'save_config', (['config_path', 'config_to_save'], {}), False, 'from zoo.automl.common.util import save_config\n'), (139, 'tensorflow.keras.backend.dot', 'K.dot', (['h', 'self._W2'], {}), True, 'import tensorflow.keras.backend as K\n'), (141, 'tensorflow.keras.backend.dot', 'K.dot', (['additive_atn', 'self._V'], {}), True, 'import tensorflow.keras.backend as K\n'), (493, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (495, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (529, 'zoo.automl.common.metrics.Evaluator.evaluate', 'Evaluator.evaluate', (['m', 'y', 'y_pred'], {'multioutput': 'multioutput'}), False, 'from zoo.automl.common.metrics import Evaluator\n'), (144, 'tensorflow.keras.backend.concatenate', 'K.concatenate', (['[x, x_weighted]', '(1)'], {}), True, 'import tensorflow.keras.backend as K\n'), (224, 'tensorflow.keras.backend.dot', 'K.dot', (['x', 'self._W1'], {}), True, 'import tensorflow.keras.backend as K\n'), (379, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': 'self.lr'}), True, 'import tensorflow as tf\n'), (417, 'tensorflow.keras.initializers.TruncatedNormal', 'TruncatedNormal', ([], {'stddev': '(0.1)'}), False, 'from tensorflow.keras.initializers import TruncatedNormal, Constant\n'), (333, 'tensorflow.matmul', 'tf.matmul', (['xy[0]', 'xy[1]'], {}), True, 'import tensorflow as tf\n'), (343, 'tensorflow.keras.initializers.TruncatedNormal', 'TruncatedNormal', ([], {'stddev': '(0.1)'}), False, 'from tensorflow.keras.initializers import TruncatedNormal, Constant\n'), (344, 'tensorflow.keras.initializers.Constant', 'Constant', (['(0.1)'], {}), False, 'from tensorflow.keras.initializers import TruncatedNormal, Constant\n'), (398, 'tensorflow.keras.backend.reshape', 'K.reshape', (['x', '(-1, self.time_step, self.feature_num, 1)'], {}), True, 'import tensorflow.keras.backend as K\n'), (404, 'tensorflow.keras.initializers.TruncatedNormal', 'TruncatedNormal', ([], {'stddev': '(0.1)'}), False, 'from tensorflow.keras.initializers import TruncatedNormal, Constant\n'), (405, 'tensorflow.keras.initializers.Constant', 'Constant', (['(0.1)'], {}), False, 'from tensorflow.keras.initializers import TruncatedNormal, Constant\n'), (410, 'tensorflow.keras.backend.reshape', 'K.reshape', (['x', '(-1, num, Tc, self.cnn_hid_size)'], {}), True, 'import tensorflow.keras.backend as K\n'), (351, 'tensorflow.keras.initializers.TruncatedNormal', 'TruncatedNormal', ([], {'stddev': '(0.1)'}), False, 'from tensorflow.keras.initializers import TruncatedNormal, Constant\n'), (352, 'tensorflow.keras.initializers.Constant', 'Constant', (['(0.1)'], {}), False, 'from tensorflow.keras.initializers import TruncatedNormal, Constant\n'), (506, 'time.time', 'time.time', ([], {}), False, 'import time\n')] |
YifanQie/Deep_Learning_for_Manufacturing | 9ba19e41f69c561b04b8573ab9c52c0969f45bfd | """ The model deployment file is used to leverage a trained model to perform inference on an unknown set of node deviations.
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import sys
current_path=os.path.dirname(__file__)
parentdir = os.path.dirname(current_path)
#Adding Path to various Modules
sys.path.append("../core")
sys.path.append("../visualization")
sys.path.append("../utilities")
sys.path.append("../datasets")
sys.path.append("../trained_models")
sys.path.append("../config")
import numpy as np
import pandas as pd
import tensorflow as tf
import csv
import logging
tf.get_logger().setLevel(logging.ERROR)
from tensorflow.keras.models import load_model
#Importing Config files
import assembly_config as config
import model_config as cftrain
import measurement_config as mscofig
#Importing required modules from the package
from measurement_system import HexagonWlsScanner
from assembly_system import VRMSimulationModel
from assembly_system import PartType
from wls400a_system import GetInferenceData
from metrics_eval import MetricsEval
from data_import import GetTrainData
#from cam_viz import CamViz
#from cop_viz import CopViz
class DeployModel:
"""The Deploy Model class is used to import a trained model and use it to infer on unknown data
"""
def get_model(self,model_path):
"""get_model method is is used to retrieve the trained model from a given path
:param model_path: Path to the trained model, ideally it should be same as the train model path output
:type model_path: str (required)
"""
from tensorflow.keras.models import load_model
try:
inference_model=load_model(model_path)
print('Deep Learning Model found and loaded')
except AssertionError as error:
print(error)
print('Model not found at this path ',model_path, ' Update path in config file if required')
return inference_model
def model_inference(self,inference_data,inference_model,deploy_path,print_result=0,plot_result=0,get_cam_data=0,append_result=0):
"""model_inference method is used to infer from unknown sample(s) using the trained model
:param inference_data: Unknown dataset having same structure as the train dataset
:type inference_data: numpy.array [samples*voxel_dim*voxel_dim*voxel_dim*deviation_channels] (required) (required)
:param inference_model: Trained model
:type inference_model: keras.model (required)
:param print_result: Flag to indicate if the result needs to be printed, 0 by default, change to 1 in case the results need to be printed on the console
:type print_result: int
"""
result=inference_model.predict(inference_data)
description="The Process Parameters variations are inferred from the obtained measurement data and the trained CNN based model"
print('The model estimates are: ')
rounded_result=np.round(result,2)
if(print_result==1):
print(rounded_result)
if(append_result==1):
with open ("user_preds.csv",'a',newline='') as filedata:
#fieldnames = ['kcc1','kcc2','kcc3','kcc4','kcc5','kcc6']
writer = csv.writer(filedata, delimiter=',')
writer.writerow(rounded_result[0,:].tolist())
#writer.writerow(dict(zip(fieldnames, rounded_result[0,:].tolist())))
#filedata.write(rounded_result[0,:].tolist())
if(plot_result==1):
print("Plotting Results in HTML...")
import plotly.graph_objects as go
import plotly as py
result_str = ["%.2f" % number for number in rounded_result[0,:]]
kcc_str=[]
for i in range(rounded_result.shape[1]):
kcc_str.append("X("+str(i)+"): ")
#kcc_str=["X(1): ","X(2): ", "X(3): ", "X(4): ", "X(5): ", "X(6): "]
display_str=np.core.defchararray.add(kcc_str, result_str)
print(display_str)
fig = go.Figure(data=go.Scatter(y=rounded_result[0,:], marker=dict(
size=30,color=100), mode='markers+text',text=display_str,x=kcc_str))
fig.update_traces( textfont_size=20,textposition='top center')
fig.update_layout(title_text='Deep Learning for Manufacturing - Model Estimates')
py.offline.plot(fig, filename=deploy_path+"results.html")
if(get_cam_data==1):
#print(inference_model.summary())
from cam_viz import CamViz
from cop_viz import CopViz
input_conv_data=inference_data
base_cop=input_conv_data[0,:,:,:,0]+input_conv_data[0,:,:,:,1]+input_conv_data[0,:,:,:,2]
base_cop[base_cop!=0]=0.6
process_parameter_id=np.argmax(abs(result[0,:]))
print("Plotting Gradient based Class Activation Map for Process Parameter: ",process_parameter_id)
camviz=CamViz(inference_model,'conv_block_9')
#For explicit plotting change ID here
#process_parameter_id=0
cop_input=input_conv_data[0:1,:,:,:,:]
fmap_eval, grad_wrt_fmap_eval=camviz.grad_cam_3d(cop_input,process_parameter_id)
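# Grad-CAM: average the gradients over the batch and spatial (voxel) axes to
# obtain one weight per feature-map channel (alpha), take the weighted sum of
# the feature maps over channels, and clip negatives with np.maximum (ReLU)
# so that only positively contributing evidence is kept.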
alpha_k_c= grad_wrt_fmap_eval.mean(axis=(0,1,2,3)).reshape((1,1,1,-1))
Lc_Grad_CAM = np.maximum(np.sum(fmap_eval*alpha_k_c,axis=-1),0).squeeze()
scale_factor = np.array(cop_input.shape[1:4])/np.array(Lc_Grad_CAM.shape)
from scipy.ndimage.interpolation import zoom
import tensorflow.keras.backend as K
_grad_CAM = zoom(Lc_Grad_CAM,scale_factor)
arr_min, arr_max = np.min(_grad_CAM), np.max(_grad_CAM)
grad_CAM = (_grad_CAM - arr_min) / (arr_max - arr_min + K.epsilon())
#Code for Grad CAM Plotting
import plotly.graph_objects as go
import plotly as py
import plotly.express as px
X, Y, Z = np.mgrid[0:len(base_cop), 0:len(base_cop), 0:len(base_cop)]
#input_conv_data[0,:,:,:,0]=0.2
values_cop = base_cop
values_grad_cam=grad_CAM
trace1=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=values_cop.flatten(),
isomin=0,
isomax=1,
opacity=0.1, # needs to be small to see through all surfaces
surface_count=17, # needs to be a large number for good volume rendering
colorscale='Greens'
)
trace2=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=values_grad_cam.flatten(),
isomin=0,
isomax=1,
opacity=0.1, # needs to be small to see through all surfaces
surface_count=17,
colorscale='orrd' # needs to be a large number for good volume rendering
)
data = [trace1,trace2]
layout = go.Layout(
margin=dict(
l=0,
r=0,
b=0,
t=0
)
)
fig = go.Figure(data=data,layout=layout)
plot_file_name=deploy_path+'voxel_grad_cam.html'
py.offline.plot(fig, filename=plot_file_name)
return result
if __name__ == '__main__':
print("Welcome to Deep Learning for Manufacturing (dlmfg)...")
print('Parsing from Assembly Config File....')
data_type=config.assembly_system['data_type']
application=config.assembly_system['application']
part_type=config.assembly_system['part_type']
part_name=config.assembly_system['part_name']
data_format=config.assembly_system['data_format']
assembly_type=config.assembly_system['assembly_type']
assembly_kccs=config.assembly_system['assembly_kccs']
assembly_kpis=config.assembly_system['assembly_kpis']
voxel_dim=config.assembly_system['voxel_dim']
point_dim=config.assembly_system['point_dim']
voxel_channels=config.assembly_system['voxel_channels']
noise_type=config.assembly_system['noise_type']
mapping_index=config.assembly_system['mapping_index']
file_names_x=config.assembly_system['test_data_files_x']
file_names_y=config.assembly_system['test_data_files_y']
file_names_z=config.assembly_system['test_data_files_z']
system_noise=config.assembly_system['system_noise']
aritifical_noise=config.assembly_system['aritifical_noise']
data_folder=config.assembly_system['data_folder']
kcc_folder=config.assembly_system['kcc_folder']
kcc_files=config.assembly_system['test_kcc_files']
print('Initializing the Assembly System and Measurement System....')
measurement_system=HexagonWlsScanner(data_type,application,system_noise,part_type,data_format)
vrm_system=VRMSimulationModel(assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim,aritifical_noise)
deploy_model=DeployModel()
#Generate Paths
train_path='../trained_models/'+part_type
model_path=train_path+'/model'+'/trained_model_0.h5'
logs_path=train_path+'/logs'
deploy_path=train_path+'/deploy/'
#Voxel Mapping File
get_data=GetTrainData();
print('Importing and Preprocessing Cloud-of-Point Data')
dataset=[]
dataset.append(get_data.data_import(file_names_x,data_folder))
dataset.append(get_data.data_import(file_names_y,data_folder))
dataset.append(get_data.data_import(file_names_z,data_folder))
point_index=get_data.load_mapping_index(mapping_index)
#Make an Object of the Measurement System Class
measurement_system=HexagonWlsScanner(data_type,application, system_noise,part_type,data_format)
#Make an Object of the Assembly System Class
assembly_system=PartType(assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim)
#Inference from simulated data
inference_model=deploy_model.get_model(model_path)
print(inference_model.summary())
input_conv_data, kcc_subset_dump,kpi_subset_dump=get_data.data_convert_voxel_mc(vrm_system,dataset,point_index)
y_pred=deploy_model.model_inference(input_conv_data,inference_model,deploy_path,print_result=1,plot_result=1);
evalerror=1
if(evalerror==1):
kcc_dataset=get_data.data_import(kcc_files,kcc_folder)
metrics_eval=MetricsEval();
eval_metrics,accuracy_metrics_df=metrics_eval.metrics_eval_base(y_pred,kcc_dataset,logs_path)
print('Evaluation Metrics: ',eval_metrics)
accuracy_metrics_df.to_csv(logs_path+'/metrics_test.csv')
np.savetxt((deploy_path+"predicted.csv"), y_pred, delimiter=",")
print('Predicted Values saved to disk...')
#Inference from Measurement Data
#measurement_files=mscofig.ms_parameters['measurement_files']
#Make an object of Get Data Class
#get_data=GetInferenceData();
#Call functions of the get Data Class
#for measurement_file in measurement_files:
#measurement_path=deploy_path+measurement_file
#measurement_data=get_data.load_measurement_file(measurement_path)
#voxel_point_index=get_data.load_mapping_index(voxel_path)
#y_dev_data_filtered=get_data.data_pre_processing(measurement_data,voxel_channels)
#input_conv_data=get_data.voxel_mapping(y_dev_data_filtered,voxel_point_index,point_dim,voxel_dim,voxel_channels)
#y_pred=deploy_model.model_inference(input_conv_data,inference_model);
#print('KCCs for: ',measurement_file)
#print(y_pred)
#Code for Voxel Vizvalization
#Code for CAM Visualization
viz=0
if(viz==1):
print(inference_model.summary())
camviz=CamViz(inference_model,'conv3d_3')
grads=camviz.grad_cam_3d(input_conv_data[1:2,:,:,:,:],1) | [
"tensorflow.keras.models.load_model",
"numpy.core.defchararray.add",
"numpy.min",
"tensorflow.get_logger",
"numpy.round",
"numpy.max",
"tensorflow.keras.backend.epsilon",
"numpy.savetxt",
"numpy.array",
"scipy.ndimage.interpolation.zoom",
"numpy.sum"
] | core/model_deployment.py | [(8, 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), False, 'import os\n'), (9, 'os.path.dirname', 'os.path.dirname', (['current_path'], {}), False, 'import os\n'), (12, 'sys.path.append', 'sys.path.append', (['"""../core"""'], {}), False, 'import sys\n'), (13, 'sys.path.append', 'sys.path.append', (['"""../visualization"""'], {}), False, 'import sys\n'), (14, 'sys.path.append', 'sys.path.append', (['"""../utilities"""'], {}), False, 'import sys\n'), (15, 'sys.path.append', 'sys.path.append', (['"""../datasets"""'], {}), False, 'import sys\n'), (16, 'sys.path.append', 'sys.path.append', (['"""../trained_models"""'], {}), False, 'import sys\n'), (17, 'sys.path.append', 'sys.path.append', (['"""../config"""'], {}), False, 'import sys\n'), (217, 'measurement_system.HexagonWlsScanner', 'HexagonWlsScanner', (['data_type', 'application', 'system_noise', 'part_type', 'data_format'], {}), False, 'from measurement_system import HexagonWlsScanner\n'), (218, 'assembly_system.VRMSimulationModel', 'VRMSimulationModel', (['assembly_type', 'assembly_kccs', 'assembly_kpis', 'part_name', 'part_type', 'voxel_dim', 'voxel_channels', 'point_dim', 'aritifical_noise'], {}), False, 'from assembly_system import VRMSimulationModel\n'), (229, 'data_import.GetTrainData', 'GetTrainData', ([], {}), False, 'from data_import import GetTrainData\n'), (240, 'measurement_system.HexagonWlsScanner', 'HexagonWlsScanner', (['data_type', 'application', 'system_noise', 'part_type', 'data_format'], {}), False, 'from measurement_system import HexagonWlsScanner\n'), (242, 'assembly_system.PartType', 'PartType', (['assembly_type', 'assembly_kccs', 'assembly_kpis', 'part_name', 'part_type', 'voxel_dim', 'voxel_channels', 'point_dim'], {}), False, 'from assembly_system import PartType\n'), (24, 'tensorflow.get_logger', 'tf.get_logger', ([], {}), True, 'import tensorflow as tf\n'), (80, 'numpy.round', 'np.round', (['result', '(2)'], {}), True, 'import numpy as np\n'), (257, 'metrics_eval.MetricsEval', 'MetricsEval', ([], {}), False, 'from metrics_eval import MetricsEval\n'), (263, 'numpy.savetxt', 'np.savetxt', (["(deploy_path + 'predicted.csv')", 'y_pred'], {'delimiter': '""","""'}), True, 'import numpy as np\n'), (290, 'cam_viz.CamViz', 'CamViz', (['inference_model', '"""conv3d_3"""'], {}), False, 'from cam_viz import CamViz\n'), (56, 'tensorflow.keras.models.load_model', 'load_model', (['model_path'], {}), False, 'from tensorflow.keras.models import load_model\n'), (104, 'numpy.core.defchararray.add', 'np.core.defchararray.add', (['kcc_str', 'result_str'], {}), True, 'import numpy as np\n'), (110, 'plotly.offline.plot', 'py.offline.plot', (['fig'], {'filename': "(deploy_path + 'results.html')"}), True, 'import plotly as py\n'), (122, 'cam_viz.CamViz', 'CamViz', (['inference_model', '"""conv_block_9"""'], {}), False, 'from cam_viz import CamViz\n'), (134, 'scipy.ndimage.interpolation.zoom', 'zoom', (['Lc_Grad_CAM', 'scale_factor'], {}), False, 'from scipy.ndimage.interpolation import zoom\n'), (181, 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': 'data', 'layout': 'layout'}), True, 'import plotly.graph_objects as go\n'), (183, 'plotly.offline.plot', 'py.offline.plot', (['fig'], {'filename': 'plot_file_name'}), True, 'import plotly as py\n'), (88, 'csv.writer', 'csv.writer', (['filedata'], {'delimiter': '""","""'}), False, 'import csv\n'), (129, 'numpy.array', 'np.array', (['cop_input.shape[1:4]'], {}), True, 'import numpy as np\n'), (129, 'numpy.array', 'np.array', (['Lc_Grad_CAM.shape'], {}), True, 'import numpy as np\n'), (135, 'numpy.min', 'np.min', (['_grad_CAM'], {}), True, 'import numpy as np\n'), (135, 'numpy.max', 'np.max', (['_grad_CAM'], {}), True, 'import numpy as np\n'), (136, 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), True, 'import tensorflow.keras.backend as K\n'), (128, 'numpy.sum', 'np.sum', (['(fmap_eval * alpha_k_c)'], {'axis': '(-1)'}), True, 'import numpy as np\n')] |
jacenkow/inside | 6f860420644b50b78981158a59ceed8cdbd209bf | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Grzegorz Jacenków.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Training and evaluation pipeline for the networks."""
import csv
import os
import tensorflow as tf
from tensorflow.keras.metrics import Mean
from inside import config
from inside.callbacks import setup_callbacks
from inside.constructor import setup_comet_ml, setup_model
from inside.loaders import CLEVR
from inside.metrics import DiceScore
def _write_results(logs):
"""Write final logs to a CSV file."""
w = csv.writer(open(os.path.join(
config.EXPERIMENT_FOLDER, "results.csv"), "w"))
for key, val in logs.items():
w.writerow([key, val])
class Pipeline:
def __init__(self):
# Model.
self.model = setup_model()
# Comet.ml experiment.
self.comet_ml = setup_comet_ml()
# Testing metrics.
self.test_dice = DiceScore(name="testing_dice")
self.test_loss = Mean(name="testing_loss")
# Training metrics.
self.training_dice = DiceScore(name="training_dice")
self.training_loss = Mean(name="training_loss")
# Callbacks.
self.cl, self.es, self.mc, self.pp = setup_callbacks()
self.cl.model, self.es.model, self.mc.model = \
self.model, self.model, self.model
self.pp.model = self.model
self.pp.comet_ml = self.comet_ml
def fit(self):
"""Train the model."""
# Toy dataset.
loader = CLEVR()
train_ds, valid_ds, test_ds = loader.load()
with self.comet_ml.train():
self.cl.on_train_begin()
self.es.on_train_begin()
self.mc.on_train_begin()
self.pp.on_train_begin()
for epoch in range(config.EXPERIMENT_EPOCHS):
self.comet_ml.set_epoch(epoch)
for images, labels in train_ds:
self.train_step(images, labels)
for batch, (images, labels) in enumerate(valid_ds):
self.test_step(images, labels)
if not batch: # Log only first mini-batch from an epoch.
self.pp.on_epoch_end(epoch, images, labels)
# Get results.
logs = {
"dice": self.training_dice.result().numpy(),
"loss": self.training_loss.result().numpy(),
"validation_dice": self.test_dice.result().numpy(),
"validation_loss": self.test_loss.result().numpy(),
}
template = ("Epoch {}. Training Loss: {}. Training Dice: {}. "
"Validation Loss: {}. Validation Dice: {}.")
print(template.format(epoch + 1,
logs['loss'],
logs['dice'],
logs['validation_loss'],
logs['validation_dice']))
# Log metrics.
self.comet_ml.log_metrics(logs, epoch=epoch)
self.cl.on_epoch_end(epoch, logs)
self.es.on_epoch_end(epoch, logs)
self.mc.on_epoch_end(epoch, logs)
# Reset the metrics for the next epoch.
self.training_dice.reset_states()
self.training_loss.reset_states()
self.test_dice.reset_states()
self.test_loss.reset_states()
# Early stopping criterion.
if self.es.model.stop_training:
self.cl.on_train_end()
self.es.on_train_end()
self.mc.on_train_end()
break
with self.comet_ml.test():
for batch, (images, labels) in enumerate(test_ds):
self.test_step(images, labels)
if not batch:
self.pp.on_test_end(images, labels)
# Get results.
logs = {
"dice": self.test_dice.result().numpy(),
"loss": self.test_loss.result().numpy(),
}
print("Test Loss: {}. Test Dice: {}.".format(
logs['loss'], logs['dice']))
# Log metrics.
self.comet_ml.log_metrics(logs)
_write_results(logs)
@tf.function
def train_step(self, images, labels):
with tf.GradientTape() as tape:
predictions = self.model.inference(images)
loss = self.model.loss(labels, predictions)
gradients = tape.gradient(loss, self.model.trainable_variables)
self.model.optimiser.apply_gradients(
zip(gradients, self.model.trainable_variables))
self.training_loss(loss)
self.training_dice(labels, predictions)
@tf.function
def test_step(self, images, labels):
predictions = self.model.inference(images)
t_loss = self.model.loss(labels, predictions)
self.test_loss(t_loss)
self.test_dice(labels, predictions)
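if __name__ == "__main__":
    # Hedged entry-point sketch (not part of the original module): assumes
    # Comet.ml credentials and the CLEVR data expected by `inside.config`
    # are available in this environment.
    Pipeline().fit()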
| [
"tensorflow.keras.metrics.Mean",
"tensorflow.GradientTape"
] | inside/pipelines/clevr.py | [(41, 'inside.constructor.setup_model', 'setup_model', ([], {}), False, 'from inside.constructor import setup_comet_ml, setup_model\n'), (44, 'inside.constructor.setup_comet_ml', 'setup_comet_ml', ([], {}), False, 'from inside.constructor import setup_comet_ml, setup_model\n'), (47, 'inside.metrics.DiceScore', 'DiceScore', ([], {'name': '"""testing_dice"""'}), False, 'from inside.metrics import DiceScore\n'), (48, 'tensorflow.keras.metrics.Mean', 'Mean', ([], {'name': '"""testing_loss"""'}), False, 'from tensorflow.keras.metrics import Mean\n'), (51, 'inside.metrics.DiceScore', 'DiceScore', ([], {'name': '"""training_dice"""'}), False, 'from inside.metrics import DiceScore\n'), (52, 'tensorflow.keras.metrics.Mean', 'Mean', ([], {'name': '"""training_loss"""'}), False, 'from tensorflow.keras.metrics import Mean\n'), (55, 'inside.callbacks.setup_callbacks', 'setup_callbacks', ([], {}), False, 'from inside.callbacks import setup_callbacks\n'), (65, 'inside.loaders.CLEVR', 'CLEVR', ([], {}), False, 'from inside.loaders import CLEVR\n'), (32, 'os.path.join', 'os.path.join', (['config.EXPERIMENT_FOLDER', '"""results.csv"""'], {}), False, 'import os\n'), (144, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n')] |
kaylani2/machineLearning | 692623abf6fe02bde6c7da6c2f8c0ec526a3e8f8 | import os
import time
from multiprocessing import Process
from typing import Tuple
import flwr as fl
import numpy as np
import tensorflow as tf
from flwr.server.strategy import FedAvg
import dataset
# generate random integer values
from random import seed
from random import randint
# Make TensorFlow log less verbose
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# K: Prevent TF from using GPU (not enough memory)
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
DATASET = Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]
def start_server(num_rounds: int, num_clients: int, fraction_fit: float):
"""Start the server with a slightly adjusted FedAvg strategy."""
strategy = FedAvg(min_available_clients=num_clients, fraction_fit=fraction_fit)
# Exposes the server by default on port 8080
fl.server.start_server(strategy=strategy, config={"num_rounds": num_rounds})
def start_client(dataset: DATASET) -> None:
"""Start a single client with the provided dataset."""
# Load and compile a Keras model for CIFAR-10
#model = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None)
model = tf.keras.Sequential(
[
tf.keras.Input(shape=(32, 32, 3)),
tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation="softmax"),
]
)
model.compile("adam", "sparse_categorical_crossentropy", metrics=[tf.keras.metrics.CategoricalAccuracy(), tf.keras.metrics.MeanSquaredError()])
### @TODO: check if "accuracy" and tf.keras.metrics.CategoricalAccuracy() return the same results
# Unpack the CIFAR-10 dataset partition
(x_train, y_train), (x_test, y_test) = dataset
# Define a Flower client
class CifarClient(fl.client.NumPyClient):
def get_parameters(self):
"""Return current weights."""
return model.get_weights()
def fit(self, parameters, config):
"""Fit model and return new weights as well as number of training
examples."""
model.set_weights(parameters)
# Remove steps_per_epoch if you want to train over the full dataset
# https://keras.io/api/models/model_training_apis/#fit-method
#nap_time = randint (0, 5)
#time.sleep (nap_time)
#print ("Slept for", nap_time, "seconds.")
model.fit(x_train, y_train, epochs=10, batch_size=256, steps_per_epoch=10)
return model.get_weights(), len(x_train), {}
def evaluate(self, parameters, config):
"""Evaluate using provided parameters."""
model.set_weights(parameters)
loss, accuracy, mse = model.evaluate(x_test, y_test)
print ('"Loss:', loss, ". Accuracy:", accuracy, ". MSE:", mse, ".")
return loss, len(x_test), {"accuracy": accuracy}
# Start Flower client
fl.client.start_numpy_client("0.0.0.0:8080", client=CifarClient())
def run_simulation(num_rounds: int, num_clients: int, fraction_fit: float):
"""Start a FL simulation."""
# This will hold all the processes which we are going to create
processes = []
# Start the server
server_process = Process(
target=start_server, args=(num_rounds, num_clients, fraction_fit)
)
server_process.start()
processes.append(server_process)
# Optionally block the script here for a second or two so the server has time to start
time.sleep(2)
# Load the dataset partitions
partitions = dataset.load(num_partitions=num_clients)
# Start all the clients
for partition in partitions:
client_process = Process(target=start_client, args=(partition,))
client_process.start()
processes.append(client_process)
# Block until all processes are finished
for p in processes:
p.join()
if __name__ == "__main__":
run_simulation(num_rounds=100, num_clients=5, fraction_fit=0.5)
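# Note: `min_available_clients=num_clients` makes the FedAvg server wait until
# all five simulated clients have connected, and `fraction_fit=0.5` then
# samples roughly half of them for training in each round.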
| [
"tensorflow.keras.Input",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.metrics.MeanSquaredError",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.metrics.CategoricalAccuracy",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Flatten"
] | src/specific_models/federated/single_machine_simulation_flower/single_machine_simulation.py | [(29, 'flwr.server.strategy.FedAvg', 'FedAvg', ([], {'min_available_clients': 'num_clients', 'fraction_fit': 'fraction_fit'}), False, 'from flwr.server.strategy import FedAvg\n'), (31, 'flwr.server.start_server', 'fl.server.start_server', ([], {'strategy': 'strategy', 'config': "{'num_rounds': num_rounds}"}), True, 'import flwr as fl\n'), (93, 'multiprocessing.Process', 'Process', ([], {'target': 'start_server', 'args': '(num_rounds, num_clients, fraction_fit)'}), False, 'from multiprocessing import Process\n'), (100, 'time.sleep', 'time.sleep', (['(2)'], {}), False, 'import time\n'), (103, 'dataset.load', 'dataset.load', ([], {'num_partitions': 'num_clients'}), False, 'import dataset\n'), (107, 'multiprocessing.Process', 'Process', ([], {'target': 'start_client', 'args': '(partition,)'}), False, 'from multiprocessing import Process\n'), (41, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(32, 32, 3)'}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.5)'], {}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': '"""softmax"""'}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.keras.metrics.MeanSquaredError', 'tf.keras.metrics.MeanSquaredError', ([], {}), True, 'import tensorflow as tf\n')] |
haruiz/models | 2db2501bc9928f68e225282f3884b81680a9cccb | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model defination for the RetinaNet Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.keras import backend
from official.vision.detection.dataloader import mode_keys
from official.vision.detection.evaluation import factory as eval_factory
from official.vision.detection.modeling import base_model
from official.vision.detection.modeling import losses
from official.vision.detection.modeling.architecture import factory
from official.vision.detection.ops import postprocess_ops
class RetinanetModel(base_model.Model):
"""RetinaNet model function."""
def __init__(self, params):
super(RetinanetModel, self).__init__(params)
# For eval metrics.
self._params = params
# Architecture generators.
self._backbone_fn = factory.backbone_generator(params)
self._fpn_fn = factory.multilevel_features_generator(params)
self._head_fn = factory.retinanet_head_generator(params)
# Loss function.
self._cls_loss_fn = losses.RetinanetClassLoss(
params.retinanet_loss, params.architecture.num_classes)
self._box_loss_fn = losses.RetinanetBoxLoss(params.retinanet_loss)
self._box_loss_weight = params.retinanet_loss.box_loss_weight
self._keras_model = None
# Predict function.
self._generate_detections_fn = postprocess_ops.MultilevelDetectionGenerator(
params.architecture.min_level,
params.architecture.max_level,
params.postprocess)
self._transpose_input = params.train.transpose_input
    assert not self._transpose_input, 'Transpose input is not supported.'
# Input layer.
input_shape = (
params.retinanet_parser.output_size +
[params.retinanet_parser.num_channels])
self._input_layer = tf.keras.layers.Input(
shape=input_shape, name='',
dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32)
def build_outputs(self, inputs, mode):
# If the input image is transposed (from NHWC to HWCN), we need to revert it
# back to the original shape before it's used in the computation.
if self._transpose_input:
inputs = tf.transpose(inputs, [3, 0, 1, 2])
backbone_features = self._backbone_fn(
inputs, is_training=(mode == mode_keys.TRAIN))
fpn_features = self._fpn_fn(
backbone_features, is_training=(mode == mode_keys.TRAIN))
cls_outputs, box_outputs = self._head_fn(
fpn_features, is_training=(mode == mode_keys.TRAIN))
if self._use_bfloat16:
levels = cls_outputs.keys()
for level in levels:
cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)
box_outputs[level] = tf.cast(box_outputs[level], tf.float32)
model_outputs = {
'cls_outputs': cls_outputs,
'box_outputs': box_outputs,
}
return model_outputs
def build_loss_fn(self):
if self._keras_model is None:
raise ValueError('build_loss_fn() must be called after build_model().')
filter_fn = self.make_filter_trainable_variables_fn()
trainable_variables = filter_fn(self._keras_model.trainable_variables)
def _total_loss_fn(labels, outputs):
cls_loss = self._cls_loss_fn(outputs['cls_outputs'],
labels['cls_targets'],
labels['num_positives'])
box_loss = self._box_loss_fn(outputs['box_outputs'],
labels['box_targets'],
labels['num_positives'])
model_loss = cls_loss + self._box_loss_weight * box_loss
l2_regularization_loss = self.weight_decay_loss(trainable_variables)
total_loss = model_loss + l2_regularization_loss
return {
'total_loss': total_loss,
'cls_loss': cls_loss,
'box_loss': box_loss,
'model_loss': model_loss,
'l2_regularization_loss': l2_regularization_loss,
}
return _total_loss_fn
def build_model(self, params, mode=None):
if self._keras_model is None:
with backend.get_graph().as_default():
outputs = self.model_outputs(self._input_layer, mode)
model = tf.keras.models.Model(
inputs=self._input_layer, outputs=outputs, name='retinanet')
        assert model is not None, 'Failed to build tf.keras.Model.'
model.optimizer = self.build_optimizer()
self._keras_model = model
return self._keras_model
def post_processing(self, labels, outputs):
    # TODO(yeqing): Move the output-related part into build_outputs.
required_output_fields = ['cls_outputs', 'box_outputs']
for field in required_output_fields:
if field not in outputs:
raise ValueError('"%s" is missing in outputs, requried %s found %s',
field, required_output_fields, outputs.keys())
required_label_fields = ['image_info', 'groundtruths']
for field in required_label_fields:
if field not in labels:
raise ValueError('"%s" is missing in outputs, requried %s found %s',
field, required_label_fields, labels.keys())
boxes, scores, classes, valid_detections = self._generate_detections_fn(
outputs['box_outputs'], outputs['cls_outputs'],
labels['anchor_boxes'], labels['image_info'][:, 1:2, :])
    # Discards the old output tensors to save memory. The `cls_outputs` and
    # `box_outputs` are pretty big and could potentially lead to memory issues.
outputs = {
'source_id': labels['groundtruths']['source_id'],
'image_info': labels['image_info'],
'num_detections': valid_detections,
'detection_boxes': boxes,
'detection_classes': classes,
'detection_scores': scores,
}
if 'groundtruths' in labels:
labels['source_id'] = labels['groundtruths']['source_id']
labels['boxes'] = labels['groundtruths']['boxes']
labels['classes'] = labels['groundtruths']['classes']
labels['areas'] = labels['groundtruths']['areas']
labels['is_crowds'] = labels['groundtruths']['is_crowds']
return labels, outputs
def eval_metrics(self):
return eval_factory.evaluator_generator(self._params.eval)
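# A minimal, self-contained sketch of the loss-closure pattern used by
# `build_loss_fn` above (toy MSE/MAE losses and a hypothetical weight; not
# the real detection losses):
if __name__ == '__main__':
  def _make_total_loss_fn(box_loss_weight):
    def _total_loss_fn(labels, outputs):
      cls_loss = tf.reduce_mean(tf.square(labels - outputs))
      box_loss = tf.reduce_mean(tf.abs(labels - outputs))
      return {'total_loss': cls_loss + box_loss_weight * box_loss,
              'cls_loss': cls_loss,
              'box_loss': box_loss}
    return _total_loss_fn
  loss_fn = _make_total_loss_fn(box_loss_weight=50.0)
  print(loss_fn(tf.ones([2, 4]), tf.zeros([2, 4])))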
| [
"tensorflow.transpose",
"tensorflow.keras.models.Model",
"tensorflow.cast",
"tensorflow.python.keras.backend.get_graph",
"tensorflow.keras.layers.Input"
] | official/vision/detection/modeling/retinanet_model.py | [(42, 'official.vision.detection.modeling.architecture.factory.backbone_generator', 'factory.backbone_generator', (['params'], {}), False, 'from official.vision.detection.modeling.architecture import factory\n'), (43, 'official.vision.detection.modeling.architecture.factory.multilevel_features_generator', 'factory.multilevel_features_generator', (['params'], {}), False, 'from official.vision.detection.modeling.architecture import factory\n'), (44, 'official.vision.detection.modeling.architecture.factory.retinanet_head_generator', 'factory.retinanet_head_generator', (['params'], {}), False, 'from official.vision.detection.modeling.architecture import factory\n'), (47, 'official.vision.detection.modeling.losses.RetinanetClassLoss', 'losses.RetinanetClassLoss', (['params.retinanet_loss', 'params.architecture.num_classes'], {}), False, 'from official.vision.detection.modeling import losses\n'), (49, 'official.vision.detection.modeling.losses.RetinanetBoxLoss', 'losses.RetinanetBoxLoss', (['params.retinanet_loss'], {}), False, 'from official.vision.detection.modeling import losses\n'), (54, 'official.vision.detection.ops.postprocess_ops.MultilevelDetectionGenerator', 'postprocess_ops.MultilevelDetectionGenerator', (['params.architecture.min_level', 'params.architecture.max_level', 'params.postprocess'], {}), False, 'from official.vision.detection.ops import postprocess_ops\n'), (65, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': 'input_shape', 'name': '""""""', 'dtype': '(tf.bfloat16 if self._use_bfloat16 else tf.float32)'}), True, 'import tensorflow as tf\n'), (170, 'official.vision.detection.evaluation.factory.evaluator_generator', 'eval_factory.evaluator_generator', (['self._params.eval'], {}), True, 'from official.vision.detection.evaluation import factory as eval_factory\n'), (73, 'tensorflow.transpose', 'tf.transpose', (['inputs', '[3, 0, 1, 2]'], {}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.cast', 'tf.cast', (['cls_outputs[level]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.cast', 'tf.cast', (['box_outputs[level]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'self._input_layer', 'outputs': 'outputs', 'name': '"""retinanet"""'}), True, 'import tensorflow as tf\n'), (123, 'tensorflow.python.keras.backend.get_graph', 'backend.get_graph', ([], {}), False, 'from tensorflow.python.keras import backend\n')] |
haruiz/models | 2db2501bc9928f68e225282f3884b81680a9cccb | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification network."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package='Text')
class TokenClassification(tf.keras.Model):
"""TokenClassification network head for BERT modeling.
This network implements a simple token classifier head based on a dense layer.
Arguments:
input_width: The innermost dimension of the input tensor to this network.
num_classes: The number of classes that this network should classify to.
activation: The activation, if any, for the dense layer in this network.
    initializer: The initializer for the dense layer in this network. Defaults to
      a Glorot uniform initializer.
output: The output style for this network. Can be either 'logits' or
'predictions'.
"""
def __init__(self,
input_width,
num_classes,
initializer='glorot_uniform',
output='logits',
**kwargs):
self._self_setattr_tracking = False
self._config_dict = {
'input_width': input_width,
'num_classes': num_classes,
'initializer': initializer,
'output': output,
}
sequence_data = tf.keras.layers.Input(
shape=(None, input_width), name='sequence_data', dtype=tf.float32)
self.logits = tf.keras.layers.Dense(
num_classes,
activation=None,
kernel_initializer=initializer,
name='predictions/transform/logits')(
sequence_data)
predictions = tf.keras.layers.Activation(tf.nn.log_softmax)(self.logits)
if output == 'logits':
output_tensors = self.logits
elif output == 'predictions':
output_tensors = predictions
else:
raise ValueError(
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
super(TokenClassification, self).__init__(
inputs=[sequence_data], outputs=output_tensors, **kwargs)
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
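if __name__ == '__main__':
  # Hedged usage sketch: 768 is an assumed hidden width and 9 an assumed
  # number of token classes; neither value comes from the original file.
  import numpy as np
  head = TokenClassification(input_width=768, num_classes=9)
  tokens = np.zeros((2, 16, 768), dtype='float32')  # (batch, seq_len, hidden)
  print(head(tokens).shape)  # -> (2, 16, 9)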
| [
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.keras.layers.Input"
] | official/nlp/modeling/networks/token_classification.py | [(25, 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""Text"""'}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(None, input_width)', 'name': '"""sequence_data"""', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (58, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_classes'], {'activation': 'None', 'kernel_initializer': 'initializer', 'name': '"""predictions/transform/logits"""'}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['tf.nn.log_softmax'], {}), True, 'import tensorflow as tf\n')] |
sanghuynh1501/mlcollect | e85fe6a08e14fa6502166c1a7bfffdcd8c3a25b2 | from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend as K
class LeNet:
@staticmethod
def build(width, height, depth, classes, last_active="softmax"):
# Initialize the model
model = Sequential()
input_shape = (height, width, depth)
# If we are using 'channels-first', update the input shape
if K.image_data_format() == 'channels_first':
input_shape = (depth, height, width)
# First set of CONV => RELU => POOL layers
model.add(Conv2D(20, (5, 5), padding='same', input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# Second set of CONV => RELU => POOL layers
model.add(Conv2D(50, (5, 5), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# First (and only) set of FC => RELU layers
model.add(Flatten())
model.add(Dense(500))
model.add(Activation('relu'))
model.add(Dense(classes))
model.add(Activation(last_active))
# return the constructed network architecture
return model
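if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): assumes
    # 28x28 grayscale inputs such as MNIST.
    model = LeNet.build(width=28, height=28, depth=1, classes=10)
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()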
| [
"tensorflow.keras.layers.Activation",
"tensorflow.keras.backend.image_data_format",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten"
] | mlcollect/cnn/lenet.py | [(14, 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras.models import Sequential\n'), (18, 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), True, 'from tensorflow.keras import backend as K\n'), (22, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(20)', '(5, 5)'], {'padding': '"""same"""', 'input_shape': 'input_shape'}), False, 'from tensorflow.keras.layers import Conv2D\n'), (23, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (24, 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), False, 'from tensorflow.keras.layers import MaxPooling2D\n'), (27, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(50)', '(5, 5)'], {'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Conv2D\n'), (28, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (29, 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), False, 'from tensorflow.keras.layers import MaxPooling2D\n'), (32, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import Flatten\n'), (33, 'tensorflow.keras.layers.Dense', 'Dense', (['(500)'], {}), False, 'from tensorflow.keras.layers import Dense\n'), (34, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (36, 'tensorflow.keras.layers.Dense', 'Dense', (['classes'], {}), False, 'from tensorflow.keras.layers import Dense\n'), (37, 'tensorflow.keras.layers.Activation', 'Activation', (['last_active'], {}), False, 'from tensorflow.keras.layers import Activation\n')] |
sanghuynh1501/mlcollect | e85fe6a08e14fa6502166c1a7bfffdcd8c3a25b2 | from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend as K
class MiniVGGNet:
@staticmethod
    def build(width, height, depth, classes, last_active="softmax"):
# Initialize the model, input shape and the channel dimension
model = Sequential()
input_shape = (height, width, depth)
channel_dim = -1
# If we are using 'channels_first', update the input shape and channels dimension
if K.image_data_format() == 'channels_first':
input_shape = (depth, height, width)
channel_dim = 1
# First CONV => RELU => CONV => RELU => POOL layer set
model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape))
model.add(Activation('relu'))
model.add(BatchNormalization(axis=channel_dim))
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(BatchNormalization(axis=channel_dim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Second CONV => RELU => CONV => RELU => POOL layer set
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
# model.add(BatchNormalization(axis=channel_dim))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
# model.add(BatchNormalization(axis=channel_dim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# First (and only) set of FC => RELU layers
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# Softmax classifier
model.add(Dense(classes))
model.add(Activation(last_active))
# Return the constructed network architecture
return model
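if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): assumes
    # 32x32 RGB inputs such as CIFAR-10.
    model = MiniVGGNet.build(width=32, height=32, depth=3, classes=10)
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()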
| [
"tensorflow.keras.layers.Activation",
"tensorflow.keras.backend.image_data_format",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten"
] | mlcollect/cnn/minivggnet.py | [(16, 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras.models import Sequential\n'), (21, 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), True, 'from tensorflow.keras import backend as K\n'), (26, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'input_shape': 'input_shape'}), False, 'from tensorflow.keras.layers import Conv2D\n'), (27, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (28, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'channel_dim'}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (29, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Conv2D\n'), (30, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (31, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'channel_dim'}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (32, 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), False, 'from tensorflow.keras.layers import MaxPooling2D\n'), (33, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), False, 'from tensorflow.keras.layers import Dropout\n'), (36, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Conv2D\n'), (37, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (39, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Conv2D\n'), (40, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (42, 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), False, 'from tensorflow.keras.layers import MaxPooling2D\n'), (43, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), False, 'from tensorflow.keras.layers import Dropout\n'), (46, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import Flatten\n'), (47, 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {}), False, 'from tensorflow.keras.layers import Dense\n'), (48, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (49, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (50, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), False, 'from tensorflow.keras.layers import Dropout\n'), (53, 'tensorflow.keras.layers.Dense', 'Dense', (['classes'], {}), False, 'from tensorflow.keras.layers import Dense\n'), (54, 'tensorflow.keras.layers.Activation', 'Activation', (['last_active'], {}), False, 'from tensorflow.keras.layers import Activation\n')] |
deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for non_semantic_speech_benchmark.eval_embedding.keras.train_keras."""
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import mock
import tensorflow as tf
from non_semantic_speech_benchmark.eval_embedding.finetune import train_keras
def _get_data(*args, **kwargs):
del args
assert 'samples_key' in kwargs
assert 'min_length' in kwargs
assert 'batch_size' in kwargs
assert 'label_list' in kwargs
bs = kwargs['batch_size']
samples = tf.zeros((bs, 32000), tf.float32)
labels = tf.zeros([bs], tf.int32)
labels_onehot = tf.one_hot(labels, len(kwargs['label_list']))
return tf.data.Dataset.from_tensors((samples, labels_onehot)).repeat()
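# Note: the stub above stands in for `train_keras.get_data.get_data` (see the
# mock.patch.object decorator on `test_full_flow`), so the test can run
# end-to-end without reading real audio data.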
class TrainKerasTest(parameterized.TestCase):
@parameterized.parameters(
{'num_clusters': 0, 'alpha_init': 0},
{'num_clusters': 4, 'alpha_init': 0},
{'num_clusters': 0, 'alpha_init': 1.0},
)
def test_get_model(self, num_clusters, alpha_init):
num_classes = 4
batched_samples = tf.zeros([3, 20000])
y_onehot = tf.one_hot([0, 1, 2], num_classes)
model = train_keras.models.get_keras_model(
num_classes, input_length=20000, use_batchnorm=True,
num_clusters=num_clusters, alpha_init=alpha_init)
loss_obj = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
opt = tf.keras.optimizers.Adam()
train_loss = tf.keras.metrics.Mean()
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
summary_writer = tf.summary.create_file_writer(
absltest.get_default_test_tmpdir())
train_step = train_keras.get_train_step(
model, loss_obj, opt, train_loss, train_accuracy, summary_writer)
gstep = opt.iterations
train_step(batched_samples, y_onehot, gstep)
self.assertEqual(1, gstep)
train_step(batched_samples, y_onehot, gstep)
self.assertEqual(2, gstep)
@mock.patch.object(train_keras.get_data, 'get_data', new=_get_data)
@flagsaver.flagsaver
def test_full_flow(self):
flags.FLAGS.file_pattern = 'dummy'
flags.FLAGS.shuffle_buffer_size = 4
flags.FLAGS.samples_key = 'audio'
flags.FLAGS.nc = 2
flags.FLAGS.label_key = 'emotion'
flags.FLAGS.label_list = ['no', 'yes']
flags.FLAGS.logdir = absltest.get_default_test_tmpdir()
train_keras.train_and_report(debug=True)
if __name__ == '__main__':
tf.compat.v2.enable_v2_behavior()
assert tf.executing_eagerly()
absltest.main()
| [
"tensorflow.data.Dataset.from_tensors",
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.executing_eagerly",
"tensorflow.zeros",
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.keras.optimizers.Adam",
"tensorflow.one_hot",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.keras.metrics.Mean"
] | non_semantic_speech_benchmark/eval_embedding/finetune/train_keras_test.py | [(36, 'tensorflow.zeros', 'tf.zeros', (['(bs, 32000)', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (37, 'tensorflow.zeros', 'tf.zeros', (['[bs]', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (44, 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["{'num_clusters': 0, 'alpha_init': 0}", "{'num_clusters': 4, 'alpha_init': 0}", "{'num_clusters': 0, 'alpha_init': 1.0}"], {}), False, 'from absl.testing import parameterized\n'), (72, 'mock.patch.object', 'mock.patch.object', (['train_keras.get_data', '"""get_data"""'], {'new': '_get_data'}), False, 'import mock\n'), (87, 'tensorflow.compat.v2.enable_v2_behavior', 'tf.compat.v2.enable_v2_behavior', ([], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), True, 'import tensorflow as tf\n'), (89, 'absl.testing.absltest.main', 'absltest.main', ([], {}), False, 'from absl.testing import absltest\n'), (51, 'tensorflow.zeros', 'tf.zeros', (['[3, 20000]'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.one_hot', 'tf.one_hot', (['[0, 1, 2]', 'num_classes'], {}), True, 'import tensorflow as tf\n'), (54, 'non_semantic_speech_benchmark.eval_embedding.finetune.train_keras.models.get_keras_model', 'train_keras.models.get_keras_model', (['num_classes'], {'input_length': '(20000)', 'use_batchnorm': '(True)', 'num_clusters': 'num_clusters', 'alpha_init': 'alpha_init'}), False, 'from non_semantic_speech_benchmark.eval_embedding.finetune import train_keras\n'), (58, 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {'from_logits': '(True)'}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {}), True, 'import tensorflow as tf\n'), (64, 'non_semantic_speech_benchmark.eval_embedding.finetune.train_keras.get_train_step', 'train_keras.get_train_step', (['model', 'loss_obj', 'opt', 'train_loss', 'train_accuracy', 'summary_writer'], {}), False, 'from non_semantic_speech_benchmark.eval_embedding.finetune import train_keras\n'), (81, 'absl.testing.absltest.get_default_test_tmpdir', 'absltest.get_default_test_tmpdir', ([], {}), False, 'from absl.testing import absltest\n'), (83, 'non_semantic_speech_benchmark.eval_embedding.finetune.train_keras.train_and_report', 'train_keras.train_and_report', ([], {'debug': '(True)'}), False, 'from non_semantic_speech_benchmark.eval_embedding.finetune import train_keras\n'), (39, 'tensorflow.data.Dataset.from_tensors', 'tf.data.Dataset.from_tensors', (['(samples, labels_onehot)'], {}), True, 'import tensorflow as tf\n'), (63, 'absl.testing.absltest.get_default_test_tmpdir', 'absltest.get_default_test_tmpdir', ([], {}), False, 'from absl.testing import absltest\n')] |
deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
"""Ground-truth state 2-step Agent."""
import time
import numpy as np
from ravens import utils
from ravens.agents import GtState6DAgent
from ravens.agents import GtStateAgent
from ravens.models import mdn_utils
from ravens.models import MlpModel
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
class GtState2StepAgent(GtStateAgent):
"""Agent which uses ground-truth state information -- useful as a baseline."""
def __init__(self, name, task):
super(GtState2StepAgent, self).__init__(name, task)
# Set up model.
self.pick_model = None
self.place_model = None
self.pick_optim = tf.keras.optimizers.Adam(learning_rate=2e-4)
self.place_optim = tf.keras.optimizers.Adam(learning_rate=2e-4)
self.metric = tf.keras.metrics.Mean(name='metric')
self.val_metric = tf.keras.metrics.Mean(name='val_metric')
def init_model(self, dataset):
"""Initialize models, including normalization parameters."""
self.set_max_obs_vector_length(dataset)
_, _, info = dataset.random_sample()
obs_vector = self.info_to_gt_obs(info)
# Setup pick model
obs_dim = obs_vector.shape[0]
act_dim = 3
self.pick_model = MlpModel(
self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)
sampled_gt_obs = []
num_samples = 1000
for _ in range(num_samples):
_, _, info = dataset.random_sample()
t_worldaug_world, _ = self.get_augmentation_transform()
sampled_gt_obs.append(self.info_to_gt_obs(info, t_worldaug_world))
sampled_gt_obs = np.array(sampled_gt_obs)
obs_train_parameters = dict()
obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(
np.float32)
obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(
np.float32)
self.pick_model.set_normalization_parameters(obs_train_parameters)
# Setup pick-conditioned place model
obs_dim = obs_vector.shape[0] + act_dim
act_dim = 3
self.place_model = MlpModel(
self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)
sampled_gt_obs = []
num_samples = 1000
for _ in range(num_samples):
_, act, info = dataset.random_sample()
t_worldaug_world, _ = self.get_augmentation_transform()
obs = self.info_to_gt_obs(info, t_worldaug_world)
obs = np.hstack((obs, self.act_to_gt_act(act, t_worldaug_world)[:3]))
sampled_gt_obs.append(obs)
sampled_gt_obs = np.array(sampled_gt_obs)
obs_train_parameters = dict()
obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(
np.float32)
obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(
np.float32)
self.place_model.set_normalization_parameters(obs_train_parameters)
def train(self, dataset, num_iter, writer, validation_dataset):
"""Train on dataset for a specific number of iterations."""
if self.pick_model is None:
self.init_model(dataset)
if self.use_mdn:
loss_criterion = mdn_utils.mdn_loss
else:
loss_criterion = tf.keras.losses.MeanSquaredError()
@tf.function
def train_step(pick_model, place_model, batch_obs, batch_act,
loss_criterion):
with tf.GradientTape() as tape:
prediction = pick_model(batch_obs)
loss0 = loss_criterion(batch_act[:, 0:3], prediction)
grad = tape.gradient(loss0, pick_model.trainable_variables)
self.pick_optim.apply_gradients(
zip(grad, pick_model.trainable_variables))
with tf.GradientTape() as tape:
# batch_obs = tf.concat((batch_obs, batch_act[:,0:3] +
# tf.random.normal(shape=batch_act[:,0:3].shape,
# stddev=0.001)), axis=1)
batch_obs = tf.concat((batch_obs, batch_act[:, 0:3]), axis=1)
prediction = place_model(batch_obs)
loss1 = loss_criterion(batch_act[:, 3:], prediction)
grad = tape.gradient(loss1, place_model.trainable_variables)
self.place_optim.apply_gradients(
zip(grad, place_model.trainable_variables))
return loss0 + loss1
print_rate = 100
for i in range(num_iter):
start = time.time()
batch_obs, batch_act, _, _, _ = self.get_data_batch(dataset)
# Forward through model, compute training loss, update weights.
self.metric.reset_states()
loss = train_step(self.pick_model, self.place_model, batch_obs, batch_act,
loss_criterion)
self.metric(loss)
with writer.as_default():
tf.summary.scalar(
'gt_state_loss', self.metric.result(), step=self.total_iter + i)
if i % print_rate == 0:
loss = np.float32(loss)
print(f'Train Iter: {self.total_iter + i} Loss: {loss:.4f} Iter time:',
time.time() - start)
# utils.meshcat_visualize(self.vis, obs, act, info)
self.total_iter += num_iter
self.save()
def act(self, obs, info):
"""Run inference and return best action."""
act = {'camera_config': self.camera_config, 'primitive': None}
# Get observations and run pick prediction
gt_obs = self.info_to_gt_obs(info)
pick_prediction = self.pick_model(gt_obs[None, Ellipsis])
if self.use_mdn:
pi, mu, var = pick_prediction
# prediction = mdn_utils.pick_max_mean(pi, mu, var)
pick_prediction = mdn_utils.sample_from_pdf(pi, mu, var)
pick_prediction = pick_prediction[:, 0, :]
pick_prediction = pick_prediction[0] # unbatch
# Get observations and run place prediction
obs_with_pick = np.hstack((gt_obs, pick_prediction))
# since the pick at train time is always 0.0,
# the predictions are unstable if not exactly 0
obs_with_pick[-1] = 0.0
place_prediction = self.place_model(obs_with_pick[None, Ellipsis])
if self.use_mdn:
pi, mu, var = place_prediction
# prediction = mdn_utils.pick_max_mean(pi, mu, var)
place_prediction = mdn_utils.sample_from_pdf(pi, mu, var)
place_prediction = place_prediction[:, 0, :]
place_prediction = place_prediction[0]
prediction = np.hstack((pick_prediction, place_prediction))
# just go exactly to objects, from observations
# p0_position = np.hstack((gt_obs[3:5], 0.02))
# p0_rotation = utils.eulerXYZ_to_quatXYZW(
# (0, 0, -gt_obs[5]*self.theta_scale))
# p1_position = np.hstack((gt_obs[0:2], 0.02))
# p1_rotation = utils.eulerXYZ_to_quatXYZW(
# (0, 0, -gt_obs[2]*self.theta_scale))
# just go exactly to objects, predicted
p0_position = np.hstack((prediction[0:2], 0.02))
p0_rotation = utils.eulerXYZ_to_quatXYZW(
(0, 0, -prediction[2] * self.theta_scale))
p1_position = np.hstack((prediction[3:5], 0.02))
p1_rotation = utils.eulerXYZ_to_quatXYZW(
(0, 0, -prediction[5] * self.theta_scale))
# Select task-specific motion primitive.
act['primitive'] = 'pick_place'
if self.task == 'sweeping':
act['primitive'] = 'sweep'
elif self.task == 'pushing':
act['primitive'] = 'push'
params = {
'pose0': (p0_position, p0_rotation),
'pose1': (p1_position, p1_rotation)
}
act['params'] = params
return act
#-------------------------------------------------------------------------
# Helper Functions
#-------------------------------------------------------------------------
def load(self, num_iter):
"""Load something."""
# Do something here.
# self.model.load(os.path.join(self.models_dir, model_fname))
# Update total training iterations of agent.
self.total_iter = num_iter
def save(self):
"""Save models."""
# Do something here.
# self.model.save(os.path.join(self.models_dir, model_fname))
pass
class GtState3Step6DAgent(GtState6DAgent):
"""Agent which uses ground-truth state information -- useful as a baseline."""
def __init__(self, name, task):
super().__init__(name, task)
# Set up model.
self.pick_model = None
self.place_se2_model = None
self.place_rpz_model = None
self.pick_optim = tf.keras.optimizers.Adam(learning_rate=2e-4)
self.place_se2_optim = tf.keras.optimizers.Adam(learning_rate=2e-4)
self.place_rpz_optim = tf.keras.optimizers.Adam(learning_rate=2e-4)
self.metric = tf.keras.metrics.Mean(name='metric')
self.val_metric = tf.keras.metrics.Mean(name='val_metric')
def init_model(self, dataset):
"""Initialize models, including normalization parameters."""
self.set_max_obs_vector_length(dataset)
_, _, info = dataset.random_sample()
obs_vector = self.info_to_gt_obs(info)
# Setup pick model
obs_dim = obs_vector.shape[0]
act_dim = 3
self.pick_model = MlpModel(
self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)
sampled_gt_obs = []
num_samples = 1000
for _ in range(num_samples):
_, _, info = dataset.random_sample()
t_worldaug_world, _ = self.get_augmentation_transform()
sampled_gt_obs.append(self.info_to_gt_obs(info, t_worldaug_world))
sampled_gt_obs = np.array(sampled_gt_obs)
obs_train_parameters = dict()
obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(
np.float32)
obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(
np.float32)
self.pick_model.set_normalization_parameters(obs_train_parameters)
# Setup pick-conditioned place se2 model
obs_dim = obs_vector.shape[0] + act_dim
act_dim = 3
self.place_se2_model = MlpModel(
self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)
sampled_gt_obs = []
num_samples = 1000
for _ in range(num_samples):
_, act, info = dataset.random_sample()
t_worldaug_world, _ = self.get_augmentation_transform()
obs = self.info_to_gt_obs(info, t_worldaug_world)
obs = np.hstack((obs, self.act_to_gt_act(act, t_worldaug_world)[:3]))
sampled_gt_obs.append(obs)
sampled_gt_obs = np.array(sampled_gt_obs)
obs_train_parameters = dict()
obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(
np.float32)
obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(
np.float32)
self.place_se2_model.set_normalization_parameters(obs_train_parameters)
# Setup pick-conditioned place rpz model
obs_dim = obs_vector.shape[0] + act_dim + 3
act_dim = 3
self.place_rpz_model = MlpModel(
self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)
sampled_gt_obs = []
num_samples = 1000
for _ in range(num_samples):
_, act, info = dataset.random_sample()
t_worldaug_world, _ = self.get_augmentation_transform()
obs = self.info_to_gt_obs(info, t_worldaug_world)
obs = np.hstack((obs, self.act_to_gt_act(act, t_worldaug_world)[:3]))
sampled_gt_obs.append(obs)
sampled_gt_obs = np.array(sampled_gt_obs)
obs_train_parameters = dict()
obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(
np.float32)
obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(
np.float32)
self.place_rpz_model.set_normalization_parameters(obs_train_parameters)
def train(self, dataset, num_iter, writer, validation_dataset):
"""Train on dataset for a specific number of iterations."""
if self.pick_model is None:
self.init_model(dataset)
if self.use_mdn:
loss_criterion = mdn_utils.mdn_loss
else:
loss_criterion = tf.keras.losses.MeanSquaredError()
@tf.function
def train_step(pick_model, place_se2_model, place_rpz_model, batch_obs,
batch_act, loss_criterion):
with tf.GradientTape() as tape:
prediction = pick_model(batch_obs)
loss0 = loss_criterion(batch_act[:, 0:3], prediction)
grad = tape.gradient(loss0, pick_model.trainable_variables)
self.pick_optim.apply_gradients(
zip(grad, pick_model.trainable_variables))
with tf.GradientTape() as tape:
batch_obs = tf.concat((batch_obs, batch_act[:, 0:3]), axis=1)
prediction = place_se2_model(batch_obs)
loss1 = loss_criterion(batch_act[:, 3:6], prediction)
grad = tape.gradient(loss1, place_se2_model.trainable_variables)
self.place_se2_optim.apply_gradients(
zip(grad, place_se2_model.trainable_variables))
with tf.GradientTape() as tape:
batch_obs = tf.concat((batch_obs, batch_act[:, 3:6]), axis=1)
prediction = place_rpz_model(batch_obs)
loss2 = loss_criterion(batch_act[:, 6:], prediction)
grad = tape.gradient(loss2, place_rpz_model.trainable_variables)
self.place_rpz_optim.apply_gradients(
zip(grad, place_rpz_model.trainable_variables))
return loss0 + loss1 + loss2
print_rate = 100
for i in range(num_iter):
start = time.time()
batch_obs, batch_act, _, _, _ = self.get_data_batch(dataset)
# Forward through model, compute training loss, update weights.
self.metric.reset_states()
loss = train_step(self.pick_model, self.place_se2_model,
self.place_rpz_model, batch_obs, batch_act,
loss_criterion)
self.metric(loss)
with writer.as_default():
tf.summary.scalar(
'gt_state_loss', self.metric.result(), step=self.total_iter + i)
if i % print_rate == 0:
loss = np.float32(loss)
print(f'Train Iter: {self.total_iter + i} Loss: {loss:.4f} Iter time:',
time.time() - start)
# utils.meshcat_visualize(self.vis, obs, act, info)
self.total_iter += num_iter
self.save()
def act(self, obs, info):
"""Run inference and return best action."""
act = {'camera_config': self.camera_config, 'primitive': None}
# Get observations and run pick prediction
gt_obs = self.info_to_gt_obs(info)
pick_prediction = self.pick_model(gt_obs[None, Ellipsis])
if self.use_mdn:
pi, mu, var = pick_prediction
# prediction = mdn_utils.pick_max_mean(pi, mu, var)
pick_prediction = mdn_utils.sample_from_pdf(pi, mu, var)
pick_prediction = pick_prediction[:, 0, :]
pick_prediction = pick_prediction[0] # unbatch
# Get observations and run place prediction
obs_with_pick = np.hstack((gt_obs, pick_prediction)).astype(np.float32)
# since the pick at train time is always 0.0,
# the predictions are unstable if not exactly 0
obs_with_pick[-1] = 0.0
place_se2_prediction = self.place_se2_model(obs_with_pick[None, Ellipsis])
if self.use_mdn:
pi, mu, var = place_se2_prediction
# prediction = mdn_utils.pick_max_mean(pi, mu, var)
place_se2_prediction = mdn_utils.sample_from_pdf(pi, mu, var)
place_se2_prediction = place_se2_prediction[:, 0, :]
place_se2_prediction = place_se2_prediction[0]
# Get observations and run rpz prediction
obs_with_pick_place_se2 = np.hstack(
(obs_with_pick, place_se2_prediction)).astype(np.float32)
place_rpz_prediction = self.place_rpz_model(obs_with_pick_place_se2[None,
Ellipsis])
if self.use_mdn:
pi, mu, var = place_rpz_prediction
# prediction = mdn_utils.pick_max_mean(pi, mu, var)
place_rpz_prediction = mdn_utils.sample_from_pdf(pi, mu, var)
place_rpz_prediction = place_rpz_prediction[:, 0, :]
place_rpz_prediction = place_rpz_prediction[0]
p0_position = np.hstack((pick_prediction[0:2], 0.02))
p0_rotation = utils.eulerXYZ_to_quatXYZW((0, 0, 0))
p1_position = np.hstack(
(place_se2_prediction[0:2], place_rpz_prediction[2]))
p1_rotation = utils.eulerXYZ_to_quatXYZW(
(place_rpz_prediction[0] * self.theta_scale,
place_rpz_prediction[1] * self.theta_scale,
-place_se2_prediction[2] * self.theta_scale))
# Select task-specific motion primitive.
act['primitive'] = 'pick_place_6dof'
params = {
'pose0': (p0_position, p0_rotation),
'pose1': (p1_position, p1_rotation)
}
act['params'] = params
return act
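# A minimal, self-contained sketch of the pick-conditioned two-tape update
# used by `train_step` above (toy dense models and an MSE loss; none of the
# names below come from the original agents):
if __name__ == '__main__':
  pick_net = tf.keras.Sequential([tf.keras.layers.Dense(3)])
  place_net = tf.keras.Sequential([tf.keras.layers.Dense(3)])
  pick_opt = tf.keras.optimizers.Adam(learning_rate=2e-4)
  place_opt = tf.keras.optimizers.Adam(learning_rate=2e-4)
  obs = tf.random.normal([8, 16])
  act_gt = tf.random.normal([8, 6])
  with tf.GradientTape() as tape:
    loss0 = tf.reduce_mean(tf.square(act_gt[:, 0:3] - pick_net(obs)))
  pick_opt.apply_gradients(
      zip(tape.gradient(loss0, pick_net.trainable_variables),
          pick_net.trainable_variables))
  with tf.GradientTape() as tape:
    # The place model is conditioned on the pick target, as in train_step.
    place_in = tf.concat((obs, act_gt[:, 0:3]), axis=1)
    loss1 = tf.reduce_mean(tf.square(act_gt[:, 3:] - place_net(place_in)))
  place_opt.apply_gradients(
      zip(tape.gradient(loss1, place_net.trainable_variables),
          place_net.trainable_variables))
  print(float(loss0), float(loss1))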
| [
"numpy.hstack",
"tensorflow.concat",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.compat.v1.enable_eager_execution",
"tensorflow.keras.optimizers.Adam",
"numpy.float32",
"numpy.array",
"tensorflow.keras.metrics.Mean",
"tensorflow.GradientTape"
] | ravens/ravens/agents/gt_state_2_step.py | [(29, 'tensorflow.compat.v1.enable_eager_execution', 'tf.compat.v1.enable_eager_execution', ([], {}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.0002)'}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.0002)'}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""metric"""'}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""val_metric"""'}), True, 'import tensorflow as tf\n'), (57, 'ravens.models.MlpModel', 'MlpModel', (['self.batch_size', 'obs_dim', 'act_dim', '"""relu"""', 'self.use_mdn'], {'dropout': '(0.1)'}), False, 'from ravens.models import MlpModel\n'), (68, 'numpy.array', 'np.array', (['sampled_gt_obs'], {}), True, 'import numpy as np\n'), (80, 'ravens.models.MlpModel', 'MlpModel', (['self.batch_size', 'obs_dim', 'act_dim', '"""relu"""', 'self.use_mdn'], {'dropout': '(0.1)'}), False, 'from ravens.models import MlpModel\n'), (93, 'numpy.array', 'np.array', (['sampled_gt_obs'], {}), True, 'import numpy as np\n'), (173, 'numpy.hstack', 'np.hstack', (['(gt_obs, pick_prediction)'], {}), True, 'import numpy as np\n'), (187, 'numpy.hstack', 'np.hstack', (['(pick_prediction, place_prediction)'], {}), True, 'import numpy as np\n'), (198, 'numpy.hstack', 'np.hstack', (['(prediction[0:2], 0.02)'], {}), True, 'import numpy as np\n'), (199, 'ravens.utils.eulerXYZ_to_quatXYZW', 'utils.eulerXYZ_to_quatXYZW', (['(0, 0, -prediction[2] * self.theta_scale)'], {}), False, 'from ravens import utils\n'), (201, 'numpy.hstack', 'np.hstack', (['(prediction[3:5], 0.02)'], {}), True, 'import numpy as np\n'), (202, 'ravens.utils.eulerXYZ_to_quatXYZW', 'utils.eulerXYZ_to_quatXYZW', (['(0, 0, -prediction[5] * self.theta_scale)'], {}), False, 'from ravens import utils\n'), (249, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.0002)'}), True, 'import tensorflow as tf\n'), (250, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.0002)'}), True, 'import tensorflow as tf\n'), (251, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.0002)'}), True, 'import tensorflow as tf\n'), (253, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""metric"""'}), True, 'import tensorflow as tf\n'), (254, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""val_metric"""'}), True, 'import tensorflow as tf\n'), (266, 'ravens.models.MlpModel', 'MlpModel', (['self.batch_size', 'obs_dim', 'act_dim', '"""relu"""', 'self.use_mdn'], {'dropout': '(0.1)'}), False, 'from ravens.models import MlpModel\n'), (277, 'numpy.array', 'np.array', (['sampled_gt_obs'], {}), True, 'import numpy as np\n'), (289, 'ravens.models.MlpModel', 'MlpModel', (['self.batch_size', 'obs_dim', 'act_dim', '"""relu"""', 'self.use_mdn'], {'dropout': '(0.1)'}), False, 'from ravens.models import MlpModel\n'), (302, 'numpy.array', 'np.array', (['sampled_gt_obs'], {}), True, 'import numpy as np\n'), (314, 'ravens.models.MlpModel', 'MlpModel', (['self.batch_size', 'obs_dim', 'act_dim', '"""relu"""', 'self.use_mdn'], {'dropout': '(0.1)'}), False, 'from ravens.models import MlpModel\n'), (327, 'numpy.array', 'np.array', (['sampled_gt_obs'], {}), True, 'import numpy as np\n'), (439, 'numpy.hstack', 'np.hstack', (['(pick_prediction[0:2], 0.02)'], {}), True, 'import numpy as np\n'), (440, 'ravens.utils.eulerXYZ_to_quatXYZW', 'utils.eulerXYZ_to_quatXYZW', (['(0, 0, 0)'], {}), False, 'from ravens import utils\n'), (442, 'numpy.hstack', 'np.hstack', (['(place_se2_prediction[0:2], place_rpz_prediction[2])'], {}), True, 'import numpy as np\n'), (444, 'ravens.utils.eulerXYZ_to_quatXYZW', 'utils.eulerXYZ_to_quatXYZW', (['(place_rpz_prediction[0] * self.theta_scale, place_rpz_prediction[1] * self\n .theta_scale, -place_se2_prediction[2] * self.theta_scale)'], {}), False, 'from ravens import utils\n'), (111, 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), True, 'import tensorflow as tf\n'), (136, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (168, 'ravens.models.mdn_utils.sample_from_pdf', 'mdn_utils.sample_from_pdf', (['pi', 'mu', 'var'], {}), False, 'from ravens.models import mdn_utils\n'), (183, 'ravens.models.mdn_utils.sample_from_pdf', 'mdn_utils.sample_from_pdf', (['pi', 'mu', 'var'], {}), False, 'from ravens.models import mdn_utils\n'), (345, 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), True, 'import tensorflow as tf\n'), (374, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (407, 'ravens.models.mdn_utils.sample_from_pdf', 'mdn_utils.sample_from_pdf', (['pi', 'mu', 'var'], {}), False, 'from ravens.models import mdn_utils\n'), (422, 'ravens.models.mdn_utils.sample_from_pdf', 'mdn_utils.sample_from_pdf', (['pi', 'mu', 'var'], {}), False, 'from ravens.models import mdn_utils\n'), (435, 'ravens.models.mdn_utils.sample_from_pdf', 'mdn_utils.sample_from_pdf', (['pi', 'mu', 'var'], {}), False, 'from ravens.models import mdn_utils\n'), (116, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (122, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.concat', 'tf.concat', (['(batch_obs, batch_act[:, 0:3])'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (150, 'numpy.float32', 'np.float32', (['loss'], {}), True, 'import numpy as np\n'), (350, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (356, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (357, 'tensorflow.concat', 'tf.concat', (['(batch_obs, batch_act[:, 0:3])'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (363, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (364, 'tensorflow.concat', 'tf.concat', (['(batch_obs, batch_act[:, 3:6])'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (389, 'numpy.float32', 'np.float32', (['loss'], {}), True, 'import numpy as np\n'), (412, 'numpy.hstack', 'np.hstack', (['(gt_obs, pick_prediction)'], {}), True, 'import numpy as np\n'), (427, 'numpy.hstack', 'np.hstack', (['(obs_with_pick, place_se2_prediction)'], {}), True, 'import numpy as np\n'), (152, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (391, 'time.time', 'time.time', ([], {}), False, 'import time\n')]
ahmedsabie/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Keras hashing preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import random
import string
import time
from absl import flags
import numpy as np
from tensorflow.python import keras
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.layers.preprocessing import hashing
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
v2_compat.enable_v2_behavior()
# word_gen yields random two-character strings of ASCII letters (lowercase and
# uppercase), so there are 52**2 = 2,704 possible unique strings.
def word_gen():
for _ in itertools.count(1):
yield "".join(random.choice(string.ascii_letters) for i in range(2))
class BenchmarkLayer(benchmark.TensorFlowBenchmark):
"""Benchmark the layer forward pass."""
def run_dataset_implementation(self, batch_size):
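    # Baseline: run the same hashing through raw tf.data + string_ops, without
    # the Keras layer, so the layer's timing can be reported relative to it.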
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = dataset_ops.Dataset.from_generator(word_gen, dtypes.string,
tensor_shape.TensorShape([]))
ds = ds.shuffle(batch_size * 100)
ds = ds.batch(batch_size)
num_batches = 5
ds = ds.take(num_batches)
ds = ds.prefetch(num_batches)
starts.append(time.time())
# Benchmarked code begins here.
for i in ds:
_ = string_ops.string_to_hash_bucket(i, num_buckets=2)
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
return avg_time
def bm_layer_implementation(self, batch_size):
input_1 = keras.Input(shape=(None,), dtype=dtypes.string, name="word")
layer = hashing.Hashing(num_bins=2)
_ = layer(input_1)
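    # Calling the layer on a symbolic Input builds it once up front, keeping
    # one-time construction cost out of the timed loop below.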
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = dataset_ops.Dataset.from_generator(word_gen, dtypes.string,
tensor_shape.TensorShape([]))
ds = ds.shuffle(batch_size * 100)
ds = ds.batch(batch_size)
num_batches = 5
ds = ds.take(num_batches)
ds = ds.prefetch(num_batches)
starts.append(time.time())
# Benchmarked code begins here.
for i in ds:
_ = layer(i)
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
name = "hashing|batch_%s" % batch_size
baseline = self.run_dataset_implementation(batch_size)
extras = {
"dataset implementation baseline": baseline,
"delta seconds": (baseline - avg_time),
"delta percent": ((baseline - avg_time) / baseline) * 100
}
self.report_benchmark(
iters=num_repeats, wall_time=avg_time, extras=extras, name=name)
def benchmark_vocab_size_by_batch(self):
for batch in [32, 64, 256]:
self.bm_layer_implementation(batch_size=batch)
if __name__ == "__main__":
test.main()
| [
"tensorflow.python.compat.v2_compat.enable_v2_behavior",
"tensorflow.python.keras.layers.preprocessing.hashing.Hashing",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.keras.Input",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.string_ops.string_to_hash_bucket",
"numpy.array"
] | tensorflow/python/keras/layers/preprocessing/benchmarks/hashing_benchmark.py | [(40, 'tensorflow.python.compat.v2_compat.enable_v2_behavior', 'v2_compat.enable_v2_behavior', ([], {}), False, 'from tensorflow.python.compat import v2_compat\n'), (46, 'itertools.count', 'itertools.count', (['(1)'], {}), False, 'import itertools\n'), (115, 'tensorflow.python.platform.test.main', 'test.main', ([], {}), False, 'from tensorflow.python.platform import test\n'), (76, 'tensorflow.python.keras.Input', 'keras.Input', ([], {'shape': '(None,)', 'dtype': 'dtypes.string', 'name': '"""word"""'}), False, 'from tensorflow.python import keras\n'), (77, 'tensorflow.python.keras.layers.preprocessing.hashing.Hashing', 'hashing.Hashing', ([], {'num_bins': '(2)'}), False, 'from tensorflow.python.keras.layers.preprocessing import hashing\n'), (59, 'tensorflow.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', (['[]'], {}), False, 'from tensorflow.python.framework import tensor_shape\n'), (65, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (68, 'tensorflow.python.ops.string_ops.string_to_hash_bucket', 'string_ops.string_to_hash_bucket', (['i'], {'num_buckets': '(2)'}), False, 'from tensorflow.python.ops import string_ops\n'), (70, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (85, 'tensorflow.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', (['[]'], {}), False, 'from tensorflow.python.framework import tensor_shape\n'), (91, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (96, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (47, 'random.choice', 'random.choice', (['string.ascii_letters'], {}), False, 'import random\n'), (72, 'numpy.array', 'np.array', (['ends'], {}), True, 'import numpy as np\n'), (72, 'numpy.array', 'np.array', (['starts'], {}), True, 'import numpy as np\n'), (98, 'numpy.array', 'np.array', (['ends'], {}), True, 'import numpy as np\n'), (98, 'numpy.array', 'np.array', (['starts'], {}), True, 'import numpy as np\n')] |
victor-tuda/chatbot | 3cadd018759344991c77e2aa86b8965ed0271789 | import random
import json
import pickle
import numpy as np
import nltk
nltk.download('punkt')
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
from tensorflow.keras.optimizers import SGD
lemmatizer = WordNetLemmatizer()
intents = json.loads(open('./intents.json').read())
words = []
classes = []
documents = []
ignore_letters = ['?', '!', '@', ',', ';', '.']
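# Tokenize every example phrase, grow the vocabulary, and remember each
# (token list, intent tag) pair as a training document.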
for intent in intents['intents']:
for pattern in intent['patterns']:
word_list = nltk.word_tokenize(pattern)
words.extend(word_list)
documents.append((word_list, intent['tag']))
if intent['tag'] not in classes:
classes.append(intent['tag'])
words = [lemmatizer.lemmatize(word) for word in words if word not in ignore_letters]
words = sorted(set(words))
classes = sorted(set(classes))
pickle.dump(words, open('words.pkl', 'wb'))
pickle.dump(classes, open('classes.pkl', 'wb'))
training = []
output_empty = [0] * len(classes)
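# Each training row pairs a bag-of-words vector over the vocabulary with a
# one-hot vector marking the document's intent class.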
for document in documents:
bag = []
word_patterns = document[0]
word_patterns = [lemmatizer.lemmatize(word.lower()) for word in word_patterns]
    # Encode the pattern as a bag-of-words vector over the full vocabulary.
    # Iterating over `words` (rather than over `word_patterns` itself, which
    # would set every entry to 1) is what makes the features discriminative.
    for word in words:
        bag.append(1 if word in word_patterns else 0)
output_row = list(output_empty)
output_row[classes.index(document[1])] = 1
training.append([bag, output_row])
random.shuffle(training)
training = np.array(training, dtype=object)  # rows are ragged: bag and output_row lengths differ
train_x = list(training[:, 0])
train_y = list(training[:, 1])
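# A small multilayer perceptron: two ReLU hidden layers with dropout, followed
# by a softmax over the intent classes.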
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
model.save('chatbot_model.model.h5')  # fit() already returned the History object above
print('Done')
| [
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Dropout",
"numpy.array",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.optimizers.SGD"
] | training.py | [(7, 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), False, 'import nltk\n'), (8, 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), False, 'import nltk\n'), (15, 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), False, 'from nltk.stem import WordNetLemmatizer\n'), (54, 'random.shuffle', 'random.shuffle', (['training'], {}), False, 'import random\n'), (55, 'numpy.array', 'np.array', (['training'], {}), True, 'import numpy as np\n'), (60, 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras.models import Sequential\n'), (67, 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.01)', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), False, 'from tensorflow.keras.optimizers import SGD\n'), (62, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout\n'), (63, 'tensorflow.keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout\n'), (64, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout\n'), (70, 'numpy.array', 'np.array', (['train_x'], {}), True, 'import numpy as np\n'), (70, 'numpy.array', 'np.array', (['train_y'], {}), True, 'import numpy as np\n'), (26, 'nltk.word_tokenize', 'nltk.word_tokenize', (['pattern'], {}), False, 'import nltk\n')] |
Sensors-in-Paradise/OpportunityML | a123b4842de45f735d517be6bcd96ca35171db91 | from random import shuffle
from models.RainbowModelLeaveRecsOut import RainbowModelLeaveRecsOut
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout # type: ignore
from tensorflow.keras.models import Sequential # type: ignore
import numpy as np
from utils.Recording import Recording
from utils.array_operations import split_list_by_percentage
from utils.typing import assert_type
class ConvModel(RainbowModelLeaveRecsOut):
def __init__(self, **kwargs):
"""
Convolutional model
:param kwargs:
window_size: int
stride_size: int
test_percentage: float
n_features: int
n_outputs: int
"""
# hyper params to instance vars
self.window_size = kwargs["window_size"]
self.stride_size = kwargs["stride_size"]
self.test_percentage = kwargs["test_percentage"]
self.verbose = 0
self.epochs = 10
self.batch_size = 32
# create model
self.model = self.__create_model(kwargs["n_features"], kwargs["n_outputs"])
def __create_model(self, n_features, n_outputs):
# window_size, n_features, n_outputs = X.shape[1], X.shape[2], y.shape[1]
print(
f"Building model for {self.window_size} timesteps (window_size) and {n_features} features"
)
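        # Two stacked 1D convolutions extract local temporal features from each
        # window; dropout and max pooling regularize and downsample before the
        # dense classification head.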
model = Sequential()
model.add(
Conv1D(
filters=64,
kernel_size=3,
activation="relu",
input_shape=(self.window_size, n_features),
)
)
model.add(Conv1D(filters=64, kernel_size=3, activation="relu"))
model.add(Dropout(0.5))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(100, activation="relu"))
model.add(Dense(n_outputs, activation="softmax"))
model.compile(
loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
return model
| [
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.layers.MaxPooling1D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten"
] | archive/model_archive/ConvModel.py | [(42, 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras.models import Sequential\n'), (44, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'activation': '"""relu"""', 'input_shape': '(self.window_size, n_features)'}), False, 'from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (51, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (52, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), False, 'from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (53, 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)'}), False, 'from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (54, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (55, 'tensorflow.keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n'), (56, 'tensorflow.keras.layers.Dense', 'Dense', (['n_outputs'], {'activation': '"""softmax"""'}), False, 'from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout\n')] |
abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Prototype decorator for defining legacy-graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import struct_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
class VariableHolder(object):
"""Holds variables for a python function."""
def __init__(self, fn=None, share_variables=False):
self._fn = fn
self._share_variables = share_variables
self._variables_by_name = data_structures.Mapping()
@property
def variables(self):
return self._variables_by_name
def variable_creator_scope(self, next_creator, **kwargs):
"""Creates variables & adds them to collections to match legacy code."""
collections = kwargs.pop("collections", None)
v = None
# Get expected variable name.
with ops.name_scope(kwargs.get("name", None), "Variable") as name:
variable_name = ops.name_from_scope_name(name)
kwargs["name"] = name
if self._share_variables:
v = self._variables_by_name.get(variable_name, None)
if v is None:
v = next_creator(**kwargs)
self._variables_by_name[variable_name] = v
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if v.trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
ops.add_to_collections(collections, v)
return v
def __call__(self, *args, **kwargs):
return self.call_with_variable_creator_scope(self._fn)(*args, **kwargs)
def call_with_variable_creator_scope(self, fn):
def wrapped(*args, **kwargs):
with variable_scope.variable_creator_scope(self.variable_creator_scope):
return fn(*args, **kwargs)
return wrapped
def _get_element_from_tensor_info(tensor_info, graph):
"""Simplified copy of the deprecated `get_tensor_from_tensor_info`."""
encoding = tensor_info.WhichOneof("encoding")
if encoding == "name":
# We may get operations here in some cases. TensorInfo is a bit of a
# misnomer if so.
return graph.as_graph_element(tensor_info.name)
elif encoding == "coo_sparse":
return sparse_tensor.SparseTensor(
graph.get_tensor_by_name(tensor_info.coo_sparse.indices_tensor_name),
graph.get_tensor_by_name(tensor_info.coo_sparse.values_tensor_name),
graph.get_tensor_by_name(
tensor_info.coo_sparse.dense_shape_tensor_name))
elif encoding == "composite_tensor":
struct_coder = nested_structure_coder.StructureCoder()
spec_proto = struct_pb2.StructuredValue(
type_spec_value=tensor_info.composite_tensor.type_spec)
spec = struct_coder.decode_proto(spec_proto)
components = [graph.get_tensor_by_name(component.name) for component in
tensor_info.composite_tensor.components]
return spec._from_components(components) # pylint: disable=protected-access
else:
raise ValueError("Invalid TensorInfo.encoding: %s" % encoding)
def _lift_single_variable(old_variable, graph, variable_holder):
"""Lifts `old_variable` out of the `FuncGraph` `graph`."""
new_variable = resource_variable_ops.UninitializedVariable(
shape=old_variable.shape,
dtype=old_variable.dtype,
name=old_variable.op.name,
trainable=old_variable.trainable,
extra_handle_data=old_variable.handle)
new_variable._initializer_op = old_variable._initializer_op # pylint: disable=protected-access
graph.add_capture(new_variable.handle, old_variable.handle)
# Now that we've added the new variable to graph.captures,
# graph.capture will use that cached value and do some post-processing
# on the capture like recording it on the tape.
graph.capture(new_variable.handle)
# pylint: disable=protected-access
variable_name = new_variable.name.split(":")[0]
variable_holder._variables_by_name[variable_name] = new_variable
graph._weak_variables.append(weakref.ref(new_variable))
# pylint: enable=protected-access
graph.watch_variable(new_variable)
return new_variable
def _lift_unlifted_variables(graph, variable_holder):
"""Finds resource variables and lifts them into the outer context.
When we import a GraphDef inside a wrap_function, no Python graph building
code runs. This means we get VarHandleOps which create variable resources,
but no corresponding Python objects. Leaving them like this works but gives
the user no way to interact with or modify the variables outside the graph.
This method searches for variables and lifts them out as regular variable
objects when possible, indicating to the FuncGraph that they are captures.
Args:
graph: The FuncGraph to lift variables from.
variable_holder: A VariableHolder to record the lifted variables in.
"""
with graph.as_default():
global_collection_variables = ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES)
local_collection_variables = ops.get_collection(
ops.GraphKeys.LOCAL_VARIABLES)
existing_captures = {id(c) for c in graph.internal_captures}
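    # Track the ids of handles that are already captured so no variable is
    # lifted twice.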
lifted_variables = {}
def _should_lift_variable(v):
return ((v._in_graph_mode # pylint: disable=protected-access
and v.graph.building_function)
and isinstance(v, resource_variable_ops.BaseResourceVariable)
and id(v.handle) not in existing_captures)
for old_variable in global_collection_variables:
if _should_lift_variable(old_variable):
new_variable = _lift_single_variable(
old_variable, graph, variable_holder)
lifted_variables[id(old_variable)] = new_variable
existing_captures.add(id(old_variable.handle))
for old_variable in local_collection_variables:
if _should_lift_variable(old_variable):
new_variable = _lift_single_variable(
old_variable, graph, variable_holder)
lifted_variables[id(old_variable)] = new_variable
existing_captures.add(id(old_variable.handle))
if new_variable._in_graph_mode: # pylint: disable=protected-access
outer_graph = new_variable.graph
# Variables are added to the global collection by default. In this
# case we only want the variable in the local collection, so we'll pop
# it out.
global_collection = outer_graph.get_collection_ref(
ops.GraphKeys.GLOBAL_VARIABLES)
global_collection.remove(new_variable)
outer_graph.add_to_collection(
ops.GraphKeys.LOCAL_VARIABLES, new_variable)
# Update the FuncGraph's collections, partly for the user and partly so this
# function is idempotent when it runs again in prune() calls.
for collection_name in [
ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.LOCAL_VARIABLES
]:
mutable_collection = ops.get_collection_ref(collection_name)
for index, current in enumerate(mutable_collection):
mutable_collection[index] = lifted_variables.get(id(current), current)
if not resource_variable_ops.is_resource_variable(
mutable_collection[index]):
logging.log_first_n(
logging.WARN,
"Unable to create a python object for variable {} because it is "
"a reference variable. It may not be visible to training APIs. "
"If this is a problem, consider rebuilding the SavedModel after "
"running tf.compat.v1.enable_resource_variables().".format(
mutable_collection[index]),
5)
# TODO(allenl): make this trackable
class WrappedFunction(function.ConcreteFunction):
"""Wraps a tf V1 piece of code in a function."""
def __init__(self, fn_graph, variable_holder, attrs=None, signature=None):
self._variable_holder = variable_holder
_lift_unlifted_variables(fn_graph, variable_holder)
# We call __init__ after lifting variables so that the function's signature
# properly reflects the new captured inputs.
for f in fn_graph.as_graph_def().library.function:
context.context().add_function_def(f)
super(WrappedFunction, self).__init__(
fn_graph, attrs=attrs, signature=signature)
def prune(self, feeds, fetches, name=None, input_signature=None):
"""Extract a subgraph of this function's underlying graph.
Wraps the subgraph in a new `WrappedFunction` object.
Args:
feeds: Input tensors to the subgraph to extract, as `Tensor` objects.
fetches: Possibly-nested Python data structure containing information
about outputs of the target subgraph. Each entry can either be a
`Tensor` object (for data outputs), an `Operation` object (for control
outputs), or a `TensorInfo` proto. Any additional shape/dtype
information provided in a `TensorInfo` and not present in the original
graph will be added to the returned subgraph.
name: (optional) Name to give to the underlying `FuncGraph` of the
returned object. If no name is provided, the graph's name will be
`"pruned"`.
input_signature: (optional) possibly-nested Python data structure
containing `TensorSpec` objects, with which to populate the returned
functions's `FuncGraph`'s `structured_input_signature` field.
Returns:
A new `WrappedFunction` object containing a copy of the portion of this
object's graph that goes from `feeds` to `fetches`.
"""
# TODO(b/129646028): Add support for CompositeTensors.
name = name or "pruned"
flat_feeds = nest.flatten(feeds, expand_composites=True)
flat_feeds = [self.graph.as_graph_element(t) for t in flat_feeds]
for f in flat_feeds:
if not isinstance(f, ops.Tensor):
raise ValueError("Feeds must be tensors.")
# Ignoring all feeds that are captures allows prune to be called
# using wrapped_func.inputs even when it uses variables
internal_captures = {id(c) for c in self.graph.internal_captures}
flat_feeds = [f for f in flat_feeds if id(f) not in internal_captures]
operation_fetches = []
tensor_fetches = []
tensor_infos = []
def _fetch_preprocesing_callback(fetch):
"""Extract out lists of ops, tensors, and tensor type info.
Turns TensorInfos into Tensors in the original `fetches` structure.
Also extracts ops from `fetches`.
Args:
fetch: The fetch to preprocess: Tensor, TensorInfo, or Operation, or
string identifying a Tensor or Operation.
Returns:
`fetch` converted to a Tensor.
"""
if isinstance(fetch, ops.Operation):
operation_fetches.append(fetch)
return fetch
elif isinstance(fetch, meta_graph_pb2.TensorInfo):
tensor_infos.append(fetch)
decoded = _get_element_from_tensor_info(fetch, self._func_graph)
if (tensor_util.is_tensor(decoded) or
isinstance(decoded, composite_tensor.CompositeTensor)):
tensor_fetches.append(decoded)
else:
operation_fetches.append(decoded)
return decoded
elif isinstance(fetch, (ops.Tensor, composite_tensor.CompositeTensor)):
tensor_fetches.append(fetch)
return fetch
else:
graph_element = self.graph.as_graph_element(fetch)
return _fetch_preprocesing_callback(graph_element)
fetches = nest.map_structure(_fetch_preprocesing_callback, fetches)
# Expand composite tensors into their component dense Tensors.
tensor_fetches = nest.flatten(tensor_fetches, expand_composites=True)
for f in (flat_feeds + tensor_fetches + operation_fetches):
if f.graph is not self._func_graph:
raise ValueError("Can only prune function whose feeds and fetches "
"are from this graph (%s). Input %s is from graph %s" %
(self._func_graph, f, f.graph))
with self._func_graph.as_default():
pruned_graph = func_graph.FuncGraph(name)
lift_map = lift_to_graph.lift_to_graph(
operation_fetches + tensor_fetches,
pruned_graph,
sources=flat_feeds + self.graph.internal_captures)
# Note that we add the component tensors of any composite tensors to the
# returned function's outputs list; the list must contain these component
# tensors, or the function's sparse outputs won't work properly.
pruned_graph.outputs.extend(lift_map[x] for x in tensor_fetches)
pruned_graph.control_outputs.extend(
[lift_map[operation] for operation in operation_fetches])
pruned_graph.inputs.extend(lift_map[x] for x in flat_feeds)
for external_capture, internal_capture in self.graph.captures:
pruned_graph.add_capture(external_capture, lift_map[internal_capture])
for ti in tensor_infos:
if ti.WhichOneof("encoding") == "name": # Dense tensors only
t = pruned_graph.as_graph_element(ti.name)
if tensor_util.is_tensor(t):
t.set_shape(tensor_shape.TensorShape(ti.tensor_shape))
# pylint: disable=protected-access
for f in self.graph._functions.values():
pruned_graph._add_function(f)
# pylint: enable=protected-access
pruned_graph.variables = self.graph.variables
def _structured_output_mapping(fetched):
"""callback for `nest.map_structure()`"""
lifted = lift_map[fetched]
if isinstance(lifted, ops.Operation):
return None
return lifted
# expand_composites=True here causes composite tensors to be expanded
# into their component dense Tensors, mapped to the new graph, and then
# reconstituted into their original composite form.
pruned_graph.structured_outputs = nest.map_structure(
_structured_output_mapping, fetches, expand_composites=True)
pruned_graph.structured_input_signature = input_signature
pruned_fn = WrappedFunction(
pruned_graph, variable_holder=self._variable_holder)
pruned_fn._num_positional_args = len(flat_feeds) # pylint: disable=protected-access
# TODO(kathywu): Enable keyword arguments if an input signature is specified
pruned_fn._arg_keywords = [tensor.op.name for tensor in flat_feeds] # pylint: disable=protected-access
return pruned_fn
def _filter_returned_ops(fn):
"""Filtering out any ops returned by function.
Args:
fn: a function
Returns:
A tuple of (
Wrapped function that returns `None` in place of any ops,
dict that maps the index in the flat output structure to the returned op
)
"""
returned_ops = {}
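  # Maps an index in the flattened output structure to the op returned there,
  # so callers can reattach ops after tracing replaces them with None.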
def wrap_and_filter_returned_ops(*args, **kwargs):
outputs = fn(*args, **kwargs)
flat_outputs = nest.flatten(outputs)
for n in range(len(flat_outputs)):
output = flat_outputs[n]
if isinstance(output, ops.Operation):
returned_ops[n] = output
flat_outputs[n] = None
return nest.pack_sequence_as(outputs, flat_outputs)
return wrap_and_filter_returned_ops, returned_ops
class WrappedGraph(object):
"""Class for wrapping multiple TF 1.X functions in a single graph.
Maintains a dictionary mapping names to wrapped functions. See
`tf.compat.v1.wrap_function` to learn more about wrapping V1 functions.
Functions wrapped using this class have access to variables and collections
created in other wrapped functions, using the standard TF 1.X API (
`tf.compat.v1.get_variable` or
`tf.compat.v1.get_default_graph().get_collection(...)`)
Outside a function, variables and collections may be accessed using the
`variables` and `graph` properties.
Example:
```
def add_v1(x):
with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):
v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
return v + x
def increment_var_v1(x):
with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):
v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
return v.assign_add(x)
g = WrappedGraph()
add = g.wrap_function(add_v1, [tf.TensorSpec([], tf.int32)])
increment_var = g.wrap_function(increment_var_v1,
[tf.TensorSpec([], tf.int32)])
assert len(g.variables) == 1
assert g.variables[0].numpy() == 0
increment_var(tf.constant(5))
assert g.variables[0].numpy() == 5
```
"""
def __init__(self, variable_holder=None, **kwargs):
self._variable_holder = (
variable_holder or VariableHolder(share_variables=True))
name = kwargs.pop("name", "wrapped_function_graph")
# Always start with empty collections, unless otherwise specified. Setting
# `collections=None` will copy the collections from the outer graph.
collections = kwargs.pop("collections", {})
self.graph = func_graph.FuncGraph(name, collections=collections, **kwargs)
self._wrapped_function = WrappedFunction(self.graph, self._variable_holder)
self._functions = {}
@property
def functions(self):
return self._functions
@property
def variables(self):
return self._variable_holder.variables
def wrap_function(self, fn, signature, name=None):
"""Wraps a TF 1.X function and returns an eager-compatible function.
All functions wrapped in the same `WrappedGraph` will have access to the
same graph (`tf.compat.v1.get_default_graph` to get the graph object
within a function, or `WrappedGraph.graph` to get the graph outside a
function). Variables created within the function will be added to the
`variables` list.
Function inputs: All inputs to the function must be tensors (nested ok),
with their shapes and dtypes defined in the `signature` argument.
Function outputs:
* The 1.X function may return tensors, variables, and ops. The wrapped
eager-compatible function will always return tensors in the same nested
structure.
* Variables are replaced with a tensor containing the latest read values.
* Returned ops are executed, and replaced with None.
* The order of op execution and variable reads in the return is
nondeterministic. For example:
```
def update_var(x):
v = tf.Variable(0)
op = tf.compat.v1.assign(v, x).op
return v, op
g = WrappedGraph()
fn = g.wrap_function(update_var)
read_value, _ = fn(tf.constant(3))
print(read_value.numpy()) # could be 0 or 3
print(g.variables[0].numpy()) # always 3
```
To ensure that ops in the function are executed (e.g. ops added to the
`tf.GraphKeys.UPDATE_OPS` collection), include them in the function returns.
Args:
fn: a 1.X tensorflow function.
signature: a possibly nested sequence of `TensorSpecs` specifying the
shapes and dtypes of the arguments.
name: an optional string name for the function. The function will be saved
with key `name` in the `functions` dictionary.
Returns:
An eager-compatible function.
"""
return self._wrap_function(fn, signature=signature, name=name)
def _wrap_function(self,
fn,
args=None,
kwargs=None,
signature=None,
name=None):
"""Internal wrap function method with extended func_graph arguments."""
fn_with_filter_and_scope, returned_ops = _filter_returned_ops(
self._variable_holder.call_with_variable_creator_scope(fn))
func_graph.func_graph_from_py_func(
None, # Name is unused.
fn_with_filter_and_scope,
args=args,
kwargs=kwargs,
signature=signature,
add_control_dependencies=False,
func_graph=self.graph)
    # This code relies on questionable behavior from `func_graph_from_py_func`.
    # If an existing FuncGraph is passed into the `func_graph` arg, the inputs
    # and structured outputs are overwritten. Pretty sure this is a bug,
    # because the structured outputs don't match up with the outputs...
fn_inputs = self.graph.inputs[:-len(self.graph.captures)]
# Return filtered ops to the flattened outputs.
flat_fn_outputs = nest.flatten(self.graph.structured_outputs)
for index, op in returned_ops.items():
flat_fn_outputs[index] = op
fn_outputs = nest.pack_sequence_as(self.graph.structured_outputs,
flat_fn_outputs)
name = name or fn.__name__
wrapped_function = self._wrapped_function.prune(
fn_inputs, fn_outputs, name, self.graph.structured_input_signature)
self._functions[name] = wrapped_function
return wrapped_function
@tf_export(v1=["wrap_function"])
def wrap_function(fn, signature, name=None):
"""Wraps the TF 1.x function fn into a graph function.
The python function `fn` will be called once with symbolic arguments specified
in the `signature`, traced, and turned into a graph function. Any variables
created by `fn` will be owned by the object returned by `wrap_function`. The
resulting graph function can be called with tensors which match the
signature.
```python
def f(x, do_add):
v = tf.Variable(5.0)
if do_add:
op = v.assign_add(x)
else:
op = v.assign_sub(x)
with tf.control_dependencies([op]):
return v.read_value()
f_add = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), True])
assert float(f_add(1.0)) == 6.0
assert float(f_add(1.0)) == 7.0
# Can call tf.compat.v1.wrap_function again to get a new trace, a new set
# of variables, and possibly different non-template arguments.
f_sub= tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), False])
assert float(f_sub(1.0)) == 4.0
assert float(f_sub(1.0)) == 3.0
```
Both `tf.compat.v1.wrap_function` and `tf.function` create a callable
TensorFlow graph. But while `tf.function` runs all stateful operations
(e.g. `tf.print`) and sequences operations to provide the same semantics as
eager execution, `wrap_function` is closer to the behavior of `session.run` in
TensorFlow 1.x. It will not run any operations unless they are required to
compute the function's outputs, either through a data dependency or a control
dependency. Nor will it sequence operations.
Unlike `tf.function`, `wrap_function` will only trace the Python function
once. As with placeholders in TF 1.x, shapes and dtypes must be provided to
`wrap_function`'s `signature` argument.
Since it is only traced once, variables and state may be created inside the
function and owned by the function wrapper object.
Args:
fn: python function to be wrapped
signature: the placeholder and python arguments to be passed to the wrapped
function
name: Optional. The name of the function.
Returns:
the wrapped graph function.
"""
holder = VariableHolder(fn)
func_graph_name = "wrapped_function"
if name is not None:
func_graph_name = "wrapped_function_" + name
return WrappedFunction(
func_graph.func_graph_from_py_func(
func_graph_name,
holder,
args=None,
kwargs=None,
signature=signature,
add_control_dependencies=False,
collections={}),
variable_holder=holder,
signature=signature)
def function_from_graph_def(graph_def, inputs, outputs):
"""Creates a ConcreteFunction from a GraphDef.
Args:
graph_def: A GraphDef to make a function out of.
inputs: A Tensor name or nested structure of names in `graph_def` which
should be inputs to the function.
outputs: A Tensor name or nested structure of names in `graph_def` which
should be outputs of the function.
Returns:
A ConcreteFunction.
"""
def _imports_graph_def():
importer.import_graph_def(graph_def, name="")
wrapped_import = wrap_function(_imports_graph_def, [])
import_graph = wrapped_import.graph
return wrapped_import.prune(
nest.map_structure(import_graph.as_graph_element, inputs),
nest.map_structure(import_graph.as_graph_element, outputs))
| [
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.variable_scope.variable_creator_scope",
"tensorflow.python.framework.ops.name_from_scope_name",
"tensorflow.python.eager.context.context",
"tensorflow.python.framework.ops.get_collection_ref",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.core.protobuf.struct_pb2.StructuredValue",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.framework.func_graph.func_graph_from_py_func",
"tensorflow.python.framework.importer.import_graph_def",
"tensorflow.python.framework.func_graph.FuncGraph",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.saved_model.nested_structure_coder.StructureCoder",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.framework.ops.add_to_collections",
"tensorflow.python.training.tracking.data_structures.Mapping",
"tensorflow.python.ops.resource_variable_ops.UninitializedVariable",
"tensorflow.python.eager.lift_to_graph.lift_to_graph",
"tensorflow.python.ops.resource_variable_ops.is_resource_variable",
"tensorflow.python.util.nest.flatten"
] | tensorflow/python/eager/wrap_function.py | [(540, 'tensorflow.python.util.tf_export.tf_export', 'tf_export', ([], {'v1': "['wrap_function']"}), False, 'from tensorflow.python.util.tf_export import tf_export\n'), (123, 'tensorflow.python.ops.resource_variable_ops.UninitializedVariable', 'resource_variable_ops.UninitializedVariable', ([], {'shape': 'old_variable.shape', 'dtype': 'old_variable.dtype', 'name': 'old_variable.op.name', 'trainable': 'old_variable.trainable', 'extra_handle_data': 'old_variable.handle'}), False, 'from tensorflow.python.ops import resource_variable_ops\n'), (52, 'tensorflow.python.training.tracking.data_structures.Mapping', 'data_structures.Mapping', ([], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (80, 'tensorflow.python.framework.ops.add_to_collections', 'ops.add_to_collections', (['collections', 'v'], {}), False, 'from tensorflow.python.framework import ops\n'), (138, 'weakref.ref', 'weakref.ref', (['new_variable'], {}), False, 'import weakref\n'), (160, 'tensorflow.python.framework.ops.get_collection', 'ops.get_collection', (['ops.GraphKeys.GLOBAL_VARIABLES'], {}), False, 'from tensorflow.python.framework import ops\n'), (162, 'tensorflow.python.framework.ops.get_collection', 'ops.get_collection', (['ops.GraphKeys.LOCAL_VARIABLES'], {}), False, 'from tensorflow.python.framework import ops\n'), (257, 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['feeds'], {'expand_composites': '(True)'}), False, 'from tensorflow.python.util import nest\n'), (304, 'tensorflow.python.util.nest.map_structure', 'nest.map_structure', (['_fetch_preprocesing_callback', 'fetches'], {}), False, 'from tensorflow.python.util import nest\n'), (307, 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['tensor_fetches'], {'expand_composites': '(True)'}), False, 'from tensorflow.python.util import nest\n'), (316, 'tensorflow.python.eager.lift_to_graph.lift_to_graph', 'lift_to_graph.lift_to_graph', (['(operation_fetches + tensor_fetches)', 'pruned_graph'], {'sources': '(flat_feeds + self.graph.internal_captures)'}), False, 'from tensorflow.python.eager import lift_to_graph\n'), (352, 'tensorflow.python.util.nest.map_structure', 'nest.map_structure', (['_structured_output_mapping', 'fetches'], {'expand_composites': '(True)'}), False, 'from tensorflow.python.util import nest\n'), (379, 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['outputs'], {}), False, 'from tensorflow.python.util import nest\n'), (385, 'tensorflow.python.util.nest.pack_sequence_as', 'nest.pack_sequence_as', (['outputs', 'flat_outputs'], {}), False, 'from tensorflow.python.util import nest\n'), (438, 'tensorflow.python.framework.func_graph.FuncGraph', 'func_graph.FuncGraph', (['name'], {'collections': 'collections'}), False, 'from tensorflow.python.framework import func_graph\n'), (511, 'tensorflow.python.framework.func_graph.func_graph_from_py_func', 'func_graph.func_graph_from_py_func', (['None', 'fn_with_filter_and_scope'], {'args': 'args', 'kwargs': 'kwargs', 'signature': 'signature', 'add_control_dependencies': '(False)', 'func_graph': 'self.graph'}), False, 'from tensorflow.python.framework import func_graph\n'), (527, 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['self.graph.structured_outputs'], {}), False, 'from tensorflow.python.util import nest\n'), (530, 'tensorflow.python.util.nest.pack_sequence_as', 'nest.pack_sequence_as', (['self.graph.structured_outputs', 'flat_fn_outputs'], {}), False, 'from tensorflow.python.util import nest\n'), (602, 'tensorflow.python.framework.func_graph.func_graph_from_py_func', 'func_graph.func_graph_from_py_func', (['func_graph_name', 'holder'], {'args': 'None', 'kwargs': 'None', 'signature': 'signature', 'add_control_dependencies': '(False)', 'collections': '{}'}), False, 'from tensorflow.python.framework import func_graph\n'), (629, 'tensorflow.python.framework.importer.import_graph_def', 'importer.import_graph_def', (['graph_def'], {'name': '""""""'}), False, 'from tensorflow.python.framework import importer\n'), (634, 'tensorflow.python.util.nest.map_structure', 'nest.map_structure', (['import_graph.as_graph_element', 'inputs'], {}), False, 'from tensorflow.python.util import nest\n'), (635, 'tensorflow.python.util.nest.map_structure', 'nest.map_structure', (['import_graph.as_graph_element', 'outputs'], {}), False, 'from tensorflow.python.util import nest\n'), (65, 'tensorflow.python.framework.ops.name_from_scope_name', 'ops.name_from_scope_name', (['name'], {}), False, 'from tensorflow.python.framework import ops\n'), (202, 'tensorflow.python.framework.ops.get_collection_ref', 'ops.get_collection_ref', (['collection_name'], {}), False, 'from tensorflow.python.framework import ops\n'), (315, 'tensorflow.python.framework.func_graph.FuncGraph', 'func_graph.FuncGraph', (['name'], {}), False, 'from tensorflow.python.framework import func_graph\n'), (90, 'tensorflow.python.ops.variable_scope.variable_creator_scope', 'variable_scope.variable_creator_scope', (['self.variable_creator_scope'], {}), False, 'from tensorflow.python.ops import variable_scope\n'), (110, 'tensorflow.python.saved_model.nested_structure_coder.StructureCoder', 'nested_structure_coder.StructureCoder', ([], {}), False, 'from tensorflow.python.saved_model import nested_structure_coder\n'), (111, 'tensorflow.core.protobuf.struct_pb2.StructuredValue', 'struct_pb2.StructuredValue', ([], {'type_spec_value': 'tensor_info.composite_tensor.type_spec'}), False, 'from tensorflow.core.protobuf import struct_pb2\n'), (333, 'tensorflow.python.framework.tensor_util.is_tensor', 'tensor_util.is_tensor', (['t'], {}), False, 'from tensorflow.python.framework import tensor_util\n'), (205, 'tensorflow.python.ops.resource_variable_ops.is_resource_variable', 'resource_variable_ops.is_resource_variable', (['mutable_collection[index]'], {}), False, 'from tensorflow.python.ops import resource_variable_ops\n'), (227, 'tensorflow.python.eager.context.context', 'context.context', ([], {}), False, 'from tensorflow.python.eager import context\n'), (291, 'tensorflow.python.framework.tensor_util.is_tensor', 'tensor_util.is_tensor', (['decoded'], {}), False, 'from tensorflow.python.framework import tensor_util\n'), (334, 'tensorflow.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', (['ti.tensor_shape'], {}), False, 'from tensorflow.python.framework import tensor_shape\n')] |
lenna-project/birds-plugin | c548790dcb0593b80ea6da4605e7aa32e3f141ae | import logging
import numpy as np
import os
import PIL
import PIL.Image
import tensorflow as tf
from tensorflow.keras.layers import Layer, Conv2D, MaxPool2D, Dense, Flatten, Dropout, GlobalAveragePooling2D
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras import layers
from tensorflow.keras import Model
img_height = 224
img_width = 224
batch_size = 64
data_dir = './100-bird-species/'
data_dir_train = os.path.join(data_dir, 'train')
data_dir_valid = os.path.join(data_dir, 'valid')
data_dir_test = os.path.join(data_dir, 'test')
train_ds = tf.keras.utils.image_dataset_from_directory(
data_dir_train,
label_mode='categorical',
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
valid_ds = tf.keras.utils.image_dataset_from_directory(
data_dir_valid,
label_mode='categorical',
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
test_ds = tf.keras.utils.image_dataset_from_directory(
data_dir_test,
label_mode='categorical',
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
def normalize(img, label):
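    # Scale pixel values from [0, 255] to [0, 1].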
return img / 255.0, label
data_augmentation = tf.keras.Sequential([
tf.keras.layers.RandomFlip("horizontal"),
tf.keras.layers.RandomRotation(0.2),
tf.keras.layers.RandomZoom(0.2)
])
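# The augmentation pipeline is mapped over the training split only; validation
# and test data are merely normalized.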
train_dataset = (train_ds
.map(normalize)
.map(lambda x, y: (data_augmentation(x), y))
.prefetch(tf.data.AUTOTUNE))
valid_dataset = valid_ds.map(normalize)
test_dataset = test_ds.map(normalize)
def get_birds_mobilenet():
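    # Transfer learning: freeze the ImageNet-pretrained MobileNetV2 backbone
    # and train only the new pooling + dense head for the 325 bird classes.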
pre_trained_model = MobileNetV2(
include_top=False,
input_shape=(img_height, img_width, 3),
classifier_activation='softmax'
)
for layer in pre_trained_model.layers:
layer.trainable = False
    # `pre_trained_model.output` is a tensor, so setting `.trainable` on it is
    # a no-op; trainability is controlled per layer above.
    last_layer = pre_trained_model.output
x = GlobalAveragePooling2D()(last_layer)
x = Dense(1024, activation='relu')(x)
x = layers.Dense(325, activation='softmax')(x)
model = Model(pre_trained_model.input, x)
return model
model = get_birds_mobilenet()
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
checkpoint_path = "./checkpoints/birds_mobilenet/"
model.load_weights(checkpoint_path)
model_history = model.fit(
train_dataset,
validation_data=valid_dataset,
epochs=200,
callbacks=[
#tf.keras.callbacks.EarlyStopping(patience=5),
tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path, verbose=0, save_freq="epoch")
])
| [
"tensorflow.keras.utils.image_dataset_from_directory",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.layers.RandomFlip",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Model",
"tensorflow.keras.layers.RandomZoom",
"tensorflow.keras.layers.RandomRotation",
"tensorflow.keras.applications.MobileNetV2"
] | scripts/train.py | [(18, 'os.path.join', 'os.path.join', (['data_dir', '"""train"""'], {}), False, 'import os\n'), (19, 'os.path.join', 'os.path.join', (['data_dir', '"""valid"""'], {}), False, 'import os\n'), (20, 'os.path.join', 'os.path.join', (['data_dir', '"""test"""'], {}), False, 'import os\n'), (22, 'tensorflow.keras.utils.image_dataset_from_directory', 'tf.keras.utils.image_dataset_from_directory', (['data_dir_train'], {'label_mode': '"""categorical"""', 'seed': '(123)', 'image_size': '(img_height, img_width)', 'batch_size': 'batch_size'}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.keras.utils.image_dataset_from_directory', 'tf.keras.utils.image_dataset_from_directory', (['data_dir_valid'], {'label_mode': '"""categorical"""', 'seed': '(123)', 'image_size': '(img_height, img_width)', 'batch_size': 'batch_size'}), True, 'import tensorflow as tf\n'), (36, 'tensorflow.keras.utils.image_dataset_from_directory', 'tf.keras.utils.image_dataset_from_directory', (['data_dir_test'], {'label_mode': '"""categorical"""', 'seed': '(123)', 'image_size': '(img_height, img_width)', 'batch_size': 'batch_size'}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.keras.applications.MobileNetV2', 'MobileNetV2', ([], {'include_top': '(False)', 'input_shape': '(img_height, img_width, 3)', 'classifier_activation': '"""softmax"""'}), False, 'from tensorflow.keras.applications import MobileNetV2\n'), (80, 'tensorflow.keras.Model', 'Model', (['pre_trained_model.input', 'x'], {}), False, 'from tensorflow.keras import Model\n'), (49, 'tensorflow.keras.layers.RandomFlip', 'tf.keras.layers.RandomFlip', (['"""horizontal"""'], {}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.keras.layers.RandomRotation', 'tf.keras.layers.RandomRotation', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.keras.layers.RandomZoom', 'tf.keras.layers.RandomZoom', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (76, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), False, 'from tensorflow.keras.layers import Layer, Conv2D, MaxPool2D, Dense, Flatten, Dropout, GlobalAveragePooling2D\n'), (77, 'tensorflow.keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Layer, Conv2D, MaxPool2D, Dense, Flatten, Dropout, GlobalAveragePooling2D\n'), (78, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(325)'], {'activation': '"""softmax"""'}), False, 'from tensorflow.keras import layers\n'), (99, 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': 'checkpoint_path', 'verbose': '(0)', 'save_freq': '"""epoch"""'}), True, 'import tensorflow as tf\n')] |