repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/tools/merge_qrels.py | from utils import load_from_trec
import argparse
import torch
import csv
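# Merge a gold qrels file with pseudo-positive labels: for every query in the run,
# the gold positives from --qrels_path are written as-is, and the two documents
# ranked highest by the scores tensor (--scores_path, one row per query over the
# run's top-10 docids) are added as extra positives when not already gold.
# The script also prints the overlap between those top-2 documents and the gold set.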
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--scores_path", type=str)
parser.add_argument("--qrels_path", type=str)
parser.add_argument("--save_path", type=str)
parser.add_argument("run")
args = parser.parse_args()
scores = torch.load(args.scores_path)
print(scores.size())
run = load_from_trec(args.run, as_list=True)
g = open(args.save_path, "w")
qrels = {}
with open(args.qrels_path, encoding="utf8") as f:
tsvreader = csv.reader(f, delimiter="\t")
for [qid, _, docid, rel] in tsvreader:
assert rel == "1"
if qid in qrels:
qrels[qid].append(docid)
else:
qrels[qid] = [docid]
id = 0
sum, overlap = 0, 0
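# `id` walks rows of the scores tensor in run order; `sum` accumulates the number of
# gold positives and `overlap` counts top-2 scored documents that are already gold.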
for qid, rank_list in run.items():
docids = []
for doc_rank, (docid, _) in enumerate(rank_list):
docids.append(docid)
if len(docids) == 10:
break
sort_scores, sort_index = torch.sort(scores[id], descending=True)
for docid in qrels[qid]:
# pass
g.write(f"{qid}\t0\t{docid}\t1\n")
sum += len(qrels[qid])
for i in sort_index[:2]:
if docids[i] not in qrels[qid]:
# pass
g.write(f"{qid}\t0\t{docids[i]}\t1\n")
else:
overlap += 1
id += 1
if id >= scores.size(0):
break
print(overlap, sum, overlap / sum)
| 1,581 | 28.296296 | 73 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/tools/transform.py | # coding:utf-8
import torch
import argparse
import os
import tqdm
import copy
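# transform_new_model maps Hugging Face T5 parameter names to the target
# Megatron-style key layout: per layer, the separate q/k/v (and cross-attention
# k/v) matrices are concatenated into fused "project" weights, layer norms and
# feed-forward weights are copied, and a few weights are divided by fixed factors
# (/100, /10), presumably to match the scaling used inside the target implementation.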
def transform_new_model(model_hf, layer_num):
model_new = {}
cnt = 0
for i in range(layer_num):
# encoder
target_k = "encoder.blocks.{}.self_attn.self_attn.project.weight".format(i)
source = [
"encoder.block.{}.layer.0.SelfAttention.q.weight".format(i),
"encoder.block.{}.layer.0.SelfAttention.k.weight".format(i),
"encoder.block.{}.layer.0.SelfAttention.v.weight".format(i),
]
# qkv
model_new[target_k] = torch.cat([model_hf[x] for x in source], 0)
cnt += 3
target_k = "encoder.blocks.{}.self_attn.self_attn.dense.weight".format(i)
source = "encoder.block.{}.layer.0.SelfAttention.o.weight".format(i)
model_new[target_k] = model_hf[source] / 100
cnt += 1
target_k = "encoder.blocks.{}.self_attn.layer_norm.weight".format(i)
source = "encoder.block.{}.layer.0.layer_norm.weight".format(i)
model_new[target_k] = model_hf[source]
cnt += 1
target_k = "encoder.blocks.{}.ff.dense_relu_dense.wi_0.weight".format(i)
source = "encoder.block.{}.layer.1.DenseReluDense.wi_0.weight".format(i)
model_new[target_k] = model_hf[source]
cnt += 1
target_k = "encoder.blocks.{}.ff.dense_relu_dense.wi_1.weight".format(i)
source = "encoder.block.{}.layer.1.DenseReluDense.wi_1.weight".format(i)
model_new[target_k] = model_hf[source] / 10
cnt += 1
target_k = "encoder.blocks.{}.ff.dense_relu_dense.wo.weight".format(i)
source = "encoder.block.{}.layer.1.DenseReluDense.wo.weight".format(i)
model_new[target_k] = model_hf[source] / 10
cnt += 1
target_k = "encoder.blocks.{}.ff.layer_norm.weight".format(i)
source = "encoder.block.{}.layer.1.layer_norm.weight".format(i)
model_new[target_k] = model_hf[source]
cnt += 1
# decoder
target_k = "decoder.blocks.{}.self_attn.self_attn.project.weight".format(i)
source = [
"decoder.block.{}.layer.0.SelfAttention.q.weight".format(i),
"decoder.block.{}.layer.0.SelfAttention.k.weight".format(i),
"decoder.block.{}.layer.0.SelfAttention.v.weight".format(i),
]
# qkv
model_new[target_k] = torch.cat([model_hf[x] for x in source], 0)
cnt += 3
target_k = "decoder.blocks.{}.cross_attn.cross_attn.project_kv.weight".format(i)
source = [
"decoder.block.{}.layer.1.EncDecAttention.k.weight".format(i),
"decoder.block.{}.layer.1.EncDecAttention.v.weight".format(i),
]
# kv
model_new[target_k] = torch.cat([model_hf[x] for x in source], 0)
cnt += 2
target_k = "decoder.blocks.{}.cross_attn.cross_attn.project_q.weight".format(i)
source = "decoder.block.{}.layer.1.EncDecAttention.q.weight".format(i)
model_new[target_k] = model_hf[source]
cnt += 1
target_k = "decoder.blocks.{}.cross_attn.cross_attn.dense.weight".format(i)
source = "decoder.block.{}.layer.1.EncDecAttention.o.weight".format(i)
model_new[target_k] = model_hf[source] / 100
cnt += 1
target_k = "decoder.blocks.{}.cross_attn.layer_norm.weight".format(i)
source = "decoder.block.{}.layer.1.layer_norm.weight".format(i)
model_new[target_k] = model_hf[source]
cnt += 1
target_k = "decoder.blocks.{}.self_attn.self_attn.dense.weight".format(i)
source = "decoder.block.{}.layer.0.SelfAttention.o.weight".format(i)
model_new[target_k] = model_hf[source] / 100
cnt += 1
target_k = "decoder.blocks.{}.self_attn.layer_norm.weight".format(i)
source = "decoder.block.{}.layer.0.layer_norm.weight".format(i)
model_new[target_k] = model_hf[source]
cnt += 1
target_k = "decoder.blocks.{}.ff.dense_relu_dense.wi_0.weight".format(i)
source = "decoder.block.{}.layer.2.DenseReluDense.wi_0.weight".format(i)
model_new[target_k] = model_hf[source]
cnt += 1
target_k = "decoder.blocks.{}.ff.dense_relu_dense.wi_1.weight".format(i)
source = "decoder.block.{}.layer.2.DenseReluDense.wi_1.weight".format(i)
model_new[target_k] = model_hf[source] / 10
cnt += 1
target_k = "decoder.blocks.{}.ff.dense_relu_dense.wo.weight".format(i)
source = "decoder.block.{}.layer.2.DenseReluDense.wo.weight".format(i)
model_new[target_k] = model_hf[source] / 10
cnt += 1
target_k = "decoder.blocks.{}.ff.layer_norm.weight".format(i)
source = "decoder.block.{}.layer.2.layer_norm.weight".format(i)
model_new[target_k] = model_hf[source]
cnt += 1
source = "shared.weight"
target_k = "word_embeds.weight"
embeds = model_hf[source]
model_new[target_k] = embeds / 100
target_k = "encoder.word_embeds.weight"
model_new[target_k] = embeds / 100
target_k = "decoder.word_embeds.weight"
model_new[target_k] = embeds / 100
cnt += 3
source = "encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"
target_k = "encoder.blocks.0.self_attn.self_attn.relative_attention_bias.weight"
model_new[target_k] = model_hf[source]
cnt += 1
source = "decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"
target_k = "decoder.blocks.0.self_attn.self_attn.relative_attention_bias.weight"
model_new[target_k] = model_hf[source]
cnt += 1
source = "lm_head.weight"
target_k = "lm_head.weight"
embeds = model_hf[source]
model_new[target_k] = embeds
cnt += 1
source = "encoder.final_layer_norm.weight"
target_k = "encoder.final_layernorm.weight"
model_new[target_k] = model_hf[source]
cnt += 1
source = "decoder.final_layer_norm.weight"
target_k = "decoder.final_layernorm.weight"
model_new[target_k] = model_hf[source]
cnt += 1
print("new module number:", cnt, "origin module number:", len(model_hf))
return {"module": model_new}
def change_mp(d, output_dir, mp_size, half=False):
os.makedirs(output_dir, exist_ok=True)
os.makedirs(os.path.join(output_dir, "1"), exist_ok=True)
with open(os.path.join(output_dir, "latest_checkpointed_iteration.txt"), "w") as f:
f.write(str(1) + "\n")
preserve_keys = [
"lr_scheduler",
"skipped_steps",
"global_steps",
"global_samples",
"dp_world_size",
"iteration",
]
dd = {}
dd["lr_scheduler"] = {}
dd["lr_scheduler"]["num_iters"] = 1
dd["lr_scheduler"]["start_lr"] = 0.001
dd["lr_scheduler"]["warmup_iter"] = 10000
dd["skipped_steps"] = 0
dd["global_steps"] = 1
dd["global_samples"] = 100
dd["iteration"] = 1
dd["dp_world_size"] = 1
print("Increase MP size.")
ratio = mp_size
start = 0
end = ratio
for j in tqdm.tqdm(range(start, end)):
d_new = {}
shift = j - start
for k, v in dd.items():
if k != "module":
if k in preserve_keys:
d_new[k] = copy.deepcopy(dd[k])
elif k == "mp_world_size":
d_new[k] = ratio
else:
d_new[k] = None
d_new["module"] = {}
for k, v in d["module"].items():
assert len(v.shape) < 3
if len(v.shape) == 2:
if "project.weight" in k:
part = v.shape[0] // ratio // 3
d_new["module"][k] = torch.cat(
[
v[shift * part : (shift + 1) * part, :],
v[(shift + ratio) * part : (shift + 1 + ratio) * part, :],
v[
(shift + 2 * ratio)
* part : (shift + 1 + 2 * ratio)
* part,
:,
],
],
0,
)
elif "project_q.weight" in k:
part = v.shape[0] // ratio
d_new["module"][k] = v[shift * part : (shift + 1) * part, :]
elif "project_kv.weight" in k:
part = v.shape[0] // ratio // 2
d_new["module"][k] = torch.cat(
[
v[shift * part : (shift + 1) * part, :],
v[(shift + ratio) * part : (shift + 1 + ratio) * part, :],
],
0,
)
elif (
"word_embeds.weight" in k
or "dense_relu_dense.wi_1.weight" in k
or "dense_relu_dense.wi_0.weight" in k
or "lm_head.weight" in k
):
part = v.shape[0] // ratio
d_new["module"][k] = v[shift * part : (shift + 1) * part, :]
else:
part = v.shape[1] // ratio
d_new["module"][k] = v[:, shift * part : (shift + 1) * part]
else:
d_new["module"][k] = v
if half:
d_new["module"][k] = d_new["module"][k].half()
filename = os.path.join(
output_dir, "1", "mp_rank_0{}_model_states.pt".format(j)
)
torch.save(d_new, filename)
def main():
parser = argparse.ArgumentParser(
"Transform huggingface checkpoints to megatron+deepspeed checkpoints"
)
parser.add_argument("--hf_path", type=str)
parser.add_argument("--ext_path", type=str, default="")
parser.add_argument("--mp_size", type=int, default=1)
parser.add_argument("--save_path", type=str)
parser.add_argument("--half", action="store_true")
args = parser.parse_args()
model_hf = torch.load(args.hf_path, map_location="cpu")
if args.ext_path:
model_ext = torch.load(args.ext_path, map_location="cpu")
model_hf.update(model_ext)
print(len(model_hf))
new_model = transform_new_model(model_hf, 12 if "base" in args.save_path else 24)
change_mp(new_model, args.save_path, args.mp_size, half=args.half)
if __name__ == "__main__":
main()
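# Example invocation (paths are hypothetical):
#   python tools/transform.py --hf_path /path/to/t5-base/pytorch_model.bin \
#       --save_path checkpoints/t5-base-mp1 --mp_size 1 --half
# Note that the layer count is inferred from --save_path: 12 if it contains
# "base", otherwise 24.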
| 10,433 | 34.610922 | 88 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/tools/utils.py | # Adapted from Tevatron (https://github.com/texttron/tevatron)
import csv
import json
import warnings
from dataclasses import dataclass
from typing import Dict, List
import datasets
import torch
from transformers import PreTrainedTokenizer
@dataclass
class SimpleTrainPreProcessor:
query_file: str
collection_file: str
tokenizer: PreTrainedTokenizer
doc_max_len: int = 128
query_max_len: int = 32
columns = ['text_id', 'title', 'text']
title_field = 'title'
text_field = 'text'
query_field = 'text'
doc_template: str = None
query_template: str = None
allow_not_found: bool = False
def __post_init__(self):
self.queries = self.read_queries(self.query_file)
self.collection = datasets.load_dataset(
'csv',
data_files=self.collection_file,
column_names=self.columns,
delimiter='\t',
)['train']
@staticmethod
def read_queries(queries):
qmap = {}
with open(queries) as f:
for l in f:
qid, qry = l.strip().split('\t')
qmap[qid] = qry
return qmap
@staticmethod
def read_qrel(relevance_file):
qrel = {}
with open(relevance_file, encoding='utf8') as f:
tsvreader = csv.reader(f, delimiter="\t")
for [topicid, _, docid, rel] in tsvreader:
assert rel == "1"
if topicid in qrel:
qrel[topicid].append(docid)
else:
qrel[topicid] = [docid]
return qrel
def get_query(self, q):
if self.query_template is None:
query = self.queries[q]
else:
query = fill_template(self.query_template, data={self.query_field: self.queries[q]}, allow_not_found=self.allow_not_found)
query_encoded = self.tokenizer.encode(
query,
add_special_tokens=False,
max_length=self.query_max_len,
truncation=True
)
return query_encoded
def get_passage(self, p):
entry = self.collection[int(p)]
title = entry[self.title_field]
title = "" if title is None else title
body = entry[self.text_field]
if self.doc_template is None:
content = title + self.tokenizer.sep_token + body
else:
content = fill_template(self.doc_template, data=entry, allow_not_found=self.allow_not_found)
passage_encoded = self.tokenizer.encode(
content,
add_special_tokens=False,
max_length=self.doc_max_len,
truncation=True
)
return passage_encoded
def process_one(self, train):
q, pp, nn = train
train_example = {
'query': self.get_query(q),
'positives': [self.get_passage(p) for p in pp],
'negatives': [self.get_passage(n) for n in nn],
}
return json.dumps(train_example)
@dataclass
class SimpleCollectionPreProcessor:
tokenizer: PreTrainedTokenizer
separator: str = '\t'
max_length: int = 128
def process_line(self, line: str):
xx = line.strip().split(self.separator)
text_id, text = xx[0], xx[1:]
text_encoded = self.tokenizer.encode(
self.tokenizer.sep_token.join(text),
add_special_tokens=False,
max_length=self.max_length,
truncation=True
)
encoded = {
'text_id': text_id,
'text': text_encoded
}
return json.dumps(encoded)
def save_as_trec(rank_result: Dict[str, Dict[str, float]], output_path: str, run_id: str = "OpenMatch"):
"""
Save the rank result as TREC format:
<query_id> Q0 <doc_id> <rank> <score> <run_id>
"""
with open(output_path, "w") as f:
for qid in rank_result:
# sort the results by score
sorted_results = sorted(rank_result[qid].items(), key=lambda x: x[1], reverse=True)
for i, (doc_id, score) in enumerate(sorted_results):
f.write("{} Q0 {} {} {} {}\n".format(qid, doc_id, i + 1, score, run_id))
def load_from_trec(input_path: str, as_list: bool = False, max_len_per_q: int = None):
"""
Load the rank result from TREC format:
<query_id> Q0 <doc_id> <rank> <score> <run_id> or
<query_id> <doc_id> <score>
"""
rank_result = {}
cnt = 0
with open(input_path, "r") as f:
for line in f:
content = line.strip().split()
if len(content) == 6:
qid, _, doc_id, _, score, _ = content
elif len(content) == 3:
qid, doc_id, score = content
else:
raise ValueError("Invalid run format")
if not as_list:
if qid not in rank_result:
rank_result[qid] = {}
cnt = 0
if max_len_per_q is None or cnt < max_len_per_q:
rank_result[qid][doc_id] = float(score)
else:
if qid not in rank_result:
rank_result[qid] = []
cnt = 0
if max_len_per_q is None or cnt < max_len_per_q:
rank_result[qid].append((doc_id, float(score)))
cnt += 1
return rank_result
def find_all_markers(template: str):
"""
Find all markers' names (quoted in "<>") in a template.
"""
markers = []
start = 0
while True:
start = template.find("<", start)
if start == -1:
break
end = template.find(">", start)
if end == -1:
break
markers.append(template[start + 1:end])
start = end + 1
return markers
def fill_template(template: str, data: Dict, markers: List[str] = None, allow_not_found: bool = False):
"""
Fill a template with data.
"""
if markers is None:
markers = find_all_markers(template)
for marker in markers:
marker_hierarchy = marker.split(".")
found = True
content = data
for marker_level in marker_hierarchy:
content = content.get(marker_level, None)
if content is None:
found = False
break
if not found:
if allow_not_found:
warnings.warn("Marker '{}' not found in data. Replacing it with an empty string.".format(marker), RuntimeWarning)
content = ""
else:
raise ValueError("Cannot find the marker '{}' in the data".format(marker))
template = template.replace("<{}>".format(marker), str(content))
return template
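# Example: fill_template("Title: <title> Text: <text>", {"title": "t", "text": "x"})
# returns "Title: t Text: x"; nested markers such as <a.b> resolve via data["a"]["b"].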
def merge_retrieval_results_by_score(results: List[Dict[str, Dict[str, float]]], topk: int = 100):
"""
Merge retrieval results from multiple partitions of document embeddings and keep topk.
"""
merged_results = {}
for result in results:
for qid in result:
if qid not in merged_results:
merged_results[qid] = {}
for doc_id in result[qid]:
if doc_id not in merged_results[qid]:
merged_results[qid][doc_id] = result[qid][doc_id]
for qid in merged_results:
merged_results[qid] = {k: v for k, v in sorted(merged_results[qid].items(), key=lambda x: x[1], reverse=True)[:topk]}
return merged_results
# Mean Pooling - Take attention mask into account for correct averaging
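# token_embeddings: (batch, seq_len, hidden); attention_mask: (batch, seq_len).
# Returns the mask-weighted mean over the sequence dimension, shape (batch, hidden).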
def mean_pooling(token_embeddings, attention_mask):
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
| 7,762 | 31.894068 | 134 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/tools/ds_fix/engine.py | '''
Copyright 2019 The Microsoft DeepSpeed Team
'''
import os
import time
import torch
import warnings
import torch.distributed as dist
from torch.nn.modules import Module
from torch.distributed.distributed_c10d import _get_global_rank
from tensorboardX import SummaryWriter
from deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer
from deepspeed.runtime.zero.stage1 import FP16_DeepSpeedZeroOptimizer_Stage1
from deepspeed.runtime.zero.utils import is_zero_supported_optimizer
from deepspeed.runtime.activation_checkpointing import checkpointing as activation_checkpointing
from deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer
from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer
from deepspeed.runtime.config import DeepSpeedConfig, DEEPSPEED_OPTIMIZERS, \
ADAM_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, \
TORCH_ADAM_PARAM, ADAM_W_MODE_PARAM
from deepspeed.runtime.dataloader import DeepSpeedDataLoader
from deepspeed.runtime.constants import \
ROUTE_TRAIN, ROUTE_PREDICT, ROUTE_EVAL, \
PLD_THETA, PLD_GAMMA
from deepspeed.runtime.zero.constants import \
ZERO_OPTIMIZATION_OPTIMIZER_STATES, ZERO_OPTIMIZATION_GRADIENTS
from deepspeed.runtime.csr_tensor import CSRTensor
import deepspeed.runtime.lr_schedules as lr_schedules
from deepspeed.utils import logger, log_dist, init_distributed
from deepspeed.utils.timer import ThroughputTimer, SynchronizedWallClockTimer
from deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop
from .pipe.module import PipelineModule
from .utils import ensure_directory_exists
from ..ops.op_builder import UtilsBuilder
from ..ops.adam import DeepSpeedCPUAdam
from ..ops.adam import FusedAdam
MEMORY_OPT_ALLREDUCE_SIZE = 500000000
try:
from apex import amp
except ImportError:
# Fail silently so we don't spam logs unnecessarily if user isn't using amp
pass
def split_half_float_double_csr(tensors):
dtypes = [
"torch.cuda.HalfTensor",
"torch.cuda.FloatTensor",
"torch.cuda.DoubleTensor",
CSRTensor.type()
]
buckets = []
for i, dtype in enumerate(dtypes):
bucket = [t for t in tensors if t.type() == dtype]
if bucket:
buckets.append((dtype, bucket))
return buckets
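# The per-dtype buckets above let buffered_allreduce_fallback flatten and
# all-reduce tensors of the same type together (CSR tensors take a sparse path).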
def _initialize_parameter_parallel_groups(parameter_parallel_size=None):
data_parallel_size = int(dist.get_world_size())
if parameter_parallel_size is None:
parameter_parallel_size = int(data_parallel_size)
logger.info("data_parallel_size: %s, parameter_parallel_size: %s",
data_parallel_size,
parameter_parallel_size)
assert data_parallel_size % parameter_parallel_size == 0, \
'world size should be divisible by parameter parallel size'
rank = dist.get_rank()
my_group = None
for i in range(dist.get_world_size() // parameter_parallel_size):
ranks = range(i * parameter_parallel_size, (i + 1) * parameter_parallel_size)
group = torch.distributed.new_group(ranks)
if rank in ranks:
my_group = group
return my_group
def print_configuration(args, name):
logger.info('{}:'.format(name))
for arg in sorted(vars(args)):
dots = '.' * (29 - len(arg))
logger.info(' {} {} {}'.format(arg, dots, getattr(args, arg)))
class DeepSpeedEngine(Module):
r"""DeepSpeed engine for training.
"""
def __init__(self,
args,
model,
optimizer=None,
model_parameters=None,
training_data=None,
lr_scheduler=None,
mpu=None,
dist_init_required=None,
collate_fn=None,
config_params=None):
super(DeepSpeedEngine, self).__init__()
self.client_optimizer = optimizer
self.client_model_parameters = model_parameters
self.client_lr_scheduler = lr_scheduler
self.training_data = training_data
self.collate_fn = collate_fn
self.mpu = mpu
self.data_parallel_group = None
self.global_steps = 0
self.global_samples = 0
self.micro_steps = 0
self.skipped_steps = 0
self.gradient_average = True
self.warn_unscaled_loss = True
self.config_params = config_params
self.loaded_checkpoint_mp_world_size = None
self.loaded_checkpoint_dp_world_size = None
self.enable_backward_allreduce = True
self.progressive_layer_drop = None
self.dist_backend = "nccl"
if dist_init_required is None:
dist_init_required = not dist.is_initialized()
if dist_init_required is False:
assert (dist.is_initialized()==True), "Torch distributed not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()"
# Initialize torch distributed if needed
init_distributed(dist_backend=self.dist_backend)
self._do_args_sanity_check(args)
self._configure_with_arguments(args, mpu)
self._do_sanity_check()
if mpu is not None:
assert not self.elasticity_enabled(), "Elasticity is not currently supported" \
" with model parallelism."
self._set_distributed_vars()
if self.tensorboard_enabled() and self.global_rank == 0:
self.summary_writer = self.get_summary_writer()
# Configure distributed model
self._configure_distributed_model(model)
# Configure wall clock timer
self.timers = SynchronizedWallClockTimer()
# Throughput timer
self.tput_timer = ThroughputTimer(
batch_size=self.train_micro_batch_size_per_gpu(),
num_workers=self.dp_world_size,
steps_per_output=self.steps_per_print(),
monitor_memory=False)
if training_data:
self.training_dataloader = self.deepspeed_io(training_data)
else:
self.training_dataloader = None
# Configure optimizer and scheduler
self.optimizer = None
self.lr_scheduler = None
if model_parameters or optimizer:
self._configure_optimizer(optimizer, model_parameters)
self._configure_lr_scheduler(lr_scheduler)
self._report_progress(0)
# Bookkeeping for csr support
self.csr_tensor_module_names = set()
if self.sparse_gradients_enabled():
for name, module in self.module.named_modules():
if isinstance(module, torch.nn.Embedding):
self.csr_tensor_module_names.add(name + ".weight")
logger.info("Will convert {} to sparse (csr) "
"tensor during training".format(name))
self.save_non_zero_checkpoint = False
self.save_zero_checkpoint = False
self._configure_checkpointing(dist_init_required)
if self.pld_enabled():
self.progressive_layer_drop = self._configure_progressive_layer_drop()
if self.global_rank == 0:
self._config.print('DeepSpeedEngine configuration')
if self.dump_state():
print_configuration(self, 'DeepSpeedEngine')
# Load pre-installed or JIT compile (un)flatten ops
util_ops = UtilsBuilder().load()
self.flatten = util_ops.flatten
self.unflatten = util_ops.unflatten
def get_batch_info(self):
""" Get all training batch related settings.
Returns:
train_batch_size (int): The effective training batch size. This is the amount of data
samples that leads to one step of model update.
train_micro_batch_size_per_gpu (int): Batch size to be processed by one GPU in one
step (without gradient accumulation).
gradient_accumulation_steps (int): Number of training steps to accumulate gradients
before averaging and applying them.
"""
return self.train_batch_size, self.train_micro_batch_size_per_gpu, self.gradient_accumulation_steps
def elasticity_enabled(self):
return self._config.elasticity_enabled
def pld_enabled(self):
return self._config.pld_enabled
def pld_params(self):
return self._config.pld_params
def pld_theta(self):
return self.pld_params()[PLD_THETA]
def pld_gamma(self):
return self.pld_params()[PLD_GAMMA]
def tensorboard_enabled(self):
return self._config.tensorboard_enabled
def tensorboard_output_path(self):
return self._config.tensorboard_output_path
def tensorboard_job_name(self):
return self._config.tensorboard_job_name
def get_summary_writer(self,
name="DeepSpeedJobName",
base=os.path.join(os.environ["HOME"],
"tensorboard")):
if self.tensorboard_output_path():
base_dir = self.tensorboard_output_path()
job_name = self.tensorboard_job_name()
log_dir = os.path.join(base_dir, job_name)
else:
if self.tensorboard_job_name():
name = self.tensorboard_job_name()
# Infrastructure-specific job-id
if 'DLWS_JOB_ID' in os.environ:
infra_job_id = os.environ['DLWS_JOB_ID']
elif 'DLTS_JOB_ID' in os.environ:
infra_job_id = os.environ['DLTS_JOB_ID']
else:
infra_job_id = 'unknown-job-id'
summary_writer_dir_name = os.path.join(infra_job_id, "logs")
log_dir = os.path.join(base, summary_writer_dir_name, name)
os.makedirs(log_dir, exist_ok=True)
return SummaryWriter(log_dir=log_dir)
def wall_clock_breakdown(self):
return self._config.wall_clock_breakdown
def memory_breakdown(self):
return self._config.memory_breakdown
def sparse_gradients_enabled(self):
return self._config.sparse_gradients_enabled
def train_batch_size(self):
return self._config.train_batch_size
def train_micro_batch_size_per_gpu(self):
return self._config.train_micro_batch_size_per_gpu
def optimizer_name(self):
return self.client_optimizer.__class__.__name__ if self.client_optimizer else self._config.optimizer_name
def optimizer_params(self):
return self._config.optimizer_params
def optimizer_legacy_fusion(self):
return self._config.optimizer_legacy_fusion
def scheduler_name(self):
return self._config.scheduler_name
def scheduler_params(self):
return self._config.scheduler_params
def zero_optimization(self):
return self._config.zero_enabled
def zero_allow_untested_optimizer(self):
return self._config.zero_allow_untested_optimizer
def zero_reduce_scatter(self):
return self._config.zero_config.reduce_scatter
def zero_overlap_comm(self):
return self._config.zero_config.overlap_comm
def zero_cpu_offload(self):
return self._config.zero_config.cpu_offload
def zero_optimization_stage(self):
return self._config.zero_optimization_stage
def zero_reduce_bucket_size(self):
return self._config.zero_config.reduce_bucket_size
def zero_allgather_bucket_size(self):
return self._config.zero_config.allgather_bucket_size
def zero_optimization_partition_gradients(self):
return self.zero_optimization_stage() >= ZERO_OPTIMIZATION_GRADIENTS
def zero_contiguous_gradients(self):
return self._config.zero_config.contiguous_gradients
def zero_load_from_fp32_weights(self):
return self._config.zero_config.load_from_fp32_weights
def zero_elastic_checkpoint(self):
return self._config.zero_config.elastic_checkpoint
def fp16_enabled(self):
return self._config.fp16_enabled
def amp_enabled(self):
return self._config.amp_enabled
def amp_params(self):
return self._config.amp_params
def loss_scale(self):
return self._config.loss_scale
def gradient_accumulation_steps(self):
return self._config.gradient_accumulation_steps
def allreduce_always_fp32(self):
return self._config.allreduce_always_fp32
def postscale_gradients(self):
return not self._config.prescale_gradients
def gradient_predivide_factor(self):
return self._config.gradient_predivide_factor
def steps_per_print(self):
return self._config.steps_per_print
def zero_allgather_partitions(self):
return self._config.zero_config.allgather_partitions
def dump_state(self):
return self._config.dump_state
def gradient_clipping(self):
return self._config.gradient_clipping
def dynamic_loss_scale(self):
return self._config.loss_scale == 0
def initial_dynamic_scale(self):
return self._config.initial_dynamic_scale
def dynamic_loss_scale_args(self):
return self._config.dynamic_loss_scale_args
def _configure_lr_scheduler(self, client_lr_scheduler):
# First check for scheduler in json configuration
lr_scheduler = self._scheduler_from_config(self.optimizer)
if lr_scheduler:
if self.global_rank == 0:
logger.info(
f'DeepSpeed using configured LR scheduler = {self.scheduler_name()}')
self.lr_scheduler = lr_scheduler
else:
if self.global_rank == 0:
logger.info('DeepSpeed using client LR scheduler')
self.lr_scheduler = client_lr_scheduler
log_dist(f'DeepSpeed LR Scheduler = {self.lr_scheduler}', ranks=[0])
def _configure_checkpointing(self, dist_init_required):
dp_rank = self.global_rank
if self.mpu:
dp_rank = self.mpu.get_data_parallel_rank()
# only the first data parallel process needs to store the model checkpoint
self.save_non_zero_checkpoint = (dp_rank == 0)
if self.zero_optimization() and self.optimizer is not None:
param_rank = torch.distributed.get_rank(
group=self.optimizer.dp_process_group)
# Only the first parameter parallel process needs to store the
# optimizer state checkpoints for zero
self.save_zero_checkpoint = (param_rank == dp_rank)
def _scheduler_from_config(self, optimizer):
scheduler_name = self.scheduler_name()
if scheduler_name is not None:
if hasattr(lr_schedules, scheduler_name):
scheduler = getattr(lr_schedules, scheduler_name)
else:
assert hasattr(torch.optim.lr_scheduler, scheduler_name), \
f"DeepSpeed does not recognize LR scheduler {scheduler_name}"
scheduler = getattr(torch.optim.lr_scheduler, scheduler_name)
scheduler_params = self.scheduler_params()
instantiated_scheduler = scheduler(optimizer, **scheduler_params)
return instantiated_scheduler
else:
return None
def _set_distributed_vars(self):
if self.local_rank >= 0:
torch.cuda.set_device(self.local_rank)
self.device = torch.device("cuda", self.local_rank)
self.world_size = dist.get_world_size()
self.global_rank = dist.get_rank()
else:
self.world_size = 1
self.global_rank = 0
self.device = torch.device("cuda")
# Configure based on command line arguments
def _configure_with_arguments(self, args, mpu):
self.local_rank = args.local_rank if hasattr(args, 'local_rank') else 0
config_file = args.deepspeed_config if hasattr(args,
'deepspeed_config') else None
self._config = DeepSpeedConfig(config_file, mpu, param_dict=self.config_params)
# Validate command line arguments
def _do_args_sanity_check(self, args):
if hasattr(args, 'deepscale_config') and args.deepscale_config is not None:
logger.warning(
"************ --deepscale_config is deprecated, please use --deepspeed_config ************"
)
if hasattr(args, 'deepspeed_config'):
assert args.deepspeed_config is None, "Not sure how to proceed, we were given both a deepscale_config and deepspeed_config"
args.deepspeed_config = args.deepscale_config
assert hasattr(args, 'local_rank') and type(args.local_rank) == int, \
'DeepSpeed requires integer command line parameter --local_rank'
if self.config_params is None:
assert hasattr(args, 'deepspeed_config') and args.deepspeed_config is not None, \
'DeepSpeed requires --deepspeed_config to specify configuration file'
assert os.path.isfile(args.deepspeed_config), \
'DeepSpeed configuration file: {} is not an existing file'.format(args.deepspeed_config)
def _is_supported_optimizer(self, optimizer_name):
return optimizer_name in DEEPSPEED_OPTIMIZERS or \
getattr(torch.optim, optimizer_name, None) is not None
# Validate configuration based on command line arguments
def _do_sanity_check(self):
if not self.client_optimizer:
if self.optimizer_name() is not None:
assert self._is_supported_optimizer(self.optimizer_name()), \
'{} is not a supported DeepSpeed Optimizer'.format(self.optimizer_name())
if self.optimizer_name() == LAMB_OPTIMIZER:
assert self.dynamic_loss_scale(), \
'DeepSpeed {} optimizer requires dynamic loss scaling'.format(self.optimizer_name())
def _broadcast_model(self):
for p in self.module.parameters():
if torch.is_tensor(p):
dist.broadcast(p,
self.broadcast_src_rank,
group=self.data_parallel_group)
def _configure_distributed_model(self, model):
self.module = model
if self.fp16_enabled():
self.module.half()
self.module.to(self.device)
if self.mpu is None:
self.data_parallel_group = _initialize_parameter_parallel_groups()
self.dp_world_size = dist.get_world_size()
self.mp_world_size = 1
self.broadcast_src_rank = 0
else:
self.data_parallel_group = self.mpu.get_data_parallel_group()
self.dp_world_size = self.mpu.get_data_parallel_world_size()
self.mp_world_size = self.mpu.get_model_parallel_world_size()
self.broadcast_src_rank = _get_global_rank(
self.mpu.get_data_parallel_group(),
0)
if not self.amp_enabled():
self._broadcast_model()
# Configure optimizer
def _configure_optimizer(self, client_optimizer, model_parameters):
if client_optimizer is not None:
basic_optimizer = client_optimizer
if self.global_rank == 0:
logger.info('Using client Optimizer as basic optimizer')
else:
basic_optimizer = self._configure_basic_optimizer(model_parameters)
if self.global_rank == 0:
logger.info(
'Using DeepSpeed Optimizer param name {} as basic optimizer'.format(
self.optimizer_name()))
if self.global_rank == 0:
logger.info('DeepSpeed Basic Optimizer = {}'.format(basic_optimizer))
if self.zero_optimization():
assert not self.amp_enabled(), "Amp and ZeRO are not currently compatible, please use (legacy) fp16 mode which performs similar to amp opt_mode=O2"
if not is_zero_supported_optimizer(basic_optimizer):
assert self.zero_allow_untested_optimizer(), \
'You are using an untested ZeRO Optimizer. Please add <"zero_allow_untested_optimizer": true> in the configuration file to use it.'
if self.global_rank == 0:
logger.warning(
"**** You are using ZeRO with an untested optimizer, proceed with caution *****"
)
self.optimizer = self._configure_zero_optimizer(basic_optimizer)
elif self.amp_enabled():
assert not self.fp16_enabled(), "Cannot enable both amp and (legacy) fp16 mode"
amp_params = self.amp_params()
if self.global_rank == 0:
logger.info(f"Initializing AMP with these params: {amp_params}")
try:
logger.info("Initializing Apex amp from: {}".format(amp.__path__))
except NameError:
# If apex/amp is available it will be imported above
raise RuntimeError(
"Unable to import apex/amp, please make sure it is installed")
self.module, self.optimizer = amp.initialize(self.module, basic_optimizer, **amp_params)
self._broadcast_model()
elif self.fp16_enabled():
self.optimizer = self._configure_fp16_optimizer(basic_optimizer)
else:
self.optimizer = basic_optimizer
logger.info('DeepSpeed Final Optimizer = {}'.format(self.optimizer))
logger.info('DeepSpeed Final Optimizer = {}'.format(self.optimizer.state_dict()))
def _configure_basic_optimizer(self, model_parameters):
optimizer_parameters = self.optimizer_params()
# print(optimizer_parameters.keys())
if 'max_grad_norm' in optimizer_parameters.keys():
raise ValueError(
"'max_grad_norm' is not supported as an optimizer parameter, please switch to using the deepspeed parameter 'gradient_clipping' see: https://www.deepspeed.ai/docs/config-json/#gradient-clipping for more details"
)
if self.optimizer_name() == ADAM_OPTIMIZER:
torch_adam = optimizer_parameters.pop(TORCH_ADAM_PARAM, False)
adam_w_mode = optimizer_parameters.pop(ADAM_W_MODE_PARAM, True)
# zero-offload torch-adam adam_w_mode optimizer
# T|F T T torch.optim.AdamW
# T|F T F torch.optim.Adam
# T F T|F DeepSpeedCPUAdam(adam_w_mode)
# F F T|F FusedAdam(adam_w_mode)
if torch_adam:
if adam_w_mode:
optimizer = torch.optim.AdamW(model_parameters,
**optimizer_parameters)
else:
optimizer = torch.optim.Adam(model_parameters,
**optimizer_parameters)
elif self.zero_cpu_offload():
optimizer = DeepSpeedCPUAdam(model_parameters,
**optimizer_parameters,
adamw_mode=adam_w_mode)
else:
optimizer_parameters[ADAM_W_MODE_PARAM] = adam_w_mode
optimizer = FusedAdam(model_parameters, **optimizer_parameters)
elif self.optimizer_name() == LAMB_OPTIMIZER:
from deepspeed.ops.lamb import FusedLamb
optimizer = FusedLamb(model_parameters, **optimizer_parameters)
elif self.optimizer_name() == ONEBIT_ADAM_OPTIMIZER:
from deepspeed.runtime.fp16.onebit_adam import OnebitAdam
optimizer = OnebitAdam(model_parameters, self, **optimizer_parameters)
else:
torch_optimizer = getattr(torch.optim, self.optimizer_name())
optimizer = torch_optimizer(model_parameters, **optimizer_parameters)
return optimizer
def _configure_fp16_optimizer(self, optimizer):
initial_dynamic_scale = self.initial_dynamic_scale()
dynamic_loss_args = self.dynamic_loss_scale_args()
clip_grad = self.gradient_clipping()
if isinstance(optimizer,
FusedAdam) or self.optimizer_name() == ONEBIT_ADAM_OPTIMIZER:
if self.dynamic_loss_scale():
logger.info('Creating fp16 optimizer with dynamic loss scale')
timers = self.timers if self.wall_clock_breakdown() else None
optimizer = FP16_Optimizer(
optimizer,
dynamic_loss_scale=True,
initial_dynamic_scale=initial_dynamic_scale,
dynamic_loss_args=dynamic_loss_args,
mpu=self.mpu,
clip_grad=clip_grad,
fused_adam_legacy=self.optimizer_legacy_fusion(),
timers=timers)
else:
logger.info('Creating fp16 optimizer with static loss scale: {}'.format(
self.loss_scale()))
optimizer = FP16_Optimizer(
optimizer,
static_loss_scale=self.loss_scale(),
mpu=self.mpu,
clip_grad=clip_grad,
fused_adam_legacy=self.optimizer_legacy_fusion())
else:
logger.info('Creating fp16 unfused optimizer with dynamic loss scale')
optimizer = FP16_UnfusedOptimizer(
optimizer,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=dynamic_loss_args,
mpu=self.mpu,
clip_grad=clip_grad,
fused_lamb_legacy=self.optimizer_name() == LAMB_OPTIMIZER)
return optimizer
def _configure_zero_optimizer(self, optimizer):
zero_stage = self.zero_optimization_stage()
logger.info('Creating fp16 ZeRO stage {} optimizer'.format(zero_stage))
assert not self.allreduce_always_fp32(), "ZeRO does not support 'fp32_allreduce': true"
if zero_stage == ZERO_OPTIMIZATION_OPTIMIZER_STATES:
assert self.zero_reduce_scatter(), 'Stage 1 only supports reduce scatter mode'
optimizer = FP16_DeepSpeedZeroOptimizer_Stage1(
optimizer,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
all_gather_partitions=self.zero_allgather_partitions(),
allgather_size=self.zero_allgather_bucket_size(),
max_elements_per_comm=self.zero_reduce_bucket_size(),
dp_process_group=self.data_parallel_group,
elastic_checkpoint=self.zero_elastic_checkpoint(),
mpu=self.mpu)
elif zero_stage == ZERO_OPTIMIZATION_GRADIENTS:
optimizer = FP16_DeepSpeedZeroOptimizer(
optimizer,
timers=self.timers,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
contiguous_gradients=self.zero_contiguous_gradients(),
reduce_bucket_size=self.zero_reduce_bucket_size(),
allgather_bucket_size=self.zero_allgather_bucket_size(),
dp_process_group=self.data_parallel_group,
reduce_scatter=self.zero_reduce_scatter(),
overlap_comm=self.zero_overlap_comm(),
cpu_offload=self.zero_cpu_offload(),
mpu=self.mpu,
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_accumulation_steps=self.gradient_accumulation_steps())
else:
raise NotImplementedError("ZeRO stage {} not implemented".format(zero_stage))
return optimizer
def _configure_progressive_layer_drop(self):
pld = ProgressiveLayerDrop(theta=self.pld_theta(), gamma=self.pld_gamma())
return pld
def deepspeed_io(self,
dataset,
batch_size=None,
route=ROUTE_TRAIN,
pin_memory=True,
data_sampler=None,
collate_fn=None,
num_local_io_workers=None):
if not isinstance(dataset, torch.utils.data.Dataset):
raise ValueError("Training data must be a torch Dataset")
if data_sampler is None and (route == ROUTE_PREDICT or route == ROUTE_EVAL):
data_sampler = torch.utils.data.SequentialSampler(dataset)
if batch_size is None:
batch_size = self.train_micro_batch_size_per_gpu()
if collate_fn is None:
collate_fn = self.collate_fn
# Currently we only use the timer in the train route
deepspeed_io_timer = None
if route == ROUTE_TRAIN:
deepspeed_io_timer = self.tput_timer
# If mpu is provided, forward world size and parallel rank to sampler.
data_parallel_world_size = None
data_parallel_rank = None
if self.mpu is not None:
data_parallel_world_size = self.mpu.get_data_parallel_world_size()
data_parallel_rank = self.mpu.get_data_parallel_rank()
return DeepSpeedDataLoader(dataset=dataset,
batch_size=batch_size,
pin_memory=pin_memory,
collate_fn=collate_fn,
local_rank=self.local_rank,
tput_timer=deepspeed_io_timer,
num_local_io_workers=num_local_io_workers,
data_sampler=data_sampler,
data_parallel_world_size=data_parallel_world_size,
data_parallel_rank=data_parallel_rank)
def train(self, mode=True):
r"""
"""
self.warn_unscaled_loss = True
self.module.train(mode)
def eval(self):
r"""
"""
self.warn_unscaled_loss = True
self.module.train(False)
def _scale_loss(self, prescaled_loss):
if isinstance(prescaled_loss, torch.Tensor):
scaled_loss = prescaled_loss / self.gradient_accumulation_steps()
elif isinstance(prescaled_loss, tuple) or isinstance(prescaled_loss, list):
scaled_loss = []
for l in prescaled_loss:
if isinstance(l, torch.Tensor):
scaled_loss.append(l / self.gradient_accumulation_steps())
else:
scaled_loss.append(l)
else:
scaled_loss = prescaled_loss
if self.warn_unscaled_loss:
logger.warning(
f'DeepSpeed unable to scale loss because of type: {type(prescaled_loss)}'
)
self.warn_unscaled_loss = False
return scaled_loss
def forward(self, *inputs, **kwargs):
r"""Execute forward propagation
Arguments:
*inputs: Variable length input list
**kwargs: variable length keyword arguments
"""
if self.module.training and self.progressive_layer_drop:
kwargs.update(self.progressive_layer_drop.get_state())
if self.wall_clock_breakdown():
self.timers('forward_microstep').start()
self.timers('forward').start()
if self.training_dataloader is None:
self.tput_timer.start()
loss = self.module(*inputs, **kwargs)
if self.wall_clock_breakdown():
self.timers('forward').stop()
self.timers('forward_microstep').stop()
return loss
def allreduce_gradients(self, bucket_size=MEMORY_OPT_ALLREDUCE_SIZE):
#Zero stage 2 communicates during non gradient accumulation boundaries as well
if self.zero_optimization_partition_gradients():
self.optimizer.overlapping_partition_gradients_reduce_epilogue()
#Communicate only at gradient accumulation boundaries
elif self.is_gradient_accumulation_boundary():
if self.zero_optimization_stage() == ZERO_OPTIMIZATION_OPTIMIZER_STATES:
assert self.zero_reduce_scatter()
self.optimizer.reduce_scatter_gradients(
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_average=self.gradient_average)
else:
self.buffered_allreduce_fallback(elements_per_buffer=bucket_size)
def backward(self, loss, allreduce_gradients=True, release_loss=False):
r"""Execute backward pass on the loss
Arguments:
loss: Torch tensor on which to execute backward propagation
allreduce_gradients: If this is False, then gradient averaging will be skipped. Default is True.
"""
if not allreduce_gradients:
logger.warning(
f'Argument `allreduce_gradients` is deprecated, ignored, and will soon be removed'
)
# scale loss w.r.t. gradient accumulation if needed
if self.gradient_accumulation_steps() > 1:
loss = self._scale_loss(loss.float())
# Log training Loss
if self.tensorboard_enabled():
if self.is_gradient_accumulation_boundary():
if self.global_rank == 0:
self.summary_events = [
(f'Train/Samples/train_loss',
loss.mean().item() * self.gradient_accumulation_steps(),
self.global_samples)
]
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
self.summary_writer.flush()
if self.wall_clock_breakdown():
self.timers('backward_microstep').start()
self.timers('backward').start()
assert self.optimizer is not None, "must provide optimizer during " \
"init in order to use backward"
if self.wall_clock_breakdown():
self.timers('backward_inner_microstep').start()
self.timers('backward_inner').start()
if self.zero_optimization():
self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary(
)
self.optimizer.backward(loss)
elif self.amp_enabled():
# AMP requires delaying unscale when inside gradient accumulation boundaries
# https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations
delay_unscale = not self.is_gradient_accumulation_boundary()
with amp.scale_loss(loss,
self.optimizer,
delay_unscale=delay_unscale) as scaled_loss:
scaled_loss.backward()
elif self.fp16_enabled():
self.optimizer.backward(loss)
else:
loss.backward()
if self.wall_clock_breakdown():
self.timers('backward_inner').stop()
self.timers('backward_inner_microstep').stop()
if self.wall_clock_breakdown():
self.timers('backward_allreduce_microstep').start()
self.timers('backward_allreduce').start()
if self.enable_backward_allreduce:
self.allreduce_gradients()
if self.wall_clock_breakdown():
self.timers('backward_allreduce').stop()
self.timers('backward_allreduce_microstep').stop()
self.timers('backward').stop()
self.timers('backward_microstep').stop()
if release_loss:
# loss.data = None
pass
return loss
def is_gradient_accumulation_boundary(self):
"""Query whether the current micro-batch is at the boundary of
gradient accumulation, and thus will trigger gradient reductions and
an optimizer step.
Returns:
bool: if the current step is a gradient accumulation boundary.
"""
return (self.micro_steps + 1) % \
self.gradient_accumulation_steps() == 0
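# Example: with gradient_accumulation_steps() == 4, micro_steps 3, 7, 11, ... are
# boundaries, so gradient reduction and the optimizer step run every 4th micro-batch.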
def zero_grad(self):
"""
Zero parameter grads.
"""
for param_name, param in self.module.named_parameters():
param.grad = None
def clip_fp32_gradients(self):
torch.nn.utils.clip_grad_norm_(parameters=self.module.parameters(),
max_norm=self.gradient_clipping())
def _take_model_step(self, lr_kwargs):
if self.gradient_clipping() > 0.0:
if not self.fp16_enabled() and not self.amp_enabled():
self.clip_fp32_gradients()
elif self.amp_enabled():
# AMP's recommended way of doing clipping
# https://nvidia.github.io/apex/advanced.html#gradient-clipping
master_params = amp.master_params(self.optimizer)
torch.nn.utils.clip_grad_norm_(parameters=master_params,
max_norm=self.gradient_clipping())
self.optimizer.step()
#zero grad in basic optimizer could be unreliable and may not exhibit
#the behaviour that we want
if not self.zero_optimization() and not self.fp16_enabled(
) and not self.amp_enabled():
self.zero_grad()
else:
self.optimizer.zero_grad()
report_progress = self.global_rank == 0 if self.global_rank else True
# Check overflow here since in DS fp16 optimizer, the overflow is updated in above step() function.
overflow = False
if hasattr(self.optimizer, 'overflow'):
overflow = self.optimizer.overflow
if overflow:
self.skipped_steps += 1
else:
if self.lr_scheduler is not None:
self.lr_scheduler.step(**(lr_kwargs or {}))
if report_progress and (self.global_steps + 1) % self.steps_per_print() == 0:
self._report_progress(self.global_steps + 1)
self.global_steps += 1
self.global_samples += self.train_batch_size()
def step(self, lr_kwargs=None):
r"""Execute the weight update step after forward and backward propagation
on effective_train_batch.
"""
if self.wall_clock_breakdown():
self.timers('step_microstep').start()
self.timers('step').start()
assert self.optimizer is not None, "must provide optimizer during " \
"init in order to use step"
report_progress = self.global_rank == 0 if self.global_rank else True
# Update the model when we reach gradient accumulation boundaries
if self.is_gradient_accumulation_boundary():
if self.progressive_layer_drop:
self.progressive_layer_drop.update_state(self.global_steps)
self._take_model_step(lr_kwargs)
self.tput_timer.stop(report_progress)
# Log learning rate
if self.tensorboard_enabled():
if self.is_gradient_accumulation_boundary():
if self.global_rank == 0:
self.summary_events = [(f'Train/Samples/lr',
self.get_lr()[0],
self.global_samples)]
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
if self.fp16_enabled() and hasattr(self.optimizer, 'cur_scale'):
self.summary_events.append((f'Train/Samples/loss_scale',
self.optimizer.cur_scale,
self.global_samples))
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
self.summary_writer.flush()
if self.wall_clock_breakdown():
self.timers('step').stop()
self.timers('step_microstep').stop()
timer_names = [
'forward_microstep',
'backward_microstep',
'backward_inner_microstep',
'backward_allreduce_microstep',
'step_microstep'
]
self.timers.log(names=timer_names, memory_breakdown=self.memory_breakdown())
# Log timing
if self.is_gradient_accumulation_boundary():
if self.tensorboard_enabled():
if self.global_rank == 0:
self.summary_events = [
(f'Train/Samples/elapsed_time_ms_forward',
self.timers('forward').elapsed(reset=False) * 1000.0,
self.global_samples),
(f'Train/Samples/elapsed_time_ms_backward',
self.timers('backward').elapsed(reset=False) * 1000.0,
self.global_samples),
(f'Train/Samples/elapsed_time_ms_backward_inner',
self.timers('backward_inner').elapsed(reset=False) * 1000.0,
self.global_samples),
(f'Train/Samples/elapsed_time_ms_backward_allreduce',
self.timers('backward_allreduce').elapsed(reset=False) *
1000.0,
self.global_samples),
(f'Train/Samples/elapsed_time_ms_step',
self.timers('step').elapsed(reset=False) * 1000.0,
self.global_samples)
]
for event in self.summary_events: # write_summary_events
self.summary_writer.add_scalar(event[0], event[1], event[2])
self.summary_writer.flush()
if self.wall_clock_breakdown():
self.timers.log([
'forward',
'backward',
'backward_inner',
'backward_allreduce',
'step'
])
self.micro_steps += 1
def _get_optimizer_param(self, param_name):
result = []
if not self.optimizer:
return result
for group in self.optimizer.param_groups:
if param_name in group:
result.append(group[param_name])
else:
result.append(0.0)
return result
def get_lr(self):
return self._get_optimizer_param('lr')
def get_type(self):
return self._get_optimizer_param('type')
def get_mom(self):
if self.optimizer_name() in ['SGD', 'RMSprop']:
return self._get_optimizer_param('momentum')
else:
return self._get_optimizer_param('betas')
def get_pld_theta(self):
if self.progressive_layer_drop:
return self.progressive_layer_drop.get_theta()
else:
return None
def _report_progress(self, step):
lr = self.get_lr()
mom = self.get_mom()
log_dist(f'step={step}, skipped={self.skipped_steps}, lr={lr}, mom={mom}',
ranks=[0])
def allreduce_bucket(self, bucket):
tensor = self.flatten(bucket)
tensor_to_allreduce = tensor
if self.allreduce_always_fp32():
tensor_to_allreduce = tensor.float()
if self.postscale_gradients():
if self.gradient_predivide_factor() != 1.0:
tensor_to_allreduce.mul_(1. / self.gradient_predivide_factor())
dist.all_reduce(tensor_to_allreduce, group=self.data_parallel_group)
if self.gradient_average:
if self.gradient_predivide_factor() != self.dp_world_size:
tensor_to_allreduce.mul_(self.gradient_predivide_factor() /
self.dp_world_size)
else:
tensor_to_allreduce.div_(self.dp_world_size)
dist.all_reduce(tensor_to_allreduce, group=self.data_parallel_group)
if self.allreduce_always_fp32() and tensor is not tensor_to_allreduce:
tensor.copy_(tensor_to_allreduce)
return tensor
def allreduce_and_copy(self, small_bucket):
allreduced = self.allreduce_bucket(small_bucket)
for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)):
buf.copy_(synced)
def allreduce_no_retain(self, bucket, numel_per_bucket=500000000):
small_bucket = []
numel = 0
for tensor in bucket:
small_bucket.append(tensor)
numel = numel + tensor.numel()
if numel > numel_per_bucket:
self.allreduce_and_copy(small_bucket)
small_bucket = []
numel = 0
if len(small_bucket) > 0:
self.allreduce_and_copy(small_bucket)
def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000):
grads = []
for param_name, param in self.module.named_parameters():
if param.grad is None:
# In cases where there is an imbalance of empty grads across
# ranks we must create empty grads, this will ensure that every
# rank is reducing the same size. In some cases it may make
# sense in the future to support the ability to average not
# w.r.t. world size but with a different value.
param.grad = torch.zeros(param.size(),
dtype=param.dtype,
device=param.device)
grads.append(param.grad.data)
else:
grad_data = param.grad.data
if self.sparse_gradients_enabled(
) and param_name in self.csr_tensor_module_names:
grads.append(CSRTensor(grad_data))
else:
grads.append(grad_data)
split_buckets = split_half_float_double_csr(grads)
for i, bucket_tuple in enumerate(split_buckets):
bucket_type, bucket = bucket_tuple
if bucket_type == CSRTensor.type():
self.csr_allreduce_no_retain(bucket)
else:
self.allreduce_no_retain(bucket, numel_per_bucket=elements_per_buffer)
def csr_allreduce_no_retain(self, bucket):
allreduced_csrs = self.csr_allreduce_bucket(bucket)
# Densify csr tensor and copy back to original location
for csr in allreduced_csrs:
dense_tensor = csr.to_dense()
csr.orig_dense_tensor.copy_(dense_tensor)
def csr_allreduce_bucket(self, bucket):
csr_list = []
for csr in bucket:
csr_list.append(self.csr_allreduce(csr))
return csr_list
def csr_allreduce(self, csr):
# Pre-divide for fp16 stability
csr.values.div_(self.dp_world_size)
indices_device_list = self.csr_all_gather(csr.indices)
values_device_list = self.csr_all_gather(csr.values)
csr.indices = torch.cat(indices_device_list)
csr.values = torch.cat(values_device_list)
return csr
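# csr_all_gather pads each rank's tensor to the largest per-rank size so that
# dist.all_gather (which requires equal shapes) succeeds, then trims every
# gathered tensor back to its true length using the gathered sizes.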
def csr_all_gather(self, value):
my_size = torch.LongTensor([value.size()[0]]).to(self.device)
all_sizes = self.all_gather_scalar(my_size)
max_size = torch.cat(all_sizes).max()
fill_size = (max_size - my_size)
assert value.dim() in [1, 2]
if value.dim() == 1:
if fill_size > 0:
value = torch.cat([value, value.new_zeros(fill_size)])
tensor_list = [value.new_zeros(max_size) for _ in range(self.dp_world_size)]
else:
if fill_size > 0:
value = torch.cat([value, value.new_zeros(fill_size, value.size()[1])])
tensor_list = [
value.new_zeros(max_size,
value.size()[1]) for _ in range(self.dp_world_size)
]
dist.all_gather(tensor_list, value, group=self.data_parallel_group)
tensors = []
for dev_idx, t in enumerate(tensor_list):
size = all_sizes[dev_idx][0]
tensors.append(
t.index_select(0,
torch.LongTensor(range(size)).to(self.device)))
return tensors
def all_gather_scalar(self, value):
tensor_list = [value.new_zeros(value.size()) for _ in range(self.dp_world_size)]
dist.all_gather(tensor_list, value, group=self.data_parallel_group)
return tensor_list
def module_state_dict(self, destination=None, prefix='', keep_vars=False):
sd = self.module.state_dict(destination, prefix, keep_vars)
return sd
def load_module_state_dict(self, state_dict, strict=True):
self.module.load_state_dict(state_dict, strict=strict)
def _get_rank_zero_ckpt_name(self, checkpoints_path, tag, mp_rank, dp_rank):
filename = 'zero_pp_rank_{}'.format(dp_rank)
zero_ckpt_name = os.path.join(
checkpoints_path,
str(tag),
filename + '_mp_rank_{:02d}'.format(mp_rank) + 'optim_states.pt')
return zero_ckpt_name
def _get_zero_ckpt_name(self, checkpoints_path, tag):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
pp_rank = torch.distributed.get_rank(group=self.optimizer.dp_process_group)
return self._get_rank_zero_ckpt_name(checkpoints_path, tag, mp_rank, pp_rank)
def _get_ckpt_name(self, checkpoints_path, tag):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
ckpt_name = os.path.join(checkpoints_path,
str(tag),
'mp_rank_{:02d}'.format(mp_rank) + '_model_states.pt')
return ckpt_name
def load_checkpoint(self,
load_dir,
tag=None,
load_module_strict=True,
load_optimizer_states=True,
load_lr_scheduler_states=True):
"""Load training checkpoint
Arguments:
load_dir: Required. Directory to load the checkpoint from
tag: Checkpoint tag used as a unique identifier for checkpoint, if not provided will attempt to load tag in 'latest' file
load_module_strict: Optional. Boolean to strictly enforce that the keys in state_dict of module and checkpoint match.
load_optimizer_states: Optional. Boolean to load the training optimizer states from Checkpoint. Ex. ADAM's momentum and variance
load_lr_scheduler_states: Optional. Boolean to add the learning rate scheduler states from Checkpoint.
Returns:
A tuple of ``load_path`` and ``client_state``.
*``load_path``: Path of the loaded checkpoint. ``None`` if loading the checkpoint failed.
*``client_state``: State dictionary used for loading required training states in the client code.
"""
if tag is None:
latest_path = os.path.join(load_dir, 'latest')
if os.path.isfile(latest_path):
with open(latest_path, 'r') as fd:
tag = fd.read().strip()
else:
logger.warning(f"Unable to find latest file at {latest_path}, if trying to load latest " \
"checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.")
return None, None
load_path, client_states = self._load_checkpoint(load_dir,
tag,
load_module_strict=load_module_strict,
load_optimizer_states=load_optimizer_states,
load_lr_scheduler_states=load_lr_scheduler_states)
if self.zero_optimization() and load_path is not None:
self._load_zero_checkpoint(load_dir,
tag,
load_optimizer_states=load_optimizer_states)
return load_path, client_states
def _load_checkpoint(self,
load_dir,
tag,
load_module_strict=True,
load_optimizer_states=True,
load_lr_scheduler_states=True):
load_path = self._get_ckpt_name(load_dir, tag)
if not os.path.exists(load_path):
logger.warn(
'Client provided checkpoint load path: {} does not exist ... skip checkpoint load'
.format(load_path))
return None, None
logger.info(f'rank: {self.global_rank} loading checkpoint: {load_path}')
checkpoint = torch.load(load_path, map_location=lambda storage, loc: storage)
if isinstance(self.module, PipelineModule):
# Pipeline parallelism uses this to load its own checkpoint files.
self._curr_ckpt_path = os.path.join(load_dir, tag)
self.load_module_state_dict(state_dict=checkpoint['module'],
strict=load_module_strict)
if self.optimizer is not None and not self.zero_optimization():
if self.fp16_enabled():
self.optimizer.load_state_dict(
checkpoint['optimizer'],
load_optimizer_states=load_optimizer_states)
elif load_optimizer_states:
self.optimizer.load_state_dict(checkpoint['optimizer'])
if load_lr_scheduler_states and self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
self.csr_tensor_module_names = checkpoint['csr_tensor_module_names']
self.global_steps = checkpoint['global_steps']
self.global_samples = checkpoint.get('global_samples',
self.global_steps * self.train_batch_size())
self.skipped_steps = checkpoint['skipped_steps']
self.loaded_checkpoint_mp_world_size = checkpoint['mp_world_size']
self.loaded_checkpoint_dp_world_size = checkpoint['dp_world_size']
deepspeed_states = [
'module',
'optimizer',
'lr_scheduler',
'csr_tensor_module_names',
'skipped_steps',
'global_steps',
'dp_world_size',
'mp_world_size'
]
        client_state = {
            key: value
            for key, value in checkpoint.items()
            if key not in deepspeed_states
        }
return load_path, client_state
def _load_zero_checkpoint(self, load_dir, tag, load_optimizer_states=True):
zero_sd_list = self._get_all_zero_checkpoints(load_dir, tag)
if zero_sd_list is None:
return
self.optimizer.load_state_dict(
state_dict_list=zero_sd_list,
load_optimizer_states=load_optimizer_states,
load_from_fp32_weights=self.zero_load_from_fp32_weights())
print(
f'loading {len(zero_sd_list)} zero partition checkpoints for rank {self.global_rank}'
)
def _get_mp_rank_zero_checkpoint_names(self, load_dir, tag, mp_rank, dp_world_size):
zero_ckpt_names = []
for dp_rank in range(dp_world_size):
ckpt_name = self._get_rank_zero_ckpt_name(checkpoints_path=load_dir,
tag=tag,
mp_rank=mp_rank,
dp_rank=dp_rank)
zero_ckpt_names.append(ckpt_name)
return zero_ckpt_names
def _get_all_zero_checkpoint_names(self,
load_dir,
tag,
mp_world_size,
dp_world_size):
zero_ckpt_names = []
for mp_rank in range(mp_world_size):
mp_rank_ckpt_names = self._get_mp_rank_zero_checkpoint_names(
load_dir=load_dir,
tag=tag,
mp_rank=mp_rank,
dp_world_size=dp_world_size)
zero_ckpt_names += mp_rank_ckpt_names
return zero_ckpt_names
def _get_all_zero_checkpoints(self, load_dir, tag):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
zero_ckpt_names = self._get_mp_rank_zero_checkpoint_names(
load_dir=load_dir,
tag=tag,
mp_rank=mp_rank,
dp_world_size=self.loaded_checkpoint_dp_world_size)
invalid_zero_ckpt_paths = []
for ckpt_name in zero_ckpt_names:
if not os.path.exists(ckpt_name):
invalid_zero_ckpt_paths.append(ckpt_name)
if len(invalid_zero_ckpt_paths) > 0:
logger.warn(
f"Client provided zero checkpoint load paths: {invalid_zero_ckpt_paths} does not exist"
)
return None
zero_sd_list = []
for ckpt_name in zero_ckpt_names:
zero_sd_list.append(torch.load(ckpt_name, map_location='cpu'))
zero_optimizer_sd = [sd['optimizer_state_dict'] for sd in zero_sd_list]
print(
f"successfully loaded {len(zero_optimizer_sd)} ZeRO state_dicts for rank {self.global_rank}"
)
return zero_optimizer_sd
def save_checkpoint(self, save_dir, tag=None, client_state={}, save_latest=True, save_zero=True):
r"""Save training checkpoint
Arguments:
save_dir: Required. Directory for saving the checkpoint
tag: Optional. Checkpoint tag used as a unique identifier for the checkpoint, global step is used if not provided.
client_state: Optional. State dictionary used for saving required training states in the client code.
            save_latest: Optional. Save a file 'latest' pointing to the latest saved checkpoint.
            save_zero: Optional. Also save the ZeRO optimizer partition states (enabled by default).
        """
# This is to make sure the checkpoint names are created without collision
        # There seems to be an issue creating them in parallel
# Ensure save_dir directory exists
os.makedirs(save_dir, exist_ok=True)
if tag is None:
tag = f"global_step{self.global_steps}"
if self.save_non_zero_checkpoint:
self._create_checkpoint_file(save_dir, tag, False)
self._save_checkpoint(save_dir, tag, client_state=client_state)
if self.save_zero_checkpoint and save_zero:
self._create_zero_checkpoint_files(save_dir, tag)
self._save_zero_checkpoint(save_dir, tag)
# Save latest checkpoint tag
if save_latest:
with open(os.path.join(save_dir, 'latest'), 'w') as fd:
fd.write(tag)
return True
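    # A minimal usage sketch (the `engine`, `epoch` and client_state contents are assumptions for the example):
    #   engine.save_checkpoint("checkpoints",
    #                          tag=f"global_step{engine.global_steps}",
    #                          client_state={"epoch": epoch})
    # Anything passed via client_state is stored alongside the DeepSpeed states and is
    # returned by a later load_checkpoint() call.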
def _create_checkpoint_file(self, save_dir, tag, zero_checkpoint):
name_function = self._get_zero_ckpt_name if zero_checkpoint else self._get_ckpt_name
try:
checkpoint_name = name_function(save_dir, tag)
ensure_directory_exists(checkpoint_name)
        except Exception:
logger.error(f'Failed saving model checkpoint to {save_dir} with tag {tag}')
return False
return True
def _create_zero_checkpoint_files(self, save_dir, tag):
# zero checkpoint files are created sequentially
try:
checkpoint_name = self._get_zero_ckpt_name(save_dir, tag)
if self.local_rank == 0:
ensure_directory_exists(checkpoint_name)
else:
while not os.path.exists(os.path.dirname(checkpoint_name)):
time.sleep(1)
        except Exception:
logger.error(f'Failed saving model checkpoint to {save_dir} with tag {tag}')
return False
return True
# dist.barrier()
def _save_checkpoint(self, save_dir, tag, client_state={}):
save_path = self._get_ckpt_name(save_dir, tag)
        # A hack to save the checkpointing directory. Pipeline parallelism overrides
        # module_state_dict() and uses this path to save the model; module_state_dict()
        # then simply returns None.
self._curr_ckpt_path = os.path.join(save_dir, tag)
state = {
'module':
self.module_state_dict(),
'optimizer':
self.optimizer.state_dict()
if self.optimizer and not self.zero_optimization() else None,
'lr_scheduler':
self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None,
'csr_tensor_module_names':
self.csr_tensor_module_names,
'skipped_steps':
self.skipped_steps,
'global_steps':
self.global_steps,
'global_samples':
self.global_samples,
'dp_world_size':
self.dp_world_size,
'mp_world_size':
self.mp_world_size
}
state.update(client_state)
log_dist(message=f'Saving model checkpoint: {save_path}', ranks=[0])
#logger.info('Saving model checkpoint: {}'.format(save_path))
torch.save(state, save_path)
self._curr_save_path = None
def _save_zero_checkpoint(self, save_path, tag):
zero_checkpoint_name = self._get_zero_ckpt_name(save_path, tag)
zero_sd = {'optimizer_state_dict': self.optimizer.state_dict()}
torch.save(zero_sd, zero_checkpoint_name)
logger.info('zero checkpoint saved {}'.format(zero_checkpoint_name))
| 62,693 | 40.740346 | 227 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/tools/ds_fix/stage1.py | import math
import torch
import torch.distributed as dist
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from collections import defaultdict
from deepspeed.runtime.zero.utils import _initialize_parameter_parallel_groups
from deepspeed.runtime.fp16.loss_scaler import LossScaler, DynamicLossScaler
from deepspeed.runtime.utils import get_grad_norm, CheckOverflow
from deepspeed.runtime.zero.config import ZERO_OPTIMIZATION_OPTIMIZER_STATES
from deepspeed.utils import logger, log_dist
def get_alignment_padding(flattened_lean_size, sub_partition_id, sub_partition_size):
sub_partition_high_limit = (sub_partition_id + 1) * sub_partition_size
if sub_partition_high_limit <= flattened_lean_size:
return 0
else:
return min(sub_partition_size, sub_partition_high_limit - flattened_lean_size)
def get_group_alignment_padding(tensor_list, sub_partition_size, sub_partition_count):
group_paddings = []
flattened_size = sum([tensor.numel() for tensor in tensor_list])
for i in range(sub_partition_count):
padding = get_alignment_padding(flattened_size, i, sub_partition_size)
group_paddings.append(padding)
return group_paddings
def flatten_dense_tensors_sub_partition_aligned(tensor_list,
dp,
max_elements_per_comm,
pg):
assert max_elements_per_comm >= dp, f"max_elements_per_comm {max_elements_per_comm} < dp {dp}"
num_elements = sum(t.numel() for t in tensor_list)
log_dist("Total number of elements in model: {}, max elements per com: {}".format(
num_elements,
max_elements_per_comm),
ranks=[0])
# Compute aligned partition size based on parameter count
aligned_param_partition_size = math.ceil(num_elements / dp)
# Compute aligned partition size based on communication size
aligned_comm_partition_size = int(max_elements_per_comm // dp)
if aligned_param_partition_size <= aligned_comm_partition_size:
sub_partition_count = 1
sub_partition_size = aligned_param_partition_size
else:
sub_partition_count = math.ceil(aligned_param_partition_size /
aligned_comm_partition_size)
sub_partition_size = aligned_comm_partition_size
# Compute required padding for alignment to dp and max_elements_per_comm
padding = (sub_partition_count * sub_partition_size * dp) - num_elements
log_dist(
f"sub_partition_count: {sub_partition_count}, sub_partition_size: {sub_partition_size}, padding: {padding}",
ranks=[0])
log_dist(
f"number of elements with padding: {num_elements} + {padding} = {num_elements + padding}",
ranks=[0])
if padding == 0:
aligned_tensor_list = tensor_list
else:
pad_tensor = torch.zeros(padding,
device=tensor_list[0].device,
dtype=tensor_list[0].dtype)
aligned_tensor_list = tensor_list + [pad_tensor]
flat_tensors = _flatten_dense_tensors(aligned_tensor_list)
return flat_tensors
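# Worked example (illustrative numbers only): with num_elements=10, dp=4 and
# max_elements_per_comm=8:
#   aligned_param_partition_size = ceil(10 / 4) = 3
#   aligned_comm_partition_size  = 8 // 4       = 2
#   => sub_partition_count = ceil(3 / 2) = 2, sub_partition_size = 2
#   => padding = 2 * 2 * 4 - 10 = 6
# so the returned flat tensor holds 16 elements: the 10 real elements followed by
# 6 zeros, i.e. 2 sub-partitions of 2 elements for each of the 4 ranks.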
def _single_range_check(current_index, start_index, end_index, tensor_size):
offset = 0
if (current_index >= start_index) and (current_index < end_index):
        # Tensor starts inside this range, so no offset is needed
return True, offset
elif (start_index > current_index) and (start_index < (current_index + tensor_size)):
        # Range starts inside this tensor; compute the offset into the tensor
offset = start_index - current_index
return True, offset
else:
return False, offset
def _range_check(current_index, element_intervals, tensor_size):
results = []
for comm_idx, interval in enumerate(element_intervals):
start_index, end_index = interval
contained, offset = _single_range_check(current_index, start_index, end_index, tensor_size)
if contained:
results.append((contained, offset, comm_idx))
if len(results) == 0:
return [(False, 0, -1)]
return results
class FP16_DeepSpeedZeroOptimizer_Stage1(object):
"""
FP16_DeepSpeedZeroOptimizer_Stage1 designed to reduce the memory footprint
required for training large deep learning models.
For more details please see ZeRO: Memory Optimization Towards Training A Trillion Parameter Models
https://arxiv.org/abs/1910.02054
This version aligns with stage-1 in the paper above.
"""
def __init__(self,
init_optimizer,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True,
dp_process_group=None,
partition_size=None,
mpu=None,
all_gather_partitions=True,
allgather_size=500000000,
clip_grad=0.0,
max_elements_per_comm=5e8,
elastic_checkpoint=True):
if dp_process_group is not None and partition_size is not None:
raise ValueError("Cannot specify both dp_process_group "
"and partition size")
if dp_process_group is None:
dp_process_group = _initialize_parameter_parallel_groups(partition_size)
        if not torch.cuda.is_available():
raise SystemError("Cannot use fp16 without CUDA.")
self.optimizer = init_optimizer
self.verbose = verbose
self.dp_process_group = dp_process_group
# TODO: automatically turn off if #params > some_limit
self.all_gather_partitions = all_gather_partitions
self.allgather_size = allgather_size
# self.max_elements_per_comm = max_elements_per_comm
# logger.info("max_elements_per_comm={}".format(max_elements_per_comm))
self.elastic_checkpoint = elastic_checkpoint
logger.info(f'ZeRO Elastic Checkpoint = {elastic_checkpoint}')
# param flattened by groups
self.fp16_groups = []
self.fp16_groups_flat = []
# Setup bookkeeping data structures depending on partitioning type
# parallel_sub_partitioned_fp16_groups[group-idx] -> [comm-ids] -> [rank-ids]
self.parallel_sub_partitioned_fp16_groups = []
# same underlying data as above but viewed as: [groups] -> [rank-ids] -> [comm-ids]
self.parallel_comm_sub_partitioned_fp16_groups = []
# 32-bit sub-partitions of the parallel partitioned parameters
# that this process will update
self.local_sub_partitions_of_fp32_groups = []
# param partition info
# parameters in each group that will not be updated by this process directly
self.params_not_local = []
# parameters that will be updated by this process directly
self.params_in_rank_sub_partitions = []
# parameter offsets for parameters in sub-partitions. Parameter
# boundaries may not align with sub-partition boundaries
# so we need to keep track of the offsets
self.params_in_rank_sub_partitions_offsets = []
# number of elements per sub-partition in each group
self.sub_partition_sizes = []
# number of communication intervals for each group
self.num_comm_intervals_per_group = []
local_rank = dist.get_rank(group=self.dp_process_group)
self.group_paddings = []
self.partition_count = dist.get_world_size(group=self.dp_process_group)
self.default_device = self.optimizer.param_groups[0]['params'][0].device
# max elems per param group
self.max_elems_per_comm = []
# loop to deal with groups
for i, param_group in enumerate(self.optimizer.param_groups):
            # push this group to the list before modifying it
self.fp16_groups.append(param_group['params'])
            # calculate the best max elements per comm to minimize padding
self.max_elems_per_comm.append(
self.best_max_elems_per_comm(
num_elements=sum(t.numel() for t in self.fp16_groups[i]),
max_elements_per_comm=max_elements_per_comm,
dp=dist.get_world_size(group=self.dp_process_group)))
            # flatten all tensors into a single 1d tensor aligned with the sub-partition size for later dividing
# RS: create aligned sub-partitions
flat_aligned_params = flatten_dense_tensors_sub_partition_aligned(
tensor_list=self.fp16_groups[i],
dp=dist.get_world_size(group=self.dp_process_group),
max_elements_per_comm=self.max_elems_per_comm[i],
pg=self.dp_process_group)
self.fp16_groups_flat.append(flat_aligned_params)
# TODO: I don't think this does anything?
# set model fp16 weight to slices of flattened buffer
updated_params = _unflatten_dense_tensors(self.fp16_groups_flat[i],
self.fp16_groups[i])
for p, q in zip(self.fp16_groups[i], updated_params):
p.data = q.data
            # divide the flat weights into a number of near-equal partitions equal to the data parallel degree
# each process will compute on a different part of the partition
# RS: split into two layer list -> [comm-id] -> [sub-partitions per rank]
comm_partitions, dp_sub_partitions, element_intervals, sub_partition_size, num_comm_intervals = \
self.get_data_parallel_sub_partitions(
tensor=self.fp16_groups_flat[i],
max_elements_per_comm=self.max_elems_per_comm[i],
world_size=dist.get_world_size(
group=self.dp_process_group),
dp_process_group=self.dp_process_group
)
self.parallel_comm_sub_partitioned_fp16_groups.append(
comm_partitions) # comm -> rank
self.parallel_sub_partitioned_fp16_groups.append(
dp_sub_partitions) # rank -> comm
self.sub_partition_sizes.append(sub_partition_size)
self.num_comm_intervals_per_group.append(num_comm_intervals)
# data_parallel_partitions = self.get_data_parallel_partitions(self.fp16_groups_flat[i])
# self.parallel_partitioned_fp16_groups.append(data_parallel_partitions)
# a partition of the fp32 master weights that will be updated by this process
# RS: store/detach/cast our local sub-partitions
local_sub_partitions = []
for sub_partition in self.parallel_sub_partitioned_fp16_groups[i][
local_rank]:
fp32_sub_partition = sub_partition.clone().float().detach()
fp32_sub_partition.requires_grad = True
local_sub_partitions.append(fp32_sub_partition)
self.local_sub_partitions_of_fp32_groups.append(local_sub_partitions)
# Compute sub_partition paddings
sub_partition_paddings = get_group_alignment_padding(
tensor_list=self.fp16_groups[i],
sub_partition_size=sub_partition_size,
sub_partition_count=num_comm_intervals * self.partition_count)
self.group_paddings.append(sub_partition_paddings)
            # modify optimizer to have flat master weights
# self.single_partition_of_fp32_groups[i].requires_grad = True # keep this in case internal optimizer uses it
param_group['params'] = self.local_sub_partitions_of_fp32_groups[i]
# RS: divide up the sub-partitions and keep track of offsets for each param
# partition_size = len(self.fp16_groups_flat[i]) / dist.get_world_size(group=self.dp_process_group)
params_in_rank_sub_partition, params_in_rank_sub_partitions_offsets, params_not_local = self.get_all_sub_partition_info(
tensor_list=self.fp16_groups[i],
all_element_intervals=element_intervals,
local_rank=local_rank,
world_size=dist.get_world_size(group=self.dp_process_group)
)
self.params_in_rank_sub_partitions.append(params_in_rank_sub_partition)
self.params_not_local.append(params_not_local)
self.params_in_rank_sub_partitions_offsets.append(
params_in_rank_sub_partitions_offsets)
        # we may have a way of fusing dynamic scale. Not supported for now
if dynamic_loss_scale:
if dynamic_loss_args is None:
self.loss_scaler = DynamicLossScaler()
else:
self.loss_scaler = DynamicLossScaler(**dynamic_loss_args)
self.dynamic_loss_scale = True
else:
self.dynamic_loss_scale = False
self.loss_scaler = LossScaler(scale=static_loss_scale)
self.cur_iter = 0
self.mpu = mpu
self.clip_grad = clip_grad
self.overflow = False
self.overflow_checker = CheckOverflow(self.fp16_groups,
mpu=self.mpu,
zero_reduce_scatter=True)
self._initialize_optimizer_states()
self.hack_first_step = True
def _initialize_optimizer_states(self):
for group_idx, group in enumerate(self.local_sub_partitions_of_fp32_groups):
for idx, sub_partition_param in enumerate(group):
sub_partition_grad = torch.zeros(int(
self.sub_partition_sizes[group_idx]),
dtype=sub_partition_param.dtype).cuda()
sub_partition_param.grad = sub_partition_grad
self.optimizer.step()
for group in self.local_sub_partitions_of_fp32_groups:
for idx, sub_partition_param in enumerate(group):
sub_partition_param.grad = None
@staticmethod
def best_max_elems_per_comm(num_elements, max_elements_per_comm, dp):
# if we use max-elems-per-comm as is, how many comm intervals will there be
max_comm_intervals = math.ceil(num_elements / max_elements_per_comm)
padding_for_max_comm = (max_elements_per_comm *
max_comm_intervals) - num_elements
# if we use 1 less comm interval how much extra comm padding would be required
min_comm_intervals = num_elements // max_elements_per_comm
if min_comm_intervals == 0:
log_dist(f'Using default max_elements_per_comm {max_elements_per_comm}',
ranks=[0])
return max_elements_per_comm
padding_for_min_comm = math.ceil(num_elements / (dp * min_comm_intervals))
# choose padding that uses least amount of overhead
if padding_for_max_comm > padding_for_min_comm:
new_max_elements_per_comm = padding_for_min_comm + max_elements_per_comm
log_dist(
f'Updating max_elements_per_comm from {max_elements_per_comm} -> {new_max_elements_per_comm}',
ranks=[0])
return new_max_elements_per_comm
else:
log_dist(f'Using default max_elements_per_comm {max_elements_per_comm}',
ranks=[0])
return max_elements_per_comm
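    # Worked example (illustrative numbers only): num_elements=17, max_elements_per_comm=8, dp=2
    #   max_comm_intervals = ceil(17 / 8) = 3 -> padding_for_max_comm = 8 * 3 - 17 = 7
    #   min_comm_intervals = 17 // 8 = 2      -> padding_for_min_comm = ceil(17 / (2 * 2)) = 5
    # Since 7 > 5, the method returns 5 + 8 = 13 as the new max_elements_per_comm.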
@staticmethod
def get_data_parallel_sub_partitions(tensor,
max_elements_per_comm,
world_size,
dp_process_group=None):
total_num_elements = tensor.numel()
# if total elements is less than our max, revert to splitting into dp partitions
max_elements_per_comm = min(total_num_elements, max_elements_per_comm)
sub_partition_size = int(max_elements_per_comm // world_size)
# Ensure partition alignment was done correctly
num_sub_partitions = int(total_num_elements // sub_partition_size)
assert total_num_elements % sub_partition_size == 0, "{} % {} != 0".format(total_num_elements, sub_partition_size)
# Ensure comm interval alignment was done correctly.
num_comm_intervals = int(num_sub_partitions // world_size)
assert num_sub_partitions % world_size == 0, "{} % {} != 0".format(num_sub_partitions, world_size)
if not dist.is_initialized() or dist.get_rank(group=dp_process_group) == 0:
logger.info("**** partition info:")
logger.info("\t total_num_elements=%s", total_num_elements)
logger.info("\t world_size=%s", world_size)
logger.info("\t max_elements_per_comm=%s", max_elements_per_comm)
logger.info("\t sub_partition_size=%s", sub_partition_size)
logger.info("\t num_sub_partitions=%s", num_sub_partitions)
logger.info("\t num_comm_intervals=%s", num_comm_intervals)
logger.info("****")
# [comm_id] -> [rank]
comm_partitions = []
for _ in range(num_comm_intervals):
comm_partitions.append([])
start = 0
comm_id = 0
element_intervals = defaultdict(
list) # [rank] -> [(start,end), (start,end), ...]
for idx in range(num_sub_partitions):
rank_id = idx % world_size
sub_partition = tensor.narrow(0, start, sub_partition_size).detach()
element_intervals[rank_id].append((start, start + sub_partition_size))
comm_partitions[comm_id].append(sub_partition)
start = start + sub_partition_size
if rank_id == (world_size - 1):
comm_id += 1
# [rank] -> [comm_id]
sub_partitions = []
for _ in range(world_size):
sub_partitions.append([])
for comm_id, partitions in enumerate(comm_partitions):
for rank_id, partition in enumerate(partitions):
sub_partitions[rank_id].append(partition)
return comm_partitions, sub_partitions, element_intervals, sub_partition_size, num_comm_intervals
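    # Worked example (illustrative numbers only): a flat tensor of 16 elements with
    # world_size=2 and max_elements_per_comm=8 gives sub_partition_size=4,
    # num_sub_partitions=4 and num_comm_intervals=2, laid out as
    #   comm 0: rank 0 -> elements [0:4),  rank 1 -> elements [4:8)
    #   comm 1: rank 0 -> elements [8:12), rank 1 -> elements [12:16)
    # so comm_partitions is indexed [comm][rank] and sub_partitions is indexed [rank][comm].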
@staticmethod
def get_all_sub_partition_info(tensor_list,
all_element_intervals,
local_rank,
world_size):
params_not_local = []
# [rank] -> [comm-id] -> [param/offset]
params_in_rank_sub_partition = []
params_in_rank_sub_partitions_offsets = []
for rank in range(world_size):
params_in_local_sub_partition = []
local_sub_partition_offsets = []
comm_tensor_list = []
comm_offset_list = []
current_index = 0
prev_comm_idx = 0
for iii, tensor in enumerate(tensor_list):
tensor_size = tensor.numel()
#if local_rank == 0:
# # logger.info("rank={}, current_index={}, tensor_size={}, tensor-idx={}".format(rank,
# current_index, tensor_size, iii))
results_list = _range_check(current_index,
all_element_intervals[rank],
tensor_size)
for contained, offset, comm_idx in results_list:
#if local_rank == 0:
# logger.info("rank={}, contained={}, offset={}, comm_idx={}".format(rank, contained,
# offset, comm_idx))
if contained:
if prev_comm_idx != comm_idx:
params_in_local_sub_partition.append(comm_tensor_list)
comm_tensor_list = []
local_sub_partition_offsets.append(comm_offset_list)
comm_offset_list = []
comm_tensor_list.append(tensor)
comm_offset_list.append(offset)
prev_comm_idx = comm_idx
elif rank == local_rank:
params_not_local.append(tensor)
current_index = current_index + tensor_size
#assert len(comm_tensor_list) > 0
#assert len(comm_offset_list) > 0
params_in_local_sub_partition.append(comm_tensor_list)
local_sub_partition_offsets.append(comm_offset_list)
params_in_rank_sub_partition.append(params_in_local_sub_partition)
params_in_rank_sub_partitions_offsets.append(local_sub_partition_offsets)
return params_in_rank_sub_partition, params_in_rank_sub_partitions_offsets, params_not_local
@staticmethod
def get_flat_sub_partitions(comm_tensor_list,
comm_param_offsets,
sub_partition_size,
dtype,
default_device,
num_comm_intervals=None,
return_partition_params=False):
partition_params = []
final_param_offsets = []
flat_sub_partitions = []
for tensor_list, param_offsets in zip(comm_tensor_list, comm_param_offsets):
flat_tensor_list = []
current_size = 0
my_offsets = []
my_params = []
for i, tensor in enumerate(tensor_list):
if tensor.grad is None:
tensor.grad = torch.zeros(tensor.size(),
dtype=tensor.dtype,
device=tensor.device)
param = tensor
tensor = tensor.grad
num_elements = tensor.numel()
tensor_offset = 0
#we need to offset to get to the right element
if i == 0 and param_offsets[i] > 0:
tensor_offset = param_offsets[i]
num_elements = num_elements - tensor_offset
# We don't need all elements of the tensor if this tensor is
# larger than we have space for in our curr sub-partition
if num_elements > (sub_partition_size - current_size):
num_elements = sub_partition_size - current_size
#we need a narrow view of the tensor based on the tensor offset and number of elements that
#we need from this tensor
if tensor_offset > 0 or num_elements < tensor.numel():
flat_tensor_list.append(tensor.contiguous().view(-1).narrow(
0,
int(tensor_offset),
int(num_elements)).to(dtype))
else:
flat_tensor_list.append(tensor.to(dtype))
my_params.append(param)
#remember offset into partition and #elems for this tensor
my_offsets.append((current_size, num_elements))
current_size = current_size + num_elements
            #this means it's the last partition and does not align with the dp boundary. We need to pad before flattening
if current_size < sub_partition_size:
my_offsets.append((None, None))
my_params.append(None)
if len(tensor_list) == 0:
                    assert default_device is not None
flat_tensor_list.append(
torch.zeros(int(sub_partition_size - current_size),
dtype=dtype,
device=default_device))
else:
flat_tensor_list.append(
torch.zeros(int(sub_partition_size - current_size),
dtype=dtype,
device=tensor_list[0].device))
partition_params.append(my_params) #flat_tensor_list)
final_param_offsets.append(my_offsets)
assert len(flat_tensor_list) == len(my_offsets), "{} {}".format(len(flat_tensor_list), len(my_offsets))
flat_sub_partitions.append(_flatten_dense_tensors(flat_tensor_list))
if num_comm_intervals is not None and len(
flat_sub_partitions) < num_comm_intervals:
# logger.info("padding w. sub partitions to ensure uniform communication")
device = flat_sub_partitions[0].device
for _ in range(num_comm_intervals - len(flat_sub_partitions)):
flat_sub_partitions.append(
torch.zeros(int(sub_partition_size),
dtype=dtype,
device=device))
partition_params.append([None])
final_param_offsets.append([(None, None)])
if return_partition_params:
assert len(flat_sub_partitions) == len(partition_params)
assert len(partition_params) == len(final_param_offsets), "{} {}".format(len(partition_params), len(final_param_offsets))
return flat_sub_partitions, partition_params, final_param_offsets
return flat_sub_partitions
def zero_grad(self, set_grads_to_None=True):
"""
Zero FP16 parameter grads.
"""
# FP32 grad should never exist.
# For speed, set model fp16 grad to None by default
for group in self.fp16_groups:
for p in group:
if set_grads_to_None:
p.grad = None
else:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
def free_grad_in_param_list(self, param_list):
for p in param_list:
if isinstance(p, list):
for _p in p:
_p.grad = None
else:
p.grad = None
def reduce_scatter_gradients(self,
postscale_gradients,
gradient_predivide_factor,
gradient_average):
world_size = dist.get_world_size(group=self.dp_process_group)
local_rank = dist.get_rank(group=self.dp_process_group)
for i, group in enumerate(self.fp16_groups):
num_comm_intervals = self.num_comm_intervals_per_group[i]
all_sub_partitions = []
for rank in range(world_size):
# gsp is list of partitions indexed by comm_idx
grad_sub_partitions = self.get_flat_sub_partitions(
comm_tensor_list=self.params_in_rank_sub_partitions[i][rank],
comm_param_offsets=self.params_in_rank_sub_partitions_offsets[i]
[rank],
dtype=torch.half,
default_device=self.default_device,
sub_partition_size=self.sub_partition_sizes[i],
num_comm_intervals=self.num_comm_intervals_per_group[i])
all_sub_partitions.append(grad_sub_partitions)
assert len(grad_sub_partitions) == num_comm_intervals
local_comm_partitions = []
for comm_idx in range(num_comm_intervals):
single_comm_all_partitions = []
for rank in range(world_size):
single_comm_all_partitions.append(all_sub_partitions[rank][comm_idx])
if postscale_gradients:
if gradient_predivide_factor != 1.0:
for partition in single_comm_all_partitions:
partition.mul_(1. / gradient_predivide_factor)
dist.reduce_scatter(output=single_comm_all_partitions[local_rank],
input_list=single_comm_all_partitions,
group=self.dp_process_group)
if gradient_average:
# Only need to average our local grads in post scaling
if gradient_predivide_factor != world_size:
single_comm_all_partitions[local_rank].mul_(
gradient_predivide_factor / world_size)
else:
for partition in single_comm_all_partitions:
partition.div_(world_size)
dist.reduce_scatter(output=single_comm_all_partitions[local_rank],
input_list=single_comm_all_partitions,
group=self.dp_process_group)
def step(self, closure=None):
# First compute norm for all group so we know if there is overflow
self.overflow = self.overflow_checker.check()
prev_scale = self.loss_scale
self._update_scale(self.overflow)
if self.overflow:
self.zero_grad()
if self.verbose:
logger.info("[deepspeed] OVERFLOW! Skipping step. Attempted loss "
"scale: {}, reducing to {}".format(
prev_scale,
self.loss_scale))
return self.overflow
norm_groups = []
local_sub_partitions_grad_groups = []
partition_id = dist.get_rank(group=self.dp_process_group)
for i, group in enumerate(self.fp16_groups):
#TODO RS: update get grad norm to support sub partitions
norm_groups.append(get_grad_norm(group, mpu=self.mpu))
#RS: update free grads w.r.t. sub partitions
#free gradients for all the parameters that are not updated by this process
self.free_grad_in_param_list(self.params_not_local[i])
# create flat gradient partitions for parameters updated by this process
local_grad_sub_partitions = self.get_flat_sub_partitions(
comm_tensor_list=self.params_in_rank_sub_partitions[i][partition_id],
comm_param_offsets=self.params_in_rank_sub_partitions_offsets[i]
[partition_id],
sub_partition_size=self.sub_partition_sizes[i],
dtype=self.local_sub_partitions_of_fp32_groups[i][0].dtype,
num_comm_intervals=self.num_comm_intervals_per_group[i],
default_device=self.default_device)
#RS: update all our local params with sub-partition grads
for idx, sub_partition_param in enumerate(self.local_sub_partitions_of_fp32_groups[i]):
sub_partition_param.grad = local_grad_sub_partitions[idx]
#RS: update free grads for sub-partitions
            #release all the gradients since we have already created a necessary copy in dp_grad_partition
self.free_grad_in_param_list(
self.params_in_rank_sub_partitions[i][partition_id])
local_sub_partitions_grad_groups.append(local_grad_sub_partitions)
#RS: update unscale/clip with sub partitions
self.unscale_and_clip_grads(local_sub_partitions_grad_groups, norm_groups)
self.optimizer.step()
#RS: clear our sub partition grads
#get rid of the fp32 gradients. Not needed anymore
for group in self.local_sub_partitions_of_fp32_groups:
for idx, sub_partition_param in enumerate(group):
sub_partition_param.grad = None
#group.grad = None
        #NOTE RS: removed norm_groups outer loop from original code, I don't think it's needed
#RS: copy all sub-partition fp32 data to fp16 sub partitions
# copy fp32 param data to fp16 partitions w.r.t. our local rank
for fp16_all_sub_partitions, fp32_local_sub_partitions in zip(self.parallel_sub_partitioned_fp16_groups, self.local_sub_partitions_of_fp32_groups):
for local_sub_partition_param_fp16, local_sub_partition_param_fp32 in zip(fp16_all_sub_partitions[partition_id], fp32_local_sub_partitions):
if self.hack_first_step == True:
local_sub_partition_param_fp32.data.copy_(local_sub_partition_param_fp16.data)
else:
local_sub_partition_param_fp16.data.copy_(local_sub_partition_param_fp32.data)
self.hack_first_step = False
#RS: all_gather/broadcast sub-partitions in separate comm calls
#gather the updated weights from everyone
for fp16_all_sub_partitions in self.parallel_comm_sub_partitioned_fp16_groups:
for comm_id, sub_partitions in enumerate(fp16_all_sub_partitions):
dist.all_gather(sub_partitions,
sub_partitions[partition_id],
group=self.dp_process_group)
# TODO: we probably don't need this? just to be safe
for i in range(len(norm_groups)):
updated_params = _unflatten_dense_tensors(self.fp16_groups_flat[i],
self.fp16_groups[i])
for p, q in zip(self.fp16_groups[i], updated_params):
p.data = q.data
return self.overflow
def unscale_and_clip_grads(self, grad_groups_flat, norm_groups):
total_norm = 0.0
for norm in norm_groups:
total_norm += norm**2.0
total_norm = math.sqrt(total_norm)
# compute combined scale factor for this group
combined_scale = self.loss_scale
if self.clip_grad > 0.:
# norm is in fact norm*scale
clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad
if clip > 1:
combined_scale = clip * self.loss_scale
for grad in grad_groups_flat:
if isinstance(grad, list):
sub_partitions = grad
for g in sub_partitions:
g.data.mul_(1. / combined_scale)
else:
grad.data.mul_(1. / combined_scale)
def backward(self, loss, retain_graph=False):
self.loss_scaler.backward(loss.float(), retain_graph=retain_graph)
def _update_scale(self, has_overflow=False):
self.loss_scaler.update_scale(has_overflow)
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
# Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
def _get_loss_scale(self):
return self.loss_scaler.loss_scale
def _set_loss_scale(self, value):
self.loss_scaler.cur_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale)
cur_scale = property(_get_loss_scale, _set_loss_scale)
# Return communication interval paddings for local rank and group
def _get_local_group_paddings(self, group_index):
local_rank = dist.get_rank(group=self.dp_process_group)
sub_partition_indices = [
local_rank + (comm_idx * self.partition_count)
for comm_idx in range(self.num_comm_intervals_per_group[group_index])
]
group_paddings = [
self.group_paddings[group_index][sub_idx]
for sub_idx in sub_partition_indices
]
return group_paddings
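    # For illustration (assumed sizes): with partition_count=4 and 2 comm intervals,
    # rank 1 picks paddings at flat sub-partition indices [1, 5], i.e. its own
    # sub-partition in each communication interval.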
# Return group tensor after removing paddings that are added for alignment to DP world size.
# This method works on the assumption that each group contains sub partitions.
def _get_groups_without_padding(self, groups_with_padding):
groups_without_padding = []
for group_index, group in enumerate(groups_with_padding):
group_paddings = self._get_local_group_paddings(group_index)
lean_sub_partitions = []
for sub_partition, padding in zip(group, group_paddings):
lean_length = sub_partition.numel() - padding
lean_sub_partitions.append(sub_partition[:lean_length])
groups_without_padding.append(lean_sub_partitions)
return groups_without_padding
# Return optimizer state after removing paddings that are added for alignment.
def _get_state_without_padding(self, state_with_padding, padding):
lean_state = {}
for key, value in state_with_padding.items():
if torch.is_tensor(value):
lean_length = value.numel() - padding
lean_state[key] = value[:lean_length]
else:
lean_state[key] = value
return lean_state
# Return base optimizer states.
# This method assumes that each param group contains a single flattened tensor.
def _get_base_optimizer_state(self):
optimizer_groups_state = []
for group_index, group in enumerate(self.optimizer.param_groups):
param_paddings = self._get_local_group_paddings(group_index)
group_lean_state = []
for param_idx, param in enumerate(group['params']):
lean_state = self._get_state_without_padding(self.optimizer.state[param],
param_paddings[param_idx])
group_lean_state.append(lean_state)
optimizer_groups_state.append(group_lean_state)
return optimizer_groups_state
def _rigid_state_dict(self):
"""
        Returns a dict that can be loaded for continued training with the same DP degree.
        This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
        of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict['base_optimizer_state'] = self.optimizer.state_dict()
state_dict[
'local_sub_partitions_of_fp32_groups'] = self.local_sub_partitions_of_fp32_groups
return state_dict
def _elastic_state_dict(self):
"""
Returns a dict that can be loaded for elastic training with different DP degree
"""
state_dict = {}
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict['base_optimizer_state'] = self._get_base_optimizer_state()
state_dict['zero_stage'] = ZERO_OPTIMIZATION_OPTIMIZER_STATES
state_dict['partition_count'] = self.partition_count
state_dict['num_comm_intervals_per_group'] = self.num_comm_intervals_per_group
# Remove paddings for DP alignment to enable loading for other alignment values
fp32_groups_without_padding = self._get_groups_without_padding(
self.local_sub_partitions_of_fp32_groups)
state_dict['local_sub_partitions_of_fp32_groups'] = fp32_groups_without_padding
return state_dict
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
if self.elastic_checkpoint:
return self._elastic_state_dict()
return self._rigid_state_dict()
# Extract the fp32 weights of the current rank from checkpoint by merging the
# sub partitions of communication intervals across ranks.
# Let sub_i_j = sub partition of rank i and comm interval j
# For 2 ranks and 2 comm intervals, checkpoints (minus padding) are as follows:
# rank 0 = [sub_0_0, sub_0_1]
# rank 1 = [sub_1_0, sub_1_1]
# Merge to get [sub_0_0, sub_1_0, sub_0_1, sub_1_1] => original un-padded flattened tensor.
def _retrieve_group_sub_partition_weights(self,
all_partition_fp32_weights,
max_elems_per_comm):
num_partitions = len(all_partition_fp32_weights)
num_comm_intervals = len(all_partition_fp32_weights[0])
num_sub_partitions = num_partitions * num_comm_intervals
all_sub_partition_weights = [None] * num_sub_partitions
for rank, partition_weights in enumerate(all_partition_fp32_weights):
for comm_idx, sub_partition_weights in enumerate(partition_weights):
#all_sub_partition_weights.append(sub_partition_weights)
sub_partition_idx = (comm_idx * num_partitions) + rank
all_sub_partition_weights[sub_partition_idx] = sub_partition_weights
flat_merged_weights = flatten_dense_tensors_sub_partition_aligned(
tensor_list=all_sub_partition_weights,
dp=dist.get_world_size(group=self.dp_process_group),
max_elements_per_comm=max_elems_per_comm,
pg=self.dp_process_group)
comm_partitions, dp_sub_partitions, element_intervals, sub_partition_size, num_comm_intervals = \
self.get_data_parallel_sub_partitions(
tensor=flat_merged_weights,
max_elements_per_comm=max_elems_per_comm,
world_size=dist.get_world_size(group=self.dp_process_group),
dp_process_group=self.dp_process_group
)
partition_id = dist.get_rank(group=self.dp_process_group)
return [sub_partition for sub_partition in dp_sub_partitions[partition_id]]
# Restore base optimizer fp32 weights from checkpoint by:
# 1) Merging fp32 weights from checkpoints of all partitions
# 2) Extracting fp32 weights for current partition from merged weights
# 3) Using extracted weights to update base optimizer weights directly.
def _restore_from_fp32_weights(self, all_state_dict):
sub_partition_of_fp32_groups = []
for group_idx in range(len(self.local_sub_partitions_of_fp32_groups)):
all_partition_fp32_weights = [
sd['local_sub_partitions_of_fp32_groups'][group_idx]
for sd in all_state_dict
]
max_elems_per_comm = self.max_elems_per_comm[group_idx]
sub_partition_weights = self._retrieve_group_sub_partition_weights(
all_partition_fp32_weights,
max_elems_per_comm)
sub_partition_of_fp32_groups.append(sub_partition_weights)
for current_group, saved_group in zip(self.local_sub_partitions_of_fp32_groups, sub_partition_of_fp32_groups):
for current_sub_part, saved_sub_part in zip(current_group, saved_group):
current_sub_part.data.copy_(saved_sub_part.data)
# Extract optimizer state for current partition from merged states of all partitions
def _partition_base_optimizer_state(self,
state_key,
all_partition_states,
max_elems_per_comm):
if not torch.is_tensor(all_partition_states[0]):
return all_partition_states[0]
alignment = dist.get_world_size(group=self.dp_process_group)
flat_merged_partitions = flatten_dense_tensors_sub_partition_aligned(
tensor_list=all_partition_states,
dp=dist.get_world_size(group=self.dp_process_group),
max_elements_per_comm=max_elems_per_comm,
pg=self.dp_process_group)
comm_partitions, dp_sub_partitions, element_intervals, sub_partition_size, num_comm_intervals = \
self.get_data_parallel_sub_partitions(
tensor=flat_merged_partitions,
max_elements_per_comm=max_elems_per_comm,
world_size=dist.get_world_size(group=self.dp_process_group),
dp_process_group=self.dp_process_group
)
partition_id = dist.get_rank(group=self.dp_process_group)
return [sub_partition for sub_partition in dp_sub_partitions[partition_id]]
# Compute the optimizer state partitions for the group by
# 1) Merging state values across the previous partitioning.
# 2) Repartition state values for the new partitioning
# 3) Return state corresponding to local partition
def _retrieve_group_optimizer_states(self, all_partition_states, max_elems_per_comm):
merged_optimizer_states = {}
num_partitions = len(all_partition_states)
num_comm_intervals = len(all_partition_states[0])
num_sub_partitions = num_partitions * num_comm_intervals
for rank, partition_state in enumerate(all_partition_states):
for comm_idx, sub_partition_state in enumerate(partition_state):
for key, value in sub_partition_state.items():
                    if key not in merged_optimizer_states:
merged_optimizer_states[key] = [None] * num_sub_partitions
sub_partition_idx = (comm_idx * num_partitions) + rank
merged_optimizer_states[key][sub_partition_idx] = value
group_optimizer_states = {}
for key, value in merged_optimizer_states.items():
group_optimizer_states[key] = self._partition_base_optimizer_state(
key,
value,
max_elems_per_comm)
return group_optimizer_states
# Restore base optimizer state from checkpoint by
# 1) Merging optimizer state from checkpoints of all partitions
# 2) Extracting optimizer state for current partition from the merged state
# 3) Using the extracted value to directly update the base optimizer.
def _restore_base_optimizer_state(self, state_dict_list):
base_optimizer_group_states = []
for group_idx in range(len(self.optimizer.param_groups)):
all_partition_group_states = [
sd['base_optimizer_state'][group_idx] for sd in state_dict_list
]
max_elems_per_comm = self.max_elems_per_comm[group_idx]
group_optimizer_states = self._retrieve_group_optimizer_states(
all_partition_group_states,
max_elems_per_comm)
base_optimizer_group_states.append(group_optimizer_states)
for group_idx, group in enumerate(self.optimizer.param_groups):
for param_idx, param in enumerate(group['params']):
for key, saved in base_optimizer_group_states[group_idx].items():
if torch.is_tensor(self.optimizer.state[param][key]):
current = self.optimizer.state[param][key]
current.data.copy_(saved[param_idx].data)
else:
self.optimizer.state[param][key] = saved
# Restore base optimizer fp32 weights from ZeRO fp16 weights
def _restore_from_fp16_weights(self):
partition_id = dist.get_rank(group=self.dp_process_group)
for fp16_partitions, fp32_partitions in zip(self.parallel_sub_partitioned_fp16_groups, self.local_sub_partitions_of_fp32_groups):
for fp16_sub_partition, fp32_sub_partition in zip(fp16_partitions[partition_id], fp32_partitions):
fp32_sub_partition.data.copy_(fp16_sub_partition.data)
# Refresh the fp32 master params from the fp16 copies.
def refresh_fp32_params(self):
self._restore_from_fp16_weights()
def _rigid_load_state_dict(self, state_dict, load_optimizer_states=True):
# I think it should actually be ok to reload the optimizer before the model.
self.loss_scaler = state_dict['loss_scaler']
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.overflow = state_dict['overflow']
if load_optimizer_states:
self.optimizer.load_state_dict(state_dict['base_optimizer_state'])
for curr_group, saved_group in zip(self.local_sub_partitions_of_fp32_groups, state_dict['local_sub_partitions_of_fp32_groups']):
for curr_param, saved_param in zip(curr_group, saved_group):
curr_param.data.copy_(saved_param.data)
def _elastic_load_state_dict(self,
state_dict_list,
load_optimizer_states=True,
load_from_fp32_weights=False):
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
self.loss_scaler = state_dict_list[0]['loss_scaler']
self.dynamic_loss_scale = state_dict_list[0]['dynamic_loss_scale']
self.overflow = state_dict_list[0]['overflow']
if load_optimizer_states:
self._restore_base_optimizer_state(state_dict_list)
if load_from_fp32_weights:
self._restore_from_fp32_weights(state_dict_list)
else:
self._restore_from_fp16_weights()
def load_state_dict(self,
state_dict_list,
load_optimizer_states=True,
load_from_fp32_weights=False):
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
if self.elastic_checkpoint:
self._elastic_load_state_dict(state_dict_list,
load_optimizer_states,
load_from_fp32_weights)
else:
self._rigid_load_state_dict(
state_dict_list[dist.get_rank(group=self.dp_process_group)],
load_optimizer_states)
def _dump_optimizer_state(self, message):
logger.info(f'{message}')
for i, group in enumerate(self.optimizer.param_groups):
for j, param in enumerate(group['params']):
for key, value in self.optimizer.state[param].items():
t_stats = [
value.min(),
value.max(),
(value.max() - value.min()),
value.mean()
]
stats = [float(t) for t in t_stats]
logger.info(
f'group/param/key/min/max/delta/mean = {i}, {j}, {key}: {stats}')
| 52,782 | 45.79344 | 155 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/InstructGPT/run_PopQA.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from tqdm import tqdm
import argparse
import os
import time
import json
import torch
import random
import numpy as np
import pandas as pd
import openai
openai.api_key = "YOUR_API_KEY"
seed = 633
torch.backends.cudnn.deterministic = True
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
print("Cuda:", torch.cuda.is_available())
print("pwd", os.getcwd())
from transformers import AutoTokenizer, AutoModelForCausalLM
from util_clm import convert_model_to_int8_on_gpu
import jsonlines
def load_jsonlines(file):
with jsonlines.open(file, "r") as jsonl_f:
lst = [obj for obj in jsonl_f]
return lst
q_templates = {
22: "What is {}'s occupation?",
218: "In what city was {} born?",
91: "What genre is {}?",
257: "Who is the father of {}?",
182: "In what country is {}?",
164: "Who was the producer of {}?",
526: "Who was the director of {}?",
97: "What is {} the capital of?",
533: "Who was the screenwriter for {}?",
639: "Who was the composer of {}?",
472: "What color is {}?",
106: "What is the religion of {}?",
560: "What sport does {} play?",
484: "Who is the author of {}?",
292: "Who is the mother of {}?",
422: "What is the capital of {}?",
}
completion_template = (
"Q: {} A:" # "{}" # "Query: {}\nResult:" # "Q: {} A:" # "{} The answer is"
)
genread_template = "Generate a background document from Wikipedia to answer the given question. {}" # This prompt comes from the GenRead paper
def call_request(prompt, model, tokenizer, max_new_tokens=15):
max_inpt_tokens = tokenizer.model_max_length
if (
len(prompt) > tokenizer.model_max_length
): # conservative lower bound, since each token is at least 1 character
inpts = tokenizer(prompt, return_tensors="pt")
new_prompt = tokenizer.decode(
inpts.input_ids[0, -(max_inpt_tokens - max_new_tokens) :]
)
else:
new_prompt = prompt
    # try to get a response from the model multiple times if there's a timeout
while True:
try:
# if i > 0:
# print("Retrying request")
response = openai.Completion.create(
model=model,
prompt=new_prompt,
temperature=0.0,
max_tokens=max_new_tokens,
logprobs=5,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.0,
)
break
except Exception as e:
# print(e)
print("Timeout, trying again")
time.sleep(1)
pred = response["choices"][0]["text"]
if pred.startswith("\n\n"):
pred = pred[2:]
pred = pred.split("\n")[0]
return pred, response.to_dict_recursive()
def call_model(
prompt, model, tokenizer, device, max_new_tokens=15, model_max_length=None
):
max_inpt_tokens = (
tokenizer.model_max_length if model_max_length is None else model_max_length
)
inpts = tokenizer(prompt, return_tensors="pt").to(device)
gen = model.generate(
input_ids=inpts.input_ids[:, -(max_inpt_tokens - max_new_tokens) :],
attention_mask=inpts.attention_mask[:, -(max_inpt_tokens - max_new_tokens) :],
pad_token_id=tokenizer.eos_token_id,
max_new_tokens=max_new_tokens,
num_beams=1,
do_sample=False,
)
text = tokenizer.decode(gen[0])
actual_prompt = tokenizer.decode(
inpts.input_ids[0, -(max_inpt_tokens - max_new_tokens) :]
)
pred = text[len(actual_prompt) :]
if pred.startswith("\n\n"):
pred = pred[2:]
pred = pred.split("\n")[0]
return pred, text
def clip_paragraph(text, eval_method):
if eval_method in ["BM25", "genread"]:
return text
split = text.split(". ")
return ". ".join(split[:-1]) + "."
def get_few_shot_text_with_retrieval(row, retrieval_dict, eval_method):
if eval_method == "vanilla":
return completion_template.format(row.question) + " " + row.obj
# retrieval_dict[row.id]["ctxs"][0]
if row.question.replace("?", "").lower() not in retrieval_dict:
print("missing retrieval")
return completion_template.format(row.question) + " " + row.obj
else:
retrieval = retrieval_dict[row.question.replace("?", "").lower()]["ctxs"][0]
retrieved_text = clip_paragraph(retrieval["text"], eval_method)
return (
retrieved_text
+ "\n\n"
+ completion_template.format(row.question)
+ " "
+ row.obj
)
def get_few_shot_text(row, eval_method):
return completion_template.format(row.question) + " " + row.obj
def get_genread_passage(
question, genread_template, generate_function, max_new_tokens=150
):
prompt = genread_template.format(question)
return generate_function(prompt, max_new_tokens=max_new_tokens)[0]
def get_few_shot_examples_genread(
knowledge,
generate_function,
n_examples,
genread_template,
is_templatedQA,
max_new_tokens=150,
):
if is_templatedQA:
few_shot_examples = dict()
all_pids = list(q_templates.keys())
examples_per_template = n_examples // (len(q_templates) - 1)
for pid in all_pids:
for row2 in (
knowledge[knowledge.prop_id == pid].sample(n=examples_per_template).iloc
):
if pid not in few_shot_examples:
few_shot_examples[pid] = []
generation = get_genread_passage(
row2.question,
genread_template,
generate_function,
max_new_tokens=max_new_tokens,
)
few_shot_examples[pid].append(
get_few_shot_text_with_retrieval(
row2,
{row2.question: {"ctxs": [{"id": -1, "text": generation}]}},
"genread",
)
)
else:
few_shot_examples = []
for row2 in knowledge.sample(n=n_examples + 1).iloc:
generation = get_genread_passage(
row2.question,
genread_template,
generate_function,
max_new_tokens=max_new_tokens,
)
few_shot_examples.append(
get_few_shot_text_with_retrieval(
row2,
{row2.question: {"ctxs": [{"id": -1, "text": generation}]}},
"genread",
)
)
return few_shot_examples
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, default="text-davinci-002")
parser.add_argument("--input_file", type=str)
parser.add_argument("--alias", type=str)
parser.add_argument("--n_examples", type=int, default=15)
parser.add_argument(
"--eval_method",
type=str,
default="contriever",
choices=["vanilla", "BM25", "contriever", "genread"],
)
parser.add_argument(
"--ret_path",
type=str,
default=None,
required=False,
help="path to retrieved documents jsonl",
)
parser.add_argument("--device", type=str, default="cuda")
parser.add_argument("--max_new_tokens", type=int, default=15)
parser.add_argument("--sample", type=int, default=0, help="if 0, use all examples")
parser.add_argument(
"--continue_from", type=str, help="path to previous results file"
)
parser.add_argument("--int8bit", action="store_true")
parser.add_argument(
"--parallel",
type=str,
help="string of format 'i.n_workers' where i is the index of the worker",
)
args = parser.parse_args()
use_gpt3 = args.model_name in {
"text-davinci-003",
"text-davinci-002",
"text-curie-001",
"text-babbage-001",
"text-ada-001",
}
if use_gpt3:
tokenizer = AutoTokenizer.from_pretrained("gpt2")
generate = lambda prompt, max_new_tokens: call_request(
prompt, args.model_name, tokenizer, max_new_tokens=max_new_tokens
)
else:
gpt = args.model_name
device = args.device
tokenizer = AutoTokenizer.from_pretrained(gpt)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.pad_token_id = tokenizer.eos_token_id
if args.int8bit:
model = convert_model_to_int8_on_gpu(
AutoModelForCausalLM.from_pretrained(gpt), device
)
else:
model = AutoModelForCausalLM.from_pretrained(gpt).eval().to(device)
if "opt" in args.model_name or args.model_name == "EleutherAI/gpt-neox-20b":
generate = lambda prompt, max_new_tokens: call_model(
prompt,
model=model,
tokenizer=tokenizer,
device=device,
max_new_tokens=max_new_tokens,
model_max_length=2048,
)
else:
generate = lambda prompt, max_new_tokens: call_model(
prompt,
model=model,
tokenizer=tokenizer,
device=device,
max_new_tokens=max_new_tokens,
)
input_path = args.input_file
knowledge = pd.read_csv(input_path, sep="\t")
if args.continue_from is not None:
results = pd.read_csv(args.continue_from, sep="\t")
knowledge = knowledge[~knowledge.id.isin(results.id)]
n = len(knowledge) if args.sample == 0 else args.sample
sample = knowledge.sample(n=n, replace=False)
if args.parallel is not None:
worker_num, n_workers = map(int, args.parallel.split("."))
sample = sample.iloc[worker_num::n_workers]
n_examples = args.n_examples
is_templatedQA = True
examples_per_template = n_examples // (len(q_templates) - 1)
preds = []
prompts = []
accuracy = []
responses = []
if args.eval_method in ["BM25", "contriever"]:
has_answer = []
retrieval_ids = []
with open(args.ret_path) as f:
retrieval_dict = {
json.loads(s)["question"]: json.loads(s) for s in f.readlines()
}
# print(retrieval_dict)
if args.eval_method == "genread":
genread_few_shot_examples = get_few_shot_examples_genread(
knowledge,
generate,
n_examples,
genread_template,
is_templatedQA,
max_new_tokens=150,
)
has_answer = []
gen_passages = []
# main loop
row_num = 0
for row in tqdm(sample.iloc, total=n):
if row_num < 10000:
row_num += 1
continue
# get few shot examples text
if n_examples == 0:
few_shot_examples_text = ""
else:
few_shot_examples = []
if args.eval_method == "genread":
if is_templatedQA:
other_pids = list(q_templates.keys())
other_pids.remove(row.prop_id)
few_shot_examples = []
for pid in other_pids:
few_shot_examples.extend(
random.sample(
genread_few_shot_examples[pid], examples_per_template
)
)
else:
few_shot_examples = random.sample(
[
ex
for ex in genread_few_shot_examples
if row.question not in ex
],
n_examples,
)
else:
if is_templatedQA:
other_pids = list(q_templates.keys())
other_pids.remove(row.prop_id)
for pid in other_pids:
for row2 in (
knowledge[knowledge.prop_id == pid]
.sample(n=examples_per_template)
.iloc
):
few_shot_examples.append(
get_few_shot_text_with_retrieval(
row2, retrieval_dict, args.eval_method
)
if args.eval_method in ["BM25", "contriever"]
else get_few_shot_text(row2, args.eval_method)
)
else:
for row2 in (
knowledge[knowledge.question != row.question]
.sample(n=n_examples)
.iloc
):
few_shot_examples.append(
get_few_shot_text_with_retrieval(
row2, retrieval_dict, args.eval_method
)
if args.eval_method in ["BM25", "contriever"]
else get_few_shot_text(row2, args.eval_method)
)
np.random.shuffle(few_shot_examples)
few_shot_examples_text = "\n\n".join(few_shot_examples) + "\n\n"
# get prompt
if args.eval_method == "vanilla":
prompt = few_shot_examples_text + completion_template.format(row.question)
elif args.eval_method in ["BM25", "contriever"]:
query = row.question
        try:
            # alternative lookup by id: retrieval_dict[row.id]["ctxs"][0]
            retrieval = retrieval_dict[query]["ctxs"][0]
        except (KeyError, IndexError):
            print(
                "No retrieval for",
                query,
                " Example query:",
                list(retrieval_dict.keys())[0],
            )
            retrieval = {"text": "", "id": np.nan, "hasanswer": False}
# retrieved_text = clip_paragraph(
# retrieval["text"], eval_method=args.eval_method
# )
retrieved_text = (
retrieval_dict[query]["ctxs"][0]["text"]
+ "\n\n"
+ retrieval_dict[query]["ctxs"][1]["text"]
+ "\n\n"
+ retrieval_dict[query]["ctxs"][2]["text"]
)
retrieval_id = retrieval["id"]
prompt = (
few_shot_examples_text
+ retrieved_text
+ "\n\n"
+ completion_template.format(row.question)
)
has_answer.append(retrieval["hasanswer"])
retrieval_ids.append(retrieval_id)
elif args.eval_method == "genread":
generation = get_genread_passage(
row.question, genread_template, generate, max_new_tokens=150
)
prompt = (
few_shot_examples_text
+ generation
+ "\n\n"
+ completion_template.format(row.question)
)
gen_passages.append(generation)
# generate response
pred, response = generate(prompt, max_new_tokens=args.max_new_tokens)
prompts.append(prompt)
preds.append(pred)
responses.append(response)
# compute accuracy
possible_answers = json.loads(row.possible_answers)
is_correct = False
genread_has_answer = False
    for pa in possible_answers:
        if pa in pred or pa.lower() in pred or pa.capitalize() in pred:
            is_correct = True
        # parenthesized so the whole answer check is gated on the genread method
        if args.eval_method == "genread" and (
            pa in response
            or pa.lower() in response
            or pa.capitalize() in response
        ):
            genread_has_answer = True
accuracy.append(is_correct)
if args.eval_method == "genread":
has_answer.append(genread_has_answer)
    if len(preds) % 500 == 0:
        temp_sample = sample.iloc[: len(preds)].copy()
        temp_sample["is_correct"] = accuracy
        # append the running accuracy so long runs can be monitored
        with open("gpt3.txt", "a") as g:
            g.write(str(temp_sample.is_correct.mean()) + "\n")
# save results intermittently
if len(preds) % 100000 == 0:
temp_sample = sample.iloc[: len(preds)].copy()
temp_sample["pred"] = preds
temp_sample["prompt"] = prompts
temp_sample["generation"] = responses
temp_sample["is_correct"] = accuracy
if args.eval_method in ["BM25", "contriever"]:
temp_sample["has_answer"] = has_answer
temp_sample["retrieval_id"] = retrieval_ids
if args.eval_method == "genread":
temp_sample["has_answer"] = has_answer
temp_sample["gen_passage"] = gen_passages
model_name_alias = args.model_name.replace("/", "_")
        os.makedirs("results/temp/", exist_ok=True)
worker_str = "" if args.parallel is None else f"-worker={args.parallel}"
output_path = f"results/temp/model={model_name_alias}-input={args.alias}-method={args.eval_method}-shots={n_examples}-n={len(temp_sample)}{'_int8bit' if args.int8bit is True else ''}{worker_str}.csv"
temp_sample.to_csv(output_path, index=False)
sample["is_correct"] = accuracy
sample["prompt"] = prompts
sample["pred"] = preds
sample["generation"] = responses
if args.eval_method in ["BM25", "contriever"]:
sample["has_answer"] = has_answer
sample["retrieval_id"] = retrieval_ids
if args.eval_method == "genread":
sample["has_answer"] = has_answer
sample["gen_passage"] = gen_passages
print(sample.is_correct.mean())
model_name_alias = args.model_name.replace("/", "_")
worker_str = "" if args.parallel is None else f"-worker={args.parallel}"
sample.to_csv(
f"results/model={model_name_alias}-input={args.alias}-method={args.eval_method}-shots={n_examples}-n={len(sample)}{'_int8bit' if args.int8bit is True else ''}{worker_str}.csv"
)
if __name__ == "__main__":
main()
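# Example invocation -- a minimal sketch, not taken from the repository. The script
# name and the spelling of the flags parsed earlier in main() (model name, input
# file, alias, eval method, number of shots, device, retrieval path) are
# assumptions; only --max_new_tokens, --sample, --continue_from, --int8bit and
# --parallel are defined in the snippet above.
#   python evaluate.py \
#       --model_name facebook/opt-1.3b \
#       --eval_method BM25 \
#       --max_new_tokens 15 --sample 0 --parallel 0.4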
| 18,470 | 34.521154 | 211 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/arguments.py | # coding=utf-8
"""argparser configuration"""
import argparse
import os
import torch
import deepspeed
def add_model_config_args(parser: argparse.ArgumentParser):
"""Model arguments"""
group = parser.add_argument_group("model", "model configuration")
group.add_argument(
"--model-config",
type=str,
default=None,
help="the configuration of the base model",
)
group.add_argument(
"--cpu-optimizer", action="store_true", help="Run optimizer on CPU"
)
group.add_argument(
"--cpu_torch_adam",
action="store_true",
help="Use Torch Adam as optimizer on CPU.",
)
return parser
def add_fp16_config_args(parser: argparse.ArgumentParser):
"""Mixed precision arguments."""
group = parser.add_argument_group("fp16", "fp16 configurations")
group.add_argument("--fp16", action="store_true", help="Run model in fp16 mode")
group.add_argument(
"--fp32-embedding", action="store_true", help="embedding in fp32"
)
group.add_argument(
"--fp32-layernorm", action="store_true", help="layer norm in fp32"
)
group.add_argument(
"--fp32-tokentypes", action="store_true", help="embedding token types in fp32"
)
group.add_argument(
"--fp32-allreduce", action="store_true", help="all-reduce in fp32"
)
group.add_argument(
"--hysteresis", type=int, default=2, help="hysteresis for dynamic loss scaling"
)
group.add_argument(
"--loss-scale",
type=float,
default=None,
help="Static loss scaling, positive power of 2 "
"values can improve fp16 convergence. If None, dynamic loss scaling is used.",
)
group.add_argument(
"--loss-scale-window",
type=float,
default=1000,
help="Window over which to raise/lower dynamic scale",
)
group.add_argument(
"--min-scale",
type=float,
default=1,
help="Minimum loss scale for dynamic loss scale",
)
return parser
def add_training_args(parser: argparse.ArgumentParser):
"""Training arguments."""
group = parser.add_argument_group("train", "training configurations")
group.add_argument("--do-train", action="store_true", help="whether do training")
group.add_argument("--do-valid", action="store_true", help="whether do validation")
group.add_argument("--do-valid-and-eval", action="store_true")
group.add_argument("--do-eval", action="store_true", help="whether do testing")
group.add_argument(
"--do-infer",
action="store_true",
help="whether do inference (testing without labels)",
)
group.add_argument(
"--train-ratio",
type=float,
default=1.0,
help="the ratio of the training set used for training",
)
group.add_argument(
"--train-num",
type=int,
default=-1,
help="the number of training samples, -1 for all sample",
)
group.add_argument(
"--dev-ratio",
type=float,
default=1.0,
help="the ratio of the training set used for validation",
)
group.add_argument(
"--dev-num",
type=int,
default=-1,
help="the number of validation samples, -1 for all sample",
)
group.add_argument(
"--test-ratio",
type=float,
default=1.0,
help="the ratio of the training set used for testing",
)
group.add_argument(
"--test-num",
type=int,
default=-1,
help="the number of testing samples, -1 for all sample",
)
group.add_argument("--epochs", type=int, default=1, help="the epochs for training")
group.add_argument(
"--batch-size", type=int, default=4, help="Data Loader batch size"
)
group.add_argument(
"--dev-batch-size", type=int, default=2, help="Data Loader batch size"
)
group.add_argument(
"--gradient-accumulation-steps",
type=int,
default=1,
help="gradient accumulation steps",
)
group.add_argument(
"--weight-decay",
type=float,
default=0.01,
help="weight decay coefficient for L2 regularization",
)
group.add_argument(
"--checkpoint-activations",
action="store_true",
help="checkpoint activation to allow for training "
"with larger models and sequences",
)
group.add_argument(
"--checkpoint-num-layers",
type=int,
default=1,
help="chunk size (number of layers) for checkpointing",
)
group.add_argument(
"--num-checkpoints", type=int, default=24, help="For activation checkpointing"
)
group.add_argument(
"--deepspeed-activation-checkpointing",
action="store_true",
help="uses activation checkpointing from deepspeed",
)
group.add_argument("--clip-grad", type=float, default=1.0, help="gradient clipping")
group.add_argument(
"--train-iters",
type=int,
default=1000000,
help="total number of iterations to train over all training runs",
)
group.add_argument("--log-interval", type=int, default=100, help="report interval")
group.add_argument(
"--max-save", type=int, default=-1, help="max checkpoints to save"
)
group.add_argument("--seed", type=int, default=1234, help="random seed")
group.add_argument("--few-data-num", type=int, default=None)
group.add_argument("--few-data-names", type=str, default=None)
group.add_argument("--data-aug", type=int, default=None)
group.add_argument("--rand-real-label", action="store_true")
group.add_argument("--rand-pseudo-label", action="store_true")
# Learning rate.
group.add_argument(
"--lr-decay-iters",
type=int,
default=None,
help="number of iterations to decay LR over,"
" If None defaults to `--train-iters`*`--epochs`",
)
group.add_argument(
"--lr-decay-style",
type=str,
default="linear",
choices=["constant", "linear", "cosine", "exponential", "noam"],
help="learning rate decay function",
)
group.add_argument("--lr", type=float, default=1.0e-4, help="initial learning rate")
group.add_argument(
"--warmup",
type=float,
default=0.0,
help="percentage of data to warmup on (.01 = 1% of all "
"training iters). Default 0.01",
)
group.add_argument("--warmup-iter", type=int, default=0)
# save
group.add_argument(
"--save",
type=str,
default=None,
help="Output directory to save checkpoints to.",
)
group.add_argument(
"--save-interval",
type=int,
default=5000,
help="number of iterations between saves",
)
group.add_argument(
"--no-save-optim", action="store_true", help="Do not save current optimizer."
)
# load
group.add_argument(
"--load",
type=str,
default=None,
help="Path to a directory containing a model checkpoint.",
)
group.add_argument(
"--load-oprimizer-states",
action="store_true",
help="whether to load optimizer states",
)
group.add_argument(
"--load-lr-scheduler-states",
action="store_true",
help="whether to load learning rate scheduler states",
)
group.add_argument(
"--no-load-optim",
action="store_true",
help="Do not load optimizer when loading checkpoint.",
)
group.add_argument(
"--log-file", type=str, default=None, help="the path to save log.txt file"
)
# distributed training args
group.add_argument(
"--distributed-backend",
default="nccl",
help="which backend to use for distributed training. One of [gloo, nccl]",
)
group.add_argument(
"--local_rank",
type=int,
default=None,
help="local rank passed from distributed launcher",
)
return parser
def add_prompt_args(parser: argparse.ArgumentParser):
group = parser.add_argument_group("prompt", "prompt configurations")
group.add_argument(
"--load_prompt", type=str, default=None, help="the path to load prompt from"
)
group.add_argument(
"--prompt-tune", action="store_true", help="whether to do prompt tuning"
)
group.add_argument(
"--prompt-config",
type=str,
default=None,
help="the path of the prompt configuration",
)
group.add_argument(
"--save-prompt-only",
action="store_true",
help="whether to save the prompt only. If true, only prompts will be saved otherwise, "
"the whole model together with the prompt will be saved.",
)
return parser
def add_evaluation_args(parser: argparse.ArgumentParser):
"""Evaluation arguments."""
group = parser.add_argument_group("validation", "validation configurations")
group.add_argument(
"--eval-batch-size",
type=int,
default=None,
help="Data Loader batch size for evaluation datasets. Defaults to `--batch-size`",
)
group.add_argument(
"--eval-iters",
type=int,
default=100,
help="number of iterations to run for evaluation validation/test for",
)
group.add_argument(
"--eval-interval",
type=int,
default=1000,
help="interval between running evaluation on validation set",
)
group.add_argument("--eval-per-prompt", action="store_true")
group.add_argument("--no-norm-cand-loss", action="store_true")
return parser
def add_text_generate_args(parser: argparse.ArgumentParser):
"""Text generate arguments."""
group = parser.add_argument_group("Text generation", "configurations")
group.add_argument("--sampling", action="store_true")
group.add_argument(
"--temperature", type=float, default=1.2, help="The temperature of sampling."
)
group.add_argument("--top_p", type=float, default=0.9, help="Top-p sampling.")
group.add_argument("--top_k", type=int, default=0, help="Top-k sampling.")
group.add_argument(
"--max-generation-length",
type=int,
default=64,
help="The maximum sequence length to generate.",
)
group.add_argument(
"--min-generation-length",
type=int,
default=0,
help="The minimum sequence length to generate.",
)
group.add_argument(
"--num-beams", type=int, default=1, help="The beam number of beam search."
)
group.add_argument(
"--no-repeat-ngram-size",
type=int,
default=0,
help="The n-gram whose length is less than this option will appear at most once in the whole dialog.",
)
group.add_argument(
"--repetition-penalty",
type=float,
default=1,
help="Repetition penalty, to prevent repeated words.",
)
group.add_argument(
"--early-stopping", action="store_true", help="Early-stopping while generating."
)
group.add_argument(
"--length-penalty",
type=float,
default=0,
help="Length penalty, to prevent short generation.",
)
group.add_argument(
"--rule-path",
type=str,
default=None,
help="The directory that contains hand-written rules.",
)
return parser
def add_data_args(parser: argparse.ArgumentParser):
"""Train/valid/test data arguments."""
group = parser.add_argument_group("data", "data configurations")
group.add_argument(
"--model-parallel-size", type=int, default=1, help="size of the model parallel."
)
group.add_argument(
"--data-path",
nargs="+",
type=str,
default=None,
help="Path to combined dataset to split.",
)
group.add_argument(
"--data-ext", type=str, default=".json", help="the extension of the data file"
)
group.add_argument(
"--data-name", type=str, default=None, help="the name of the dataset"
)
group.add_argument(
"--data-names", type=str, default=None, help="the name of the dataset"
)
group.add_argument(
"--data-prefix",
type=str,
default=None,
help="the prefix to add before each data sample",
)
group.add_argument(
"--num-workers",
type=int,
default=2,
help="Number of workers to use for dataloading",
)
group.add_argument(
"--tokenizer-path",
type=str,
default="tokenizer.model",
help="path used to save/load sentencepiece tokenization models",
)
group.add_argument(
"--enc-seq-length",
type=int,
default=512,
help="Maximum sequence length to process",
)
group.add_argument(
"--dec-seq-length",
type=int,
default=256,
help="Maximum sequence length to process",
)
group.add_argument("--pad-token", type=str, default="<pad>")
group.add_argument("--FiD", action="store_true")
group.add_argument(
"--passage_num", type=int, default=10, help="Number of passages to use for FiD"
)
return parser
def add_flan_args(parser: argparse.ArgumentParser):
group = parser.add_argument_group("flan", "data configurations")
group.add_argument("--flan-sample", action="store_true")
group.add_argument("--flan-sample-max", type=float, default=1000000)
return parser
def add_debug_args(parser: argparse.ArgumentParser):
group = parser.add_argument_group("debug", "data configurations")
group.add_argument("--debug-option", type=int, default=-1)
group.add_argument("--shuff-cand-idx", action="store_true")
return parser
def get_args():
"""Parse all the args."""
parser = argparse.ArgumentParser(description="PyTorch BERT Model")
parser = add_model_config_args(parser)
parser = add_fp16_config_args(parser)
parser = add_training_args(parser)
parser = add_evaluation_args(parser)
parser = add_data_args(parser)
parser = add_prompt_args(parser)
parser = add_text_generate_args(parser)
parser = add_flan_args(parser)
parser = add_debug_args(parser)
# Include DeepSpeed configuration arguments
parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args()
if not args.data_path:
print("WARNING: No training data specified")
args.cuda = torch.cuda.is_available()
args.rank = int(os.getenv("RANK", "0"))
args.world_size = int(os.getenv("WORLD_SIZE", "1"))
args.local_rank = int(os.getenv("LOCAL_RANK", "0"))
args.model_parallel_size = min(args.model_parallel_size, args.world_size)
if args.rank == 0:
print(
"using world size: {} and model-parallel size: {} ".format(
args.world_size, args.model_parallel_size
)
)
args.dynamic_loss_scale = False
if args.loss_scale is None:
args.dynamic_loss_scale = True
if args.rank == 0:
print(" > using dynamic loss scaling")
if args.data_path is not None and len(args.data_path) == 1:
args.data_path = args.data_path[0]
return args
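# Minimal usage sketch (an assumption-laden illustration, not part of the original
# file): get_args() expects a distributed launcher to have set RANK, WORLD_SIZE and
# LOCAL_RANK, and DeepSpeed's own flags are added by add_config_arguments above.
#   from arguments import get_args
#   args = get_args()   # e.g. launched with --lr 1e-4 --batch-size 4 --train-iters 10000
#   print(args.lr, args.batch_size, args.train_iters, args.fp16)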
| 15,323 | 29.344554 | 110 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/learning_rates.py | # coding=utf-8
"""PyTorch DataLoader for TFRecords"""
import torch
from torch.optim.lr_scheduler import _LRScheduler
import math
class AnnealingLR(_LRScheduler):
"""Anneals the learning rate from start to zero along a cosine curve."""
DECAY_STYLES = ['linear', 'cosine', 'exponential', 'constant', 'None', 'noam']
def __init__(self, optimizer, start_lr, warmup_iter, num_iters, decay_style=None, last_iter=-1, gradient_accumulation_steps=1):
self.optimizer = optimizer
self.start_lr = start_lr
self.warmup_iter = (warmup_iter // gradient_accumulation_steps) + 1
self.num_iters = last_iter + 1
self.end_iter = num_iters
self.gradient_accumulation_steps = gradient_accumulation_steps
self.decay_style = decay_style.lower() if isinstance(decay_style, str) else None
self.step(self.num_iters)
if torch.distributed.get_rank() == 0:
print('learning rate decaying', decay_style)
def get_lr(self):
# https://openreview.net/pdf?id=BJYwwY9ll pg. 4
if self.warmup_iter > 0 and self.num_iters <= self.warmup_iter:
if self.decay_style != self.DECAY_STYLES[5]:
return float(self.start_lr) * self.num_iters / self.warmup_iter
else:
return float(self.start_lr) / math.sqrt(self.warmup_iter) * self.num_iters / self.warmup_iter #* self.num_iters / self.warmup_iter / math.sqrt(self.warmup_iter)
else:
if self.decay_style == self.DECAY_STYLES[0]:
return self.start_lr*((self.end_iter-(self.num_iters-self.warmup_iter))/self.end_iter)
elif self.decay_style == self.DECAY_STYLES[1]:
return self.start_lr / 2.0 * (math.cos(math.pi * (self.num_iters - self.warmup_iter) / self.end_iter) + 1)
elif self.decay_style == self.DECAY_STYLES[2]:
#TODO: implement exponential decay
return self.start_lr
elif self.decay_style == self.DECAY_STYLES[5]:
return self.start_lr / math.sqrt(self.num_iters + 1)
else:
return self.start_lr
def step(self, step_num=None):
if step_num is None:
step_num = self.num_iters + 1
self.num_iters = step_num
new_lr = self.get_lr()
for group in self.optimizer.param_groups:
group['lr'] = new_lr
def state_dict(self):
sd = {
'start_lr': self.start_lr,
'warmup_iter': self.warmup_iter,
'num_iters': self.num_iters,
'decay_style': self.decay_style,
'end_iter': self.end_iter
}
return sd
def load_state_dict(self, sd):
self.start_lr = sd['start_lr']
self.warmup_iter = sd['warmup_iter']
self.num_iters = sd['num_iters']
self.end_iter = sd['end_iter']
self.decay_style = sd['decay_style']
self.step(self.num_iters)
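# Usage sketch (illustrative values; not part of the original file). AnnealingLR
# queries torch.distributed in its constructor, so a process group must already be
# initialized when it is built.
#   scheduler = AnnealingLR(optimizer, start_lr=1e-4, warmup_iter=1000,
#                           num_iters=100000, decay_style="linear",
#                           gradient_accumulation_steps=1)
#   scheduler.step()  # call once per optimizer step; updates group["lr"] in place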
| 2,974 | 40.901408 | 176 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/utils.py | # coding=utf-8
"""Utilities for logging and serialization"""
import os
import random
import numpy as np
import torch
from fp16 import FP16_Optimizer
import mpu
import deepspeed
from apex.optimizers import FusedAdam as Adam
from fp16 import FP16_Module
from learning_rates import AnnealingLR
from model import EncDecModel, EncDecConfig
from model import enc_dec_get_params_for_weight_decay_optimization, enc_dec_get_params_for_prompt_optimization
from model import DistributedDataParallel as DDP
def print_rank_0(message):
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print(message, flush=True)
else:
print(message, flush=True)
def print_args(args):
"""Print arguments."""
print('arguments:', flush=True)
for arg in vars(args):
dots = '.' * (29 - len(arg))
print(' {} {} {}'.format(arg, dots, getattr(args, arg)), flush=True)
def save_rank_0(args, message):
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
with open(args.log_file, "a") as f:
f.write(message + "\n")
f.flush()
else:
with open(args.log_file, "a") as f:
f.write(message + "\n")
f.flush()
def save_preds_t0(args, name, prompt_names, step, all_res_prompt, all_preds_prompt, all_labels_prompt):
s = np.mean([np.mean([vv for vv in v.values()]) for v in all_res_prompt])
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
os.makedirs(os.path.join(args.save, "preds", name), exist_ok=True)
with open(os.path.join(args.save, "preds", name, "{:.2f}_{}.txt".format(s, step)), "w") as f:
f.write(str(all_res_prompt) + "\n")
for pid in range(len(prompt_names)):
f.write("\n" + str(prompt_names[pid]) + "\n")
for p, l in zip(all_preds_prompt[pid], all_labels_prompt[pid]):
f.write(str(p) + "\t\t" + str(l) + "\n")
def save_preds_prompts(args, name, dataset, step, res, all_preds_prompts, all_labels_prompts):
s = np.mean([v for v in res[0].values()])
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
os.makedirs(os.path.join(args.save, "preds", name), exist_ok=True)
with open(os.path.join(args.save, "preds", name, "{:.2f}_{}.txt".format(s, step)), "w") as f:
f.write(str(res) + "\n")
for pid in dataset.all_data[name]["prompt_ids"]:
f.write("\n" + str(dataset.all_data[name]["prompt_templates"][pid]) + "\n")
for p, l in zip(all_preds_prompts[pid], all_labels_prompts[pid]):
f.write(str(p) + "\t\t" + str(l) + "\n")
def save_preds(args, name, step, res, all_preds, all_labels):
s = np.mean([v for v in res[0].values()])
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
os.makedirs(os.path.join(args.save, "preds", name), exist_ok=True)
with open(os.path.join(args.save, "preds", name, "{:.2f}_{}.txt".format(s, step)), "w") as f:
f.write(str(res) + "\n")
for p, l in zip(all_preds, all_labels):
f.write(str(p) + "\t\t" + str(l) + "\n")
def get_model(args, vocab_size, prompt_config=None):
"""Build the model."""
print_rank_0('building Enc-Dec model ...')
config = EncDecConfig.from_json_file(args.model_config)
config.vocab_size = vocab_size
model = EncDecModel(config,
parallel_output=True,
checkpoint_activations=args.checkpoint_activations,
checkpoint_num_layers=args.checkpoint_num_layers,
prompt_config=prompt_config,
args=args)
if mpu.get_data_parallel_rank() == 0:
print(' > number of parameters on model parallel rank {}: {}'.format(
mpu.get_model_parallel_rank(),
sum([p.nelement() for p in model.parameters()])), flush=True)
# To prevent OOM for model sizes that cannot fit in GPU memory in full precision
if args.deepspeed and args.fp16:
model.half()
# GPU allocation.
model.cuda(torch.cuda.current_device())
if args.prompt_tune and prompt_config["init_scratch"]:
model.init_prompt_embeds()
# Fp16 conversion.
if args.fp16:
model = FP16_Module(model)
# Wrap model for distributed training.
model = DDP(model)
return model
def get_optimizer(model, args, prompt_config=None):
"""Set up the optimizer."""
# Build parameter groups (weight decay and non-decay).
while isinstance(model, (DDP, FP16_Module)):
model = model.module
if args.prompt_tune and prompt_config["fix_model"]:
param_groups = enc_dec_get_params_for_prompt_optimization(model)
else:
param_groups = enc_dec_get_params_for_weight_decay_optimization(model)
# Add model parallel attribute if it is not set.
for param_group in param_groups:
for param in param_group['params']:
if not hasattr(param, 'model_parallel'):
param.model_parallel = False
if args.cpu_optimizer:
if args.cpu_torch_adam:
cpu_adam_optimizer = torch.optim.Adam
else:
from deepspeed.ops.adam import DeepSpeedCPUAdam
cpu_adam_optimizer = DeepSpeedCPUAdam
optimizer = cpu_adam_optimizer(param_groups,
lr=args.lr, weight_decay=args.weight_decay)
else:
# Use FusedAdam.
optimizer = Adam(param_groups,
lr=args.lr, weight_decay=args.weight_decay)
print(f'Optimizer = {optimizer.__class__.__name__}')
if args.deepspeed:
# fp16 wrapper is not required for DeepSpeed.
return optimizer
# Wrap into fp16 optimizer.
if args.fp16:
optimizer = FP16_Optimizer(optimizer,
static_loss_scale=args.loss_scale,
dynamic_loss_scale=args.dynamic_loss_scale,
dynamic_loss_args={
'scale_window': args.loss_scale_window,
'min_scale': args.min_scale,
'delayed_shift': args.hysteresis})
if torch.distributed.get_rank() == 0:
print(optimizer.param_groups)
return optimizer
def get_learning_rate_scheduler(optimizer, args):
"""Build the learning rate scheduler."""
# Add linear learning rate scheduler.
if args.lr_decay_iters is not None:
num_iters = args.lr_decay_iters
else:
num_iters = args.train_iters
num_iters = max(1, num_iters)
init_step = -1
if args.warmup_iter > 0:
warmup_iter = args.warmup_iter
else:
warmup_iter = args.warmup * num_iters
lr_scheduler = AnnealingLR(optimizer,
start_lr=args.lr,
warmup_iter=warmup_iter,
num_iters=num_iters,
decay_style=args.lr_decay_style,
last_iter=init_step,
gradient_accumulation_steps=args.gradient_accumulation_steps)
return lr_scheduler
def setup_model_and_optimizer(args, vocab_size, ds_config, prompt_config=None, set_optim=True):
"""Setup model and optimizer."""
model = get_model(args, vocab_size, prompt_config)
if set_optim:
optimizer = get_optimizer(model, args, prompt_config)
lr_scheduler = get_learning_rate_scheduler(optimizer, args)
else:
optimizer, lr_scheduler = None, None
if args.deepspeed:
print_rank_0("DeepSpeed is enabled.")
model, optimizer, _, lr_scheduler = deepspeed.initialize(
model=model,
optimizer=optimizer,
args=args,
lr_scheduler=lr_scheduler,
mpu=mpu,
dist_init_required=False,
config_params=ds_config
)
print(args.load)
if args.load is not None:
args.iteration = load_checkpoint(model, optimizer, lr_scheduler, args, prompt_config)
else:
args.iteration = 0
return model, optimizer, lr_scheduler
def set_deepspeed_activation_checkpointing(args):
deepspeed.checkpointing.configure(mpu, deepspeed_config=args.deepspeed_config, num_checkpoints=args.num_checkpoints)
mpu.checkpoint = deepspeed.checkpointing.checkpoint
mpu.get_cuda_rng_tracker = deepspeed.checkpointing.get_cuda_rng_tracker
mpu.model_parallel_cuda_manual_seed = deepspeed.checkpointing.model_parallel_cuda_manual_seed
def initialize_distributed(args):
"""Initialize torch.distributed."""
# Manually set the device ids.
device = args.rank % torch.cuda.device_count()
if args.local_rank is not None:
device = args.local_rank
torch.cuda.set_device(device)
# Call the init process
deepspeed.init_distributed()
# Set the model-parallel / data-parallel communicators.
mpu.initialize_model_parallel(args.model_parallel_size)
# Optional DeepSpeed Activation Checkpointing Features
if args.deepspeed and args.deepspeed_activation_checkpointing:
set_deepspeed_activation_checkpointing(args)
def set_random_seed(seed):
"""Set random seed for reproducability."""
if seed is not None and seed > 0:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
mpu.model_parallel_cuda_manual_seed(seed)
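# Sketch of the typical start-up order used by the training entry points (the exact
# sequence lives in the training scripts, not in this file; values are illustrative):
#   initialize_distributed(args)              # deepspeed + mpu process groups
#   set_random_seed(args.seed)                # python / numpy / torch / mpu seeds
#   model, optimizer, lr_scheduler = setup_model_and_optimizer(
#       args, vocab_size, ds_config, prompt_config)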
def save_checkpoint(iteration, model, optimizer,
lr_scheduler, args, save_dir=None):
"""Save a model checkpoint."""
save_ds_checkpoint(iteration, model, args, save_dir)
# Wait so everyone is done (necessary)
torch.distributed.barrier()
# And update the latest iteration
if torch.distributed.get_rank() == 0:
tracker_filename = os.path.join(args.save if save_dir is None else save_dir, 'latest_checkpointed_iteration.txt')
with open(tracker_filename, 'w') as f:
f.write(str(iteration))
# Wait so everyone is done (not necessary)
torch.distributed.barrier()
def save_ds_checkpoint(iteration, model, args, save_dir=None):
"""Save a model checkpoint."""
sd = {}
sd['iteration'] = iteration
if args.save_prompt_only:
prompt = model.module.module.module.get_prompt_embeds()
save_prompt(args.save if save_dir is None else save_dir, iteration, prompt["encoder"])
else:
model.save_checkpoint(args.save if save_dir is None else save_dir, str(iteration), client_state = sd, save_zero=False)
def save_prompt(save_dir, iteration, prompt_embeds):
save_path = os.path.join(save_dir, "prompt-{}.pt".format(iteration))
if torch.distributed.get_rank() == 0:
torch.save(prompt_embeds, save_path)
def get_checkpoint_iteration(args):
# Read the tracker file and set the iteration.
tracker_filename = os.path.join(args.load, 'latest_checkpointed_iteration.txt')
if not os.path.isfile(tracker_filename):
print_rank_0('WARNING: could not find the metadata file {} '.format(
tracker_filename))
print_rank_0(' will not load any checkpoints and will start from '
'random')
return 0, False, False
iteration = 0
release = False
with open(tracker_filename, 'r') as f:
metastring = f.read().strip()
try:
iteration = int(metastring)
except ValueError:
release = metastring == 'release'
if not release:
print_rank_0('ERROR: Invalid metadata file {}. Exiting'.format(
tracker_filename))
exit()
assert iteration > 0 or release, 'error parsing metadata file {}'.format(
tracker_filename)
return iteration, release, True
def load_prompt(load_dir):
prompt = torch.load(load_dir, map_location=lambda storage, loc: storage)
return prompt
def load_checkpoint(model, optimizer, lr_scheduler, args, prompt_config=None):
"""Load a model checkpoint."""
iteration, release, success = get_checkpoint_iteration(args)
if not success:
return 0
mp_rank = mpu.get_model_parallel_rank()
checkpoint_name = os.path.join(args.load,
str(iteration),
'mp_rank_{:02d}'.format(mp_rank) + '_model_states.pt')
if not os.path.exists(checkpoint_name):
print('Client provided checkpoint load path: {} does not exist ... skip checkpoint load'.format(checkpoint_name))
if mpu.get_data_parallel_rank() == 0:
print("Unable to load checkpoint.")
return iteration
print('loading checkpoint: {}'.format(checkpoint_name))
sd = torch.load(checkpoint_name, map_location=lambda storage, loc: storage)
if args.prompt_tune:
load_prompt_path = prompt_config.get("load_prompt")
if load_prompt_path is not None and len(load_prompt_path) > 0:
prompt_embeds = load_prompt(load_prompt_path)
sd["module"]["encoder.prompt_embeds.weight"] = prompt_embeds
model.module.load_state_dict(sd["module"], strict=False)
iteration = sd['iteration']
torch.distributed.barrier()
if mpu.get_data_parallel_rank() == 0:
print(' successfully loaded {}'.format(checkpoint_name))
return iteration
| 13,650 | 35.209549 | 126 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/generation_utils.py | # coding=utf-8
import os
import torch
import torch.nn.functional as F
from collections import defaultdict
from tokenization_t5 import EncDecTokenizer
class BeamHypotheses(object):
def __init__(
self, num_beams, max_length, length_penalty, early_stopping, tokenizer=None
):
"""
Initialize n-best list of hypotheses.
"""
self.max_length = max_length - 1 # ignoring bos_token
self.length_penalty = length_penalty
self.early_stopping = early_stopping
self.num_beams = num_beams
self.length_fact = []
self.beams = []
self.worst_score = 1e9
self.raw_worst_score = 1e9
self.tokenizer = tokenizer
def __len__(self):
"""
Number of hypotheses in the list.
"""
return len(self.beams)
def add(self, hyp, sum_logprobs):
"""
Add a new hypothesis to the list.
"""
score = sum_logprobs / len(hyp) ** self.length_penalty
if len(self) < self.num_beams or score > self.worst_score:
self.beams.append((score, hyp))
self.length_fact.append(len(hyp) ** self.length_penalty)
if len(self) > self.num_beams:
sorted_scores = sorted(
[(s, idx, _) for idx, (s, _) in enumerate(self.beams)]
)
del self.beams[sorted_scores[0][1]]
self.worst_score = sorted_scores[1][0]
self.raw_worst_score = self.worst_score * (
len(sorted_scores[1][2]) ** self.length_penalty
)
else:
self.worst_score = min(score, self.worst_score)
self.raw_worst_score = sum_logprobs
def is_done(self, best_sum_logprobs, cur_len):
"""
        If there are enough hypotheses and none of the hypotheses being generated
can become better than the worst one in the heap, then we are done with this sentence.
"""
if len(self) < self.num_beams:
return False
elif self.early_stopping:
return True
else:
cur_score = best_sum_logprobs / cur_len ** self.length_penalty
ret = self.worst_score >= cur_score
return ret
def construct_antonym_dict(args):
if args.rule_path is None:
return None
with open(os.path.join(args.rule_path, "./antonym/antonym.txt"), "r") as f:
data = f.read().split("\n")
data = [eval(item) for item in data if item]
antonym_dict = defaultdict(list)
for first, second in data:
antonym_dict[first].append(second)
antonym_dict[second].append(first)
return antonym_dict
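# Expected format of rule_path/antonym/antonym.txt, inferred from the eval()-based
# parsing above (the concrete word pairs are made up): one Python pair literal per line,
#   ("hot", "cold")
#   ("increase", "decrease")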
def calc_banned_antonym_words_ids(input_tokens, tokenizer, antonym_dict):
if antonym_dict is None:
return []
antonym_words = [set()] * len(input_tokens)
# only consider tokens occurring in current sentence
for idx, tokens in enumerate(input_tokens):
for word in tokenizer.convert_ids_to_tokens(reversed(tokens.tolist())):
if word == "<sep>":
break
antonym_words[idx].update(
tokenizer.convert_tokens_to_ids(antonym_dict[word])
)
return [list(tokens) for tokens in antonym_words]
def calc_banned_ngram_tokens(
prev_input_ids,
num_hypos: int,
no_repeat_ngram_size: int,
tokenizer: EncDecTokenizer,
) -> None:
"""Copied from fairseq for no_repeat_ngram in beam_search"""
    generated_ngrams = [{} for _ in range(num_hypos)]
    for idx in range(num_hypos):
        gen_tokens = prev_input_ids[idx].tolist()
        generated_ngram = generated_ngrams[idx]
        # map each (n-1)-token prefix to the token ids that have followed it
        for ngram in zip(*[gen_tokens[i:] for i in range(no_repeat_ngram_size)]):
            prev_ngram = tuple(ngram[:-1])
            generated_ngram[prev_ngram] = generated_ngram.get(prev_ngram, []) + [ngram[-1]]
    def _get_generated_ngrams(hypo_idx):
        # Before decoding the next token, prevent decoding of ngrams that have already appeared.
        # Works directly on token ids; the tokenizer argument is kept for API compatibility.
        cur_len = len(prev_input_ids[hypo_idx])
        if cur_len + 1 < no_repeat_ngram_size:
            return []
        start_idx = cur_len + 1 - no_repeat_ngram_size
        prev_ngram = tuple(prev_input_ids[hypo_idx][start_idx:cur_len].tolist())
        return generated_ngrams[hypo_idx].get(prev_ngram, [])
banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)]
return banned_tokens
def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-10000):
# This function has been mostly taken from huggingface conversational ai code at
# https://medium.com/huggingface/how-to-build-a-state-of-the-art-conversational-ai-with-transfer-learning-2d818ac26313
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
batch_size = logits.size()[0]
if top_p > 0.0:
# logits : (batch_size, vocab_size)
logits = logits.view(batch_size, -1).contiguous()
# logits : (batch_size, vocab_size)
for logit in logits:
# logit: (vocab_size)
sorted_logits, sorted_indices = torch.sort(logit, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[
..., :-1
].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logit[indices_to_remove] = filter_value
logits = logits.view(batch_size, -1).contiguous()
return logits
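# Standalone sketch of the filter (not used by the code below; the vocabulary size
# is illustrative): keep the 5 best tokens and the smallest nucleus with cumulative
# probability above 0.9, pushing everything else down to the filter value.
#   logits = torch.randn(2, 32128)                      # (batch, vocab)
#   filtered = top_k_logits(logits, top_k=5, top_p=0.9)
#   probs = F.softmax(filtered.float(), dim=-1)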
def calc_banned_bad_words_ids(prev_input_ids, bad_words_ids):
banned_tokens = []
def _tokens_match(prev_tokens, tokens):
if len(tokens) == 0:
# if bad word tokens is just one token always ban it
return True
if len(tokens) > len(prev_input_ids):
# if bad word tokens are longer then prev input_ids they can't be equal
return False
if prev_tokens[-len(tokens) :] == tokens:
# if tokens match
return True
else:
return False
for prev_input_ids_slice in prev_input_ids:
banned_tokens_slice = []
for banned_token_seq in bad_words_ids:
assert (
len(banned_token_seq) > 0
), "Banned words token sequences {} cannot have an empty list".format(
bad_words_ids
)
if (
_tokens_match(prev_input_ids_slice.tolist(), banned_token_seq[:-1])
is False
):
# if tokens do not match continue
continue
banned_tokens_slice.append(banned_token_seq[-1])
banned_tokens.append(banned_tokens_slice)
return banned_tokens
def enforce_repetition_penalty_(
tokenizer, lprobs, batch_size, num_beams, prev_output_tokens, repetition_penalty
):
"""repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)."""
for i in range(batch_size * num_beams):
for previous_token in set(prev_output_tokens[i].tolist()):
if previous_token != tokenizer.eos_id:
# if score < 0 then repetition penalty has to multiplied to reduce the previous token probability
if lprobs[i, previous_token] < 0:
lprobs[i, previous_token] *= repetition_penalty
else:
lprobs[i, previous_token] /= repetition_penalty
def postprocess_next_token_scores(
tokenizer: EncDecTokenizer,
scores,
input_ids,
no_repeat_ngram_size,
bad_words_ids,
cur_len,
min_length,
max_length,
eos_token_id,
repetition_penalty,
batch_size,
num_beams,
antonym_dict,
):
# repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
if repetition_penalty != 1.0:
enforce_repetition_penalty_(
tokenizer,
scores,
batch_size,
num_beams,
input_ids,
repetition_penalty,
)
# set eos token prob to zero if min_length is not reached
if eos_token_id is not None and cur_len < min_length:
scores[:, eos_token_id] = -10000
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
num_batch_hypotheses = batch_size * num_beams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
banned_batch_tokens = calc_banned_ngram_tokens(
input_ids, num_batch_hypotheses, no_repeat_ngram_size, tokenizer=tokenizer
)
for i, banned_tokens in enumerate(banned_batch_tokens):
scores[i, banned_tokens] = -10000
if bad_words_ids is not None:
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
for i, banned_tokens in enumerate(banned_tokens):
scores[i, banned_tokens] = -10000
# add antonym banned list
banned_tokens = calc_banned_antonym_words_ids(input_ids, tokenizer, antonym_dict)
for i, banned_tokens in enumerate(banned_tokens):
scores[i, banned_tokens] = -10000
    scores[:, 0] = -50000  # heavily penalize token id 0
return scores
def generate_no_beam(
model_batch, full_context, model, tokenizer: EncDecTokenizer, args, device
):
batch_size = args.batch_size
target_length = args.max_generation_length
dec_init_length = 1 # +1 for s_0
if args.FiD:
model.module.module.module.reset_score_storage()
batch_size, _, sequence_length = model_batch["passage_input_ids"].size()
enc_input_ids = model_batch["passage_input_ids"].view(
batch_size * args.passage_num, sequence_length
)
enc_attention_mask = model_batch["passage_attention_mask"].view(
batch_size * args.passage_num, 1, sequence_length, sequence_length
)
enc_outputs = model(
enc_input_ids=enc_input_ids,
enc_attention_mask=enc_attention_mask,
only_encoder=True,
)
enc_hidden_states = enc_outputs["encoder_last_hidden_state"].view(
batch_size, sequence_length * args.passage_num, -1
)
else:
enc_input_ids = model_batch["enc_input_ids"]
enc_attention_mask = model_batch["enc_attention_mask"]
enc_outputs = model(
enc_input_ids=enc_input_ids,
enc_attention_mask=enc_attention_mask,
only_encoder=True,
)
enc_hidden_states = enc_outputs["encoder_last_hidden_state"]
# for generating responses
# we only use the <go> token, so truncate other tokens
dec_input_ids = model_batch["dec_input_ids"][..., :dec_init_length]
dec_attention_mask = model_batch["dec_attention_mask"][
..., :dec_init_length, :dec_init_length
]
# we use past_key_values, so only the current token mask is needed
cross_attention_mask = model_batch["cross_attention_mask"][..., :dec_init_length, :]
unfinished_sents = enc_input_ids.new(enc_hidden_states.size(0)).fill_(1)
output_ids = enc_input_ids.new_zeros(
[enc_hidden_states.size(0), 0]
) # not include the prompt
prob_idx = torch.arange(batch_size)
past_key_values = None
gen_len = 0
# construct antonym dict
antonym_dict = None
while gen_len < target_length:
if unfinished_sents.max() == 0:
tokens_to_add = tokenizer.eos_id * (1 - unfinished_sents)
output_ids = torch.cat([output_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
else:
dec_outputs = model(
dec_input_ids=dec_input_ids,
dec_attention_mask=dec_attention_mask,
cross_attention_mask=cross_attention_mask,
enc_hidden_states=enc_hidden_states,
past_key_values=past_key_values,
)
past_key_values = dec_outputs["past_key_values"]
lm_logits = dec_outputs["lm_logits"]
logits = lm_logits[:, -1, :] / args.temperature
prev_output_tokens = torch.cat([full_context, output_ids], dim=-1)
logits = postprocess_next_token_scores(
tokenizer=tokenizer,
scores=logits,
input_ids=prev_output_tokens,
no_repeat_ngram_size=args.no_repeat_ngram_size,
bad_words_ids=[[0]],
cur_len=gen_len,
min_length=args.min_generation_length,
max_length=args.max_generation_length,
eos_token_id=tokenizer.eos_id,
repetition_penalty=args.repetition_penalty,
batch_size=batch_size,
num_beams=1,
antonym_dict=antonym_dict,
)
if args.sampling:
logits = top_k_logits(logits, top_k=args.top_k, top_p=args.top_p)
probs = F.softmax(logits.float(), dim=-1)
next_token = torch.multinomial(probs, num_samples=1).squeeze(1)
else:
next_token = torch.argmax(logits, -1)
tokens_to_add = next_token * unfinished_sents + tokenizer.pad_id * (
1 - unfinished_sents
)
dec_input_ids = tokens_to_add.unsqueeze(-1)
output_ids = torch.cat([output_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
# let the current token attend to all previous tokens
dec_attention_mask = torch.cat(
[dec_attention_mask[:, :, -1:, :], dec_attention_mask[:, :, -1:, -1:]],
dim=-1,
)
cross_attention_mask = cross_attention_mask[:, :, -1:, :]
gen_len += 1
unfinished_sents.mul_(tokens_to_add.ne(tokenizer.eos_id).long())
output_ids = output_ids.cpu().tolist()
generation_token_ids_list = []
generation_str_list = []
for e in output_ids:
generation_token_ids = (
e[: e.index(tokenizer.eos_id)] if tokenizer.eos_id in e else e
)
generation_token_ids_list.append(generation_token_ids)
generation_str_list.append(tokenizer.decode(generation_token_ids))
return generation_str_list, generation_token_ids_list
def generate_beam(
model_batch, full_context, model, tokenizer: EncDecTokenizer, args, device
):
"""
Since the context in model batch is truncated, we need full_context to store the tokens in the entire context.
"""
batch_size = args.batch_size
num_beams = args.num_beams
target_length = args.max_generation_length
do_sample = args.sampling and (args.top_p > 0 or args.top_k > 0)
vocab_size = tokenizer.vocab_size
enc_input_ids = model_batch["enc_input_ids"]
enc_attention_mask = model_batch["enc_attention_mask"]
enc_input_length = enc_input_ids.size(-1)
enc_input_ids = enc_input_ids.unsqueeze(1).expand(
batch_size, num_beams, enc_input_length
)
enc_attention_mask = enc_attention_mask.unsqueeze(1).expand(
batch_size, num_beams, 1, enc_input_length, enc_input_length
)
enc_input_ids = enc_input_ids.contiguous().view(
batch_size * num_beams, enc_input_length
)
enc_attention_mask = enc_attention_mask.contiguous().view(
batch_size * num_beams, 1, enc_input_length, enc_input_length
)
full_context = full_context.unsqueeze(1).expand(
batch_size, num_beams, full_context.size(-1)
)
full_context = full_context.contiguous().view(
batch_size * num_beams, full_context.size(-1)
)
enc_outputs = model(
enc_input_ids=enc_input_ids,
enc_attention_mask=enc_attention_mask,
only_encoder=True,
)
enc_hidden_states = enc_outputs["encoder_last_hidden_state"]
dec_init_length = 1 # 1 for s_0
# for generating responses
dec_input_ids = model_batch["dec_input_ids"][..., :dec_init_length]
dec_attention_mask = model_batch["dec_attention_mask"][
..., :dec_init_length, :dec_init_length
]
# we use past_key_values, so only the current token mask is needed
cross_attention_mask = model_batch["cross_attention_mask"][..., :dec_init_length, :]
dec_input_ids = dec_input_ids.unsqueeze(1).expand(
batch_size, num_beams, dec_init_length
)
dec_attention_mask = dec_attention_mask.unsqueeze(1).expand(
batch_size, num_beams, 1, dec_init_length, dec_init_length
)
cross_attention_mask = cross_attention_mask.unsqueeze(1).expand(
batch_size, num_beams, 1, dec_init_length, enc_input_length
)
dec_input_ids = dec_input_ids.contiguous().view(
batch_size * num_beams, dec_init_length
)
dec_attention_mask = dec_attention_mask.contiguous().view(
batch_size * num_beams, 1, dec_init_length, dec_init_length
)
cross_attention_mask = cross_attention_mask.contiguous().view(
batch_size * num_beams, 1, dec_init_length, enc_input_length
)
done = [False for _ in range(batch_size)]
output_ids = enc_input_ids.new_zeros(
[enc_input_ids.size(0), 0]
) # not include the prompt
past_key_values = None
gen_len = 0
# construct antonym dict
antonym_dict = None
# generated hypotheses
generated_hyps = [
BeamHypotheses(
num_beams,
target_length,
args.length_penalty,
early_stopping=args.early_stopping,
tokenizer=tokenizer,
)
for _ in range(batch_size)
]
beam_scores = torch.zeros(
(batch_size, num_beams), dtype=torch.float, device=dec_input_ids.device
)
beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)
while gen_len < target_length:
dec_outputs = model(
dec_input_ids=dec_input_ids,
dec_attention_mask=dec_attention_mask,
cross_attention_mask=cross_attention_mask,
enc_hidden_states=enc_hidden_states,
past_key_values=past_key_values,
)
past_key_values = dec_outputs["past_key_values"]
lm_logits = dec_outputs["lm_logits"]
logits = lm_logits[:, -1, :] / args.temperature
scores = F.log_softmax(logits, dim=-1)
prev_output_tokens = torch.cat([full_context, output_ids], dim=-1)
scores = postprocess_next_token_scores(
tokenizer=tokenizer,
scores=scores,
input_ids=prev_output_tokens,
no_repeat_ngram_size=args.no_repeat_ngram_size,
bad_words_ids=None,
cur_len=gen_len,
min_length=args.min_generation_length,
max_length=args.max_generation_length,
eos_token_id=tokenizer.eos_id,
repetition_penalty=args.repetition_penalty,
batch_size=batch_size,
num_beams=num_beams,
antonym_dict=antonym_dict,
)
if do_sample:
_scores = scores + beam_scores[:, None].expand_as(scores)
if args.temperature != 1.0:
_scores = _scores / args.temperature
_scores = top_k_logits(_scores, top_k=args.top_k, top_p=args.top_p)
_scores = _scores.contiguous().view(batch_size, num_beams * vocab_size)
# Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)
probs = F.softmax(_scores, dim=-1)
next_tokens = torch.multinomial(
probs, num_samples=2 * num_beams
) # (batch_size, num_beams * 2)
# Compute next scores
next_scores = torch.gather(
_scores, -1, next_tokens
) # (batch_size, num_beams * 2)
# sort the sampled vector to make sure that the first num_beams samples are the best
next_scores, next_scores_indices = torch.sort(
next_scores, descending=True, dim=1
)
next_tokens = torch.gather(
next_tokens, -1, next_scores_indices
) # (batch_size, num_beams * 2)
else:
next_scores = scores + beam_scores[:, None].expand_as(
scores
) # (batch_size * num_beams, vocab_size)
            # re-organize to group the beams together (we are keeping the top hypotheses across beams)
next_scores = next_scores.view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
next_scores, next_tokens = torch.topk(
next_scores, 2 * num_beams, dim=1, largest=True, sorted=True
)
assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)
# next batch beam content
next_batch_beam = []
for batch_idx in range(batch_size):
# if we are done with this sentence, add a pad token
if done[batch_idx]:
assert (
len(generated_hyps[batch_idx]) >= num_beams
), "Batch can only be done if at least {} beams have been generated".format(
num_beams
)
next_batch_beam.extend(
[(0, tokenizer.pad_id, 0)] * num_beams
) # pad the batch
continue
# next sentence beam content, this will get added to next_batch_beam
next_sent_beam = []
# next tokens for this sentence
for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(
zip(next_tokens[batch_idx], next_scores[batch_idx])
):
# get beam and token IDs
beam_id = beam_token_id // vocab_size
token_id = beam_token_id % vocab_size
effective_beam_id = batch_idx * num_beams + beam_id
# add to generated hypotheses if end of sentence
if token_id.item() == tokenizer.eos_id:
# if beam_token does not belong to top num_beams tokens, it should not be added
is_beam_token_worse_than_top_num_beams = (
beam_token_rank >= num_beams
)
if is_beam_token_worse_than_top_num_beams:
continue
generated_hyps[batch_idx].add(
output_ids[effective_beam_id].clone(),
beam_token_score.item(),
)
else:
# add next predicted token since it is not eos_token
next_sent_beam.append(
(beam_token_score, token_id, effective_beam_id)
)
# once the beam for next step is full, don't add more tokens to it.
if len(next_sent_beam) == num_beams:
break
# Check if we are done so that we can save a pad step if all(done)
done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(
next_scores[batch_idx].max().item(), gen_len
)
# update next beam content
assert len(next_sent_beam) == num_beams, "Beam should always be full"
next_batch_beam.extend(next_sent_beam)
assert len(next_batch_beam) == num_beams * (
batch_idx + 1
), "We should have added num_beams each step"
# stop when we are done with each sentence
if all(done):
break
# sanity check / prepare next batch
assert len(next_batch_beam) == batch_size * num_beams
beam_scores = torch.tensor(
[x[0] for x in next_batch_beam], device=dec_input_ids.device
)
beam_tokens = torch.tensor(
[x[1] for x in next_batch_beam], device=dec_input_ids.device
)
beam_idx = torch.tensor(
[x[2] for x in next_batch_beam], device=dec_input_ids.device
)
# re-order batch and update current length
output_ids = output_ids[beam_idx, :]
output_ids = torch.cat([output_ids, beam_tokens.unsqueeze(1)], dim=-1)
dec_input_ids = beam_tokens.unsqueeze(1)
dec_attention_mask = torch.cat(
[dec_attention_mask[:, :, -1:, :], dec_attention_mask[:, :, -1:, -1:]],
dim=-1,
)
cross_attention_mask = cross_attention_mask[:, :, -1:, :]
# past_key_values = num_layer * 2 * (2, beam_size, 32, prefix_len, 64) first 2: self/cross attention, second 2: key/value
past_key_values = [
[
torch.index_select(layer_past_type, 1, beam_idx)
for layer_past_type in layer_past
]
for layer_past in past_key_values
]
gen_len += 1
# finalize all open beam hypotheses and add to generated hypotheses
for batch_idx in range(batch_size):
if done[batch_idx]:
continue
# need to add best num_beams hypotheses to generated hyps
for beam_id in range(num_beams):
effective_beam_id = batch_idx * num_beams + beam_id
final_score = beam_scores[effective_beam_id].item()
final_tokens = output_ids[effective_beam_id]
generated_hyps[batch_idx].add(final_tokens, final_score)
best = []
best_ids = []
# retrieve best hypotheses
for i, hypotheses in enumerate(generated_hyps):
sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])
best_hyp = sorted_hyps.pop()[1]
best.append(tokenizer.decode(best_hyp.cpu().tolist()))
best_ids.append(best_hyp.cpu().tolist())
return best, best_ids
| 26,394 | 36.439716 | 139 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/train_t0.py | # coding=utf-8
"""Training Enc-Dec"""
import os
import torch
import json
import numpy as np
from arguments import get_args
from data_utils.T0Datasets import T0Dataset
from data_utils.data_config import (
DATA_GROUP_CONFIG,
DATA_NO_EVAL,
DATA_NO_VALID,
DATA_NO_TRAIN,
DATA_EVAL_GEN,
DATA_RETRIEVAL_AUGMENTATION,
RA_PASSAGE_NUM,
)
from data_utils import ANSWER_POST_FN
from tokenization_t5 import EncDecTokenizer
import mpu
from utils import save_checkpoint
from utils import print_args
from utils import print_rank_0, save_rank_0
from utils import save_preds_t0
from utils import setup_model_and_optimizer, set_random_seed, initialize_distributed
from samplers import DistributedBatchSampler, RandomSampler
from data_utils import *
from metrics import *
from torch.utils.data import DataLoader, SequentialSampler
from generation_utils import generate_beam, generate_no_beam
from promptsource.templates import TemplateCollection
from tqdm import tqdm
def forward_step(
args,
model_batch,
no_model_batch,
model,
device,
keep_enc_hidden=False,
do_infer=False,
):
for k in model_batch:
model_batch[k] = model_batch[k].to(device)
for k in no_model_batch:
no_model_batch[k] = no_model_batch[k].to(device)
if args.FiD:
batch_size, _, sequence_length = model_batch["passage_input_ids"].size()
enc_outputs = model(
enc_input_ids=model_batch["passage_input_ids"].view(
batch_size * args.passage_num, sequence_length
),
enc_attention_mask=model_batch["passage_attention_mask"].view(
batch_size * args.passage_num, 1, sequence_length, sequence_length
),
only_encoder=True,
)
enc_hidden_states = enc_outputs["encoder_last_hidden_state"].view(
batch_size, sequence_length * args.passage_num, -1
)
new_model_batch = {}
for k in model_batch:
if k not in ["passage_input_ids", "passage_attention_mask"]:
new_model_batch[k] = model_batch[k]
output = model(**new_model_batch, enc_hidden_states=enc_hidden_states)
else:
if keep_enc_hidden:
enc_outputs = model(**model_batch, only_encoder=True)
enc_hidden_states = enc_outputs["encoder_last_hidden_state"]
output = model(**model_batch, enc_hidden_states=enc_hidden_states)
else:
output = model(**model_batch)
logits = output["lm_logits"]
forw_out = {"logits": logits}
if keep_enc_hidden:
forw_out["enc_hidden_states"] = enc_hidden_states
if not do_infer:
losses = mpu.vocab_parallel_cross_entropy(
logits.contiguous().float(), no_model_batch["labels"]
)
loss_mask = no_model_batch["loss_mask"]
losses = (losses * loss_mask).sum(-1) / loss_mask.sum(-1)
loss = losses.mean()
forw_out["loss"] = loss
forw_out["loss_batch"] = losses
return forw_out
def backward_step(args, loss, model, optimizer):
# backward
if args.deepspeed:
model.backward(loss)
else:
optimizer.zero_grad()
if args.fp16:
optimizer.backward(loss, update_master_grads=False)
else:
loss.backward()
# Update master gradients.
if not args.deepspeed:
if args.fp16:
optimizer.update_master_grads()
# Clipping gradients helps prevent the exploding gradient.
if args.clip_grad > 0:
if not args.fp16:
mpu.clip_grad_norm(model.parameters(), args.clip_grad)
else:
optimizer.clip_master_grads(args.clip_grad)
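# Condensed view of one training step as wired up in train() below (a sketch;
# DeepSpeed performs gradient accumulation internally when args.deepspeed is set):
#   forw_out = forward_step(args, model_batch, no_model_batch, model, device)
#   backward_step(args, forw_out["loss"], model, optimizer)
#   model.step() if args.deepspeed else optimizer.step()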
def train(
args,
data_names,
tokenizer: EncDecTokenizer,
model,
optimizer,
lr_scheduler,
train_data_utils,
dev_data_utils,
device,
):
"""Train the model."""
train_dataloader, train_dataset, random_sampler = train_data_utils
# Turn on training mode which enables dropout.
model.train()
# Tracking loss.
total_loss = 0.0
step, global_step = 1, 1
best_scores = []
for e in range(args.epochs):
model.train()
random_sampler.set_epoch(e)
train_dataset.set_epoch(e)
for model_batch, no_model_batch, _, _ in train_dataloader:
forw_out = forward_step(args, model_batch, no_model_batch, model, device)
loss = forw_out["loss"]
if torch.distributed.get_rank() == 0:
print(loss)
backward_step(args, loss, model, optimizer)
# Update losses.
total_loss += loss.item()
if args.deepspeed:
model.step()
else:
optimizer.step()
if not (args.fp16 and optimizer.overflow):
lr_scheduler.step()
# Logging.
if (
global_step % args.log_interval == 0
and step % args.gradient_accumulation_steps == 0
):
learning_rate = optimizer.param_groups[0]["lr"]
avg_lm_loss = total_loss / (
args.log_interval * args.gradient_accumulation_steps
)
log_string = "epoch {:3d}/{:3d} |".format(e, args.epochs)
log_string += " global iteration {:8d}/{:8d} |".format(
global_step, args.train_iters
)
log_string += " learning rate {:.3} |".format(learning_rate)
log_string += " lm loss {:.6} |".format(avg_lm_loss)
if args.fp16:
log_string += " loss scale {:.1f} |".format(
optimizer.cur_scale if args.deepspeed else optimizer.loss_scale
)
print_rank_0(log_string)
save_rank_0(args, log_string)
total_loss = 0.0
# Checkpointing
if (
args.save
and args.save_interval
and global_step % args.save_interval == 0
and step % args.gradient_accumulation_steps == 0
):
save_dir_path = os.path.join(args.save, str(global_step))
if torch.distributed.get_rank() == 0:
os.makedirs(save_dir_path, exist_ok=True)
save_checkpoint(
global_step,
model,
optimizer,
lr_scheduler,
args,
save_dir=save_dir_path,
)
# Evaluation
if (
args.eval_interval
and global_step % args.eval_interval == 0
and step % args.gradient_accumulation_steps == 0
and args.do_valid
):
prefix = "iteration {} | ".format(global_step)
metric_values = []
for name, dev_data_util_prompt in dev_data_utils.items():
if DATA_CONFIG[name].get("selfsup", False):
if DATA_CONFIG[name]["type"] == "gen":
dev_dataloader, dev_dataset, _ = dev_data_util_prompt[0]
dev_loss = evaluate_lm(
args,
tokenizer,
name,
dev_dataset,
dev_dataloader,
model,
device,
mode="dev",
train_step=global_step,
save_res=True,
)
log_string = (
prefix + name + " | dev_loss: " + str(np.mean(dev_loss))
)
print_rank_0(log_string)
save_rank_0(args, log_string)
else:
dev_dataloader, dev_dataset, _ = dev_data_util_prompt[0]
dev_loss, dev_res, dev_preds, dev_labels = evaluate_rank(
args,
tokenizer,
name,
dev_dataset,
dev_dataloader,
model,
device,
mode="dev",
train_step=global_step,
save_res=True,
)
log_string = (
prefix
+ name
+ " | dev_loss: "
+ str(np.mean(dev_loss))
+ " | dev res: "
+ str(dev_res)
)
print_rank_0(log_string)
save_rank_0(args, log_string)
else:
dev_res_prompt = []
dev_loss_prompt = []
dev_preds_prompt = []
dev_labels_prompt = []
dev_prompt_names = []
for pid, dev_data_util in enumerate(dev_data_util_prompt):
dev_dataloader, dev_dataset, _ = dev_data_util
dev_prompt_names.append(
dev_dataset.all_data[name]["prompt_names"][0]
)
if (
dev_dataset.data_prompts[name][0].answer_choices
is not None
):
eval_func = evaluate_rank
else:
eval_func = evaluate_gen
dev_loss, dev_res, dev_preds, dev_labels = eval_func(
args,
tokenizer,
name,
dev_dataset,
dev_dataloader,
model,
device,
mode="dev",
train_step=global_step,
save_res=True,
)
dev_loss_prompt.append(dev_loss)
dev_res_prompt.append(dev_res)
dev_preds_prompt.append(dev_preds)
dev_labels_prompt.append(dev_labels)
log_string = (
prefix
+ name
+ " | dev_loss: "
+ str(np.mean(dev_loss_prompt))
+ " | dev res: "
+ str(dev_res_prompt)
)
print_rank_0(log_string)
save_rank_0(args, log_string)
save_preds_t0(
args,
name,
dev_prompt_names,
global_step,
dev_res_prompt,
dev_preds_prompt,
dev_labels_prompt,
)
values = [
v for dev_res in dev_res_prompt for v in dev_res.values()
]
metric_values.extend(values)
if len(metric_values) != 0:
metric_avg = sum(metric_values) / len(metric_values)
log_string = prefix + "Average: " + str(metric_avg)
print_rank_0(log_string)
save_rank_0(args, log_string)
model.train()
step += 1
if step % args.gradient_accumulation_steps == 0:
global_step += 1
return global_step
def evaluate_lm(
args,
tokenizer: EncDecTokenizer,
name,
eval_dataset: T0Dataset,
eval_data_loader,
model,
device,
mode="dev",
train_step=0,
save_res=False,
):
model.eval()
total_loss = 0.0
step = 0
with torch.no_grad():
for model_batch, no_model_batch, _, _ in eval_data_loader:
for k in model_batch:
model_batch[k] = model_batch[k].to(device)
for k in no_model_batch:
no_model_batch[k] = no_model_batch[k].to(device)
forw_out = forward_step(
args, model_batch, no_model_batch, model, device, keep_enc_hidden=True
)
loss = forw_out["loss"].item() if "loss" in forw_out else 0
total_loss += loss
step += 1
if step == 0:
if torch.distributed.get_rank() == 0:
print(name)
print(eval_dataset.data_prompts[name][0].name)
print(len(eval_dataset))
    # Guard against an empty dataloader (step == 0) to avoid dividing by zero.
    total_loss /= max(step, 1)
return total_loss
def evaluate_gen(
args,
tokenizer: EncDecTokenizer,
name,
eval_dataset: T0Dataset,
eval_data_loader,
model,
device,
mode="dev",
train_step=0,
save_res=False,
):
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0.0
step = 0
all_output_ids = []
all_idxs = []
if args.FiD:
all_scores = []
with torch.no_grad():
if not args.FiD:
for model_batch, no_model_batch, _, _ in eval_data_loader:
for k in model_batch:
model_batch[k] = model_batch[k].to(device)
for k in no_model_batch:
no_model_batch[k] = no_model_batch[k].to(device)
forw_out = forward_step(
args,
model_batch,
no_model_batch,
model,
device,
keep_enc_hidden=True,
)
loss = forw_out["loss"].item() if "loss" in forw_out else 0
total_loss += loss
step += 1
if step == 0:
if torch.distributed.get_rank() == 0:
print(name)
print(eval_dataset.data_prompts[name][0].name)
print(len(eval_dataset))
            # Guard against an empty dataloader (step == 0) to avoid dividing by zero.
            total_loss /= max(step, 1)
for e, (model_batch, no_model_batch, _, _) in tqdm(
enumerate(eval_data_loader), desc="Evaluating"
):
for k in model_batch:
model_batch[k] = model_batch[k].to(device)
for k in no_model_batch:
no_model_batch[k] = no_model_batch[k].to(device)
if args.num_beams == 1:
generation_str_list, generation_id_list = generate_no_beam(
model_batch,
model_batch["enc_input_ids"],
model,
tokenizer,
args,
device,
)
if args.FiD:
scores = model.module.module.module.get_crossattention_scores(
model_batch["passage_attention_mask"][:, :, 0, 0, :].bool()
)
all_scores.append(scores)
else:
generation_str_list, generation_id_list = generate_beam(
model_batch,
model_batch["enc_input_ids"],
model,
tokenizer,
args,
device,
)
output_ids = [
x
+ [tokenizer.pad_id]
+ (args.max_generation_length - len(x)) * [tokenizer.pad_id]
for x in generation_id_list
]
output_ids = torch.tensor(output_ids).to(device)
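            # Generations were padded to a fixed length above so every
            # data-parallel rank contributes an equally sized tensor; sample
            # indices are gathered alongside them to map predictions back to
            # dataset entries after the all_gather calls below.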
tmp_idxs = [
torch.zeros_like(no_model_batch["idxs"]).to(device)
for _ in range(mpu.get_data_parallel_world_size())
]
torch.distributed.all_gather(
tmp_idxs,
no_model_batch["idxs"].data,
group=mpu.get_data_parallel_group(),
)
tmp_output_ids = [
torch.zeros_like(output_ids).to(device)
for _ in range(mpu.get_data_parallel_world_size())
]
torch.distributed.all_gather(
tmp_output_ids, output_ids.data, group=mpu.get_data_parallel_group()
)
all_idxs.extend(tmp_idxs)
all_output_ids.extend(tmp_output_ids)
all_output_ids = torch.cat(all_output_ids, dim=0).cpu().tolist()
all_idxs = torch.cat(all_idxs, dim=0).tolist()
if args.FiD:
all_scores = torch.cat(all_scores, dim=0)
print(all_scores.size())
torch.save(
all_scores,
                os.path.join(args.save, "stored_FiD_scores.pt"),
)
all_preds_real = []
all_labels_real = []
eval_res = {}
for idxs, output_ids in zip(all_idxs, all_output_ids):
_, _, sid = idxs
output_ids = (
output_ids[: output_ids.index(tokenizer.pad_id)]
if tokenizer.pad_id in output_ids
else output_ids
)
all_preds_real.append(tokenizer.decode(output_ids))
all_labels_real.append(eval_dataset.all_data[name]["data"][sid]["answer"])
metric_names = eval_dataset.data_prompts[name][0].metadata.metrics
for metric_name in metric_names:
if (name, metric_name) in ANSWER_POST_FN:
all_labels_real, all_preds_real = ANSWER_POST_FN[(name, metric_name)](
all_labels_real, all_preds_real
)
res = T0_METRICS[metric_name](all_labels_real, all_preds_real)
eval_res.update(res)
# if save_res:
# save_preds_t0(args, name, eval_dataset, train_step, eval_res, all_preds_real, all_labels_real)
return total_loss, eval_res, all_preds_real, all_labels_real
def evaluate_rank(
args,
tokenizer: EncDecTokenizer,
name,
eval_dataset: T0Dataset,
eval_data_loader,
model,
device,
mode="dev",
train_step=0,
save_res=False,
):
"""Evaluation."""
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0.0
step = 0
all_idxs = []
all_preds = []
if args.prompt_tune:
all_prompt = torch.load(
f"data/{args.data_names}/cache/stored_dembeds.pt",
map_location=lambda storage, loc: storage,
)
if args.FiD:
all_scores = []
tmp_pos_index = torch.arange(1, eval_dataset.max_cand_len + 1, device=device)
with torch.no_grad():
for (
model_batch,
no_model_batch,
cand_model_batch,
cand_no_model_batch,
) in tqdm(eval_data_loader, desc="Evaluating"):
for k in model_batch:
model_batch[k] = model_batch[k].to(device)
for k in no_model_batch:
no_model_batch[k] = no_model_batch[k].to(device)
for k in cand_model_batch:
cand_model_batch[k] = cand_model_batch[k].to(device)
for k in cand_no_model_batch:
cand_no_model_batch[k] = cand_no_model_batch[k].to(device)
if args.prompt_tune:
prompt = all_prompt[step]
model.module.module.module.encoder.load_prompt_embeds(prompt)
if args.FiD:
model.module.module.module.reset_score_storage()
batch_size, _, sequence_length = model_batch["passage_input_ids"].size()
enc_outputs = model(
enc_input_ids=model_batch["passage_input_ids"].view(
batch_size * args.passage_num, sequence_length
),
enc_attention_mask=model_batch["passage_attention_mask"].view(
batch_size * args.passage_num,
1,
sequence_length,
sequence_length,
),
only_encoder=True,
)
enc_hidden_states = enc_outputs["encoder_last_hidden_state"].view(
batch_size, sequence_length * args.passage_num, -1
)
else:
enc_outputs = model(**model_batch, only_encoder=True)
enc_hidden_states = enc_outputs["encoder_last_hidden_state"]
# enc_hidden_states[0, :10, :] = prompt
output = model(**cand_model_batch, enc_hidden_states=enc_hidden_states)
if args.FiD:
scores = model.module.module.module.get_crossattention_scores(
model_batch["passage_attention_mask"][:, :, 0, 0, :].bool()
)
all_scores.append(scores)
logits = output["lm_logits"]
losses = mpu.vocab_parallel_cross_entropy(
logits.contiguous().float(), cand_no_model_batch["target_ids"]
)
loss_mask = cand_no_model_batch["loss_mask"]
losses = losses * loss_mask
gold_loss = 0
preds = []
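            # All answer candidates of a sample are packed into one target
            # sequence: a cumulative sum of the per-token losses, read out at
            # the candidate boundaries (`cand_pos`) and differenced, recovers
            # each candidate's summed loss, and the same trick on the position
            # indices recovers each candidate's length for normalization. The
            # prediction is the candidate with the lowest normalized loss.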
for samp_loss, cand_pos, cand_label in zip(
losses, cand_no_model_batch["pos"], cand_no_model_batch["labels"]
):
cum_loss = torch.cumsum(samp_loss, dim=0)
# print(samp_loss)
sum_loss = torch.masked_select(cum_loss, cand_pos)
cand_loss = torch.diff(
sum_loss, dim=0, prepend=torch.zeros(1, device=device)
)
# print("cand loss", cand_loss)
# print("samp loss", samp_loss)
cand_pos_idx = torch.masked_select(tmp_pos_index, cand_pos)
cand_lens = torch.diff(
cand_pos_idx, dim=0, prepend=torch.zeros(1, device=device)
)
# print("cand_lens", cand_lens)
if args.no_norm_cand_loss:
normed_cand_loss = cand_loss
else:
normed_cand_loss = cand_loss / cand_lens
# print(normed_cand_loss)
# exit(0)
                min_res = torch.min(normed_cand_loss, dim=0)
                preds.append(min_res.indices.item())
gold_loss += normed_cand_loss[cand_label.item()].item()
gold_loss /= len(losses)
total_loss += gold_loss
preds = torch.tensor(preds, dtype=torch.long, device=device)
gathered_preds = [
torch.zeros_like(preds)
for _ in range(mpu.get_data_parallel_world_size())
]
torch.distributed.all_gather(
gathered_preds, preds.contiguous(), mpu.get_data_parallel_group()
)
all_preds.extend(gathered_preds)
gathered_idx = [
torch.zeros_like(no_model_batch["idxs"])
for _ in range(mpu.get_data_parallel_world_size())
]
torch.distributed.all_gather(
gathered_idx,
no_model_batch["idxs"].contiguous(),
mpu.get_data_parallel_group(),
)
all_idxs.extend(gathered_idx)
step += 1
if step == 0:
if torch.distributed.get_rank() == 0:
print(name)
print(eval_dataset.data_prompts[name][0].name)
print(len(eval_dataset))
    # Guard against an empty dataloader (step == 0) to avoid dividing by zero.
    total_loss /= max(step, 1)
all_idxs = torch.cat(all_idxs, dim=0).cpu().tolist()
all_preds = torch.cat(all_preds, dim=0).cpu().tolist()
if args.FiD:
all_scores = torch.cat(all_scores, dim=0)
print(all_scores.size())
torch.save(
all_scores,
                os.path.join(args.save, "stored_FiD_scores.pt"),
)
all_preds_real = []
all_labels_real = []
eval_res = {}
for idxs, pred in zip(all_idxs, all_preds):
_, _, sid = idxs
sample = eval_dataset.all_data[name]["data"][sid]
all_preds_real.append(sample["options"][pred])
all_labels_real.append(sample["answer"])
if eval_dataset.data_prompts[name] is None:
# selfsup
metric_names = ["Other"]
else:
metric_names = eval_dataset.data_prompts[name][0].metadata.metrics
for metric_name in metric_names:
if (name, metric_name) in ANSWER_POST_FN:
all_labels_real, all_preds_real = ANSWER_POST_FN[(name, metric_name)](
all_labels_real, all_preds_real
)
res = T0_METRICS[metric_name](all_labels_real, all_preds_real)
eval_res.update(res)
# if save_res:
# save_preds_t0(args, name, eval_dataset, train_step, eval_res, all_preds_real, all_labels_real)
return total_loss, eval_res, all_preds_real, all_labels_real
def load_data(
args,
data_prompts,
split,
tokenizer,
ratio=1,
num=-1,
few_data_names=None,
drop_last=True,
):
# Data parallel arguments.
world_size = mpu.get_data_parallel_world_size()
rank = mpu.get_data_parallel_rank()
if args.eval_batch_size is None:
args.eval_batch_size = args.batch_size
if split == "train":
global_batch_size = args.batch_size * world_size
elif split == "validation":
global_batch_size = args.dev_batch_size * world_size
else:
global_batch_size = args.eval_batch_size * world_size
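    # The batch sampler below draws `global_batch_size` indices per step and
    # each data-parallel rank keeps only its own slice of them (see
    # DistributedBatchSampler._batch).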
num_workers = args.num_workers
dataset = T0Dataset(
args,
tokenizer,
data_prompts,
split,
ratio=ratio,
few_data_names=few_data_names,
num=num,
)
if split == "train":
sampler = RandomSampler(dataset)
sampler.set_seed(args.seed)
else:
sampler = SequentialSampler(dataset)
batch_sampler = DistributedBatchSampler(
sampler=sampler,
batch_size=global_batch_size,
drop_last=drop_last,
rank=rank,
world_size=world_size,
)
data_loader = DataLoader(
dataset,
batch_sampler=batch_sampler,
num_workers=num_workers,
pin_memory=True,
collate_fn=dataset.collate,
)
# Torch dataloader.
return data_loader, dataset, sampler
def main():
"""Main training program."""
# Disable CuDNN.
torch.backends.cudnn.enabled = False
# Arguments.
args = get_args()
os.makedirs(args.save, exist_ok=True)
# Pytorch distributed.
initialize_distributed(args)
if torch.distributed.get_rank() == 0:
print("Training Enc-Dec model")
print_args(args)
with open(os.path.join(args.save, "args.json"), "w") as f:
json.dump(vars(args), f)
    # Random seeds for reproducibility.
set_random_seed(args.seed)
device = torch.cuda.current_device()
# setup tokenizer
tokenizer = EncDecTokenizer(
os.path.join(args.tokenizer_path, "spiece.model"), pad_token=args.pad_token
)
with open(args.deepspeed_config, "r") as f:
ds_config = json.load(f)
ds_config["gradient_accumulation_steps"] = args.gradient_accumulation_steps
ds_config["train_micro_batch_size_per_gpu"] = args.batch_size
data_group_names = args.data_names.split("-")
data_names = []
for name in data_group_names:
if name in DATA_GROUP_CONFIG:
data_names.extend(DATA_GROUP_CONFIG[name])
else:
data_names.append(name)
few_data_names = None
if args.few_data_names is not None:
few_data_group_names = args.few_data_names.split("-")
few_data_names = []
for name in few_data_group_names:
if name in DATA_GROUP_CONFIG:
few_data_names.extend(DATA_GROUP_CONFIG[name])
else:
few_data_names.append(name)
data_prompts = {}
for name in data_names:
for ra_name in DATA_RETRIEVAL_AUGMENTATION:
if ra_name in name:
DATA_CONFIG[name] = DATA_CONFIG[ra_name]
DATA_CONFIG[name]["data_dir"] = f"data/{name}/cache"
break
if name in RA_PASSAGE_NUM:
args.passage_num = RA_PASSAGE_NUM[name]
if DATA_CONFIG[name].get("selfsup", False):
data_prompts[name] = None
else:
collection = TemplateCollection()
if "name" in DATA_CONFIG[name]:
templates = collection.get_dataset(
DATA_CONFIG[name]["name"][0], DATA_CONFIG[name]["name"][1]
)
else:
templates = collection.get_dataset(name, None)
data_prompts[name] = []
for template_name in templates.all_template_names:
if "mmlu" in name or "ai2_arc" in name:
if template_name == "heres_a_problem":
data_prompts[name].append(templates[template_name])
continue
if (
"popQA" in name or "marco_qa" in name or "kilt" in name
) and template_name != "question_with_instruction":
continue
if (name, template_name) not in DATA_NO_TRAIN:
if "popQA" in name:
prompt = templates[template_name]
prompt.metadata.metrics = ["popQA"]
data_prompts[name].append(prompt)
elif "marco_qa" in name:
prompt = templates[template_name]
prompt.metadata.metrics = ["BLEU", "ROUGE"]
data_prompts[name].append(prompt)
elif "kilt" in name:
prompt = templates[template_name]
prompt.metadata.metrics = ["Trivia QA"]
data_prompts[name].append(prompt)
else:
data_prompts[name].append(templates[template_name])
print("All Data group:", data_group_names, "All Data:", data_names)
if args.do_train:
train_data_utils = load_data(
args,
data_prompts,
"train",
tokenizer,
ratio=args.train_ratio,
few_data_names=few_data_names,
num=args.train_num,
)
dev_data_utils = {}
for name in data_prompts:
if DATA_CONFIG[name].get("selfsup", False):
dev_data_utils[name] = [
load_data(
args,
{name: None},
"validation",
tokenizer,
ratio=args.dev_ratio,
few_data_names=few_data_names,
num=args.dev_num,
)
]
else:
if (name, None) not in DATA_NO_VALID:
dev_data_utils[name] = []
for template in data_prompts[name]:
if (name, template.name) not in DATA_NO_VALID:
dev_data_utils[name].append(
load_data(
args,
{name: [template]},
"validation",
tokenizer,
ratio=args.dev_ratio,
few_data_names=few_data_names,
num=args.dev_num,
)
)
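        # When not given explicitly, derive the schedule from the data: one
        # epoch corresponds to len(train_dataset) / (data-parallel world size
        # * batch size * gradient accumulation steps) optimizer steps.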
if args.train_iters == -1:
args.train_iters = (
len(train_data_utils[1])
* args.epochs
// (
mpu.get_data_parallel_world_size()
* args.batch_size
* args.gradient_accumulation_steps
)
)
if args.save_interval == -1:
args.save_interval = len(train_data_utils[1]) // (
mpu.get_data_parallel_world_size()
* args.batch_size
* args.gradient_accumulation_steps
)
if args.eval_interval == -1:
args.eval_interval = len(train_data_utils[1]) // (
mpu.get_data_parallel_world_size()
* args.batch_size
* args.gradient_accumulation_steps
)
else:
args.train_iters = 10 # a magic number
log_string = "Total train epochs {} | Total train iters {} | ".format(
args.epochs, args.train_iters
)
print_rank_0(log_string)
save_rank_0(args, log_string)
# Model, optimizer, and learning rate.
prompt_config = None
if args.prompt_tune:
with open(args.prompt_config, "r") as f:
prompt_config = json.load(f)
model, optimizer, lr_scheduler = setup_model_and_optimizer(
args,
tokenizer.vocab_size,
ds_config,
set_optim=args.do_train,
prompt_config=prompt_config,
)
if args.do_train:
train(
args,
data_names,
tokenizer,
model,
optimizer,
lr_scheduler,
train_data_utils,
dev_data_utils,
device,
)
if args.do_eval:
for name in data_names:
if (name, None) not in DATA_NO_EVAL:
eval_loss_prompt = []
eval_res_prompt = []
eval_preds_prompt = []
eval_labels_prompt = []
eval_prompt_names = []
for template in data_prompts[name]:
if (name, template.name) not in DATA_NO_EVAL:
eval_data_utils = load_data(
args,
{name: [template]},
"validation",
tokenizer,
ratio=args.test_ratio,
few_data_names=few_data_names,
num=args.test_num,
)
eval_dataloader, eval_dataset, _ = eval_data_utils
eval_prompt_names.append(
eval_dataset.all_data[name]["prompt_names"][0]
)
if (
eval_dataset.data_prompts[name][0].answer_choices
is not None
and (name, template.name) not in DATA_EVAL_GEN
):
eval_func = evaluate_rank
else:
eval_func = evaluate_gen
eval_loss, eval_res, eval_preds, eval_labels = eval_func(
args,
tokenizer,
name,
eval_dataset,
eval_dataloader,
model,
device,
mode="test",
save_res=True,
)
eval_loss_prompt.append(eval_loss)
eval_res_prompt.append(eval_res)
eval_preds_prompt.append(eval_preds)
eval_labels_prompt.append(eval_labels)
avg_eval_res = {
k: np.mean([res[k] for res in eval_res_prompt])
for k in eval_res_prompt[0]
}
log_string = (
"Eval result: loss: {:.6} | avg_res: {} | all_res: {}".format(
np.mean(eval_loss_prompt), avg_eval_res, eval_res_prompt
)
)
print_rank_0(log_string)
save_rank_0(args, log_string)
save_preds_t0(
args,
name,
eval_prompt_names,
0,
eval_res_prompt,
eval_preds_prompt,
eval_labels_prompt,
)
if __name__ == "__main__":
main()
| 36,843 | 34.022814 | 104 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/samplers.py | # coding=utf-8
"""Batch samplers that work with either random or sequential data samplers."""
import torch
from torch.utils import data
class RandomSampler(data.sampler.Sampler):
"""Based off of pytorch RandomSampler and DistributedSampler. Essentially
a RandomSampler, but this class lets the user set an epoch like
    DistributedSampler. Samples elements randomly. If without replacement, then
sample from a shuffled dataset. If with replacement, then user can
specify ``num_samples`` to draw.
Arguments:
data_source (Dataset): dataset to sample from
num_samples (int): number of samples to draw, default=len(dataset)
replacement (bool): samples are drawn with replacement if ``True``,
default=False
"""
def __init__(self, data_source, replacement=False, num_samples=None, diff_order=False):
self.data_source = data_source
self.replacement = replacement
self._num_samples = num_samples
self.epoch = -1
self.seed = -1
self.diff_order = diff_order
if self._num_samples is not None and replacement is False:
raise ValueError("With replacement=False, num_samples should not "
"be specified, since a random permute will be "
"performed.")
if not isinstance(self.num_samples, int) or self.num_samples <= 0:
raise ValueError("num_samples should be a positive integer "
"value, but got num_samples={}".format(
self.num_samples))
if not isinstance(self.replacement, bool):
raise ValueError("replacement should be a boolean value, but got "
"replacement={}".format(self.replacement))
@property
def num_samples(self):
# dataset size might change at runtime
if self._num_samples is None:
return len(self.data_source)
return self._num_samples
def __iter__(self):
n = len(self.data_source)
g = torch.Generator()
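        # Seeding scheme: with `diff_order`, the permutation is re-seeded from
        # the epoch (and seed), so the order changes every epoch; otherwise a
        # fixed seed (other than the 1234 default) reproduces the same order
        # on every pass.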
if self.diff_order:
if self.epoch >= 0 and self.seed >= 0:
g.manual_seed(self.epoch + self.seed)
elif self.epoch >= 0:
g.manual_seed(self.epoch)
elif self.seed >= 0:
g.manual_seed(self.seed)
else:
if self.seed >= 0 and self.seed != 1234: # hack
g.manual_seed(self.seed)
if self.replacement:
return iter(torch.randint(high=n, size=(self.num_samples,),
dtype=torch.int64, generator=g).tolist())
return iter(torch.randperm(n, generator=g).tolist())
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
def set_seed(self, seed):
self.seed = seed
class DistributedBatchSampler(data.sampler.BatchSampler):
"""Similar to normal implementation of distributed sampler, except
implementation is at the batch sampler level, instead of just the
sampler level. This allows wrapping of arbitrary data samplers
(sequential, random, WeightedRandomSampler, etc.) with this batch
sampler.
The `interleave` argument specifies how to distribute a batch. A value
of True combined with the above random sampler is equivalent to pytorch's
torch.utils.data.distributed.DistributedSampler.
For the following batch [0,1,2,3,4,5,6,7] and data parallelism of 2
specifying True will result in the following samples for each gpu:
GPU0: [0,2,4,6] GPU1: [1,3,5,7]
specifying False will result in the following samples:
GPU0: [0,1,2,3] GPU1: [4,5,6,7]"""
def __init__(self, sampler, batch_size, drop_last, rank=-1,
world_size=2, wrap_last=False, interleave=False):
super(DistributedBatchSampler, self).__init__(sampler, batch_size,
drop_last)
if rank == -1:
assert False, 'should not be here'
rank = torch.distributed.get_rank()
self.rank = rank
self.world_size = world_size
self.sampler.wrap_around = 0
self.wrap_around = 0
self.wrap_last = wrap_last
self.start_iter = 0
self.interleave = interleave
def __iter__(self):
batch = []
i = 0
for idx in self.data_iterator(self.sampler, wrap_around=False):
batch.append(idx)
if len(batch) == self.batch_size:
tbatch = self._batch(batch)
if i >= self.start_iter:
yield tbatch
self.start_iter = 0
i += 1
batch = []
batch_len = len(batch)
if batch_len > 0 and not self.drop_last:
if self.wrap_last:
self.sampler.wrap_around -= (self.batch_size)
self.wrap_around += (len(batch))
self.wrap_around %= self.batch_size
yield self._batch(batch)
if self.wrap_last:
self.sampler.wrap_around += self.batch_size
def data_iterator(self, _iter, wrap_around=False):
"""iterates through data and handles wrap around"""
for i, idx in enumerate(_iter):
if i < self.wrap_around % self.batch_size:
continue
if wrap_around:
self.wrap_around += 1
self.wrap_around %= self.batch_size
yield idx
def _batch(self, batch):
"""extracts samples only pertaining to this worker's batch"""
if self.interleave:
return batch[self.rank:self.batch_size:self.world_size]
start = self.rank * self.batch_size // self.world_size
end = (self.rank + 1) * self.batch_size // self.world_size
return batch[start:end] | 5,911 | 38.677852 | 91 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/fp16/fp16util.py | # coding=utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
import mpu
class tofp16(nn.Module):
"""
Utility module that implements::
def forward(self, input):
return input.half()
"""
def __init__(self):
super(tofp16, self).__init__()
def forward(self, input):
return input.half()
def BN_convert_float(module):
"""
Utility function for network_to_half().
Retained for legacy purposes.
"""
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:
module.float()
for child in module.children():
BN_convert_float(child)
return module
def network_to_half(network):
"""
Convert model to half precision in a batchnorm-safe way.
Retained for legacy purposes. It is recommended to use FP16Model.
"""
return nn.Sequential(tofp16(), BN_convert_float(network.half()))
def convert_module(module, dtype):
"""
Converts a module's immediate parameters and buffers to dtype.
"""
for param in module.parameters(recurse=False):
if param is not None:
if param.data.dtype.is_floating_point:
param.data = param.data.to(dtype=dtype)
if param._grad is not None and param._grad.data.dtype.is_floating_point:
param._grad.data = param._grad.data.to(dtype=dtype)
for buf in module.buffers(recurse=False):
if buf is not None and buf.data.dtype.is_floating_point:
buf.data = buf.data.to(dtype=dtype)
def convert_network(network, dtype):
"""
Converts a network's parameters and buffers to dtype.
"""
for module in network.modules():
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:
continue
convert_module(module, dtype)
return network
class FP16Model(nn.Module):
"""
Convert model to half precision in a batchnorm-safe way.
"""
def __init__(self, network):
super(FP16Model, self).__init__()
self.network = convert_network(network, dtype=torch.half)
def forward(self, *inputs):
inputs = tuple(t.half() for t in inputs)
return self.network(*inputs)
def backwards_debug_hook(grad):
raise RuntimeError("master_params recieved a gradient in the backward pass!")
def prep_param_lists(model, flat_master=False):
"""
Creates a list of FP32 master parameters for a given model, as in
`Training Neural Networks with Mixed Precision: Real Examples`_.
Args:
model (torch.nn.Module): Existing Pytorch model
flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization.
Returns:
        A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master parameters. If ``flat_master=True``, ``master_params`` will be a list with one element.
Example::
model_params, master_params = prep_param_lists(model)
.. warning::
Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`.
.. _`Training Neural Networks with Mixed Precision: Real Examples`:
http://on-demand.gputechconf.com/gtc/2018/video/S81012/
"""
model_params = [param for param in model.parameters() if param.requires_grad]
if flat_master:
# Give the user some more useful error messages
try:
# flatten_dense_tensors returns a contiguous flat array.
# http://pytorch.org/docs/master/_modules/torch/_utils.html
master_params = _flatten_dense_tensors([param.data for param in model_params]).float()
except:
print("Error in prep_param_lists: model may contain a mixture of parameters "
"of different types. Use flat_master=False, or use F16_Optimizer.")
raise
master_params = torch.nn.Parameter(master_params)
master_params.requires_grad = True
# master_params.register_hook(backwards_debug_hook)
if master_params.grad is None:
master_params.grad = master_params.new(*master_params.size())
return model_params, [master_params]
else:
master_params = [param.clone().float().detach() for param in model_params]
for param in master_params:
param.requires_grad = True
return model_params, master_params
def model_grads_to_master_grads(model_params, master_params, flat_master=False):
"""
Copy model gradients to master gradients.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`.
"""
if flat_master:
# The flattening may incur one more deep copy than is necessary.
master_params[0].grad.data.copy_(
_flatten_dense_tensors([p.grad.data for p in model_params]))
else:
for model, master in zip(model_params, master_params):
if model.grad is not None:
if master.grad is None:
master.grad = Variable(master.data.new(*master.data.size()))
master.grad.data.copy_(model.grad.data)
else:
master.grad = None
def master_params_to_model_params(model_params, master_params, flat_master=False):
"""
Copy master parameters to model parameters.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`.
"""
if flat_master:
for model, master in zip(model_params,
_unflatten_dense_tensors(master_params[0].data, model_params)):
model.data.copy_(master)
else:
for model, master in zip(model_params, master_params):
model.data.copy_(master.data)
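# Illustrative sketch (added for clarity, not part of the original helpers): a
# typical manual mixed-precision update built from the functions above. The
# `optimizer` argument is assumed to have been constructed over `master_params`.
def example_fp16_step(model_params, master_params, optimizer, loss_scale=1.0):
    """Copy fp16 grads to the fp32 masters, unscale, step, then copy back."""
    model_grads_to_master_grads(model_params, master_params)
    if loss_scale != 1.0:
        for param in master_params:
            if param.grad is not None:
                param.grad.data.mul_(1.0 / loss_scale)
    optimizer.step()
    master_params_to_model_params(model_params, master_params)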
# Backward compatibility fixes
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
else:
return t[0]
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
clip_grad_norm = mpu.clip_grad_norm
#elif TORCH_MAJOR == 0 and TORCH_MINOR <= 4:
# clip_grad_norm = torch.nn.utils.clip_grad_norm
#else:
# clip_grad_norm = torch.nn.utils.clip_grad_norm_
| 7,072 | 35.647668 | 337 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/fp16/loss_scaler.py | import torch
import mpu
# item() is a recent addition, so this helps with backward compatibility.
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
else:
return t[0]
class LossScaler:
"""
Class that manages a static loss scale. This class is intended to interact with
:class:`FP16_Optimizer`, and should not be directly manipulated by the user.
Use of :class:`LossScaler` is enabled via the ``static_loss_scale`` argument to
:class:`FP16_Optimizer`'s constructor.
Args:
scale (float, optional, default=1.0): The loss scale.
"""
def __init__(self, scale=1):
self.cur_scale = scale
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
return False
# `x` is a torch.Tensor
def _has_inf_or_nan(x):
return False
def update_scale(self, overflow):
pass
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def backward(self, loss, retain_graph=False):
scaled_loss = loss*self.loss_scale
scaled_loss.backward(retain_graph=retain_graph)
class DynamicLossScaler:
"""
Class that manages dynamic loss scaling. It is recommended to use :class:`DynamicLossScaler`
indirectly, by supplying ``dynamic_loss_scale=True`` to the constructor of
:class:`FP16_Optimizer`. However, it's important to understand how :class:`DynamicLossScaler`
operates, because the default options can be changed using the
the ``dynamic_loss_args`` argument to :class:`FP16_Optimizer`'s constructor.
Loss scaling is designed to combat the problem of underflowing gradients encountered at long
times when training fp16 networks. Dynamic loss scaling begins by attempting a very high loss
scale. Ironically, this may result in OVERflowing gradients. If overflowing gradients are
encountered, :class:`DynamicLossScaler` informs :class:`FP16_Optimizer` that an overflow has
occurred.
:class:`FP16_Optimizer` then skips the update step for this particular iteration/minibatch,
and :class:`DynamicLossScaler` adjusts the loss scale to a lower value.
If a certain number of iterations occur without overflowing gradients detected,
:class:`DynamicLossScaler` increases the loss scale once more.
In this way :class:`DynamicLossScaler` attempts to "ride the edge" of
always using the highest loss scale possible without incurring overflow.
Args:
init_scale (float, optional, default=2**32): Initial loss scale attempted by :class:`DynamicLossScaler.`
scale_factor (float, optional, default=2.0): Factor used when adjusting the loss scale. If an overflow is encountered, the loss scale is readjusted to loss scale/``scale_factor``. If ``scale_window`` consecutive iterations take place without an overflow, the loss scale is readjusted to loss_scale*``scale_factor``.
scale_window (int, optional, default=1000): Number of consecutive iterations without an overflow to wait before increasing the loss scale.
"""
def __init__(self,
init_scale=2**32,
scale_factor=2.,
scale_window=1000,
min_scale=1,
delayed_shift=1,
consecutive_hysteresis=False):
self.cur_scale = init_scale
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = scale_factor
self.scale_window = scale_window
self.min_scale = min_scale
self.delayed_shift = delayed_shift
self.cur_hysteresis = delayed_shift
self.consecutive_hysteresis = consecutive_hysteresis
# `params` is a list / generator of torch.Variable
def has_overflow_serial(self, params):
for p in params:
if p.grad is not None and DynamicLossScaler._has_inf_or_nan(p.grad.data):
return True
return False
def has_overflow(self, params):
overflow = self.has_overflow_serial(params)
# Since each model parallel GPU carries only part of the model,
# make sure overflow flag is synced across all the model parallel GPUs
overflow_gpu = torch.cuda.ByteTensor([overflow])
torch.distributed.all_reduce(overflow_gpu,
op=torch.distributed.ReduceOp.MAX,
group=mpu.get_model_parallel_group())
overflow = overflow_gpu[0].item()
return bool(overflow)
# `x` is a torch.Tensor
def _has_inf_or_nan(x):
try:
# if x is half, the .float() incurs an additional deep copy, but it's necessary if
# Pytorch's .sum() creates a one-element tensor of the same type as x
# (which is true for some recent version of pytorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
# We want to check if inst is actually an overflow exception.
# RuntimeError could come from a different error.
# If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
# `overflow` is boolean indicating whether the gradient overflowed
def update_scale(self, overflow):
if not hasattr(self, 'min_scale'):
self.min_scale = 1
if not hasattr(self, 'delayed_shift'):
self.delayed_shift = 1
if not hasattr(self, 'cur_hysteresis'):
self.cur_hysteresis = 1
if not hasattr(self, 'consecutive_hysteresis'):
self.consecutive_hysteresis = True
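        # On overflow, the scale is cut by `scale_factor` (never below
        # `min_scale`), optionally only after `delayed_shift` consecutive
        # overflows (hysteresis); every `scale_window` iterations without an
        # overflow, the scale is raised by `scale_factor` again.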
if overflow:
# self.cur_scale /= self.scale_factor
if self.delayed_shift == 1 or self.cur_hysteresis == 1:
self.cur_scale = max(self.cur_scale/self.scale_factor, self.min_scale)
else:
self.cur_hysteresis -= 1
self.last_overflow_iter = self.cur_iter
else:
if self.consecutive_hysteresis:
self.cur_hysteresis = self.delayed_shift
if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
if not self.consecutive_hysteresis:
self.cur_hysteresis = self.delayed_shift
self.cur_scale *= self.scale_factor
self.cur_iter += 1
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def backward(self, loss, retain_graph=False):
scaled_loss = loss*self.loss_scale
scaled_loss.backward(retain_graph=retain_graph)
##############################################################
# Example usage below here -- assuming it's in a separate file
##############################################################
"""
TO-DO separate out into an example.
if __name__ == "__main__":
import torch
from torch.autograd import Variable
from dynamic_loss_scaler import DynamicLossScaler
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in), requires_grad=False)
y = Variable(torch.randn(N, D_out), requires_grad=False)
w1 = Variable(torch.randn(D_in, H), requires_grad=True)
w2 = Variable(torch.randn(H, D_out), requires_grad=True)
parameters = [w1, w2]
learning_rate = 1e-6
optimizer = torch.optim.SGD(parameters, lr=learning_rate)
loss_scaler = DynamicLossScaler()
for t in range(500):
y_pred = x.mm(w1).clamp(min=0).mm(w2)
loss = (y_pred - y).pow(2).sum() * loss_scaler.loss_scale
print('Iter {} loss scale: {}'.format(t, loss_scaler.loss_scale))
print('Iter {} scaled loss: {}'.format(t, loss.data[0]))
print('Iter {} unscaled loss: {}'.format(t, loss.data[0] / loss_scaler.loss_scale))
# Run backprop
optimizer.zero_grad()
loss.backward()
# Check for overflow
        has_overflow = loss_scaler.has_overflow(parameters)
# If no overflow, unscale grad and update as usual
if not has_overflow:
for param in parameters:
param.grad.data.mul_(1. / loss_scaler.loss_scale)
optimizer.step()
# Otherwise, don't do anything -- ie, skip iteration
else:
print('OVERFLOW!')
# Update loss scale for next iteration
loss_scaler.update_scale(has_overflow)
"""
| 9,150 | 40.035874 | 326 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/fp16/fp16.py | # coding=utf-8
"""Stable version of apex FP16 Optimizer"""
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from .loss_scaler import DynamicLossScaler, LossScaler
from .fp16util import model_grads_to_master_grads, master_params_to_model_params, clip_grad_norm
FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor)
HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor)
def conversion_helper(val, conversion):
"""Apply conversion to val. Recursively apply conversion if `val` is a nested tuple/list structure."""
if not isinstance(val, (tuple, list)):
return conversion(val)
rtn = [conversion_helper(v, conversion) for v in val]
if isinstance(val, tuple):
rtn = tuple(rtn)
return rtn
def fp32_to_fp16(val):
"""Convert fp32 `val` to fp16"""
def half_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, FLOAT_TYPES):
val = val.half()
return val
return conversion_helper(val, half_conversion)
def fp16_to_fp32(val):
"""Convert fp16 `val` to fp32"""
def float_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, HALF_TYPES):
val = val.float()
return val
return conversion_helper(val, float_conversion)
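# Example (illustration only): conversion_helper preserves arbitrary nesting,
# e.g. fp32_to_fp16((x, [y, z])) casts every fp32 tensor to half while keeping
# the surrounding tuple/list structure intact.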
class FP16_Module(nn.Module):
def __init__(self, module):
super(FP16_Module, self).__init__()
self.add_module('module', module.half())
def forward(self, *inputs, **kwargs):
return fp16_to_fp32(self.module(*(fp32_to_fp16(inputs)), **kwargs))
def state_dict(self, destination=None, prefix='', keep_vars=False):
return self.module.state_dict(destination, prefix, keep_vars)
def load_state_dict(self, state_dict, strict=True):
self.module.load_state_dict(state_dict, strict=strict)
# TODO: Update overflow check + downscale to use Carl's fused kernel.
class FP16_Optimizer(object):
"""
:class:`FP16_Optimizer` is designed to wrap an existing PyTorch optimizer,
and manage static or dynamic loss scaling and master weights in a manner transparent to the user.
For standard use, only two lines must be changed: creating the :class:`FP16_Optimizer` instance,
and changing the call to ``backward``.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
# Name the FP16_Optimizer instance to replace the existing optimizer
# (recommended but not required):
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
# loss.backward() becomes:
optimizer.backward(loss)
...
Example with dynamic loss scaling::
...
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
# optional arg to control dynamic loss scaling behavior
# dynamic_loss_args={'scale_window' : 500})
# Usually, dynamic_loss_args is not necessary.
Args:
init_optimizer (torch.optim.optimizer): Existing optimizer created with the parameters to optimize. Internally, :class:`FP16_Optimizer` replaces the passed optimizer's fp16 parameters, if any, with fp32 master parameters copied from the original ones. :class:`FP16_Optimizer` also stores references to the original fp16 parameters, and updates these fp16 parameters from the master fp32 copy at the end of each :attr:`step`.
static_loss_scale (float, optional, default=1.0): Loss scale used internally to scale gradients computed by the model. Any fp16 gradients will be copied to fp32, then downscaled before being applied to the fp32 master params, so ``static_loss_scale`` should not affect learning rate.
dynamic_loss_scale (bool, optional, default=False): Use dynamic loss scaling. If True, this will override any ``static_loss_scale`` option.
dynamic_loss_args (dict, optional, default=None): Dict of kwargs that will be forwarded to the internal :class:`DynamicLossScaler` instance's constructor. Keys of this dict must match kwargs accepted by :class:`DynamicLossScaler`'s constructor. If ``dynamic_loss_args`` is unspecified, :class:`DynamicLossScaler`'s defaults will be used.
verbose (bool, optional, default=True): By default, FP16_Optimizer's constructor prints out the parameters and parameter groups it is ingesting, as a sanity check. If this becomes annoying (e.g. for large models), it can be disabled by passing ``verbose=False``. ``verbose=False`` will not disable printing when the loss scale is readjusted during dynamic loss scaling.
``init_optimizer`` is expected to have been constructed in the ordinary way.
It is recommended (although not required) that the newly constructed :class:`FP16_Optimizer` instance be
named to replace ``init_optimizer``, for two reasons:
First, it means that references to the same name
later in the file will not have to change.
Second, :class:`FP16_Optimizer` reserves the right (as an implementation detail) to
modify ``init_optimizer``. If you do choose a unique name for the new
:class:`FP16_Optimizer` instance, you should only work with this new instance,
because the preexisting optimizer might no longer behave as expected.
``init_optimizer`` may be any Pytorch optimizer.
It may contain a mixture of fp16 and fp32 parameters organized into any number of
``param_groups`` with different hyperparameters. The :class:`FP16_Optimizer` constructor will
ingest these ``param_groups`` and remember them.
Calls to ::
loss.backward()
must be replaced with ::
optimizer.backward(loss)
because :class:`FP16_Optimizer` requires ownership of the backward pass to implement
loss scaling and copies to master gradients.
.. note::
Loss scaling, either static or dynamic, is orthogonal to learning rate, because gradients
are downscaled before being applied. This means that adjusting the loss scale, or using
dynamic loss scaling, should not require retuning the learning rate or any other
hyperparameters.
**Advanced options**
**Closures**: :class:`FP16_Optimizer` can wrap a Pytorch optimizer that receives a closure.
See docstring for :attr:`step`.
**Gradient clipping**: Use :attr:`clip_master_grads`.
**Multiple losses**: If your model accumulates gradients from multiple losses,
this can be made more efficient by supplying ``update_master_grads=False``
to :attr:`backward`. See docstring for :attr:`backward`.
**Manually adjusting loss scale**: The current loss scale can be retrieved or set via ::
print(optimizer.loss_scale)
optimizer.loss_scale = new_loss_scale
For static loss scaling, manually adjusting the loss scale over time is a reasonable
thing to do. During later epochs, gradients may become smaller, and a
higher loss scale may be required, analogous to scheduling the learning rate. Dynamic loss
scaling is more subtle (see :class:`DynamicLossScaler`) and in this case, manually adjusting
the loss scale is not recommended.
**Multi_GPU training**: If the wrapped ``init_optimizer`` was created from a model wrapped in
Pytorch DistributedDataParallel or Apex DistributedDataParallel, :class:`FP16_Optimizer`
should still work as intended.
"""
def __init__(self,
init_optimizer,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=False):
        if not torch.cuda.is_available():
raise SystemError("Cannot use fp16 without CUDA.")
self.verbose = verbose
self.optimizer = init_optimizer
# init_state_dict sets up an alternative way to cast per-param state tensors.
# Stashing here in case https://github.com/pytorch/pytorch/issues/7733 makes it necessary.
# init_state_dict = init_optimizer.state_dict()
self.fp16_groups = []
self.fp32_from_fp16_groups = []
self.fp32_from_fp32_groups = []
for i, param_group in enumerate(self.optimizer.param_groups):
self.maybe_print("FP16_Optimizer processing param group {}:".format(i))
fp16_params_this_group = []
fp32_params_this_group = []
fp32_from_fp16_params_this_group = []
for i, param in enumerate(param_group['params']):
if param.requires_grad:
if param.type() == 'torch.cuda.HalfTensor':
self.maybe_print("FP16_Optimizer received torch.cuda.HalfTensor with {}"
.format(param.size()))
fp16_params_this_group.append(param)
master_param = param.detach().clone().float()
master_param.requires_grad = True
                        # Copy the model parallel flag.
master_param.model_parallel = param.model_parallel
param_group['params'][i] = master_param
fp32_from_fp16_params_this_group.append(master_param)
# Reset existing state dict key to the new master param.
# We still need to recast per-param state tensors, if any, to FP32.
if param in self.optimizer.state:
self.optimizer.state[master_param] = self.optimizer.state.pop(param)
elif param.type() == 'torch.cuda.FloatTensor':
self.maybe_print("FP16_Optimizer received torch.cuda.FloatTensor with {}"
.format(param.size()))
fp32_params_this_group.append(param)
param_group['params'][i] = param
else:
raise TypeError("Wrapped parameters must be either "
"torch.cuda.FloatTensor or torch.cuda.HalfTensor. "
"Received {}".format(param.type()))
self.fp16_groups.append(fp16_params_this_group)
self.fp32_from_fp16_groups.append(fp32_from_fp16_params_this_group)
self.fp32_from_fp32_groups.append(fp32_params_this_group)
# Leverage state_dict() and load_state_dict() to recast preexisting per-param state tensors
self.optimizer.load_state_dict(self.optimizer.state_dict())
# alternative way to cast per-param state tensors:
# self.optimizer.load_state_dict(init_state_dict)
if dynamic_loss_scale:
self.dynamic_loss_scale = True
if dynamic_loss_args is not None:
self.loss_scaler = DynamicLossScaler(**dynamic_loss_args)
else:
self.loss_scaler = DynamicLossScaler()
else:
self.dynamic_loss_scale = False
self.loss_scaler = LossScaler(static_loss_scale)
self.overflow = False
self.first_closure_call_this_step = True
self.clip_grad_norm = clip_grad_norm
def maybe_print(self, msg):
if self.verbose:
print(msg)
def __getstate__(self):
raise RuntimeError("FP16_Optimizer should be serialized using state_dict().")
def __setstate__(self, state):
raise RuntimeError("FP16_Optimizer should be deserialized using load_state_dict().")
def zero_grad(self, set_grads_to_None=False):
"""
Zero fp32 and fp16 parameter grads.
"""
# In principle, only the .grad attributes of the model params need to be zeroed,
# because gradients are copied into the FP32 master params. However, we zero
# all gradients owned by the optimizer, just to be safe:
for group in self.optimizer.param_groups:
for p in group['params']:
if set_grads_to_None:
p.grad = None
else:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
# Zero fp16 gradients owned by the model:
for fp16_group in self.fp16_groups:
for param in fp16_group:
if set_grads_to_None:
param.grad = None
else:
if param.grad is not None:
param.grad.detach_() # as in torch.optim.optimizer.zero_grad()
param.grad.zero_()
def _check_overflow(self):
params = []
for group in self.fp16_groups:
for param in group:
params.append(param)
for group in self.fp32_from_fp32_groups:
for param in group:
params.append(param)
self.overflow = self.loss_scaler.has_overflow(params)
def _update_scale(self, has_overflow=False):
self.loss_scaler.update_scale(has_overflow)
def _master_params_to_model_params(self):
for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups):
master_params_to_model_params(fp16_group, fp32_from_fp16_group)
def _model_params_to_master_params(self):
for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups):
master_params_to_model_params(fp32_from_fp16_group, fp16_group)
# To consider: Integrate distributed with this wrapper by registering a hook on each variable
# that does the overflow check, gradient copy + downscale, and fp32 allreduce in a different stream.
def _model_grads_to_master_grads(self):
for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups):
model_grads_to_master_grads(fp16_group, fp32_from_fp16_group)
def _downscale_master(self):
if self.loss_scale != 1.0:
for group in self.optimizer.param_groups:
for param in group['params']:
if param.grad is not None:
param.grad.data.mul_(1./self.loss_scale)
def clip_master_grads(self, max_norm, norm_type=2):
"""
Clips fp32 master gradients via ``torch.nn.utils.clip_grad_norm``.
Args:
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the current fp32 gradients (viewed as a single vector).
.. warning::
Returns -1 if the most recently computed fp16 gradients overflowed (that is, if ``self.overflow`` is ``True``).
"""
if not self.overflow:
fp32_params = []
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
fp32_params.append(param)
return self.clip_grad_norm(fp32_params, max_norm, norm_type)
else:
return -1
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict['first_closure_call_this_step'] = self.first_closure_call_this_step
state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
state_dict['fp32_from_fp16'] = self.fp32_from_fp16_groups
return state_dict
def load_state_dict(self, state_dict):
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
self.loss_scaler = state_dict['loss_scaler']
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.overflow = state_dict['overflow']
self.first_closure_call_this_step = state_dict['first_closure_call_this_step']
self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 2.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
for current_group, saved_group in zip(self.fp32_from_fp16_groups, state_dict['fp32_from_fp16']):
for current, saved in zip(current_group, saved_group):
current.data.copy_(saved.data)
def step(self, closure=None): # could add clip option.
"""
If no closure is supplied, :attr:`step` should be called after
``fp16_optimizer_obj.backward(loss)``.
:attr:`step` updates the fp32 master copy of parameters using the optimizer supplied to
:class:`FP16_Optimizer`'s constructor, then copies the updated fp32 params into the fp16 params
originally referenced by :class:`FP16_Optimizer`'s constructor, so the user may immediately run
another forward pass using their model.
If a closure is supplied, :attr:`step` may be called without a prior call to
:attr:`backward(loss)`.
This control flow is identical to `ordinary Pytorch optimizer use`_ with closures.
However, the user should take care that any ``loss.backward()`` call within the closure
has been replaced by ``fp16_optimizer_obj.backward(loss)``.
Args:
closure (optional): Closure that will be supplied to the underlying optimizer originally passed to :class:`FP16_Optimizer`'s constructor. closure should call :attr:`zero_grad()` on the :class:`FP16_Optimizer` object, compute the loss, call :attr:`backward(loss)`, and return the loss.
Example with closure::
# optimizer is assumed to be an FP16_Optimizer object, previously constructed from an
# existing pytorch optimizer.
for input, target in dataset:
def closure():
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
# loss.backward() becomes:
optimizer.backward(loss)
return loss
optimizer.step(closure)
.. warning::
Currently, calling :attr:`step` with a closure is not compatible with dynamic loss scaling.
.. _`ordinary Pytorch optimizer use`:
http://pytorch.org/docs/master/optim.html#optimizer-step-closure
"""
scale = self.loss_scaler.loss_scale
self._update_scale(self.overflow)
if self.overflow:
self.maybe_print("OVERFLOW! Skipping step. Attempted loss scale: {}, reducing to {}"
.format(scale, self.loss_scale))
return
if closure is not None:
retval = self._step_with_closure(closure)
else:
retval = self.optimizer.step()
self._master_params_to_model_params()
return retval
def _step_with_closure(self, closure):
def wrapped_closure():
# helpful for debugging
# print("Calling wrapped_closure, first_closure_call_this_step = {}"
# .format(self.first_closure_call_this_step))
if self.first_closure_call_this_step:
# We expect that the fp16 params are initially fresh on entering self.step(),
# so _master_params_to_model_params() is unnecessary the first time wrapped_closure()
# is called within self.optimizer.step().
self.first_closure_call_this_step = False
else:
# If self.optimizer.step() internally calls wrapped_closure more than once,
# it may update the fp32 params after each call. However, self.optimizer
# doesn't know about the fp16 params at all. If the fp32 params get updated,
# we can't rely on self.optimizer to refresh the fp16 params. We need
# to handle that manually:
self._master_params_to_model_params()
# Our API expects the user to give us ownership of the backward() call by
# replacing all calls to loss.backward() with optimizer.backward(loss).
# This requirement holds whether or not the call to backward() is made within a closure.
# If the user is properly calling optimizer.backward(loss) within "closure,"
# calling closure() here will give the fp32 master params fresh gradients
# for the optimizer to play with, so all wrapped_closure needs to do is call
# closure() and return the loss.
temp_loss = closure()
            while self.overflow:
scale = self.loss_scaler.loss_scale
self._update_scale(self.overflow)
self.maybe_print("OVERFLOW within closure! Skipping step. Attempted loss scale: {}, "
"reducing to {}".format(scale, self.loss_scale))
temp_loss = closure()
return temp_loss
retval = self.optimizer.step(wrapped_closure)
self.first_closure_call_this_step = True
return retval
def backward(self, loss, update_master_grads=True, retain_graph=False):
"""
:attr:`backward` performs the following conceptual steps:
1. fp32_loss = loss.float() (see first Note below)
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's leaves (which may be fp16, fp32, or a mixture, depending how your model was defined).
4. fp16 grads are then copied to the master params' ``.grad`` attributes (see second Note), which are guaranteed to be fp32.
5. Finally, master grads are divided by loss_scale.
In this way, after :attr:`backward`, the master params have fresh gradients,
and :attr:`step` may be called.
.. note::
:attr:`backward` internally converts the loss to fp32 before applying the loss scale.
This provides some additional safety against overflow if the user has supplied an
fp16 loss value.
However, for maximum overflow safety, the user should
compute the loss criterion (MSE, cross entropy, etc) in fp32 before supplying it to
:attr:`backward`.
.. warning::
The gradients found in a model's leaves after the call to
:attr:`backward` should not be regarded as valid in general,
because it's possible
they have been scaled (and in the case of dynamic loss scaling,
the scale factor may change over time).
If the user wants to inspect gradients after a call to :attr:`backward`,
only the master gradients should be regarded as valid. These can be retrieved via
:attr:`inspect_master_grad_data()`.
Args:
loss: The loss output by the user's model. loss may be either float or half (but see first Note above).
update_master_grads (bool, optional, default=True): Option to copy fp16 grads to fp32 grads on this call. By setting this to False, the user can delay the copy, which is useful to eliminate redundant fp16->fp32 grad copies if :attr:`backward` is being called on multiple losses in one iteration. If set to False, the user becomes responsible for calling :attr:`update_master_grads` before calling :attr:`step`.
retain_graph (bool, optional, default=False): Forwards the usual ``retain_graph=True`` option to the internal call to ``loss.backward``. If ``retain_graph`` is being used to accumulate gradient values from multiple backward passes before calling ``optimizer.step``, passing ``update_master_grads=False`` is also recommended (see Example below).
Example::
# Ordinary operation:
optimizer.backward(loss)
# Naive operation with multiple losses (technically valid, but less efficient):
# fp32 grads will be correct after the second call, but
# the first call incurs an unnecessary fp16->fp32 grad copy.
optimizer.backward(loss1)
optimizer.backward(loss2)
# More efficient way to handle multiple losses:
# The fp16->fp32 grad copy is delayed until fp16 grads from all
# losses have been accumulated.
optimizer.backward(loss1, update_master_grads=False)
optimizer.backward(loss2, update_master_grads=False)
optimizer.update_master_grads()
"""
        # To consider: try multiple backward passes using retain_graph=True to find
# a loss scale that works. After you find a loss scale that works, do a final dummy
# backward pass with retain_graph=False to tear down the graph. Doing this would avoid
# discarding the iteration, but probably wouldn't improve overall efficiency.
self.loss_scaler.backward(loss.float(), retain_graph=retain_graph)
if update_master_grads:
self.update_master_grads()
def update_master_grads(self):
"""
Copy the ``.grad`` attribute from stored references to fp16 parameters to
the ``.grad`` attribute of the fp32 master parameters that are directly
updated by the optimizer. :attr:`update_master_grads` only needs to be called if
``fp16_optimizer_obj.backward`` was called with ``update_master_grads=False``.
"""
if self.dynamic_loss_scale:
self._check_overflow()
if self.overflow: return
self._model_grads_to_master_grads()
self._downscale_master()
def inspect_master_grad_data(self):
"""
When running with :class:`FP16_Optimizer`,
``.grad`` attributes of a model's fp16 leaves should not be
regarded as truthful, because they might be scaled.
After a call to :attr:`fp16_optimizer_obj.backward(loss)`, if no overflow was encountered,
the fp32 master params' ``.grad``
attributes will contain valid gradients properly divided by the loss scale. However,
because :class:`FP16_Optimizer` flattens some parameters, accessing them may be
nonintuitive. :attr:`inspect_master_grad_data`
allows those gradients to be viewed with shapes corresponding to their associated model leaves.
Returns:
List of lists (one list for each parameter group). The list for each parameter group
is a list of the ``.grad.data`` attributes of the fp32 master params belonging to that group.
"""
if self.overflow:
print("Warning: calling FP16_Optimizer.inspect_master_grad_data while in an overflow state. "
"Gradients are currently invalid (may be inf, nan, or stale). Returning None.")
return None
else:
# The optimizer owns only references to master params.
master_grads_data = []
for param_group in self.optimizer.param_groups:
master_grads_this_group = []
for param in param_group['params']:
if param.grad is not None:
master_grads_this_group.append(param.grad.data)
else:
master_grads_this_group.append(None)
master_grads_data.append(master_grads_this_group)
return master_grads_data
# Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
def _get_loss_scale(self):
return self.loss_scaler.loss_scale
def _set_loss_scale(self, value):
self.loss_scaler.cur_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale)
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
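# Illustrative sketch (not part of the original class): the promoted properties
# above let a training loop treat an FP16_Optimizer like the optimizer it wraps.
# `optimizer` and `new_lr` are hypothetical names; assumes `optimizer` is an
# already-constructed FP16_Optimizer instance.
def _example_adjust_lr_and_read_scale(optimizer, new_lr):
    for group in optimizer.param_groups:  # forwarded to the wrapped optimizer
        group['lr'] = new_lr
    return optimizer.loss_scale           # forwarded to the loss scaler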
| 31,108 | 49.338188 | 437 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/mpu/mappings.py | # coding=utf-8
import torch
from .initialize import get_model_parallel_group
from .utils import split_tensor_along_last_dim
def _reduce(input_):
"""All-reduce the the input tensor across model parallel group."""
group = get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if torch.distributed.get_world_size(group=group) == 1:
return input_
# All-reduce.
torch.distributed.all_reduce(input_, group=group)
return input_
def _split(input_):
"""Split the tensor along its last dimension and keep the
corresponding slice."""
group = get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if torch.distributed.get_world_size(group=group) == 1:
return input_
# Split along last dimension.
world_size = torch.distributed.get_world_size(group=group)
input_list = split_tensor_along_last_dim(input_, world_size)
# Note: torch.split does not create contiguous tensors by default.
rank = torch.distributed.get_rank(group=group)
output = input_list[rank].contiguous()
return output
def _gather(input_):
"""Gather tensors and concatinate along the last dimension."""
group = get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if torch.distributed.get_world_size(group=group) == 1:
return input_
# Size and dimension.
last_dim = input_.dim() - 1
rank = torch.distributed.get_rank(group=group)
world_size = torch.distributed.get_world_size(group=group)
tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
tensor_list[rank] = input_
torch.distributed.all_gather(tensor_list, input_, group=group)
# Note: torch.cat already creates a contiguous tensor.
output = torch.cat(tensor_list, dim=last_dim).contiguous()
return output
class _CopyToModelParallelRegion(torch.autograd.Function):
"""Pass the input to the model parallel region."""
@staticmethod
def forward(ctx, input_):
return input_
@staticmethod
def backward(ctx, grad_output):
return _reduce(grad_output)
class _ReduceFromModelParallelRegion(torch.autograd.Function):
"""All-redcue the input from the model parallel region."""
@staticmethod
def forward(ctx, input_):
return _reduce(input_)
@staticmethod
def backward(ctx, grad_output):
return grad_output
class _ScatterToModelParallelRegion(torch.autograd.Function):
"""Split the input and keep only the corresponding chuck to the rank."""
@staticmethod
def forward(ctx, input_):
return _split(input_)
@staticmethod
def backward(ctx, grad_output):
return _gather(grad_output)
class _GatherFromModelParallelRegion(torch.autograd.Function):
"""Gather the input from model parallel region and concatinate."""
@staticmethod
def forward(ctx, input_):
return _gather(input_)
@staticmethod
def backward(ctx, grad_output):
return _split(grad_output)
# -----------------
# Helper functions.
# -----------------
def copy_to_model_parallel_region(input_):
return _CopyToModelParallelRegion.apply(input_)
def reduce_from_model_parallel_region(input_):
return _ReduceFromModelParallelRegion.apply(input_)
def scatter_to_model_parallel_region(input_):
return _ScatterToModelParallelRegion.apply(input_)
def gather_from_model_parallel_region(input_):
return _GatherFromModelParallelRegion.apply(input_)
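# Illustrative sketch (not part of the original module): a column-parallel
# matmul written with the helpers above. Assumes model parallelism has been
# initialized and that `weight_partition` holds this rank's slice of the
# weight; both argument names are hypothetical.
def _example_column_parallel_matmul(input_, weight_partition):
    # Identity in the forward pass, all-reduce of gradients in the backward pass.
    input_parallel = copy_to_model_parallel_region(input_)
    output_parallel = torch.matmul(input_parallel, weight_partition)
    # Concatenate the per-rank partial outputs along the last dimension.
    return gather_from_model_parallel_region(output_parallel)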
| 3,527 | 26.138462 | 76 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/mpu/initialize.py | # coding=utf-8
"""Model and data parallel groups."""
import torch
from .utils import ensure_divisibility
# Model parallel group that the current rank belongs to.
_MODEL_PARALLEL_GROUP = None
# Data parallel group that the current rank belongs to.
_DATA_PARALLEL_GROUP = None
def initialize_model_parallel(model_parallel_size_):
"""
Initialize model data parallel groups.
Arguments:
model_parallel_size: number of GPUs used to parallelize model.
Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
use 2 GPUs to parallelize the model. The present function will
    create 4 model parallel groups and 2 data parallel groups as:
4 model parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7]
2 data parallel groups:
[g0, g2, g4, g6], [g1, g3, g5, g7]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
    with a total of 16 GPUs, ranks 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
"""
if torch.distributed.get_rank() == 0:
print('> initializing model parallel with size {}'.format(
model_parallel_size_))
# Get world size and rank. Ensure some consistencies.
assert torch.distributed.is_initialized()
world_size = torch.distributed.get_world_size()
model_parallel_size = min(model_parallel_size_, world_size)
ensure_divisibility(world_size, model_parallel_size)
rank = torch.distributed.get_rank()
# Build the data parallel groups.
global _DATA_PARALLEL_GROUP
assert _DATA_PARALLEL_GROUP is None, \
'data parallel group is already initialized'
for i in range(model_parallel_size):
ranks = range(i, world_size, model_parallel_size)
group = torch.distributed.new_group(ranks)
if i == (rank % model_parallel_size):
_DATA_PARALLEL_GROUP = group
# Build the model parallel groups.
global _MODEL_PARALLEL_GROUP
assert _MODEL_PARALLEL_GROUP is None, \
'model parallel group is already initialized'
for i in range(world_size // model_parallel_size):
ranks = range(i * model_parallel_size,
(i + 1) * model_parallel_size)
group = torch.distributed.new_group(ranks)
if i == (rank // model_parallel_size):
_MODEL_PARALLEL_GROUP = group
def model_parallel_is_initialized():
"""Check if model and data parallel groups are initialized."""
if _MODEL_PARALLEL_GROUP is None or _DATA_PARALLEL_GROUP is None:
return False
return True
def get_model_parallel_group():
"""Get the model parallel group the caller rank belongs to."""
assert _MODEL_PARALLEL_GROUP is not None, \
'model parallel group is not initialized'
return _MODEL_PARALLEL_GROUP
def get_data_parallel_group():
"""Get the data parallel group the caller rank belongs to."""
assert _DATA_PARALLEL_GROUP is not None, \
'data parallel group is not initialized'
return _DATA_PARALLEL_GROUP
def get_model_parallel_world_size():
"""Return world size for the model parallel group."""
return torch.distributed.get_world_size(group=get_model_parallel_group())
def get_model_parallel_rank():
"""Return my rank for the model parallel group."""
return torch.distributed.get_rank(group=get_model_parallel_group())
def get_model_parallel_src_rank():
"""Calculate the global rank corresponding to a local rank zeor
in the model parallel group."""
global_rank = torch.distributed.get_rank()
local_world_size = get_model_parallel_world_size()
return (global_rank // local_world_size) * local_world_size
def get_data_parallel_world_size():
"""Return world size for the data parallel group."""
return torch.distributed.get_world_size(group=get_data_parallel_group())
def get_data_parallel_rank():
"""Return my rank for the data parallel group."""
return torch.distributed.get_rank(group=get_data_parallel_group())
def destroy_model_parallel():
"""Set the groups to none."""
global _MODEL_PARALLEL_GROUP
_MODEL_PARALLEL_GROUP = None
global _DATA_PARALLEL_GROUP
_DATA_PARALLEL_GROUP = None
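# Illustrative sketch (not part of the original module): a typical start-up
# sequence for 2-way model parallelism. Assumes the process-group environment
# variables are already set (for example by torch.distributed.launch) and that
# the world size is divisible by 2.
def _example_setup_two_way_model_parallel():
    if not torch.distributed.is_initialized():
        torch.distributed.init_process_group(backend="nccl")
    initialize_model_parallel(2)
    # Each rank now belongs to one model parallel group of 2 ranks and one
    # data parallel group of world_size // 2 ranks.
    return get_model_parallel_rank(), get_data_parallel_rank()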
| 4,274 | 33.475806 | 77 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/mpu/cross_entropy.py | # coding=utf-8
import torch
from .initialize import get_model_parallel_group
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .utils import VocabUtility
class _VocabParallelCrossEntropy(torch.autograd.Function):
@staticmethod
def forward(ctx, vocab_parallel_logits, target):
# Copy so the input remains unchanged.
logits = vocab_parallel_logits.clone()
# Maximum value along vocab dimension across all GPUs.
logits_max = torch.max(logits, dim=-1)[0]
torch.distributed.all_reduce(logits_max,
op=torch.distributed.ReduceOp.MAX,
group=get_model_parallel_group())
# Subtract the maximum value.
logits.sub_(logits_max.unsqueeze(dim=-1))
# Sum of exponential of logits along vocab dimension across all GPUs.
exp_logits = logits.exp()
sum_exp_logits = exp_logits.sum(dim=-1)
torch.distributed.all_reduce(sum_exp_logits,
op=torch.distributed.ReduceOp.SUM,
group=get_model_parallel_group())
        # Get the partition's vocab indices
get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size
partition_vocab_size = vocab_parallel_logits.size()[-1]
rank = get_model_parallel_rank()
world_size = get_model_parallel_world_size()
vocab_start_index, vocab_end_index = get_vocab_range(
partition_vocab_size, rank, world_size)
# Create a mask of valid vocab ids (1 means it needs to be masked).
target_mask = (target < vocab_start_index) | (target >= vocab_end_index)
masked_target = target.clone() - vocab_start_index
masked_target[target_mask] = 0
# Get predicted-logits = logits[target].
# For Simplicity, we convert logits to a 2-D tensor with size
# [*, partition-vocab-size] and target to a 1-D tensor of size [*].
logits_2d = logits.view(-1, partition_vocab_size)
masked_target_1d = masked_target.view(-1)
arange_1d = torch.arange(start=0, end=logits_2d.size()[0],
device=logits_2d.device)
predicted_logits_1d = logits_2d[arange_1d, masked_target_1d]
predicted_logits = predicted_logits_1d.view_as(target)
predicted_logits[target_mask] = 0.0
# All reduce is needed to get the chunks from other GPUs.
torch.distributed.all_reduce(predicted_logits,
op=torch.distributed.ReduceOp.SUM,
group=get_model_parallel_group())
# Loss = log(sum(exp(logits))) - predicted-logit.
loss = torch.log(sum_exp_logits) - predicted_logits
# Store softmax, target-mask and masked-target for backward pass.
exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
ctx.save_for_backward(exp_logits, target_mask, masked_target_1d)
return loss
@staticmethod
def backward(ctx, grad_output):
        # Retrieve tensors from the forward path.
        softmax, target_mask, masked_target_1d = ctx.saved_tensors
        # All the inputs have softmax as their gradient.
grad_input = softmax
# For simplicity, work with the 2D gradient.
partition_vocab_size = softmax.size()[-1]
grad_2d = grad_input.view(-1, partition_vocab_size)
# Add the gradient from matching classes.
arange_1d = torch.arange(start=0, end=grad_2d.size()[0],
device=grad_2d.device)
grad_2d[arange_1d, masked_target_1d] -= (
1.0 - target_mask.view(-1).float())
# Finally elementwise multiplication with the output gradients.
grad_input.mul_(grad_output.unsqueeze(dim=-1))
return grad_input, None
def vocab_parallel_cross_entropy(vocab_parallel_logits, target):
"""Helper function for the cross entropy."""
return _VocabParallelCrossEntropy.apply(vocab_parallel_logits, target)
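# Illustrative sketch (not part of the original module): expected shapes for the
# helper above, assuming 2-way model parallelism over a global vocabulary of 8
# tokens so each rank holds logits for 4 of them. All sizes are hypothetical and
# the model parallel groups must already be initialized.
def _example_vocab_parallel_cross_entropy():
    batch, seq, vocab_per_partition = 2, 3, 4
    logits = torch.randn(batch, seq, vocab_per_partition,
                         device="cuda", requires_grad=True)
    target = torch.randint(0, 8, (batch, seq), device="cuda")  # global token ids
    loss = vocab_parallel_cross_entropy(logits, target)        # shape [b, s]
    loss.mean().backward()
    return loss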
class _ParallelKLLoss(torch.autograd.Function):
@staticmethod
def forward(cls, logits: torch.Tensor, targets: torch.Tensor):
# Maximum value along vocab dimension across all GPUs.
logits_max = torch.max(logits, dim=-1)[0]
torch.distributed.all_reduce(logits_max,
op=torch.distributed.ReduceOp.MAX,
group=get_model_parallel_group())
# Subtract the maximum value.
logits.sub_(logits_max.unsqueeze(dim=-1))
# Sum of exponential of logits along vocab dimension across all GPUs.
exp_logits = logits.exp()
sum_exp_logits = exp_logits.sum(dim=-1)
torch.distributed.all_reduce(sum_exp_logits,
op=torch.distributed.ReduceOp.SUM,
group=get_model_parallel_group())
targets_max = torch.max(targets, dim=-1)[0]
torch.distributed.all_reduce(targets_max,
op=torch.distributed.ReduceOp.MAX,
group=get_model_parallel_group())
# Subtract the maximum value.
targets.sub_(targets_max.unsqueeze(dim=-1))
# Sum of exponential of logits along vocab dimension across all GPUs.
exp_targets = targets.exp()
sum_exp_targets = exp_targets.sum(dim=-1)
torch.distributed.all_reduce(sum_exp_targets,
op=torch.distributed.ReduceOp.SUM,
group=get_model_parallel_group())
# targets_softmax: [b, s, v_p]
targets_softmax = torch.div(exp_targets, sum_exp_targets.unsqueeze(-1))
# sum_targets_softmax_logits: [b, s]
sum_targets_softmax_logits = torch.matmul(
targets_softmax.unsqueeze(-2), logits.unsqueeze(-1)).squeeze(-1).squeeze(-1)
torch.distributed.all_reduce(sum_targets_softmax_logits,
op=torch.distributed.ReduceOp.SUM,
group=get_model_parallel_group())
log_targets_softmax = torch.log(targets_softmax)
sum_log_targets_softmax = torch.matmul(
targets_softmax.unsqueeze(-2), log_targets_softmax.unsqueeze(-1)).squeeze(-1).squeeze(-1)
torch.distributed.all_reduce(sum_log_targets_softmax,
op=torch.distributed.ReduceOp.SUM,
group=get_model_parallel_group())
loss = torch.log(sum_exp_logits) - sum_targets_softmax_logits + sum_log_targets_softmax
logits_softmax = torch.div(exp_logits, sum_exp_logits.unsqueeze(-1))
cls.save_for_backward(logits_softmax, targets_softmax)
return loss
@staticmethod
def backward(cls, grad_output: torch.Tensor):
logits_softmax, targets_softmax = cls.saved_tensors
grad_input = (logits_softmax - targets_softmax) * grad_output.unsqueeze(-1)
return grad_input, None
def parallel_KL_loss(logits, targets):
return _ParallelKLLoss.apply(logits, targets)
class _ParallelSoftCrossEntropyLoss(torch.autograd.Function):
@staticmethod
def forward(cls, logits: torch.Tensor, targets: torch.Tensor):
# Maximum value along vocab dimension across all GPUs.
logits_max = torch.max(logits, dim=-1)[0]
torch.distributed.all_reduce(logits_max,
op=torch.distributed.ReduceOp.MAX,
group=get_model_parallel_group())
# Subtract the maximum value.
logits.sub_(logits_max.unsqueeze(dim=-1))
# Sum of exponential of logits along vocab dimension across all GPUs.
exp_logits = logits.exp()
sum_exp_logits = exp_logits.sum(dim=-1)
torch.distributed.all_reduce(sum_exp_logits,
op=torch.distributed.ReduceOp.SUM,
group=get_model_parallel_group())
# sum_targets_softmax_logits: [b, s]
sum_targets_softmax_logits = torch.matmul(
targets.unsqueeze(-2), logits.unsqueeze(-1)).squeeze(-1).squeeze(-1)
torch.distributed.all_reduce(sum_targets_softmax_logits,
op=torch.distributed.ReduceOp.SUM,
group=get_model_parallel_group())
loss = torch.log(sum_exp_logits) - sum_targets_softmax_logits
logits_softmax = torch.div(exp_logits, sum_exp_logits.unsqueeze(-1))
cls.save_for_backward(logits_softmax, targets)
return loss
@staticmethod
def backward(cls, grad_output: torch.Tensor):
logits_softmax, targets = cls.saved_tensors
grad_input = (logits_softmax - targets) * grad_output.unsqueeze(-1)
return grad_input, None
def parallel_soft_cross_entropy_loss(logits, targets):
return _ParallelSoftCrossEntropyLoss.apply(logits, targets) | 9,078 | 41.425234 | 101 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/mpu/utils.py | # coding=utf-8
import torch
def ensure_divisibility(numerator, denominator):
"""Ensure that numerator is divisible by the denominator."""
assert numerator % denominator == 0, '{} is not divisible by {}'.format(
numerator, denominator)
def divide(numerator, denominator):
"""Ensure that numerator is divisible by the denominator and return
the division value."""
ensure_divisibility(numerator, denominator)
return numerator // denominator
def split_tensor_along_last_dim(tensor, num_partitions,
contiguous_split_chunks=False):
"""Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
num_partitions: number of partitions to split the tensor
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
"""
# Get the size and dimension.
last_dim = tensor.dim() - 1
last_dim_size = divide(tensor.size()[last_dim], num_partitions)
# Split.
tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
class VocabUtility:
"""Split the vocabulary into `world_size` chunks amd return the
first and last index of the vocabulary belonging to the `rank`
partition: Note that indecies in [fist, last)"""
@staticmethod
def vocab_range_from_per_partition_vocab_size(per_partition_vocab_size,
rank, world_size):
index_f = rank * per_partition_vocab_size
index_l = index_f + per_partition_vocab_size
return index_f, index_l
@staticmethod
def vocab_range_from_global_vocab_size(global_vocab_size, rank, world_size):
per_partition_vocab_size = divide(global_vocab_size, world_size)
return VocabUtility.vocab_range_from_per_partition_vocab_size(
per_partition_vocab_size, rank, world_size)
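# Illustrative sketch (not part of the original module): splitting a tensor into
# two partitions and computing one partition's vocabulary range. The sizes and
# the rank are hypothetical.
def _example_split_and_vocab_range():
    x = torch.randn(4, 6)
    a, b = split_tensor_along_last_dim(x, 2)           # each chunk is [4, 3]
    start, end = VocabUtility.vocab_range_from_global_vocab_size(
        global_vocab_size=100, rank=1, world_size=2)   # (50, 100)
    return a.shape, b.shape, (start, end)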
| 2,102 | 34.644068 | 80 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/mpu/data.py | # coding=utf-8
import torch
from .initialize import get_model_parallel_group
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_src_rank
_MAX_DATA_DIM = 4
def _check_data_types(keys, data, target_dtype):
"""Check that all the keys have the same target data type."""
for key in keys:
assert data[key].dtype == target_dtype, '{} has data type {} which '\
'is different than {}'.format(key, data[key].dtype, target_dtype)
def _build_key_size_numel_dictionaries(keys, data):
"""Build the size on rank 0 and broadcast."""
max_dim = _MAX_DATA_DIM
sizes = [0 for _ in range(max_dim) for _ in keys]
# Pack the sizes on rank zero.
if get_model_parallel_rank() == 0:
offset = 0
for key in keys:
assert data[key].dim() < max_dim, 'you should increase MAX_DATA_DIM'
size = data[key].size()
for i, s in enumerate(size):
sizes[i + offset] = s
offset += max_dim
# Move to GPU and broadcast.
sizes_cuda = torch.cuda.LongTensor(sizes)
torch.distributed.broadcast(sizes_cuda, get_model_parallel_src_rank(),
group=get_model_parallel_group())
# Move back to cpu and unpack.
sizes_cpu = sizes_cuda.cpu()
key_size = {}
key_numel = {}
total_numel = 0
offset = 0
for key in keys:
i = 0
size = []
numel = 1
while sizes_cpu[offset + i] > 0:
this_size = sizes_cpu[offset + i]
size.append(this_size)
numel *= this_size
i += 1
key_size[key] = size
key_numel[key] = numel
total_numel += numel
offset += max_dim
return key_size, key_numel, total_numel
def broadcast_data(keys, data, datatype):
"""Broadcast data from rank zero of each model parallel group to the
members of the same model parallel group.
Arguments:
        keys: list of keys in the data dictionary to be broadcast
data: data dictionary of string keys and cpu tensor values.
datatype: torch data type of all tensors in data associated
with keys.
"""
# Build (key, size) and (key, number of elements) dictionaries along
# with the total number of elements on all ranks.
key_size, key_numel, total_numel = _build_key_size_numel_dictionaries(keys,
data)
# Pack on rank zero.
if get_model_parallel_rank() == 0:
# Check that all keys have the same data type.
_check_data_types(keys, data, datatype)
# Flatten the data associated with the keys
flatten_data = torch.cat(
[data[key].contiguous().view(-1) for key in keys], dim=0).cuda()
else:
flatten_data = torch.empty(total_numel,
device=torch.cuda.current_device(),
dtype=datatype)
    # Broadcast
torch.distributed.broadcast(flatten_data, get_model_parallel_src_rank(),
group=get_model_parallel_group())
# Unpack
output = {}
offset = 0
for key in keys:
size = key_size[key]
numel = key_numel[key]
output[key] = flatten_data.narrow(0, offset, numel).view(size)
offset += numel
return output
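# Illustrative sketch (not part of the original module): broadcasting a batch
# from rank 0 of each model parallel group before a forward pass. Assumes model
# parallelism has been initialized; the key names and the `batch` argument are
# hypothetical.
def _example_broadcast_batch(batch):
    keys = ['input_ids', 'attention_mask']
    # On model parallel rank 0, `batch` holds cpu int64 tensors for these keys;
    # on other ranks its contents are ignored and the broadcast result is used.
    return broadcast_data(keys, batch, torch.int64)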
| 3,409 | 31.47619 | 80 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/mpu/grads.py | # coding=utf-8
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
import torch
from torch._six import inf
from .initialize import get_model_parallel_group
from .initialize import get_model_parallel_rank
def clip_grad_norm(parameters, max_norm, norm_type=2):
"""Clips gradient norm of an iterable of parameters.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
added functionality to handle model parallel parameters. Note that
the gradients are modified in place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
max_norm = float(max_norm)
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(p.grad.data.abs().max() for p in parameters)
total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
# Take max across all GPUs.
torch.distributed.all_reduce(total_norm_cuda,
op=torch.distributed.ReduceOp.MAX,
group=get_model_parallel_group())
total_norm = total_norm_cuda[0].item()
else:
total_norm = 0
for p in parameters:
if p.model_parallel or (get_model_parallel_rank() == 0):
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm.item() ** norm_type
# Sum across all model parallel GPUs.
total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
torch.distributed.all_reduce(total_norm_cuda,
op=torch.distributed.ReduceOp.SUM,
group=get_model_parallel_group())
total_norm = total_norm_cuda[0].item() ** (1. / norm_type)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for p in parameters:
p.grad.data.mul_(clip_coef)
return total_norm
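# Illustrative sketch (not part of the original module): clipping after the
# backward pass of a training step. `model` and `optimizer` are hypothetical
# names for an existing module and optimizer.
def _example_clip_then_step(model, optimizer, max_norm=1.0):
    total_norm = clip_grad_norm(model.parameters(), max_norm)
    optimizer.step()
    return total_norm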
| 2,406 | 37.206349 | 79 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/mpu/layers.py | # coding=utf-8
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
import math
import torch
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.parameter import Parameter
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .mappings import copy_to_model_parallel_region
from .mappings import gather_from_model_parallel_region
from .mappings import reduce_from_model_parallel_region
from .mappings import scatter_to_model_parallel_region
from .random import get_cuda_rng_tracker
from .utils import divide
from .utils import split_tensor_along_last_dim
from .utils import VocabUtility
def _initialize_affine_weight(weight, output_size, input_size,
per_partition_size, partition_dim, init_method,
stride=1, return_master_weight=False):
"""Initialize affine weight for model parallel.
Build the master weight on all processes and scatter
the relevant chunk."""
# If we only use 1 process for model parallelism, bypass scatter.
world_size = get_model_parallel_world_size()
if world_size == 1:
init_method(weight)
if return_master_weight:
return weight
return None
# Initialize master weight
master_weight = torch.empty(output_size, input_size,
dtype=weight.dtype,
requires_grad=False)
init_method(master_weight)
# Split and copy
per_partition_per_stride_size = divide(per_partition_size, stride)
weight_list = torch.split(master_weight, per_partition_per_stride_size,
dim=partition_dim)
rank = get_model_parallel_rank()
my_weight_list = weight_list[rank::world_size]
with torch.no_grad():
torch.cat(my_weight_list, dim=partition_dim, out=weight)
if return_master_weight:
return master_weight
return None
class VocabParallelEmbedding(torch.nn.Module):
"""Embedding parallelized in the vocabulary dimension.
This is mainly adapted from torch.nn.Embedding and all the default
values are kept.
Arguments:
num_embeddings: vocabulary size.
embedding_dim: size of hidden state.
init_method: method to initialize weights.
"""
def __init__(self, num_embeddings, embedding_dim,
init_method=init.xavier_normal_):
super(VocabParallelEmbedding, self).__init__()
# Keep the input dimensions.
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
        # Set the defaults for compatibility.
self.padding_idx = None
self.max_norm = None
self.norm_type = 2.
self.scale_grad_by_freq = False
self.sparse = False
self._weight = None
        # Divide the weight matrix along the vocabulary dimension.
self.vocab_start_index, self.vocab_end_index = \
VocabUtility.vocab_range_from_global_vocab_size(
self.num_embeddings, get_model_parallel_rank(),
get_model_parallel_world_size())
self.num_embeddings_per_partition = self.vocab_end_index - \
self.vocab_start_index
# Allocate weights.
self.weight = Parameter(torch.Tensor(self.num_embeddings_per_partition,
self.embedding_dim))
self.weight.model_parallel = True
# And initialize.
_initialize_affine_weight(
self.weight, self.num_embeddings, self.embedding_dim,
self.num_embeddings_per_partition, 0, init_method)
def forward(self, input_):
# Build the mask.
input_mask = (input_ < self.vocab_start_index) | \
(input_ >= self.vocab_end_index)
# Mask the input.
masked_input = input_.clone() - self.vocab_start_index
masked_input[input_mask] = 0
# Get the embeddings.
output_parallel = F.embedding(masked_input, self.weight,
self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq,
self.sparse)
# Mask the output embedding.
output_parallel[input_mask, :] = 0.0
# Reduce across all the model parallel GPUs.
output = reduce_from_model_parallel_region(output_parallel)
return output
class ParallelEmbedding(torch.nn.Module):
"""Embedding parallelized in the embedding dimension.
This is mainly adapted from torch.nn.Embedding and all the default
values are kept.
Arguments:
num_embeddings: vocabulary size.
embedding_dim: size of hidden state.
init_method: method to initialize weights.
"""
def __init__(self, num_embeddings, embedding_dim,
init_method=init.xavier_normal_,
keep_master_weight_for_test=False):
super(ParallelEmbedding, self).__init__()
# Keep the input dimensions.
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
        # Set some defaults for compatibility.
self.padding_idx = None
self.max_norm = None
self.norm_type = 2.
self.scale_grad_by_freq = False
self.sparse = False
self._weight = None
# Divide the weight matrix along the embedding dimension.
world_size = get_model_parallel_world_size()
self.embedding_dim_per_partition = divide(self.embedding_dim,
world_size)
# Allocate weights.
self.weight = Parameter(torch.Tensor(self.num_embeddings,
self.embedding_dim_per_partition))
self.weight.model_parallel = True
        # And initialize: split the weights across the model parallel devices.
_initialize_affine_weight(
self.weight, self.num_embeddings, self.embedding_dim,
self.embedding_dim_per_partition, 1, init_method,
stride=1, return_master_weight=False)
def forward(self, input_):
input_parallel = copy_to_model_parallel_region(input_)
output_parallel = F.embedding(input_parallel, self.weight,
self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq,
self.sparse)
output = gather_from_model_parallel_region(output_parallel)
return output
class ColumnParallelLinear(torch.nn.Module):
"""Linear layer with column parallelism.
NOTE: This function will NOT do all-reduce unless gather_output is True
The linear layer is defined as Y = XA + b. A is parallelized along
its second dimension as A = [A_1, ..., A_p].
Arguments:
input_size: first dimension of matrix A.
output_size: second dimension of matrix A.
bias: If true, add bias
        gather_output: If true, call all-gather on output and make Y available
to all GPUs, otherwise, every GPU will have its output
which is Y_i = XA_i
init_method: method to initialize weights. Note that bias is always set
to zero.
stride: For the strided linear layers.
keep_master_weight_for_test: This was added for testing and should be
set to False. It returns the master weights
used for initialization.
"""
def __init__(self, input_size, output_size, bias=True, gather_output=True,
init_method=init.xavier_normal_, stride=1,
keep_master_weight_for_test=False):
super(ColumnParallelLinear, self).__init__()
# Keep input parameters
self.input_size = input_size
self.output_size = output_size
self.gather_output = gather_output
# Divide the weight matrix along the last dimension.
world_size = get_model_parallel_world_size()
self.output_size_per_partition = divide(output_size, world_size)
# Parameters.
# Note: torch.nn.functional.linear performs XA^T + b and as a result
# we allocate the transpose.
self.weight = Parameter(torch.Tensor(self.output_size_per_partition,
self.input_size))
self.weight.model_parallel = True
if bias:
self.bias = Parameter(torch.Tensor(self.output_size_per_partition))
self.bias.model_parallel = True
# Always initialize bias to zero.
with torch.no_grad():
self.bias.zero_()
else:
self.register_parameter('bias', None)
# Initialize weight.
self.master_weight = _initialize_affine_weight(
self.weight, self.output_size, self.input_size,
self.output_size_per_partition, 0, init_method,
stride=stride, return_master_weight=keep_master_weight_for_test)
def forward(self, input_):
# Set up backprop all-reduce.
input_parallel = copy_to_model_parallel_region(input_)
# Matrix multiply.
output_parallel = F.linear(input_parallel, self.weight, self.bias)
if self.gather_output:
# All-gather across the partitions.
output = gather_from_model_parallel_region(output_parallel)
else:
output = output_parallel
return output
class RowParallelLinear(torch.nn.Module):
"""Linear layer with row parallelism.
NOTE: This function will do all-reduce
The linear layer is defined as Y = XA + b. A is parallelized along
its first dimension and X along its second dimension as:
- -
| A_1 |
| . |
A = | . | X = [X_1, ..., X_p]
| . |
| A_p |
- -
Arguments:
input_size: first dimension of matrix A.
output_size: second dimension of matrix A.
bias: If true, add bias. Note that bias is not parallelized.
input_is_parallel: If true, we assume that the input is already
split across the GPUs and we do not split
again.
init_method: method to initialize weights. Note that bias is always set
to zero.
stride: For the strided linear layers.
keep_master_weight_for_test: This was added for testing and should be
set to False. It returns the master weights
used for initialization.
"""
def __init__(self, input_size, output_size, bias=True,
input_is_parallel=False,
init_method=init.xavier_normal_, stride=1,
keep_master_weight_for_test=False):
super(RowParallelLinear, self).__init__()
# Keep input parameters
self.input_size = input_size
self.output_size = output_size
self.input_is_parallel = input_is_parallel
# Divide the weight matrix along the last dimension.
world_size = get_model_parallel_world_size()
self.input_size_per_partition = divide(input_size, world_size)
# Parameters.
# Note: torch.nn.functional.linear performs XA^T + b and as a result
# we allocate the transpose.
self.weight = Parameter(torch.Tensor(self.output_size,
self.input_size_per_partition))
self.weight.model_parallel = True
if bias:
self.bias = Parameter(torch.Tensor(self.output_size))
# Always initialize bias to zero.
with torch.no_grad():
self.bias.zero_()
else:
self.register_parameter('bias', None)
# Initialize weight.
self.master_weight = _initialize_affine_weight(
self.weight, self.output_size, self.input_size,
self.input_size_per_partition, 1, init_method,
stride=stride, return_master_weight=keep_master_weight_for_test)
def forward(self, input_):
# Set up backprop all-reduce.
if self.input_is_parallel:
input_parallel = input_
else:
input_parallel = scatter_to_model_parallel_region(input_)
# Matrix multiply.
output_parallel = F.linear(input_parallel, self.weight)
# All-reduce across all the partitions.
output_ = reduce_from_model_parallel_region(output_parallel)
if self.bias is not None:
output = output_ + self.bias
else:
output = output_
return output
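# Illustrative sketch (not part of the original module): the usual pairing of a
# column-parallel projection followed by a row-parallel projection, as in a
# transformer MLP. Assumes model parallelism has been initialized; the class
# name and dimensions are hypothetical.
class _ExampleParallelMLP(torch.nn.Module):
    def __init__(self, d_model=16, d_ff=64):
        super().__init__()
        # Keep the intermediate activation split across ranks (no all-gather).
        self.up = ColumnParallelLinear(d_model, d_ff, gather_output=False)
        # The input is already split, so forward ends with a single all-reduce.
        self.down = RowParallelLinear(d_ff, d_model, input_is_parallel=True)
    def forward(self, x):
        return self.down(F.relu(self.up(x)))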
| 12,967 | 39.652038 | 80 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/mpu/random.py | # coding=utf-8
#Modified by Samyam Rajbhandari
#Used to partition the activations stored for backward propagation
#Therefore reduces the memory consumption
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
#from torch.utils.checkpoint import detach_variable
import torch.distributed as dist
PARTITION_ACTIVATIONS = False
PA_CORRECTNESS_TEST= False
def see_memory_usage(message, force=False):
if not force:
return
dist.barrier()
if dist.get_rank() == 0:
print(message)
print("Memory Allocated ", torch.cuda.memory_allocated()/(1024*1024*1024), "GigaBytes")
print("Max Memory Allocated ", torch.cuda.max_memory_allocated()/(1024*1024*1024), "GigaBytes")
print("Cache Allocated ", torch.cuda.memory_cached()/(1024*1024*1024), "GigaBytes")
print("Max cache Allocated ", torch.cuda.max_memory_cached()/(1024*1024*1024), "GigaBytes")
print(" ")
#input("Press Any Key To Continue ..")
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
mp_rank = None #get_model_parallel_rank()
mp_size = None #get_model_parallel_world_size()
mp_group = None #get_model_parallel_group()
# Default name for the model parallel rng tracker.
_MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng'
transport_stream = None
cuda_device=None
def detach_variable(inputs, device=None):
if isinstance(inputs, tuple):
out = []
for inp in inputs:
if not isinstance(inp, torch.Tensor):
out.append(inp)
continue
requires_grad = inp.requires_grad
if device is not None:
x = inp.to(device=device)
else:
x = inp
x = x.detach()
x.requires_grad = requires_grad
out.append(x)
return tuple(out)
else:
raise RuntimeError(
"Only tuple of tensors is supported. Got Unsupported input type: ", type(inputs).__name__)
def _set_cuda_rng_state(new_state, device=-1):
"""Sets the random number generator state of the current GPU.
    Arguments:
new_state (torch.ByteTensor): The desired state
This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
with a single change: the input state is not cloned. Cloning caused
major performance issues for +4 GPU cases.
"""
if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState):
# older PyTorch
def cb():
with device_ctx_manager(device):
_C._cuda_setRNGState(new_state)
else:
# newer PyTorch
if device == -1:
device = torch.device('cuda')
elif isinstance(device, str):
device = torch.device(device)
elif isinstance(device, int):
device = torch.device('cuda', device)
def cb():
idx = device.index
if idx is None:
idx = torch.cuda.current_device()
default_generator = torch.cuda.default_generators[idx]
default_generator.set_state(new_state)
_lazy_call(cb)
class CudaRNGStatesTracker:
"""Tracker for the cuda RNG states.
Using the `add` method, a cuda rng state is initialized based on
the input `seed` and is assigned to `name`. Later, by forking the
rng state, we can perform operations and return to our starting
cuda state.
"""
def __init__(self):
# Map from a string name to the cuda rng state.
self.states_ = {}
# Seeds are just for book keeping and ensure no seed is set twice.
self.seeds_ = set()
def reset(self):
"""Set to the initial state (no tracker)."""
self.states_ = {}
self.seeds_ = set()
def get_states(self):
"""Get rng states. Copy the dictionary so we have direct
pointers to the states, not just a pointer to the dictionary."""
states = {}
for name in self.states_:
states[name] = self.states_[name]
return states
def set_states(self, states):
"""Set the rng states. For efficiency purposes, we do not check
the size of seed for compatibility."""
self.states_ = states
def add(self, name, seed):
"""Track the rng state."""
# Check seed is not already used.
if seed in self.seeds_:
raise Exception('seed {} already exists'.format(seed))
self.seeds_.add(seed)
# Check that state is not already defined.
if name in self.states_:
raise Exception('cuda rng state {} already exists'.format(name))
# Get the current rng state.
orig_rng_state = torch.cuda.get_rng_state()
# Set the new state and store it.
torch.cuda.manual_seed(seed)
self.states_[name] = torch.cuda.get_rng_state()
# Reset rng state to what it was.
_set_cuda_rng_state(orig_rng_state)
@contextlib.contextmanager
def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME):
"""Fork the cuda rng state, perform operations, and exit with
the original state."""
# Check if we have added the state
if name not in self.states_:
raise Exception('cuda rng state {} is not added'.format(name))
# Store current rng state.
orig_cuda_rng_state = torch.cuda.get_rng_state()
# Set rng state to the desired one
_set_cuda_rng_state(self.states_[name])
# Do the stuff we wanted to do.
try:
yield
finally:
# Update the current rng state for later use.
self.states_[name] = torch.cuda.get_rng_state()
# And set the state to the original state we started with.
_set_cuda_rng_state(orig_cuda_rng_state)
# RNG tracker object.
_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
def get_cuda_rng_tracker():
"""Get cuda rng tracker."""
return _CUDA_RNG_STATE_TRACKER
def model_parallel_cuda_manual_seed(seed):
"""Initialize model parallel cuda seed.
This function should be called after the model parallel is
initialized. Also, no torch.cuda.manual_seed should be called
after this function. Basically, this is replacement for that
function.
    Two sets of RNG states are tracked:
default state: This is for data parallelism and is the same among a
set of model parallel GPUs but different across
                       different model parallel groups. This is used for
example for dropout in the non-model-parallel regions.
model-parallel state: This state is different among a set of model
parallel GPUs, but the same across data parallel
groups. This is used for example for dropout in
model parallel regions.
"""
# 2718 is just for fun and any POSITIVE value will work.
offset = seed + 2718
model_parallel_seed = offset + get_model_parallel_rank()
    # Data parallel gets the original seed.
data_parallel_seed = seed
if torch.distributed.get_rank() == 0:
print('> initializing model parallel cuda seeds on global rank {}, '
'model parallel rank {}, and data parallel rank {} with '
'model parallel seed: {} and data parallel seed: {}'.format(
torch.distributed.get_rank(), get_model_parallel_rank(),
get_data_parallel_rank(), model_parallel_seed,
data_parallel_seed), flush=True)
_CUDA_RNG_STATE_TRACKER.reset()
# Set the default state.
torch.cuda.manual_seed(data_parallel_seed)
# and model parallel state.
_CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME,
model_parallel_seed)
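# Illustrative sketch (not part of the original module): running dropout under
# the model-parallel RNG state, as the parallel layers do, so each model
# parallel rank draws a different mask. Assumes model_parallel_cuda_manual_seed
# has already been called; `x` and `p` are hypothetical.
def _example_model_parallel_dropout(x, p=0.1):
    with get_cuda_rng_tracker().fork():
        return torch.nn.functional.dropout(x, p=p, training=True)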
def get_partition_start(item):
global mp_rank, mp_size, mp_group
partition_size = get_partition_size(item)
start = partition_size * mp_rank
return int(start)
def get_partition_size(item):
global mp_rank, mp_size, mp_group
size = item.numel()
partition_size = size/mp_size
return int(partition_size)
def get_full_inputs(tensors):
inputs=[]
for i in range(int(len(tensors)/2)-1):
item = tensors[2 * i]
size = tensors[2* i + 1]
partition_size = item.numel()
tensor_size = partition_size * mp_size
flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=item.device)
partitions=[]
for i in range(mp_size):
part_i = flat_tensor.narrow(0, partition_size * i , partition_size)
if i == mp_rank:
part_i.copy_(item)
partitions.append(part_i)
dist.all_gather(partitions,partitions[mp_rank], group=mp_group)
input_tensor = flat_tensor.view(list(size.numpy()))
item.data=input_tensor.data
inputs.append(item)
inputs.append(tensors[-2])
return tuple(inputs)
class CheckpointFunction(torch.autograd.Function):
"""This function is adapted from torch.utils.checkpoint with
two main changes:
1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state`
2) the states in the model parallel tracker are also properly
tracked/set/reset.
"""
@staticmethod
def forward(ctx, run_function, *args):
ctx.run_function = run_function
global mp_rank, mp_size, mp_group
if mp_rank is None:
mp_rank = get_model_parallel_rank()
mp_size = get_model_parallel_world_size()
mp_group = get_model_parallel_group()
global cuda_device, transport_stream, PARTITION_ACTIVATIONS
if cuda_device is None:
if dist.get_rank() == 0:
print(f"Partition Activations {PARTITION_ACTIVATIONS} and Correctness Check {PA_CORRECTNESS_TEST}")
cuda_device = torch.cuda.current_device()
#The transport stream is used to overlap the allgather communication for the activations
#with the computation in the backward pass
transport_stream = torch.cuda.Stream(device=cuda_device)
if PARTITION_ACTIVATIONS:
inputs = [item.detach().contiguous().view(-1).narrow(0, get_partition_start(item), get_partition_size(item)).clone() for item in args[:-1]]
inputs.append(args[-1])
#just in case something funky is happening such as reuse of inputs
inputs_cuda = [item.to(cuda_device) if isinstance(item, torch.Tensor) else item for item in args]
# Copy the rng states.
ctx.fwd_cpu_rng_state = torch.get_rng_state()
ctx.fwd_cuda_rng_state = torch.cuda.get_rng_state()
ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
#ctx.save_for_backward(*args)
with torch.no_grad():
outputs = run_function(*inputs_cuda)
del inputs_cuda
if PARTITION_ACTIVATIONS:
new_args = []
for arg, inp in zip(args,inputs):
size= torch.tensor(arg.size())
arg.data = inp.data
new_args.append(arg)
new_args.append(size)
ctx.save_for_backward(*new_args)
else:
ctx.save_for_backward(*args)
return outputs
@staticmethod
def backward(ctx, *args):
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError("Checkpointing is not compatible with .grad(), "
"please use .backward() if possible")
global cuda_device, transport_stream, PARTITION_ACTIVATIONS
if PARTITION_ACTIVATIONS:
with torch.cuda.stream(transport_stream):
inputs = get_full_inputs(ctx.saved_tensors)
detached_inputs = detach_variable(inputs)
else:
inputs = ctx.saved_tensors
detached_inputs = detach_variable(inputs)
# Store the current states.
bwd_cpu_rng_state = torch.get_rng_state()
bwd_cuda_rng_state = torch.cuda.get_rng_state()
bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
# Set the states to what it used to be before the forward pass.
torch.set_rng_state(ctx.fwd_cpu_rng_state)
_set_cuda_rng_state(ctx.fwd_cuda_rng_state)
get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)
if PARTITION_ACTIVATIONS:
current_stream=torch.cuda.current_stream()
current_stream.wait_stream(transport_stream)
with torch.enable_grad():
outputs = ctx.run_function(*detached_inputs)
# Set the states back to what it was at the start of this function.
torch.set_rng_state(bwd_cpu_rng_state)
_set_cuda_rng_state(bwd_cuda_rng_state)
get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)
if isinstance(outputs, torch.Tensor):
outputs = (outputs,)
torch.autograd.backward(outputs, args)
return (None,) + tuple(inp.grad if isinstance(inp, torch.Tensor) else None for inp in detached_inputs)
def checkpoint(function, *args):
"""Checkpoint a model or part of the model.
This has been directly copied from torch.utils.checkpoint."""
return CheckpointFunction.apply(function, *args)
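# Illustrative sketch (not part of the original module): wrapping a stack of
# layers with the checkpoint helper above so their activations are recomputed
# during the backward pass. Assumes each layer maps a single tensor to a single
# tensor; `layers` and `hidden_states` are hypothetical names.
def _example_checkpointed_forward(layers, hidden_states):
    def run(x):
        for layer in layers:
            x = layer(x)
        return x
    return checkpoint(run, hidden_states)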
def partition_activations_in_checkpoint(partition_activation):
global PARTITION_ACTIVATIONS
PARTITION_ACTIVATIONS=partition_activation
if dist.get_rank() == 0:
print(f"**************Partition Activations {PARTITION_ACTIVATIONS}************")
| 14,040 | 36.442667 | 151 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/mpu/transformer_enc_dec.py | from audioop import cross
import math
from numpy.lib.function_base import insert
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
# from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .initialize import get_model_parallel_world_size
from .layers import ColumnParallelLinear
from .layers import RowParallelLinear
from .mappings import gather_from_model_parallel_region
import deepspeed
import pickle
from .random import checkpoint
from .random import get_cuda_rng_tracker
from .utils import divide
from .utils import split_tensor_along_last_dim
from model.configuration_enc_dec import EncDecConfig
from .layers import VocabParallelEmbedding
from typing import Callable, Optional, List
class LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
        Construct a layernorm module in the T5 style: no bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
# self.bias = nn.Parameter(torch.zeros(hidden_size))
self.eps = eps
def forward(self, hidden_states):
# layer norm should always be calculated in float32
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
# convert into float16 if necessary
if self.weight.dtype == torch.float16:
hidden_states = hidden_states.to(torch.float16)
return self.weight * hidden_states
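# Illustrative sketch (not part of the original module): the same RMS-style
# normalization written as a plain function, making the "no mean subtraction"
# point above concrete. `x`, `weight` and `eps` are hypothetical.
def _example_rms_norm(x, weight, eps=1e-6):
    variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
    return weight * (x * torch.rsqrt(variance + eps))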
@torch.jit.script
def gelu_impl(x):
"""OpenAI's gelu implementation."""
return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x *
(1.0 + 0.044715 * x * x)))
def gelu(x):
return gelu_impl(x)
def unscaled_init_method(sigma):
"""Init method based on N(0, sigma)."""
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
return init_
def scaled_init_method(sigma, num_layers):
"""Init method based on N(0, sigma/sqrt(2*num_layers)."""
std = sigma / math.sqrt(2.0 * num_layers)
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=std)
return init_
def init_method_normal(std):
"""Init method based on normal distribution.
This is only used for embeddings. The transformer has its
own initializer.
"""
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=std)
return init_
class ParallelDenseReluDense(nn.Module):
def __init__(self,
config: EncDecConfig,
init_method: Callable,
output_layer_init_method: Optional[Callable] = None):
super(ParallelDenseReluDense, self).__init__()
self.wi_0 = ColumnParallelLinear(
config.d_model, config.d_ff,
gather_output=False,
bias=False,
init_method=init_method)
self.wi_1 = ColumnParallelLinear(
config.d_model, config.d_ff,
gather_output=False,
bias=False,
init_method=init_method)
self.wo = RowParallelLinear(
config.d_ff,
config.d_model,
bias=False,
input_is_parallel=True,
init_method=output_layer_init_method)
self.dropout = nn.Dropout(config.dropout_rate)
# self.do_dim_trick = config.do_dim_trick
# if torch.distributed.get_rank() % 5 == 4:
# self.ff_mask = nn.Parameter(torch.tensor([1.0] * 13104 + [0.0] * 4), requires_grad=False)
# else:
# self.ff_mask = nn.Parameter(torch.tensor([1.0] * 13108), requires_grad=False)
def forward(self, hidden_states):
# hidden_states: [b, s, hp]
hidden_gelu = gelu(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
# hidden_states: [b, s, d_ff_p]
# if self.do_dim_trick:
# ff_mask = self.ff_mask.view(1, 1, self.ff_mask.size(0))
# hidden_states = ff_mask * hidden_states
# hidden_states = F.relu(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
# hidden_states: [b, s, hp]
return hidden_states
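# Illustrative sketch (not part of the original module): the gated-GELU
# feed-forward computed by ParallelDenseReluDense above, written with plain
# callables for a single device. `wi_0`, `wi_1` and `wo` stand in for the three
# projections and are hypothetical arguments.
def _example_gated_gelu_ffn(x, wi_0, wi_1, wo):
    return wo(gelu(wi_0(x)) * wi_1(x))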
class ParallelAttention(nn.Module):
def __init__(
self,
config: EncDecConfig,
init_method: Callable,
is_decoder: bool = False,
is_cross_attn: bool = False,
output_layer_init_method: Optional[Callable] = None,
has_relative_attention_bias: bool = False):
super(ParallelAttention, self).__init__()
self.is_decoder = is_decoder
self.is_cross_attn = is_cross_attn
self.output_attention = config.output_attention
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
# Set output layer initialization if not provided.
if output_layer_init_method is None:
output_layer_init_method = init_method
d_attn_out = config.d_kv * config.num_heads # h
# Per attention head and per partition values.
world_size = get_model_parallel_world_size() # p
self.hidden_size_per_partition = divide(d_attn_out, world_size) # h_p
self.hidden_size_per_attention_head = config.d_kv # h_i
self.num_attention_heads_per_partition = divide(config.num_heads, world_size) # n_p
# Strided linear layer.
if is_cross_attn:
self.project_q = ColumnParallelLinear(config.d_model, d_attn_out,
stride=1, # NOTE: modify stride
bias=False,
gather_output=False,
init_method=init_method)
self.project_kv = ColumnParallelLinear(config.d_model, 2 * d_attn_out,
stride=2, # NOTE: modify stride
bias=False,
gather_output=False,
init_method=init_method)
else:
self.project = ColumnParallelLinear(config.d_model, 3 * d_attn_out,
stride=3,
bias=False,
gather_output=False,
init_method=init_method)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.num_attention_heads_per_partition)
# Dropout. Note that for a single iteration, this layer will generate
# different outputs on different number of parallel partitions but
# on average it should not be partition dependent.
self.attention_dropout = nn.Dropout(config.dropout_rate)
# Output.
self.dense = RowParallelLinear(d_attn_out,
config.d_model,
input_is_parallel=True,
bias=False,
init_method=output_layer_init_method)
self.output_dropout = nn.Dropout(config.dropout_rate)
if deepspeed.checkpointing.is_configured():
global get_cuda_rng_tracker, checkpoint
get_cuda_rng_tracker = deepspeed.checkpointing.get_cuda_rng_tracker
checkpoint = deepspeed.checkpointing.checkpoint
def _transpose_for_scores(self, tensor):
"""Transpose a 3D tensor [b, s, h_p=n_p*h_i] into a 4D tensor with
        size [b, n_p, s, h_i].
"""
new_tensor_shape = tensor.size()[:-1] + \
(self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head) # [b, s, n_p, h_i]
tensor = tensor.view(*new_tensor_shape)
# tensor: [b, n_p, s, h_i]
return tensor.permute(0, 2, 1, 3)
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
        This should allow for more graceful generalization to longer sequences than the model has been trained on.
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.long)
        relative_position_if_large = torch.min(
            relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
        )
        relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
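    # Illustrative worked example (values chosen for exposition): with the defaults
    # bidirectional=True, num_buckets=32, max_distance=128, the bucket count is halved to 16,
    # positive (future) offsets add 16, and offsets with |relative_position| < max_exact (= 8)
    # keep their exact value:
    #   relative_position = -5  ->  bucket 5
    #   relative_position = +5  ->  bucket 16 + 5 = 21
    # Larger offsets fall into logarithmically spaced buckets, and everything at or beyond
    # max_distance shares the last bucket of its direction.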
def compute_bias(self, query_length, key_length):
""" Compute binned relative position bias """
context_position = torch.arange(query_length, dtype=torch.long)[:, None]
memory_position = torch.arange(key_length, dtype=torch.long)[None, :]
relative_position = memory_position - context_position # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
)
relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device)
values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
return values
def forward(
self,
hidden_states,
attention_mask=None,
key_value_states=None,
position_bias=None,
query_length=None,
past_key_value=None,):
batch_size, seq_length = hidden_states.shape[:2]
real_seq_length = seq_length
if past_key_value is not None:
assert (
len(past_key_value) == 2
), "past_key_value should have 2 past states: keys and values. Got {} past states".format(
len(past_key_value)
)
real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]
# hidden_states: [b, s, d_model]
if key_value_states is not None:
assert self.is_cross_attn is True
# mixed_query_layer: [b, s, h_p]
mixed_query_layer = self.project_q(hidden_states)
# mixed_key_value_layer: [b, s, 2 * h_p]
mixed_key_value_layer = self.project_kv(key_value_states)
(mixed_key_layer,
mixed_value_layer) = split_tensor_along_last_dim(mixed_key_value_layer, 2)
else:
assert self.is_cross_attn is False
# hidden_states: [b, s, h]
mixed_x_layer = self.project(hidden_states)
# mixed_x_layer: [b, s, 3 * h_p]
(mixed_query_layer,
mixed_key_layer,
mixed_value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3)
# mixed_***_layer: [b, s, h_p]
# ***_layer [b, n_p, s, h_i]
query_layer = self._transpose_for_scores(mixed_query_layer)
key_layer = self._transpose_for_scores(mixed_key_layer)
value_layer = self._transpose_for_scores(mixed_value_layer)
if past_key_value is not None and not self.is_cross_attn:
assert self.is_decoder is True
# decoder
# ***_layer: [b, n_p, 1, h_i]
past_key_layer, past_value_layer = past_key_value
# past_***_layer: [b, n_p, s-1, h_i]
key_layer = torch.cat([past_key_layer, key_layer], dim=2)
value_layer = torch.cat([past_value_layer, value_layer], dim=2)
# ***_layer: [b, n_p, s_k, h_i]
# Raw attention scores. [b, n_p, s_q, s_k] compute every head alone
attention_scores = torch.matmul(query_layer,
key_layer.transpose(-1, -2))
        # NOTE: We follow the Transformers implementation and remove the scaling of attention scores
# attention_scores = attention_scores / math.sqrt(
# self.hidden_size_per_attention_head)
# relative positional bias
if position_bias is None:
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.num_attention_heads_per_partition, real_seq_length, key_length), device=attention_scores.device, dtype=attention_scores.dtype
)
else:
position_bias = self.compute_bias(real_seq_length, key_length)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -seq_length:, :]
# if torch.distributed.get_rank() == 0:
# print(real_seq_length, key_length, position_bias[0, 0, 0])
no_pos_bias_attn_probs = nn.Softmax(dim=-1)(attention_scores)
# Apply the attention mask [b, 1, s_q, s_k] and relative position_bias
        # NOTE: 10000 cannot be made much larger, otherwise it may cause fp16 overflow (the max value in fp16 is 65504)
attention_scores = torch.mul(attention_scores, attention_mask) + (-10000.0 * (1.0 - attention_mask) + position_bias)
# attention_scores = torch.mul(attention_scores, attention_mask) - 10000.0 * (1.0 - attention_mask)
if hasattr(self, "score_storage"):
if self.score_storage is None:
self.score_storage = attention_scores[:, :, 0:1, :]
# Attention probabilities. [b, n_p, s_q, s_k]
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
with get_cuda_rng_tracker().fork():
attention_probs = self.attention_dropout(attention_probs)
# Context layer.
context_layer = torch.matmul(attention_probs, value_layer)
# context_layer: [b, n_p, s, h_i]
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
# context_layer: [b, s, n_p, h_i]
# if self.do_dim_trick:
# head_mask = self.head_mask.view(1, 1, self.head_mask.size(0), 1).expand_as(context_layer)
# context_layer = context_layer * head_mask
new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
context_layer = context_layer.view(*new_context_layer_shape)
# context_layer: [b, s, h_p]
attn_output = self.dense(context_layer)
# attn_output: [b, s, d_model]
attn_output = self.output_dropout(attn_output)
present_key_value_state = torch.stack((key_layer, value_layer), dim=0) if self.is_decoder else None
outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
if self.output_attention:
outputs += (no_pos_bias_attn_probs,)
else:
outputs += (None,)
return outputs # attn_output, present_key_value_state, position_bias, attention_probs
class ParallelSelfAttention(nn.Module):
def __init__(
self,
config: EncDecConfig,
init_method: Callable,
is_decoder: bool = False,
output_layer_init_method: Optional[Callable] = None,
has_relative_attention_bias: bool = False):
super(ParallelSelfAttention, self).__init__()
self.self_attn = ParallelAttention(
config,
init_method,
is_decoder=is_decoder,
is_cross_attn=False,
output_layer_init_method=output_layer_init_method,
has_relative_attention_bias=has_relative_attention_bias)
self.layer_norm = LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
past_key_value=None):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.self_attn(
normed_hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
past_key_value=past_key_value,
)
hidden_states = hidden_states + self.dropout(attention_output[0])
# add attentions if we output them
outputs = (hidden_states,) + attention_output[1:]
return outputs # hidden_states, present_key_value_state, position_bias, (attention_probs)
class ParallelCrossAttention(nn.Module):
def __init__(
self,
config: EncDecConfig,
init_method: Callable,
is_decoder: bool = True,
output_layer_init_method: Optional[Callable] = None):
super(ParallelCrossAttention, self).__init__()
self.cross_attn = ParallelAttention(
config,
init_method,
is_decoder=is_decoder,
is_cross_attn=True,
output_layer_init_method=output_layer_init_method,
has_relative_attention_bias=False)
self.layer_norm = LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
key_value_states,
attention_mask=None,
position_bias=None,
query_length=None,
past_key_value=None):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.cross_attn(
normed_hidden_states,
key_value_states=key_value_states,
attention_mask=attention_mask,
position_bias=position_bias,
query_length=query_length,
past_key_value=past_key_value
)
hidden_states = hidden_states + self.dropout(attention_output[0])
# add attentions if we output them
outputs = (hidden_states,) + attention_output[1:]
return outputs # hidden_states, present_key_value_state, position_bias, (attention_probs)
class ParallelFF(nn.Module):
def __init__(
self,
config: EncDecConfig,
init_method: Callable,
output_layer_init_method: Callable = None):
super(ParallelFF, self).__init__()
self.dense_relu_dense = ParallelDenseReluDense(config, init_method, output_layer_init_method)
self.layer_norm = LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
# hidden_states [b, s, d_model]
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.dense_relu_dense(forwarded_states)
hidden_states = hidden_states + self.dropout(forwarded_states)
return hidden_states
class ParallelBlock(nn.Module):
def __init__(
self,
config: EncDecConfig,
init_method: Callable,
output_layer_init_method: Optional[Callable] = None,
has_relative_attention_bias: bool = False,
is_decoder: bool = False):
super(ParallelBlock, self).__init__()
if output_layer_init_method is None:
output_layer_init_method = init_method
self.is_decoder = is_decoder
self.self_attn = ParallelSelfAttention(
config,
init_method,
is_decoder=is_decoder,
output_layer_init_method=output_layer_init_method,
has_relative_attention_bias=has_relative_attention_bias)
if is_decoder:
self.cross_attn = ParallelCrossAttention(
config,
init_method,
is_decoder=is_decoder,
output_layer_init_method=output_layer_init_method)
self.ff = ParallelFF(
config,
init_method,
output_layer_init_method=output_layer_init_method)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
enc_hidden_states=None,
cross_attention_mask=None,
enc_dec_position_bias=None,
past_key_value=None,):
if past_key_value is not None:
self_attn_past_key_value = past_key_value[0]
cross_attn_past_key_value = past_key_value[1]
else:
self_attn_past_key_value, cross_attn_past_key_value = None, None
self_attn_outputs = self.self_attn(
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
past_key_value=self_attn_past_key_value,
)
hidden_states, self_attn_present_key_value = self_attn_outputs[:2]
position_bias = (self_attn_outputs[2],)
attention_probs = (self_attn_outputs[3],)
present_key_value = (self_attn_present_key_value,)
# cross attn
if self.is_decoder:
if self_attn_present_key_value is not None:
query_length = self_attn_present_key_value[0].shape[2]
else:
query_length = None
cross_attn_outputs = self.cross_attn(
hidden_states,
key_value_states=enc_hidden_states,
attention_mask=cross_attention_mask,
position_bias=enc_dec_position_bias,
past_key_value=cross_attn_past_key_value,
query_length=query_length,
)
hidden_states, cross_attn_present_key_value = cross_attn_outputs[:2]
present_key_value += (cross_attn_present_key_value,)
# Keep cross-attention outputs and relative position weights
position_bias = position_bias + (cross_attn_outputs[2],)
attention_probs = attention_probs + (cross_attn_outputs[3],)
hidden_states = self.ff(hidden_states)
outputs = (hidden_states,)
outputs = outputs + (present_key_value,) + position_bias + attention_probs
# (for encoder) hidden_states, present_key_value_states, self-attention position bias, attention_probs
# (for decoder) hidden_states, present_key_value_states, self-attention position bias, cross-attention position bias, self_attention_probs, cross_attention_probs
return outputs
class ParallelTransformer(nn.Module):
def __init__(self, config: EncDecConfig, word_embeds: VocabParallelEmbedding, prompt_config=None, is_decoder=False, checkpoint_activations=False, checkpoint_num_layers=1, args=None):
super(ParallelTransformer, self).__init__()
self.word_embeds = word_embeds
self.config = config
self.args = args
self.prompt_config = prompt_config
if self.prompt_config is not None and self.prompt_config["prompt_len"] > 0:
prompt_dim = prompt_config.get("prompt_dim", config.d_model)
self.prompt_embeds = nn.Embedding(prompt_config["prompt_len"], prompt_dim)
self.dropout = nn.Dropout(config.dropout_rate)
self.final_layernorm = LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.checkpoint_activations = checkpoint_activations
self.checkpoint_num_layers = checkpoint_num_layers
self.is_decoder = is_decoder
output_layer_init_method = None
if config.use_scaled_init_for_output_weights:
output_layer_init_method = scaled_init_method(config.init_method_std,
config.num_layers)
self.blocks = nn.ModuleList(
[ParallelBlock(
config,
unscaled_init_method(sigma=config.init_method_std),
has_relative_attention_bias=bool(i == 0),
output_layer_init_method=output_layer_init_method,
is_decoder=is_decoder) for i in range(config.num_layers)]
)
if deepspeed.checkpointing.is_configured():
global get_cuda_rng_tracker, checkpoint
get_cuda_rng_tracker = deepspeed.checkpointing.get_cuda_rng_tracker
checkpoint = deepspeed.checkpointing.checkpoint
def init_prompt_embeds(self):
if self.prompt_config is not None and self.prompt_config["prompt_len"] > 0:
prompt_weights = self.word_embeds(self.prompt_config["init_ids"]).detach()
self.prompt_embeds.weight.data = prompt_weights
def load_prompt_embeds(self, prompt_embeds):
if self.prompt_config is not None and self.prompt_config["prompt_len"] > 0:
prompt_embeds = prompt_embeds.to(self.prompt_embeds.weight.data.device)
print("loading prompts")
self.prompt_embeds.weight.data = prompt_embeds
def get_prompt(self):
if self.prompt_config is not None and self.prompt_config["prompt_len"] > 0:
return self.prompt_embeds.weight.data
else:
return None
def get_input_embeds(self, input_ids):
if self.prompt_config is None:
return self.word_embeds(input_ids)
p_embeds = None
if self.prompt_config is not None and self.prompt_config["prompt_len"] > 0 and self.prompt_config.get("insert_input", True):
prompt_mask = (input_ids < 0).long()
prompt_ids = (-(input_ids * prompt_mask)) - prompt_mask
p_embeds = self.prompt_embeds(prompt_ids) * prompt_mask.half().unsqueeze(-1)
word_mask = (0 <= input_ids).long()
word_ids = word_mask * input_ids
w_embeds = self.word_embeds(word_ids) * word_mask.float().unsqueeze(-1)
if p_embeds is not None:
w_embeds = w_embeds + p_embeds
return w_embeds # bs * seq_len * hidden
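    # Illustrative note on get_input_embeds (assuming prompt tuning with negative prompt ids):
    # prompt slots are encoded as negative input ids, and
    # prompt_ids = (-(input_ids * prompt_mask)) - prompt_mask turns them into embedding rows:
    #   input_id = -1 -> prompt_mask = 1 -> -(-1) - 1 = 0  (first prompt embedding)
    #   input_id = -3 -> prompt_mask = 1 -> -(-3) - 1 = 2  (third prompt embedding)
    #   input_id = 42 -> prompt_mask = 0 -> prompt slot 0, but it is zeroed by the mask and
    #   only the word embedding of id 42 survives in w_embeds.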
def forward(
self,
input_ids=None,
attention_mask=None,
cross_attention_mask=None,
enc_hidden_states=None,
past_key_values=None,):
bs = input_ids.size(0)
inputs_embeds = self.get_input_embeds(input_ids)
hidden_states = self.dropout(inputs_embeds)
position_bias = None
enc_dec_position_bias = None
present_key_value_states = []
# initialize past_key_values with `None` if past does not exist
if past_key_values is None:
past_key_values = [None] * len(self.blocks)
all_self_attention_probs = []
all_cross_attention_probs = []
def custom(start, end):
def custom_forward(*inputs):
layer_modules_ = self.blocks[start:end]
past_key_values_ = past_key_values[start:end]
self_attn_present_key_values_ = []
cross_attn_present_key_values_ = []
position_bias_, enc_dec_position_bias_ = None, None
hidden_states_ = inputs[0]
if len(inputs) > 2:
position_bias_ = inputs[1]
if len(inputs) > 3:
enc_dec_position_bias_ = inputs[2]
if enc_hidden_states is not None:
enc_hidden_states_ = inputs[-1]
else:
enc_hidden_states_ = None
_l = start
for layer_, past_key_value_ in zip(layer_modules_, past_key_values_):
attention_mask_ = attention_mask
cross_attention_mask_ = cross_attention_mask
layer_outputs_ = layer_(hidden_states_,
attention_mask_,
position_bias_,
enc_hidden_states_,
cross_attention_mask_,
enc_dec_position_bias_,
past_key_value=past_key_value_)
hidden_states_, present_key_value_ = layer_outputs_[:2]
if self.is_decoder:
self_attn_present_key_values_.append(present_key_value_[0])
cross_attn_present_key_values_.append(present_key_value_[1])
all_self_attention_probs.append(layer_outputs_[-2])
all_cross_attention_probs.append(layer_outputs_[-1])
else:
self_attn_present_key_values_.append(present_key_value_[0])
all_self_attention_probs.append(layer_outputs_[-1])
position_bias_ = layer_outputs_[2]
if self.is_decoder and enc_hidden_states is not None:
enc_dec_position_bias_ = layer_outputs_[3]
_l += 1
outputs_ = (hidden_states_,)
if position_bias_ is not None:
outputs_ += (position_bias_,)
if enc_dec_position_bias_ is not None:
outputs_ += (enc_dec_position_bias_,)
if self.is_decoder:
self_attn_present_key_values_ = torch.stack(self_attn_present_key_values_, dim=0)
cross_attn_present_key_values_ = torch.stack(cross_attn_present_key_values_, dim=0)
outputs_ += (self_attn_present_key_values_, cross_attn_present_key_values_,)
return outputs_
return custom_forward
if self.checkpoint_activations:
l = 0
num_layers = len(self.blocks)
chunk_length = self.checkpoint_num_layers
while l < num_layers:
arg_list = (hidden_states,)
if position_bias is not None:
arg_list += (position_bias,)
if enc_dec_position_bias is not None:
arg_list += (enc_dec_position_bias,)
if enc_hidden_states is not None:
arg_list += (enc_hidden_states,)
tmp_outputs = checkpoint(custom(l, l+chunk_length), *arg_list)
else:
arg_list += (attention_mask,)
tmp_outputs = checkpoint(custom(l, l+chunk_length), *arg_list)
hidden_states = tmp_outputs[0]
if self.is_decoder:
if len(tmp_outputs) > 3:
position_bias = tmp_outputs[1]
if len(tmp_outputs) > 4:
enc_dec_position_bias = tmp_outputs[2]
present_key_value_states.extend([(s, c) for s, c in zip(tmp_outputs[-2], tmp_outputs[-1])])
else:
if len(tmp_outputs) > 1:
position_bias = tmp_outputs[1]
if len(tmp_outputs) > 2:
enc_dec_position_bias = tmp_outputs[2]
present_key_value_states.extend([None] * chunk_length)
l += chunk_length
else:
for i, (layer_module, past_key_value) in enumerate(zip(self.blocks, past_key_values)):
layer_outputs = layer_module(
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
enc_hidden_states=enc_hidden_states,
cross_attention_mask=cross_attention_mask,
enc_dec_position_bias=enc_dec_position_bias,
past_key_value=past_key_value
)
# layer_outputs is a tuple with:
# hidden-states, key-value-states, self-attention position bias, cross-attention position bias, attention_probs
hidden_states, present_key_value_state = layer_outputs[:2]
if self.is_decoder:
all_self_attention_probs.append(layer_outputs[-2])
all_cross_attention_probs.append(layer_outputs[-1])
else:
all_self_attention_probs.append(layer_outputs[-1])
position_bias = layer_outputs[2]
if self.is_decoder and enc_hidden_states is not None:
enc_dec_position_bias = layer_outputs[3]
present_key_value_states.append(present_key_value_state)
# We share the position biases between the layers - the first layer store them
# layer_outputs = hidden-states, key-value-states (self-attention weights),
# (self-attention position bias), (cross-attention weights), (cross-attention position bias)
# position_bias = layer_outputs[2]
hidden_states = self.final_layernorm(hidden_states)
hidden_states = self.dropout(hidden_states)
# exit(0)
outputs = {
"last_hidden_state": hidden_states,
"past_key_values": present_key_value_states,
"hidden_states": None,
"attentions": all_self_attention_probs,
"cross_attentions": all_cross_attention_probs
}
return outputs
| 35,723 | 41.427553 | 186 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/model/enc_dec_modeling.py | import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import mpu
from .configuration_enc_dec import EncDecConfig
def init_method_normal(std):
"""Init method based on normal distribution.
This is only used for embeddings. The transformer has its
own initializer.
"""
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=std)
return init_
class EncDecModel(nn.Module):
def __init__(
self,
config: EncDecConfig,
parallel_output=True,
checkpoint_activations=False,
checkpoint_num_layers=1,
prompt_config=None,
args=None):
super(EncDecModel, self).__init__()
if config.vocab_size is None:
raise RuntimeError("Should set vocab size")
self.enc_config = copy.deepcopy(config)
self.dec_config = copy.deepcopy(config)
self.parallel_output = parallel_output
init_method = init_method_normal(std=config.init_method_std) # NOTE: good?
self.word_embeds = mpu.VocabParallelEmbedding(config.vocab_size, config.d_model, init_method=init_method)
self.prompt_config = prompt_config
self.args = args
self.lm_head = mpu.VocabParallelEmbedding(config.vocab_size, config.d_model, init_method=init_method)
self.encoder = mpu.ParallelTransformer(self.enc_config, word_embeds=self.word_embeds, is_decoder=False, prompt_config=prompt_config["enc"] if prompt_config is not None else None,
checkpoint_activations=checkpoint_activations, checkpoint_num_layers=checkpoint_num_layers, args=args)
self.decoder = mpu.ParallelTransformer(self.dec_config, word_embeds=self.word_embeds, is_decoder=True, prompt_config=prompt_config["dec"] if prompt_config is not None else None,
checkpoint_activations=checkpoint_activations, checkpoint_num_layers=checkpoint_num_layers, args=args)
if config.tie_weights:
self.tie_weights()
def init_prompt_embeds(self):
self.encoder.init_prompt_embeds()
self.decoder.init_prompt_embeds()
def load_prompt_embeds(self, prompt_embeds):
self.encoder.load_prompt_embeds(prompt_embeds)
self.decoder.load_prompt_embeds(prompt_embeds)
def get_prompt_embeds(self):
return {
"encoder": self.encoder.get_prompt(),
"decoder": self.decoder.get_prompt()
}
def tie_weights(self):
self.lm_head.weight = self.word_embeds.weight
def reset_score_storage(self):
for mod in self.decoder.blocks:
mod.cross_attn.cross_attn.score_storage = None
def get_crossattention_scores(self, context_mask):
scores = []
n_passages = context_mask.size(1)
for mod in self.decoder.blocks:
scores.append(mod.cross_attn.cross_attn.score_storage)
scores = torch.cat(scores, dim=2)
        # The third dimension is n_layers in FiD because the decoder sequence length is 1 under auto-regressive decoding
bsz, n_heads, n_layers, _ = scores.size()
# batch_size, n_head, n_layers, n_passages, text_maxlength
scores = scores.view(bsz, n_heads, n_layers, n_passages, -1)
# batch_size, 1, 1, n_passages, text_maxlength
scores = scores.masked_fill(~context_mask[:, None, None], 0.).float()
# batch_size, n_passages
scores = scores.sum(dim=[1, 2, 4])
ntokens = context_mask.sum(dim=[2]) * n_layers * n_heads
scores = scores / ntokens
return scores
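    # Shape sketch for get_crossattention_scores (FiD-style relevance scoring; assumes
    # reset_score_storage was called and the decoder ran with sequence length 1): each decoder
    # block stores cross-attention scores of shape [bsz, n_heads, 1, n_passages * text_maxlength];
    # concatenating over blocks gives [bsz, n_heads, n_layers, ...], padding tokens are masked
    # out, and summing over heads, layers and tokens (normalized by the per-passage token count)
    # yields one relevance score per (query, passage) pair.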
def forward(
self,
enc_input_ids=None,
enc_position_ids=None,
enc_attention_mask=None,
dec_input_ids=None,
dec_position_ids=None,
dec_attention_mask=None,
cross_attention_mask=None,
enc_hidden_states=None,
past_key_values=None,
only_encoder=False,):
provided_hidden = (enc_hidden_states is not None)
if enc_hidden_states is None:
enc_outputs = self.encoder(
input_ids=enc_input_ids,
attention_mask=enc_attention_mask,
)
enc_hidden_states = enc_outputs["last_hidden_state"]
if only_encoder:
outputs = {
"encoder_last_hidden_state": enc_hidden_states,
"encoder_hidden_states": enc_outputs["hidden_states"],
"encoder_attentions": enc_outputs["attentions"],
}
return outputs
dec_outputs = self.decoder(
input_ids=dec_input_ids,
attention_mask=dec_attention_mask,
cross_attention_mask=cross_attention_mask,
enc_hidden_states=enc_hidden_states,
past_key_values=past_key_values,
)
last_hidden_state_parallel = mpu.copy_to_model_parallel_region(dec_outputs["last_hidden_state"])
logits_parallel = F.linear(last_hidden_state_parallel, self.lm_head.weight)
if self.parallel_output:
lm_logits = logits_parallel
else:
lm_logits = mpu.gather_from_model_parallel_region(logits_parallel)
outputs = {
"lm_logits": lm_logits,
"last_hidden_state": dec_outputs["last_hidden_state"],
"past_key_values": dec_outputs["past_key_values"],
"encoder_last_hidden_state": enc_hidden_states,
"encoder_attentions": enc_outputs["attentions"] if not provided_hidden else None,
"decoder_self_attentions": dec_outputs["attentions"],
"decoder_cross_attentions": dec_outputs["cross_attentions"]
}
return outputs
def enc_dec_get_params_for_weight_decay_optimization(module):
weight_decay_params = {'params': []}
no_weight_decay_params = {'params': [], 'weight_decay': 0.0}
for module_ in module.modules():
if isinstance(module_, (mpu.LayerNorm, nn.LayerNorm, mpu.transformer_enc_dec.LayerNorm)):
no_weight_decay_params['params'].extend(
[p for p in list(module_._parameters.values())
if p is not None])
else:
weight_decay_params['params'].extend(
[p for n, p in list(module_._parameters.items())
if p is not None and n != 'bias'])
no_weight_decay_params['params'].extend(
[p for n, p in list(module_._parameters.items())
if p is not None and n == 'bias'])
return weight_decay_params, no_weight_decay_params
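# A minimal usage sketch (assuming a plain torch optimizer is used downstream): the two groups
# returned above can be passed directly as parameter groups so that LayerNorm weights and all
# biases are excluded from weight decay, e.g.
#   wd_params, no_wd_params = enc_dec_get_params_for_weight_decay_optimization(model)
#   optimizer = torch.optim.AdamW([wd_params, no_wd_params], lr=1e-4, weight_decay=1e-2)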
def enc_dec_get_params_for_prompt_optimization(module: nn.Module):
params = [{'params': []}]
for t in module.named_modules():
if "prompt" in t[0]:
if torch.distributed.get_rank() == 0:
print("Update params", t[0])
params[0]['params'].extend([p for p in list(t[1]._parameters.values()) if p is not None])
for t in module.named_parameters():
if "prompt" not in t[0]:
t[1].requires_grad_(False)
if torch.distributed.get_rank() == 0:
print("print params", params)
return params
| 7,115 | 35.492308 | 186 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/model/distributed.py | # coding=utf-8
import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
import torch.distributed as dist
from torch.nn.modules import Module
from torch.autograd import Variable
import mpu
class DistributedDataParallel(Module):
def __init__(self, module):
super(DistributedDataParallel, self).__init__()
self.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
self.module = module
self.data_parallel_group = mpu.get_data_parallel_group()
src_rank = mpu.get_model_parallel_rank()
for p in self.module.parameters():
if torch.is_tensor(p):
dist.broadcast(p, src_rank, group=self.data_parallel_group)
def allreduce_params(reduce_after=True, no_scale=False, fp32_allreduce=False):
if(self.needs_reduction):
self.needs_reduction = False
buckets = {}
for name, param in self.module.named_parameters():
if param.requires_grad and param.grad is not None:
tp = (param.data.type())
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
if self.warn_on_half:
if torch.cuda.HalfTensor in buckets:
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case.")
self.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
if fp32_allreduce:
coalesced = coalesced.float()
if not no_scale and not reduce_after:
coalesced /= dist.get_world_size(group=self.data_parallel_group)
dist.all_reduce(coalesced, group=self.data_parallel_group)
torch.cuda.synchronize()
if not no_scale and reduce_after:
coalesced /= dist.get_world_size(group=self.data_parallel_group)
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
self.hook_handles = []
self.hooks = []
for param in list(self.module.parameters()):
def allreduce_hook(*unused):
Variable._execution_engine.queue_callback(allreduce_params)
# handle = param.register_hook(allreduce_hook)
#self.hooks.append(allreduce_hook)
#self.hook_handles.append(handle)
self.allreduce_params = allreduce_params
def forward(self, *inputs, **kwargs):
self.needs_reduction = True
return self.module(*inputs, **kwargs)
def state_dict(self, destination=None, prefix='', keep_vars=False):
#[h.remove() for h in self.hook_handles]
sd = self.module.state_dict(destination, prefix, keep_vars)
# for handle, hook in zip(self.hook_handles, self.hooks):
# d = handle.hooks_dict_ref()
# d[handle.id] = hook
return sd
def load_state_dict(self, state_dict, strict=True):
self.module.load_state_dict(state_dict, strict=strict)
'''
def _sync_buffers(self):
buffers = list(self.module._all_buffers())
if len(buffers) > 0:
# cross-node buffer sync
flat_buffers = _flatten_dense_tensors(buffers)
dist.broadcast(flat_buffers, 0)
for buf, synced in zip(buffers, _unflatten_dense_tensors(flat_buffers, buffers)):
buf.copy_(synced)
def train(self, mode=True):
# Clear NCCL communicator and CUDA event cache of the default group ID,
# These cache will be recreated at the later call. This is currently a
# work-around for a potential NCCL deadlock.
if dist._backend == dist.dist_backend.NCCL:
dist._clear_group_cache()
super(DistributedDataParallel, self).train(mode)
self.module.train(mode)
'''
| 4,286 | 42.30303 | 103 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/LM/Flan-T5/data_utils/T0Datasets.py | import json
import re
import os
import torch
import math
import numpy as np
import pickle
from torch.utils.data import Dataset
from utils import print_rank_0, save_rank_0
from tokenization_t5 import EncDecTokenizer
from .data_config import DATA_GROUP_CONFIG, DATA_CONFIG
import datasets
from promptsource.templates import TemplateCollection
from datasets import load_dataset
from .postprocess import OPTION_POST_FN
datasets.disable_caching()
class T0Dataset(Dataset):
def __init__(
self,
args,
tokenizer: EncDecTokenizer,
data_prompts,
split,
ratio=1,
few_data_names=None,
num=-1,
):
self.args = args
self.tokenizer = tokenizer
self.ratio = ratio
self.data_prompts = data_prompts
self.pad_id = tokenizer.pad_id
self.split = split
self.sample_num = num
self.idx_size = 3
self.few_data_names = few_data_names
self.selfsup_sample_num = {"train": 100000, "validation": 1000}
self.all_data = {name: {} for name in data_prompts}
self.all_enc_sizes = []
self.all_dec_sizes = []
self.all_cand_sizes = []
if self.args.FiD:
self.all_passage_sizes = []
for name in self.data_prompts:
if DATA_CONFIG[name].get("selfsup", False):
data, enc_sizes, dec_sizes, cand_sizes = self.load_from_cache_self(name)
self.all_data[name] = {
"prompt_num": 1,
"prompt_names": ["merged"],
"data": data,
}
else:
if DATA_CONFIG[name]["do_cache"]:
(
data,
enc_sizes,
dec_sizes,
cand_sizes,
passage_sizes,
) = self.load_from_cache(name)
else:
(
data,
enc_sizes,
dec_sizes,
cand_sizes,
passage_sizes,
) = self.process_data(name)
self.all_data[name] = {
"prompt_num": len(data_prompts[name]),
"prompt_names": [prompt.name for prompt in data_prompts[name]],
"data": data,
}
print("len data", len(data))
self.all_enc_sizes.extend(enc_sizes)
self.all_dec_sizes.extend(dec_sizes)
self.all_cand_sizes.extend(cand_sizes)
if self.args.FiD:
self.all_passage_sizes.extend(passage_sizes)
self.max_enc_len = max(self.all_enc_sizes)
self.max_dec_len = max(self.all_dec_sizes)
self.max_cand_len = max(self.all_cand_sizes)
if self.args.FiD:
self.max_passage_len = max(self.all_passage_sizes)
self.max_enc_len = self.max_passage_len * self.args.passage_num
self.flan_sample_num = {
name: min(
DATA_CONFIG[name].get("flan_sample_max", args.flan_sample_max)
* d["prompt_num"],
len(d["data"]),
)
for name, d in self.all_data.items()
}
self.idxs = self.build_idx()
self.cur_epoch = 0
print_str = ""
for name in self.data_prompts:
print_str += "Data: {}_{}".format(name, split)
print_str += " | Ratio: {}".format(ratio)
print_str += " | Max enc len: {}".format(self.max_enc_len)
print_str += " | Max dec len: {}".format(self.max_dec_len)
print_str += " | Max cand len: {}".format(self.max_cand_len)
print_str += " | Prompt num: {}".format(self.all_data[name]["prompt_num"])
print_str += " | All data num: {}".format(len(self.all_data[name]["data"]))
print_str += " | Sample num: {}".format(self.flan_sample_num[name])
print_str += " | Idx one epoch num: {}\n".format(len(self.idxs[0]))
print_str = print_str[:-1]
print_rank_0(print_str)
save_rank_0(args, print_str)
def set_epoch(self, e):
self.cur_epoch = e
def build_idx(self):
epochs = self.args.epochs
idx_repo = {}
for (name, d), (name, sample_num) in zip(
self.all_data.items(), self.flan_sample_num.items()
):
data_idx = [i for i in range(len(d["data"]))]
repeat_num = math.ceil(epochs * sample_num / len(data_idx))
tmp_data_idx = []
for i in range(repeat_num):
if self.split == "train":
np.random.shuffle(data_idx)
tmp_data_idx.extend(data_idx)
idx_repo[name] = tmp_data_idx
print(
name,
"| repeat num:",
repeat_num,
"| sample num:",
sample_num,
"| data_idx len:",
len(data_idx),
"| tmp_data_idx:",
len(tmp_data_idx),
)
idxs = []
for e in range(epochs):
samp_idx = []
for name, d in self.all_data.items():
sample_num = self.flan_sample_num[name]
l = idx_repo[name][e * sample_num : (e + 1) * sample_num]
l = [(name, x) for x in l]
samp_idx.extend(l)
idxs.append(samp_idx)
first_len = len(idxs[0])
for e, x in enumerate(idxs):
assert len(x) == first_len, (e, len(x), first_len)
return idxs
def load_from_cache_self(self, name):
cache_path = os.path.join(
DATA_CONFIG[name]["data_dir"],
"cache_{}_{}.pkl".format(self.split, self.selfsup_sample_num[self.split]),
)
with open(cache_path, "rb") as f:
data, enc_sizes, dec_sizes, cand_sizes = pickle.load(f)
return data, enc_sizes, dec_sizes, cand_sizes
def load_from_cache(self, name):
data_dir = DATA_CONFIG[name]["data_dir"]
if self.split == "train":
if self.args.few_data_num is not None:
assert self.few_data_names is not None
if name in self.few_data_names:
sample_num = self.args.few_data_num
else:
sample_num = self.sample_num
else:
sample_num = self.sample_num
cache_path = os.path.join(
data_dir,
"cache_{}_{}_{}.pkl".format(self.split, self.ratio, sample_num),
)
else:
prompt_name = self.data_prompts[name][0].name.replace("/", "_")
cache_path = os.path.join(
data_dir,
"cache_{}_{}_{}_{}.pkl".format(
self.split, self.ratio, self.sample_num, prompt_name
),
)
print("cache path", cache_path)
if os.path.exists(cache_path):
with open(cache_path, "rb") as f:
data, enc_sizes, dec_sizes, cand_sizes, passage_sizes = pickle.load(f)
else:
data, enc_sizes, dec_sizes, cand_sizes, passage_sizes = self.process_data(
name
)
with open(cache_path, "wb") as f:
pickle.dump((data, enc_sizes, dec_sizes, cand_sizes, passage_sizes), f)
return data, enc_sizes, dec_sizes, cand_sizes, passage_sizes
def process_data(self, name):
print_rank_0("Processing " + name)
if self.split == "train":
if self.args.few_data_num is not None:
assert self.few_data_names is not None
if name in self.few_data_names:
sample_num = self.args.few_data_num
else:
sample_num = self.sample_num
else:
sample_num = DATA_CONFIG[name].get("train_num", self.sample_num)
if self.args.data_aug is not None:
sample_num += self.args.data_aug
else:
sample_num = DATA_CONFIG[name].get("dev_num", self.sample_num)
data_dir = DATA_CONFIG[name]["data_dir"]
data_files = {self.split: os.path.join(data_dir, "{}.jsonl".format(self.split))}
dataset = load_dataset("json", data_files=data_files)
data = []
enc_sizes = []
dec_sizes = []
cand_sizes = []
passage_sizes = []
sid, lid = 0, 0
skip = 0
for pid, prompt in enumerate(self.data_prompts[name]):
print_rank_0(prompt.name)
for sample in dataset[self.split]:
if lid % 500 == 0:
print_rank_0(
"{}, {}, {}, {}, {}".format(
name, self.split, prompt.name, lid, skip
)
)
# genread_template = "{} Generate a background document from Wikipedia to help answer the given question:"
answers = None
if "popQA" in name:
enc_str = sample["prompt"]
# enc_str = genread_template.format(enc_str)
dec_str = sample["answers"][0]
answers = sample["answers"]
else:
applied_sample = prompt.apply(sample)
if len(applied_sample) != 2:
# print_rank_0("sample num out")
skip += 1
continue
enc_str, dec_str = applied_sample
# enc_str = genread_template.format(enc_str)
if "mmlu_demo" in sample:
enc_str = sample["mmlu_demo"] + enc_str
passages = None
if "passages" in sample:
passages = []
for i in range(self.args.passage_num):
max_question_len = 1250 if self.split == "train" else 10000
max_passage_len = (
max(1250 - len(enc_str), 0)
if self.split == "train"
else 500
)
# Can last
if self.args.prompt_tune:
passage_str = enc_str[:max_question_len]
passages.append(
[-(i + 1)] + self.tokenizer.encode(passage_str) + [1]
)
else:
passage_str = (
sample["passages"][i][:max_passage_len]
+ enc_str[:max_question_len]
)
passages.append(self.tokenizer.encode(passage_str) + [1])
if self.args.prompt_tune:
context = (
[-(i + 1) for i in range(self.args.passage_num)]
+ self.tokenizer.encode(enc_str)
+ [1]
)
else:
context = self.tokenizer.encode(enc_str) + [1]
target = [0] + self.tokenizer.encode(dec_str) + [1]
# if len(enc_str) > 5000:
# # print_rank_0("pre-check out " + str(len(enc_str)))
# skip += 1
# continue
# if len(context) > self.args.enc_seq_length:
# skip += 1
# # print_rank_0("enc out " + str(len(context)))
# continue
# if len(target) > self.args.dec_seq_length:
# skip += 1
# # print_rank_0("dec out " + str(len(target)))
# continue
options = prompt.get_answer_choices_list(sample)
options = OPTION_POST_FN.get((name, prompt.name), lambda x: x)(options)
if self.split != "train" and options is not None:
cands = [
[0] + self.tokenizer.encode(option) + [1] for option in options
]
else:
cands = None
if len(dec_str) == 0:
# print_rank_0("dec str out " + str(len(dec_str)))
skip += 1
continue
if options is not None and dec_str not in options:
print_rank_0(str(applied_sample))
print_rank_0(
name
+ " "
+ prompt.name
+ " "
+ "Skip bug sample "
+ str(dec_str)
+ " "
+ str(options)
)
continue
data.append(
{
"idxs": [pid, lid, sid],
"enc_input_ids": context,
"dec_input_ids": target[:-1],
"label_ids": target[1:],
"answer": dec_str if answers is None else answers,
"options": options,
"cands": {
"input_ids": [c[:-1] for c in cands],
"target_ids": [c[1:] for c in cands],
"label": options.index(dec_str),
}
if cands is not None
else None,
"passage_input_ids": passages,
}
)
enc_sizes.append(len(context))
dec_sizes.append(len(target) - 1)
if cands is not None:
cand_sizes.append(sum([len(c) - 1 for c in cands]))
else:
cand_sizes.append(0)
if passages is not None:
passage_sizes.extend([len(p) for p in passages])
else:
passage_sizes.append(0)
sid += 1
lid += 1
if sample_num > 0 and lid >= sample_num:
break
lid = 0
return data, enc_sizes, dec_sizes, cand_sizes, passage_sizes
def __len__(self):
return len(self.idxs[0])
def __getitem__(self, idx):
name, sid = self.idxs[self.cur_epoch][idx]
d = self.all_data[name]["data"][sid]
return d, name
def collate(self, samples):
bs = len(samples)
model_data = {
"enc_input_ids": torch.ones(bs, self.max_enc_len, dtype=torch.long)
* self.pad_id,
"enc_attention_mask": torch.zeros(
bs, 1, self.max_enc_len, self.max_enc_len
),
"dec_attention_mask": torch.zeros(
bs, 1, self.max_dec_len, self.max_dec_len
),
"cross_attention_mask": torch.zeros(
bs, 1, self.max_dec_len, self.max_enc_len
),
"dec_input_ids": torch.ones(bs, self.max_dec_len, dtype=torch.long)
* self.pad_id,
}
if self.args.FiD:
model_data["passage_input_ids"] = (
torch.ones(
bs, self.args.passage_num, self.max_passage_len, dtype=torch.long
)
* self.pad_id
)
model_data["passage_attention_mask"] = torch.zeros(
bs, self.args.passage_num, 1, self.max_passage_len, self.max_passage_len
)
no_model_data = {
"idxs": torch.zeros(bs, self.idx_size, dtype=torch.long),
"labels": torch.ones(bs, self.max_dec_len, dtype=torch.long) * self.pad_id,
"loss_mask": torch.zeros(bs, self.max_dec_len),
}
name_list = []
for i, samp in enumerate(samples):
samp, name = samp
name_list.append(name)
enc_len, dec_len = len(samp["enc_input_ids"]), len(samp["dec_input_ids"])
model_data["enc_input_ids"][i][:enc_len] = torch.tensor(
samp["enc_input_ids"], dtype=torch.long
)
model_data["enc_attention_mask"][i][0, :enc_len, :enc_len] = samp.get(
"enc_attention_mask", 1.0
)
model_data["dec_input_ids"][i][:dec_len] = torch.tensor(
samp["dec_input_ids"], dtype=torch.long
)
model_data["dec_attention_mask"][i][0, :dec_len, :dec_len] = torch.tril(
torch.ones(dec_len, dec_len)
)
if self.args.FiD:
enc_len = self.max_enc_len
samp["cross_attention_mask"] = torch.zeros(enc_len)
for j in range(self.args.passage_num):
passage_len = len(samp["passage_input_ids"][j])
samp["cross_attention_mask"][
j * self.max_passage_len : j * self.max_passage_len
+ passage_len
] = 1.0
model_data["cross_attention_mask"][i][0, :dec_len, :enc_len] = samp.get(
"cross_attention_mask", 1.0
)
if self.args.FiD:
for j in range(self.args.passage_num):
passage_len = len(samp["passage_input_ids"][j])
model_data["passage_input_ids"][i][j][:passage_len] = torch.tensor(
samp["passage_input_ids"][j], dtype=torch.long
)
model_data["passage_attention_mask"][i][j][
0, :passage_len, :passage_len
] = 1.0
no_model_data["idxs"][i] = torch.tensor(samp["idxs"], dtype=torch.long)
no_model_data["labels"][i][: len(samp["label_ids"])] = torch.tensor(
samp["label_ids"], dtype=torch.long
)
no_model_data["loss_mask"][i][: len(samp["label_ids"])] = 1.0
if self.args.fp16:
model_data["enc_attention_mask"] = model_data["enc_attention_mask"].half()
model_data["dec_attention_mask"] = model_data["dec_attention_mask"].half()
model_data["cross_attention_mask"] = model_data[
"cross_attention_mask"
].half()
if self.args.FiD:
model_data["passage_attention_mask"] = model_data[
"passage_attention_mask"
].half()
if samp["cands"] is not None:
cand_model_data = {
"dec_input_ids": torch.ones(bs, self.max_cand_len, dtype=torch.long)
* self.pad_id,
"dec_attention_mask": torch.zeros(
bs, 1, self.max_cand_len, self.max_cand_len
),
"cross_attention_mask": torch.zeros(
bs, 1, self.max_cand_len, self.max_enc_len
),
}
cand_no_model_data = {
"labels": torch.zeros(bs, dtype=torch.long),
"target_ids": torch.ones(bs, self.max_cand_len, dtype=torch.long)
* self.pad_id,
"pos": torch.zeros(bs, self.max_cand_len, dtype=torch.bool),
"loss_mask": torch.zeros(bs, self.max_cand_len),
}
for i, samp in enumerate(samples):
samp, _ = samp
start = 0
enc_len = len(samp["enc_input_ids"])
if self.args.FiD:
enc_len = self.max_enc_len
for input_ids, target_ids in zip(
samp["cands"]["input_ids"], samp["cands"]["target_ids"]
):
cand_model_data["dec_input_ids"][i][
start : start + len(input_ids)
] = torch.tensor(input_ids, dtype=torch.long)
cand_no_model_data["target_ids"][i][
start : start + len(target_ids)
] = torch.tensor(target_ids, dtype=torch.long)
cand_model_data["dec_attention_mask"][i][
0,
start : start + len(input_ids),
start : start + len(input_ids),
] = torch.tril(torch.ones(len(input_ids), len(input_ids)))
cand_model_data["cross_attention_mask"][i][
0, start : start + len(input_ids), :enc_len
] = samp.get("cross_attention_mask", 1.0)
cand_no_model_data["loss_mask"][i][
start : start + len(input_ids)
] = 1
start = start + len(input_ids)
cand_no_model_data["pos"][i][start - 1] = True
cand_no_model_data["labels"][i] = samp["cands"]["label"]
if self.args.fp16:
cand_model_data["dec_attention_mask"] = cand_model_data[
"dec_attention_mask"
].half()
cand_model_data["cross_attention_mask"] = cand_model_data[
"cross_attention_mask"
].half()
else:
cand_model_data, cand_no_model_data = {}, {}
# print(name_list)
return model_data, no_model_data, cand_model_data, cand_no_model_data
| 21,571 | 38.29326 | 122 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/Retriever/loss.py | import torch
from torch import Tensor
from torch.nn import functional as F
from torch import distributed as dist
class SimpleContrastiveLoss:
def __call__(self, x: Tensor, y: Tensor, target: Tensor = None, reduction: str = 'mean'):
if target is None:
target_per_qry = y.size(0) // x.size(0)
target = torch.arange(
0, x.size(0) * target_per_qry, target_per_qry, device=x.device, dtype=torch.long)
logits = torch.matmul(x, y.transpose(0, 1))
return F.cross_entropy(logits, target, reduction=reduction)
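# Worked example for the in-batch-negative target above (shapes are illustrative): for queries
# x of shape [4, d] and passages y of shape [8, d], laid out as one positive followed by one
# negative per query, target_per_qry = 2 and target = [0, 2, 4, 6]; every other row of y then
# serves as a negative inside the [4, 8] logits matrix passed to cross_entropy.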
class DistributedContrastiveLoss(SimpleContrastiveLoss):
def __init__(self, n_target: int = 0, scale_loss: bool = True):
assert dist.is_initialized(), "Distributed training has not been properly initialized."
super().__init__()
        self.world_size = dist.get_world_size()
self.rank = dist.get_rank()
self.scale_loss = scale_loss
def __call__(self, x: Tensor, y: Tensor, **kwargs):
dist_x = self.gather_tensor(x)
dist_y = self.gather_tensor(y)
loss = super().__call__(dist_x, dist_y, **kwargs)
if self.scale_loss:
            loss = loss * self.world_size
return loss
def gather_tensor(self, t):
        gathered = [torch.empty_like(t) for _ in range(self.world_size)]
dist.all_gather(gathered, t)
gathered[self.rank] = t
return torch.cat(gathered, dim=0)
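# Note on gather_tensor (a standard trick for cross-device in-batch negatives): all_gather
# returns tensors detached from the autograd graph, so writing the locally computed tensor back
# into gathered[self.rank] lets each rank backpropagate through its own chunk while still
# contrasting against the embeddings gathered from every other rank.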
class MarginRankingLoss:
def __init__(self, margin: float = 1.0):
self.margin = margin
def __call__(self, pos_scores: Tensor, neg_scores: Tensor):
return torch.mean(F.relu(self.margin - pos_scores + neg_scores))
class SoftMarginRankingLoss:
def __init__(self, margin: float = 1.0):
self.margin = margin
def __call__(self, pos_scores: Tensor, neg_scores: Tensor):
return torch.mean(F.softplus(self.margin - pos_scores + neg_scores))
class BinaryCrossEntropyLoss:
def __call__(self, pos_scores: Tensor, neg_scores: Tensor):
return (F.binary_cross_entropy_with_logits(pos_scores, torch.ones_like(pos_scores))
+ F.binary_cross_entropy_with_logits(neg_scores, torch.zeros_like(neg_scores)))
class CrossEntropyLoss:
def __call__(self, pos_scores: Tensor, neg_scores: Tensor):
return (F.cross_entropy(pos_scores, torch.ones(pos_scores.shape[0], dtype=torch.long).to(pos_scores.device))
+ F.cross_entropy(neg_scores, torch.zeros(neg_scores.shape[0], dtype=torch.long).to(pos_scores.device)))
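# Worked example for the pairwise losses (illustrative scores): with the default margin of 1.0,
#   loss_fn = MarginRankingLoss()
#   pos = torch.tensor([2.0, 0.5]); neg = torch.tensor([1.5, -1.0])
#   loss_fn(pos, neg)  # mean(relu(1 - 2.0 + 1.5), relu(1 - 0.5 - 1.0)) = mean(0.5, 0.0) = 0.25
# so a pair stops contributing once the positive outscores the negative by at least the margin.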
rr_loss_functions = {
"mr": MarginRankingLoss,
"smr": SoftMarginRankingLoss,
"bce": BinaryCrossEntropyLoss,
"ce": CrossEntropyLoss,
} | 2,694 | 35.418919 | 118 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/Retriever/utils.py | # Adapted from Tevatron (https://github.com/texttron/tevatron)
import csv
import json
import warnings
from dataclasses import dataclass
from typing import Dict, List
import datasets
import torch
from transformers import PreTrainedTokenizer
try:
from opendelta import BitFitModel, AdapterModel, PrefixModel, LoraModel
_opendelta_available = True
except ModuleNotFoundError:
_opendelta_available = False
@dataclass
class SimpleTrainPreProcessor:
query_file: str
collection_file: str
tokenizer: PreTrainedTokenizer
doc_max_len: int = 128
query_max_len: int = 32
columns = ['text_id', 'title', 'text']
title_field = 'title'
text_field = 'text'
query_field = 'text'
doc_template: str = None
query_template: str = None
allow_not_found: bool = False
def __post_init__(self):
self.queries = self.read_queries(self.query_file)
self.collection = datasets.load_dataset(
'csv',
data_files=self.collection_file,
column_names=self.columns,
delimiter='\t',
)['train']
@staticmethod
def read_queries(queries):
qmap = {}
if queries[-3:] == "csv":
with open(queries) as f:
reader = csv.DictReader(f, fieldnames=["qid", "qry"], delimiter="\t")
for row in reader:
qid = row.pop("qid")
qry = row.pop("qry")
qmap[qid] = qry
else:
with open(queries) as f:
for l in f:
qid, qry = l.strip().split('\t')
qmap[qid] = qry
return qmap
@staticmethod
def read_qrel(relevance_file):
qrel = {}
with open(relevance_file, encoding='utf8') as f:
tsvreader = csv.reader(f, delimiter="\t")
for [topicid, _, docid, rel] in tsvreader:
assert rel == "1"
if topicid in qrel:
qrel[topicid].append(docid)
else:
qrel[topicid] = [docid]
return qrel
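    # Expected qrels layout (standard TSV qrels format; ids are illustrative): each line is
    # "<query_id>\t0\t<doc_id>\t1", so the two lines
    #   q1    0    d7    1
    #   q1    0    d9    1
    # produce qrel == {"q1": ["d7", "d9"]}.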
def get_query(self, q):
if self.query_template is None:
query = self.queries[q]
else:
query = fill_template(self.query_template, data={self.query_field: self.queries[q]}, allow_not_found=self.allow_not_found)
query_encoded = self.tokenizer.encode(
query,
add_special_tokens=False,
max_length=self.query_max_len,
truncation=True
)
return query_encoded
def get_passage(self, p):
entry = self.collection[int(p)]
title = entry[self.title_field]
title = "" if title is None else title
body = entry[self.text_field]
if self.doc_template is None:
content = title + self.tokenizer.sep_token + body
else:
content = fill_template(self.doc_template, data=entry, allow_not_found=self.allow_not_found)
passage_encoded = self.tokenizer.encode(
content,
add_special_tokens=False,
max_length=self.doc_max_len,
truncation=True
)
return passage_encoded
def process_one(self, train):
q, pp, nn = train
train_example = {
'query': self.get_query(q),
'positives': [self.get_passage(p) for p in pp],
'negatives': [self.get_passage(n) for n in nn],
}
return json.dumps(train_example)
@dataclass
class SimpleCollectionPreProcessor:
tokenizer: PreTrainedTokenizer
separator: str = '\t'
max_length: int = 128
def process_line(self, line: str):
xx = line.strip().split(self.separator)
text_id, text = xx[0], xx[1:]
text_encoded = self.tokenizer.encode(
self.tokenizer.sep_token.join(text),
add_special_tokens=False,
max_length=self.max_length,
truncation=True
)
encoded = {
'text_id': text_id,
'text': text_encoded
}
return json.dumps(encoded)
def save_as_trec(rank_result: Dict[str, Dict[str, float]], output_path: str, run_id: str = "OpenMatch"):
"""
Save the rank result as TREC format:
<query_id> Q0 <doc_id> <rank> <score> <run_id>
"""
with open(output_path, "w") as f:
for qid in rank_result:
# sort the results by score
sorted_results = sorted(rank_result[qid].items(), key=lambda x: x[1], reverse=True)
for i, (doc_id, score) in enumerate(sorted_results):
f.write("{} Q0 {} {} {} {}\n".format(qid, doc_id, i + 1, score, run_id))
def load_from_trec(input_path: str, as_list: bool = False, max_len_per_q: int = None):
"""
Load the rank result from TREC format:
<query_id> Q0 <doc_id> <rank> <score> <run_id> or
<query_id> <doc_id> <score>
"""
rank_result = {}
cnt = 0
with open(input_path, "r") as f:
for line in f:
content = line.strip().split()
if len(content) == 6:
qid, _, doc_id, _, score, _ = content
elif len(content) == 3:
qid, doc_id, score = content
else:
raise ValueError("Invalid run format")
if not as_list:
if qid not in rank_result:
rank_result[qid] = {}
cnt = 0
if max_len_per_q is None or cnt < max_len_per_q:
rank_result[qid][doc_id] = float(score)
else:
if qid not in rank_result:
rank_result[qid] = []
cnt = 0
if max_len_per_q is None or cnt < max_len_per_q:
rank_result[qid].append((doc_id, float(score)))
cnt += 1
return rank_result
def find_all_markers(template: str):
"""
Find all markers' names (quoted in "<>") in a template.
"""
markers = []
start = 0
while True:
start = template.find("<", start)
if start == -1:
break
end = template.find(">", start)
if end == -1:
break
markers.append(template[start + 1:end])
start = end + 1
return markers
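# Example (illustrative template):
#   find_all_markers("Title: <title> Text: <text>")  ->  ["title", "text"]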
def fill_template(template: str, data: Dict, markers: List[str] = None, allow_not_found: bool = False):
"""
Fill a template with data.
"""
if markers is None:
markers = find_all_markers(template)
for marker in markers:
marker_hierarchy = marker.split(".")
found = True
content = data
for marker_level in marker_hierarchy:
content = content.get(marker_level, None)
if content is None:
found = False
break
if not found:
if allow_not_found:
warnings.warn("Marker '{}' not found in data. Replacing it with an empty string.".format(marker), RuntimeWarning)
content = ""
else:
raise ValueError("Cannot find the marker '{}' in the data".format(marker))
template = template.replace("<{}>".format(marker), str(content))
return template
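# Example (illustrative template and data): nested fields are addressed with dots, e.g.
#   fill_template("Q: <question.text>", {"question": {"text": "Why?"}})  ->  "Q: Why?"
# and with allow_not_found=True a missing marker is replaced by an empty string plus a warning.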
def merge_retrieval_results_by_score(results: List[Dict[str, Dict[str, float]]], topk: int = 100):
"""
Merge retrieval results from multiple partitions of document embeddings and keep topk.
"""
merged_results = {}
for result in results:
for qid in result:
if qid not in merged_results:
merged_results[qid] = {}
for doc_id in result[qid]:
if doc_id not in merged_results[qid]:
merged_results[qid][doc_id] = result[qid][doc_id]
for qid in merged_results:
merged_results[qid] = {k: v for k, v in sorted(merged_results[qid].items(), key=lambda x: x[1], reverse=True)[:topk]}
return merged_results
# Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(token_embeddings, attention_mask):
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
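# Shape note: token_embeddings is [batch, seq_len, hidden] and attention_mask is [batch, seq_len];
# padded positions are zeroed out and each sequence is averaged over its real tokens only, giving
# one [batch, hidden] embedding (the clamp guards against division by zero for all-padding rows).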
def get_delta_model_class(model_type):
if not _opendelta_available:
raise ValueError(
'OpenDelta package not available. You can obtain it from https://github.com/thunlp/OpenDelta.')
delta_models = {
'bitfit': BitFitModel,
'adapter': AdapterModel,
'prefix': PrefixModel,
'lora': LoraModel
}
return delta_models[model_type] | 8,668 | 32.087786 | 134 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/Retriever/trainer/dense_trainer.py | # Adapted from Tevatron (https://github.com/texttron/tevatron)
import logging
import os
from itertools import repeat
from typing import Any, Dict, List, Optional, Tuple, Union
import datasets
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
from transformers.file_utils import is_datasets_available
from transformers.trainer import Trainer, TRAINING_ARGS_NAME
from transformers.trainer_pt_utils import IterableDatasetShard
from ..loss import DistributedContrastiveLoss, SimpleContrastiveLoss
logger = logging.getLogger(__name__)
try:
from grad_cache import GradCache
_grad_cache_available = True
except ModuleNotFoundError:
_grad_cache_available = False
class DRTrainer(Trainer):
def __init__(self, delta_model=None, *args, **kwargs):
super(DRTrainer, self).__init__(*args, **kwargs)
self.delta_model = delta_model
self._dist_loss_scale_factor = dist.get_world_size() if self.args.negatives_x_device else 1
def _save(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
self.model.save(output_dir)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
if self.delta_model:
logger.info("Saving delta model to %s", output_dir + "/delta_model")
self.delta_model.save_finetuned(output_dir + "/delta_model")
def _prepare_inputs(
self,
inputs: Tuple[Dict[str, Union[torch.Tensor, Any]], ...]
) -> List[Dict[str, Union[torch.Tensor, Any]]]:
prepared = []
for x in inputs:
if isinstance(x, torch.Tensor):
prepared.append(x.to(self.args.device))
else:
prepared.append(super()._prepare_inputs(x))
return prepared
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training [`~torch.utils.data.DataLoader`].
Will use no sampler if `self.train_dataset` does not implement `__len__`, a random sampler (adapted to
distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_dataset = self.train_dataset
if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
train_dataset = self._remove_unused_columns(train_dataset, description="training")
if isinstance(train_dataset, torch.utils.data.IterableDataset):
if self.args.world_size > 1:
train_dataset = IterableDatasetShard(
train_dataset,
batch_size=self.args.train_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
train_dataset,
batch_size=self.args.per_device_train_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
train_sampler = self._get_train_sampler()
return DataLoader(
train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def compute_loss(self, model, inputs, return_outputs=False):
query, passage = inputs
outputs = model(query=query, passage=passage)
return (outputs.loss, outputs) if return_outputs else outputs.loss
def training_step(self, *args):
return super(DRTrainer, self).training_step(*args) / self._dist_loss_scale_factor
def split_dense_inputs(model_input: dict, chunk_size: int):
assert len(model_input) == 1
arg_key = list(model_input.keys())[0]
arg_val = model_input[arg_key]
keys = list(arg_val.keys())
chunked_tensors = [arg_val[k].split(chunk_size, dim=0) for k in keys]
chunked_arg_val = [dict(zip(kk, tt)) for kk, tt in zip(repeat(keys), zip(*chunked_tensors))]
return [{arg_key: c} for c in chunked_arg_val]
def get_dense_rep(x):
if x.q_reps is None:
return x.p_reps
else:
return x.q_reps
class GCDenseTrainer(DRTrainer):
def __init__(self, *args, **kwargs):
logger.info('Initializing Gradient Cache Trainer')
if not _grad_cache_available:
raise ValueError(
'Grad Cache package not available. You can obtain it from https://github.com/luyug/GradCache.')
super(GCDenseTrainer, self).__init__(*args, **kwargs)
loss_fn_cls = DistributedContrastiveLoss if self.args.negatives_x_device else SimpleContrastiveLoss
loss_fn = loss_fn_cls()
self.gc = GradCache(
models=[self.model, self.model],
chunk_sizes=[self.args.gc_q_chunk_size, self.args.gc_p_chunk_size],
loss_fn=loss_fn,
split_input_fn=split_dense_inputs,
get_rep_fn=get_dense_rep,
fp16=self.args.fp16,
scaler=self.scaler
)
def training_step(self, model, inputs) -> torch.Tensor:
model.train()
queries, passages = self._prepare_inputs(inputs)
queries, passages = {'query': queries}, {'passage': passages}
_distributed = self.args.local_rank > -1
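        # Under DDP, gradient synchronization is skipped for all but the last chunk;
        # earlier chunks only accumulate gradients locally inside GradCache.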
self.gc.models = [model, model]
loss = self.gc(queries, passages, no_sync_except_last=_distributed)
return loss / self._dist_loss_scale_factor
| 6,280 | 36.837349 | 111 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/Retriever/trainer/reranker_trainer.py | # Adapted from Tevatron (https://github.com/texttron/tevatron)
import logging
import os
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from transformers.trainer import Trainer
from transformers.trainer_pt_utils import nested_detach
logger = logging.getLogger(__name__)
class RRTrainer(Trainer):
def __init__(self, *args, **kwargs):
super(RRTrainer, self).__init__(*args, **kwargs)
def _save(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
self.model.save(output_dir)
def _prepare_inputs(
self,
inputs: Tuple[Dict[str, Union[torch.Tensor, Any]], ...]
) -> List[Dict[str, Union[torch.Tensor, Any]]]:
prepared = []
for x in inputs:
if isinstance(x, torch.Tensor):
prepared.append(x.to(self.args.device))
else:
prepared.append(super()._prepare_inputs(x))
return prepared
def prediction_step(
self,
model: nn.Module,
inputs,
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
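        # Evaluation step for the pairwise reranker: compute the loss on
        # (positive, negative) pairs and return detached logits for metric computation.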
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
with torch.no_grad():
with self.autocast_smart_context_manager():
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
return (loss, logits, None)
def compute_loss(self, model, inputs, return_outputs=False):
pos_pairs, neg_pairs = inputs
outputs = model(pos_pairs=pos_pairs, neg_pairs=neg_pairs)
return (outputs.loss, outputs) if return_outputs else outputs.loss
| 2,539 | 32.866667 | 96 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/Retriever/dataset/inference_dataset.py | # Adapted from Tevatron (https://github.com/texttron/tevatron)
import os
from functools import lru_cache
from typing import List, Union, Callable
from datasets import load_dataset
from torch.utils.data import Dataset, IterableDataset
from transformers import PreTrainedTokenizer
from ..arguments import DataArguments
from ..utils import fill_template, find_all_markers
def get_idx(obj):
example_id = obj.get("_id", None) or obj.get("id", None) or obj.get("text_id", None)
example_id = str(example_id) if example_id is not None else None
return example_id
class InferenceDataset():
def __init__(
self,
tokenizer: PreTrainedTokenizer,
data_args: DataArguments,
data_files: Union[str, List[str]],
is_query: bool = False,
full_tokenization: bool = True,
mode: str = "processed",
batch_size: int = 1,
num_processes: int = 1,
process_index: int = 0,
filter_fn: Callable = lambda x: True,
cache_dir: str = None
):
self.cache_dir = cache_dir
self.is_query = is_query
self.data_files = data_files
self.tokenizer = tokenizer
self.max_len = data_args.q_max_len if self.is_query else data_args.p_max_len
self.template = data_args.query_template if self.is_query else data_args.doc_template
self.all_markers = find_all_markers(self.template) if data_args.all_markers is None else data_args.all_markers.split(",")
self.full_tokenization = full_tokenization
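        # "raw" returns the example untouched, "dict_processed" tokenizes each template
        # marker separately, and "processed" fills the template and tokenizes the result.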
modes = ["raw", "dict_processed", "processed"]
if mode not in modes:
raise ValueError(f"mode must be one of {modes}")
self.mode = mode
self.batch_size = batch_size
self.num_processes = num_processes
self.process_index = process_index
self.filter_fn = filter_fn
self._prepare_data(data_args)
def _prepare_data(self, data_args):
raise NotImplementedError
@classmethod
def load(
cls,
tokenizer: PreTrainedTokenizer,
data_args: DataArguments,
data_files: Union[str, List[str]] = None,
is_query: bool = False,
full_tokenization: bool = True,
mode: str = "processed",
stream: bool = True,
batch_size: int = 1,
num_processes: int = 1,
process_index: int = 0,
filter_fn: Callable = lambda x: True,
cache_dir: str = None
):
if data_files is None:
data_files = [data_args.query_path] if is_query else [data_args.corpus_path]
else:
data_files = [data_files] if isinstance(data_files, str) else data_files
ext = os.path.splitext(data_files[0])[1]
ext_to_cls = {
".json": StreamJsonlDataset if stream else MappingJsonlDataset,
".csv": StreamTsvDataset if stream else MappingTsvDataset,
".jsonl": StreamJsonlDataset if stream else MappingJsonlDataset,
".tsv": StreamTsvDataset if stream else MappingTsvDataset,
".txt": StreamTsvDataset if stream else MappingTsvDataset,
}
cls_ = ext_to_cls.get(ext, None)
if cls_ is None:
raise ValueError("Unsupported dataset file extension {}".format(ext))
return cls_(
tokenizer=tokenizer,
data_args=data_args,
data_files=data_files,
is_query=is_query,
full_tokenization=full_tokenization,
mode=mode,
batch_size=batch_size,
num_processes=num_processes,
process_index=process_index,
filter_fn=filter_fn,
cache_dir=cache_dir
)
def _tokenize(self, example: str):
return self.tokenizer(
example,
add_special_tokens=self.full_tokenization,
padding='max_length' if self.full_tokenization else False,
truncation=True,
max_length=self.max_len,
return_attention_mask=self.full_tokenization,
return_token_type_ids=False
)
def process_one(self, example):
if self.mode == "raw":
return example
elif self.mode == "dict_processed":
example_id = get_idx(example)
tokenized = {}
for marker in self.all_markers:
tokenized[marker] = dict(self._tokenize(example[marker])) if (marker in example and example[marker] is not None) else None
return {"text_id": example_id, **tokenized}
else:
example_id = get_idx(example)
full_text = fill_template(self.template, example, self.all_markers, allow_not_found=True)
tokenized = self._tokenize(full_text)
return {"text_id": example_id, **tokenized}
class StreamInferenceDataset(InferenceDataset, IterableDataset):
def __init__(
self,
tokenizer: PreTrainedTokenizer,
data_args: DataArguments,
data_files: Union[str, List[str]],
**kwargs
):
super(StreamInferenceDataset, self).__init__(tokenizer, data_args, data_files, **kwargs)
def __iter__(self):
real_batch_size = self.batch_size * self.num_processes
process_slice = range(self.process_index * self.batch_size, (self.process_index + 1) * self.batch_size)
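        # Every process reads the same stream but only yields its own slice of each
        # "real" batch, so the data is effectively sharded across processes.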
current_batch = []
for element in self.dataset:
current_batch.append(element)
# Wait to have a full batch before yielding elements.
if len(current_batch) == real_batch_size:
for i in process_slice:
yield self.process_one(current_batch[i])
current_batch = []
if len(current_batch) > 0:
for i in process_slice:
if i < len(current_batch):
yield self.process_one(current_batch[i])
class StreamJsonlDataset(StreamInferenceDataset):
def __init__(
self,
tokenizer: PreTrainedTokenizer,
data_args: DataArguments,
data_files: Union[str, List[str]],
**kwargs
):
super(StreamJsonlDataset, self).__init__(tokenizer, data_args, data_files, **kwargs)
def _prepare_data(self, data_args):
self.dataset = load_dataset(
"json",
data_files=self.data_files,
streaming=True,
cache_dir=self.cache_dir
)["train"].filter(self.filter_fn)
sample = list(self.dataset.take(1))[0]
self.all_columns = sample.keys()
class StreamTsvDataset(StreamInferenceDataset):
def __init__(
self,
tokenizer: PreTrainedTokenizer,
data_args: DataArguments,
data_files: Union[str, List[str]],
**kwargs
):
super(StreamTsvDataset, self).__init__(tokenizer, data_args, data_files, **kwargs)
def _prepare_data(self, data_args):
self.all_columns = data_args.query_column_names if self.is_query else data_args.doc_column_names
if self.all_columns is not None:
self.all_columns = self.all_columns.split(',')
self.dataset = load_dataset(
"csv",
data_files=self.data_files,
streaming=True,
column_names=self.all_columns,
delimiter='\t',
cache_dir=self.cache_dir
)["train"].filter(self.filter_fn)
class MappingInferenceDataset(InferenceDataset, Dataset):
def __init__(
self,
tokenizer: PreTrainedTokenizer,
data_args: DataArguments,
data_files: Union[str, List[str]],
**kwargs
):
super(MappingInferenceDataset, self).__init__(tokenizer, data_args, data_files, **kwargs)
@lru_cache(maxsize=None)
def __getitem__(self, index):
return self.process_one(self.dataset[index])
def get_raw(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
class MappingJsonlDataset(MappingInferenceDataset):
def __init__(
self,
tokenizer: PreTrainedTokenizer,
data_args: DataArguments,
data_files: Union[str, List[str]],
**kwargs
):
super(MappingJsonlDataset, self).__init__(tokenizer, data_args, data_files, **kwargs)
def _prepare_data(self, data_args):
hf_dataset = load_dataset(
"json",
data_files=self.data_files,
streaming=True,
cache_dir=self.cache_dir
)["train"].filter(self.filter_fn)
        sample = list(hf_dataset.take(1))[0]
self.all_columns = sample.keys()
self.dataset = {}
for item in hf_dataset:
self.dataset[get_idx(item)] = item
class MappingTsvDataset(MappingInferenceDataset):
def __init__(
self,
tokenizer: PreTrainedTokenizer,
data_args: DataArguments,
data_files: Union[str, List[str]],
**kwargs
):
super(MappingTsvDataset, self).__init__(tokenizer, data_args, data_files, **kwargs)
def _prepare_data(self, data_args):
self.all_columns = data_args.query_column_names if self.is_query else data_args.doc_column_names
if self.all_columns is not None:
self.all_columns = self.all_columns.split(',')
hf_dataset = load_dataset(
"csv",
data_files=self.data_files,
streaming=True,
column_names=self.all_columns,
delimiter='\t',
cache_dir=self.cache_dir
)["train"].filter(self.filter_fn)
self.dataset = {}
for item in hf_dataset:
self.dataset[get_idx(item)] = item
| 9,674 | 32.947368 | 138 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/Retriever/dataset/train_dataset.py | # Adapted from Tevatron (https://github.com/texttron/tevatron)
import glob
import logging
import os
import random
from typing import Callable, Dict, List, Union
from datasets import load_dataset
from torch.utils.data import Dataset, IterableDataset
from transformers import BatchEncoding, PreTrainedTokenizer
from ..arguments import DataArguments, DRPretrainingDataArguments
from ..data_augmentation_strategy import Cropping, NullStrategy, SequentialStrategies
from ..trainer import DRTrainer
logger = logging.getLogger(__name__)
class TrainDatasetBase:
"""
Abstract base class for all train datasets in Openmatch.\n
This implants arguments and data preparation, but should be mostly used for identifying an OpenMatch Train Dataset.\n
All future dataset ABCs would subclass this and `(Iterable)Dataset`.
"""
def __init__(
self,
tokenizer: PreTrainedTokenizer,
data_args: DataArguments,
trainer: DRTrainer = None,
is_eval: bool = False,
shuffle_seed: int = None,
cache_dir: str = None,
) -> None:
self.tokenizer = tokenizer
self.data_args = data_args
self.q_max_len = data_args.q_max_len
self.p_max_len = data_args.p_max_len
self.trainer = trainer
self.is_eval = is_eval
self._prepare_data(data_args, shuffle_seed, cache_dir)
def _prepare_data(self, data_args, shuffle_seed, cache_dir):
if not self.is_eval:
self.data_files = (
[data_args.train_path]
if data_args.train_dir is None
else glob.glob(os.path.join(data_args.train_dir, "*.jsonl"))
)
else:
self.data_files = [data_args.eval_path]
def get_process_fn(self, epoch, hashed_seed):
raise NotImplementedError
class StreamTrainDatasetMixin(IterableDataset):
def _prepare_data(self, data_args, shuffle_seed, cache_dir):
super()._prepare_data(data_args, shuffle_seed, cache_dir)
self.dataset = load_dataset(
"json", data_files=self.data_files, streaming=True, cache_dir=cache_dir
)["train"]
self.dataset = (
self.dataset.shuffle(seed=shuffle_seed, buffer_size=10_000)
if shuffle_seed is not None
else self.dataset
)
sample = list(self.dataset.take(1))[0]
self.all_columns = sample.keys()
def __len__(self):
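        # Streaming datasets expose no __len__, so count the lines of the underlying
        # jsonl files with `wc -l` (skipping the "total" summary line).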
concat_filenames = " ".join(self.data_files)
count = 0
with os.popen("wc -l {}".format(concat_filenames)) as f:
for line in f:
lc, filename = line.strip().split()
lc = int(lc)
if filename != "total":
count += lc
return count
def __iter__(self):
if not self.is_eval:
epoch = int(self.trainer.state.epoch)
_hashed_seed = hash(self.trainer.args.seed)
self.dataset.set_epoch(epoch)
return iter(
self.dataset.map(
self.get_process_fn(epoch, _hashed_seed),
remove_columns=self.all_columns,
)
)
return iter(
self.dataset.map(
self.get_process_fn(0, None), remove_columns=self.all_columns
)
)
class MappingTrainDatasetMixin(Dataset):
def _prepare_data(self, data_args, shuffle_seed, cache_dir):
super()._prepare_data(data_args, shuffle_seed, cache_dir)
self.dataset = load_dataset(
"json", data_files=self.data_files, streaming=False, cache_dir=cache_dir
)["train"]
sample = self.dataset[0]
self.all_columns = sample.keys()
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
group = self.dataset[index]
if not self.is_eval:
epoch = int(self.trainer.state.epoch)
_hashed_seed = hash(index + self.trainer.args.seed)
return self.get_process_fn(epoch, _hashed_seed)(group)
return self.get_process_fn(0, None)(group)
class DRTrainDataset(TrainDatasetBase):
def create_one_example(
self, text_encoding: List[int], is_query=False
) -> BatchEncoding:
item = self.tokenizer.encode_plus(
text_encoding,
truncation="only_first",
max_length=self.data_args.q_max_len
if is_query
else self.data_args.p_max_len,
padding=False,
return_attention_mask=False,
return_token_type_ids=False,
)
return item
def get_process_fn(self, epoch, hashed_seed):
def process_fn(example):
qry = example["query"]
encoded_query = self.create_one_example(qry, is_query=True)
encoded_passages = []
group_positives = example["positives"]
group_negatives = example["negatives"]
if not self.data_args.use_all_positive_passages:
if self.data_args.positive_passage_no_shuffle or hashed_seed is None:
pos_psg = group_positives[0]
else:
pos_psg = group_positives[
(hashed_seed + epoch) % len(group_positives)
]
encoded_passages.append(self.create_one_example(pos_psg))
else:
for pos_psg in group_positives:
encoded_passages.append(self.create_one_example(pos_psg))
negative_size = self.data_args.train_n_passages - 1
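            # Sample train_n_passages - 1 negatives: with a too-small pool, sample with
            # replacement (or repeat deterministically when hashed_seed is None);
            # otherwise take an epoch-dependent window of the (shuffled) negative list.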
if len(group_negatives) < negative_size:
if hashed_seed is not None:
negs = random.choices(group_negatives, k=negative_size)
else:
negs = [x for x in group_negatives]
negs = negs * 2
negs = negs[:negative_size]
elif self.data_args.train_n_passages == 1:
negs = []
elif self.data_args.negative_passage_no_shuffle:
negs = group_negatives[:negative_size]
else:
_offset = epoch * negative_size % len(group_negatives)
negs = [x for x in group_negatives]
if hashed_seed is not None:
random.Random(hashed_seed).shuffle(negs)
negs = negs * 2
negs = negs[_offset : _offset + negative_size]
for neg_psg in negs:
encoded_passages.append(self.create_one_example(neg_psg))
if not self.data_args.use_all_positive_passages:
assert len(encoded_passages) == self.data_args.train_n_passages
else:
assert (
len(encoded_passages)
== self.data_args.train_n_passages + len(group_positives) - 1
)
return {
"query_": encoded_query,
"passages": encoded_passages,
} # Avoid name conflict with query in the original dataset
return process_fn
class StreamDRTrainDataset(StreamTrainDatasetMixin, DRTrainDataset):
pass
class MappingDRTrainDataset(MappingTrainDatasetMixin, DRTrainDataset):
pass
class DRPretrainDataset(TrainDatasetBase):
def __init__(
self,
tokenizer: PreTrainedTokenizer,
data_args: DRPretrainingDataArguments,
trainer: DRTrainer = None,
is_eval: bool = False,
shuffle_seed: int = None,
cache_dir: str = None,
) -> None:
super(DRPretrainDataset, self).__init__(
tokenizer, data_args, trainer, is_eval, shuffle_seed, cache_dir
)
pretrain_strategies_str = (
data_args.pretrain_strategies.split(",")
if data_args.pretrain_strategies is not None
else []
)
strategies = []
for strategy_str in pretrain_strategies_str:
if strategy_str == "null":
strategies.append(NullStrategy())
logger.info("Adding NullStrategy")
elif strategy_str == "crop":
strategies.append(
Cropping(
ratio_min=data_args.cropping_ratio_min,
ratio_max=data_args.cropping_ratio_max,
)
)
logger.info(
"Adding Cropping, ratio_min={}, ratio_max={}".format(
data_args.cropping_ratio_min, data_args.cropping_ratio_max
)
)
else:
raise ValueError(
"Unknown pretraining strategy: {}".format(strategy_str)
)
self.apply_strategy = SequentialStrategies(*strategies)
def create_one_example(
self, text_encoding: List[int], is_query=False
) -> BatchEncoding:
text_encoding = self.apply_strategy(text_encoding)
item = self.tokenizer.encode_plus(
text_encoding,
truncation="only_first",
max_length=self.data_args.q_max_len
if is_query
else self.data_args.p_max_len,
padding=False,
return_attention_mask=False,
return_token_type_ids=False,
)
return item
def get_process_fn(self, epoch, hashed_seed):
def process_fn(example):
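            # Contrastive pretraining: the query and its positive passage are two
            # (possibly cropped) views of the same piece of text.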
content = example[self.data_args.pretrain_target_field]
encoded_query = self.create_one_example(content, is_query=True)
encoded_passages = [self.create_one_example(content)]
return {"query": encoded_query, "passages": encoded_passages}
return process_fn
class StreamDRPretrainDataset(StreamTrainDatasetMixin, DRPretrainDataset):
pass
class MappingDRPretrainDataset(MappingTrainDatasetMixin, DRPretrainDataset):
pass
class RRTrainDataset(TrainDatasetBase):
def create_one_example(self, qry_encoding, psg_encoding) -> BatchEncoding:
item = self.tokenizer.encode_plus(
qry_encoding + psg_encoding,
truncation="longest_first",
max_length=self.data_args.q_max_len + self.data_args.p_max_len + 2,
padding=False,
return_attention_mask=False,
return_token_type_ids=False,
)
return item
def get_process_fn(self, epoch, hashed_seed):
def process_fn(example):
qry = example["query"]
group_positives = example["positives"]
group_negatives = example["negatives"]
if self.data_args.positive_passage_no_shuffle or hashed_seed is None:
pos_psg = group_positives[0]
else:
pos_psg = group_positives[(hashed_seed + epoch) % len(group_positives)]
encoded_pos_pair = self.create_one_example(qry, pos_psg)
if hashed_seed is None:
neg_psg = group_negatives[0]
else:
neg_psg = group_negatives[(hashed_seed + epoch) % len(group_negatives)]
encoded_neg_pair = self.create_one_example(qry, neg_psg)
return {"pos_pair": encoded_pos_pair, "neg_pair": encoded_neg_pair}
return process_fn
class StreamRRTrainDataset(StreamTrainDatasetMixin, RRTrainDataset):
pass
class MappingRRTrainDataset(MappingTrainDatasetMixin, RRTrainDataset):
pass
| 11,428 | 34.604361 | 121 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/Retriever/modeling/reranking_model.py | import copy
import json
import logging
import os
from dataclasses import dataclass
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from transformers import (AutoModel, BatchEncoding, PreTrainedModel,
T5EncoderModel, PreTrainedTokenizer, AutoConfig, T5ForConditionalGeneration)
from transformers.modeling_outputs import ModelOutput
from ..arguments import DataArguments
from ..arguments import RRTrainingArguments as TrainingArguments
from ..arguments import ModelArguments
from ..loss import rr_loss_functions, CrossEntropyLoss
from ..utils import mean_pooling
from .linear import LinearHead
logger = logging.getLogger(__name__)
@dataclass
class RROutput(ModelOutput):
pos_pair_scores: Tensor = None
neg_pair_scores: Tensor = None
loss: Tensor = None
class RRModel(nn.Module):
def __init__(
self,
lm: PreTrainedModel,
head: nn.Module,
feature: str = "last_hidden_state",
pooling: str = "first",
pos_token: str = None,
neg_token: str = None,
tokenizer: PreTrainedTokenizer = None,
model_args: ModelArguments = None,
data_args: DataArguments = None,
train_args: TrainingArguments = None,
):
super().__init__()
self.lm = lm
self.head = head
self.feature = feature
self.pooling = pooling
self.pos_token = pos_token
self.neg_token = neg_token
self.tokenizer = tokenizer
self.pos_token_id = tokenizer.encode(self.pos_token, add_special_tokens=False)[0] if self.pos_token else None
self.neg_token_id = tokenizer.encode(self.neg_token, add_special_tokens=False)[0] if self.neg_token else None
self.model_args = model_args
self.data_args = data_args
self.train_args = train_args
if train_args is not None:
self.loss_fn_str = train_args.loss_fn
self.loss_fn = rr_loss_functions[self.loss_fn_str]()
self.margin = train_args.margin
if "T5" in type(self.lm).__name__ and not self.model_args.encoder_only:
self.loss_fn_str = "ce"
self.loss_fn = CrossEntropyLoss()
def _get_config_dict(self):
config = {
"plm_backbone": {
"type": type(self.lm).__name__,
"feature": self.feature,
},
"pooling": self.pooling,
"pos_token": self.pos_token,
"neg_token": self.neg_token,
}
return config
def forward(
self,
pos_pairs: Dict[str, Tensor] = None,
neg_pairs: Dict[str, Tensor] = None,
):
pos_pair_scores = self.encode(pos_pairs)
neg_pair_scores = self.encode(neg_pairs)
if self.loss_fn_str in ["mr", "smr"]:
loss = self.loss_fn(pos_pair_scores, neg_pair_scores, margin=self.margin)
else:
loss = self.loss_fn(pos_pair_scores, neg_pair_scores)
return RROutput(
loss=loss,
pos_pair_scores=pos_pair_scores,
neg_pair_scores=neg_pair_scores,
)
def encode(self, items):
        if items is None:
            return None
items = BatchEncoding(items)
if "T5" in type(self.lm).__name__ and not self.model_args.encoder_only:
decoder_input_ids = torch.zeros((items.input_ids.shape[0], 1), dtype=torch.long).to(items.input_ids.device)
items_out = self.lm(**items, decoder_input_ids=decoder_input_ids, return_dict=True)
logits = items_out.logits
scores = logits[:, 0, [self.neg_token_id, self.pos_token_id]] # batch_size * 2
else:
items_out = self.lm(**items, return_dict=True)
hidden = getattr(items_out, self.feature)
if self.pooling == "first":
reps = hidden[:, 0, :]
elif self.pooling == "mean":
reps = mean_pooling(hidden, items.attention_mask)
else:
raise ValueError("Unknown pooling type: {}".format(self.pooling))
scores = self.head(reps) # batch_size * 1
return scores
@classmethod
def build(
cls,
model_args: ModelArguments,
data_args: DataArguments = None,
train_args: TrainingArguments = None,
tokenizer: PreTrainedTokenizer = None,
**hf_kwargs,
):
# load local
config = None
model_class = None
hf_config = AutoConfig.from_pretrained(model_args.model_name_or_path, **hf_kwargs)
if model_args.encoder_only:
model_class = T5EncoderModel
elif "T5" in hf_config.architectures[0]: # Pre-trained T5 model
model_class = T5ForConditionalGeneration
else:
model_class = AutoModel
if os.path.exists(os.path.join(model_args.model_name_or_path, "openmatch_config.json")):
with open(os.path.join(model_args.model_name_or_path, "openmatch_config.json")) as f:
config = json.load(f)
if os.path.isdir(model_args.model_name_or_path) and config is not None: # not a raw Huggingface model
logger.info(f'loading reranking model weight from {model_args.model_name_or_path}')
lm = model_class.from_pretrained(
model_args.model_name_or_path,
**hf_kwargs
)
head = LinearHead.load(ckpt_dir=model_args.model_name_or_path)
else: # a Huggingface model
lm = model_class.from_pretrained(model_args.model_name_or_path, **hf_kwargs)
head = LinearHead(model_args.projection_in_dim, 1)
model = cls(
lm=lm,
head=head,
feature=model_args.feature if config is None else config["plm_backbone"]["feature"],
pooling=model_args.pooling if config is None else config["pooling"],
pos_token=model_args.pos_token if config is None else config["pos_token"],
neg_token=model_args.neg_token if config is None else config["neg_token"],
tokenizer=tokenizer,
model_args=model_args,
data_args=data_args,
train_args=train_args,
)
return model
def save(self, output_dir: str):
self.lm.save_pretrained(output_dir)
self.head.save(output_dir)
with open(os.path.join(output_dir, 'openmatch_config.json'), 'w') as f:
json.dump(self._get_config_dict(), f, indent=4)
| 6,685 | 35.736264 | 119 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/Retriever/modeling/linear.py | import logging
import os
import json
import torch
import torch.nn as nn
from torch import Tensor
logger = logging.getLogger(__name__)
class LinearHead(nn.Module):
def __init__(
self,
input_dim: int = 768,
output_dim: int = 768,
):
super(LinearHead, self).__init__()
self.linear = nn.Linear(input_dim, output_dim, bias=False)
self.config = {'input_dim': input_dim, 'output_dim': output_dim}
def forward(self, rep: Tensor = None):
return self.linear(rep)
@classmethod
def load(cls, ckpt_dir: str):
logger.info(f'Loading linear head from {ckpt_dir}')
model_path = os.path.join(ckpt_dir, 'linear.pt')
config_path = os.path.join(ckpt_dir, 'head_config.json')
with open(config_path, 'r') as f:
config = json.load(f)
model = cls(**config)
model.load_state_dict(torch.load(model_path))
return model
def save(self, save_path):
torch.save(self.state_dict(), os.path.join(save_path, 'linear.pt'))
with open(os.path.join(save_path, 'head_config.json'), 'w') as f:
json.dump(self.config, f, indent=4) | 1,182 | 29.333333 | 75 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/Retriever/modeling/dense_retrieval_model.py | # Adapted from Tevatron (https://github.com/texttron/tevatron)
import copy
import importlib
import json
import logging
import os
from dataclasses import dataclass
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from transformers import (
AutoConfig,
AutoModel,
BatchEncoding,
PreTrainedModel,
T5EncoderModel,
)
from transformers.modeling_outputs import ModelOutput
from ..arguments import DataArguments
from ..arguments import DRTrainingArguments as TrainingArguments
from ..arguments import ModelArguments
from ..utils import mean_pooling
from .linear import LinearHead
logger = logging.getLogger(__name__)
@dataclass
class DROutput(ModelOutput):
q_reps: Tensor = None
p_reps: Tensor = None
loss: Tensor = None
scores: Tensor = None
class DRModel(nn.Module):
def __init__(
self,
lm_q: PreTrainedModel,
lm_p: PreTrainedModel,
tied: bool = True,
feature: str = "last_hidden_state",
pooling: str = "first",
head_q: nn.Module = None,
head_p: nn.Module = None,
normalize: bool = False,
model_args: ModelArguments = None,
data_args: DataArguments = None,
train_args: TrainingArguments = None,
):
super().__init__()
self.tied = tied
self.lm_q = lm_q
self.lm_p = lm_p
self.head_q = head_q
self.head_p = head_p
self.loss_fn = nn.CrossEntropyLoss(reduction="mean")
self.feature = feature
self.pooling = pooling
self.normalize = normalize
self.model_args = model_args
self.train_args = train_args
self.data_args = data_args
if train_args is not None and train_args.negatives_x_device:
if not dist.is_initialized():
raise ValueError(
"Distributed training has not been initialized for representation all gather."
)
self.process_rank = dist.get_rank()
self.world_size = dist.get_world_size()
def _get_config_dict(self):
config = {
"tied": self.tied,
"plm_backbone": {
"type": type(self.lm_q).__name__,
"feature": self.feature,
},
"pooling": self.pooling,
"linear_head": bool(self.head_q),
"normalize": self.normalize,
}
return config
def forward(
self,
query: Dict[str, Tensor] = None,
passage: Dict[str, Tensor] = None,
):
q_hidden, q_reps = self.encode_query(query)
p_hidden, p_reps = self.encode_passage(passage)
if q_reps is None or p_reps is None:
return DROutput(q_reps=q_reps, p_reps=p_reps)
# if self.training:
if self.train_args.negatives_x_device:
q_reps = self.dist_gather_tensor(q_reps)
p_reps = self.dist_gather_tensor(p_reps)
effective_bsz = (
self.train_args.per_device_train_batch_size * self.world_size
if self.train_args.negatives_x_device
else self.train_args.per_device_train_batch_size
)
scores = torch.matmul(q_reps, p_reps.transpose(0, 1))
# scores = torch.matmul(q_reps, p_reps.transpose(0, 1)) / 0.05 # contriever
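        # In-batch negatives: every query is scored against all passages in the
        # (possibly gathered) batch; the positive for query i sits at column
        # i * train_n_passages, which is exactly what the CE target below encodes.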
if not self.data_args.use_all_positive_passages:
target = torch.arange(
scores.size(0), device=scores.device, dtype=torch.long
)
target = target * self.data_args.train_n_passages
loss = self.loss_fn(scores, target)
else:
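            # With all positive passages used, build one softmax CE term per
            # (query, positive) pair -- the positive is moved to column 0 of the
            # candidate list -- and average the resulting losses.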
batch_size = scores.size(0)
n_total_passages = int(scores.size(1) / scores.size(0))
n_positive_passages = n_total_passages - (
self.data_args.train_n_passages - 1
)
losses = None
num = 0
target = torch.arange(1, device=scores.device, dtype=torch.long)
for i in range(batch_size):
indices = [0]
positive_indices = [
i * n_total_passages + j for j in range(n_positive_passages)
]
for j in range(scores.size(1)):
if j not in positive_indices:
indices.append(j)
for j in range(n_positive_passages):
indices[0] = i * n_total_passages + j
now_scores = scores[i][indices].unsqueeze(0)
loss = self.loss_fn(now_scores, target)
                    losses = losses + loss if losses is not None else loss
num += 1
loss = losses / num
if self.training and self.train_args.negatives_x_device:
            loss = loss * self.world_size  # counteract DDP's gradient averaging across processes
return DROutput(loss=loss, scores=scores, q_reps=q_reps, p_reps=p_reps)
def encode(self, items, model, head):
if items is None:
return None, None
items = BatchEncoding(items)
if "T5" in type(model).__name__ and not self.model_args.encoder_only:
decoder_input_ids = torch.zeros(
(items.input_ids.shape[0], 1), dtype=torch.long
).to(items.input_ids.device)
items_out = model(
**items, decoder_input_ids=decoder_input_ids, return_dict=True
)
hidden = items_out.last_hidden_state
reps = hidden[:, 0, :]
else:
items_out = model(**items, return_dict=True)
hidden = getattr(items_out, self.feature)
if self.pooling == "first":
reps = hidden[:, 0, :]
elif self.pooling == "mean":
reps = mean_pooling(hidden, items.attention_mask)
elif self.pooling == "no":
reps = hidden
else:
raise ValueError("Unknown pooling type: {}".format(self.pooling))
if head is not None:
reps = head(reps) # D * d
if self.normalize:
reps = F.normalize(reps, dim=1)
return hidden, reps
def encode_passage(self, psg):
return self.encode(psg, self.lm_p, self.head_p)
def encode_query(self, qry):
return self.encode(qry, self.lm_q, self.head_q)
@classmethod
def build(
cls,
model_args: ModelArguments,
data_args: DataArguments = None,
train_args: TrainingArguments = None,
**hf_kwargs,
):
# load local
config = None
head_q = head_p = None
if os.path.exists(
os.path.join(model_args.model_name_or_path, "openmatch_config.json")
):
with open(
os.path.join(model_args.model_name_or_path, "openmatch_config.json")
) as f:
config = json.load(f)
if (
os.path.isdir(model_args.model_name_or_path) and config is not None
): # an OpenMatch model
tied = config["tied"]
if tied:
logger.info(
f"loading query model weight from {model_args.model_name_or_path}"
)
model_name = config["plm_backbone"]["type"]
model_class = getattr(
importlib.import_module("transformers"), model_name
)
lm_q = lm_p = model_class.from_pretrained(
model_args.model_name_or_path, **hf_kwargs
)
if config["linear_head"]:
head_q = head_p = LinearHead.load(model_args.model_name_or_path)
else:
_qry_model_path = os.path.join(
model_args.model_name_or_path, "query_model"
)
_psg_model_path = os.path.join(
model_args.model_name_or_path, "passage_model"
)
_qry_head_path = os.path.join(
model_args.model_name_or_path, "query_head"
)
_psg_head_path = os.path.join(
model_args.model_name_or_path, "passage_head"
)
logger.info(f"loading query model weight from {_qry_model_path}")
model_name = config["plm_backbone"]["lm_q_type"]
model_class = getattr(
importlib.import_module("transformers"), model_name
)
if os.path.exists(os.path.join(_qry_model_path, "config.json")):
logger.info(f"loading query model config from {_qry_model_path}")
qry_model_config = AutoConfig.from_pretrained(_qry_model_path)
hf_kwargs["config"] = qry_model_config
lm_q = model_class.from_pretrained(_qry_model_path, **hf_kwargs)
logger.info(f"loading passage model weight from {_psg_model_path}")
model_name = config["plm_backbone"]["lm_p_type"]
model_class = getattr(
importlib.import_module("transformers"), model_name
)
if os.path.exists(os.path.join(_psg_model_path, "config.json")):
logger.info(f"loading passage model config from {_psg_model_path}")
psg_model_config = AutoConfig.from_pretrained(_psg_model_path)
hf_kwargs["config"] = psg_model_config
lm_p = model_class.from_pretrained(_psg_model_path, **hf_kwargs)
if config["linear_head"]:
head_q = LinearHead.load(_qry_head_path)
head_p = LinearHead.load(_psg_head_path)
else: # a Huggingface model
tied = not model_args.untie_encoder
model_class = T5EncoderModel if model_args.encoder_only else AutoModel
lm_q = model_class.from_pretrained(
model_args.model_name_or_path, **hf_kwargs
)
lm_p = copy.deepcopy(lm_q) if not tied else lm_q
if model_args.add_linear_head:
head_q = LinearHead(
model_args.projection_in_dim, model_args.projection_out_dim
)
head_p = copy.deepcopy(head_q) if not tied else head_q
model = cls(
lm_q=lm_q,
lm_p=lm_p,
tied=tied,
feature=model_args.feature
if config is None
else config["plm_backbone"]["feature"],
pooling=model_args.pooling if config is None else config["pooling"],
head_q=head_q,
head_p=head_p,
normalize=model_args.normalize if config is None else config["normalize"],
model_args=model_args,
data_args=data_args,
train_args=train_args,
)
return model
def save(self, output_dir: str):
if not self.tied:
os.makedirs(os.path.join(output_dir, "query_model"))
os.makedirs(os.path.join(output_dir, "passage_model"))
self.lm_q.save_pretrained(os.path.join(output_dir, "query_model"))
self.lm_p.save_pretrained(os.path.join(output_dir, "passage_model"))
if self.head_q is not None:
self.head_q.save(os.path.join(output_dir, "query_head"))
self.head_p.save(os.path.join(output_dir, "passage_head"))
else:
self.lm_q.save_pretrained(output_dir)
if self.head_q is not None:
self.head_q.save(output_dir)
with open(os.path.join(output_dir, "openmatch_config.json"), "w") as f:
json.dump(self._get_config_dict(), f, indent=4)
def dist_gather_tensor(self, t: Optional[torch.Tensor]):
if t is None:
return None
t = t.contiguous()
all_tensors = [torch.empty_like(t) for _ in range(self.world_size)]
dist.all_gather(all_tensors, t)
all_tensors[self.process_rank] = t
all_tensors = torch.cat(all_tensors, dim=0)
return all_tensors
class DRModelForInference(DRModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# self.eval()
@torch.no_grad()
def encode_passage(self, psg):
return super(DRModelForInference, self).encode_passage(psg)
@torch.no_grad()
def encode_query(self, qry):
return super(DRModelForInference, self).encode_query(qry)
def forward(
self,
query: Dict[str, Tensor] = None,
passage: Dict[str, Tensor] = None,
):
q_hidden, q_reps = self.encode_query(query)
p_hidden, p_reps = self.encode_passage(passage)
return DROutput(q_reps=q_reps, p_reps=p_reps)
| 12,826 | 35.440341 | 98 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/Retriever/retriever/reranker.py | import logging
import os
from contextlib import nullcontext
from typing import Dict
import torch
from torch.cuda import amp
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset, IterableDataset
from tqdm import tqdm
from transformers import PreTrainedTokenizer
from transformers.trainer_pt_utils import IterableDatasetShard
from ..arguments import InferenceArguments as EncodingArguments
from ..dataset import InferenceDataset, RRInferenceCollator
from ..modeling import RRModel
from ..utils import (load_from_trec, merge_retrieval_results_by_score,
save_as_trec)
logger = logging.getLogger(__name__)
def encode_pair(tokenizer, item1, item2, max_len_1=32, max_len_2=128):
return tokenizer.encode_plus(
item1 + item2,
truncation='longest_first',
padding='max_length',
max_length=max_len_1 + max_len_2 + 2,
)
def add_to_result_dict(result_dicts, qids, dids, scores):
for qid, did, score in zip(qids, dids, scores):
if qid not in result_dicts:
result_dicts[qid] = {}
result_dicts[qid][did] = float(score)
class RRPredictDataset(IterableDataset):
def __init__(
self,
tokenizer: PreTrainedTokenizer,
query_dataset: InferenceDataset,
corpus_dataset: InferenceDataset,
run: Dict[str, Dict[str, float]]
):
super(RRPredictDataset, self).__init__()
self.tokenizer = tokenizer
self.query_dataset = query_dataset
self.corpus_dataset = corpus_dataset
self.run = run
def __iter__(self):
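        # Lazily emit one (query, doc) pair per entry of the run, tokenized jointly so
        # the reranker can score each pair in a single forward pass.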
def gen_q_d_pair():
for qid, did_and_scores in self.run.items():
for did, _ in did_and_scores.items():
yield {
"query_id": qid,
"doc_id": did,
**encode_pair(
self.tokenizer,
self.query_dataset[qid]["input_ids"],
self.corpus_dataset[did]["input_ids"],
self.query_dataset.max_len,
self.corpus_dataset.max_len
),
}
return gen_q_d_pair()
class Reranker:
def __init__(
self,
model: RRModel,
tokenizer: PreTrainedTokenizer,
corpus_dataset: Dataset,
args: EncodingArguments
):
logger.info("Initializing reranker")
self.model = model
self.tokenizer = tokenizer
self.corpus_dataset = corpus_dataset
self.args = args
self.model = model.to(self.args.device)
self.model.eval()
def rerank(self, query_dataset: InferenceDataset, run: Dict[str, Dict[str, float]]):
return_dict = {}
dataset = RRPredictDataset(self.tokenizer, query_dataset, self.corpus_dataset, run)
if self.args.world_size > 1:
dataset = IterableDatasetShard(
dataset,
batch_size=self.args.per_device_eval_batch_size,
drop_last=False,
num_processes=self.args.world_size,
process_index=self.args.process_index
)
dataloader = DataLoader(
dataset,
batch_size=self.args.eval_batch_size,
collate_fn=RRInferenceCollator(),
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
with torch.no_grad():
for qids, dids, batch in tqdm(dataloader, desc="Reranking", disable=self.args.local_process_index > 0):
with amp.autocast() if self.args.fp16 else nullcontext():
for k, v in batch.items():
batch[k] = v.to(self.args.device)
outputs = self.model.encode(batch)
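                    # Two-column outputs (e.g. neg/pos token logits from a T5 reranker)
                    # are collapsed to a single score: the log-probability of "pos".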
if len(outputs.shape) == 2 and outputs.shape[1] == 2:
outputs = F.log_softmax(outputs, dim=1)[:, 1]
scores = outputs.cpu().numpy()
add_to_result_dict(return_dict, qids, dids, scores)
if self.args.world_size > 1:
save_as_trec(return_dict, self.args.trec_save_path + ".rank.{}".format(self.args.process_index))
torch.distributed.barrier()
if self.args.process_index == 0:
# aggregate results
all_results = []
for i in range(self.args.world_size):
all_results.append(load_from_trec(self.args.trec_save_path + ".rank.{}".format(i)))
return_dict = merge_retrieval_results_by_score(all_results)
# remove temp files
for i in range(self.args.world_size):
os.remove(self.args.trec_save_path + ".rank.{}".format(i))
torch.distributed.barrier()
return return_dict
| 4,921 | 35.731343 | 115 | py |
Augmentation-Adapted-Retriever | Augmentation-Adapted-Retriever-main/src/Retriever/retriever/dense_retriever.py | import gc
import glob
import logging
import os
import pickle
from contextlib import nullcontext
from typing import Dict, List
import faiss
import numpy as np
import torch
from torch.cuda import amp
from torch.utils.data import DataLoader, IterableDataset
from tqdm import tqdm
from ..arguments import InferenceArguments as EncodingArguments
from ..dataset import DRInferenceCollator
from ..modeling import DRModelForInference, DROutput
from ..utils import merge_retrieval_results_by_score
logger = logging.getLogger(__name__)
class Retriever:
def __init__(self, model: DRModelForInference, corpus_dataset: IterableDataset, args: EncodingArguments):
logger.info("Initializing retriever")
self.model = model
self.corpus_dataset = corpus_dataset
self.args = args
self.doc_lookup = []
self.query_lookup = []
self.model.to(self.args.device)
self.model.eval()
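    # IndexFlatIP performs exact inner-product search; for cosine similarity the
    # embeddings must be L2-normalized (see the `normalize` option of the model).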
def _initialize_faiss_index(self, dim: int):
self.index = None
cpu_index = faiss.IndexFlatIP(dim)
self.index = cpu_index
def _move_index_to_gpu(self):
logger.info("Moving index to GPU(s)")
ngpu = faiss.get_num_gpus()
gpu_resources = []
for i in range(ngpu):
res = faiss.StandardGpuResources()
gpu_resources.append(res)
co = faiss.GpuMultipleClonerOptions()
co.shard = True
co.usePrecomputed = False
vres = faiss.GpuResourcesVector()
vdev = faiss.IntVector()
for i in range(0, ngpu):
vdev.push_back(i)
vres.push_back(gpu_resources[i])
self.index = faiss.index_cpu_to_gpu_multiple(
vres, vdev, self.index, co)
def doc_embedding_inference(self):
# Note: during evaluation, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
if self.corpus_dataset is None:
raise ValueError("No corpus dataset provided")
dataloader = DataLoader(
self.corpus_dataset,
# Note that we do not support DataParallel here
batch_size=self.args.per_device_eval_batch_size,
collate_fn=DRInferenceCollator(),
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
os.makedirs(self.args.output_dir, exist_ok=True)
encoded = []
lookup_indices = []
idx = 0
prev_idx = 0
for (batch_ids, batch) in tqdm(dataloader, disable=self.args.process_index > 0):
lookup_indices.extend(batch_ids)
idx += len(batch_ids)
with amp.autocast() if self.args.fp16 else nullcontext():
with torch.no_grad():
for k, v in batch.items():
batch[k] = v.to(self.args.device)
model_output: DROutput = self.model(passage=batch)
encoded.append(model_output.p_reps.cpu().detach().numpy())
if len(lookup_indices) >= self.args.max_inmem_docs // self.args.world_size:
encoded = np.concatenate(encoded)
with open(os.path.join(self.args.output_dir, "embeddings.corpus.rank.{}.{}-{}".format(self.args.process_index, prev_idx, idx)), 'wb') as f:
pickle.dump((encoded, lookup_indices), f, protocol=4)
encoded = []
lookup_indices = []
prev_idx = idx
gc.collect()
if len(lookup_indices) > 0:
encoded = np.concatenate(encoded)
with open(os.path.join(self.args.output_dir, "embeddings.corpus.rank.{}.{}-{}".format(self.args.process_index, prev_idx, idx)), 'wb') as f:
pickle.dump((encoded, lookup_indices), f, protocol=4)
del encoded
del lookup_indices
if self.args.world_size > 1:
torch.distributed.barrier()
def init_index_and_add(self, partition: str = None):
logger.info(
"Initializing Faiss index from pre-computed document embeddings")
partitions = [partition] if partition is not None else glob.glob(
os.path.join(self.args.output_dir, "embeddings.corpus.rank.*"))
for i, part in enumerate(partitions):
with open(part, 'rb') as f:
data = pickle.load(f)
encoded = data[0]
lookup_indices = data[1]
if i == 0:
dim = encoded.shape[1]
self._initialize_faiss_index(dim)
self.index.add(encoded)
self.doc_lookup.extend(lookup_indices)
@classmethod
def build_all(cls, model: DRModelForInference, corpus_dataset: IterableDataset, args: EncodingArguments):
retriever = cls(model, corpus_dataset, args)
retriever.doc_embedding_inference()
if args.process_index == 0:
retriever.init_index_and_add()
if args.world_size > 1:
torch.distributed.barrier()
return retriever
@classmethod
def build_embeddings(cls, model: DRModelForInference, corpus_dataset: IterableDataset, args: EncodingArguments):
retriever = cls(model, corpus_dataset, args)
retriever.doc_embedding_inference()
return retriever
@classmethod
def from_embeddings(cls, model: DRModelForInference, args: EncodingArguments):
retriever = cls(model, None, args)
if args.process_index == 0:
retriever.init_index_and_add()
if args.world_size > 1:
torch.distributed.barrier()
return retriever
def reset_index(self):
if self.index:
self.index.reset()
self.doc_lookup = []
self.query_lookup = []
def query_embedding_inference(self, query_dataset: IterableDataset):
dataloader = DataLoader(
query_dataset,
batch_size=self.args.per_device_eval_batch_size,
collate_fn=DRInferenceCollator(),
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
encoded = []
lookup_indices = []
for (batch_ids, batch) in tqdm(dataloader, disable=self.args.process_index > 0):
lookup_indices.extend(batch_ids)
with amp.autocast() if self.args.fp16 else nullcontext():
with torch.no_grad():
for k, v in batch.items():
batch[k] = v.to(self.args.device)
if not self.args.encode_query_as_passage:
model_output: DROutput = self.model(query=batch)
encoded.append(
model_output.q_reps.cpu().detach().numpy())
else:
model_output: DROutput = self.model(passage=batch)
encoded.append(
model_output.p_reps.cpu().detach().numpy())
if len(encoded) > 0: # If there is no data in the process, we don't do anything
encoded = np.concatenate(encoded)
with open(os.path.join(self.args.output_dir, "embeddings.query.rank.{}".format(self.args.process_index)), 'wb') as f:
pickle.dump((encoded, lookup_indices), f, protocol=4)
if self.args.world_size > 1:
torch.distributed.barrier()
def search(self, topk: int = 100):
logger.info("Searching")
if self.index is None:
raise ValueError("Index is not initialized")
encoded = []
for i in range(self.args.world_size):
with open(os.path.join(self.args.output_dir, "embeddings.query.rank.{}".format(i)), 'rb') as f:
data = pickle.load(f)
lookup_indices = data[1]
if len(lookup_indices) == 0: # No data
continue
encoded.append(data[0])
self.query_lookup.extend(lookup_indices)
encoded = np.concatenate(encoded)
return_dict = {}
D, I = self.index.search(encoded, topk)
original_indices = np.array(self.doc_lookup)[I]
q = 0
for scores_per_q, doc_indices_per_q in zip(D, original_indices):
qid = str(self.query_lookup[q])
return_dict[qid] = {}
for doc_index, score in zip(doc_indices_per_q, scores_per_q):
doc_index = str(doc_index)
if self.args.remove_identical and qid == doc_index:
continue
return_dict[qid][doc_index] = float(score)
q += 1
logger.info("End searching with {} queries".format(len(return_dict)))
return return_dict
def retrieve(self, query_dataset: IterableDataset, topk: int = 100):
self.query_embedding_inference(query_dataset)
self.model.cpu()
del self.model
torch.cuda.empty_cache()
results = {}
if self.args.process_index == 0:
if self.args.use_gpu:
self._move_index_to_gpu()
results = self.search(topk)
if self.args.world_size > 1:
torch.distributed.barrier()
return results
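# SuccessiveRetriever searches one embedding partition at a time and merges the
# per-partition top-k lists, trading search speed for a smaller peak memory footprint.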
class SuccessiveRetriever(Retriever):
def __init__(self, model: DRModelForInference, corpus_dataset: IterableDataset, args: EncodingArguments):
super().__init__(model, corpus_dataset, args)
@classmethod
def from_embeddings(cls, model: DRModelForInference, args: EncodingArguments):
retriever = cls(model, None, args)
return retriever
def retrieve(self, query_dataset: IterableDataset, topk: int = 100):
self.query_embedding_inference(query_dataset)
del self.model
torch.cuda.empty_cache()
final_result = {}
if self.args.process_index == 0:
all_partitions = glob.glob(os.path.join(
self.args.output_dir, "embeddings.corpus.rank.*"))
for partition in all_partitions:
logger.info("Loading partition {}".format(partition))
self.init_index_and_add(partition)
if self.args.use_gpu:
self._move_index_to_gpu()
cur_result = self.search(topk)
self.reset_index()
final_result = merge_retrieval_results_by_score(
[final_result, cur_result], topk)
if self.args.world_size > 1:
torch.distributed.barrier()
return final_result
| 10,514 | 38.382022 | 155 | py |
ccc_mse_ser | ccc_mse_ser-master/code/ser_iemocap_gemaps_ccc.py | # Dimensional speech emotion recognition
# To evaluate loss function (MSE vs CCC)
# Coded by Bagus Tris Atmaja ([email protected])
# changelog
# 2020-02-13: Modified from gemaps-paa hfs
# 2020-02-14: Use 'tanh' activation to lock the output range in [-1, 1]
# with RMSprop optimizer
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# load feature and labels
feat = np.load('/home/s1820002/spro2020/data/feat_ws_3.npy')
vad = np.load('/home/s1820002/IEMOCAP-Emotion-Detection/y_egemaps.npy')
# use only mean and std
feat = feat[:,:-1]
# for LSTM input shape (batch, steps, features/channel)
#feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# map outlier labels (0.5 and 5.5) back into the [1, 5] range
vad = np.where(vad==5.5, 5.0, vad)
vad = np.where(vad==0.5, 1.0, vad)
# standardization
scaled_feature = True
# set Dropout
do = 0.3
if scaled_feature == True:
scaler = StandardScaler()
scaler = scaler.fit(feat) #.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
scaled_feat = scaler.transform(feat) #.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
#scaled_feat = scaled_feat.reshape(feat.shape[0], feat.shape[1], feat.shape[2])
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
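# CCC(x, y) = 2*cov(x, y) / (var(x) + var(y) + (mean(x) - mean(y))^2); the tensor
# returned below is a per-element decomposition whose batch mean equals this value.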
def ccc(gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
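# Minimal NumPy reference for the same per-batch CCC (illustrative sketch, assuming
# 1-D float arrays x and y); the Keras metric above should average to this value.
def ccc_numpy(x, y):
    cov = np.mean((x - x.mean()) * (y - y.mean()))
    return 2 * cov / (x.var() + y.var() + (x.mean() - y.mean()) ** 2)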
# reshape input feature for LSTM
feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# Functional API model; when stacking LSTM layers, every layer except the last must use return_sequences=True
def api_model(alpha, beta, gamma):
# speech network
input_speech = Input(shape=(feat.shape[1], feat.shape[2]), name='speech_input')
net_speech = BatchNormalization()(input_speech)
net_speech = CuDNNLSTM(feat.shape[2], return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(256, return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(256, return_sequences=False)(net_speech)
#net_speech = Flatten()(net_speech)
net_speech = Dense(64)(net_speech)
#net_speech = Dropout(0.1)(net_speech)
target_names = ('v', 'a', 'd')
model_combined = [Dense(1, name=name, activation='tanh')(net_speech) for name in target_names]
model = Model(input_speech, model_combined)
#model.compile(loss=ccc_loss, optimizer='rmsprop', metrics=[ccc])
model.compile(loss=ccc_loss,
loss_weights={'v': alpha, 'a': beta, 'd': gamma},
optimizer='rmsprop', metrics=[ccc, 'mse'])
return model
#def main(alpha, beta, gamma):
model = api_model(0.1, 0.5, 0.4)
model.summary()
# 7869 first data of session 5 (for LOSO)
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10,
restore_best_weights=True)
hist = model.fit(feat[:7869], vad[:7869].T.tolist(), batch_size=64, #best:8
validation_split=0.2, epochs=200, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model.evaluate(feat[7869:], vad[7869:].T.tolist())
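# Keras orders evaluate() results as [loss, v_loss, a_loss, d_loss,
# v_ccc, v_mse, a_ccc, a_mse, d_ccc, d_mse], so indices -6/-4/-2 pick the
# per-dimension CCCs and -5/-3/-1 the corresponding MSEs.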
print('CCC= ', np.array(metrik)[[-6,-4,-2]])
print('MSE= ', np.array(metrik)[[-5,-3,-1]])
# Plot scatter
#va = vad[7869:, :-1]
#predik_vad = model.predict(feat[7869:], batch_size=64)
#predik_va = np.array(predik_vad).T.reshape(2170,3)[:,:-1]
#import matplotlib.pyplot as plt
#plt.scatter(va[:,0], va[:,1])
#plt.scatter(predik_va[:,0], predik_va[:,1])
#plt.savefig('scatter_gemaps_mse.pdf')
## check max min
#predik_va.max()
#predik_va.min()
| 4,905 | 34.042857 | 123 | py |
ccc_mse_ser | ccc_mse_ser-master/code/ser_iemocap_gemaps_mse.py | # Dimensional speech emotion recognition
# To evaluate loss function (MSE vs CCC)
# Coded by Bagus Tris Atmaja ([email protected])
# changelog
# 2020-02-13: Modified from gemaps-paa hfs
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# load feature and labels
feat = np.load('/home/s1820002/spro2020/data/feat_ws_3.npy')
vad = np.load('/home/s1820002/IEMOCAP-Emotion-Detection/y_egemaps.npy')
# use only mean and std
feat = feat[:,:-1]
# for LSTM input shape (batch, steps, features/channel)
#feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# map outlier labels (0.5 and 5.5) back into the [1, 5] range
vad = np.where(vad==5.5, 5.0, vad)
vad = np.where(vad==0.5, 1.0, vad)
# standardization
scaled_feature = True
if scaled_feature == True:
scaler = StandardScaler()
scaler = scaler.fit(feat) #.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
scaled_feat = scaler.transform(feat)#.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
#scaled_feat = scaled_feat.reshape(feat.shape[0], feat.shape[1], feat.shape[2])
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
def ccc(gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
# reshape input feature for LSTM
feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# Functional API model; when stacking LSTM layers, every layer except the last must use return_sequences=True
def api_model(alpha, beta, gamma):
# speech network
input_speech = Input(shape=(feat.shape[1], feat.shape[2]), name='speech_input')
net_speech = BatchNormalization()(input_speech)
net_speech = CuDNNLSTM(feat.shape[2], return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(256, return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(256, return_sequences=False)(net_speech)
#net_speech = Flatten()(net_speech)
net_speech = Dense(64)(net_speech)
#net_speech = Dropout(0.1)(net_speech)
target_names = ('v', 'a', 'd')
model_combined = [Dense(1, name=name, activation='tanh')(net_speech) for name in target_names]
model = Model(input_speech, model_combined)
#model.compile(loss=ccc_loss, optimizer='rmsprop', metrics=[ccc])
model.compile(loss='mse',
loss_weights={'v': alpha, 'a': beta, 'd': gamma},
optimizer='rmsprop', metrics=[ccc, 'mse'])
return model
#def main(alpha, beta, gamma):
model = api_model(0.1, 0.5, 0.4)
model.summary()
# 7869 first data of session 5 (for LOSO)
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10,
restore_best_weights=True)
hist = model.fit(feat[:7869], vad[:7869].T.tolist(), batch_size=64, #best:8
validation_split=0.2, epochs=200, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model.evaluate(feat[7869:], vad[7869:].T.tolist())
print('CCC= ', np.array(metrik)[[-6,-4,-2]])
print('MSE= ', np.array(metrik)[[-5,-3,-1]])
# Plot scatter
#va = vad[7869:, :-1]
#predik_vad = model.predict(feat[7869:], batch_size=64)
#predik_va = np.array(predik_vad).T.reshape(2170,3)[:,:-1]
#import matplotlib.pyplot as plt
#plt.scatter(va[:,0], va[:,1])
#plt.scatter(predik_va[:,0], predik_va[:,1])
#plt.savefig('scatter_gemaps_mse.pdf')
## check max min
#predik_va.max()
#predik_va.min()
| 4,769 | 34.333333 | 123 | py |
ccc_mse_ser | ccc_mse_ser-master/code/ser_improv_gemaps_mse.py | # ser_improv_gemaps_mse.py
# speech emotion recognition for MSP-IMPROV dataset with GeMAPS
# HFS features using MSE loss function
# coded by Bagus Tris Atmaja ([email protected])
# changelog:
# 2020-02-13: Initial code, modified from deepMLP repo
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# loading file and label
feat_train = np.load('/home/s1820002/ccc_mse/data/feat_hfs_gemaps_msp_train.npy')
feat_test = np.load('/home/s1820002/ccc_mse/data/feat_hfs_gemaps_msp_test.npy')
feat = np.vstack([feat_train, feat_test])
list_path = '/home/s1820002/msp-improv/helper/improv_data.csv'
list_file = pd.read_csv(list_path, index_col=None)
list_file = pd.DataFrame(list_file)
data = list_file.sort_values(by=['wavfile'])
vad_train = []
vad_test = []
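# NOTE: character 18 of the wavfile name is assumed here to be the MSP-IMPROV
# session digit: sessions 1-5 go to the training split, the remaining data to the test split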
for index, row in data.iterrows():
#print(row['wavfile'], row['v'], row['a'], row['d'])
if int(row['wavfile'][18]) in range(1,6):
#print("Process vad..", row['wavfile'])
vad_train.append([row['v'], row['a'], row['d']])
else:
#print("Process..", row['wavfile'])
vad_test.append([row['v'], row['a'], row['d']])
vad = np.vstack([vad_train, vad_test])
# standardization
scaled_feature = True
if scaled_feature:
scaler = StandardScaler()
scaler = scaler.fit(feat)
scaled_feat = scaler.transform(feat)
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# reshape feat size to match LSTM config
feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# train/test split, LOSO
X_train = feat[:len(feat_train)]
X_test = feat[len(feat_train):]
y_train = vad[:len(vad_train)]
y_test = vad[len(vad_train):]
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
def ccc(gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
# API model, if use RNN, first two rnn layer must return_sequences=True
def api_model():
inputs = Input(shape=(feat.shape[1], feat.shape[2]), name='feat_input')
net = BatchNormalization()(inputs)
#net = Bidirectional(LSTM(64, return_sequences=True, dropout=do, recurrent_dropout=do))(net)
net = CuDNNLSTM(feat.shape[2], return_sequences=True)(net)
net = CuDNNLSTM(256, return_sequences=True)(net)
net = CuDNNLSTM(256, return_sequences=False)(net)
net = Dense(64)(net)
target_names = ('v', 'a', 'd')
outputs = [Dense(1, name=name, activation='tanh')(net) for name in target_names]
model = Model(inputs=inputs, outputs=outputs) #=[out1, out2, out3])
model.compile(loss='mse', #{'v': ccc_loss, 'a': ccc_loss, 'd': ccc_loss},
loss_weights={'v': 0.3, 'a': 0.6, 'd': 0.1},
optimizer='rmsprop', metrics=[ccc, 'mse'])
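    # multi-task weighting: arousal ('a', 0.6) is emphasised over valence ('v', 0.3)
    # and dominance ('d', 0.1)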
return model
model2 = api_model()
model2.summary()
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10, restore_best_weights=True)
hist = model2.fit(X_train, y_train.T.tolist(), batch_size=64,
validation_split=0.2, epochs=50, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model2.evaluate(X_test, y_test.T.tolist())
print('CCC= ', np.array(metrik)[[-6,-4,-2]])
print('MSE= ', np.array(metrik)[[-5,-3,-1]])
| 4,639 | 32.868613 | 123 | py |
ccc_mse_ser | ccc_mse_ser-master/code/ser_iemocap_paa_ccc.py | # Dimensional speech emotion recognition from acoustic
# Changelog:
# 2019-09-01: initial version
# 2019-10-06: optimizer MTL parameters with linear search (in progress)
# 2020-12-25: modified for ser_iemocap_loso_hfs.py
# feature is either std+mean or std+mean+silence (uncomment line 44)
# 2020-02-13: Modified to evaluate loss function (MSE vs CCC) for EUSIPCO
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# load feature and labels
feat = np.load('/home/s1820002/atsit/data/feat_34_hfs.npy')
vad = np.load('/home/s1820002/IEMOCAP-Emotion-Detection/y_egemaps.npy')
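# feat_34_hfs.npy presumably holds high-level statistical functionals (HFS) pooled over
# 34 pyAudioAnalysis low-level descriptors per utterance (inferred from the filename),
# while y_egemaps.npy provides the valence/arousal/dominance labels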
# for LSTM input shape (batch, steps, features/channel)
#feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# remove outlier, < 1, > 5
vad = np.where(vad==5.5, 5.0, vad)
vad = np.where(vad==0.5, 1.0, vad)
# standardization
scaled_feature = False
# set Dropout
do = 0.3
if scaled_feature == True:
scaler = StandardScaler()
scaler = scaler.fit(feat.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
scaled_feat = scaler.transform(feat.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
scaled_feat = scaled_feat.reshape(feat.shape[0], feat.shape[1], feat.shape[2])
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
def ccc(gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
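# minimizing 1 - CCC maximizes agreement: since CCC lies in [-1, 1],
# this loss lies in [0, 2] and reaches 0 only for perfect agreement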
# API model, if use RNN, first two rnn layer must return_sequences=True
def api_model(alpha, beta, gamma):
# speech network
input_speech = Input(shape=(feat.shape[1], feat.shape[2]), name='speech_input')
net_speech = BatchNormalization()(input_speech)
net_speech = CuDNNLSTM(feat.shape[2], return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(256, return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(256, return_sequences=False)(net_speech)
#net_speech = Flatten()(net_speech)
net_speech = Dense(64)(net_speech)
#net_speech = Dropout(0.1)(net_speech)
target_names = ('v', 'a', 'd')
model_combined = [Dense(1, name=name, activation='tanh')(net_speech) for name in target_names]
model = Model(input_speech, model_combined)
#model.compile(loss=ccc_loss, optimizer='rmsprop', metrics=[ccc])
model.compile(loss=ccc_loss,
loss_weights={'v': alpha, 'a': beta, 'd': gamma},
optimizer='rmsprop', metrics=[ccc, 'mse'])
return model
#def main(alpha, beta, gamma):
model = api_model(0.1, 0.5, 0.4)
model.summary()
# the first 7869 utterances are sessions 1-4; index 7869 onward is session 5, held out for LOSO
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10,
restore_best_weights=True)
hist = model.fit(feat[:7869], vad[:7869].T.tolist(), batch_size=64, #best:8
validation_split=0.2, epochs=200, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model.evaluate(feat[7869:], vad[7869:].T.tolist())
print('CCC= ', np.array(metrik)[[-6,-4,-2]])
print('MSE= ', np.array(metrik)[[-5,-3,-1]])
| 4,495 | 35.552846 | 123 | py |
ccc_mse_ser | ccc_mse_ser-master/code/ser_improv_paa_ccc.py | # ser_improv_paa_ccc.py
# speech emotion recognition for MSP-IMPROV dataset with pyAudioAnalysis
# HFS features using CCC-based loss function
# coded by Bagus Tris Atmaja ([email protected])
# changelog:
# 2020-02-13: Initial code, modified from deepMLP repo
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# loading file and label
feat_train = np.load('/home/s1820002/ccc_mse/data/feat_hfs_paa_msp_train.npy')
feat_test = np.load('/home/s1820002/ccc_mse/data/feat_hfs_paa_msp_test.npy')
feat = np.vstack([feat_train, feat_test])
list_path = '/home/s1820002/msp-improv/helper/improv_data.csv'
list_file = pd.read_csv(list_path, index_col=None)
list_file = pd.DataFrame(list_file)
data = list_file.sort_values(by=['wavfile'])
vad_train = []
vad_test = []
for index, row in data.iterrows():
#print(row['wavfile'], row['v'], row['a'], row['d'])
if int(row['wavfile'][18]) in range(1,6):
#print("Process vad..", row['wavfile'])
vad_train.append([row['v'], row['a'], row['d']])
else:
#print("Process..", row['wavfile'])
vad_test.append([row['v'], row['a'], row['d']])
vad = np.vstack([vad_train, vad_test])
# standardization
scaled_feature = False
if scaled_feature:
scaler = StandardScaler()
scaler = scaler.fit(feat)
scaled_feat = scaler.transform(feat)
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# reshape feat size to match LSTM config
feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# train/test split, LOSO
X_train = feat[:len(feat_train)]
X_test = feat[len(feat_train):]
y_train = vad[:len(vad_train)]
y_test = vad[len(vad_train):]
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
def ccc(gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
# API model, if use RNN, first two rnn layer must return_sequences=True
def api_model():
inputs = Input(shape=(feat.shape[1], feat.shape[2]), name='feat_input')
net = BatchNormalization()(inputs)
#net = Bidirectional(LSTM(64, return_sequences=True, dropout=do, recurrent_dropout=do))(net)
net = CuDNNLSTM(feat.shape[2], return_sequences=True)(net)
net = CuDNNLSTM(256, return_sequences=True)(net)
net = CuDNNLSTM(256, return_sequences=False)(net)
net = Dense(64)(net)
#net = Dropout(0.1)(net)
target_names = ('v', 'a', 'd')
outputs = [Dense(1, name=name, activation='tanh')(net) for name in target_names]
model = Model(inputs=inputs, outputs=outputs) #=[out1, out2, out3])
model.compile(loss=ccc_loss, #{'v': ccc_loss, 'a': ccc_loss, 'd': ccc_loss},
loss_weights={'v': 0.3, 'a': 0.6, 'd': 0.1},
optimizer='rmsprop', metrics=[ccc, 'mse'])
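    # here 1 - CCC is the training objective; MSE is only monitored as a metric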
return model
model2 = api_model()
model2.summary()
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10, restore_best_weights=True)
hist = model2.fit(X_train, y_train.T.tolist(), batch_size=64,
validation_split=0.2, epochs=50, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model2.evaluate(X_test, y_test.T.tolist())
print('CCC= ', np.array(metrik)[[-6,-4,-2]])
print('MSE= ', np.array(metrik)[[-5,-3,-1]])
| 4,666 | 32.818841 | 123 | py |
ccc_mse_ser | ccc_mse_ser-master/code/ser_improv_paa_mse.py | # ser_improv_paa_mse.py
# speech emotion recognition for MSP-IMPROV dataset with pyAudioAnalysis
# HFS features using MSE loss function
# coded by Bagus Tris Atmaja ([email protected])
# changelog:
# 2020-02-13: Initial code, modified from deepMLP repo
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# loading file and label
feat_train = np.load('/home/s1820002/ccc_mse/data/feat_hfs_paa_msp_train.npy')
feat_test = np.load('/home/s1820002/ccc_mse/data/feat_hfs_paa_msp_test.npy')
feat = np.vstack([feat_train, feat_test])
list_path = '/home/s1820002/msp-improv/helper/improv_data.csv'
list_file = pd.read_csv(list_path, index_col=None)
list_file = pd.DataFrame(list_file)
data = list_file.sort_values(by=['wavfile'])
vad_train = []
vad_test = []
for index, row in data.iterrows():
#print(row['wavfile'], row['v'], row['a'], row['d'])
if int(row['wavfile'][18]) in range(1,6):
#print("Process vad..", row['wavfile'])
vad_train.append([row['v'], row['a'], row['d']])
else:
#print("Process..", row['wavfile'])
vad_test.append([row['v'], row['a'], row['d']])
vad = np.vstack([vad_train, vad_test])
# standardization
scaled_feature = False
if scaled_feature:
scaler = StandardScaler()
scaler = scaler.fit(feat)
scaled_feat = scaler.transform(feat)
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# reshape feat size to match LSTM config
feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# train/test split, LOSO
X_train = feat[:len(feat_train)]
X_test = feat[len(feat_train):]
y_train = vad[:len(vad_train)]
y_test = vad[len(vad_train):]
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
def ccc(gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
# API model, if use RNN, first two rnn layer must return_sequences=True
def api_model():
inputs = Input(shape=(feat.shape[1], feat.shape[2]), name='feat_input')
net = BatchNormalization()(inputs)
#net = Bidirectional(LSTM(64, return_sequences=True, dropout=do, recurrent_dropout=do))(net)
net = CuDNNLSTM(feat.shape[2], return_sequences=True)(net)
net = CuDNNLSTM(256, return_sequences=True)(net)
net = CuDNNLSTM(256, return_sequences=False)(net)
net = Dense(64)(net)
#net = Dropout(0.1)(net)
target_names = ('v', 'a', 'd')
outputs = [Dense(1, name=name, activation='tanh')(net) for name in target_names]
model = Model(inputs=inputs, outputs=outputs) #=[out1, out2, out3])
model.compile(loss='mse', #{'v': ccc_loss, 'a': ccc_loss, 'd': ccc_loss},
loss_weights={'v': 0.3, 'a': 0.6, 'd': 0.1},
optimizer='rmsprop', metrics=[ccc, 'mse'])
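    # MSE counterpart of the CCC-loss variant: MSE is the training objective here,
    # while CCC is only monitored as a metric (for the CCC vs. MSE loss comparison)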
return model
model2 = api_model()
model2.summary()
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10, restore_best_weights=True)
hist = model2.fit(X_train, y_train.T.tolist(), batch_size=64,
validation_split=0.2, epochs=50, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model2.evaluate(X_test, y_test.T.tolist())
print('CCC= ', np.array(metrik)[[-6,-4,-2]])
print('MSE= ', np.array(metrik)[[-5,-3,-1]])
| 4,663 | 32.797101 | 123 | py |
ccc_mse_ser | ccc_mse_ser-master/code/ser_improv_gemaps_ccc.py | # ser_improv_gemaps_ccc.py
# speech emotion recognition for MSP-IMPROV dataset with GeMAPS
# HFS features using CCC-based loss function
# coded by Bagus Tris Atmaja ([email protected])
# changelog:
# 2020-02-13: Initial code, modified from deepMLP repo
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# loading file and label
feat_train = np.load('/home/s1820002/ccc_mse/data/feat_hfs_gemaps_msp_train.npy')
feat_test = np.load('/home/s1820002/ccc_mse/data/feat_hfs_gemaps_msp_test.npy')
feat = np.vstack([feat_train, feat_test])
list_path = '/home/s1820002/msp-improv/helper/improv_data.csv'
list_file = pd.read_csv(list_path, index_col=None)
list_file = pd.DataFrame(list_file)
data = list_file.sort_values(by=['wavfile'])
vad_train = []
vad_test = []
for index, row in data.iterrows():
#print(row['wavfile'], row['v'], row['a'], row['d'])
if int(row['wavfile'][18]) in range(1,6):
#print("Process vad..", row['wavfile'])
vad_train.append([row['v'], row['a'], row['d']])
else:
#print("Process..", row['wavfile'])
vad_test.append([row['v'], row['a'], row['d']])
vad = np.vstack([vad_train, vad_test])
# standardization
scaled_feature = True
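# unlike the pyAudioAnalysis variants in this repo (scaled_feature = False),
# the GeMAPS HFS features are standardized before training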
if scaled_feature:
scaler = StandardScaler()
scaler = scaler.fit(feat)
scaled_feat = scaler.transform(feat)
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# reshape feat size to match LSTM config
feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# train/test split, LOSO
X_train = feat[:len(feat_train)]
X_test = feat[len(feat_train):]
y_train = vad[:len(vad_train)]
y_test = vad[len(vad_train):]
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
def ccc(gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
# API model, if use RNN, first two rnn layer must return_sequences=True
def api_model():
inputs = Input(shape=(feat.shape[1], feat.shape[2]), name='feat_input')
net = BatchNormalization()(inputs)
net = CuDNNLSTM(feat.shape[2], return_sequences=True)(net)
net = CuDNNLSTM(256, return_sequences=True)(net)
net = CuDNNLSTM(256, return_sequences=False)(net)
net = Dense(64)(net)
target_names = ('v', 'a', 'd')
outputs = [Dense(1, name=name, activation='tanh')(net) for name in target_names]
model = Model(inputs=inputs, outputs=outputs) #=[out1, out2, out3])
model.compile(loss=ccc_loss, #{'v': ccc_loss, 'a': ccc_loss, 'd': ccc_loss},
loss_weights={'v': 0.3, 'a': 0.6, 'd': 0.1},
optimizer='rmsprop', metrics=[ccc, 'mse'])
return model
model2 = api_model()
model2.summary()
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10, restore_best_weights=True)
hist = model2.fit(X_train, y_train.T.tolist(), batch_size=64,
validation_split=0.2, epochs=50, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model2.evaluate(X_test, y_test.T.tolist())
print('CCC= ', np.array(metrik)[[-6,-4,-2]])
print('MSE= ', np.array(metrik)[[-5,-3,-1]])
| 4,545 | 32.426471 | 123 | py |
ccc_mse_ser | ccc_mse_ser-master/code/ser_iemocap_paa_mse.py | # CSL Paper: Dimensional speech emotion recognition from acoustic and text
# Changelog:
# 2019-09-01: initial version
# 2019-10-06: optimizer MTL parameters with linear search (in progress)
# 2012-12-25: modified for ser_iemocap_loso_hfs.py
# feature is either std+mean or std+mean+silence (uncomment line 44)
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# load feature and labels
feat = np.load('/home/s1820002/atsit/data/feat_34_hfs.npy')
vad = np.load('/home/s1820002/IEMOCAP-Emotion-Detection/y_egemaps.npy')
# for LSTM input shape (batch, steps, features/channel)
#feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# remove outlier, < 1, > 5
vad = np.where(vad==5.5, 5.0, vad)
vad = np.where(vad==0.5, 1.0, vad)
# standardization
scaled_feature = False
# set Dropout
do = 0.3
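# note: `do` is declared as a dropout rate but is not used below;
# the Dropout layer in api_model is commented out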
if scaled_feature == True:
scaler = StandardScaler()
scaler = scaler.fit(feat.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
scaled_feat = scaler.transform(feat.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
scaled_feat = scaled_feat.reshape(feat.shape[0], feat.shape[1], feat.shape[2])
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
def ccc(gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
# API model, if use RNN, first two rnn layer must return_sequences=True
def api_model(alpha, beta, gamma):
# speech network
input_speech = Input(shape=(feat.shape[1], feat.shape[2]), name='speech_input')
net_speech = BatchNormalization()(input_speech)
net_speech = CuDNNLSTM(feat.shape[2], return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(256, return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(256, return_sequences=False)(net_speech)
#net_speech = Flatten()(net_speech)
net_speech = Dense(64)(net_speech)
#net_speech = Dropout(0.1)(net_speech)
target_names = ('v', 'a', 'd')
model_combined = [Dense(1, name=name, activation='tanh')(net_speech) for name in target_names]
model = Model(input_speech, model_combined)
#model.compile(loss=ccc_loss, optimizer='rmsprop', metrics=[ccc])
model.compile(loss='mse',
loss_weights={'v': alpha, 'a': beta, 'd': gamma},
optimizer='rmsprop', metrics=[ccc, 'mse'])
return model
#def main(alpha, beta, gamma):
model = api_model(0.1, 0.5, 0.4)
model.summary()
# the first 7869 utterances are sessions 1-4; index 7869 onward is session 5, held out for LOSO
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10,
restore_best_weights=True)
hist = model.fit(feat[:7869], vad[:7869].T.tolist(), batch_size=64, #best:8
validation_split=0.2, epochs=200, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model.evaluate(feat[7869:], vad[7869:].T.tolist())
print(metrik)
print('CCC= ', np.array(metrik)[[-6,-4,-2]])
print('MSE= ', np.array(metrik)[[-5,-3,-1]])
| 4,452 | 35.203252 | 123 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/distributed_sampler_no_evenly_divisible.py | import math
import torch
from torch.utils.data import Sampler
import torch.distributed as dist
class DistributedSamplerNoEvenlyDivisible(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
shuffle (optional): If true (default), sampler will shuffle the indices
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
num_samples = int(math.floor(len(self.dataset) * 1.0 / self.num_replicas))
rest = len(self.dataset) - num_samples * self.num_replicas
if self.rank < rest:
num_samples += 1
self.num_samples = num_samples
self.total_size = len(dataset)
# self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
# indices += indices[:(self.total_size - len(indices))]
# assert len(indices) == self.total_size
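        # unlike torch.utils.data.DistributedSampler, no indices are duplicated here to make
        # the split evenly divisible, so every sample is evaluated exactly once
        # (ranks may therefore receive slightly different sample counts)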
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
self.num_samples = len(indices)
# assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
| 2,659 | 35.438356 | 82 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/bts_live_3d.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from __future__ import absolute_import, division, print_function
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
import os
import sys
import time
import argparse
import numpy as np
# Computer Vision
import cv2
from scipy import ndimage
from skimage.transform import resize
# Visualization
import matplotlib.pyplot as plt
plasma = plt.get_cmap('plasma')
greys = plt.get_cmap('Greys')
# UI and OpenGL
from PySide2 import QtCore, QtGui, QtWidgets, QtOpenGL
from OpenGL import GL, GLU
from OpenGL.arrays import vbo
from OpenGL.GL import shaders
import glm
# Argument Parser
parser = argparse.ArgumentParser(description='BTS Live 3D')
parser.add_argument('--model_name', type=str, help='model name', default='bts_nyu_v2')
parser.add_argument('--encoder', type=str, help='type of encoder, densenet121_bts or densenet161_bts', default='densenet161_bts')
parser.add_argument('--max_depth', type=float, help='maximum depth in estimation', default=10)
parser.add_argument('--checkpoint_path', type=str, help='path to a checkpoint to load', required=True)
parser.add_argument('--input_height', type=int, help='input height', default=480)
parser.add_argument('--input_width', type=int, help='input width', default=640)
parser.add_argument('--dataset', type=str, help='dataset this model trained on', default='nyu')
args = parser.parse_args()
model_dir = os.path.join("./models", args.model_name)
sys.path.append(model_dir)
for key, val in vars(__import__(args.model_name)).items():
if key.startswith('__') and key.endswith('__'):
continue
vars()[key] = val
# Image shapes
height_rgb, width_rgb = 480, 640
height_depth, width_depth = height_rgb, width_rgb
height_rgb = height_rgb
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
# Intrinsic parameters for your own webcam camera
camera_matrix = np.zeros(shape=(3, 3))
camera_matrix[0, 0] = 5.4765313594010649e+02
camera_matrix[0, 2] = 3.2516069906172453e+02
camera_matrix[1, 1] = 5.4801781476172562e+02
camera_matrix[1, 2] = 2.4794113960783835e+02
camera_matrix[2, 2] = 1
dist_coeffs = np.array([ 3.7230261423972011e-02, -1.6171708069773008e-01, -3.5260752900266357e-04, 1.7161234226767313e-04, 1.0192711400840315e-01 ])
# Parameters for a model trained on NYU Depth V2
new_camera_matrix = np.zeros(shape=(3, 3))
new_camera_matrix[0, 0] = 518.8579
new_camera_matrix[0, 2] = 320
new_camera_matrix[1, 1] = 518.8579
new_camera_matrix[1, 2] = 240
new_camera_matrix[2, 2] = 1
R = np.identity(3, dtype=float)
map1, map2 = cv2.initUndistortRectifyMap(camera_matrix, dist_coeffs, R, new_camera_matrix, (640, 480), cv2.CV_32FC1)
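# map1/map2 (used with cv2.remap in update_input) undistort webcam frames and reproject them
# to the NYU-style pinhole intrinsics above, so live input roughly matches the camera model
# the depth network was trained with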
def load_model():
args.mode = 'test'
model = BtsModel(params=args)
model = torch.nn.DataParallel(model)
checkpoint = torch.load(args.checkpoint_path)
model.load_state_dict(checkpoint['model'])
model.eval()
model.cuda()
return model
# Function timing
ticTime = time.time()
def tic():
global ticTime;
ticTime = time.time()
def toc():
print('{0} seconds.'.format(time.time() - ticTime))
# Conversion from Numpy to QImage and back
def np_to_qimage(a):
im = a.copy()
return QtGui.QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QtGui.QImage.Format_RGB888).copy()
def qimage_to_np(img):
img = img.convertToFormat(QtGui.QImage.Format.Format_ARGB32)
return np.array(img.constBits()).reshape(img.height(), img.width(), 4)
# Compute edge magnitudes
def edges(d):
dx = ndimage.sobel(d, 0) # horizontal derivative
dy = ndimage.sobel(d, 1) # vertical derivative
return np.abs(dx) + np.abs(dy)
# Main window
class Window(QtWidgets.QWidget):
updateInput = QtCore.Signal()
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self.model = None
self.capture = None
self.glWidget = GLWidget()
mainLayout = QtWidgets.QVBoxLayout()
# Input / output views
viewsLayout = QtWidgets.QGridLayout()
self.inputViewer = QtWidgets.QLabel("[Click to start]")
self.inputViewer.setPixmap(QtGui.QPixmap(width_rgb, height_rgb))
self.outputViewer = QtWidgets.QLabel("[Click to start]")
self.outputViewer.setPixmap(QtGui.QPixmap(width_rgb, height_rgb))
imgsFrame = QtWidgets.QFrame()
inputsLayout = QtWidgets.QVBoxLayout()
imgsFrame.setLayout(inputsLayout)
inputsLayout.addWidget(self.inputViewer)
inputsLayout.addWidget(self.outputViewer)
viewsLayout.addWidget(imgsFrame, 0, 0)
viewsLayout.addWidget(self.glWidget, 0, 1)
viewsLayout.setColumnStretch(1, 10)
mainLayout.addLayout(viewsLayout)
# Load depth estimation model
toolsLayout = QtWidgets.QHBoxLayout()
self.button2 = QtWidgets.QPushButton("Webcam")
self.button2.clicked.connect(self.loadCamera)
toolsLayout.addWidget(self.button2)
self.button4 = QtWidgets.QPushButton("Pause")
self.button4.clicked.connect(self.loadImage)
toolsLayout.addWidget(self.button4)
self.button6 = QtWidgets.QPushButton("Refresh")
self.button6.clicked.connect(self.updateCloud)
toolsLayout.addWidget(self.button6)
mainLayout.addLayout(toolsLayout)
self.setLayout(mainLayout)
self.setWindowTitle(self.tr("BTS Live"))
# Signals
self.updateInput.connect(self.update_input)
# Default example
if self.glWidget.rgb.any() and self.glWidget.depth.any():
img = (self.glWidget.rgb * 255).astype('uint8')
self.inputViewer.setPixmap(QtGui.QPixmap.fromImage(np_to_qimage(img)))
coloredDepth = (plasma(self.glWidget.depth[:, :, 0])[:, :, :3] * 255).astype('uint8')
self.outputViewer.setPixmap(QtGui.QPixmap.fromImage(np_to_qimage(coloredDepth)))
def loadModel(self):
QtGui.QGuiApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
tic()
self.model = load_model()
print('Model loaded.')
toc()
self.updateCloud()
QtGui.QGuiApplication.restoreOverrideCursor()
def loadCamera(self):
tic()
self.model = load_model()
print('Model loaded.')
toc()
self.capture = cv2.VideoCapture(0)
self.updateInput.emit()
def loadVideoFile(self):
self.capture = cv2.VideoCapture('video.mp4')
self.updateInput.emit()
def loadImage(self):
self.capture = None
img = (self.glWidget.rgb * 255).astype('uint8')
self.inputViewer.setPixmap(QtGui.QPixmap.fromImage(np_to_qimage(img)))
self.updateCloud()
def loadImageFile(self):
self.capture = None
filename = \
QtWidgets.QFileDialog.getOpenFileName(None, 'Select image', '', self.tr('Image files (*.jpg *.png)'))[0]
img = QtGui.QImage(filename).scaledToHeight(height_rgb)
xstart = 0
if img.width() > width_rgb: xstart = (img.width() - width_rgb) // 2
        img = img.copy(xstart, 0, width_rgb, height_rgb)
self.inputViewer.setPixmap(QtGui.QPixmap.fromImage(img))
self.updateCloud()
def update_input(self):
# Don't update anymore if no capture device is set
if self.capture == None:
return
# Capture a frame
ret, frame = self.capture.read()
# Loop video playback if current stream is video file
if not ret:
self.capture.set(cv2.CAP_PROP_POS_FRAMES, 0)
ret, frame = self.capture.read()
# Prepare image and show in UI
frame_ud = cv2.remap(frame, map1, map2, interpolation=cv2.INTER_LINEAR)
frame = cv2.cvtColor(frame_ud, cv2.COLOR_BGR2RGB)
image = np_to_qimage(frame)
self.inputViewer.setPixmap(QtGui.QPixmap.fromImage(image))
# Update the point cloud
self.updateCloud()
def updateCloud(self):
rgb8 = qimage_to_np(self.inputViewer.pixmap().toImage())
self.glWidget.rgb = (rgb8[:, :, :3] / 255)[:, :, ::-1]
if self.model:
input_image = rgb8[:, :, :3].astype(np.float32)
# Normalize image
input_image[:, :, 0] = (input_image[:, :, 0] - 123.68) * 0.017
input_image[:, :, 1] = (input_image[:, :, 1] - 116.78) * 0.017
input_image[:, :, 2] = (input_image[:, :, 2] - 103.94) * 0.017
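            # the constants above are the standard ImageNet RGB channel means with a
            # fixed 0.017 scale (~1/58.8) applied to every channel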
input_image_cropped = input_image[32:-1 - 31, 32:-1 - 31, :]
input_images = np.expand_dims(input_image_cropped, axis=0)
input_images = np.transpose(input_images, (0, 3, 1, 2))
with torch.no_grad():
image = Variable(torch.from_numpy(input_images)).cuda()
focal = Variable(torch.tensor([518.8579])).cuda()
# Predict
lpg8x8, lpg4x4, lpg2x2, reduc1x1, depth_cropped = self.model(image, focal)
depth = np.zeros((480, 640), dtype=np.float32)
depth[32:-1-31, 32:-1-31] = depth_cropped[0].cpu().squeeze() / args.max_depth
coloredDepth = (greys(np.log10(depth * args.max_depth))[:, :, :3] * 255).astype('uint8')
self.outputViewer.setPixmap(QtGui.QPixmap.fromImage(np_to_qimage(coloredDepth)))
self.glWidget.depth = depth
else:
self.glWidget.depth = 0.5 + np.zeros((height_rgb // 2, width_rgb // 2, 1))
self.glWidget.updateRGBD()
self.glWidget.updateGL()
# Update to next frame if we are live
QtCore.QTimer.singleShot(10, self.updateInput)
class GLWidget(QtOpenGL.QGLWidget):
def __init__(self, parent=None):
QtOpenGL.QGLWidget.__init__(self, parent)
self.object = 0
self.xRot = 5040
self.yRot = 40
self.zRot = 0
self.zoomLevel = 9
self.lastPos = QtCore.QPoint()
self.green = QtGui.QColor.fromCmykF(0.0, 0.0, 0.0, 1.0)
self.black = QtGui.QColor.fromCmykF(0.0, 0.0, 0.0, 1.0)
# Precompute for world coordinates
self.xx, self.yy = self.worldCoords(width=width_rgb, height=height_rgb)
self.rgb = np.zeros((480, 640, 3), dtype=np.uint8)
self.depth = np.zeros((480, 640), dtype=np.float32)
self.col_vbo = None
self.pos_vbo = None
        if self.rgb.any() and self.depth.any():
self.updateRGBD()
def xRotation(self):
return self.xRot
def yRotation(self):
return self.yRot
def zRotation(self):
return self.zRot
def minimumSizeHint(self):
return QtCore.QSize(640, 480)
def sizeHint(self):
return QtCore.QSize(640, 480)
def setXRotation(self, angle):
if angle != self.xRot:
self.xRot = angle
self.emit(QtCore.SIGNAL("xRotationChanged(int)"), angle)
self.updateGL()
def setYRotation(self, angle):
if angle != self.yRot:
self.yRot = angle
self.emit(QtCore.SIGNAL("yRotationChanged(int)"), angle)
self.updateGL()
def setZRotation(self, angle):
if angle != self.zRot:
self.zRot = angle
self.emit(QtCore.SIGNAL("zRotationChanged(int)"), angle)
self.updateGL()
def resizeGL(self, width, height):
GL.glViewport(0, 0, width, height)
def mousePressEvent(self, event):
self.lastPos = QtCore.QPoint(event.pos())
def mouseMoveEvent(self, event):
dx = -(event.x() - self.lastPos.x())
dy = (event.y() - self.lastPos.y())
if event.buttons() & QtCore.Qt.LeftButton:
self.setXRotation(self.xRot + dy)
self.setYRotation(self.yRot + dx)
elif event.buttons() & QtCore.Qt.RightButton:
self.setXRotation(self.xRot + dy)
self.setZRotation(self.zRot + dx)
self.lastPos = QtCore.QPoint(event.pos())
def wheelEvent(self, event):
numDegrees = event.delta() / 8
numSteps = numDegrees / 15
self.zoomLevel = self.zoomLevel + numSteps
event.accept()
self.updateGL()
def initializeGL(self):
self.qglClearColor(self.black.darker())
GL.glShadeModel(GL.GL_FLAT)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glEnable(GL.GL_CULL_FACE)
VERTEX_SHADER = shaders.compileShader("""#version 330
layout(location = 0) in vec3 position;
layout(location = 1) in vec3 color;
uniform mat4 mvp; out vec4 frag_color;
void main() {gl_Position = mvp * vec4(position, 1.0);frag_color = vec4(color, 1.0);}""", GL.GL_VERTEX_SHADER)
FRAGMENT_SHADER = shaders.compileShader("""#version 330
in vec4 frag_color; out vec4 out_color;
void main() {out_color = frag_color;}""", GL.GL_FRAGMENT_SHADER)
self.shaderProgram = shaders.compileProgram(VERTEX_SHADER, FRAGMENT_SHADER)
self.UNIFORM_LOCATIONS = {
'position': GL.glGetAttribLocation(self.shaderProgram, 'position'),
'color': GL.glGetAttribLocation(self.shaderProgram, 'color'),
'mvp': GL.glGetUniformLocation(self.shaderProgram, 'mvp'),
}
shaders.glUseProgram(self.shaderProgram)
def paintGL(self):
if self.rgb.any() and self.depth.any():
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
self.drawObject()
def worldCoords(self, width, height):
cx, cy = width / 2, height / 2
fx = 518.8579
fy = 518.8579
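        # pinhole back-projection: for a pixel (u, v) at depth Z,
        # X = (u - cx) * Z / fx and Y = (v - cy) * Z / fy; the depth Z is
        # multiplied in later in posFromDepth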
xx, yy = np.tile(range(width), height), np.repeat(range(height), width)
xx = (xx - cx) / fx
yy = (yy - cy) / fy
return xx, yy
def posFromDepth(self, depth):
length = depth.shape[0] * depth.shape[1]
depth[edges(depth) > 0.3] = 1e6 # Hide depth edges
z = depth.reshape(length)
return np.dstack((self.xx * z, self.yy * z, z)).reshape((length, 3))
def createPointCloudVBOfromRGBD(self):
# Create position and color VBOs
self.pos_vbo = vbo.VBO(data=self.pos, usage=GL.GL_DYNAMIC_DRAW, target=GL.GL_ARRAY_BUFFER)
self.col_vbo = vbo.VBO(data=self.col, usage=GL.GL_DYNAMIC_DRAW, target=GL.GL_ARRAY_BUFFER)
def updateRGBD(self):
# RGBD dimensions
width, height = self.depth.shape[1], self.depth.shape[0]
# Reshape
points = self.posFromDepth(self.depth.copy())
colors = resize(self.rgb, (height, width)).reshape((height * width, 3))
# Flatten and convert to float32
self.pos = points.astype('float32')
self.col = colors.reshape(height * width, 3).astype('float32')
# Move center of scene
self.pos = self.pos + glm.vec3(0, -0.06, -0.3)
# Create VBOs
if not self.col_vbo:
self.createPointCloudVBOfromRGBD()
def drawObject(self):
# Update camera
model, view, proj = glm.mat4(1), glm.mat4(1), glm.perspective(45, self.width() / self.height(), 0.01, 100)
center, up, eye = glm.vec3(0, -0.075, 0), glm.vec3(0, -1, 0), glm.vec3(0, 0, -0.4 * (self.zoomLevel / 10))
view = glm.lookAt(eye, center, up)
model = glm.rotate(model, self.xRot / 160.0, glm.vec3(1, 0, 0))
model = glm.rotate(model, self.yRot / 160.0, glm.vec3(0, 1, 0))
model = glm.rotate(model, self.zRot / 160.0, glm.vec3(0, 0, 1))
mvp = proj * view * model
GL.glUniformMatrix4fv(self.UNIFORM_LOCATIONS['mvp'], 1, False, glm.value_ptr(mvp))
# Update data
self.pos_vbo.set_array(self.pos)
self.col_vbo.set_array(self.col)
# Point size
GL.glPointSize(2)
# Position
self.pos_vbo.bind()
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
# Color
self.col_vbo.bind()
GL.glEnableVertexAttribArray(1)
GL.glVertexAttribPointer(1, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
# Draw
GL.glDrawArrays(GL.GL_POINTS, 0, self.pos.shape[0])
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = Window()
window.show()
res = app.exec_() | 17,345 | 34.4 | 148 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/bts_main.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import time
import argparse
import sys
import os
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.multiprocessing as mp
from tensorboardX import SummaryWriter
import matplotlib
import matplotlib.cm
from tqdm import tqdm
from bts_dataloader import *
from bts_ldu import *
def convert_arg_line_to_args(arg_line):
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
parser = argparse.ArgumentParser(description='BTS PyTorch implementation.', fromfile_prefix_chars='@')
parser.convert_arg_line_to_args = convert_arg_line_to_args
parser.add_argument('--mode', type=str, help='train or test', default='train')
parser.add_argument('--model_name', type=str, help='model name', default='bts_eigen_v2')
parser.add_argument('--encoder', type=str, help='type of encoder, densenet121_bts, densenet161_bts, '
'resnet101_bts, resnet50_bts, resnext50_bts or resnext101_bts',
default='densenet161_bts')
# Dataset
parser.add_argument('--dataset', type=str, help='dataset to train on, kitti or nyu', default='nyu')
parser.add_argument('--data_path', type=str, help='path to the data', required=True)
parser.add_argument('--gt_path', type=str, help='path to the groundtruth data', required=True)
parser.add_argument('--filenames_file', type=str, help='path to the filenames text file', required=True)
parser.add_argument('--input_height', type=int, help='input height', default=480)
parser.add_argument('--input_width', type=int, help='input width', default=640)
parser.add_argument('--max_depth', type=float, help='maximum depth in estimation', default=10)
# Log and save
parser.add_argument('--log_directory', type=str, help='directory to save checkpoints and summaries', default='')
parser.add_argument('--checkpoint_path', type=str, help='path to a checkpoint to load', default='')
parser.add_argument('--log_freq', type=int, help='Logging frequency in global steps', default=100)
parser.add_argument('--save_freq', type=int, help='Checkpoint saving frequency in global steps', default=500)
# Training
parser.add_argument('--fix_first_conv_blocks', help='if set, will fix the first two conv blocks', action='store_true')
parser.add_argument('--fix_first_conv_block', help='if set, will fix the first conv block', action='store_true')
parser.add_argument('--bn_no_track_stats', help='if set, will not track running stats in batch norm layers', action='store_true')
parser.add_argument('--weight_decay', type=float, help='weight decay factor for optimization', default=1e-2)
parser.add_argument('--bts_size', type=int, help='initial num_filters in bts', default=512)
parser.add_argument('--retrain', help='if used with checkpoint_path, will restart training from step zero', action='store_true')
parser.add_argument('--adam_eps', type=float, help='epsilon in Adam optimizer', default=1e-6)
parser.add_argument('--batch_size', type=int, help='batch size', default=4)
parser.add_argument('--num_epochs', type=int, help='number of epochs', default=50)
parser.add_argument('--learning_rate', type=float, help='initial learning rate', default=1e-4)
parser.add_argument('--end_learning_rate', type=float, help='end learning rate', default=-1)
parser.add_argument('--variance_focus', type=float, help='lambda in paper: [0, 1], higher value more focus on minimizing variance of error', default=0.85)
parser.add_argument('--nb_proto', type=int, help='initial num_proto in bts', default=30)
parser.add_argument('--loss_lambda', type=float, help='weight of the additional losses', default=0.1)
# Preprocessing
parser.add_argument('--do_random_rotate', help='if set, will perform random rotation for augmentation', action='store_true')
parser.add_argument('--degree', type=float, help='random rotation maximum degree', default=2.5)
parser.add_argument('--do_kb_crop', help='if set, crop input images as kitti benchmark images', action='store_true')
parser.add_argument('--use_right', help='if set, will randomly use right images when train on KITTI', action='store_true')
# Multi-gpu training
parser.add_argument('--num_threads', type=int, help='number of threads to use for data loading', default=1)
parser.add_argument('--world_size', type=int, help='number of nodes for distributed training', default=1)
parser.add_argument('--rank', type=int, help='node rank for distributed training', default=0)
parser.add_argument('--dist_url', type=str, help='url used to set up distributed training', default='tcp://127.0.0.1:1234')
parser.add_argument('--dist_backend', type=str, help='distributed backend', default='nccl')
parser.add_argument('--gpu', type=int, help='GPU id to use.', default=None)
parser.add_argument('--multiprocessing_distributed', help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training', action='store_true',)
# Online eval
parser.add_argument('--do_online_eval', help='if set, perform online eval in every eval_freq steps', action='store_true')
parser.add_argument('--data_path_eval', type=str, help='path to the data for online evaluation', required=False)
parser.add_argument('--gt_path_eval', type=str, help='path to the groundtruth data for online evaluation', required=False)
parser.add_argument('--filenames_file_eval', type=str, help='path to the filenames text file for online evaluation', required=False)
parser.add_argument('--min_depth_eval', type=float, help='minimum depth for evaluation', default=1e-3)
parser.add_argument('--max_depth_eval', type=float, help='maximum depth for evaluation', default=80)
parser.add_argument('--eigen_crop', help='if set, crops according to Eigen NIPS14', action='store_true')
parser.add_argument('--garg_crop', help='if set, crops according to Garg ECCV16', action='store_true')
parser.add_argument('--eval_freq', type=int, help='Online evaluation frequency in global steps', default=500)
parser.add_argument('--eval_summary_directory', type=str, help='output directory for eval summary,'
'if empty outputs to checkpoint folder', default='')
if sys.argv.__len__() == 2:
arg_filename_with_prefix = '@' + sys.argv[1]
args = parser.parse_args([arg_filename_with_prefix])
else:
args = parser.parse_args()
inv_normalize = transforms.Normalize(
mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
std=[1/0.229, 1/0.224, 1/0.225]
)
eval_metrics = ['silog', 'abs_rel', 'log10', 'rms', 'sq_rel', 'log_rms', 'd1', 'd2', 'd3']
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
d1 = (thresh < 1.25).mean()
d2 = (thresh < 1.25 ** 2).mean()
d3 = (thresh < 1.25 ** 3).mean()
rms = (gt - pred) ** 2
rms = np.sqrt(rms.mean())
log_rms = (np.log(gt) - np.log(pred)) ** 2
log_rms = np.sqrt(log_rms.mean())
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
err = np.log(pred) - np.log(gt)
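    # scale-invariant logarithmic error (silog): sqrt(mean(d^2) - mean(d)^2)
    # with d = log(pred) - log(gt), reported x100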
silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
err = np.abs(np.log10(pred) - np.log10(gt))
log10 = np.mean(err)
return [silog, abs_rel, log10, rms, sq_rel, log_rms, d1, d2, d3]
def block_print():
sys.stdout = open(os.devnull, 'w')
def enable_print():
sys.stdout = sys.__stdout__
def get_num_lines(file_path):
f = open(file_path, 'r')
lines = f.readlines()
f.close()
return len(lines)
def colorize(value, vmin=None, vmax=None, cmap='Greys'):
value = value.cpu().numpy()[:, :, :]
value = np.log10(value)
vmin = value.min() if vmin is None else vmin
vmax = value.max() if vmax is None else vmax
if vmin != vmax:
value = (value - vmin) / (vmax - vmin)
else:
value = value*0.
cmapper = matplotlib.cm.get_cmap(cmap)
value = cmapper(value, bytes=True)
img = value[:, :, :3]
return img.transpose((2, 0, 1))
def normalize_result(value, vmin=None, vmax=None):
value = value.cpu().numpy()[0, :, :]
vmin = value.min() if vmin is None else vmin
vmax = value.max() if vmax is None else vmax
if vmin != vmax:
value = (value - vmin) / (vmax - vmin)
else:
value = value * 0.
return np.expand_dims(value, 0)
def set_misc(model):
if args.bn_no_track_stats:
print("Disabling tracking running stats in batch norm layers")
model.apply(bn_init_as_tf)
if args.fix_first_conv_blocks:
if 'resne' in args.encoder:
fixing_layers = ['base_model.conv1', 'base_model.layer1.0', 'base_model.layer1.1', '.bn']
else:
fixing_layers = ['conv0', 'denseblock1.denselayer1', 'denseblock1.denselayer2', 'norm']
print("Fixing first two conv blocks")
elif args.fix_first_conv_block:
if 'resne' in args.encoder:
fixing_layers = ['base_model.conv1', 'base_model.layer1.0', '.bn']
else:
fixing_layers = ['conv0', 'denseblock1.denselayer1', 'norm']
print("Fixing first conv block")
else:
if 'resne' in args.encoder:
fixing_layers = ['base_model.conv1', '.bn']
else:
fixing_layers = ['conv0', 'norm']
print("Fixing first conv layer")
for name, child in model.named_children():
if not 'encoder' in name:
continue
for name2, parameters in child.named_parameters():
# print(name, name2)
if any(x in name2 for x in fixing_layers):
parameters.requires_grad = False
def online_eval(model, dataloader_eval, gpu, ngpus):
eval_measures = torch.zeros(10).cuda(device=gpu)
for _, eval_sample_batched in enumerate(tqdm(dataloader_eval.data)):
with torch.no_grad():
image = torch.autograd.Variable(eval_sample_batched['image'].cuda(gpu, non_blocking=True))
focal = torch.autograd.Variable(eval_sample_batched['focal'].cuda(gpu, non_blocking=True))
gt_depth = eval_sample_batched['depth']
has_valid_depth = eval_sample_batched['has_valid_depth']
if not has_valid_depth:
continue
pred_depth, _ = model(image, focal)
pred_depth = pred_depth.cpu().numpy().squeeze()
gt_depth = gt_depth.cpu().numpy().squeeze()
if args.do_kb_crop:
height, width = gt_depth.shape
top_margin = int(height - 352)
left_margin = int((width - 1216) / 2)
pred_depth_uncropped = np.zeros((height, width), dtype=np.float32)
pred_depth_uncropped[top_margin:top_margin + 352, left_margin:left_margin + 1216] = pred_depth
pred_depth = pred_depth_uncropped
pred_depth[pred_depth < args.min_depth_eval] = args.min_depth_eval
pred_depth[pred_depth > args.max_depth_eval] = args.max_depth_eval
pred_depth[np.isinf(pred_depth)] = args.max_depth_eval
pred_depth[np.isnan(pred_depth)] = args.min_depth_eval
valid_mask = np.logical_and(gt_depth > args.min_depth_eval, gt_depth < args.max_depth_eval)
if args.garg_crop or args.eigen_crop:
gt_height, gt_width = gt_depth.shape
eval_mask = np.zeros(valid_mask.shape)
if args.garg_crop:
eval_mask[int(0.40810811 * gt_height):int(0.99189189 * gt_height), int(0.03594771 * gt_width):int(0.96405229 * gt_width)] = 1
elif args.eigen_crop:
if args.dataset == 'kitti':
eval_mask[int(0.3324324 * gt_height):int(0.91351351 * gt_height), int(0.0359477 * gt_width):int(0.96405229 * gt_width)] = 1
else:
eval_mask[45:471, 41:601] = 1
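                    # standard NYU Depth v2 evaluation crop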
valid_mask = np.logical_and(valid_mask, eval_mask)
measures = compute_errors(gt_depth[valid_mask], pred_depth[valid_mask])
eval_measures[:9] += torch.tensor(measures).cuda(device=gpu)
eval_measures[9] += 1
if args.multiprocessing_distributed:
group = dist.new_group([i for i in range(ngpus)])
dist.all_reduce(tensor=eval_measures, op=dist.ReduceOp.SUM, group=group)
if not args.multiprocessing_distributed or gpu == 0:
eval_measures_cpu = eval_measures.cpu()
cnt = eval_measures_cpu[9].item()
eval_measures_cpu /= cnt
print('Computing errors for {} eval samples'.format(int(cnt)))
print("{:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}".format('silog', 'abs_rel', 'log10', 'rms',
'sq_rel', 'log_rms', 'd1', 'd2',
'd3'))
for i in range(8):
print('{:7.3f}, '.format(eval_measures_cpu[i]), end='')
print('{:7.3f}'.format(eval_measures_cpu[8]))
return eval_measures_cpu
return None
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
# Create model
model = BtsModel(args)
model.train()
model.decoder.apply(weights_init_xavier)
set_misc(model)
num_params = sum([np.prod(p.size()) for p in model.parameters()])
print("Total number of parameters: {}".format(num_params))
num_params_update = sum([np.prod(p.shape) for p in model.parameters() if p.requires_grad])
print("Total number of learning parameters: {}".format(num_params_update))
if args.distributed:
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
args.batch_size = int(args.batch_size / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
else:
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
else:
model = torch.nn.DataParallel(model)
model.cuda()
if args.distributed:
print("Model Initialized on GPU: {}".format(args.gpu))
else:
print("Model Initialized")
global_step = 0
best_eval_measures_lower_better = torch.zeros(6).cpu() + 1e3
best_eval_measures_higher_better = torch.zeros(3).cpu()
best_eval_steps = np.zeros(9, dtype=np.int32)
# Training parameters
optimizer = torch.optim.AdamW([{'params': model.module.encoder.parameters(), 'weight_decay': args.weight_decay},
{'params': model.module.decoder.parameters(), 'weight_decay': 0}],
lr=args.learning_rate, eps=args.adam_eps)
model_just_loaded = False
if args.checkpoint_path != '':
if os.path.isfile(args.checkpoint_path):
print("Loading checkpoint '{}'".format(args.checkpoint_path))
if args.gpu is None:
checkpoint = torch.load(args.checkpoint_path)
else:
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.checkpoint_path, map_location=loc)
global_step = checkpoint['global_step']
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
try:
best_eval_measures_higher_better = checkpoint['best_eval_measures_higher_better'].cpu()
best_eval_measures_lower_better = checkpoint['best_eval_measures_lower_better'].cpu()
best_eval_steps = checkpoint['best_eval_steps']
except KeyError:
print("Could not load values for online evaluation")
print("Loaded checkpoint '{}' (global_step {})".format(args.checkpoint_path, checkpoint['global_step']))
else:
print("No checkpoint found at '{}'".format(args.checkpoint_path))
model_just_loaded = True
if args.retrain:
global_step = 0
cudnn.benchmark = True
dataloader = BtsDataLoader(args, 'train')
dataloader_eval = BtsDataLoader(args, 'online_eval')
# Logging
if not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
writer = SummaryWriter(args.log_directory + '/' + args.model_name + '/summaries', flush_secs=30)
if args.do_online_eval:
if args.eval_summary_directory != '':
eval_summary_path = os.path.join(args.eval_summary_directory, args.model_name)
else:
eval_summary_path = os.path.join(args.log_directory, 'eval')
eval_summary_writer = SummaryWriter(eval_summary_path, flush_secs=30)
criterion = silog_loss(args.variance_focus)
criterion_uncer = uncertainty_loss(args)
criterion_entro = entropy_loss()
criterion_dissi = dissimilar_loss()
start_time = time.time()
duration = 0
num_log_images = args.batch_size
end_learning_rate = args.end_learning_rate if args.end_learning_rate != -1 else 0.1 * args.learning_rate
steps_per_epoch = len(dataloader.data)
num_total_steps = args.num_epochs * steps_per_epoch
epoch = global_step // steps_per_epoch
while epoch < args.num_epochs:
if args.distributed:
dataloader.train_sampler.set_epoch(epoch)
for step, sample_batched in enumerate(dataloader.data):
optimizer.zero_grad()
before_op_time = time.time()
image = torch.autograd.Variable(sample_batched['image'].cuda(args.gpu, non_blocking=True))
focal = torch.autograd.Variable(sample_batched['focal'].cuda(args.gpu, non_blocking=True))
depth_gt = torch.autograd.Variable(sample_batched['depth'].cuda(args.gpu, non_blocking=True))
final_depth, final_uncer, omega, embedding_ = model(image, focal)
if args.dataset == 'nyu':
mask = depth_gt > 0.1
else:
mask = depth_gt > 1.0
mask = mask.to(torch.bool)
loss_depth = criterion.forward(final_depth, depth_gt, mask)
loss_uncer = criterion_uncer.forward(final_uncer, final_depth, depth_gt, mask)
loss_omega = criterion_entro.forward(embedding_)
loss_dissi = criterion_dissi.forward(omega)
loss = loss_depth + (loss_uncer + loss_omega + loss_dissi) * args.loss_lambda
loss.backward()
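            # Polynomial decay (power 0.9) of the learning rate from args.learning_rate
            # down to end_learning_rate over num_total_steps.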
for param_group in optimizer.param_groups:
current_lr = (args.learning_rate - end_learning_rate) * (1 - global_step / num_total_steps) ** 0.9 + end_learning_rate
param_group['lr'] = current_lr
optimizer.step()
if not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
print('[epoch][s/s_per_e/gs]: [{}][{}/{}/{}], lr: {:.12f}, loss: {:.12f}'.format(epoch, step, steps_per_epoch, global_step, current_lr, loss))
if np.isnan(loss.cpu().item()):
print('NaN in loss occurred. Aborting training.')
return -1
duration += time.time() - before_op_time
if global_step and global_step % args.log_freq == 0 and not model_just_loaded:
examples_per_sec = args.batch_size / duration * args.log_freq
duration = 0
time_sofar = (time.time() - start_time) / 3600
training_time_left = (num_total_steps / global_step - 1.0) * time_sofar
if not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
print("{}".format(args.model_name))
print_string = 'GPU: {} | examples/s: {:4.2f} | loss: {:.5f} | time elapsed: {:.2f}h | time left: {:.2f}h'
print(print_string.format(args.gpu, examples_per_sec, loss, time_sofar, training_time_left))
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
writer.add_scalar('total_loss', loss, global_step)
writer.add_scalar('loss_depth', loss_depth, global_step)
writer.add_scalar('loss_uncer', loss_uncer, global_step)
writer.add_scalar('learning_rate', current_lr, global_step)
for i in range(num_log_images):
writer.add_image('depth_mean/image/{}'.format(i), normalize_result(1/(final_depth)[i, :, :, :].data), global_step)
writer.add_image('depth_var/image/{}'.format(i), normalize_result((final_uncer.detach().sigmoid())[i, :, :, :].data), global_step)
writer.add_image('image/image/{}'.format(i), inv_normalize(image[i, :, :, :]).data, global_step)
writer.flush()
if not args.do_online_eval and global_step and global_step % args.save_freq == 0:
if not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
checkpoint = {'global_step': global_step,
'model': model.state_dict(),
'optimizer': optimizer.state_dict()}
torch.save(checkpoint, args.log_directory + '/' + args.model_name + '/model-{}'.format(global_step))
if args.do_online_eval and global_step and global_step % args.eval_freq == 0 and not model_just_loaded:
time.sleep(0.1)
model.eval()
eval_measures = online_eval(model, dataloader_eval, gpu, ngpus_per_node)
if eval_measures is not None:
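                    # The first six metrics (silog, abs_rel, log10, rms, sq_rel, log_rms) are better when
                    # lower, d1/d2/d3 are better when higher; one best checkpoint is kept per metric and
                    # the previously best checkpoint for that metric is deleted.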
for i in range(9):
eval_summary_writer.add_scalar(eval_metrics[i], eval_measures[i].cpu(), int(global_step))
measure = eval_measures[i]
is_best = False
if i < 6 and measure < best_eval_measures_lower_better[i]:
old_best = best_eval_measures_lower_better[i].item()
best_eval_measures_lower_better[i] = measure.item()
is_best = True
elif i >= 6 and measure > best_eval_measures_higher_better[i-6]:
old_best = best_eval_measures_higher_better[i-6].item()
best_eval_measures_higher_better[i-6] = measure.item()
is_best = True
if is_best:
old_best_step = best_eval_steps[i]
old_best_name = '/model-{}-best_{}_{:.5f}'.format(old_best_step, eval_metrics[i], old_best)
model_path = args.log_directory + '/' + args.model_name + old_best_name
if os.path.exists(model_path):
command = 'rm {}'.format(model_path)
os.system(command)
best_eval_steps[i] = global_step
model_save_name = '/model-{}-best_{}_{:.5f}'.format(global_step, eval_metrics[i], measure)
print('New best for {}. Saving model: {}'.format(eval_metrics[i], model_save_name))
checkpoint = {'global_step': global_step,
'model': model.state_dict(),
'best_eval_measures_higher_better': best_eval_measures_higher_better,
'best_eval_measures_lower_better': best_eval_measures_lower_better,
'best_eval_steps': best_eval_steps
}
torch.save(checkpoint, args.log_directory + '/' + args.model_name + model_save_name)
eval_summary_writer.flush()
model.train()
block_print()
set_misc(model)
enable_print()
model_just_loaded = False
global_step += 1
epoch += 1
if not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
writer.close()
if args.do_online_eval:
eval_summary_writer.close()
def main():
if args.mode != 'train':
print('bts_main.py is only for training. Use bts_test.py instead.')
return -1
model_filename = args.model_name + '.py'
command = 'mkdir ' + args.log_directory + '/' + args.model_name
os.system(command)
args_out_path = args.log_directory + '/' + args.model_name + '/' + sys.argv[1]
command = 'cp ' + sys.argv[1] + ' ' + args_out_path
os.system(command)
if args.checkpoint_path == '':
model_out_path = args.log_directory + '/' + args.model_name + '/' + model_filename
command = 'cp bts.py ' + model_out_path
os.system(command)
aux_out_path = args.log_directory + '/' + args.model_name + '/.'
command = 'cp bts_main.py ' + aux_out_path
os.system(command)
command = 'cp bts_dataloader.py ' + aux_out_path
os.system(command)
else:
loaded_model_dir = os.path.dirname(args.checkpoint_path)
loaded_model_name = os.path.basename(loaded_model_dir)
loaded_model_filename = loaded_model_name + '.py'
model_out_path = args.log_directory + '/' + args.model_name + '/' + model_filename
command = 'cp ' + loaded_model_dir + '/' + loaded_model_filename + ' ' + model_out_path
os.system(command)
torch.cuda.empty_cache()
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if ngpus_per_node > 1 and not args.multiprocessing_distributed:
print("This machine has more than 1 gpu. Please specify --multiprocessing_distributed, or set \'CUDA_VISIBLE_DEVICES=0\'")
return -1
if args.do_online_eval:
print("You have specified --do_online_eval.")
print("This will evaluate the model every eval_freq {} steps and save best models for individual eval metrics."
.format(args.eval_freq))
if args.multiprocessing_distributed:
args.world_size = ngpus_per_node * args.world_size
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
main_worker(args.gpu, ngpus_per_node, args)
if __name__ == '__main__':
main()
| 28,993 | 47.976351 | 165 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/bts_ldu.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import torch
import torch.nn as nn
import torch.nn.functional as torch_nn_func
import math
import numpy as np
# This sets the batch norm layers in pytorch as if {'is_training': False, 'scale': True} in tensorflow
def bn_init_as_tf(m):
if isinstance(m, nn.BatchNorm2d):
m.track_running_stats = True # These two lines enable using stats (moving mean and var) loaded from pretrained model
m.eval() # or zero mean and variance of one if the batch norm layer has no pretrained values
m.affine = True
m.requires_grad = True
def weights_init_xavier(m):
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
torch.nn.init.zeros_(m.bias)
#_______________________________________________________________________________________#
class silog_loss(nn.Module):
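    """Scale-invariant log (SILog) loss: sqrt(mean(d^2) - variance_focus * mean(d)^2) * 10,
    where d = log(depth_est) - log(depth_gt), computed on the valid pixels selected by mask."""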
def __init__(self, variance_focus):
super(silog_loss, self).__init__()
self.variance_focus = variance_focus
def forward(self, depth_est, depth_gt, mask):
d = torch.log(depth_est[mask]) - torch.log(depth_gt[mask])
return torch.sqrt((d ** 2).mean() - self.variance_focus * (d.mean() ** 2)) * 10.0
class entropy_loss(nn.Module):
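    """Mean of sum_i p_i * log(p_i) (the negative entropy), where p is the softmax over the
    prototype dimension of the embedding."""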
def __init__(self):
super(entropy_loss, self).__init__()
def forward(self, embedding):
embedding = nn.Softmax(dim=1)(embedding)
minus_entropy = embedding * torch.log(embedding)
minus_entropy = torch.sum(minus_entropy, dim=1)
return minus_entropy.mean()
class uncertainty_loss(nn.Module):
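    """Binary cross-entropy between the predicted uncertainty logits and the absolute depth error
    of the (detached) prediction, normalized by max_depth and clipped to [0, 1]."""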
def __init__(self, args):
super(uncertainty_loss, self).__init__()
self.max_depth = args.max_depth
def forward(self, uncer, final_depth, depth_gt, mask):
abs_error = abs(final_depth.detach() - depth_gt)/self.max_depth
abs_error[abs_error>1] = 1
abs_error = abs_error[mask].detach()
loss = nn.BCEWithLogitsLoss(pos_weight = torch.tensor([5.0]).cuda(), reduction='mean')(uncer[mask], abs_error)
return loss
class dissimilar_loss(nn.Module):
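    """Pushes the learned prototypes apart: the loss is the negative mean pairwise distance
    between prototype vectors."""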
def __init__(self):
super(dissimilar_loss, self).__init__()
def forward(self, protos):
loss = -1 * torch.mean(torch.cdist(protos, protos))
return loss
#_______________________________________________________________________________________#
class atrous_conv(nn.Sequential):
def __init__(self, in_channels, out_channels, dilation, apply_bn_first=True):
super(atrous_conv, self).__init__()
self.atrous_conv = torch.nn.Sequential()
if apply_bn_first:
self.atrous_conv.add_module('first_bn', nn.BatchNorm2d(in_channels, momentum=0.01, affine=True, track_running_stats=True, eps=1.1e-5))
self.atrous_conv.add_module('aconv_sequence', nn.Sequential(nn.ReLU(),
nn.Conv2d(in_channels=in_channels, out_channels=out_channels*2, bias=False, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(out_channels*2, momentum=0.01, affine=True, track_running_stats=True),
nn.ReLU(),
nn.Conv2d(in_channels=out_channels * 2, out_channels=out_channels, bias=False, kernel_size=3, stride=1,
padding=(dilation, dilation), dilation=dilation)))
def forward(self, x):
return self.atrous_conv.forward(x)
class upconv(nn.Module):
def __init__(self, in_channels, out_channels, ratio=2):
super(upconv, self).__init__()
self.elu = nn.ELU()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, bias=False, kernel_size=3, stride=1, padding=1)
self.ratio = ratio
def forward(self, x):
up_x = torch_nn_func.interpolate(x, scale_factor=self.ratio, mode='nearest')
out = self.conv(up_x)
out = self.elu(out)
return out
class reduction_1x1(nn.Sequential):
def __init__(self, num_in_filters, num_out_filters, max_depth, is_final=False):
super(reduction_1x1, self).__init__()
self.max_depth = max_depth
self.is_final = is_final
self.sigmoid = nn.Sigmoid()
self.reduc = torch.nn.Sequential()
while num_out_filters >= 4:
if num_out_filters < 8:
if self.is_final:
self.reduc.add_module('final', torch.nn.Sequential(nn.Conv2d(num_in_filters, out_channels=1, bias=False,
kernel_size=1, stride=1, padding=0),
nn.Sigmoid()))
else:
self.reduc.add_module('plane_params', torch.nn.Conv2d(num_in_filters, out_channels=3, bias=False,
kernel_size=1, stride=1, padding=0))
break
else:
self.reduc.add_module('inter_{}_{}'.format(num_in_filters, num_out_filters),
torch.nn.Sequential(nn.Conv2d(in_channels=num_in_filters, out_channels=num_out_filters,
bias=False, kernel_size=1, stride=1, padding=0),
nn.ELU()))
num_in_filters = num_out_filters
num_out_filters = num_out_filters // 2
def forward(self, net):
net = self.reduc.forward(net)
if not self.is_final:
theta = self.sigmoid(net[:, 0, :, :]) * math.pi / 3
phi = self.sigmoid(net[:, 1, :, :]) * math.pi * 2
dist = self.sigmoid(net[:, 2, :, :]) * self.max_depth
n1 = torch.mul(torch.sin(theta), torch.cos(phi)).unsqueeze(1)
n2 = torch.mul(torch.sin(theta), torch.sin(phi)).unsqueeze(1)
n3 = torch.cos(theta).unsqueeze(1)
n4 = dist.unsqueeze(1)
net = torch.cat([n1, n2, n3, n4], dim=1)
return net
class local_planar_guidance(nn.Module):
def __init__(self, upratio):
super(local_planar_guidance, self).__init__()
self.upratio = upratio
self.u = torch.arange(self.upratio).reshape([1, 1, self.upratio]).float()
self.v = torch.arange(int(self.upratio)).reshape([1, self.upratio, 1]).float()
self.upratio = float(upratio)
def forward(self, plane_eq, focal):
plane_eq_expanded = torch.repeat_interleave(plane_eq, int(self.upratio), 2)
plane_eq_expanded = torch.repeat_interleave(plane_eq_expanded, int(self.upratio), 3)
n1 = plane_eq_expanded[:, 0, :, :]
n2 = plane_eq_expanded[:, 1, :, :]
n3 = plane_eq_expanded[:, 2, :, :]
n4 = plane_eq_expanded[:, 3, :, :]
u = self.u.repeat(plane_eq.size(0), plane_eq.size(2) * int(self.upratio), plane_eq.size(3)).cuda()
u = (u - (self.upratio - 1) * 0.5) / self.upratio
v = self.v.repeat(plane_eq.size(0), plane_eq.size(2), plane_eq.size(3) * int(self.upratio)).cuda()
v = (v - (self.upratio - 1) * 0.5) / self.upratio
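        # Per-pixel depth from the local plane equation: z = n4 / (n1 * u + n2 * v + n3),
        # evaluated at the sub-pixel offsets (u, v) of the upsampling grid.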
return n4 / (n1 * u + n2 * v + n3)
class Distanceminimi_Layer_learned(nn.Module):
def __init__(self, in_features=0, out_features=0, dist='lin'):
super(Distanceminimi_Layer_learned, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.dist=dist
self.omega = nn.Parameter(torch.Tensor(1, out_features, in_features, 1, 1))
self.reset_parameters()
def reset_parameters(self):
        nn.init.normal_(self.omega, mean=0, std=1)  # alternatively: std = 1/self.out_features
def forward(self, x):
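        # x: (B, C, H, W) pixel features; self.omega: (1, P, C, 1, 1) learned prototypes.
        # Cosine similarity over the channel dimension yields a (B, P, H, W) similarity map.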
x = x.unsqueeze(1)
out = torch_nn_func.cosine_similarity(x, self.omega, dim=2, eps=1e-30)
return out, self.omega
class bts(nn.Module):
def __init__(self, params, feat_out_channels, num_features=512, nb_prototype = 80):
super(bts, self).__init__()
self.params = params
self.upconv5 = upconv(feat_out_channels[4], num_features)
self.bn5 = nn.BatchNorm2d(num_features, momentum=0.01, affine=True, eps=1.1e-5)
self.conv5 = torch.nn.Sequential(nn.Conv2d(num_features + feat_out_channels[3], num_features, 3, 1, 1, bias=False),
nn.ELU())
self.upconv4 = upconv(num_features, num_features // 2)
self.bn4 = nn.BatchNorm2d(num_features // 2, momentum=0.01, affine=True, eps=1.1e-5)
self.conv4 = torch.nn.Sequential(nn.Conv2d(num_features // 2 + feat_out_channels[2], num_features // 2, 3, 1, 1, bias=False),
nn.ELU())
self.bn4_2 = nn.BatchNorm2d(num_features // 2, momentum=0.01, affine=True, eps=1.1e-5)
self.daspp_3 = atrous_conv(num_features // 2, num_features // 4, 3, apply_bn_first=False)
self.daspp_6 = atrous_conv(num_features // 2 + num_features // 4 + feat_out_channels[2], num_features // 4, 6)
self.daspp_12 = atrous_conv(num_features + feat_out_channels[2], num_features // 4, 12)
self.daspp_18 = atrous_conv(num_features + num_features // 4 + feat_out_channels[2], num_features // 4, 18)
self.daspp_24 = atrous_conv(num_features + num_features // 2 + feat_out_channels[2], num_features // 4, 24)
self.daspp_conv = torch.nn.Sequential(nn.Conv2d(num_features + num_features // 2 + num_features // 4, num_features // 4, 3, 1, 1, bias=False),
nn.ELU())
self.reduc8x8 = reduction_1x1(num_features // 4, num_features // 4, self.params.max_depth)
self.lpg8x8 = local_planar_guidance(8)
self.upconv3 = upconv(num_features // 4, num_features // 4)
self.bn3 = nn.BatchNorm2d(num_features // 4, momentum=0.01, affine=True, eps=1.1e-5)
self.conv3 = torch.nn.Sequential(nn.Conv2d(num_features // 4 + feat_out_channels[1] + 1, num_features // 4, 3, 1, 1, bias=False),
nn.ELU())
self.reduc4x4 = reduction_1x1(num_features // 4, num_features // 8, self.params.max_depth)
self.lpg4x4 = local_planar_guidance(4)
self.upconv2 = upconv(num_features // 4, num_features // 8)
self.bn2 = nn.BatchNorm2d(num_features // 8, momentum=0.01, affine=True, eps=1.1e-5)
self.conv2 = torch.nn.Sequential(nn.Conv2d(num_features // 8 + feat_out_channels[0] + 1, num_features // 8, 3, 1, 1, bias=False),
nn.ELU())
self.reduc2x2 = reduction_1x1(num_features // 8, num_features // 16, self.params.max_depth)
self.lpg2x2 = local_planar_guidance(2)
self.upconv1 = upconv(num_features // 8, num_features // 16)
self.reduc1x1 = reduction_1x1(num_features // 16, num_features // 32, self.params.max_depth, is_final=True)
self.conv1 = torch.nn.Sequential(nn.Conv2d(num_features // 16 + 4, num_features // 16, 3, 1, 1, bias=False),
nn.ELU())
self.DMlayer = Distanceminimi_Layer_learned(in_features=(num_features // 16), out_features = nb_prototype, dist='cos')
self.DMBN = nn.BatchNorm2d(nb_prototype)
self.get_uncer = nn.Conv2d(nb_prototype, 1, 1)
self.get_depth = nn.Sequential(nn.Conv2d(nb_prototype, 1, 1), nn.Sigmoid())
def forward(self, features, focal):
skip0, skip1, skip2, skip3 = features[0], features[1], features[2], features[3]
dense_features = torch.nn.ReLU()(features[4])
upconv5 = self.upconv5(dense_features) # H/16
upconv5 = self.bn5(upconv5)
concat5 = torch.cat([upconv5, skip3], dim=1)
iconv5 = self.conv5(concat5)
upconv4 = self.upconv4(iconv5) # H/8
upconv4 = self.bn4(upconv4)
concat4 = torch.cat([upconv4, skip2], dim=1)
iconv4 = self.conv4(concat4)
iconv4 = self.bn4_2(iconv4)
daspp_3 = self.daspp_3(iconv4)
concat4_2 = torch.cat([concat4, daspp_3], dim=1)
daspp_6 = self.daspp_6(concat4_2)
concat4_3 = torch.cat([concat4_2, daspp_6], dim=1)
daspp_12 = self.daspp_12(concat4_3)
concat4_4 = torch.cat([concat4_3, daspp_12], dim=1)
daspp_18 = self.daspp_18(concat4_4)
concat4_5 = torch.cat([concat4_4, daspp_18], dim=1)
daspp_24 = self.daspp_24(concat4_5)
concat4_daspp = torch.cat([iconv4, daspp_3, daspp_6, daspp_12, daspp_18, daspp_24], dim=1)
daspp_feat = self.daspp_conv(concat4_daspp)
reduc8x8 = self.reduc8x8(daspp_feat)
plane_normal_8x8 = reduc8x8[:, :3, :, :]
plane_normal_8x8 = torch_nn_func.normalize(plane_normal_8x8, 2, 1)
plane_dist_8x8 = reduc8x8[:, 3, :, :]
plane_eq_8x8 = torch.cat([plane_normal_8x8, plane_dist_8x8.unsqueeze(1)], 1)
depth_8x8 = self.lpg8x8(plane_eq_8x8, focal)
depth_8x8_scaled = depth_8x8.unsqueeze(1) / self.params.max_depth
depth_8x8_scaled_ds = torch_nn_func.interpolate(depth_8x8_scaled, scale_factor=0.25, mode='nearest')
upconv3 = self.upconv3(daspp_feat) # H/4
upconv3 = self.bn3(upconv3)
concat3 = torch.cat([upconv3, skip1, depth_8x8_scaled_ds], dim=1)
iconv3 = self.conv3(concat3)
reduc4x4 = self.reduc4x4(iconv3)
plane_normal_4x4 = reduc4x4[:, :3, :, :]
plane_normal_4x4 = torch_nn_func.normalize(plane_normal_4x4, 2, 1)
plane_dist_4x4 = reduc4x4[:, 3, :, :]
plane_eq_4x4 = torch.cat([plane_normal_4x4, plane_dist_4x4.unsqueeze(1)], 1)
depth_4x4 = self.lpg4x4(plane_eq_4x4, focal)
depth_4x4_scaled = depth_4x4.unsqueeze(1) / self.params.max_depth
depth_4x4_scaled_ds = torch_nn_func.interpolate(depth_4x4_scaled, scale_factor=0.5, mode='nearest')
upconv2 = self.upconv2(iconv3) # H/2
upconv2 = self.bn2(upconv2)
concat2 = torch.cat([upconv2, skip0, depth_4x4_scaled_ds], dim=1)
iconv2 = self.conv2(concat2)
reduc2x2 = self.reduc2x2(iconv2)
plane_normal_2x2 = reduc2x2[:, :3, :, :]
plane_normal_2x2 = torch_nn_func.normalize(plane_normal_2x2, 2, 1)
plane_dist_2x2 = reduc2x2[:, 3, :, :]
plane_eq_2x2 = torch.cat([plane_normal_2x2, plane_dist_2x2.unsqueeze(1)], 1)
depth_2x2 = self.lpg2x2(plane_eq_2x2, focal)
depth_2x2_scaled = depth_2x2.unsqueeze(1) / self.params.max_depth
upconv1 = self.upconv1(iconv2)
reduc1x1 = self.reduc1x1(upconv1)
concat1 = torch.cat([upconv1, reduc1x1, depth_2x2_scaled, depth_4x4_scaled, depth_8x8_scaled], dim=1)
feature_output = self.conv1(concat1)
# Before the last layer, DM layer is added
embedding_, omega = self.DMlayer(feature_output)
embedding = torch.exp(-embedding_)
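        # Depth and uncertainty are both decoded from the batch-normalized embedding of
        # exponentiated negative similarities to the learned prototypes.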
out = self.DMBN(embedding)
final_uncer = self.get_uncer(out)
final_depth = self.get_depth(out) * self.params.max_depth
if self.training:
return final_depth, final_uncer, omega.squeeze(), embedding_
else:
return final_depth, torch.sigmoid(final_uncer)
class encoder(nn.Module):
def __init__(self, params):
super(encoder, self).__init__()
self.params = params
import torchvision.models as models
if params.encoder == 'densenet121_bts':
self.base_model = models.densenet121(pretrained=True).features
self.feat_names = ['relu0', 'pool0', 'transition1', 'transition2', 'norm5']
self.feat_out_channels = [64, 64, 128, 256, 1024]
elif params.encoder == 'densenet161_bts':
self.base_model = models.densenet161(pretrained=True).features
self.feat_names = ['relu0', 'pool0', 'transition1', 'transition2', 'norm5']
self.feat_out_channels = [96, 96, 192, 384, 2208]
elif params.encoder == 'resnet50_bts':
self.base_model = models.resnet50(pretrained=True)
self.feat_names = ['relu', 'layer1', 'layer2', 'layer3', 'layer4']
self.feat_out_channels = [64, 256, 512, 1024, 2048]
elif params.encoder == 'resnet101_bts':
self.base_model = models.resnet101(pretrained=True)
self.feat_names = ['relu', 'layer1', 'layer2', 'layer3', 'layer4']
self.feat_out_channels = [64, 256, 512, 1024, 2048]
elif params.encoder == 'resnext50_bts':
self.base_model = models.resnext50_32x4d(pretrained=True)
self.feat_names = ['relu', 'layer1', 'layer2', 'layer3', 'layer4']
self.feat_out_channels = [64, 256, 512, 1024, 2048]
elif params.encoder == 'resnext101_bts':
self.base_model = models.resnext101_32x8d(pretrained=True)
self.feat_names = ['relu', 'layer1', 'layer2', 'layer3', 'layer4']
self.feat_out_channels = [64, 256, 512, 1024, 2048]
elif params.encoder == 'mobilenetv2_bts':
self.base_model = models.mobilenet_v2(pretrained=True).features
self.feat_inds = [2, 4, 7, 11, 19]
self.feat_out_channels = [16, 24, 32, 64, 1280]
self.feat_names = []
else:
print('Not supported encoder: {}'.format(params.encoder))
def forward(self, x):
feature = x
skip_feat = []
i = 1
for k, v in self.base_model._modules.items():
if 'fc' in k or 'avgpool' in k:
continue
feature = v(feature)
if self.params.encoder == 'mobilenetv2_bts':
if i == 2 or i == 4 or i == 7 or i == 11 or i == 19:
skip_feat.append(feature)
else:
if any(x in k for x in self.feat_names):
skip_feat.append(feature)
i = i + 1
return skip_feat
class BtsModel(nn.Module):
def __init__(self, params):
super(BtsModel, self).__init__()
self.encoder = encoder(params)
self.decoder = bts(params, self.encoder.feat_out_channels, params.bts_size, params.nb_proto)
def forward(self, x, focal):
skip_feat = self.encoder(x)
return self.decoder(skip_feat, focal) | 19,379 | 47.693467 | 180 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/bts_test_kitti_ldu.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from __future__ import absolute_import, division, print_function
import os
import argparse
import time
import numpy as np
import sys
import torch
from torch.autograd import Variable
from tqdm import tqdm
from bts_dataloader import *
from sparsification import sparsification_error_gpu
from bts_ldu import BtsModel
def convert_arg_line_to_args(arg_line):
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
parser = argparse.ArgumentParser(description='BTS PyTorch implementation.', fromfile_prefix_chars='@')
parser.convert_arg_line_to_args = convert_arg_line_to_args
parser.add_argument('--model_name', type=str, help='model name', default='bts_nyu_v2')
parser.add_argument('--encoder', type=str, help='type of encoder, vgg or densenet121_bts or densenet161_bts',
default='densenet161_bts')
parser.add_argument('--data_path_eval', type=str, help='path to the data', required=True)
parser.add_argument('--gt_path_eval', type=str, help='path to the data', required=True)
parser.add_argument('--filenames_file_eval', type=str, help='path to the filenames text file', required=True)
parser.add_argument('--input_height', type=int, help='input height', default=480)
parser.add_argument('--input_width', type=int, help='input width', default=640)
parser.add_argument('--max_depth', type=float, help='maximum depth in estimation', default=80)
parser.add_argument('--checkpoint_path', type=str, help='path to a specific checkpoint to load', default='')
parser.add_argument('--dataset', type=str, help='dataset to train on, make3d or nyudepthv2', default='nyu')
parser.add_argument('--do_kb_crop', help='if set, crop input images as kitti benchmark images', action='store_true')
parser.add_argument('--garg_crop', help='if set, crops according to Garg ECCV16', action='store_true')
parser.add_argument('--bts_size', type=int, help='initial num_filters in bts', default=512)
parser.add_argument('--clip_gt', help='if set, clipping the ground truth to the min-max depth', action='store_true')
parser.add_argument('--min_depth_eval', type=float, help='minimum depth for evaluation', default=1e-3)
parser.add_argument('--max_depth_eval', type=float, help='maximum depth for evaluation', default=80)
parser.add_argument('--nb_proto', type=int, help='initial num_proto in bts', default=30)
if sys.argv.__len__() == 2:
arg_filename_with_prefix = '@' + sys.argv[1]
args = parser.parse_args([arg_filename_with_prefix])
else:
args = parser.parse_args()
def get_num_lines(file_path):
f = open(file_path, 'r')
lines = f.readlines()
f.close()
return len(lines)
def compute_errors(gt, pred):
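    """Standard depth metrics: SILog, log10, Abs Rel, Sq Rel, RMSE, RMSE(log) and the threshold
    accuracies d1/d2/d3 (max(gt/pred, pred/gt) < 1.25**k)."""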
thresh = np.maximum((gt / pred), (pred / gt))
d1 = (thresh < 1.25).mean()
d2 = (thresh < 1.25 ** 2).mean()
d3 = (thresh < 1.25 ** 3).mean()
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred)**2) / gt)
err = np.log(pred) - np.log(gt)
silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
err = np.abs(np.log10(pred) - np.log10(gt))
log10 = np.mean(err)
return silog, log10, abs_rel, sq_rel, rmse, rmse_log, d1, d2, d3
inv_normalize = transforms.Normalize(
mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
std=[1/0.229, 1/0.224, 1/0.225]
)
def test():
"""Test function."""
args.mode = 'online_eval'
args.distributed = False
dataloader = BtsDataLoader(args, 'online_eval')
model = BtsModel(params=args)
model = torch.nn.DataParallel(model)
if os.path.exists(args.checkpoint_path):
checkpoint = torch.load(args.checkpoint_path)
else:
print('Wrong checkpoint path. Exit.')
exit()
model.load_state_dict(checkpoint['model'])
model.eval()
model.cuda()
num_params = sum([np.prod(p.size()) for p in model.parameters()])
print("Total number of parameters: {}".format(num_params))
num_test_samples = get_num_lines(args.filenames_file_eval)
print('now testing {} files with {}'.format(num_test_samples, args.checkpoint_path))
start_time = time.time()
with torch.no_grad():
num_samples = len(dataloader.data)
print(num_samples)
nb_valid = 0
silog = np.zeros(num_samples, np.float32)
log10 = np.zeros(num_samples, np.float32)
rms = np.zeros(num_samples, np.float32)
log_rms = np.zeros(num_samples, np.float32)
abs_rel = np.zeros(num_samples, np.float32)
sq_rel = np.zeros(num_samples, np.float32)
d1 = np.zeros(num_samples, np.float32)
d2 = np.zeros(num_samples, np.float32)
d3 = np.zeros(num_samples, np.float32)
hist_pred_rmses = 0
hist_oracle_rmses = 0
nb_remain_rmses = 0
ausc_rmse = np.zeros(num_samples, np.float32)
hist_pred_absrels = 0
hist_oracle_absrels = 0
nb_remain_absrels = 0
ausc_absrel = np.zeros(num_samples, np.float32)
spar_rmse = 0
spar_absr = 0
        for i, sample in enumerate(tqdm(dataloader.data)):
is_valid = sample['has_valid_depth']
if not is_valid: continue
else: nb_valid += 1
image = Variable(sample['image'].cuda())
focal = Variable(sample['focal'].cuda())
depth_gt = Variable(sample['depth'].cuda())
# Predict
depth_gt = depth_gt.cpu().numpy().squeeze()
depth_est, uncertainty = model(image, focal)
depth_est = depth_est.cpu().numpy().squeeze()
uncertainty = uncertainty.cpu().numpy().squeeze()
if args.clip_gt:
valid_mask = np.logical_and(depth_gt > args.min_depth_eval, depth_gt < args.max_depth_eval)
else:
valid_mask = (depth_gt > args.min_depth_eval)
            # We run in online_eval mode here; the uncropping below reproduces the test-mode preprocessing of the original BTS code.
if args.do_kb_crop:
height, width = depth_gt.shape
top_margin = int(height - 352)
left_margin = int((width - 1216) / 2)
pred_depth_uncropped = np.zeros((height, width), dtype=np.float32)
pred_depth_uncropped[top_margin:top_margin + 352, left_margin:left_margin + 1216] = depth_est
depth_est = pred_depth_uncropped
pred_depth_uncropped = np.zeros((height, width), dtype=np.float32)
pred_depth_uncropped[top_margin:top_margin + 352, left_margin:left_margin + 1216] = uncertainty
uncertainty = pred_depth_uncropped
if args.clip_gt:
depth_est[depth_est < args.min_depth_eval] = args.min_depth_eval
depth_est[depth_est > args.max_depth_eval] = args.max_depth_eval
depth_est[np.isinf(depth_est)] = args.max_depth_eval
depth_gt[np.isinf(depth_gt)] = args.max_depth_eval
depth_gt[np.isnan(depth_gt)] = args.min_depth_eval
if args.garg_crop:
gt_height, gt_width = depth_gt.shape
eval_mask = np.zeros(valid_mask.shape)
eval_mask[int(0.40810811 * gt_height):int(0.99189189 * gt_height), int(0.03594771 * gt_width):int(0.96405229 * gt_width)] = 1
valid_mask = np.logical_and(valid_mask, eval_mask)
uncertainty = torch.tensor(uncertainty).cuda()
depth_est = torch.tensor(depth_est).cuda()
depth_gt = torch.tensor(depth_gt).cuda()
valid_mask = torch.tensor(valid_mask).cuda()
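            # Accumulate the sparsification curves and their AUSE (area between the uncertainty-ordered
            # and the oracle-ordered error curves), once for RMSE and once for Abs Rel.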
hist_pred_rmse, hist_oracle_rmse, nb_remain_rmse, ausc_rmse[i] = sparsification_error_gpu(unc_tensor = uncertainty[valid_mask], pred_tensor = depth_est[valid_mask], gt_tensor = depth_gt[valid_mask], is_rmse = True)
hist_pred_rmses += hist_pred_rmse
hist_oracle_rmses += hist_oracle_rmse
nb_remain_rmses += nb_remain_rmse
spar_rmse += np.trapz((hist_pred_rmse - hist_oracle_rmse), x = list(np.arange(start=0.0, stop=1.0, step=(1/100))))
hist_pred_absrel, hist_oracle_absrel, nb_remain_absrel, ausc_absrel[i] = sparsification_error_gpu(unc_tensor = uncertainty[valid_mask], pred_tensor = depth_est[valid_mask], gt_tensor = depth_gt[valid_mask], is_rmse = False)
hist_pred_absrels += hist_pred_absrel
hist_oracle_absrels += hist_oracle_absrel
nb_remain_absrels += nb_remain_absrel
spar_absr += np.trapz((hist_pred_absrel - hist_oracle_absrel), x = list(np.arange(start=0.0, stop=1.0, step=(1/100))))
depth_est = depth_est.cpu().numpy()
depth_gt = depth_gt.cpu().numpy()
valid_mask = valid_mask.cpu().numpy()
silog[i], log10[i], abs_rel[i], sq_rel[i], rms[i], log_rms[i], d1[i], d2[i], d3[i] = compute_errors(depth_gt[valid_mask], depth_est[valid_mask])
print(nb_valid)
print("{:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}".format(
'd1', 'd2', 'd3', 'AbsRel', 'SqRel', 'RMSE', 'RMSElog', 'SILog', 'log10'))
print("{:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}".format(
d1.sum()/nb_valid, d2.sum()/nb_valid, d3.sum()/nb_valid,
abs_rel.sum()/nb_valid, sq_rel.sum()/nb_valid, rms.sum()/nb_valid,
log_rms.sum()/nb_valid, silog.sum()/nb_valid, log10.sum()/nb_valid))
hist_pred_rmses = hist_pred_rmses/nb_valid
hist_oracle_rmses = hist_oracle_rmses/nb_valid
nb_remain_rmses = nb_remain_rmses/nb_valid
hist_pred_absrels = hist_pred_absrels/nb_valid
hist_oracle_absrels = hist_oracle_absrels/nb_valid
nb_remain_absrels = nb_remain_absrels/nb_valid
spar_rmse = spar_rmse/nb_valid
spar_absr = spar_absr/nb_valid
# to verify that the averages obtained by the two different methods are consistent.
print('ausc_rmse', np.trapz((hist_pred_rmses - hist_oracle_rmses), x = list(np.arange(start=0.0, stop=1.0, step=(1/100)))))
print('ausc_abrel', np.trapz((hist_pred_absrels - hist_oracle_absrels), x = list(np.arange(start=0.0, stop=1.0, step=(1/100)))))
print('ausc_rmse', spar_rmse)
print('ausc_abrel', spar_absr)
    elapsed_time = time.time() - start_time
    print('Elapsed time: %s' % str(elapsed_time))
    print('Done.')
if __name__ == '__main__':
test() | 11,451 | 40.492754 | 235 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/sparsification.py | import numpy as np
import torch
"""Calculate the sparsification error.
Calcualte the sparsification error for a given array according to a reference array.
Args:
unc_tensor: Flatten estimated uncertainty tensor.
pred_tensor: Flatten depth prediction tensor.
gt_tensor: Flatten ground truth tensor.
    nb_bins: Number of bins used for the sparsification curve. At each step, the 1/nb_bins * 100% of items with the highest value are removed.
    return_hist: if True, also return the histograms needed to draw the sparsification curve; otherwise return only the sparsification error.
Returns:
    If return_hist is True (the default), the predicted and oracle error curves, the number of remaining items per removal fraction, and the sparsification error.
    Otherwise, only the sparsification error (the area between the two curves).
"""
def sparsification_error_gpu(unc_tensor, pred_tensor, gt_tensor, nb_bins = 100, return_hist=True, is_rmse = True):
hist_pred = []
hist_oracle = []
nb_remain = []
# From small to big
argsorted_U = torch.argsort(unc_tensor)
err_tensor = abs(pred_tensor - gt_tensor)
if not is_rmse:
err_tensor = err_tensor/gt_tensor
else:
err_tensor = err_tensor**2
argsorted_E = torch.argsort(err_tensor)
total_len = len(unc_tensor)
sigma_pred_curves = []
error_curves = []
fractions = list(torch.arange(start=0.0, end=1.0, step=(1/nb_bins)))
for fraction in fractions:
if is_rmse:
sigma_pred_curve = torch.mean(err_tensor[argsorted_U[0:int((1.0-fraction)*total_len)]])
error_curve = torch.mean(err_tensor[argsorted_E[0:int((1.0-fraction)*total_len)]])
sigma_pred_curve = torch.sqrt(sigma_pred_curve)
error_curve = torch.sqrt(error_curve)
else:
sigma_pred_curve = torch.mean(err_tensor[argsorted_U[0:int((1.0-fraction)*total_len)]])
error_curve = torch.mean(err_tensor[argsorted_E[0:int((1.0-fraction)*total_len)]])
sigma_pred_curves.append(sigma_pred_curve)
error_curves.append(error_curve)
nb_remain.append(int((1.0-fraction)*total_len))
hist_oracle = torch.tensor(error_curves)/error_curves[0].cpu()
hist_pred = torch.tensor(sigma_pred_curves)/sigma_pred_curves[0].cpu()
nb_remain = torch.tensor(nb_remain)
sparsification_errors_pred = torch.trapz((hist_pred - hist_oracle), torch.arange(start=0.0, end=1.0, step=(1/nb_bins)))
    # Without normalization; in our paper we use the code shown above.
# hist_oracle = torch.tensor(error_curves)
# hist_pred = torch.tensor(sigma_pred_curves)
# nb_remain = torch.tensor(nb_remain)
# sparsification_errors_pred = torch.trapz((hist_pred), torch.arange(start=0.0, end=1.0, step=(1/nb_bins))) - torch.trapz((hist_oracle), torch.arange(start=0.0, end=1.0, step=(1/nb_bins)))
if return_hist:
return hist_pred, hist_oracle, nb_remain, sparsification_errors_pred
else:
return sparsification_errors_pred | 3,034 | 42.357143 | 192 | py |
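
# Minimal usage sketch (an editorial illustration, not part of the original pipeline): with synthetic
# CPU tensors, using the true absolute error as the "uncertainty" should follow the oracle ordering,
# so the resulting sparsification error (AUSE) is expected to be close to zero.
if __name__ == '__main__':
    torch.manual_seed(0)
    gt = torch.rand(10000) * 80.0 + 1e-3                     # synthetic ground-truth depths
    pred = (gt + torch.randn(10000) * 0.5).clamp(min=1e-3)   # noisy synthetic predictions
    unc = (pred - gt).abs()                                  # oracle uncertainty = true error
    hist_pred, hist_oracle, nb_remain, ause = sparsification_error_gpu(unc, pred, gt, is_rmse=True)
    print('AUSE with oracle uncertainty (expected to be ~0): {:.6f}'.format(float(ause)))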
LDU | LDU-main/monocular_depth_estimation/pytorch/bts_dataloader.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import torch.utils.data.distributed
from torchvision import transforms
from PIL import Image
import os
import random
from distributed_sampler_no_evenly_divisible import *
def _is_pil_image(img):
return isinstance(img, Image.Image)
def _is_numpy_image(img):
return isinstance(img, np.ndarray) and (img.ndim in {2, 3})
def preprocessing_transforms(mode):
return transforms.Compose([
ToTensor(mode=mode)
])
class BtsDataLoader(object):
def __init__(self, args, mode):
if mode == 'train':
self.training_samples = DataLoadPreprocess(args, mode, transform=preprocessing_transforms(mode))
if args.distributed:
self.train_sampler = torch.utils.data.distributed.DistributedSampler(self.training_samples)
else:
self.train_sampler = None
self.data = DataLoader(self.training_samples, args.batch_size,
shuffle=(self.train_sampler is None),
num_workers=args.num_threads,
pin_memory=True,
sampler=self.train_sampler)
elif mode == 'online_eval':
self.testing_samples = DataLoadPreprocess(args, mode, transform=preprocessing_transforms(mode))
if args.distributed:
# self.eval_sampler = torch.utils.data.distributed.DistributedSampler(self.testing_samples, shuffle=False)
self.eval_sampler = DistributedSamplerNoEvenlyDivisible(self.testing_samples, shuffle=False)
else:
self.eval_sampler = None
self.data = DataLoader(self.testing_samples, 1,
shuffle=False,
num_workers=1,
pin_memory=True,
sampler=self.eval_sampler)
elif mode == 'test':
self.testing_samples = DataLoadPreprocess(args, mode, transform=preprocessing_transforms(mode))
self.data = DataLoader(self.testing_samples, 1, shuffle=False, num_workers=1)
else:
print('mode should be one of \'train, test, online_eval\'. Got {}'.format(mode))
class DataLoadPreprocess(Dataset):
def __init__(self, args, mode, transform=None, is_for_online_eval=False):
self.args = args
if mode == 'online_eval':
with open(args.filenames_file_eval, 'r') as f:
self.filenames = f.readlines()
else:
with open(args.filenames_file, 'r') as f:
self.filenames = f.readlines()
self.mode = mode
self.transform = transform
self.to_tensor = ToTensor
self.is_for_online_eval = is_for_online_eval
def __getitem__(self, idx):
sample_path = self.filenames[idx]
focal = float(sample_path.split()[2])
if self.mode == 'train':
if self.args.dataset == 'kitti' and self.args.use_right is True and random.random() > 0.5:
image_path = os.path.join(self.args.data_path, "./" + sample_path.split()[3])
depth_path = os.path.join(self.args.gt_path, "./" + sample_path.split()[4])
else:
image_path = os.path.join(self.args.data_path, "./" + sample_path.split()[0])
depth_path = os.path.join(self.args.gt_path, "./" + sample_path.split()[1])
image = Image.open(image_path)
depth_gt = Image.open(depth_path)
if self.args.do_kb_crop is True:
height = image.height
width = image.width
top_margin = int(height - 352)
left_margin = int((width - 1216) / 2)
depth_gt = depth_gt.crop((left_margin, top_margin, left_margin + 1216, top_margin + 352))
image = image.crop((left_margin, top_margin, left_margin + 1216, top_margin + 352))
# To avoid blank boundaries due to pixel registration
if self.args.dataset == 'nyu':
depth_gt = depth_gt.crop((43, 45, 608, 472))
image = image.crop((43, 45, 608, 472))
if self.args.do_random_rotate is True:
random_angle = (random.random() - 0.5) * 2 * self.args.degree
image = self.rotate_image(image, random_angle)
depth_gt = self.rotate_image(depth_gt, random_angle, flag=Image.NEAREST)
image = np.asarray(image, dtype=np.float32) / 255.0
depth_gt = np.asarray(depth_gt, dtype=np.float32)
depth_gt = np.expand_dims(depth_gt, axis=2)
if self.args.dataset == 'nyu':
depth_gt = depth_gt / 1000.0
else:
depth_gt = depth_gt / 256.0
image, depth_gt = self.random_crop(image, depth_gt, self.args.input_height, self.args.input_width)
image, depth_gt = self.train_preprocess(image, depth_gt)
sample = {'image': image, 'depth': depth_gt, 'focal': focal}
else:
if self.mode == 'online_eval':
data_path = self.args.data_path_eval
else:
data_path = self.args.data_path
image_path = os.path.join(data_path, "./" + sample_path.split()[0])
image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0
if self.mode == 'online_eval':
gt_path = self.args.gt_path_eval
depth_path = os.path.join(gt_path, "./" + sample_path.split()[1])
has_valid_depth = False
try:
depth_gt = Image.open(depth_path)
has_valid_depth = True
except IOError:
depth_gt = False
# print('Missing gt for {}'.format(image_path))
if has_valid_depth:
depth_gt = np.asarray(depth_gt, dtype=np.float32)
depth_gt = np.expand_dims(depth_gt, axis=2)
if self.args.dataset == 'nyu':
depth_gt = depth_gt / 1000.0
else:
depth_gt = depth_gt / 256.0
if self.args.do_kb_crop is True:
height = image.shape[0]
width = image.shape[1]
top_margin = int(height - 352)
left_margin = int((width - 1216) / 2)
image = image[top_margin:top_margin + 352, left_margin:left_margin + 1216, :]
if self.mode == 'online_eval' and has_valid_depth:
depth_gt = depth_gt[top_margin:top_margin + 352, left_margin:left_margin + 1216, :]
if self.mode == 'online_eval':
sample = {'image': image, 'depth': depth_gt, 'focal': focal, 'has_valid_depth': has_valid_depth}
else:
sample = {'image': image, 'focal': focal}
if self.transform:
sample = self.transform(sample)
return sample
def rotate_image(self, image, angle, flag=Image.BILINEAR):
result = image.rotate(angle, resample=flag)
return result
def random_crop(self, img, depth, height, width):
assert img.shape[0] >= height
assert img.shape[1] >= width
assert img.shape[0] == depth.shape[0]
assert img.shape[1] == depth.shape[1]
x = random.randint(0, img.shape[1] - width)
y = random.randint(0, img.shape[0] - height)
img = img[y:y + height, x:x + width, :]
depth = depth[y:y + height, x:x + width, :]
return img, depth
def train_preprocess(self, image, depth_gt):
# Random flipping
do_flip = random.random()
if do_flip > 0.5:
image = (image[:, ::-1, :]).copy()
depth_gt = (depth_gt[:, ::-1, :]).copy()
# Random gamma, brightness, color augmentation
do_augment = random.random()
if do_augment > 0.5:
image = self.augment_image(image)
return image, depth_gt
def augment_image(self, image):
# gamma augmentation
gamma = random.uniform(0.9, 1.1)
image_aug = image ** gamma
# brightness augmentation
if self.args.dataset == 'nyu':
brightness = random.uniform(0.75, 1.25)
else:
brightness = random.uniform(0.9, 1.1)
image_aug = image_aug * brightness
# color augmentation
colors = np.random.uniform(0.9, 1.1, size=3)
white = np.ones((image.shape[0], image.shape[1]))
color_image = np.stack([white * colors[i] for i in range(3)], axis=2)
image_aug *= color_image
image_aug = np.clip(image_aug, 0, 1)
return image_aug
def __len__(self):
return len(self.filenames)
class ToTensor(object):
def __init__(self, mode):
self.mode = mode
self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
def __call__(self, sample):
image, focal = sample['image'], sample['focal']
image = self.to_tensor(image)
image = self.normalize(image)
if self.mode == 'test':
return {'image': image, 'focal': focal}
depth = sample['depth']
if self.mode == 'train':
depth = self.to_tensor(depth)
return {'image': image, 'depth': depth, 'focal': focal}
else:
has_valid_depth = sample['has_valid_depth']
return {'image': image, 'depth': depth, 'focal': focal, 'has_valid_depth': has_valid_depth}
def to_tensor(self, pic):
if not (_is_pil_image(pic) or _is_numpy_image(pic)):
raise TypeError(
'pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
if isinstance(pic, np.ndarray):
img = torch.from_numpy(pic.transpose((2, 0, 1)))
return img
# handle PIL Image
if pic.mode == 'I':
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float()
else:
return img
| 11,674 | 38.982877 | 122 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/bts_eval.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from __future__ import absolute_import, division, print_function
import os
import argparse
import time
import numpy as np
import cv2
import sys
import torch
import torch.nn as nn
import torch.nn.utils as utils
import torchvision.utils as vutils
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from bts_dataloader import *
def convert_arg_line_to_args(arg_line):
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
parser = argparse.ArgumentParser(description='BTS PyTorch implementation.', fromfile_prefix_chars='@')
parser.convert_arg_line_to_args = convert_arg_line_to_args
parser.add_argument('--model_name', type=str, help='model name', default='bts_v0_0_1')
parser.add_argument('--encoder', type=str, help='type of encoder, densenet121_bts or densenet161_bts',
default='densenet161_bts')
parser.add_argument('--data_path', type=str, help='path to the data', required=True)
parser.add_argument('--gt_path', type=str, help='path to the groundtruth data', required=False)
parser.add_argument('--filenames_file', type=str, help='path to the filenames text file', required=True)
parser.add_argument('--input_height', type=int, help='input height', default=480)
parser.add_argument('--input_width', type=int, help='input width', default=640)
parser.add_argument('--max_depth', type=float, help='maximum depth in estimation', default=80)
parser.add_argument('--output_directory', type=str,
help='output directory for summary, if empty outputs to checkpoint folder', default='')
parser.add_argument('--checkpoint_path', type=str, help='path to a specific checkpoint to load', default='')
parser.add_argument('--dataset', type=str, help='dataset to train on, make3d or nyudepthv2', default='nyu')
parser.add_argument('--eigen_crop', help='if set, crops according to Eigen NIPS14', action='store_true')
parser.add_argument('--garg_crop', help='if set, crops according to Garg ECCV16', action='store_true')
parser.add_argument('--min_depth_eval', type=float, help='minimum depth for evaluation', default=1e-3)
parser.add_argument('--max_depth_eval', type=float, help='maximum depth for evaluation', default=80)
parser.add_argument('--do_kb_crop', help='if set, crop input images as kitti benchmark images', action='store_true')
parser.add_argument('--bts_size', type=int, help='initial num_filters in bts', default=512)
if sys.argv.__len__() == 2:
arg_filename_with_prefix = '@' + sys.argv[1]
args = parser.parse_args([arg_filename_with_prefix])
else:
args = parser.parse_args()
model_dir = os.path.dirname(args.checkpoint_path)
sys.path.append(model_dir)
for key, val in vars(__import__(args.model_name)).items():
if key.startswith('__') and key.endswith('__'):
continue
vars()[key] = val
def compute_errors(gt, pred):
thresh = np.maximum((gt / pred), (pred / gt))
d1 = (thresh < 1.25).mean()
d2 = (thresh < 1.25 ** 2).mean()
d3 = (thresh < 1.25 ** 3).mean()
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
err = np.log(pred) - np.log(gt)
silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
err = np.abs(np.log10(pred) - np.log10(gt))
log10 = np.mean(err)
return silog, log10, abs_rel, sq_rel, rmse, rmse_log, d1, d2, d3
def get_num_lines(file_path):
f = open(file_path, 'r')
lines = f.readlines()
f.close()
return len(lines)
def test(params):
global gt_depths, is_missing, missing_ids
gt_depths = []
is_missing = []
missing_ids = set()
write_summary = False
steps = set()
if os.path.isdir(args.checkpoint_path):
import glob
models = [f for f in glob.glob(args.checkpoint_path + "/model*")]
for model in models:
step = model.split('-')[-1]
steps.add('{:06d}'.format(int(step)))
lines = []
if os.path.exists(args.checkpoint_path + '/evaluated_checkpoints'):
with open(args.checkpoint_path + '/evaluated_checkpoints') as file:
lines = file.readlines()
for line in lines:
if line.rstrip() in steps:
steps.remove(line.rstrip())
steps = sorted(steps)
if args.output_directory != '':
summary_path = os.path.join(args.output_directory, args.model_name)
else:
summary_path = os.path.join(args.checkpoint_path, 'eval')
write_summary = True
else:
steps.add('{:06d}'.format(int(args.checkpoint_path.split('-')[-1])))
if len(steps) == 0:
print('No new model to evaluate. Abort.')
return
args.mode = 'test'
    dataloader = BtsDataLoader(args, 'test')  # BtsDataLoader expects 'train', 'online_eval' or 'test'
model = BtsModel(params=params)
model = torch.nn.DataParallel(model)
cudnn.benchmark = True
if write_summary:
summary_writer = SummaryWriter(summary_path, flush_secs=30)
for step in steps:
if os.path.isdir(args.checkpoint_path):
checkpoint = torch.load(os.path.join(args.checkpoint_path, 'model-' + str(int(step))))
model.load_state_dict(checkpoint['model'])
else:
checkpoint = torch.load(args.checkpoint_path)
model.load_state_dict(checkpoint['model'])
model.eval()
model.cuda()
num_test_samples = get_num_lines(args.filenames_file)
with open(args.filenames_file) as f:
lines = f.readlines()
print('now testing {} files for step {}'.format(num_test_samples, step))
pred_depths = []
start_time = time.time()
with torch.no_grad():
for _, sample in enumerate(dataloader.data):
image = Variable(sample['image'].cuda())
focal = Variable(sample['focal'].cuda())
# image = Variable(sample['image'])
# focal = Variable(sample['focal'])
# Predict
lpg8x8, lpg4x4, lpg2x2, reduc1x1, depth_est = model(image, focal)
pred_depths.append(depth_est.cpu().numpy().squeeze())
        elapsed_time = time.time() - start_time
        print('Elapsed time: %s' % str(elapsed_time))
        print('Done.')
if len(gt_depths) == 0:
for t_id in range(num_test_samples):
gt_depth_path = os.path.join(args.gt_path, lines[t_id].split()[1])
depth = cv2.imread(gt_depth_path, -1)
if depth is None:
print('Missing: %s ' % gt_depth_path)
missing_ids.add(t_id)
continue
if args.dataset == 'nyu':
depth = depth.astype(np.float32) / 1000.0
else:
depth = depth.astype(np.float32) / 256.0
gt_depths.append(depth)
print('Computing errors')
silog, log10, abs_rel, sq_rel, rms, log_rms, d1, d2, d3 = eval(pred_depths, int(step))
if write_summary:
summary_writer.add_scalar('silog', silog.mean(), int(step))
summary_writer.add_scalar('abs_rel', abs_rel.mean(), int(step))
summary_writer.add_scalar('log10', log10.mean(), int(step))
summary_writer.add_scalar('sq_rel', sq_rel.mean(), int(step))
summary_writer.add_scalar('rms', rms.mean(), int(step))
summary_writer.add_scalar('log_rms', log_rms.mean(), int(step))
summary_writer.add_scalar('d1', d1.mean(), int(step))
summary_writer.add_scalar('d2', d2.mean(), int(step))
summary_writer.add_scalar('d3', d3.mean(), int(step))
summary_writer.flush()
with open(os.path.dirname(args.checkpoint_path) + '/evaluated_checkpoints', 'a') as file:
file.write(step + '\n')
print('Evaluation done')
def eval(pred_depths, step):
num_samples = get_num_lines(args.filenames_file)
pred_depths_valid = []
for t_id in range(num_samples):
if t_id in missing_ids:
continue
pred_depths_valid.append(pred_depths[t_id])
num_samples = num_samples - len(missing_ids)
silog = np.zeros(num_samples, np.float32)
log10 = np.zeros(num_samples, np.float32)
rms = np.zeros(num_samples, np.float32)
log_rms = np.zeros(num_samples, np.float32)
abs_rel = np.zeros(num_samples, np.float32)
sq_rel = np.zeros(num_samples, np.float32)
d1 = np.zeros(num_samples, np.float32)
d2 = np.zeros(num_samples, np.float32)
d3 = np.zeros(num_samples, np.float32)
for i in range(num_samples):
gt_depth = gt_depths[i]
pred_depth = pred_depths_valid[i]
if args.do_kb_crop:
height, width = gt_depth.shape
top_margin = int(height - 352)
left_margin = int((width - 1216) / 2)
pred_depth_uncropped = np.zeros((height, width), dtype=np.float32)
pred_depth_uncropped[top_margin:top_margin + 352, left_margin:left_margin + 1216] = pred_depth
pred_depth = pred_depth_uncropped
pred_depth[pred_depth < args.min_depth_eval] = args.min_depth_eval
pred_depth[pred_depth > args.max_depth_eval] = args.max_depth_eval
pred_depth[np.isinf(pred_depth)] = args.max_depth_eval
pred_depth[np.isnan(pred_depth)] = args.min_depth_eval
valid_mask = np.logical_and(gt_depth > args.min_depth_eval, gt_depth < args.max_depth_eval)
if args.garg_crop or args.eigen_crop:
gt_height, gt_width = gt_depth.shape
eval_mask = np.zeros(valid_mask.shape)
if args.garg_crop:
eval_mask[int(0.40810811 * gt_height):int(0.99189189 * gt_height), int(0.03594771 * gt_width):int(0.96405229 * gt_width)] = 1
elif args.eigen_crop:
if args.dataset == 'kitti':
eval_mask[int(0.3324324 * gt_height):int(0.91351351 * gt_height), int(0.0359477 * gt_width):int(0.96405229 * gt_width)] = 1
else:
eval_mask[45:471, 41:601] = 1
valid_mask = np.logical_and(valid_mask, eval_mask)
silog[i], log10[i], abs_rel[i], sq_rel[i], rms[i], log_rms[i], d1[i], d2[i], d3[i] = compute_errors(
gt_depth[valid_mask], pred_depth[valid_mask])
print("{:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}".format('silog', 'abs_rel', 'log10', 'rms',
'sq_rel', 'log_rms', 'd1', 'd2', 'd3'))
print("{:7.4f}, {:7.4f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}".format(
silog.mean(), abs_rel.mean(), log10.mean(), rms.mean(), sq_rel.mean(), log_rms.mean(), d1.mean(), d2.mean(),
d3.mean()))
return silog, log10, abs_rel, sq_rel, rms, log_rms, d1, d2, d3
if __name__ == '__main__':
test(args) | 12,104 | 38.819079 | 143 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/bts_test.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from __future__ import absolute_import, division, print_function
import os
import argparse
import time
import numpy as np
import cv2
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
from bts_dataloader import *
import errno
import matplotlib.pyplot as plt
from tqdm import tqdm
from bts_dataloader import *
def convert_arg_line_to_args(arg_line):
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
parser = argparse.ArgumentParser(description='BTS PyTorch implementation.', fromfile_prefix_chars='@')
parser.convert_arg_line_to_args = convert_arg_line_to_args
parser.add_argument('--model_name', type=str, help='model name', default='bts_nyu_v2')
parser.add_argument('--encoder', type=str, help='type of encoder, vgg or densenet121_bts or densenet161_bts',
default='densenet161_bts')
parser.add_argument('--data_path', type=str, help='path to the data', required=True)
parser.add_argument('--filenames_file', type=str, help='path to the filenames text file', required=True)
parser.add_argument('--input_height', type=int, help='input height', default=480)
parser.add_argument('--input_width', type=int, help='input width', default=640)
parser.add_argument('--max_depth', type=float, help='maximum depth in estimation', default=80)
parser.add_argument('--checkpoint_path', type=str, help='path to a specific checkpoint to load', default='')
parser.add_argument('--dataset', type=str, help='dataset to train on, make3d or nyudepthv2', default='nyu')
parser.add_argument('--do_kb_crop', help='if set, crop input images as kitti benchmark images', action='store_true')
parser.add_argument('--save_lpg', help='if set, save outputs from lpg layers', action='store_true')
parser.add_argument('--bts_size', type=int, help='initial num_filters in bts', default=512)
if sys.argv.__len__() == 2:
arg_filename_with_prefix = '@' + sys.argv[1]
args = parser.parse_args([arg_filename_with_prefix])
else:
args = parser.parse_args()
model_dir = os.path.dirname(args.checkpoint_path)
sys.path.append(model_dir)
for key, val in vars(__import__(args.model_name)).items():
if key.startswith('__') and key.endswith('__'):
continue
vars()[key] = val
def get_num_lines(file_path):
f = open(file_path, 'r')
lines = f.readlines()
f.close()
return len(lines)
def test(params):
"""Test function."""
args.mode = 'test'
dataloader = BtsDataLoader(args, 'test')
model = BtsModel(params=args)
model = torch.nn.DataParallel(model)
checkpoint = torch.load(args.checkpoint_path)
model.load_state_dict(checkpoint['model'])
model.eval()
model.cuda()
num_params = sum([np.prod(p.size()) for p in model.parameters()])
print("Total number of parameters: {}".format(num_params))
num_test_samples = get_num_lines(args.filenames_file)
with open(args.filenames_file) as f:
lines = f.readlines()
print('now testing {} files with {}'.format(num_test_samples, args.checkpoint_path))
pred_depths = []
pred_8x8s = []
pred_4x4s = []
pred_2x2s = []
pred_1x1s = []
start_time = time.time()
with torch.no_grad():
for _, sample in enumerate(tqdm(dataloader.data)):
image = Variable(sample['image'].cuda())
focal = Variable(sample['focal'].cuda())
# Predict
lpg8x8, lpg4x4, lpg2x2, reduc1x1, depth_est = model(image, focal)
pred_depths.append(depth_est.cpu().numpy().squeeze())
pred_8x8s.append(lpg8x8[0].cpu().numpy().squeeze())
pred_4x4s.append(lpg4x4[0].cpu().numpy().squeeze())
pred_2x2s.append(lpg2x2[0].cpu().numpy().squeeze())
pred_1x1s.append(reduc1x1[0].cpu().numpy().squeeze())
elapsed_time = time.time() - start_time
    print('Elapsed time: %s' % str(elapsed_time))
print('Done.')
save_name = 'result_' + args.model_name
print('Saving result pngs..')
if not os.path.exists(os.path.dirname(save_name)):
try:
os.mkdir(save_name)
os.mkdir(save_name + '/raw')
os.mkdir(save_name + '/cmap')
os.mkdir(save_name + '/rgb')
os.mkdir(save_name + '/gt')
except OSError as e:
if e.errno != errno.EEXIST:
raise
for s in tqdm(range(num_test_samples)):
if args.dataset == 'kitti':
date_drive = lines[s].split('/')[1]
filename_pred_png = save_name + '/raw/' + date_drive + '_' + lines[s].split()[0].split('/')[-1].replace(
'.jpg', '.png')
filename_cmap_png = save_name + '/cmap/' + date_drive + '_' + lines[s].split()[0].split('/')[
-1].replace('.jpg', '.png')
filename_image_png = save_name + '/rgb/' + date_drive + '_' + lines[s].split()[0].split('/')[-1]
elif args.dataset == 'kitti_benchmark':
filename_pred_png = save_name + '/raw/' + lines[s].split()[0].split('/')[-1].replace('.jpg', '.png')
filename_cmap_png = save_name + '/cmap/' + lines[s].split()[0].split('/')[-1].replace('.jpg', '.png')
filename_image_png = save_name + '/rgb/' + lines[s].split()[0].split('/')[-1]
else:
scene_name = lines[s].split()[0].split('/')[0]
filename_pred_png = save_name + '/raw/' + scene_name + '_' + lines[s].split()[0].split('/')[1].replace(
'.jpg', '.png')
filename_cmap_png = save_name + '/cmap/' + scene_name + '_' + lines[s].split()[0].split('/')[1].replace(
'.jpg', '.png')
filename_gt_png = save_name + '/gt/' + scene_name + '_' + lines[s].split()[0].split('/')[1].replace(
'.jpg', '.png')
filename_image_png = save_name + '/rgb/' + scene_name + '_' + lines[s].split()[0].split('/')[1]
rgb_path = os.path.join(args.data_path, './' + lines[s].split()[0])
image = cv2.imread(rgb_path)
if args.dataset == 'nyu':
gt_path = os.path.join(args.data_path, './' + lines[s].split()[1])
gt = cv2.imread(gt_path, -1).astype(np.float32) / 1000.0 # Visualization purpose only
gt[gt == 0] = np.amax(gt)
pred_depth = pred_depths[s]
pred_8x8 = pred_8x8s[s]
pred_4x4 = pred_4x4s[s]
pred_2x2 = pred_2x2s[s]
pred_1x1 = pred_1x1s[s]
if args.dataset == 'kitti' or args.dataset == 'kitti_benchmark':
pred_depth_scaled = pred_depth * 256.0
else:
pred_depth_scaled = pred_depth * 1000.0
pred_depth_scaled = pred_depth_scaled.astype(np.uint16)
cv2.imwrite(filename_pred_png, pred_depth_scaled, [cv2.IMWRITE_PNG_COMPRESSION, 0])
if args.save_lpg:
cv2.imwrite(filename_image_png, image[10:-1 - 9, 10:-1 - 9, :])
if args.dataset == 'nyu':
plt.imsave(filename_gt_png, np.log10(gt[10:-1 - 9, 10:-1 - 9]), cmap='Greys')
pred_depth_cropped = pred_depth[10:-1 - 9, 10:-1 - 9]
plt.imsave(filename_cmap_png, np.log10(pred_depth_cropped), cmap='Greys')
pred_8x8_cropped = pred_8x8[10:-1 - 9, 10:-1 - 9]
filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_8x8.png')
plt.imsave(filename_lpg_cmap_png, np.log10(pred_8x8_cropped), cmap='Greys')
pred_4x4_cropped = pred_4x4[10:-1 - 9, 10:-1 - 9]
filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_4x4.png')
plt.imsave(filename_lpg_cmap_png, np.log10(pred_4x4_cropped), cmap='Greys')
pred_2x2_cropped = pred_2x2[10:-1 - 9, 10:-1 - 9]
filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_2x2.png')
plt.imsave(filename_lpg_cmap_png, np.log10(pred_2x2_cropped), cmap='Greys')
pred_1x1_cropped = pred_1x1[10:-1 - 9, 10:-1 - 9]
filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_1x1.png')
plt.imsave(filename_lpg_cmap_png, np.log10(pred_1x1_cropped), cmap='Greys')
else:
plt.imsave(filename_cmap_png, np.log10(pred_depth), cmap='Greys')
filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_8x8.png')
plt.imsave(filename_lpg_cmap_png, np.log10(pred_8x8), cmap='Greys')
filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_4x4.png')
plt.imsave(filename_lpg_cmap_png, np.log10(pred_4x4), cmap='Greys')
filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_2x2.png')
plt.imsave(filename_lpg_cmap_png, np.log10(pred_2x2), cmap='Greys')
filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_1x1.png')
plt.imsave(filename_lpg_cmap_png, np.log10(pred_1x1), cmap='Greys')
return
if __name__ == '__main__':
test(args)
| 9,732 | 43.040724 | 116 | py |
LDU | LDU-main/monocular_depth_estimation/pytorch/bts.py | # Copyright (C) 2019 Jin Han Lee
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import torch
import torch.nn as nn
import torch.nn.functional as torch_nn_func
import math
from collections import namedtuple
# This sets the batch norm layers in pytorch as if {'is_training': False, 'scale': True} in tensorflow
def bn_init_as_tf(m):
if isinstance(m, nn.BatchNorm2d):
m.track_running_stats = True # These two lines enable using stats (moving mean and var) loaded from pretrained model
m.eval() # or zero mean and variance of one if the batch norm layer has no pretrained values
m.affine = True
m.requires_grad = True
def weights_init_xavier(m):
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
torch.nn.init.zeros_(m.bias)
class silog_loss(nn.Module):
def __init__(self, variance_focus):
super(silog_loss, self).__init__()
self.variance_focus = variance_focus
def forward(self, depth_est, depth_gt, mask):
d = torch.log(depth_est[mask]) - torch.log(depth_gt[mask])
return torch.sqrt((d ** 2).mean() - self.variance_focus * (d.mean() ** 2)) * 10.0
class atrous_conv(nn.Sequential):
def __init__(self, in_channels, out_channels, dilation, apply_bn_first=True):
super(atrous_conv, self).__init__()
self.atrous_conv = torch.nn.Sequential()
if apply_bn_first:
self.atrous_conv.add_module('first_bn', nn.BatchNorm2d(in_channels, momentum=0.01, affine=True, track_running_stats=True, eps=1.1e-5))
self.atrous_conv.add_module('aconv_sequence', nn.Sequential(nn.ReLU(),
nn.Conv2d(in_channels=in_channels, out_channels=out_channels*2, bias=False, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(out_channels*2, momentum=0.01, affine=True, track_running_stats=True),
nn.ReLU(),
nn.Conv2d(in_channels=out_channels * 2, out_channels=out_channels, bias=False, kernel_size=3, stride=1,
padding=(dilation, dilation), dilation=dilation)))
def forward(self, x):
return self.atrous_conv.forward(x)
class upconv(nn.Module):
def __init__(self, in_channels, out_channels, ratio=2):
super(upconv, self).__init__()
self.elu = nn.ELU()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, bias=False, kernel_size=3, stride=1, padding=1)
self.ratio = ratio
def forward(self, x):
up_x = torch_nn_func.interpolate(x, scale_factor=self.ratio, mode='nearest')
out = self.conv(up_x)
out = self.elu(out)
return out
class reduction_1x1(nn.Sequential):
def __init__(self, num_in_filters, num_out_filters, max_depth, is_final=False):
super(reduction_1x1, self).__init__()
self.max_depth = max_depth
self.is_final = is_final
self.sigmoid = nn.Sigmoid()
self.reduc = torch.nn.Sequential()
while num_out_filters >= 4:
if num_out_filters < 8:
if self.is_final:
self.reduc.add_module('final', torch.nn.Sequential(nn.Conv2d(num_in_filters, out_channels=1, bias=False,
kernel_size=1, stride=1, padding=0),
nn.Sigmoid()))
else:
self.reduc.add_module('plane_params', torch.nn.Conv2d(num_in_filters, out_channels=3, bias=False,
kernel_size=1, stride=1, padding=0))
break
else:
self.reduc.add_module('inter_{}_{}'.format(num_in_filters, num_out_filters),
torch.nn.Sequential(nn.Conv2d(in_channels=num_in_filters, out_channels=num_out_filters,
bias=False, kernel_size=1, stride=1, padding=0),
nn.ELU()))
num_in_filters = num_out_filters
num_out_filters = num_out_filters // 2
def forward(self, net):
net = self.reduc.forward(net)
if not self.is_final:
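            # decode the spherical parametrisation: (theta, phi) define a unit plane normal (n1, n2, n3)
            # and dist is the plane offset n4, scaled to [0, max_depth]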
theta = self.sigmoid(net[:, 0, :, :]) * math.pi / 3
phi = self.sigmoid(net[:, 1, :, :]) * math.pi * 2
dist = self.sigmoid(net[:, 2, :, :]) * self.max_depth
n1 = torch.mul(torch.sin(theta), torch.cos(phi)).unsqueeze(1)
n2 = torch.mul(torch.sin(theta), torch.sin(phi)).unsqueeze(1)
n3 = torch.cos(theta).unsqueeze(1)
n4 = dist.unsqueeze(1)
net = torch.cat([n1, n2, n3, n4], dim=1)
return net
class local_planar_guidance(nn.Module):
def __init__(self, upratio):
super(local_planar_guidance, self).__init__()
self.upratio = upratio
self.u = torch.arange(self.upratio).reshape([1, 1, self.upratio]).float()
self.v = torch.arange(int(self.upratio)).reshape([1, self.upratio, 1]).float()
self.upratio = float(upratio)
def forward(self, plane_eq, focal):
plane_eq_expanded = torch.repeat_interleave(plane_eq, int(self.upratio), 2)
plane_eq_expanded = torch.repeat_interleave(plane_eq_expanded, int(self.upratio), 3)
n1 = plane_eq_expanded[:, 0, :, :]
n2 = plane_eq_expanded[:, 1, :, :]
n3 = plane_eq_expanded[:, 2, :, :]
n4 = plane_eq_expanded[:, 3, :, :]
u = self.u.repeat(plane_eq.size(0), plane_eq.size(2) * int(self.upratio), plane_eq.size(3)).cuda()
u = (u - (self.upratio - 1) * 0.5) / self.upratio
v = self.v.repeat(plane_eq.size(0), plane_eq.size(2), plane_eq.size(3) * int(self.upratio)).cuda()
v = (v - (self.upratio - 1) * 0.5) / self.upratio
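        # per-pixel depth from the local plane equation: depth = n4 / (n1*u + n2*v + n3),
        # where (u, v) are the normalised sub-pixel offsets inside each upratio x upratio patch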
return n4 / (n1 * u + n2 * v + n3)
class bts(nn.Module):
def __init__(self, params, feat_out_channels, num_features=512):
super(bts, self).__init__()
self.params = params
self.upconv5 = upconv(feat_out_channels[4], num_features)
self.bn5 = nn.BatchNorm2d(num_features, momentum=0.01, affine=True, eps=1.1e-5)
self.conv5 = torch.nn.Sequential(nn.Conv2d(num_features + feat_out_channels[3], num_features, 3, 1, 1, bias=False),
nn.ELU())
self.upconv4 = upconv(num_features, num_features // 2)
self.bn4 = nn.BatchNorm2d(num_features // 2, momentum=0.01, affine=True, eps=1.1e-5)
self.conv4 = torch.nn.Sequential(nn.Conv2d(num_features // 2 + feat_out_channels[2], num_features // 2, 3, 1, 1, bias=False),
nn.ELU())
self.bn4_2 = nn.BatchNorm2d(num_features // 2, momentum=0.01, affine=True, eps=1.1e-5)
self.daspp_3 = atrous_conv(num_features // 2, num_features // 4, 3, apply_bn_first=False)
self.daspp_6 = atrous_conv(num_features // 2 + num_features // 4 + feat_out_channels[2], num_features // 4, 6)
self.daspp_12 = atrous_conv(num_features + feat_out_channels[2], num_features // 4, 12)
self.daspp_18 = atrous_conv(num_features + num_features // 4 + feat_out_channels[2], num_features // 4, 18)
self.daspp_24 = atrous_conv(num_features + num_features // 2 + feat_out_channels[2], num_features // 4, 24)
self.daspp_conv = torch.nn.Sequential(nn.Conv2d(num_features + num_features // 2 + num_features // 4, num_features // 4, 3, 1, 1, bias=False),
nn.ELU())
self.reduc8x8 = reduction_1x1(num_features // 4, num_features // 4, self.params.max_depth)
self.lpg8x8 = local_planar_guidance(8)
self.upconv3 = upconv(num_features // 4, num_features // 4)
self.bn3 = nn.BatchNorm2d(num_features // 4, momentum=0.01, affine=True, eps=1.1e-5)
self.conv3 = torch.nn.Sequential(nn.Conv2d(num_features // 4 + feat_out_channels[1] + 1, num_features // 4, 3, 1, 1, bias=False),
nn.ELU())
self.reduc4x4 = reduction_1x1(num_features // 4, num_features // 8, self.params.max_depth)
self.lpg4x4 = local_planar_guidance(4)
self.upconv2 = upconv(num_features // 4, num_features // 8)
self.bn2 = nn.BatchNorm2d(num_features // 8, momentum=0.01, affine=True, eps=1.1e-5)
self.conv2 = torch.nn.Sequential(nn.Conv2d(num_features // 8 + feat_out_channels[0] + 1, num_features // 8, 3, 1, 1, bias=False),
nn.ELU())
self.reduc2x2 = reduction_1x1(num_features // 8, num_features // 16, self.params.max_depth)
self.lpg2x2 = local_planar_guidance(2)
self.upconv1 = upconv(num_features // 8, num_features // 16)
self.reduc1x1 = reduction_1x1(num_features // 16, num_features // 32, self.params.max_depth, is_final=True)
self.conv1 = torch.nn.Sequential(nn.Conv2d(num_features // 16 + 4, num_features // 16, 3, 1, 1, bias=False),
nn.ELU())
self.get_depth = torch.nn.Sequential(nn.Conv2d(num_features // 16, 1, 3, 1, 1, bias=False),
nn.Sigmoid())
def forward(self, features, focal):
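        # decoder: progressively upsample the dense features while fusing encoder skips and the dense ASPP
        # block; at each scale a local planar guidance (LPG) layer predicts a depth map that conditions the
        # next finer scale before the final sigmoid depth head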
skip0, skip1, skip2, skip3 = features[0], features[1], features[2], features[3]
dense_features = torch.nn.ReLU()(features[4])
upconv5 = self.upconv5(dense_features) # H/16
upconv5 = self.bn5(upconv5)
concat5 = torch.cat([upconv5, skip3], dim=1)
iconv5 = self.conv5(concat5)
upconv4 = self.upconv4(iconv5) # H/8
upconv4 = self.bn4(upconv4)
concat4 = torch.cat([upconv4, skip2], dim=1)
iconv4 = self.conv4(concat4)
iconv4 = self.bn4_2(iconv4)
daspp_3 = self.daspp_3(iconv4)
concat4_2 = torch.cat([concat4, daspp_3], dim=1)
daspp_6 = self.daspp_6(concat4_2)
concat4_3 = torch.cat([concat4_2, daspp_6], dim=1)
daspp_12 = self.daspp_12(concat4_3)
concat4_4 = torch.cat([concat4_3, daspp_12], dim=1)
daspp_18 = self.daspp_18(concat4_4)
concat4_5 = torch.cat([concat4_4, daspp_18], dim=1)
daspp_24 = self.daspp_24(concat4_5)
concat4_daspp = torch.cat([iconv4, daspp_3, daspp_6, daspp_12, daspp_18, daspp_24], dim=1)
daspp_feat = self.daspp_conv(concat4_daspp)
reduc8x8 = self.reduc8x8(daspp_feat)
plane_normal_8x8 = reduc8x8[:, :3, :, :]
plane_normal_8x8 = torch_nn_func.normalize(plane_normal_8x8, 2, 1)
plane_dist_8x8 = reduc8x8[:, 3, :, :]
plane_eq_8x8 = torch.cat([plane_normal_8x8, plane_dist_8x8.unsqueeze(1)], 1)
depth_8x8 = self.lpg8x8(plane_eq_8x8, focal)
depth_8x8_scaled = depth_8x8.unsqueeze(1) / self.params.max_depth
depth_8x8_scaled_ds = torch_nn_func.interpolate(depth_8x8_scaled, scale_factor=0.25, mode='nearest')
upconv3 = self.upconv3(daspp_feat) # H/4
upconv3 = self.bn3(upconv3)
concat3 = torch.cat([upconv3, skip1, depth_8x8_scaled_ds], dim=1)
iconv3 = self.conv3(concat3)
reduc4x4 = self.reduc4x4(iconv3)
plane_normal_4x4 = reduc4x4[:, :3, :, :]
plane_normal_4x4 = torch_nn_func.normalize(plane_normal_4x4, 2, 1)
plane_dist_4x4 = reduc4x4[:, 3, :, :]
plane_eq_4x4 = torch.cat([plane_normal_4x4, plane_dist_4x4.unsqueeze(1)], 1)
depth_4x4 = self.lpg4x4(plane_eq_4x4, focal)
depth_4x4_scaled = depth_4x4.unsqueeze(1) / self.params.max_depth
depth_4x4_scaled_ds = torch_nn_func.interpolate(depth_4x4_scaled, scale_factor=0.5, mode='nearest')
upconv2 = self.upconv2(iconv3) # H/2
upconv2 = self.bn2(upconv2)
concat2 = torch.cat([upconv2, skip0, depth_4x4_scaled_ds], dim=1)
iconv2 = self.conv2(concat2)
reduc2x2 = self.reduc2x2(iconv2)
plane_normal_2x2 = reduc2x2[:, :3, :, :]
plane_normal_2x2 = torch_nn_func.normalize(plane_normal_2x2, 2, 1)
plane_dist_2x2 = reduc2x2[:, 3, :, :]
plane_eq_2x2 = torch.cat([plane_normal_2x2, plane_dist_2x2.unsqueeze(1)], 1)
depth_2x2 = self.lpg2x2(plane_eq_2x2, focal)
depth_2x2_scaled = depth_2x2.unsqueeze(1) / self.params.max_depth
upconv1 = self.upconv1(iconv2)
reduc1x1 = self.reduc1x1(upconv1)
concat1 = torch.cat([upconv1, reduc1x1, depth_2x2_scaled, depth_4x4_scaled, depth_8x8_scaled], dim=1)
iconv1 = self.conv1(concat1)
final_depth = self.params.max_depth * self.get_depth(iconv1)
if self.params.dataset == 'kitti':
final_depth = final_depth * focal.view(-1, 1, 1, 1).float() / 715.0873
return depth_8x8_scaled, depth_4x4_scaled, depth_2x2_scaled, reduc1x1, final_depth
class encoder(nn.Module):
def __init__(self, params):
super(encoder, self).__init__()
self.params = params
import torchvision.models as models
if params.encoder == 'densenet121_bts':
self.base_model = models.densenet121(pretrained=True).features
self.feat_names = ['relu0', 'pool0', 'transition1', 'transition2', 'norm5']
self.feat_out_channels = [64, 64, 128, 256, 1024]
elif params.encoder == 'densenet161_bts':
self.base_model = models.densenet161(pretrained=True).features
self.feat_names = ['relu0', 'pool0', 'transition1', 'transition2', 'norm5']
self.feat_out_channels = [96, 96, 192, 384, 2208]
elif params.encoder == 'resnet50_bts':
self.base_model = models.resnet50(pretrained=True)
self.feat_names = ['relu', 'layer1', 'layer2', 'layer3', 'layer4']
self.feat_out_channels = [64, 256, 512, 1024, 2048]
elif params.encoder == 'resnet101_bts':
self.base_model = models.resnet101(pretrained=True)
self.feat_names = ['relu', 'layer1', 'layer2', 'layer3', 'layer4']
self.feat_out_channels = [64, 256, 512, 1024, 2048]
elif params.encoder == 'resnext50_bts':
self.base_model = models.resnext50_32x4d(pretrained=True)
self.feat_names = ['relu', 'layer1', 'layer2', 'layer3', 'layer4']
self.feat_out_channels = [64, 256, 512, 1024, 2048]
elif params.encoder == 'resnext101_bts':
self.base_model = models.resnext101_32x8d(pretrained=True)
self.feat_names = ['relu', 'layer1', 'layer2', 'layer3', 'layer4']
self.feat_out_channels = [64, 256, 512, 1024, 2048]
elif params.encoder == 'mobilenetv2_bts':
self.base_model = models.mobilenet_v2(pretrained=True).features
self.feat_inds = [2, 4, 7, 11, 19]
self.feat_out_channels = [16, 24, 32, 64, 1280]
self.feat_names = []
else:
print('Not supported encoder: {}'.format(params.encoder))
def forward(self, x):
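        # run the backbone and collect intermediate feature maps (named layers, or fixed block indices for
        # MobileNetV2) as skip connections, skipping any classifier head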
feature = x
skip_feat = []
i = 1
for k, v in self.base_model._modules.items():
if 'fc' in k or 'avgpool' in k:
continue
feature = v(feature)
if self.params.encoder == 'mobilenetv2_bts':
if i == 2 or i == 4 or i == 7 or i == 11 or i == 19:
skip_feat.append(feature)
else:
if any(x in k for x in self.feat_names):
skip_feat.append(feature)
i = i + 1
return skip_feat
class BtsModel(nn.Module):
def __init__(self, params):
super(BtsModel, self).__init__()
self.encoder = encoder(params)
self.decoder = bts(params, self.encoder.feat_out_channels, params.bts_size)
def forward(self, x, focal):
skip_feat = self.encoder(x)
return self.decoder(skip_feat, focal)
| 17,122 | 50.575301 | 180 | py |
DelayResolvedRL | DelayResolvedRL-main/Gym(Stochastic)/agent.py | import tensorflow as tf
import numpy as np
import random
import copy
from statistics import mean
from collections import deque
GPUs = tf.config.experimental.list_physical_devices('GPU')
if GPUs:
try:
for gpu in GPUs:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
def to_onehot(size, value):
"""1 hot encoding for observed state"""
return np.eye(size)[value]
class Model(tf.keras.Model):
"""DQN Model"""
def __init__(self, num_states, hidden_units, num_actions, alg, use_stochastic_delay, max_dimension):
super(Model, self).__init__()
if alg == 'IS':
if use_stochastic_delay:
self.input_layer = tf.keras.layers.InputLayer(input_shape=(num_states + 1 + max_dimension,))
else:
self.input_layer = tf.keras.layers.InputLayer(input_shape=(num_states + max_dimension,))
else:
self.input_layer = tf.keras.layers.InputLayer(input_shape=(num_states,))
self.hidden_layers = []
for i in hidden_units:
self.hidden_layers.append(tf.keras.layers.Dense(
i, activation='tanh', kernel_initializer='RandomNormal'))
self.output_layer = tf.keras.layers.Dense(
num_actions, activation='linear', kernel_initializer='RandomNormal')
@tf.function
def call(self, inputs):
z = self.input_layer(inputs)
for layer in self.hidden_layers:
z = layer(z)
output = self.output_layer(z)
return output
class DQN:
def __init__(self, num_states, num_actions, model_params, alg_params):
np.random.seed(alg_params['seed'])
tf.random.set_seed(alg_params['seed'])
random.seed(alg_params['seed'])
self.num_actions = num_actions
self.alg = alg_params['algorithm']
self.batch_size = alg_params['batch_size']
self.optimizer = tf.optimizers.Adam(alg_params['learning_rate'])
self.use_stochastic_delay = alg_params['use_stochastic_delay']
self.max_dimension = model_params['max_dimension']
hidden_units = model_params['hidden_units']
self.delay = alg_params['delay']
self.gamma = alg_params['gamma']
        self.model = Model(num_states, hidden_units, num_actions, self.alg, self.use_stochastic_delay,
                           self.max_dimension)
self.experience = {'s': [], 'a': [], 'r': [], 's2': [], 'done': []}
self.max_experiences = model_params['max_buffer_size']
self.min_experiences = model_params['min_buffer_size']
if self.alg != 'normal':
self.action_buffer = deque(maxlen=self.max_dimension + 1)
self.action_buffer_padded = deque(maxlen=self.max_dimension + 1)
def predict(self, inputs):
return self.model(np.atleast_2d(inputs.astype('float32')))
def fill_up_buffer(self):
self.action_buffer_padded.clear()
for _ in range(self.max_dimension):
self.action_buffer_padded.append(0)
def buffer_padding(self):
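        # right-pad the pending-action buffer with zeros up to max_dimension
        # (actions are stored as action + 1, so 0 marks an empty slot)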
current_length = len(self.action_buffer)
self.action_buffer_padded = copy.deepcopy(self.action_buffer)
for _ in range(0, self.max_dimension - current_length):
self.action_buffer_padded.append(0)
def train(self, TargetNet):
if len(self.experience['s']) < self.min_experiences:
return 0
ids = np.random.randint(low=0, high=len(self.experience['s']), size=self.batch_size)
states = np.asarray([self.experience['s'][i] for i in ids])
actions = np.asarray([self.experience['a'][i] for i in ids])
rewards = np.asarray([self.experience['r'][i] for i in ids])
states_next = np.asarray([self.experience['s2'][i] for i in ids])
dones = np.asarray([self.experience['done'][i] for i in ids])
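        # one-step TD targets from the target network: r + gamma * max_a' Q_target(s', a'),
        # falling back to r alone for terminal transitions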
value_next = np.max(TargetNet.predict(states_next), axis=1)
actual_values = np.where(dones, rewards, rewards + self.gamma * value_next)
with tf.GradientTape() as tape:
selected_action_values = tf.math.reduce_sum(
self.predict(states) * tf.one_hot(actions, self.num_actions), axis=1)
loss = tf.math.reduce_mean(tf.square(actual_values - selected_action_values))
variables = self.model.trainable_variables
gradients = tape.gradient(loss, variables)
self.optimizer.apply_gradients(zip(gradients, variables))
return loss
def get_action(self, states, epsilon):
if np.random.random() < epsilon:
return np.random.choice(self.num_actions)
else:
return np.argmax(self.predict(np.atleast_2d(states))[0])
def add_experience(self, exp):
if len(self.experience['s']) >= self.max_experiences:
for key in self.experience.keys():
self.experience[key].pop(0)
for key, value in exp.items():
self.experience[key].append(value)
def copy_weights(self, TrainNet):
variables1 = self.model.trainable_variables
variables2 = TrainNet.model.trainable_variables
for v1, v2 in zip(variables1, variables2):
v1.assign(v2.numpy())
def play_game(global_step, env, TrainNet, TargetNet, epsilon, copy_step):
rewards = 0
episode_step = 0
last_state_observed = 0
done = False
observations = env.reset()
observations_original = observations
if env.game_name.startswith('Frozen'):
observations = to_onehot(env.state_space.n, observations)
if TrainNet.alg != 'normal':
TrainNet.fill_up_buffer()
losses = list()
clear = False
while not done:
delay = env.delay
len_buffer = len(env.state_buffer)
if TrainNet.alg == 'normal':
action = TrainNet.get_action(observations, epsilon)
prev_observations = observations
observations, reward, done = env.step(observations_original, action)
observations_original = observations
if env.game_name.startswith('Frozen'):
observations = to_onehot(env.state_space.n, observations)
else:
if episode_step == 0:
if env.use_stochastic_delay:
last_state_observed = (episode_step - env.turn_limit / 2) / env.turn_limit
action_state = np.append(last_state_observed, TrainNet.action_buffer_padded)
information_state = np.append(observations, action_state)
# information_state = np.append(observations, TrainNet.action_buffer_padded)
else:
information_state = np.append(observations, TrainNet.action_buffer_padded)
if TrainNet.alg == 'IS':
action = TrainNet.get_action(information_state, epsilon)
else:
action = TrainNet.get_action(observations, epsilon)
prev_observations = observations
prev_information_state = information_state
observations, reward, done = env.step(observations_original, action)
observations_original = observations
if env.game_name.startswith('Frozen'):
observations = to_onehot(env.state_space.n, observations)
episode_step += 1
if env.train:
last_state_observed = (episode_step - 1 - env.turn_limit / 2) / env.turn_limit
TrainNet.action_buffer.append(action + 1)
for i in range(len_buffer + 1 - delay):
                    TrainNet.action_buffer.popleft()
TrainNet.buffer_padding()
else:
# delayed_action = random.randint(0, TrainNet.num_actions)
TrainNet.action_buffer.append(action + 1)
TrainNet.buffer_padding()
if env.delay == 0:
delayed_action = action
else:
if not TrainNet.action_buffer:
delayed_action = random.randint(0, TrainNet.num_actions)
else:
delayed_action = TrainNet.action_buffer[0]
if delay == 0:
delayed_action = action
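            # if the pending-action buffer has filled up, flush it: take the newest buffered observation,
            # the sum of the buffered rewards and the latest done flag, then restart the padded buffer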
if len(TrainNet.action_buffer) == TrainNet.max_dimension + 1:
TrainNet.action_buffer.clear()
TrainNet.buffer_padding()
observations = env.state_buffer.pop()
env.state_buffer.clear()
reward = np.sum(env.reward_buffer)
done = env.done_buffer.pop()
env.done_buffer.clear()
env.reward_buffer.clear()
clear = True
if env.use_stochastic_delay:
action_state = np.append(last_state_observed, TrainNet.action_buffer_padded)
information_state = np.append(observations, action_state)
# information_state = np.append(observations, TrainNet.action_buffer_padded)
else:
information_state = np.append(observations, TrainNet.action_buffer_padded)
rewards += reward
if done:
episode_step = 0
env.reset()
if TrainNet.alg != 'normal':
TrainNet.action_buffer.clear()
TrainNet.buffer_padding()
global_step += 1
if TrainNet.alg == 'normal':
exp = {'s': prev_observations, 'a': action, 'r': reward, 's2': observations, 'done': done}
if TrainNet.alg == 'delay':
exp = {'s': prev_observations, 'a': delayed_action, 'r': reward, 's2': observations, 'done': done}
if TrainNet.alg == 'IS':
exp = {'s': prev_information_state, 'a': action, 'r': reward, 's2': information_state, 'done': done}
TrainNet.add_experience(exp)
loss = TrainNet.train(TargetNet)
if isinstance(loss, int):
losses.append(loss)
else:
losses.append(loss.numpy())
if global_step % copy_step == 0:
TargetNet.copy_weights(TrainNet)
return global_step, rewards, mean(losses)
def test(env, TrainNet, logs, num_episodes):
for _ in range(num_episodes):
observation = env.reset()
rewards = 0
steps = 0
done = False
while not done:
action = TrainNet.get_action(observation, 0)
observation, reward, done, _ = env.step(action)
steps += 1
rewards += reward
with open(logs['log_file_name'], "a") as f:
print("Testing steps: {} rewards :{} ".format(steps, rewards), file=f)
print("Testing steps: {} rewards :{} ".format(steps, rewards))
def train_agent(env, num_frames, model_params, algorithm_params, logs, verbose):
num_actions = env.number_of_actions
try:
state_space = len(env.state_space.sample())
except TypeError:
state_space = env.state_space.n
copy_step = model_params['copy_step']
TrainNet = DQN(state_space, num_actions, model_params, algorithm_params)
TargetNet = DQN(state_space, num_actions, model_params, algorithm_params)
# N = num_episodes
total_rewards_list = []
total_losses_list = []
epsilon_start = algorithm_params['start_epsilon']
decay = algorithm_params['epsilon_decay']
min_epsilon = algorithm_params['stop_epsilon']
global_step = 1
n = 0
while True:
epsilon = min_epsilon + (epsilon_start - min_epsilon) * np.exp(-decay * global_step)
global_step, total_reward, losses = play_game(global_step, env, TrainNet, TargetNet, epsilon, copy_step)
total_rewards_list.append(total_reward)
total_losses_list.append(losses)
total_rewards = np.array(total_rewards_list)
total_losses = np.array(total_losses_list)
avg_rewards = total_rewards[max(0, n - 100):(n + 1)].mean()
avg_losses = total_losses[max(0, n - 100):(n + 1)].mean()
if n % logs['log_interval'] == 0:
if verbose:
with open(logs['log_file_name'], "a") as f:
print("episode:{}, eps:{:.3f}, avg reward (last 100):{:.2f}, avg loss:{:.2f}"
.format(n, epsilon, avg_rewards, avg_losses), file=f)
if not verbose:
print("episode:{}, eps:{:.3f}, avg reward (last 100):{:.2f}"
.format(n, epsilon, avg_rewards))
# test(env, TrainNet, logs, 100)
n += 1
if global_step > num_frames:
break
env.close()
return total_rewards, total_losses
| 12,554 | 41.849829 | 112 | py |
DelayResolvedRL | DelayResolvedRL-main/W-Maze/DQN/agent.py | import tensorflow as tf
import numpy as np
import random
import copy
from statistics import mean
from collections import deque
GPUs = tf.config.experimental.list_physical_devices('GPU')
if GPUs:
try:
for gpu in GPUs:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
'''DQN Model'''
class Model(tf.keras.Model):
def __init__(self, state_space_shape, hidden_units, num_actions, use_stochastic_delay, max_dimension, alg):
super(Model, self).__init__()
input_shape = state_space_shape.ndim
if alg == 'IS':
if use_stochastic_delay:
self.input_layer = tf.keras.layers.InputLayer(input_shape=(input_shape + 1 + max_dimension,))
else:
self.input_layer = tf.keras.layers.InputLayer(input_shape=(input_shape + max_dimension,))
else:
self.input_layer = tf.keras.layers.InputLayer(input_shape=(input_shape,))
self.hidden_layers = []
for i in hidden_units:
self.hidden_layers.append(tf.keras.layers.Dense(
i, activation='tanh', kernel_initializer='RandomNormal'))
self.output_layer = tf.keras.layers.Dense(
num_actions, activation='linear', kernel_initializer='RandomNormal')
@tf.function
def call(self, inputs):
z = self.input_layer(inputs)
for layer in self.hidden_layers:
z = layer(z)
output = self.output_layer(z)
return output
class DQN:
def __init__(self, state_space_shape, num_actions, model_params, alg_params):
self.num_actions = num_actions
self.actions = np.linspace(1, self.num_actions, num=self.num_actions, dtype=np.int32)
self.alg = alg_params['algorithm']
self.batch_size = alg_params['batch_size']
self.optimizer = tf.optimizers.Adam(alg_params['learning_rate'])
self.delay = alg_params['delay']
self.gamma = alg_params['gamma']
self.use_stochastic_delay = alg_params['use_stochastic_delay']
self.max_dimension = model_params['max_dimension']
hidden_units = model_params['hidden_units']
self.model = Model(state_space_shape, hidden_units, num_actions, self.use_stochastic_delay, self.max_dimension, self.alg)
self.experience = {'s': [], 'a': [], 'r': [], 's2': [], 'done': []}
self.max_experiences = model_params['max_buffer_size']
self.min_experiences = model_params['min_buffer_size']
if self.alg != 'normal':
self.action_buffer = deque(maxlen=self.max_dimension+1)
self.action_buffer_padded = deque(maxlen=self.max_dimension+1)
def predict(self, inputs):
return self.model(np.atleast_2d(inputs.astype('float32')))
def fill_up_buffer(self):
self.action_buffer_padded.clear()
for _ in range(self.max_dimension):
self.action_buffer_padded.append(0)
def buffer_padding(self):
current_length = len(self.action_buffer)
self.action_buffer_padded = copy.deepcopy(self.action_buffer)
for _ in range(0, self.max_dimension - current_length):
self.action_buffer_padded.append(0)
def train(self, TargetNet):
if len(self.experience['s']) < self.min_experiences:
return 0
ids = np.random.randint(low=0, high=len(self.experience['s']), size=self.batch_size)
states = np.asarray([self.experience['s'][i] for i in ids])
actions = np.asarray([self.experience['a'][i] for i in ids])
rewards = np.asarray([self.experience['r'][i] for i in ids])
states_next = np.asarray([self.experience['s2'][i] for i in ids])
dones = np.asarray([self.experience['done'][i] for i in ids])
value_next = np.max(TargetNet.predict(states_next), axis=1)
actual_values = np.where(dones, rewards, rewards + self.gamma * value_next)
with tf.GradientTape() as tape:
selected_action_values = tf.math.reduce_sum(
self.predict(states) * tf.one_hot(actions, self.num_actions), axis=1)
loss = tf.math.reduce_mean(tf.square(actual_values - selected_action_values))
variables = self.model.trainable_variables
gradients = tape.gradient(loss, variables)
self.optimizer.apply_gradients(zip(gradients, variables))
return loss
def get_action(self, states, epsilon):
if np.random.random() < epsilon:
return np.random.choice(self.num_actions)
else:
return np.argmax(self.predict(np.atleast_2d(states))[0])
def add_experience(self, exp):
if len(self.experience['s']) >= self.max_experiences:
for key in self.experience.keys():
self.experience[key].pop(0)
for key, value in exp.items():
self.experience[key].append(value)
def copy_weights(self, TrainNet):
variables1 = self.model.trainable_variables
variables2 = TrainNet.model.trainable_variables
for v1, v2 in zip(variables1, variables2):
v1.assign(v2.numpy())
def play_game(global_step, env, TrainNet, TargetNet, epsilon, copy_step):
rewards = 0
episode_step = 0
last_state_observed = 0
done = False
observations = env.reset()
if TrainNet.alg != 'normal':
TrainNet.fill_up_buffer()
losses = list()
clear = False
while not done:
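        # per-step logic: 'normal' ignores the delay, 'IS' acts on the observation augmented with the
        # pending-action buffer, and 'delay' credits the transition to the action at the head of the buffer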
delay = env.delay
len_buffer = len(env.state_buffer)
if TrainNet.alg == 'normal':
action = TrainNet.get_action(observations, epsilon)
prev_observations = observations
observations, reward, done = env.step(observations, action)
else:
if episode_step == 0:
if env.use_stochastic_delay: # append the last time this state was observed normalized by the max step of the episode
last_state_observed = (episode_step-env.turn_limit/2)/env.turn_limit
action_state = np.append(last_state_observed, TrainNet.action_buffer_padded)
information_state = np.append(observations, action_state)
else:
information_state = np.append(observations, TrainNet.action_buffer_padded)
if TrainNet.alg == 'IS':
action = TrainNet.get_action(information_state, epsilon)
else:
action = TrainNet.get_action(observations, epsilon)
prev_observations = observations
prev_information_state = information_state
observations, reward, done = env.step(observations, action)
episode_step += 1
if env.delay == 0:
delayed_action = action
else:
if not TrainNet.action_buffer: # buffer empty
delayed_action = random.randint(0, TrainNet.num_actions)
else:
delayed_action = TrainNet.action_buffer[0]
if env.train:
last_state_observed = (episode_step-env.turn_limit/2)/env.turn_limit
TrainNet.action_buffer.append(action + 1)
for i in range(len_buffer + 1 - delay):
                    TrainNet.action_buffer.popleft()
TrainNet.buffer_padding()
else:
TrainNet.action_buffer.append(action + 1)
TrainNet.buffer_padding()
if len(TrainNet.action_buffer) == TrainNet.max_dimension+1:
TrainNet.action_buffer.clear()
TrainNet.buffer_padding()
observations = env.state_buffer.pop()
env.state_buffer.clear()
reward = np.sum(env.reward_buffer)
done = env.done_buffer.pop()
env.done_buffer.clear()
env.reward_buffer.clear()
clear = True
if env.use_stochastic_delay: # append the last time this state was observed normalized by the max step of the episode
action_state = np.append(last_state_observed, TrainNet.action_buffer_padded)
information_state = np.append(observations, action_state)
else:
information_state = np.append(observations, TrainNet.action_buffer_padded)
rewards += reward
if done:
episode_step = 0
env.reset()
if TrainNet.alg != 'normal':
TrainNet.action_buffer.clear()
TrainNet.buffer_padding()
global_step += 1
if TrainNet.alg == 'normal':
exp = {'s': prev_observations, 'a': action, 'r': reward, 's2': observations, 'done': done}
if TrainNet.alg == 'delay':
exp = {'s': prev_observations, 'a': delayed_action, 'r': reward, 's2': observations, 'done': done}
if TrainNet.alg == 'IS':
exp = {'s': prev_information_state, 'a': action, 'r': reward, 's2': information_state, 'done': done}
TrainNet.add_experience(exp)
loss = TrainNet.train(TargetNet)
if isinstance(loss, int):
losses.append(loss)
else:
losses.append(loss.numpy())
if global_step % copy_step == 0:
TargetNet.copy_weights(TrainNet)
return global_step, rewards, mean(losses)
def test(env, TrainNet, logs, num_episodes):
for _ in range(num_episodes):
observation = env.reset()
rewards = 0
steps = 0
done = False
while not done:
action = TrainNet.get_action(observation, 0)
observation, reward, done, _ = env.step(action)
steps += 1
rewards += reward
with open(logs['log_file_name'], "a") as f:
print("Testing steps: {} rewards :{} ".format(steps, rewards), file=f)
print("Testing steps: {} rewards :{} ".format(steps, rewards))
def train_agent(env, num_frames, model_params, algorithm_params, logs, verbose):
np.random.seed(algorithm_params['seed'])
tf.random.set_seed(algorithm_params['seed'])
random.seed(algorithm_params['seed'])
num_actions = env.number_of_actions
state_space = env.state_space.shape
copy_step = model_params['copy_step']
TrainNet = DQN(state_space, num_actions, model_params, algorithm_params)
TargetNet = DQN(state_space, num_actions, model_params, algorithm_params)
# N = num_episodes
total_rewards_list = []
total_losses_list = []
epsilon_start = algorithm_params['start_epsilon']
decay = algorithm_params['epsilon_decay']
min_epsilon = algorithm_params['stop_epsilon']
global_step = 1
n = 0
while True:
epsilon = min_epsilon + (epsilon_start - min_epsilon) * np.exp(-decay * global_step)
global_step, total_reward, losses = play_game(global_step, env, TrainNet, TargetNet, epsilon, copy_step)
total_rewards_list.append(total_reward)
total_losses_list.append(losses)
total_rewards = np.array(total_rewards_list)
total_losses = np.array(total_losses_list)
avg_rewards = total_rewards[max(0, n - 100):(n + 1)].mean()
avg_losses = total_losses[max(0, n - 100):(n + 1)].mean()
if n % logs['log_interval'] == 0:
if verbose:
with open(logs['log_file_name'], "a") as f:
print("episode:{}, eps:{:.4f}, avg reward (last 100):{:.2f}, avg loss:{:.2f}"
.format(n, epsilon, avg_rewards, avg_losses), file=f)
if not verbose:
print("episode:{}, eps:{:.3f}, avg reward (last 100):{:.2f}"
.format(n, epsilon, avg_rewards))
# test(env, TrainNet, logs, 100)
n += 1
if global_step > num_frames:
break
# env.close()
return total_rewards, total_losses
| 11,830 | 42.818519 | 134 | py |
DelayResolvedRL | DelayResolvedRL-main/Gym(Constant)/dqn_agents.py | from collections import deque
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten
from copy import deepcopy
import random
from keras.optimizers import Adam
from keras import backend as K
import tensorflow as tf
import numpy as np
def reshape_state(state, is_atari_env, state_size):
reshaped = state
if not is_atari_env:
reshaped = np.reshape(state, [1, state_size])
else:
if len(state.shape) < 4:
reshaped = np.expand_dims(state, axis=0)
return reshaped
def update_loss(loss, sample_loss):
if loss is not None and sample_loss is not None:
for key, val in sample_loss.items():
if key in loss:
loss[key] += val
else:
loss[key] = val
def concatenate_state_action(state, action):
out = np.concatenate((state[0], [action]))
out = np.reshape(out, [1, len(out)])
return out
class DQNAgent:
def __init__(self, seed, state_size, action_size, is_atari_env, is_delayed_agent=False, delay_value=0, epsilon_min=0.001,
epsilon_decay=0.999, learning_rate=0.001, epsilon=1.0, use_m_step_reward=False, use_latest_reward=True,
loss='mse', **kwargs):
np.random.seed(seed)
tf.random.set_seed(seed)
random.seed(seed)
self.state_size = state_size
self.action_size = action_size
self.is_atari_env = is_atari_env
mem_len = 50000 if self.is_atari_env else 1000
self.memory = deque(maxlen=mem_len)
self.gamma = 0.99 # discount rate
self.epsilon = epsilon # exploration rate
self.epsilon_min = epsilon_min
self.epsilon_decay = epsilon_decay
self.learning_rate = learning_rate
self.sample_buffer = deque()
self.is_delayed_agent = is_delayed_agent
self.delay_value = delay_value
self.model = self._build_model(loss=loss)
self.use_m_step_reward = use_m_step_reward
self.use_latest_reward = use_latest_reward
def _huber_loss(self, y_true, y_pred, clip_delta=1.0):
"""Huber loss for Q Learning
References: https://en.wikipedia.org/wiki/Huber_loss
https://www.tensorflow.org/api_docs/python/tf/losses/huber_loss
"""
error = y_true - y_pred
cond = K.abs(error) <= clip_delta
squared_loss = 0.5 * K.square(error)
quadratic_loss = 0.5 * K.square(clip_delta) + clip_delta * (K.abs(error) - clip_delta)
return K.mean(tf.where(cond, squared_loss, quadratic_loss))
def _build_forward_model(self, loss='mse', input_size=None, output_size=None):
input_size = self.state_size if input_size is None else input_size
output_size = self.action_size if output_size is None else output_size
model = Sequential()
model.add(Dense(200, input_dim=input_size, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(output_size, activation='linear'))
model.compile(loss=loss,
optimizer=Adam(lr=self.learning_rate))
return model
def _build_model(self, loss=None, input_size=None, output_size=None):
        loss = self._huber_loss if loss == 'huber' else loss
input_size = self.state_size if input_size is None else input_size
output_size = self.action_size if output_size is None else output_size
# Neural Net for Deep-Q learning Model
model = Sequential()
if self.is_atari_env:
model.add(Conv2D(32, 8, strides=(4,4), input_shape=input_size, activation='relu'))
model.add(MaxPool2D())
model.add(Conv2D(64, 4, strides=(2,2), activation='relu'))
model.add(MaxPool2D())
model.add(Conv2D(64, 3, strides=(1,1), activation='relu'))
model.add(MaxPool2D())
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(output_size, activation='linear'))
else:
# model.add(Dense(24, input_dim=input_size, activation='relu'))
# model.add(Dense(24, activation='relu'))
# model.add(Dense(output_size, activation='linear'))
model.add(Dense(200, input_dim=input_size, activation='tanh', kernel_initializer='RandomNormal'))
# model.add(Dense(200, activation='tanh'))
model.add(Dense(output_size, activation='linear', kernel_initializer='RandomNormal'))
model.compile(loss=loss,
optimizer=Adam(lr=self.learning_rate))
return model
def memorize(self, state, action, reward, next_state, done):
if self.is_delayed_agent:
# for earlier time than delay_value, the data is problematic (non-delayed response)
# Construct modified tuple by keeping old s_t with new a_{t+m}, r_{t+m} s_{t+m+1}
new_tuple = (state, action, reward, next_state, done)
self.sample_buffer.append(new_tuple)
if len(self.sample_buffer) - 1 >= self.delay_value:
old_tuple = self.sample_buffer.popleft()
modified_tuple = list(deepcopy(old_tuple))
modified_tuple[1] = action
modified_tuple[2] = self.m_step_reward(first_reward=old_tuple[2])
# trying to use s_{t+1} instead of s_{t+m} as in the original ICML2020 submission
# modified_tuple[3] = next_state
modified_tuple = tuple(modified_tuple)
self.memory.append(modified_tuple)
else:
self.memory.append((state, action, reward, next_state, done))
def act(self, state, eval=False):
if not eval and np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action
def m_step_reward(self, first_reward):
if not self.use_m_step_reward:
if self.use_latest_reward:
return self.sample_buffer[-1][2]
else:
return first_reward
else:
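            # m-step variant: discounted sum of the rewards gathered while this action was pending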
discounted_rew = first_reward
for i in range(self.delay_value):
discounted_rew += self.gamma ** (i + 1) * self.sample_buffer[i][2]
return discounted_rew
def effective_gamma(self):
return self.gamma if not self.use_m_step_reward else (self.gamma ** (self.delay_value + 1))
def replay(self, batch_size, global_step):
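        # standard DQN update on a uniform minibatch; the bootstrap target comes from this same network
        # (no separate target net here) and epsilon is annealed exponentially with the global step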
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
target = reward
if not done:
target = (reward + self.effective_gamma() *
np.amax(self.model.predict(next_state)[0]))
target_f = self.model.predict(state)
target_f[0][action] = target
# self.model.fit(state, target_f, epochs=1, verbose=0,
# callbacks=[WandbCallback()])
self.model.fit(state, target_f, epochs=1, verbose=0)
self.epsilon = self.epsilon_min + (1.0 - self.epsilon_min) * np.exp(-self.epsilon_decay * global_step)
def load(self, name):
self.model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
def clear_action_buffer(self):
self.sample_buffer.clear()
class DDQNAgent(DQNAgent):
def __init__(self, seed, state_size, action_size, is_atari_env, is_delayed_agent=False, delay_value=0, epsilon_min=0.001,
epsilon_decay=0.999, learning_rate=0.001, epsilon=1.0, use_m_step_reward=False, use_latest_reward=True):
super().__init__(seed, state_size, action_size, is_atari_env=is_atari_env, is_delayed_agent=is_delayed_agent, delay_value=delay_value,
epsilon_min=epsilon_min, epsilon_decay=epsilon_decay, learning_rate=learning_rate,
epsilon=epsilon, use_m_step_reward=use_m_step_reward, use_latest_reward=use_latest_reward,
loss='huber')
# self.model = self._build_model()
self.target_model = self._build_model(loss='mse')
self.update_target_model()
def update_target_model(self):
# copy weights from model to target_model
self.target_model.set_weights(self.model.get_weights())
def train_model(self, batch):
state_vec, action_vec, reward_vec, next_state_vec, done_vec = batch
target = self.model.predict(state_vec)
t = self.target_model.predict(next_state_vec)
not_done_arr = np.invert(np.asarray(done_vec))
new_targets = reward_vec + not_done_arr * self.effective_gamma() * np.amax(t, axis=1)
for i in range(len(batch[0])):
target[i][action_vec[i]] = new_targets[i]
train_history = self.model.fit(state_vec, target, epochs=1, verbose=0)
q_loss = train_history.history['loss'][0]
loss_dict = {'q_loss': q_loss}
return loss_dict
def _create_batch(self, indices):
state_vec, action_vec, reward_vec, next_state_vec, done_vec = [], [], [], [], []
for i in indices:
data = self.memory[i]
state, action, reward, next_state, done = data
state_vec.append(np.array(state, copy=False))
action_vec.append(action)
reward_vec.append(reward)
next_state_vec.append(np.array(next_state, copy=False))
done_vec.append(done)
return np.concatenate(state_vec, axis=0), action_vec, reward_vec, np.concatenate(next_state_vec, axis=0), done_vec
def replay(self, batch_size, global_step):
loss = {}
indices = np.random.choice(len(self.memory), batch_size)
batch = self._create_batch(indices)
sample_loss = self.train_model(batch)
update_loss(loss, sample_loss)
self.epsilon = self.epsilon_min + (1.0 - self.epsilon_min) * np.exp(-self.epsilon_decay * global_step)
return loss
class DDQNPlanningAgent(DDQNAgent):
def __init__(self, seed, state_size, action_size, is_atari_env, is_delayed_agent=False, delay_value=0, epsilon_min=0.001,
epsilon_decay=0.999, learning_rate=0.001, epsilon=1.0, use_m_step_reward=False,
use_latest_reward=True, env=None, use_learned_forward_model=True):
super().__init__(seed, state_size, action_size, is_atari_env=is_atari_env, is_delayed_agent=is_delayed_agent, delay_value=delay_value,
epsilon_min=epsilon_min, epsilon_decay=epsilon_decay, learning_rate=learning_rate,
epsilon=epsilon, use_m_step_reward=use_m_step_reward, use_latest_reward=use_latest_reward)
self.use_learned_forward_model = use_learned_forward_model
if self.use_learned_forward_model:
keras_forward_model = self._build_forward_model(loss='mse', input_size=self.state_size + 1, output_size=self.state_size)
self.forward_model = ForwardModel(keras_forward_model)
else:
self.forward_model = env
def train_model(self, batch):
loss_dict = super().train_model(batch)
if self.use_learned_forward_model and self.delay_value > 0:
state_vec, action_vec, _, next_state_vec, _ = batch
act_t = np.asarray([action_vec]).transpose()
concat_vec = np.concatenate((state_vec, act_t), axis=1)
train_history = self.forward_model.keras_model.fit(concat_vec, next_state_vec, epochs=1, verbose=0)
f_model_loss = train_history.history['loss'][0]
loss_dict['f_model_loss'] = f_model_loss
return loss_dict
def act(self, state, pending_actions, eval):
if not eval and np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
last_state = state
if self.delay_value > 0:
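            # roll the forward model (learned network or the env copy) over the still-pending actions to
            # estimate the state at which this newly chosen action will actually be executed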
if not self.use_learned_forward_model:
self.forward_model.store_initial_state()
# initial_state = deepcopy(state)
for curr_action in pending_actions:
last_state = self.forward_model.get_next_state(state=last_state, action=curr_action)
if not self.use_learned_forward_model:
self.forward_model.restore_initial_state()
last_state_r = reshape_state(last_state, self.is_atari_env, self.state_size)
act_values = self.model.predict(last_state_r)
return np.argmax(act_values[0]) # returns best action for last state
def memorize(self, state, action, reward, next_state, done):
# for earlier time than delay_value, the data is problematic (non-delayed response)
# Construct modified tuple by keeping old s_t with new a_{t+m}, r_{t+m} s_{t+m+1}
new_tuple = (state, action, reward, next_state, done)
self.sample_buffer.append(new_tuple)
if len(self.sample_buffer) - 1 >= self.delay_value:
old_tuple = self.sample_buffer.popleft()
modified_tuple = list(deepcopy(old_tuple))
# build time-coherent tuple from new tuple and old action
modified_tuple[0] = state
# modified_tuple[1] = action
modified_tuple[2] = reward # self.m_step_reward(first_reward=old_tuple[2])
modified_tuple[3] = next_state
modified_tuple = tuple(modified_tuple)
self.memory.append(modified_tuple)
class ForwardModel:
def __init__(self, keras_model):
self.keras_model = keras_model
def get_next_state(self, state, action):
input = concatenate_state_action(state, action)
return self.keras_model.predict(input)
def reset_to_state(self, state):
# not necessary here. Only used if the forward_model is the actual env instance
pass | 13,916 | 46.498294 | 142 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/translate.py | #!/usr/bin/env python
from __future__ import division
from builtins import bytes
import os
import argparse
import math
import codecs
import torch
import onmt
import onmt.IO
import opts
from itertools import takewhile, count
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
parser = argparse.ArgumentParser(
description='translate.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
opts.add_md_help_argument(parser)
opts.translate_opts(parser)
opt = parser.parse_args()
if opt.batch_size != 1:
print("WARNING: -batch_size isn't supported currently, "
"we set it to 1 for now!")
opt.batch_size = 1
def report_score(name, score_total, words_total):
print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
name, score_total / words_total,
name, math.exp(-score_total/words_total)))
def get_src_words(src_indices, index2str):
words = []
raw_words = (index2str[i] for i in src_indices)
words = takewhile(lambda w: w != onmt.IO.PAD_WORD, raw_words)
return " ".join(words)
def main():
dummy_parser = argparse.ArgumentParser(description='train.py')
opts.model_opts(dummy_parser)
dummy_opt = dummy_parser.parse_known_args([])[0]
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
translator = onmt.Translator(opt, dummy_opt.__dict__)
out_file = codecs.open(opt.output, 'w', 'utf-8')
pred_score_total, pred_words_total = 0, 0
gold_score_total, gold_words_total = 0, 0
if opt.dump_beam != "":
import json
translator.initBeamAccum()
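    # build the translation dataset from the source (and optional gold target) files and iterate over it
    # in the original order, one sentence per batch (batch_size is forced to 1 above)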
data = onmt.IO.ONMTDataset(
opt.src, opt.tgt, translator.fields,
use_filter_pred=False)
test_data = onmt.IO.OrderedIterator(
dataset=data, device=opt.gpu,
batch_size=opt.batch_size, train=False, sort=False,
shuffle=False)
counter = count(1)
for batch in test_data:
pred_batch, gold_batch, pred_scores, gold_scores, attn, src \
= translator.translate(batch, data)
pred_score_total += sum(score[0] for score in pred_scores)
pred_words_total += sum(len(x[0]) for x in pred_batch)
if opt.tgt:
gold_score_total += sum(gold_scores)
gold_words_total += sum(len(x) for x in batch.tgt[1:])
# z_batch: an iterator over the predictions, their scores,
# the gold sentence, its score, and the source sentence for each
# sentence in the batch. It has to be zip_longest instead of
# plain-old zip because the gold_batch has length 0 if the target
# is not included.
z_batch = zip_longest(
pred_batch, gold_batch,
pred_scores, gold_scores,
(sent.squeeze(1) for sent in src.split(1, dim=1)))
for pred_sents, gold_sent, pred_score, gold_score, src_sent in z_batch:
n_best_preds = [" ".join(pred) for pred in pred_sents[:opt.n_best]]
out_file.write('\n'.join(n_best_preds))
out_file.write('\n')
out_file.flush()
if opt.verbose:
sent_number = next(counter)
words = get_src_words(
src_sent, translator.fields["src"].vocab.itos)
os.write(1, bytes('\nSENT %d: %s\n' %
(sent_number, words), 'UTF-8'))
best_pred = n_best_preds[0]
best_score = pred_score[0]
os.write(1, bytes('PRED %d: %s\n' %
(sent_number, best_pred), 'UTF-8'))
print("PRED SCORE: %.4f" % best_score)
if opt.tgt:
tgt_sent = ' '.join(gold_sent)
os.write(1, bytes('GOLD %d: %s\n' %
(sent_number, tgt_sent), 'UTF-8'))
print("GOLD SCORE: %.4f" % gold_score)
if len(n_best_preds) > 1:
print('\nBEST HYP:')
for score, sent in zip(pred_score, n_best_preds):
os.write(1, bytes("[%.4f] %s\n" % (score, sent),
'UTF-8'))
report_score('PRED', pred_score_total, pred_words_total)
if opt.tgt:
report_score('GOLD', gold_score_total, gold_words_total)
if opt.dump_beam:
json.dump(translator.beam_accum,
codecs.open(opt.dump_beam, 'w', 'utf-8'))
if __name__ == "__main__":
main()
| 4,516 | 32.708955 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/train.py | #!/usr/bin/env python
from __future__ import division
import os
import sys
import argparse
import torch
import torch.nn as nn
from torch import cuda
import onmt
import onmt.Models
import onmt.ModelConstructor
import onmt.modules
from onmt.Utils import aeq, use_gpu
import opts
parser = argparse.ArgumentParser(
description='train.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# opts.py
opts.add_md_help_argument(parser)
opts.model_opts(parser)
opts.train_opts(parser)
opt = parser.parse_args()
if opt.word_vec_size != -1:
opt.src_word_vec_size = opt.word_vec_size
opt.tgt_word_vec_size = opt.word_vec_size
if opt.layers != -1:
opt.enc_layers = opt.layers
opt.dec_layers = opt.layers
opt.brnn = (opt.encoder_type == "brnn")
if opt.seed > 0:
torch.manual_seed(opt.seed)
if opt.rnn_type == "SRU" and not opt.gpuid:
raise AssertionError("Using SRU requires -gpuid set.")
if torch.cuda.is_available() and not opt.gpuid:
print("WARNING: You have a CUDA device, should run with -gpuid 0")
if opt.gpuid:
cuda.set_device(opt.gpuid[0])
if opt.seed > 0:
torch.cuda.manual_seed(opt.seed)
if len(opt.gpuid) > 1:
sys.stderr.write("Sorry, multigpu isn't supported yet, coming soon!\n")
sys.exit(1)
# Set up the Crayon logging server.
if opt.exp_host != "":
from pycrayon import CrayonClient
cc = CrayonClient(hostname=opt.exp_host)
experiments = cc.get_experiment_names()
print(experiments)
if opt.exp in experiments:
cc.remove_experiment(opt.exp)
experiment = cc.create_experiment(opt.exp)
def report_func(epoch, batch, num_batches,
start_time, lr, report_stats):
"""
    This is the user-defined batch-level training progress
report function.
Args:
epoch(int): current epoch count.
batch(int): current batch count.
num_batches(int): total number of batches.
start_time(float): last report time.
lr(float): current learning rate.
report_stats(Statistics): old Statistics instance.
Returns:
report_stats(Statistics): updated Statistics instance.
"""
if batch % opt.report_every == -1 % opt.report_every:
report_stats.output(epoch, batch+1, num_batches, start_time)
if opt.exp_host:
report_stats.log("progress", experiment, lr)
report_stats = onmt.Statistics()
return report_stats
def make_train_data_iter(train_data, opt):
"""
This returns user-defined train data iterator for the trainer
to iterate over during each train epoch. We implement simple
ordered iterator strategy here, but more sophisticated strategy
like curriculum learning is ok too.
"""
return onmt.IO.OrderedIterator(
dataset=train_data, batch_size=opt.batch_size,
device=opt.gpuid[0] if opt.gpuid else -1,
repeat=False)
def make_valid_data_iter(valid_data, opt):
"""
This returns user-defined validate data iterator for the trainer
to iterate over during each validate epoch. We implement simple
ordered iterator strategy here, but more sophisticated strategy
is ok too.
"""
return onmt.IO.OrderedIterator(
dataset=valid_data, batch_size=opt.batch_size,
device=opt.gpuid[0] if opt.gpuid else -1,
train=False, sort=True)
def make_loss_compute(model, tgt_vocab, dataset, opt):
"""
This returns user-defined LossCompute object, which is used to
compute loss in train/validate process. You can implement your
own *LossCompute class, by subclassing LossComputeBase.
"""
if opt.copy_attn:
compute = onmt.modules.CopyGeneratorLossCompute(
model.generator, tgt_vocab, dataset, opt.copy_attn_force)
else:
compute = onmt.Loss.NMTLossCompute(model.generator, tgt_vocab)
if use_gpu(opt):
compute.cuda()
return compute
def train_model(model, train_data, valid_data, fields, optim):
min_ppl, max_accuracy = float('inf'), -1
train_iter = make_train_data_iter(train_data, opt)
valid_iter = make_valid_data_iter(valid_data, opt)
train_loss = make_loss_compute(model, fields["tgt"].vocab,
train_data, opt)
valid_loss = make_loss_compute(model, fields["tgt"].vocab,
valid_data, opt)
trunc_size = opt.truncated_decoder # Badly named...
shard_size = opt.max_generator_batches
trainer = onmt.Trainer(model, train_iter, valid_iter,
train_loss, valid_loss, optim,
trunc_size, shard_size)
for epoch in range(opt.start_epoch, opt.epochs + 1):
print('')
# 1. Train for one epoch on the training set.
train_stats = trainer.train(epoch, report_func)
print('Train perplexity: %g' % train_stats.ppl())
print('Train accuracy: %g' % train_stats.accuracy())
# 2. Validate on the validation set.
valid_stats = trainer.validate()
print('Validation perplexity: %g' % valid_stats.ppl())
print('Validation accuracy: %g' % valid_stats.accuracy())
# 3. Log to remote server.
if opt.exp_host:
train_stats.log("train", experiment, optim.lr)
valid_stats.log("valid", experiment, optim.lr)
# 4. Update the learning rate
trainer.epoch_step(valid_stats.ppl(), epoch)
# 5. Drop a checkpoint if needed.
if epoch >= opt.start_checkpoint_at:
if valid_stats.accuracy() > max_accuracy:
# 5.1 drop checkpoint when bigger accuracy is achieved.
min_ppl = min(valid_stats.ppl(), min_ppl)
max_accuracy = max(valid_stats.accuracy(), max_accuracy)
trainer.drop_checkpoint(opt, epoch, fields, valid_stats)
print('Save model according to biggest-ever accuracy: acc: {0}, ppl: {1}'.format(max_accuracy, min_ppl))
elif valid_stats.ppl() < min_ppl:
# 5.2 drop checkpoint when smaller ppl is achieved.
min_ppl = min(valid_stats.ppl(), min_ppl)
max_accuracy = max(valid_stats.accuracy(), max_accuracy)
trainer.drop_checkpoint(opt, epoch, fields, valid_stats)
print('Save model according to lowest-ever ppl: acc: {0}, ppl: {1}'.format(max_accuracy, min_ppl))
def check_save_model_path():
save_model_path = os.path.abspath(opt.save_model)
model_dirname = os.path.dirname(save_model_path)
if not os.path.exists(model_dirname):
os.makedirs(model_dirname)
def tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
print('* number of parameters: %d' % n_params)
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
        elif 'decoder' in name or 'generator' in name:
dec += param.nelement()
print('encoder: ', enc)
print('decoder: ', dec)
def load_fields(train, valid, checkpoint):
fields = onmt.IO.load_fields(
torch.load(opt.data + '.vocab.pt'))
fields = dict([(k, f) for (k, f) in fields.items()
if k in train.examples[0].__dict__])
train.fields = fields
valid.fields = fields
if opt.train_from:
print('Loading vocab from checkpoint at %s.' % opt.train_from)
fields = onmt.IO.load_fields(checkpoint['vocab'])
print(' * vocabulary size. source = %d; target = %d' %
(len(fields['src'].vocab), len(fields['tgt'].vocab)))
return fields
def collect_features(train, fields):
# TODO: account for target features.
# Also, why does fields need to have the structure it does?
src_features = onmt.IO.collect_features(fields)
aeq(len(src_features), train.n_src_feats)
return src_features
def build_model(model_opt, opt, fields, checkpoint):
print('Building model...')
model = onmt.ModelConstructor.make_base_model(model_opt, fields,
use_gpu(opt), checkpoint)
if len(opt.gpuid) > 1:
print('Multi gpu training: ', opt.gpuid)
model = nn.DataParallel(model, device_ids=opt.gpuid, dim=1)
print(model)
return model
def build_optim(model, checkpoint):
if opt.train_from:
print('Loading optimizer from checkpoint.')
optim = checkpoint['optim']
optim.optimizer.load_state_dict(
checkpoint['optim'].optimizer.state_dict())
else:
# what members of opt does Optim need?
optim = onmt.Optim(
opt.optim, opt.learning_rate, opt.max_grad_norm,
lr_decay=opt.learning_rate_decay,
start_decay_at=opt.start_decay_at,
opt=opt
)
optim.set_parameters(model.parameters())
return optim
def main():
# Load train and validate data.
print("Loading train and validate data from '%s'" % opt.data)
train = torch.load(opt.data + '.train.pt')
valid = torch.load(opt.data + '.valid.pt')
print(' * number of training sentences: %d' % len(train))
print(' * maximum batch size: %d' % opt.batch_size)
# Load checkpoint if we resume from a previous training.
if opt.train_from:
print('Loading checkpoint from %s' % opt.train_from)
checkpoint = torch.load(opt.train_from,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
# I don't like reassigning attributes of opt: it's not clear
opt.start_epoch = checkpoint['epoch'] + 1
else:
checkpoint = None
model_opt = opt
# Load fields generated from preprocess phase.
fields = load_fields(train, valid, checkpoint)
# Collect features.
src_features = collect_features(train, fields)
for j, feat in enumerate(src_features):
print(' * src feature %d size = %d' % (j, len(fields[feat].vocab)))
# Build model.
model = build_model(model_opt, opt, fields, checkpoint)
tally_parameters(model)
check_save_model_path()
# Build optimizer.
optim = build_optim(model, checkpoint)
# Do training.
train_model(model, train, valid, fields, optim)
if __name__ == "__main__":
main()
| 10,352 | 31.556604 | 120 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/preprocess.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import codecs
import torch
import onmt
import onmt.IO
import opts
parser = argparse.ArgumentParser(
description='preprocess.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
opts.add_md_help_argument(parser)
# **Preprocess Options**
parser.add_argument('-config', help="Read options from this file")
parser.add_argument('-data_type', default="text",
help="Type of the source input. Options are [text|img].")
parser.add_argument('-data_img_dir', default=".",
help="Location of source images")
parser.add_argument('-train_src', required=True,
help="Path to the training source data")
parser.add_argument('-train_tgt', required=True,
help="Path to the training target data")
parser.add_argument('-valid_src', required=True,
help="Path to the validation source data")
parser.add_argument('-valid_tgt', required=True,
help="Path to the validation target data")
parser.add_argument('-save_data', required=True,
help="Output file for the prepared data")
parser.add_argument('-src_vocab',
help="Path to an existing source vocabulary")
parser.add_argument('-tgt_vocab',
help="Path to an existing target vocabulary")
parser.add_argument('-features_vocabs_prefix', type=str, default='',
help="Path prefix to existing features vocabularies")
parser.add_argument('-seed', type=int, default=3435,
help="Random seed")
parser.add_argument('-report_every', type=int, default=100000,
help="Report status every this many sentences")
opts.preprocess_opts(parser)
opt = parser.parse_args()
torch.manual_seed(opt.seed)
def main():
print('Preparing training ...')
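    # Peek at the first line of the source/target files to count how many
    # │-delimited features accompany each token.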
with codecs.open(opt.train_src, "r", "utf-8") as src_file:
src_line = src_file.readline().strip().split()
_, _, n_src_features = onmt.IO.extract_features(src_line)
with codecs.open(opt.train_tgt, "r", "utf-8") as tgt_file:
tgt_line = tgt_file.readline().strip().split()
_, _, n_tgt_features = onmt.IO.extract_features(tgt_line)
fields = onmt.IO.get_fields(n_src_features, n_tgt_features)
print("Building Training...")
train = onmt.IO.ONMTDataset(
opt.train_src, opt.train_tgt, fields,
opt.src_seq_length, opt.tgt_seq_length,
src_seq_length_trunc=opt.src_seq_length_trunc,
tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
dynamic_dict=opt.dynamic_dict)
print("Building Vocab...")
onmt.IO.build_vocab(train, opt)
print("Building Valid...")
valid = onmt.IO.ONMTDataset(
opt.valid_src, opt.valid_tgt, fields,
opt.src_seq_length, opt.tgt_seq_length,
src_seq_length_trunc=opt.src_seq_length_trunc,
tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
dynamic_dict=opt.dynamic_dict)
print("Saving train/valid/fields")
# Can't save fields, so remove/reconstruct at training time.
torch.save(onmt.IO.save_vocab(fields),
open(opt.save_data + '.vocab.pt', 'wb'))
train.fields = []
valid.fields = []
torch.save(train, open(opt.save_data + '.train.pt', 'wb'))
torch.save(valid, open(opt.save_data + '.valid.pt', 'wb'))
if __name__ == "__main__":
main()
| 3,411 | 34.915789 | 77 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/tools/extract_embeddings.py | from __future__ import division
import torch
import argparse
from onmt.ModelConstructor import make_embeddings, \
make_encoder, make_decoder
parser = argparse.ArgumentParser(description='translate.py')
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-output_dir', default='.',
help="""Path to output the embeddings""")
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
def write_embeddings(filename, vocab_dict, embeddings):
    # Write one line per token: the token string followed by its embedding values.
    with open(filename, 'w') as out_file:
        for i in range(len(embeddings)):
            line = vocab_dict.idxToLabel[i]
            for j in range(len(embeddings[0])):
                line = line + " %5f" % (embeddings[i][j])
            out_file.write(line + "\n")
def main():
opt = parser.parse_args()
checkpoint = torch.load(opt.model)
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
model_opt = checkpoint['opt']
src_dict = checkpoint['dicts']['src']
tgt_dict = checkpoint['dicts']['tgt']
feature_dicts = []
embeddings = make_embeddings(model_opt, src_dict, feature_dicts)
encoder = make_encoder(model_opt, embeddings)
embeddings = make_embeddings(model_opt, tgt_dict, feature_dicts,
for_encoder=False)
decoder = make_decoder(model_opt, embeddings)
encoder_embeddings = encoder.word_lut.weight.data.tolist()
decoder_embeddings = decoder.word_lut.weight.data.tolist()
print("Writing source embeddings")
write_embeddings(opt.output_dir + "/src_embeddings.txt", src_dict,
encoder_embeddings)
print("Writing target embeddings")
write_embeddings(opt.output_dir + "/tgt_embeddings.txt", tgt_dict,
decoder_embeddings)
print('... done.')
print('Converting model...')
if __name__ == "__main__":
main()
| 1,987 | 30.0625 | 70 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/test/test_models.py | import argparse
import copy
import unittest
import torch
from torch.autograd import Variable
import onmt
import opts
from onmt.ModelConstructor import make_embeddings, \
make_encoder, make_decoder
parser = argparse.ArgumentParser(description='train.py')
opts.model_opts(parser)
opts.train_opts(parser)
# -data option is required, but not used in this test, so dummy.
opt = parser.parse_known_args(['-data', 'dummy'])[0]
print(opt)
class TestModel(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestModel, self).__init__(*args, **kwargs)
self.opt = opt
# Helper to generate a vocabulary
def get_vocab(self):
src = onmt.IO.get_fields(0, 0)["src"]
src.build_vocab([])
return src.vocab
def get_batch(self, sourceL=3, bsize=1):
# len x batch x nfeat
test_src = Variable(torch.ones(sourceL, bsize, 1)).long()
test_tgt = Variable(torch.ones(sourceL, bsize, 1)).long()
test_length = torch.ones(bsize).fill_(sourceL)
return test_src, test_tgt, test_length
def embeddings_forward(self, opt, sourceL=3, bsize=1):
'''
Tests if the embeddings works as expected
args:
opt: set of options
sourceL: Length of generated input sentence
bsize: Batchsize of generated input
'''
word_dict = self.get_vocab()
feature_dicts = []
emb = make_embeddings(opt, word_dict, feature_dicts)
test_src, _, __ = self.get_batch(sourceL=sourceL,
bsize=bsize)
if opt.decoder_type == 'transformer':
input = torch.cat([test_src, test_src], 0)
res = emb(input)
compare_to = torch.zeros(sourceL * 2, bsize, opt.src_word_vec_size)
else:
res = emb(test_src)
compare_to = torch.zeros(sourceL, bsize, opt.src_word_vec_size)
self.assertEqual(res.size(), compare_to.size())
def encoder_forward(self, opt, sourceL=3, bsize=1):
'''
Tests if the encoder works as expected
args:
opt: set of options
sourceL: Length of generated input sentence
bsize: Batchsize of generated input
'''
word_dict = self.get_vocab()
feature_dicts = []
embeddings = make_embeddings(opt, word_dict, feature_dicts)
enc = make_encoder(opt, embeddings)
test_src, test_tgt, test_length = self.get_batch(sourceL=sourceL,
bsize=bsize)
hidden_t, outputs = enc(test_src, test_length)
# Initialize vectors to compare size with
test_hid = torch.zeros(self.opt.enc_layers, bsize, opt.rnn_size)
test_out = torch.zeros(sourceL, bsize, opt.rnn_size)
# Ensure correct sizes and types
        self.assertEqual(test_hid.size(), hidden_t[0].size())
        self.assertEqual(test_hid.size(), hidden_t[1].size())
self.assertEqual(test_out.size(), outputs.size())
self.assertEqual(type(outputs), torch.autograd.Variable)
self.assertEqual(type(outputs.data), torch.FloatTensor)
def ntmmodel_forward(self, opt, sourceL=3, bsize=1):
"""
Creates a ntmmodel with a custom opt function.
Forwards a testbatch anc checks output size.
Args:
opt: Namespace with options
sourceL: length of input sequence
bsize: batchsize
"""
word_dict = self.get_vocab()
feature_dicts = []
embeddings = make_embeddings(opt, word_dict, feature_dicts)
enc = make_encoder(opt, embeddings)
embeddings = make_embeddings(opt, word_dict, feature_dicts,
for_encoder=False)
dec = make_decoder(opt, embeddings)
model = onmt.Models.NMTModel(enc, dec)
test_src, test_tgt, test_length = self.get_batch(sourceL=sourceL,
bsize=bsize)
outputs, attn, _ = model(test_src,
test_tgt,
test_length)
outputsize = torch.zeros(sourceL - 1, bsize, opt.rnn_size)
# Make sure that output has the correct size and type
self.assertEqual(outputs.size(), outputsize.size())
self.assertEqual(type(outputs), torch.autograd.Variable)
self.assertEqual(type(outputs.data), torch.FloatTensor)
def _add_test(paramSetting, methodname):
"""
Adds a Test to TestModel according to settings
Args:
paramSetting: list of tuples of (param, setting)
methodname: name of the method that gets called
"""
def test_method(self):
if paramSetting:
opt = copy.deepcopy(self.opt)
for param, setting in paramSetting:
setattr(opt, param, setting)
else:
opt = self.opt
getattr(self, methodname)(opt)
if paramSetting:
name = 'test_' + methodname + "_" + "_".join(str(paramSetting).split())
else:
name = 'test_' + methodname + '_standard'
setattr(TestModel, name, test_method)
test_method.__name__ = name
'''
TEST PARAMETERS
'''
test_embeddings = [[],
[('decoder_type', 'transformer')]
]
for p in test_embeddings:
_add_test(p, 'embeddings_forward')
tests_encoder = [[],
[('encoder_type', 'mean')],
# [('encoder_type', 'transformer'),
# ('word_vec_size', 16), ('rnn_size', 16)],
[]
]
for p in tests_encoder:
_add_test(p, 'encoder_forward')
tests_ntmodel = [[('rnn_type', 'GRU')],
[('layers', 10)],
[('input_feed', 0)],
[('decoder_type', 'transformer'),
('encoder_type', 'transformer'),
('src_word_vec_size', 16),
('tgt_word_vec_size', 16),
('rnn_size', 16)],
# [('encoder_type', 'transformer'),
# ('word_vec_size', 16),
# ('rnn_size', 16)],
[('decoder_type', 'transformer'),
('encoder_type', 'transformer'),
('src_word_vec_size', 16),
('tgt_word_vec_size', 16),
('rnn_size', 16),
('position_encoding', True)],
[('coverage_attn', True)],
[('copy_attn', True)],
[('global_attention', 'mlp')],
[('context_gate', 'both')],
[('context_gate', 'target')],
[('context_gate', 'source')],
[('encoder_type', "brnn"),
('brnn_merge', 'sum')],
[('encoder_type', "brnn")],
[('decoder_type', 'cnn'),
('encoder_type', 'cnn')],
[]
]
if onmt.modules.check_sru_requirement():
""" Only do SRU test if requirment is safisfied. """
# SRU doesn't support input_feed.
tests_ntmodel.append([('rnn_type', 'SRU'), ('input_feed', 0)])
for p in tests_ntmodel:
_add_test(p, 'ntmmodel_forward')
| 7,275 | 32.84186 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/test/test_preprocess.py | import argparse
import copy
import unittest
import onmt
import opts
import torchtext
from collections import Counter
parser = argparse.ArgumentParser(description='preprocess.py')
opts.preprocess_opts(parser)
opt = parser.parse_known_args()[0]
opt.train_src = 'data/src-train.txt'
opt.train_tgt = 'data/tgt-train.txt'
opt.valid_src = 'data/src-val.txt'
opt.valid_tgt = 'data/tgt-val.txt'
print(opt)
class TestData(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestData, self).__init__(*args, **kwargs)
self.opt = opt
def dataset_build(self, opt):
fields = onmt.IO.get_fields(0, 0)
train = onmt.IO.ONMTDataset(
opt.train_src, opt.train_tgt, fields,
opt.src_seq_length, opt.tgt_seq_length,
src_seq_length_trunc=opt.src_seq_length_trunc,
tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
dynamic_dict=opt.dynamic_dict)
onmt.IO.build_vocab(train, opt)
onmt.IO.ONMTDataset(
opt.valid_src, opt.valid_tgt, fields,
opt.src_seq_length, opt.tgt_seq_length,
src_seq_length_trunc=opt.src_seq_length_trunc,
tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
dynamic_dict=opt.dynamic_dict)
def test_merge_vocab(self):
va = torchtext.vocab.Vocab(Counter('abbccc'))
vb = torchtext.vocab.Vocab(Counter('eeabbcccf'))
merged = onmt.IO.merge_vocabs([va, vb], 2)
self.assertEqual(Counter({'c': 6, 'b': 4, 'a': 2, 'e': 2, 'f': 1}),
merged.freqs)
self.assertEqual(6, len(merged.itos))
self.assertTrue('b' in merged.itos)
def _add_test(paramSetting, methodname):
"""
Adds a Test to TestData according to settings
Args:
paramSetting: list of tuples of (param, setting)
methodname: name of the method that gets called
"""
def test_method(self):
if paramSetting:
opt = copy.deepcopy(self.opt)
for param, setting in paramSetting:
setattr(opt, param, setting)
else:
opt = self.opt
getattr(self, methodname)(opt)
if paramSetting:
name = 'test_' + methodname + "_" + "_".join(str(paramSetting).split())
else:
name = 'test_' + methodname + '_standard'
setattr(TestData, name, test_method)
test_method.__name__ = name
test_databuild = [[],
[('src_vocab_size', 1),
('tgt_vocab_size', 1)],
[('src_vocab_size', 10000),
('tgt_vocab_size', 10000)],
[('src_seq_length', 1)],
[('src_seq_length', 5000)],
[('src_seq_length_trunc', 1)],
[('src_seq_length_trunc', 5000)],
[('tgt_seq_length', 1)],
[('tgt_seq_length', 5000)],
[('tgt_seq_length_trunc', 1)],
[('tgt_seq_length_trunc', 5000)],
[('shuffle', 0)],
[('lower', True)],
[('dynamic_dict', True)],
[('share_vocab', True)],
[('dynamic_dict', True),
('share_vocab', True)],
]
for p in test_databuild:
_add_test(p, 'dataset_build')
| 3,317 | 30.009346 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/Loss.py | """
This file handles the details of the loss function during training.
This includes: LossComputeBase and the standard NMTLossCompute, and
sharded loss compute stuff.
"""
from __future__ import division
import torch
import torch.nn as nn
from torch.autograd import Variable
import onmt
class LossComputeBase(nn.Module):
"""
This is the loss criterion base class. Users can implement their own
loss computation strategy by making subclass of this one.
Users need to implement the compute_loss() and make_shard_state() methods.
    We inherit from nn.Module to leverage the cuda behavior.
"""
def __init__(self, generator, tgt_vocab):
super(LossComputeBase, self).__init__()
self.generator = generator
self.tgt_vocab = tgt_vocab
self.padding_idx = tgt_vocab.stoi[onmt.IO.PAD_WORD]
def make_shard_state(self, batch, output, range_, attns=None):
"""
Make shard state dictionary for shards() to return iterable
shards for efficient loss computation. Subclass must define
this method to match its own compute_loss() interface.
Args:
batch: the current batch.
output: the predict output from the model.
range_: the range of examples for computing, the whole
batch or a trunc of it?
attns: the attns dictionary returned from the model.
"""
return NotImplementedError
def compute_loss(self, batch, output, target, **kwargs):
"""
Compute the loss. Subclass must define this method.
Args:
batch: the current batch.
output: the predict output from the model.
target: the validate target to compare output with.
**kwargs(optional): additional info for computing loss.
"""
return NotImplementedError
def monolithic_compute_loss(self, batch, output, attns):
"""
Compute the loss monolithically, not dividing into shards.
"""
range_ = (0, batch.tgt.size(0))
shard_state = self.make_shard_state(batch, output, range_, attns)
_, batch_stats = self.compute_loss(batch, **shard_state)
return batch_stats
def sharded_compute_loss(self, batch, output, attns,
cur_trunc, trunc_size, shard_size):
"""
Compute the loss in shards for efficiency.
"""
batch_stats = onmt.Statistics()
range_ = (cur_trunc, cur_trunc + trunc_size)
shard_state = self.make_shard_state(batch, output, range_, attns)
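        # Each shard backpropagates its own normalized loss; gradients accumulate
        # across shards, keeping peak memory bounded by shard_size.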
for shard in shards(shard_state, shard_size):
loss, stats = self.compute_loss(batch, **shard)
loss.div(batch.batch_size).backward()
batch_stats.update(stats)
return batch_stats
def stats(self, loss, scores, target):
"""
Compute and return a Statistics object.
Args:
loss(Tensor): the loss computed by the loss criterion.
scores(Tensor): a sequence of predict output with scores.
"""
pred = scores.max(1)[1]
non_padding = target.ne(self.padding_idx)
num_correct = pred.eq(target) \
.masked_select(non_padding) \
.sum()
return onmt.Statistics(loss[0], non_padding.sum(), num_correct)
def bottle(self, v):
return v.view(-1, v.size(2))
def unbottle(self, v, batch_size):
return v.view(-1, batch_size, v.size(1))
class NMTLossCompute(LossComputeBase):
"""
Standard NMT Loss Computation.
"""
def __init__(self, generator, tgt_vocab):
super(NMTLossCompute, self).__init__(generator, tgt_vocab)
weight = torch.ones(len(tgt_vocab))
weight[self.padding_idx] = 0
self.criterion = nn.NLLLoss(weight, size_average=False)
def make_shard_state(self, batch, output, range_, attns=None):
""" See base class for args description. """
return {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1]],
}
def compute_loss(self, batch, output, target):
""" See base class for args description. """
scores = self.generator(self.bottle(output))
target = target.view(-1)
loss = self.criterion(scores, target)
loss_data = loss.data.clone()
stats = self.stats(loss_data, scores.data, target.data)
return loss, stats
def filter_shard_state(state):
for k, v in state.items():
if v is not None:
if isinstance(v, Variable) and v.requires_grad:
v = Variable(v.data, requires_grad=True, volatile=False)
yield k, v
def shards(state, shard_size, eval=False):
"""
Args:
state: A dictionary which corresponds to the output of
*LossCompute.make_shard_state(). The values for
those keys are Tensor-like or None.
shard_size: The maximum size of the shards yielded by the model.
eval: If True, only yield the state, nothing else.
Otherwise, yield shards.
Yields:
Each yielded shard is a dict.
Side effect:
After the last shard, this function does back-propagation.
"""
if eval:
yield state
else:
# non_none: the subdict of the state dictionary where the values
# are not None.
non_none = dict(filter_shard_state(state))
# Now, the iteration:
# state is a dictionary of sequences of tensor-like but we
# want a sequence of dictionaries of tensors.
# First, unzip the dictionary into a sequence of keys and a
# sequence of tensor-like sequences.
keys, values = zip(*((k, torch.split(v, shard_size))
for k, v in non_none.items()))
# Now, yield a dictionary for each shard. The keys are always
# the same. values is a sequence of length #keys where each
# element is a sequence of length #shards. We want to iterate
# over the shards, not over the keys: therefore, the values need
# to be re-zipped by shard and then each shard can be paired
# with the keys.
for shard_tensors in zip(*values):
yield dict(zip(keys, shard_tensors))
        # After the last shard, back-propagate the gradients accumulated on the
        # split variables onto the original (requires_grad) state tensors.
variables = ((state[k], v.grad.data) for k, v in non_none.items()
if isinstance(v, Variable) and v.grad is not None)
inputs, grads = zip(*variables)
torch.autograd.backward(inputs, grads)
| 6,611 | 34.548387 | 78 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/Beam.py | from __future__ import division
import torch
import onmt
"""
Class for managing the internals of the beam search process.
Takes care of beams, back pointers, and scores.
"""
class Beam(object):
def __init__(self, size, n_best=1, cuda=False, vocab=None,
global_scorer=None):
self.size = size
self.tt = torch.cuda if cuda else torch
# The score for each translation on the beam.
self.scores = self.tt.FloatTensor(size).zero_()
self.allScores = []
# The backpointers at each time-step.
self.prevKs = []
# The outputs at each time-step.
self.nextYs = [self.tt.LongTensor(size)
.fill_(vocab.stoi[onmt.IO.PAD_WORD])]
self.nextYs[0][0] = vocab.stoi[onmt.IO.BOS_WORD]
self.vocab = vocab
# Has EOS topped the beam yet.
self._eos = self.vocab.stoi[onmt.IO.EOS_WORD]
self.eosTop = False
# The attentions (matrix) for each time.
self.attn = []
# Time and k pair for finished.
self.finished = []
self.n_best = n_best
# Information for global scoring.
self.globalScorer = global_scorer
self.globalState = {}
def getCurrentState(self):
"Get the outputs for the current timestep."
return self.nextYs[-1]
def getCurrentOrigin(self):
"Get the backpointers for the current timestep."
return self.prevKs[-1]
def advance(self, wordLk, attnOut):
"""
Given prob over words for every last beam `wordLk` and attention
`attnOut`: Compute and update the beam search.
Parameters:
* `wordLk`- probs of advancing from the last step (K x words)
* `attnOut`- attention at the last step
Returns: True if beam search is complete.
"""
numWords = wordLk.size(1)
# Sum the previous scores.
if len(self.prevKs) > 0:
beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
# Don't let EOS have children.
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
beamLk[i] = -1e20
else:
beamLk = wordLk[0]
flatBeamLk = beamLk.view(-1)
bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
self.allScores.append(self.scores)
self.scores = bestScores
# bestScoresId is flattened beam x word array, so calculate which
# word and beam each score came from
prevK = bestScoresId / numWords
self.prevKs.append(prevK)
self.nextYs.append((bestScoresId - prevK * numWords))
self.attn.append(attnOut.index_select(0, prevK))
if self.globalScorer is not None:
self.globalScorer.updateGlobalState(self)
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
s = self.scores[i]
if self.globalScorer is not None:
globalScores = self.globalScorer.score(self, self.scores)
s = globalScores[i]
self.finished.append((s, len(self.nextYs) - 1, i))
# End condition is when top-of-beam is EOS and no global score.
if self.nextYs[-1][0] == self.vocab.stoi[onmt.IO.EOS_WORD]:
# self.allScores.append(self.scores)
self.eosTop = True
def done(self):
return self.eosTop and len(self.finished) >= self.n_best
def sortFinished(self, minimum=None):
if minimum is not None:
i = 0
# Add from beam until we have minimum outputs.
while len(self.finished) < minimum:
s = self.scores[i]
if self.globalScorer is not None:
globalScores = self.globalScorer.score(self, self.scores)
s = globalScores[i]
self.finished.append((s, len(self.nextYs) - 1, i))
self.finished.sort(key=lambda a: -a[0])
scores = [sc for sc, _, _ in self.finished]
ks = [(t, k) for _, t, k in self.finished]
return scores, ks
def getHyp(self, timestep, k):
"""
Walk back to construct the full hypothesis.
"""
hyp, attn = [], []
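        # Follow the backpointers from (timestep, k) back to the start, then reverse.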
for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
hyp.append(self.nextYs[j+1][k])
attn.append(self.attn[j][k])
k = self.prevKs[j][k]
return hyp[::-1], torch.stack(attn[::-1])
class GNMTGlobalScorer(object):
"""
Google NMT ranking score from Wu et al.
"""
def __init__(self, alpha, beta):
self.alpha = alpha
self.beta = beta
def score(self, beam, logprobs):
"Additional term add to log probability"
cov = beam.globalState["coverage"]
pen = self.beta * torch.min(cov, cov.clone().fill_(1.0)).log().sum(1)
l_term = (((5 + len(beam.nextYs)) ** self.alpha) /
((5 + 1) ** self.alpha))
return (logprobs / l_term) + pen
def updateGlobalState(self, beam):
"Keeps the coverage vector as sum of attens"
if len(beam.prevKs) == 1:
beam.globalState["coverage"] = beam.attn[-1]
else:
beam.globalState["coverage"] = beam.globalState["coverage"] \
.index_select(0, beam.prevKs[-1]).add(beam.attn[-1])
| 5,428 | 32.512346 | 77 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/Translator.py | import torch
from torch.autograd import Variable
import onmt
import onmt.Models
import onmt.ModelConstructor
import onmt.modules
import onmt.IO
from onmt.Utils import use_gpu
NOISE_TRANSELATE = False
class Translator(object):
def __init__(self, opt, dummy_opt={}):
# Add in default model arguments, possibly added since training.
self.opt = opt
checkpoint = torch.load(opt.model,
map_location=lambda storage, loc: storage)
self.fields = onmt.IO.load_fields(checkpoint['vocab'])
model_opt = checkpoint['opt']
for arg in dummy_opt:
if arg not in model_opt:
model_opt.__dict__[arg] = dummy_opt[arg]
self._type = model_opt.encoder_type
self.copy_attn = model_opt.copy_attn
self.model = onmt.ModelConstructor.make_base_model(
model_opt, self.fields, use_gpu(opt), checkpoint)
self.model.eval()
self.model.generator.eval()
# for debugging
self.beam_accum = None
def initBeamAccum(self):
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
def buildTargetTokens(self, pred, src, attn, copy_vocab):
vocab = self.fields["tgt"].vocab
tokens = []
for tok in pred:
if tok < len(vocab):
tokens.append(vocab.itos[tok])
else:
tokens.append(copy_vocab.itos[tok - len(vocab)])
if tokens[-1] == onmt.IO.EOS_WORD:
tokens = tokens[:-1]
break
if self.opt.replace_unk and attn is not None:
for i in range(len(tokens)):
if tokens[i] == vocab.itos[onmt.IO.UNK]:
_, maxIndex = attn[i].max(0)
tokens[i] = self.fields["src"].vocab.itos[src[maxIndex[0]]]
return tokens
def _runTarget(self, batch, data):
_, src_lengths = batch.src
src = onmt.IO.make_features(batch, 'src')
tgt_in = onmt.IO.make_features(batch, 'tgt')[:-1]
# (1) run the encoder on the src
encStates, context = self.model.encoder(src, src_lengths)
decStates = self.model.decoder.init_decoder_state(
src, context, encStates)
# (2) if a target is specified, compute the 'goldScore'
# (i.e. log likelihood) of the target under the model
tt = torch.cuda if self.opt.cuda else torch
goldScores = tt.FloatTensor(batch.batch_size).fill_(0)
decOut, decStates, attn = self.model.decoder(
tgt_in, context, decStates)
tgt_pad = self.fields["tgt"].vocab.stoi[onmt.IO.PAD_WORD]
for dec, tgt in zip(decOut, batch.tgt[1:].data):
# Log prob of each word.
out = self.model.generator.forward(dec)
tgt = tgt.unsqueeze(1)
scores = out.data.gather(1, tgt)
scores.masked_fill_(tgt.eq(tgt_pad), 0)
goldScores += scores
return goldScores
def translateBatch(self, batch, dataset):
beam_size = self.opt.beam_size
batch_size = batch.batch_size
# (1) Run the encoder on the src.
_, src_lengths = batch.src
src = onmt.IO.make_features(batch, 'src')
        encStates, context = self.model.encoder(src, src_lengths)  # returns (hidden_t, outputs)
        if NOISE_TRANSELATE:
            # Optionally perturb the encoder final (h, c) states with small uniform
            # noise before initializing the decoder.
            noise_h = torch.FloatTensor(encStates[0].data.shape).uniform_(-0.2, 0.2)
            noise_c = torch.FloatTensor(encStates[1].data.shape).uniform_(-0.2, 0.2)
            if self.opt.cuda:
                noise_h, noise_c = noise_h.cuda(), noise_c.cuda()
            newEncStates = (encStates[0] + Variable(noise_h),
                            encStates[1] + Variable(noise_c))
            decStates = self.model.decoder.init_decoder_state(src, context,
                                                              newEncStates)
        else:
            decStates = self.model.decoder.init_decoder_state(src, context,
                                                              encStates)
# (1b) Initialize for the decoder.
def var(a): return Variable(a, volatile=True)
def rvar(a): return var(a.repeat(1, beam_size, 1))
# Repeat everything beam_size times.
context = rvar(context.data)
src = rvar(src.data)
srcMap = rvar(batch.src_map.data)
decStates.repeat_beam_size_times(beam_size)
scorer = None
# scorer=onmt.GNMTGlobalScorer(0.3, 0.4)
beam = [onmt.Beam(beam_size, n_best=self.opt.n_best,
cuda=self.opt.cuda,
vocab=self.fields["tgt"].vocab,
global_scorer=scorer)
for __ in range(batch_size)]
# (2) run the decoder to generate sentences, using beam search.
def bottle(m):
return m.view(batch_size * beam_size, -1)
def unbottle(m):
return m.view(beam_size, batch_size, -1)
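        # bottle/unbottle convert between the flat (beam*batch) layout fed to the
        # decoder/generator and the (beam, batch) layout used for per-beam bookkeeping.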
for i in range(self.opt.max_sent_length):
if all((b.done() for b in beam)):
break
# Construct batch x beam_size nxt words.
# Get all the pending current beam words and arrange for forward.
inp = var(torch.stack([b.getCurrentState() for b in beam])
.t().contiguous().view(1, -1))
# Turn any copied words to UNKs
# 0 is unk
if self.copy_attn:
inp = inp.masked_fill(
inp.gt(len(self.fields["tgt"].vocab) - 1), 0)
# Temporary kludge solution to handle changed dim expectation
# in the decoder
inp = inp.unsqueeze(2)
# Run one step.
decOut, decStates, attn = \
self.model.decoder(inp, context, decStates)
decOut = decOut.squeeze(0)
# decOut: beam x rnn_size
# (b) Compute a vector of batch*beam word scores.
if not self.copy_attn:
out = self.model.generator.forward(decOut).data
out = unbottle(out)
# beam x tgt_vocab
else:
out = self.model.generator.forward(decOut,
attn["copy"].squeeze(0),
srcMap)
# beam x (tgt_vocab + extra_vocab)
out = dataset.collapse_copy_scores(
unbottle(out.data),
batch, self.fields["tgt"].vocab)
# beam x tgt_vocab
out = out.log()
# (c) Advance each beam.
for j, b in enumerate(beam):
b.advance(out[:, j], unbottle(attn["std"]).data[:, j])
decStates.beam_update(j, b.getCurrentOrigin(), beam_size)
if "tgt" in batch.__dict__:
allGold = self._runTarget(batch, dataset)
else:
allGold = [0] * batch_size
# (3) Package everything up.
allHyps, allScores, allAttn = [], [], []
for b in beam:
n_best = self.opt.n_best
scores, ks = b.sortFinished(minimum=n_best)
hyps, attn = [], []
for i, (times, k) in enumerate(ks[:n_best]):
hyp, att = b.getHyp(times, k)
hyps.append(hyp)
attn.append(att)
allHyps.append(hyps)
allScores.append(scores)
allAttn.append(attn)
return allHyps, allScores, allAttn, allGold
def translate(self, batch, data):
# (1) convert words to indexes
batch_size = batch.batch_size
# (2) translate
pred, predScore, attn, goldScore = self.translateBatch(batch, data)
assert(len(goldScore) == len(pred))
pred, predScore, attn, goldScore, i = list(zip(
*sorted(zip(pred, predScore, attn, goldScore,
batch.indices.data),
key=lambda x: x[-1])))
inds, perm = torch.sort(batch.indices.data)
# (3) convert indexes to words
predBatch, goldBatch = [], []
src = batch.src[0].data.index_select(1, perm)
if self.opt.tgt:
tgt = batch.tgt.data.index_select(1, perm)
for b in range(batch_size):
src_vocab = data.src_vocabs[inds[b]]
predBatch.append(
[self.buildTargetTokens(pred[b][n], src[:, b],
attn[b][n], src_vocab)
for n in range(self.opt.n_best)])
if self.opt.tgt:
goldBatch.append(
self.buildTargetTokens(tgt[1:, b], src[:, b],
None, None))
return predBatch, goldBatch, predScore, goldScore, attn, src
| 8,875 | 36.610169 | 122 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/IO.py | # -*- coding: utf-8 -*-
import codecs
from collections import Counter, defaultdict
from itertools import chain, count
import torch
import torchtext.data
import torchtext.vocab
PAD_WORD = '<blank>'
UNK = 0
BOS_WORD = '<s>'
EOS_WORD = '</s>'
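# The two functions below are monkey-patched onto torchtext.vocab.Vocab (see the
# assignments that follow) so that a Vocab whose stoi is a defaultdict can be pickled.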
def __getstate__(self):
return dict(self.__dict__, stoi=dict(self.stoi))
def __setstate__(self, state):
self.__dict__.update(state)
self.stoi = defaultdict(lambda: 0, self.stoi)
torchtext.vocab.Vocab.__getstate__ = __getstate__
torchtext.vocab.Vocab.__setstate__ = __setstate__
def load_fields(vocab):
vocab = dict(vocab)
n_src_features = len(collect_features(vocab, 'src'))
n_tgt_features = len(collect_features(vocab, 'tgt'))
fields = get_fields(n_src_features, n_tgt_features)
for k, v in vocab.items():
# Hack. Can't pickle defaultdict :(
v.stoi = defaultdict(lambda: 0, v.stoi)
fields[k].vocab = v
return fields
def collect_features(fields, side="src"):
assert side in ["src", "tgt"]
feats = []
for j in count():
key = side + "_feat_" + str(j)
if key not in fields:
break
feats.append(key)
return feats
def extract_features(tokens):
"""
tokens: A list of tokens, where each token consists of a word,
optionally followed by u"│"-delimited features.
    returns: a sequence of words, a sequence of feature sequences, and the
        number of features per token.
"""
if not tokens:
return [], [], -1
split_tokens = [token.split(u"│") for token in tokens]
split_tokens = [token for token in split_tokens if token[0]]
token_size = len(split_tokens[0])
assert all(len(token) == token_size for token in split_tokens), \
"all words must have the same number of features"
words_and_features = list(zip(*split_tokens))
words = words_and_features[0]
features = words_and_features[1:]
return words, features, token_size - 1
def read_corpus_file(path, truncate, side):
"""
path: location of a src or tgt file
truncate: maximum sequence length (0 for unlimited)
yields: (word, features, nfeat) triples for each line
"""
with codecs.open(path, "r", "utf-8") as corpus_file:
for i, line in enumerate(corpus_file):
line = line.split()
if truncate:
line = line[:truncate]
words, feats, n_feats = extract_features(line)
example_dict = {side: words, "indices": i}
if feats:
prefix = side + "_feat_"
example_dict.update((prefix + str(j), f)
for j, f in enumerate(feats))
yield example_dict, n_feats
def merge_vocabs(vocabs, vocab_size=None):
"""
Merge individual vocabularies (assumed to be generated from disjoint
documents) into a larger vocabulary.
Args:
vocabs: `torchtext.vocab.Vocab` vocabularies to be merged
vocab_size: `int` the final vocabulary size. `None` for no limit.
Return:
`torchtext.vocab.Vocab`
"""
merged = sum([vocab.freqs for vocab in vocabs], Counter())
return torchtext.vocab.Vocab(merged,
specials=[PAD_WORD, BOS_WORD, EOS_WORD],
max_size=vocab_size)
def make_features(batch, side):
"""
Args:
batch (Variable): a batch of source or target data.
side (str): for source or for target.
Returns:
A sequence of src/tgt tensors with optional feature tensors
of size (len x batch).
"""
assert side in ['src', 'tgt']
if isinstance(batch.__dict__[side], tuple):
data = batch.__dict__[side][0]
else:
data = batch.__dict__[side]
feat_start = side + "_feat_"
features = sorted(batch.__dict__[k]
for k in batch.__dict__ if feat_start in k)
levels = [data] + features
return torch.cat([level.unsqueeze(2) for level in levels], 2)
def save_vocab(fields):
vocab = []
for k, f in fields.items():
if 'vocab' in f.__dict__:
f.vocab.stoi = dict(f.vocab.stoi)
vocab.append((k, f.vocab))
return vocab
def collect_feature_dicts(fields, side):
assert side in ['src', 'tgt']
feature_dicts = []
for j in count():
key = side + "_feat_" + str(j)
if key not in fields:
break
feature_dicts.append(fields[key].vocab)
return feature_dicts
def get_fields(n_src_features, n_tgt_features):
"""
n_src_features: the number of source features to create Field objects for.
n_tgt_features: the number of target features to create Field objects for.
returns: A dictionary whose keys are strings and whose values are the
corresponding Field objects.
"""
fields = {}
fields["src"] = torchtext.data.Field(
pad_token=PAD_WORD,
include_lengths=True)
# fields = [("src_img", torchtext.data.Field(
# include_lengths=True))]
for j in range(n_src_features):
fields["src_feat_"+str(j)] = \
torchtext.data.Field(pad_token=PAD_WORD)
fields["tgt"] = torchtext.data.Field(
init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
for j in range(n_tgt_features):
fields["tgt_feat_"+str(j)] = \
torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
def make_src(data, _):
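        # Build a (src_len x batch x dynamic_vocab) one-hot "src_map" tensor that the
        # copy/pointer generator uses to scatter copy scores onto source tokens.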
src_size = max([t.size(0) for t in data])
src_vocab_size = max([t.max() for t in data]) + 1
alignment = torch.zeros(src_size, len(data), src_vocab_size)
for i, sent in enumerate(data):
for j, t in enumerate(sent):
alignment[j, i, t] = 1
return alignment
fields["src_map"] = torchtext.data.Field(
use_vocab=False, tensor_type=torch.FloatTensor,
postprocessing=make_src, sequential=False)
def make_tgt(data, _):
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment
fields["alignment"] = torchtext.data.Field(
use_vocab=False, tensor_type=torch.LongTensor,
postprocessing=make_tgt, sequential=False)
fields["indices"] = torchtext.data.Field(
use_vocab=False, tensor_type=torch.LongTensor,
sequential=False)
return fields
def build_vocab(train, opt):
"""
train: an ONMTDataset
"""
fields = train.fields
fields["src"].build_vocab(train, max_size=opt.src_vocab_size,
min_freq=opt.src_words_min_frequency)
for j in range(train.n_src_feats):
fields["src_feat_" + str(j)].build_vocab(train)
fields["tgt"].build_vocab(train, max_size=opt.tgt_vocab_size,
min_freq=opt.tgt_words_min_frequency)
for j in range(train.n_tgt_feats):
fields["tgt_feat_" + str(j)].build_vocab(train)
# Merge the input and output vocabularies.
if opt.share_vocab:
# `tgt_vocab_size` is ignored when sharing vocabularies
merged_vocab = merge_vocabs(
[fields["src"].vocab, fields["tgt"].vocab],
vocab_size=opt.src_vocab_size)
fields["src"].vocab = merged_vocab
fields["tgt"].vocab = merged_vocab
def join_dicts(*args):
"""
args: dictionaries with disjoint keys
returns: a single dictionary that has the union of these keys
"""
return dict(chain(*[d.items() for d in args]))
def peek(seq):
"""
sequence: an iterator
returns: the first thing returned by calling next() on the iterator
and an iterator created by re-chaining that value to the beginning
of the iterator.
"""
first = next(seq)
return first, chain([first], seq)
class OrderedIterator(torchtext.data.Iterator):
def create_batches(self):
if self.train:
self.batches = torchtext.data.pool(
self.data(), self.batch_size,
self.sort_key, self.batch_size_fn,
random_shuffler=self.random_shuffler)
else:
self.batches = []
for b in torchtext.data.batch(self.data(), self.batch_size,
self.batch_size_fn):
self.batches.append(sorted(b, key=self.sort_key))
class ONMTDataset(torchtext.data.Dataset):
"""
Defines a dataset for machine translation.
An ONMTDataset is a collection that supports iteration over its
examples. The parent class supports indexing as well, but future
developments here may make that difficult (lazy iteration over
examples because of large datasets, for example).
"""
@staticmethod
def sort_key(ex):
"Sort in reverse size order"
return -len(ex.src)
def __init__(self, src_path, tgt_path, fields,
src_seq_length=0, tgt_seq_length=0,
src_seq_length_trunc=0, tgt_seq_length_trunc=0,
use_filter_pred=True, dynamic_dict=True,
src_img_dir=None, **kwargs):
"""
Create a translation dataset given paths and fields.
src_path: location of source-side data
tgt_path: location of target-side data or None. If should be the
same length as the source-side data if it exists, but
at present this is not checked.
fields: a dictionary. keys are things like 'src', 'tgt', 'src_map',
and 'alignment'
src_img_dir: raises an error if not None because images are not
supported yet.
Initializes an ONMTDataset object with the following attributes:
self.examples (might be a generator, might be a list, hard to say):
A sequence of torchtext Example objects.
self.fields (dict):
A dictionary associating str keys with Field objects. Does not
necessarily have the same keys as the input fields.
A dataset basically supports iteration over all the examples it
contains.
"""
assert src_img_dir is None, "img data is not finished"
# self.src_vocabs: mutated in dynamic_dict, used in
# collapse_copy_scores and in Translator.py
self.src_vocabs = []
src_truncate = src_seq_length_trunc
src_examples = read_corpus_file(src_path, src_truncate, "src")
(_, src_feats), src_examples = peek(src_examples)
src_examples = (ex for ex, nfeats in src_examples)
self.n_src_feats = src_feats
# if tgt_path exists, then we need to do the same thing as we did
# for the source data
if tgt_path is not None:
tgt_truncate = tgt_seq_length_trunc
tgt_examples = read_corpus_file(tgt_path, tgt_truncate, "tgt")
(_, tgt_feats), tgt_examples = peek(tgt_examples)
tgt_examples = (ex for ex, nfeats in tgt_examples)
self.n_tgt_feats = tgt_feats
else:
self.n_tgt_feats = 0
tgt_examples = None
# examples: one for each src line or (src, tgt) line pair.
# Each element is a dictionary whose keys represent at minimum
# the src tokens and their indices and potentially also the
# src and tgt features and alignment information.
if tgt_examples is not None:
examples = (join_dicts(src, tgt)
for src, tgt in zip(src_examples, tgt_examples))
else:
examples = src_examples
if dynamic_dict:
examples = self.dynamic_dict(examples)
# Peek at the first to see which fields are used.
ex, examples = peek(examples)
keys = ex.keys()
fields = [(k, fields[k]) for k in keys]
example_values = ([ex[k] for k in keys] for ex in examples)
out_examples = (torchtext.data.Example.fromlist(ex_values, fields)
for ex_values in example_values)
def filter_pred(example):
return 0 < len(example.src) <= src_seq_length \
and 0 < len(example.tgt) <= tgt_seq_length
super(ONMTDataset, self).__init__(
out_examples,
fields,
filter_pred if use_filter_pred else None
)
def dynamic_dict(self, examples):
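        # Build a per-example source vocabulary and record each source/target token's
        # index in it ("src_map" / "alignment"); these drive the copy-attention loss.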
for example in examples:
src = example["src"]
src_vocab = torchtext.vocab.Vocab(Counter(src))
self.src_vocabs.append(src_vocab)
# mapping source tokens to indices in the dynamic dict
src_map = torch.LongTensor([src_vocab.stoi[w] for w in src])
example["src_map"] = src_map
if "tgt" in example:
tgt = example["tgt"]
mask = torch.LongTensor(
[0] + [src_vocab.stoi[w] for w in tgt] + [0])
example["alignment"] = mask
yield example
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__.update(d)
def __reduce_ex__(self, proto):
"This is a hack. Something is broken with torch pickle."
return super(ONMTDataset, self).__reduce_ex__()
def collapse_copy_scores(self, scores, batch, tgt_vocab):
"""
Given scores from an expanded dictionary
        corresponding to a batch, sums together copies,
        with a dictionary word when it is ambiguous.
"""
offset = len(tgt_vocab)
for b in range(batch.batch_size):
index = batch.indices.data[b]
src_vocab = self.src_vocabs[index]
for i in range(1, len(src_vocab)):
sw = src_vocab.itos[i]
ti = tgt_vocab.stoi[sw]
if ti != 0:
scores[:, b, ti] += scores[:, b, offset + i]
scores[:, b, offset + i].fill_(1e-20)
return scores
def load_image_libs():
"Conditional import of torch image libs."
global Image, transforms
from PIL import Image
from torchvision import transforms
| 14,171 | 33.231884 | 78 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/ModelConstructor.py | """
This file is for models creation, which consults options
and creates each encoder and decoder accordingly.
"""
import torch.nn as nn
import onmt
import onmt.Models
import onmt.modules
from onmt.Models import NMTModel, MeanEncoder, RNNEncoder, \
StdRNNDecoder, InputFeedRNNDecoder
from onmt.modules import Embeddings, ImageEncoder, CopyGenerator, \
TransformerEncoder, TransformerDecoder, \
CNNEncoder, CNNDecoder
def make_embeddings(opt, word_dict, feature_dicts, for_encoder=True):
"""
Make an Embeddings instance.
Args:
opt: the option in current environment.
word_dict(Vocab): words dictionary.
feature_dicts([Vocab], optional): a list of feature dictionary.
for_encoder(bool): make Embeddings for encoder or decoder?
"""
if for_encoder:
embedding_dim = opt.src_word_vec_size
else:
embedding_dim = opt.tgt_word_vec_size
word_padding_idx = word_dict.stoi[onmt.IO.PAD_WORD]
num_word_embeddings = len(word_dict)
feats_padding_idx = [feat_dict.stoi[onmt.IO.PAD_WORD]
for feat_dict in feature_dicts]
num_feat_embeddings = [len(feat_dict) for feat_dict in
feature_dicts]
return Embeddings(embedding_dim,
opt.position_encoding,
opt.feat_merge,
opt.feat_vec_exponent,
opt.feat_vec_size,
opt.dropout,
word_padding_idx,
feats_padding_idx,
num_word_embeddings,
num_feat_embeddings)
def make_encoder(opt, embeddings):
"""
Various encoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this encoder.
"""
if opt.encoder_type == "transformer":
return TransformerEncoder(opt.enc_layers, opt.rnn_size,
opt.dropout, embeddings)
elif opt.encoder_type == "cnn":
return CNNEncoder(opt.enc_layers, opt.rnn_size,
opt.cnn_kernel_width,
opt.dropout, embeddings)
elif opt.encoder_type == "mean":
return MeanEncoder(opt.enc_layers, embeddings)
else:
# "rnn" or "brnn"
return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,
opt.rnn_size, opt.dropout, embeddings)
def make_decoder(opt, embeddings):
"""
Various decoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this decoder.
"""
if opt.decoder_type == "transformer":
return TransformerDecoder(opt.dec_layers, opt.rnn_size,
opt.global_attention, opt.copy_attn,
opt.dropout, embeddings)
elif opt.decoder_type == "cnn":
return CNNDecoder(opt.dec_layers, opt.rnn_size,
opt.global_attention, opt.copy_attn,
opt.cnn_kernel_width, opt.dropout,
embeddings)
elif opt.input_feed:
return InputFeedRNNDecoder(opt.rnn_type, opt.brnn,
opt.dec_layers, opt.rnn_size,
opt.global_attention,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout,
embeddings)
else:
return StdRNNDecoder(opt.rnn_type, opt.brnn,
opt.dec_layers, opt.rnn_size,
opt.global_attention,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout,
embeddings)
def make_base_model(model_opt, fields, gpu, checkpoint=None):
"""
Args:
model_opt: the option loaded from checkpoint.
fields: `Field` objects for the model.
gpu(bool): whether to use gpu.
        checkpoint: the model generated by the train phase, or a resumed snapshot
model from a stopped training.
Returns:
the NMTModel.
"""
assert model_opt.model_type in ["text", "img"], \
("Unsupported model type %s" % (model_opt.model_type))
# Make encoder.
if model_opt.model_type == "text":
src_dict = fields["src"].vocab
feature_dicts = onmt.IO.collect_feature_dicts(fields, 'src')
src_embeddings = make_embeddings(model_opt, src_dict,
feature_dicts)
encoder = make_encoder(model_opt, src_embeddings)
else:
encoder = ImageEncoder(model_opt.layers,
model_opt.brnn,
model_opt.rnn_size,
model_opt.dropout)
# Make decoder.
tgt_dict = fields["tgt"].vocab
# TODO: prepare for a future where tgt features are possible.
feature_dicts = onmt.IO.collect_feature_dicts(fields, 'tgt')
tgt_embeddings = make_embeddings(model_opt, tgt_dict,
feature_dicts, for_encoder=False)
# Share the embedding matrix - preprocess with share_vocab required
if model_opt.share_embeddings:
tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight
decoder = make_decoder(model_opt, tgt_embeddings)
# Make NMTModel(= encoder + decoder).
model = NMTModel(encoder, decoder)
# Make Generator.
if not model_opt.copy_attn:
generator = nn.Sequential(
nn.Linear(model_opt.rnn_size, len(fields["tgt"].vocab)),
nn.LogSoftmax())
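        # Optionally tie the generator's output projection to the decoder's word
        # embedding matrix (weight sharing between input and output embeddings).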
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
else:
generator = CopyGenerator(model_opt, fields["src"].vocab,
fields["tgt"].vocab)
# Load the model states from checkpoint or initialize them.
if checkpoint is not None:
print('Loading model parameters.')
model.load_state_dict(checkpoint['model'])
generator.load_state_dict(checkpoint['generator'])
else:
if model_opt.param_init != 0.0:
            print('Initializing model parameters.')
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)
# Add generator to model (this registers it as parameter of model).
model.generator = generator
# Make the whole model leverage GPU if indicated to do so.
if gpu:
model.cuda()
else:
model.cpu()
return model
| 7,331 | 37.589474 | 76 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/Trainer.py | from __future__ import division
"""
This is the loadable seq2seq trainer library that is
in charge of training details, loss computation, and statistics.
See train.py for a use case of this library.
Note: to keep this library general, we implement *only* the
mechanism here (i.e. what to do) and leave the strategy
(i.e. how to do it) to the users. See train.py (one of the
users of this library) for the strategy we follow.
"""
import time
import sys
import math
import torch
import torch.nn as nn
import onmt
import onmt.modules
class Statistics(object):
"""
Train/validate loss statistics.
"""
def __init__(self, loss=0, n_words=0, n_correct=0):
self.loss = loss
self.n_words = n_words
self.n_correct = n_correct
self.n_src_words = 0
self.start_time = time.time()
def update(self, stat):
self.loss += stat.loss
self.n_words += stat.n_words
self.n_correct += stat.n_correct
def accuracy(self):
return 100 * (self.n_correct / self.n_words)
def ppl(self):
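        # Perplexity = exp(loss per target word); the exponent is capped at
        # 100 to avoid float overflow for very poor models early in training.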
return math.exp(min(self.loss / self.n_words, 100))
def elapsed_time(self):
return time.time() - self.start_time
def output(self, epoch, batch, n_batches, start):
t = self.elapsed_time()
print(("Epoch %2d, %5d/%5d; acc: %6.2f; ppl: %6.2f; " +
"%3.0f src tok/s; %3.0f tgt tok/s; %6.0f s elapsed") %
(epoch, batch, n_batches,
self.accuracy(),
self.ppl(),
self.n_src_words / (t + 1e-5),
self.n_words / (t + 1e-5),
time.time() - start))
sys.stdout.flush()
def log(self, prefix, experiment, lr):
t = self.elapsed_time()
experiment.add_scalar_value(prefix + "_ppl", self.ppl())
experiment.add_scalar_value(prefix + "_accuracy", self.accuracy())
experiment.add_scalar_value(prefix + "_tgtper", self.n_words / t)
experiment.add_scalar_value(prefix + "_lr", lr)
class Trainer(object):
def __init__(self, model, train_iter, valid_iter,
train_loss, valid_loss, optim,
trunc_size, shard_size):
"""
Args:
model: the seq2seq model.
train_iter: the train data iterator.
valid_iter: the validate data iterator.
train_loss: the train side LossCompute object for computing loss.
valid_loss: the valid side LossCompute object for computing loss.
optim: the optimizer responsible for lr update.
trunc_size: a batch is divided by several truncs of this size.
shard_size: compute loss in shards of this size for efficiency.
"""
# Basic attributes.
self.model = model
self.train_iter = train_iter
self.valid_iter = valid_iter
self.train_loss = train_loss
self.valid_loss = valid_loss
self.optim = optim
self.trunc_size = trunc_size
self.shard_size = shard_size
# Set model in training mode.
self.model.train()
def train(self, epoch, report_func=None):
""" Called for each epoch to train. """
total_stats = Statistics()
report_stats = Statistics()
for i, batch in enumerate(self.train_iter):
target_size = batch.tgt.size(0)
# Truncated BPTT
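            # The target sequence is processed in chunks of `trunc_size`;
            # gradients flow only within a chunk, and dec_state.detach() at
            # the end of each chunk cuts the graph so memory stays bounded.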
trunc_size = self.trunc_size if self.trunc_size else target_size
dec_state = None
_, src_lengths = batch.src
src = onmt.IO.make_features(batch, 'src')
tgt_outer = onmt.IO.make_features(batch, 'tgt')
report_stats.n_src_words += src_lengths.sum()
for j in range(0, target_size-1, trunc_size):
# 1. Create truncated target.
tgt = tgt_outer[j: j + trunc_size]
# 2. F-prop all but generator.
self.model.zero_grad()
outputs, attns, dec_state = \
self.model(src, tgt, src_lengths, dec_state)
# 3. Compute loss in shards for memory efficiency.
batch_stats = self.train_loss.sharded_compute_loss(
batch, outputs, attns, j,
trunc_size, self.shard_size)
# 4. Update the parameters and statistics.
self.optim.step()
total_stats.update(batch_stats)
report_stats.update(batch_stats)
# If truncated, don't backprop fully.
if dec_state is not None:
dec_state.detach()
if report_func is not None:
report_stats = report_func(
epoch, i, len(self.train_iter),
total_stats.start_time, self.optim.lr, report_stats)
return total_stats
def validate(self):
""" Called for each epoch to validate. """
# Set model in validating mode.
self.model.eval()
stats = Statistics()
for batch in self.valid_iter:
_, src_lengths = batch.src
src = onmt.IO.make_features(batch, 'src')
tgt = onmt.IO.make_features(batch, 'tgt')
# F-prop through the model.
outputs, attns, _ = self.model(src, tgt, src_lengths)
# Compute loss.
batch_stats = self.valid_loss.monolithic_compute_loss(
batch, outputs, attns)
# Update statistics.
stats.update(batch_stats)
# Set model back to training mode.
self.model.train()
return stats
def epoch_step(self, ppl, epoch):
""" Called for each epoch to update learning rate. """
return self.optim.updateLearningRate(ppl, epoch)
def drop_checkpoint(self, opt, epoch, fields, valid_stats):
""" Called conditionally each epoch to save a snapshot. """
real_model = (self.model.module
if isinstance(self.model, nn.DataParallel)
else self.model)
real_generator = (real_model.generator.module
if isinstance(real_model.generator, nn.DataParallel)
else real_model.generator)
model_state_dict = real_model.state_dict()
model_state_dict = {k: v for k, v in model_state_dict.items()
if 'generator' not in k}
generator_state_dict = real_generator.state_dict()
checkpoint = {
'model': model_state_dict,
'generator': generator_state_dict,
'vocab': onmt.IO.save_vocab(fields),
'opt': opt,
'epoch': epoch,
'optim': self.optim
}
torch.save(checkpoint, '%s.pt' % opt.save_model)
| 6,823 | 33.994872 | 78 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/Optim.py | import torch.optim as optim
from torch.nn.utils import clip_grad_norm
class Optim(object):
def set_parameters(self, params):
self.params = [p for p in params if p.requires_grad]
if self.method == 'sgd':
self.optimizer = optim.SGD(self.params, lr=self.lr)
elif self.method == 'adagrad':
self.optimizer = optim.Adagrad(self.params, lr=self.lr)
elif self.method == 'adadelta':
self.optimizer = optim.Adadelta(self.params, lr=self.lr)
elif self.method == 'adam':
self.optimizer = optim.Adam(self.params, lr=self.lr,
betas=self.betas, eps=1e-9)
else:
raise RuntimeError("Invalid optim method: " + self.method)
def __init__(self, method, lr, max_grad_norm,
lr_decay=1, start_decay_at=None,
beta1=0.9, beta2=0.98,
opt=None):
self.last_ppl = None
self.lr = lr
self.max_grad_norm = max_grad_norm
self.method = method
self.lr_decay = lr_decay
self.start_decay_at = start_decay_at
self.start_decay = False
self._step = 0
self.betas = [beta1, beta2]
self.opt = opt
def _setRate(self, lr):
self.lr = lr
self.optimizer.param_groups[0]['lr'] = self.lr
    def step(self):
        "Optionally rescale the learning rate, clip gradients, and step."
self._step += 1
# Decay method used in tensor2tensor.
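        # lr = learning_rate * rnn_size^(-0.5)
        #        * min(step^(-0.5), step * warmup_steps^(-1.5))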
if self.opt.__dict__.get("decay_method", "") == "noam":
self._setRate(
self.opt.learning_rate *
(self.opt.rnn_size ** (-0.5) *
min(self._step ** (-0.5),
self._step * self.opt.warmup_steps**(-1.5))))
if self.max_grad_norm:
clip_grad_norm(self.params, self.max_grad_norm)
self.optimizer.step()
def updateLearningRate(self, ppl, epoch):
"""
Decay learning rate if val perf does not improve
or we hit the start_decay_at limit.
"""
if self.start_decay_at is not None and epoch >= self.start_decay_at:
self.start_decay = True
if self.last_ppl is not None and ppl > self.last_ppl:
self.start_decay = True
if self.start_decay:
self.lr = self.lr * self.lr_decay
print("Decaying learning rate to %g" % self.lr)
self.last_ppl = ppl
self.optimizer.param_groups[0]['lr'] = self.lr
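if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): wrap a toy
    # model's parameters and take one clipped SGD step. The empty Namespace
    # stands in for the training options object normally passed as `opt`,
    # and all sizes are arbitrary illustrative choices.
    from argparse import Namespace
    import torch
    import torch.nn as nn
    from torch.autograd import Variable
    model = nn.Linear(4, 2)
    optimizer = Optim('sgd', lr=1.0, max_grad_norm=5, opt=Namespace())
    optimizer.set_parameters(model.parameters())
    loss = model(Variable(torch.randn(3, 4))).sum()
    loss.backward()
    optimizer.step()
    print(optimizer.lr, optimizer._step)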
| 2,490 | 33.123288 | 76 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/Models.py | from __future__ import division
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
import onmt
from onmt.Utils import aeq
class EncoderBase(nn.Module):
"""
EncoderBase class for sharing code among various encoder.
"""
def _check_args(self, input, lengths=None, hidden=None):
s_len, n_batch, n_feats = input.size()
if lengths is not None:
n_batch_, = lengths.size()
aeq(n_batch, n_batch_)
def forward(self, input, lengths=None, hidden=None):
"""
Args:
input (LongTensor): len x batch x nfeat.
lengths (LongTensor): batch
hidden: Initial hidden state.
Returns:
hidden_t (Variable): Pair of layers x batch x rnn_size - final
encoder state
outputs (FloatTensor): len x batch x rnn_size - Memory bank
"""
raise NotImplementedError
class MeanEncoder(EncoderBase):
""" A trivial encoder without RNN, just takes mean as final state. """
def __init__(self, num_layers, embeddings):
super(MeanEncoder, self).__init__()
self.num_layers = num_layers
self.embeddings = embeddings
def forward(self, input, lengths=None, hidden=None):
""" See EncoderBase.forward() for description of args and returns. """
self._check_args(input, lengths, hidden)
emb = self.embeddings(input)
s_len, batch, emb_dim = emb.size()
mean = emb.mean(0).expand(self.num_layers, batch, emb_dim)
return (mean, mean), emb
class RNNEncoder(EncoderBase):
""" The standard RNN encoder. """
def __init__(self, rnn_type, bidirectional, num_layers,
hidden_size, dropout, embeddings):
super(RNNEncoder, self).__init__()
num_directions = 2 if bidirectional else 1
assert hidden_size % num_directions == 0
hidden_size = hidden_size // num_directions
self.embeddings = embeddings
self.no_pack_padded_seq = False
# Use pytorch version when available.
if rnn_type == "SRU":
# SRU doesn't support PackedSequence.
self.no_pack_padded_seq = True
self.rnn = onmt.modules.SRU(
input_size=embeddings.embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional)
else:
self.rnn = getattr(nn, rnn_type)(
input_size=embeddings.embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional)
def forward(self, input, lengths=None, hidden=None):
""" See EncoderBase.forward() for description of args and returns."""
self._check_args(input, lengths, hidden)
emb = self.embeddings(input)
s_len, batch, emb_dim = emb.size()
packed_emb = emb
if lengths is not None and not self.no_pack_padded_seq:
# Lengths data is wrapped inside a Variable.
lengths = lengths.view(-1).tolist()
packed_emb = pack(emb, lengths)
outputs, hidden_t = self.rnn(packed_emb, hidden)
if lengths is not None and not self.no_pack_padded_seq:
outputs = unpack(outputs)[0]
return hidden_t, outputs
class RNNDecoderBase(nn.Module):
"""
RNN decoder base class.
"""
def __init__(self, rnn_type, bidirectional_encoder, num_layers,
hidden_size, attn_type, coverage_attn, context_gate,
copy_attn, dropout, embeddings):
super(RNNDecoderBase, self).__init__()
# Basic attributes.
self.decoder_type = 'rnn'
self.bidirectional_encoder = bidirectional_encoder
self.num_layers = num_layers
self.hidden_size = hidden_size
self.embeddings = embeddings
self.dropout = nn.Dropout(dropout)
# Build the RNN.
self.rnn = self._build_rnn(rnn_type, self._input_size, hidden_size,
num_layers, dropout)
# Set up the context gate.
self.context_gate = None
if context_gate is not None:
self.context_gate = onmt.modules.ContextGateFactory(
context_gate, self._input_size,
hidden_size, hidden_size, hidden_size
)
# Set up the standard attention.
self._coverage = coverage_attn
self.attn = onmt.modules.GlobalAttention(
hidden_size, coverage=coverage_attn,
attn_type=attn_type
)
# Set up a separated copy attention layer, if needed.
self._copy = False
if copy_attn:
self.copy_attn = onmt.modules.GlobalAttention(
hidden_size, attn_type=attn_type
)
self._copy = True
def forward(self, input, context, state):
"""
Forward through the decoder.
Args:
input (LongTensor): a sequence of input tokens tensors
of size (len x batch x nfeats).
context (FloatTensor): output(tensor sequence) from the encoder
RNN of size (src_len x batch x hidden_size).
state (FloatTensor): hidden state from the encoder RNN for
initializing the decoder.
Returns:
outputs (FloatTensor): a Tensor sequence of output from the decoder
of shape (len x batch x hidden_size).
state (FloatTensor): final hidden state from the decoder.
attns (dict of (str, FloatTensor)): a dictionary of different
type of attention Tensor from the decoder
of shape (src_len x batch).
"""
# Args Check
assert isinstance(state, RNNDecoderState)
input_len, input_batch, _ = input.size()
contxt_len, contxt_batch, _ = context.size()
aeq(input_batch, contxt_batch)
# END Args Check
# Run the forward pass of the RNN.
hidden, outputs, attns, coverage = \
self._run_forward_pass(input, context, state)
# Update the state with the result.
final_output = outputs[-1]
state.update_state(hidden, final_output.unsqueeze(0),
coverage.unsqueeze(0)
if coverage is not None else None)
# Concatenates sequence of tensors along a new dimension.
outputs = torch.stack(outputs)
for k in attns:
attns[k] = torch.stack(attns[k])
return outputs, state, attns
def _fix_enc_hidden(self, h):
"""
The encoder hidden is (layers*directions) x batch x dim.
We need to convert it to layers x batch x (directions*dim).
"""
if self.bidirectional_encoder:
h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)
return h
def init_decoder_state(self, src, context, enc_hidden):
if isinstance(enc_hidden, tuple): # LSTM
return RNNDecoderState(context, self.hidden_size,
tuple([self._fix_enc_hidden(enc_hidden[i])
for i in range(len(enc_hidden))]))
else: # GRU
return RNNDecoderState(context, self.hidden_size,
self._fix_enc_hidden(enc_hidden))
class StdRNNDecoder(RNNDecoderBase):
"""
    Standard RNN decoder, with Attention.
Currently no 'coverage_attn' and 'copy_attn' support.
"""
def _run_forward_pass(self, input, context, state):
"""
Private helper for running the specific RNN forward pass.
        Must be overridden by all subclasses.
Args:
input (LongTensor): a sequence of input tokens tensors
of size (len x batch x nfeats).
context (FloatTensor): output(tensor sequence) from the encoder
RNN of size (src_len x batch x hidden_size).
state (FloatTensor): hidden state from the encoder RNN for
initializing the decoder.
Returns:
hidden (Variable): final hidden state from the decoder.
outputs ([FloatTensor]): an array of output of every time
step from the decoder.
attns (dict of (str, [FloatTensor]): a dictionary of different
type of attention Tensor array of every time
step from the decoder.
coverage (FloatTensor, optional): coverage from the decoder.
"""
assert not self._copy # TODO, no support yet.
assert not self._coverage # TODO, no support yet.
# Initialize local and return variables.
outputs = []
attns = {"std": []}
coverage = None
emb = self.embeddings(input)
# Run the forward pass of the RNN.
if isinstance(self.rnn, nn.GRU):
rnn_output, hidden = self.rnn(emb, state.hidden[0])
else:
rnn_output, hidden = self.rnn(emb, state.hidden)
# Result Check
input_len, input_batch, _ = input.size()
output_len, output_batch, _ = rnn_output.size()
aeq(input_len, output_len)
aeq(input_batch, output_batch)
# END Result Check
# Calculate the attention.
attn_outputs, attn_scores = self.attn(
rnn_output.transpose(0, 1).contiguous(), # (output_len, batch, d)
context.transpose(0, 1) # (contxt_len, batch, d)
)
attns["std"] = attn_scores
# Calculate the context gate.
if self.context_gate is not None:
outputs = self.context_gate(
emb.view(-1, emb.size(2)),
rnn_output.view(-1, rnn_output.size(2)),
attn_outputs.view(-1, attn_outputs.size(2))
)
outputs = outputs.view(input_len, input_batch, self.hidden_size)
outputs = self.dropout(outputs)
else:
outputs = self.dropout(attn_outputs) # (input_len, batch, d)
# Return result.
return hidden, outputs, attns, coverage
def _build_rnn(self, rnn_type, input_size,
hidden_size, num_layers, dropout):
"""
Private helper for building standard decoder RNN.
"""
# Use pytorch version when available.
if rnn_type == "SRU":
return onmt.modules.SRU(
input_size, hidden_size,
num_layers=num_layers,
dropout=dropout)
return getattr(nn, rnn_type)(
input_size, hidden_size,
num_layers=num_layers,
dropout=dropout)
@property
def _input_size(self):
"""
Private helper returning the number of expected features.
"""
return self.embeddings.embedding_size
class InputFeedRNNDecoder(RNNDecoderBase):
"""
    Standard RNN decoder, with Input Feed and Attention.
"""
def _run_forward_pass(self, input, context, state):
"""
See StdRNNDecoder._run_forward_pass() for description
of arguments and return values.
"""
# Additional args check.
output = state.input_feed.squeeze(0)
output_batch, _ = output.size()
input_len, input_batch, _ = input.size()
aeq(input_batch, output_batch)
# END Additional args check.
# Initialize local and return variables.
outputs = []
attns = {"std": []}
if self._copy:
attns["copy"] = []
if self._coverage:
attns["coverage"] = []
emb = self.embeddings(input)
assert emb.dim() == 3 # len x batch x embedding_dim
hidden = state.hidden
coverage = state.coverage.squeeze(0) \
if state.coverage is not None else None
# Input feed concatenates hidden state with
# input at every time step.
for i, emb_t in enumerate(emb.split(1)):
emb_t = emb_t.squeeze(0)
emb_t = torch.cat([emb_t, output], 1)
rnn_output, hidden = self.rnn(emb_t, hidden)
attn_output, attn = self.attn(rnn_output,
context.transpose(0, 1))
if self.context_gate is not None:
output = self.context_gate(
emb_t, rnn_output, attn_output
)
output = self.dropout(output)
else:
output = self.dropout(attn_output)
outputs += [output]
attns["std"] += [attn]
# Update the coverage attention.
if self._coverage:
coverage = coverage + attn \
if coverage is not None else attn
attns["coverage"] += [coverage]
# Run the forward pass of the copy attention layer.
if self._copy:
_, copy_attn = self.copy_attn(output,
context.transpose(0, 1))
attns["copy"] += [copy_attn]
# Return result.
return hidden, outputs, attns, coverage
def _build_rnn(self, rnn_type, input_size,
hidden_size, num_layers, dropout):
assert not rnn_type == "SRU", "SRU doesn't support input feed! " \
"Please set -input_feed 0!"
if rnn_type == "LSTM":
stacked_cell = onmt.modules.StackedLSTM
else:
stacked_cell = onmt.modules.StackedGRU
return stacked_cell(num_layers, input_size,
hidden_size, dropout)
@property
def _input_size(self):
"""
Using input feed by concatenating input with attention vectors.
"""
return self.embeddings.embedding_size + self.hidden_size
class NMTModel(nn.Module):
"""
The encoder + decoder Neural Machine Translation Model.
"""
def __init__(self, encoder, decoder, multigpu=False):
"""
Args:
encoder(*Encoder): the various encoder.
decoder(*Decoder): the various decoder.
            multigpu(bool): run in parallel on multiple GPUs?
"""
self.multigpu = multigpu
super(NMTModel, self).__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, src, tgt, lengths, dec_state=None):
"""
Args:
src(FloatTensor): a sequence of source tensors with
optional feature tensors of size (len x batch).
tgt(FloatTensor): a sequence of target tensors with
optional feature tensors of size (len x batch).
lengths([int]): an array of the src length.
dec_state: A decoder state object
Returns:
outputs (FloatTensor): (len x batch x hidden_size): decoder outputs
attns (FloatTensor): Dictionary of (src_len x batch)
dec_hidden (FloatTensor): tuple (1 x batch x hidden_size)
Init hidden state
"""
src = src
tgt = tgt[:-1] # exclude last target from inputs
enc_hidden, context = self.encoder(src, lengths)
enc_state = self.decoder.init_decoder_state(src, context, enc_hidden)
out, dec_state, attns = self.decoder(tgt, context,
enc_state if dec_state is None
else dec_state)
if self.multigpu:
# Not yet supported on multi-gpu
dec_state = None
attns = None
return out, attns, dec_state
class DecoderState(object):
"""
DecoderState is a base class for models, used during translation
for storing translation states.
"""
def detach(self):
"""
Detaches all Variables from the graph
that created it, making it a leaf.
"""
for h in self._all:
if h is not None:
h.detach_()
def beam_update(self, idx, positions, beam_size):
""" Update when beam advances. """
for e in self._all:
a, br, d = e.size()
sentStates = e.view(a, beam_size, br // beam_size, d)[:, :, idx]
sentStates.data.copy_(
sentStates.data.index_select(1, positions))
class RNNDecoderState(DecoderState):
def __init__(self, context, hidden_size, rnnstate):
"""
Args:
context (FloatTensor): output from the encoder of size
len x batch x rnn_size.
hidden_size (int): the size of hidden layer of the decoder.
rnnstate (Variable): final hidden state from the encoder.
transformed to shape: layers x batch x (directions*dim).
input_feed (FloatTensor): output from last layer of the decoder.
coverage (FloatTensor): coverage output from the decoder.
"""
if not isinstance(rnnstate, tuple):
self.hidden = (rnnstate,)
else:
self.hidden = rnnstate
self.coverage = None
# Init the input feed.
batch_size = context.size(1)
h_size = (batch_size, hidden_size)
self.input_feed = Variable(context.data.new(*h_size).zero_(),
requires_grad=False).unsqueeze(0)
@property
def _all(self):
return self.hidden + (self.input_feed,)
def update_state(self, rnnstate, input_feed, coverage):
if not isinstance(rnnstate, tuple):
self.hidden = (rnnstate,)
else:
self.hidden = rnnstate
self.input_feed = input_feed
self.coverage = coverage
def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
vars = [Variable(e.data.repeat(1, beam_size, 1), volatile=True)
for e in self._all]
self.hidden = tuple(vars[:-1])
self.input_feed = vars[-1]
| 18,492 | 36.209256 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/ConvMultiStepAttention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.Utils import aeq
SCALE_WEIGHT = 0.5 ** 0.5
def seq_linear(linear, x):
# linear transform for 3-d tensor
batch, hidden_size, length, _ = x.size()
h = linear(torch.transpose(x, 1, 2).contiguous().view(
batch * length, hidden_size))
return torch.transpose(h.view(batch, length, hidden_size, 1), 1, 2)
class ConvMultiStepAttention(nn.Module):
def __init__(self, input_size):
super(ConvMultiStepAttention, self).__init__()
self.linear_in = nn.Linear(input_size, input_size)
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, base_target_emb, input, encoder_out_top,
encoder_out_combine):
"""
        Similar to Luong attention.
        Conv attention takes a key matrix, a value matrix and a query vector.
        Attention weights are computed from the key matrix and the query
        vector, then used for a weighted sum over the value matrix. The same
        operation is applied in every decoder conv layer.
        Args:
            base_target_emb: target embedding tensor
            input: output of the decoder convolution
            encoder_out_top: the key matrix used to compute attention weights,
                which is the top output of the encoder convolution
            encoder_out_combine: the value matrix for the attention-weighted
                sum, which is the combination of the base embeddings and the
                top encoder output
"""
# checks
batch, channel, height, width = base_target_emb.size()
batch_, channel_, height_, width_ = input.size()
aeq(batch, batch_)
aeq(height, height_)
enc_batch, enc_channel, enc_height = encoder_out_top.size()
enc_batch_, enc_channel_, enc_height_ = encoder_out_combine.size()
aeq(enc_batch, enc_batch_)
aeq(enc_height, enc_height_)
preatt = seq_linear(self.linear_in, input)
target = (base_target_emb + preatt) * SCALE_WEIGHT
target = torch.squeeze(target, 3)
target = torch.transpose(target, 1, 2)
pre_attn = torch.bmm(target, encoder_out_top)
if self.mask is not None:
pre_attn.data.masked_fill_(self.mask, -float('inf'))
pre_attn = pre_attn.transpose(0, 2)
attn = F.softmax(pre_attn)
attn = attn.transpose(0, 2).contiguous()
context_output = torch.bmm(
attn, torch.transpose(encoder_out_combine, 1, 2))
context_output = torch.transpose(
torch.unsqueeze(context_output, 3), 1, 2)
return context_output, attn
| 2,610 | 34.767123 | 77 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/Transformer.py | """
Implementation of "Attention is All You Need"
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import onmt
from onmt.Models import EncoderBase
from onmt.Models import DecoderState
from onmt.Utils import aeq
MAX_SIZE = 5000
class PositionwiseFeedForward(nn.Module):
""" A two-layer Feed-Forward-Network."""
def __init__(self, size, hidden_size, dropout=0.1):
"""
Args:
size(int): the size of input for the first-layer of the FFN.
hidden_size(int): the hidden layer size of the second-layer
of the FNN.
            dropout(float): dropout probability (0-1.0).
"""
super(PositionwiseFeedForward, self).__init__()
self.w_1 = onmt.modules.BottleLinear(size, hidden_size)
self.w_2 = onmt.modules.BottleLinear(hidden_size, size)
self.layer_norm = onmt.modules.BottleLayerNorm(size)
self.dropout = nn.Dropout(dropout)
self.relu = nn.ReLU()
def forward(self, x):
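        # Residual form: LayerNorm(x + Dropout(W_2(ReLU(W_1(x)))))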
residual = x
output = self.dropout(self.w_2(self.relu(self.w_1(x))))
return self.layer_norm(output + residual)
class TransformerEncoderLayer(nn.Module):
def __init__(self, size, dropout,
head_count=8, hidden_size=2048):
"""
Args:
size(int): the dimension of keys/values/queries in
MultiHeadedAttention, also the input size of
the first-layer of the PositionwiseFeedForward.
            dropout(float): dropout probability (0-1.0).
head_count(int): the number of head for MultiHeadedAttention.
hidden_size(int): the second-layer of the PositionwiseFeedForward.
"""
super(TransformerEncoderLayer, self).__init__()
self.self_attn = onmt.modules.MultiHeadedAttention(
head_count, size, p=dropout)
self.feed_forward = PositionwiseFeedForward(size,
hidden_size,
dropout)
def forward(self, input, mask):
mid, _ = self.self_attn(input, input, input, mask=mask)
out = self.feed_forward(mid)
return out
class TransformerEncoder(EncoderBase):
"""
The Transformer encoder from "Attention is All You Need".
"""
def __init__(self, num_layers, hidden_size,
dropout, embeddings):
super(TransformerEncoder, self).__init__()
self.num_layers = num_layers
self.embeddings = embeddings
self.transformer = nn.ModuleList(
[TransformerEncoderLayer(hidden_size, dropout)
for i in range(num_layers)])
def forward(self, input, lengths=None, hidden=None):
""" See EncoderBase.forward() for description of args and returns."""
self._check_args(input, lengths, hidden)
emb = self.embeddings(input)
s_len, n_batch, emb_dim = emb.size()
out = emb.transpose(0, 1).contiguous()
words = input[:, :, 0].transpose(0, 1)
# CHECKS
out_batch, out_len, _ = out.size()
w_batch, w_len = words.size()
aeq(out_batch, w_batch)
aeq(out_len, w_len)
# END CHECKS
# Make mask.
padding_idx = self.embeddings.word_padding_idx
mask = words.data.eq(padding_idx).unsqueeze(1) \
.expand(w_batch, w_len, w_len)
        # Run the forward pass of every layer of the transformer.
for i in range(self.num_layers):
out = self.transformer[i](out, mask)
return Variable(emb.data), out.transpose(0, 1).contiguous()
class TransformerDecoderLayer(nn.Module):
def __init__(self, size, dropout,
head_count=8, hidden_size=2048):
"""
Args:
size(int): the dimension of keys/values/queries in
MultiHeadedAttention, also the input size of
the first-layer of the PositionwiseFeedForward.
            dropout(float): dropout probability (0-1.0).
head_count(int): the number of head for MultiHeadedAttention.
hidden_size(int): the second-layer of the PositionwiseFeedForward.
"""
super(TransformerDecoderLayer, self).__init__()
self.self_attn = onmt.modules.MultiHeadedAttention(
head_count, size, p=dropout)
self.context_attn = onmt.modules.MultiHeadedAttention(
head_count, size, p=dropout)
self.feed_forward = PositionwiseFeedForward(size,
hidden_size,
dropout)
self.dropout = dropout
mask = self._get_attn_subsequent_mask(MAX_SIZE)
# Register self.mask as a buffer in TransformerDecoderLayer, so
# it gets TransformerDecoderLayer's cuda behavior automatically.
self.register_buffer('mask', mask)
def forward(self, input, context, src_pad_mask, tgt_pad_mask):
# Args Checks
input_batch, input_len, _ = input.size()
contxt_batch, contxt_len, _ = context.size()
aeq(input_batch, contxt_batch)
src_batch, t_len, s_len = src_pad_mask.size()
tgt_batch, t_len_, t_len__ = tgt_pad_mask.size()
aeq(input_batch, contxt_batch, src_batch, tgt_batch)
aeq(t_len, t_len_, t_len__, input_len)
aeq(s_len, contxt_len)
# END Args Checks
dec_mask = torch.gt(tgt_pad_mask + self.mask[:, :tgt_pad_mask.size(1),
:tgt_pad_mask.size(1)]
.expand_as(tgt_pad_mask), 0)
query, attn = self.self_attn(input, input, input, mask=dec_mask)
mid, attn = self.context_attn(context, context, query,
mask=src_pad_mask)
output = self.feed_forward(mid)
# CHECKS
output_batch, output_len, _ = output.size()
aeq(input_len, output_len)
aeq(contxt_batch, output_batch)
n_batch_, t_len_, s_len_ = attn.size()
aeq(input_batch, n_batch_)
aeq(contxt_len, s_len_)
aeq(input_len, t_len_)
# END CHECKS
return output, attn
def _get_attn_subsequent_mask(self, size):
''' Get an attention mask to avoid using the subsequent info.'''
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
subsequent_mask = torch.from_numpy(subsequent_mask)
return subsequent_mask
class TransformerDecoder(nn.Module):
"""
The Transformer decoder from "Attention is All You Need".
"""
def __init__(self, num_layers, hidden_size, attn_type,
copy_attn, dropout, embeddings):
super(TransformerDecoder, self).__init__()
# Basic attributes.
self.decoder_type = 'transformer'
self.num_layers = num_layers
self.embeddings = embeddings
# Build TransformerDecoder.
self.transformer_layers = nn.ModuleList(
[TransformerDecoderLayer(hidden_size, dropout)
for _ in range(num_layers)])
# TransformerDecoder has its own attention mechanism.
# Set up a separated copy attention layer, if needed.
self._copy = False
if copy_attn:
self.copy_attn = onmt.modules.GlobalAttention(
hidden_size, attn_type=attn_type)
self._copy = True
def forward(self, input, context, state):
"""
Forward through the TransformerDecoder.
Args:
input (LongTensor): a sequence of input tokens tensors
of size (len x batch x nfeats).
context (FloatTensor): output(tensor sequence) from the encoder
of size (src_len x batch x hidden_size).
state (FloatTensor): hidden state from the encoder RNN for
initializing the decoder.
Returns:
outputs (FloatTensor): a Tensor sequence of output from the decoder
of shape (len x batch x hidden_size).
state (FloatTensor): final hidden state from the decoder.
attns (dict of (str, FloatTensor)): a dictionary of different
type of attention Tensor from the decoder
of shape (src_len x batch).
"""
# CHECKS
assert isinstance(state, TransformerDecoderState)
input_len, input_batch, _ = input.size()
contxt_len, contxt_batch, _ = context.size()
aeq(input_batch, contxt_batch)
if state.previous_input is not None:
input = torch.cat([state.previous_input, input], 0)
src = state.src
src_words = src[:, :, 0].transpose(0, 1)
tgt_words = input[:, :, 0].transpose(0, 1)
src_batch, src_len = src_words.size()
tgt_batch, tgt_len = tgt_words.size()
aeq(input_batch, contxt_batch, src_batch, tgt_batch)
aeq(contxt_len, src_len)
# aeq(input_len, tgt_len)
# END CHECKS
# Initialize return variables.
outputs = []
attns = {"std": []}
if self._copy:
attns["copy"] = []
# Run the forward pass of the TransformerDecoder.
emb = self.embeddings(input)
assert emb.dim() == 3 # len x batch x embedding_dim
output = emb.transpose(0, 1).contiguous()
src_context = context.transpose(0, 1).contiguous()
padding_idx = self.embeddings.word_padding_idx
src_pad_mask = src_words.data.eq(padding_idx).unsqueeze(1) \
.expand(src_batch, tgt_len, src_len)
tgt_pad_mask = tgt_words.data.eq(padding_idx).unsqueeze(1) \
.expand(tgt_batch, tgt_len, tgt_len)
for i in range(self.num_layers):
output, attn \
= self.transformer_layers[i](output, src_context,
src_pad_mask, tgt_pad_mask)
# Process the result and update the attentions.
outputs = output.transpose(0, 1).contiguous()
if state.previous_input is not None:
outputs = outputs[state.previous_input.size(0):]
attn = attn[:, state.previous_input.size(0):].squeeze()
attn = torch.stack([attn])
attns["std"] = attn
if self._copy:
attns["copy"] = attn
# Update the state.
state.update_state(input)
return outputs, state, attns
def init_decoder_state(self, src, context, enc_hidden):
return TransformerDecoderState(src)
class TransformerDecoderState(DecoderState):
def __init__(self, src):
"""
Args:
src (FloatTensor): a sequence of source words tensors
with optional feature tensors, of size (len x batch).
"""
self.src = src
self.previous_input = None
@property
def _all(self):
"""
Contains attributes that need to be updated in self.beam_update().
"""
return (self.previous_input, self.src)
def update_state(self, input):
""" Called for every decoder forward pass. """
self.previous_input = input
def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
self.src = Variable(self.src.data.repeat(1, beam_size, 1),
volatile=True)
| 11,553 | 36.391586 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/Embeddings.py | import torch
import torch.nn as nn
from torch.autograd import Variable
from onmt.modules import BottleLinear, Elementwise
from onmt.Utils import aeq
class PositionalEncoding(nn.Module):
def __init__(self, dropout, dim, max_len=5000):
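        # Precompute a sinusoidal table: column k of `pe` holds
        # sin(pos / 10000^(2k/dim)) for even k and cos(pos / 10000^(2k/dim))
        # for odd k, following "Attention is All You Need".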
pe = torch.arange(0, max_len).unsqueeze(1).expand(max_len, dim)
div_term = 1 / torch.pow(10000, torch.arange(0, dim * 2, 2) / dim)
pe = pe * div_term.expand_as(pe)
pe[:, 0::2] = torch.sin(pe[:, 0::2])
pe[:, 1::2] = torch.cos(pe[:, 1::2])
pe = pe.unsqueeze(1)
super(PositionalEncoding, self).__init__()
self.register_buffer('pe', pe)
self.dropout = nn.Dropout(p=dropout)
def forward(self, emb):
# We must wrap the self.pe in Variable to compute, not the other
# way - unwrap emb(i.e. emb.data). Otherwise the computation
# wouldn't be watched to build the compute graph.
emb = emb + Variable(self.pe[:emb.size(0), :1, :emb.size(2)]
.expand_as(emb), requires_grad=False)
emb = self.dropout(emb)
return emb
class Embeddings(nn.Module):
"""
Words embeddings dictionary for encoder/decoder.
Args:
word_vec_size (int): size of the dictionary of embeddings.
        position_encoding (bool): add sinusoidal encodings of word positions.
feat_merge (string): merge action for the features embeddings:
concat, sum or mlp.
        feat_vec_exponent (float): when using '-feat_merge concat', feature
                    embedding size is N^feat_vec_exponent, where N is the
                    number of values the feature takes.
feat_vec_size (int): embedding dimension for features when using
'-feat_merge mlp'
dropout (float): dropout probability.
word_padding_idx (int): padding index for words in the embeddings.
feats_padding_idx ([int]): padding index for a list of features
in the embeddings.
word_vocab_size (int): size of dictionary of embeddings for words.
feat_vocab_sizes ([int], optional): list of size of dictionary
of embeddings for each feature.
"""
def __init__(self, word_vec_size, position_encoding, feat_merge,
feat_vec_exponent, feat_vec_size, dropout,
word_padding_idx, feat_padding_idx,
word_vocab_size, feat_vocab_sizes=[]):
self.word_padding_idx = word_padding_idx
# Dimensions and padding for constructing the word embedding matrix
vocab_sizes = [word_vocab_size]
emb_dims = [word_vec_size]
pad_indices = [word_padding_idx]
# Dimensions and padding for feature embedding matrices
# (these have no effect if feat_vocab_sizes is empty)
if feat_merge == 'sum':
feat_dims = [word_vec_size] * len(feat_vocab_sizes)
elif feat_vec_size > 0:
feat_dims = [feat_vec_size] * len(feat_vocab_sizes)
else:
feat_dims = [int(vocab ** feat_vec_exponent)
for vocab in feat_vocab_sizes]
vocab_sizes.extend(feat_vocab_sizes)
emb_dims.extend(feat_dims)
pad_indices.extend(feat_padding_idx)
# The embedding matrix look-up tables. The first look-up table
# is for words. Subsequent ones are for features, if any exist.
emb_params = zip(vocab_sizes, emb_dims, pad_indices)
embeddings = [nn.Embedding(vocab, dim, padding_idx=pad)
for vocab, dim, pad in emb_params]
emb_luts = Elementwise(feat_merge, embeddings)
# The final output size of word + feature vectors. This can vary
# from the word vector size if and only if features are defined.
# This is the attribute you should access if you need to know
# how big your embeddings are going to be.
self.embedding_size = (sum(emb_dims) if feat_merge == 'concat'
else word_vec_size)
# The sequence of operations that converts the input sequence
# into a sequence of embeddings. At minimum this consists of
# looking up the embeddings for each word and feature in the
# input. Model parameters may require the sequence to contain
# additional operations as well.
super(Embeddings, self).__init__()
self.make_embedding = nn.Sequential()
self.make_embedding.add_module('emb_luts', emb_luts)
if feat_merge == 'mlp':
in_dim = sum(emb_dims)
out_dim = word_vec_size
mlp = nn.Sequential(BottleLinear(in_dim, out_dim), nn.ReLU())
self.make_embedding.add_module('mlp', mlp)
if position_encoding:
pe = PositionalEncoding(dropout, self.embedding_size)
self.make_embedding.add_module('pe', pe)
@property
def word_lut(self):
return self.make_embedding[0][0]
@property
def emb_luts(self):
return self.make_embedding[0]
def load_pretrained_vectors(self, emb_file, fixed):
if emb_file:
pretrained = torch.load(emb_file)
self.word_lut.weight.data.copy_(pretrained)
if fixed:
self.word_lut.weight.requires_grad = False
def forward(self, input):
"""
Return the embeddings for words, and features if there are any.
Args:
input (LongTensor): len x batch x nfeat
Return:
emb (FloatTensor): len x batch x self.embedding_size
"""
in_length, in_batch, nfeat = input.size()
aeq(nfeat, len(self.emb_luts))
emb = self.make_embedding(input)
out_length, out_batch, emb_size = emb.size()
aeq(in_length, out_length)
aeq(in_batch, out_batch)
aeq(emb_size, self.embedding_size)
return emb
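if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): word-only
    # embeddings (no extra features) with positional encoding enabled.
    # All sizes below are arbitrary illustrative choices.
    emb = Embeddings(word_vec_size=16, position_encoding=True,
                     feat_merge='concat', feat_vec_exponent=0.7,
                     feat_vec_size=-1, dropout=0.1,
                     word_padding_idx=1, feat_padding_idx=[],
                     word_vocab_size=100, feat_vocab_sizes=[])
    inp = Variable(torch.LongTensor(7, 2, 1).random_(0, 100))  # len x batch x nfeat
    print(emb(inp).size())  # expected: (7, 2, 16)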
| 5,928 | 39.609589 | 77 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/CopyGenerator.py | import torch.nn as nn
import torch.nn.functional as F
import torch
import torch.cuda
import onmt
from onmt.Utils import aeq
class CopyGenerator(nn.Module):
"""
Generator module that additionally considers copying
words directly from the source.
"""
def __init__(self, opt, src_dict, tgt_dict):
super(CopyGenerator, self).__init__()
self.linear = nn.Linear(opt.rnn_size, len(tgt_dict))
self.linear_copy = nn.Linear(opt.rnn_size, 1)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
def forward(self, hidden, attn, src_map):
"""
        Computes p(w) = p(z=1) * p_{copy}(w|z=1) + p(z=0) * p_{softmax}(w|z=0)
"""
# CHECKS
batch_by_tlen, _ = hidden.size()
batch_by_tlen_, slen = attn.size()
slen_, batch, cvocab = src_map.size()
aeq(batch_by_tlen, batch_by_tlen_)
aeq(slen, slen_)
# Original probabilities.
logits = self.linear(hidden)
logits[:, self.tgt_dict.stoi[onmt.IO.PAD_WORD]] = -float('inf')
prob = F.softmax(logits)
        # Probability of copying, p(z=1), for each position in the batch.
copy = F.sigmoid(self.linear_copy(hidden))
        # Probability of not copying: p_{softmax}(w) * (1 - p(z))
out_prob = torch.mul(prob, 1 - copy.expand_as(prob))
mul_attn = torch.mul(attn, copy.expand_as(attn))
copy_prob = torch.bmm(mul_attn.view(-1, batch, slen)
.transpose(0, 1),
src_map.transpose(0, 1)).transpose(0, 1)
copy_prob = copy_prob.contiguous().view(-1, cvocab)
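        # The final distribution has len(tgt_dict) + cvocab entries: the first
        # block scores generating each target word, the second block scores
        # copying each source position (via src_map).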
return torch.cat([out_prob, copy_prob], 1)
class CopyGeneratorCriterion(object):
def __init__(self, vocab_size, force_copy, pad, eps=1e-20):
self.force_copy = force_copy
self.eps = eps
self.offset = vocab_size
self.pad = pad
def __call__(self, scores, align, target):
align = align.view(-1)
# Copy prob.
out = scores.gather(1, align.view(-1, 1) + self.offset) \
.view(-1).mul(align.ne(0).float())
tmp = scores.gather(1, target.view(-1, 1)).view(-1)
# Regular prob (no unks and unks that can't be copied)
if not self.force_copy:
out = out + self.eps + tmp.mul(target.ne(0).float()) + \
tmp.mul(align.eq(0).float()).mul(target.eq(0).float())
else:
# Forced copy.
out = out + self.eps + tmp.mul(align.eq(0).float())
# Drop padding.
loss = -out.log().mul(target.ne(self.pad).float()).sum()
return loss
class CopyGeneratorLossCompute(onmt.Loss.LossComputeBase):
"""
Copy Generator Loss Computation.
"""
def __init__(self, generator, tgt_vocab, dataset,
force_copy, eps=1e-20):
super(CopyGeneratorLossCompute, self).__init__(generator, tgt_vocab)
self.dataset = dataset
self.force_copy = force_copy
self.criterion = CopyGeneratorCriterion(len(tgt_vocab), force_copy,
self.padding_idx)
def make_shard_state(self, batch, output, range_, attns):
""" See base class for args description. """
if getattr(batch, "alignment", None) is None:
raise AssertionError("using -copy_attn you need to pass in "
"-dynamic_dict during preprocess stage.")
return {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1]],
"copy_attn": attns.get("copy"),
"align": batch.alignment[range_[0] + 1: range_[1]]
}
def compute_loss(self, batch, output, target, copy_attn, align):
"""
Compute the loss. The args must match self.make_shard_state().
Args:
batch: the current batch.
output: the predict output from the model.
target: the validate target to compare output with.
copy_attn: the copy attention value.
align: the align info.
"""
target = target.view(-1)
align = align.view(-1)
scores = self.generator(self.bottle(output),
self.bottle(copy_attn),
batch.src_map)
loss = self.criterion(scores, align, target)
scores_data = scores.data.clone()
scores_data = self.dataset.collapse_copy_scores(
self.unbottle(scores_data, batch.batch_size),
batch, self.tgt_vocab)
scores_data = self.bottle(scores_data)
# Correct target is copy when only option.
# TODO: replace for loop with masking or boolean indexing
target_data = target.data.clone()
for i in range(target_data.size(0)):
if target_data[i] == 0 and align.data[i] != 0:
target_data[i] = align.data[i] + len(self.tgt_vocab)
# Coverage loss term.
loss_data = loss.data.clone()
stats = self.stats(loss_data, scores_data, target_data)
return loss, stats
| 5,090 | 34.852113 | 78 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/StackedRNN.py | import torch
import torch.nn as nn
class StackedLSTM(nn.Module):
"""
Our own implementation of stacked LSTM.
Needed for the decoder, because we do input feeding.
"""
def __init__(self, num_layers, input_size, rnn_size, dropout):
super(StackedLSTM, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(nn.LSTMCell(input_size, rnn_size))
input_size = rnn_size
def forward(self, input, hidden):
h_0, c_0 = hidden
h_1, c_1 = [], []
for i, layer in enumerate(self.layers):
h_1_i, c_1_i = layer(input, (h_0[i], c_0[i]))
input = h_1_i
if i + 1 != self.num_layers:
input = self.dropout(input)
h_1 += [h_1_i]
c_1 += [c_1_i]
h_1 = torch.stack(h_1)
c_1 = torch.stack(c_1)
return input, (h_1, c_1)
class StackedGRU(nn.Module):
def __init__(self, num_layers, input_size, rnn_size, dropout):
super(StackedGRU, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(nn.GRUCell(input_size, rnn_size))
input_size = rnn_size
def forward(self, input, hidden):
h_1 = []
for i, layer in enumerate(self.layers):
h_1_i = layer(input, hidden[0][i])
input = h_1_i
if i + 1 != self.num_layers:
input = self.dropout(input)
h_1 += [h_1_i]
h_1 = torch.stack(h_1)
return input, (h_1,)
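if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): one decoding
    # step through a 2-layer stacked LSTM on random data (sizes illustrative).
    from torch.autograd import Variable
    num_layers, batch, input_size, rnn_size = 2, 3, 8, 16
    cell = StackedLSTM(num_layers, input_size, rnn_size, dropout=0.1)
    x = Variable(torch.randn(batch, input_size))
    h0 = Variable(torch.zeros(num_layers, batch, rnn_size))
    c0 = Variable(torch.zeros(num_layers, batch, rnn_size))
    out, (h1, c1) = cell(x, (h0, c0))
    print(out.size(), h1.size(), c1.size())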
| 1,755 | 28.266667 | 66 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/MultiHeadedAttn.py | import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from onmt.Utils import aeq
from onmt.modules.UtilClass import BottleLinear, \
BottleLayerNorm, BottleSoftmax
class MultiHeadedAttention(nn.Module):
''' Multi-Head Attention module from
"Attention is All You Need".
'''
def __init__(self, head_count, model_dim, p=0.1):
"""
Args:
head_count(int): number of parallel heads.
model_dim(int): the dimension of keys/values/queries in this
MultiHeadedAttention, must be divisible by head_count.
"""
assert model_dim % head_count == 0
self.dim_per_head = model_dim // head_count
self.model_dim = model_dim
super(MultiHeadedAttention, self).__init__()
self.head_count = head_count
self.linear_keys = BottleLinear(model_dim,
head_count * self.dim_per_head,
bias=False)
self.linear_values = BottleLinear(model_dim,
head_count * self.dim_per_head,
bias=False)
self.linear_query = BottleLinear(model_dim,
head_count * self.dim_per_head,
bias=False)
self.sm = BottleSoftmax()
self.activation = nn.ReLU()
self.layer_norm = BottleLayerNorm(model_dim)
self.dropout = nn.Dropout(p)
self.res_dropout = nn.Dropout(p)
def forward(self, key, value, query, mask=None):
# CHECKS
batch, k_len, d = key.size()
batch_, k_len_, d_ = value.size()
aeq(batch, batch_)
aeq(k_len, k_len_)
aeq(d, d_)
batch_, q_len, d_ = query.size()
aeq(batch, batch_)
aeq(d, d_)
aeq(self.model_dim % 8, 0)
if mask is not None:
batch_, q_len_, k_len_ = mask.size()
aeq(batch_, batch)
aeq(k_len_, k_len)
            aeq(q_len_, q_len)
# END CHECKS
def shape_projection(x):
b, l, d = x.size()
return x.view(b, l, self.head_count, self.dim_per_head) \
.transpose(1, 2).contiguous() \
.view(b * self.head_count, l, self.dim_per_head)
def unshape_projection(x, q):
b, l, d = q.size()
return x.view(b, self.head_count, l, self.dim_per_head) \
.transpose(1, 2).contiguous() \
.view(b, l, self.head_count * self.dim_per_head)
residual = query
key_up = shape_projection(self.linear_keys(key))
value_up = shape_projection(self.linear_values(value))
query_up = shape_projection(self.linear_query(query))
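        # Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V,
        # computed for all heads at once on the (batch*heads) x len x d_k views.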
scaled = torch.bmm(query_up, key_up.transpose(1, 2))
scaled = scaled / math.sqrt(self.dim_per_head)
bh, l, dim_per_head = scaled.size()
b = bh // self.head_count
if mask is not None:
scaled = scaled.view(b, self.head_count, l, dim_per_head)
mask = mask.unsqueeze(1).expand_as(scaled)
scaled = scaled.masked_fill(Variable(mask), -float('inf')) \
.view(bh, l, dim_per_head)
attn = self.sm(scaled)
# Return one attn
top_attn = attn \
.view(b, self.head_count, l, dim_per_head)[:, 0, :, :] \
.contiguous()
drop_attn = self.dropout(self.sm(scaled))
# values : (batch * 8) x qlen x dim
out = unshape_projection(torch.bmm(drop_attn, value_up), residual)
# Residual and layer norm
res = self.res_dropout(out) + residual
ret = self.layer_norm(res)
# CHECK
batch_, q_len_, d_ = ret.size()
aeq(q_len, q_len_)
aeq(batch, batch_)
aeq(d, d_)
# END CHECK
return ret, top_attn
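if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): self-attention
    # over a random batch with no masking (sizes illustrative).
    attn_layer = MultiHeadedAttention(head_count=8, model_dim=64)
    x = Variable(torch.randn(2, 5, 64))  # batch x len x model_dim
    out, top_attn = attn_layer(x, x, x)
    print(out.size(), top_attn.size())  # (2, 5, 64) and (2, 5, 5)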
| 3,966 | 34.738739 | 74 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/Gate.py | """
Context gate is a decoder module that takes as input the previous word
embedding, the current decoder state and the attention state, and produces a
gate.
The gate can be used to select the input from the target side context
(decoder state), from the source context (attention state) or both.
"""
import torch
import torch.nn as nn
def ContextGateFactory(type, embeddings_size, decoder_size,
attention_size, output_size):
"""Returns the correct ContextGate class"""
gate_types = {'source': SourceContextGate,
'target': TargetContextGate,
'both': BothContextGate}
assert type in gate_types, "Not valid ContextGate type: {0}".format(type)
return gate_types[type](embeddings_size, decoder_size, attention_size,
output_size)
class ContextGate(nn.Module):
"""Implement up to the computation of the gate"""
def __init__(self, embeddings_size, decoder_size,
attention_size, output_size):
super(ContextGate, self).__init__()
input_size = embeddings_size + decoder_size + attention_size
self.gate = nn.Linear(input_size, output_size, bias=True)
self.sig = nn.Sigmoid()
self.source_proj = nn.Linear(attention_size, output_size)
self.target_proj = nn.Linear(embeddings_size + decoder_size,
output_size)
def forward(self, prev_emb, dec_state, attn_state):
input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1)
z = self.sig(self.gate(input_tensor))
proj_source = self.source_proj(attn_state)
proj_target = self.target_proj(
torch.cat((prev_emb, dec_state), dim=1))
return z, proj_source, proj_target
class SourceContextGate(nn.Module):
"""Apply the context gate only to the source context"""
def __init__(self, embeddings_size, decoder_size,
attention_size, output_size):
super(SourceContextGate, self).__init__()
self.context_gate = ContextGate(embeddings_size, decoder_size,
attention_size, output_size)
self.tanh = nn.Tanh()
def forward(self, prev_emb, dec_state, attn_state):
z, source, target = self.context_gate(
prev_emb, dec_state, attn_state)
return self.tanh(target + z * source)
class TargetContextGate(nn.Module):
"""Apply the context gate only to the target context"""
def __init__(self, embeddings_size, decoder_size,
attention_size, output_size):
super(TargetContextGate, self).__init__()
self.context_gate = ContextGate(embeddings_size, decoder_size,
attention_size, output_size)
self.tanh = nn.Tanh()
def forward(self, prev_emb, dec_state, attn_state):
z, source, target = self.context_gate(prev_emb, dec_state, attn_state)
return self.tanh(z * target + source)
class BothContextGate(nn.Module):
"""Apply the context gate to both contexts"""
def __init__(self, embeddings_size, decoder_size,
attention_size, output_size):
super(BothContextGate, self).__init__()
self.context_gate = ContextGate(embeddings_size, decoder_size,
attention_size, output_size)
self.tanh = nn.Tanh()
def forward(self, prev_emb, dec_state, attn_state):
z, source, target = self.context_gate(prev_emb, dec_state, attn_state)
return self.tanh((1. - z) * target + z * source)
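if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): exercise the
    # three gate variants on random inputs (sizes illustrative).
    from torch.autograd import Variable
    batch, emb_size, dec_size, attn_size, out_size = 4, 10, 20, 20, 20
    prev_emb = Variable(torch.randn(batch, emb_size))
    dec_state = Variable(torch.randn(batch, dec_size))
    attn_state = Variable(torch.randn(batch, attn_size))
    for gate_type in ('source', 'target', 'both'):
        gate = ContextGateFactory(gate_type, emb_size, dec_size,
                                  attn_size, out_size)
        print(gate_type, gate(prev_emb, dec_state, attn_state).size())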
| 3,596 | 38.527473 | 78 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/UtilClass.py | import torch
import torch.nn as nn
class Bottle(nn.Module):
def forward(self, input):
if len(input.size()) <= 2:
return super(Bottle, self).forward(input)
size = input.size()[:2]
out = super(Bottle, self).forward(input.view(size[0]*size[1], -1))
return out.contiguous().view(size[0], size[1], -1)
class Bottle2(nn.Module):
def forward(self, input):
if len(input.size()) <= 3:
return super(Bottle2, self).forward(input)
size = input.size()
out = super(Bottle2, self).forward(input.view(size[0]*size[1],
size[2], size[3]))
return out.contiguous().view(size[0], size[1], size[2], size[3])
class LayerNorm(nn.Module):
''' Layer normalization module '''
def __init__(self, d_hid, eps=1e-3):
super(LayerNorm, self).__init__()
self.eps = eps
self.a_2 = nn.Parameter(torch.ones(d_hid), requires_grad=True)
self.b_2 = nn.Parameter(torch.zeros(d_hid), requires_grad=True)
def forward(self, z):
if z.size(1) == 1:
return z
mu = torch.mean(z, dim=1)
sigma = torch.std(z, dim=1)
# HACK. PyTorch is changing behavior
if mu.dim() == 1:
mu = mu.unsqueeze(1)
sigma = sigma.unsqueeze(1)
ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps)
ln_out = ln_out.mul(self.a_2.expand_as(ln_out)) \
+ self.b_2.expand_as(ln_out)
return ln_out
class BottleLinear(Bottle, nn.Linear):
pass
class BottleLayerNorm(Bottle, LayerNorm):
pass
class BottleSoftmax(Bottle, nn.Softmax):
pass
class Elementwise(nn.ModuleList):
"""
A simple network container.
Parameters are a list of modules.
Inputs are a 3d Variable whose last dimension is the same length
as the list.
Outputs are the result of applying modules to inputs elementwise.
An optional merge parameter allows the outputs to be reduced to a
single Variable.
"""
def __init__(self, merge=None, *args):
assert merge in [None, 'first', 'concat', 'sum', 'mlp']
self.merge = merge
super(Elementwise, self).__init__(*args)
def forward(self, input):
inputs = [feat.squeeze(2) for feat in input.split(1, dim=2)]
assert len(self) == len(inputs)
outputs = [f(x) for f, x in zip(self, inputs)]
if self.merge == 'first':
return outputs[0]
elif self.merge == 'concat' or self.merge == 'mlp':
return torch.cat(outputs, 2)
elif self.merge == 'sum':
return sum(outputs)
else:
return outputs
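if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): merge a word
    # embedding with one feature embedding by concatenation.
    from torch.autograd import Variable
    word_lut = nn.Embedding(100, 8)
    feat_lut = nn.Embedding(10, 4)
    merged = Elementwise('concat', [word_lut, feat_lut])
    inp = Variable(torch.LongTensor(5, 2, 2).random_(0, 10))  # len x batch x nfeat
    print(merged(inp).size())  # expected: (5, 2, 12)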
| 2,769 | 30.123596 | 78 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/StructuredAttention.py | import torch.nn as nn
import torch
import torch.cuda
from torch.autograd import Variable
class MatrixTree(nn.Module):
"""Implementation of the matrix-tree theorem for computing marginals
of non-projective dependency parsing. This attention layer is used
in the paper "Learning Structured Text Representations."
"""
def __init__(self, eps=1e-5):
self.eps = eps
super(MatrixTree, self).__init__()
def forward(self, input):
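        # Matrix-tree theorem: exponentiate the scores into a weighted
        # Laplacian, invert it, and read edge (and root) marginals off the
        # inverse; root scores are kept on the diagonal.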
laplacian = input.exp() + self.eps
output = input.clone()
for b in range(input.size(0)):
lap = laplacian[b].masked_fill(
Variable(torch.eye(input.size(1)).cuda().ne(0)), 0)
lap = -lap + torch.diag(lap.sum(0))
# store roots on diagonal
lap[0] = input[b].diag().exp()
inv_laplacian = lap.inverse()
factor = inv_laplacian.diag().unsqueeze(1)\
.expand_as(input[b]).transpose(0, 1)
term1 = input[b].exp().mul(factor).clone()
term2 = input[b].exp().mul(inv_laplacian.transpose(0, 1)).clone()
term1[:, 0] = 0
term2[0] = 0
output[b] = term1 - term2
roots_output = input[b].diag().exp().mul(
inv_laplacian.transpose(0, 1)[0])
output[b] = output[b] + torch.diag(roots_output)
return output
if __name__ == "__main__":
dtree = MatrixTree()
q = torch.rand(1, 5, 5).cuda()
marg = dtree.forward(Variable(q))
print(marg.sum(1))
| 1,556 | 33.6 | 77 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/Conv2Conv.py | """
Implementation of "Convolutional Sequence to Sequence Learning"
"""
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
import onmt.modules
from onmt.modules.WeightNorm import WeightNormConv2d
from onmt.Models import EncoderBase
from onmt.Models import DecoderState
from onmt.Utils import aeq
SCALE_WEIGHT = 0.5 ** 0.5
def shape_transform(x):
""" Tranform the size of the tensors to fit for conv input. """
return torch.unsqueeze(torch.transpose(x, 1, 2), 3)
class GatedConv(nn.Module):
def __init__(self, input_size, width=3, dropout=0.2, nopad=False):
super(GatedConv, self).__init__()
self.conv = WeightNormConv2d(input_size, 2 * input_size,
kernel_size=(width, 1), stride=(1, 1),
padding=(width // 2 * (1 - nopad), 0))
init.xavier_uniform(self.conv.weight, gain=(4 * (1 - dropout))**0.5)
self.dropout = nn.Dropout(dropout)
def forward(self, x_var, hidden=None):
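        # Gated linear unit: the convolution emits 2*input_size channels that
        # are split into a candidate and a gate; output = candidate * sigmoid(gate).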
x_var = self.dropout(x_var)
x_var = self.conv(x_var)
out, gate = x_var.split(int(x_var.size(1) / 2), 1)
out = out * F.sigmoid(gate)
return out
class StackedCNN(nn.Module):
def __init__(self, num_layers, input_size, cnn_kernel_width=3,
dropout=0.2):
super(StackedCNN, self).__init__()
self.dropout = dropout
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(
GatedConv(input_size, cnn_kernel_width, dropout))
def forward(self, x, hidden=None):
for conv in self.layers:
x = x + conv(x)
x *= SCALE_WEIGHT
return x
class CNNEncoder(EncoderBase):
"""
Encoder built on CNN.
"""
def __init__(self, num_layers, hidden_size,
cnn_kernel_width, dropout, embeddings):
super(CNNEncoder, self).__init__()
self.embeddings = embeddings
input_size = embeddings.embedding_size
self.linear = nn.Linear(input_size, hidden_size)
self.cnn = StackedCNN(num_layers, hidden_size,
cnn_kernel_width, dropout)
def forward(self, input, lengths=None, hidden=None):
""" See EncoderBase.forward() for description of args and returns."""
self._check_args(input, lengths, hidden)
emb = self.embeddings(input)
s_len, batch, emb_dim = emb.size()
emb = emb.transpose(0, 1).contiguous()
emb_reshape = emb.view(emb.size(0) * emb.size(1), -1)
emb_remap = self.linear(emb_reshape)
emb_remap = emb_remap.view(emb.size(0), emb.size(1), -1)
emb_remap = shape_transform(emb_remap)
out = self.cnn(emb_remap)
return emb_remap.squeeze(3).transpose(0, 1).contiguous(),\
out.squeeze(3).transpose(0, 1).contiguous()
class CNNDecoder(nn.Module):
"""
    Decoder built on CNN, which consists of residual convolutional layers,
with ConvMultiStepAttention.
"""
def __init__(self, num_layers, hidden_size, attn_type,
copy_attn, cnn_kernel_width, dropout, embeddings):
super(CNNDecoder, self).__init__()
# Basic attributes.
self.decoder_type = 'cnn'
self.num_layers = num_layers
self.hidden_size = hidden_size
self.cnn_kernel_width = cnn_kernel_width
self.embeddings = embeddings
self.dropout = dropout
# Build the CNN.
input_size = self.embeddings.embedding_size
self.linear = nn.Linear(input_size, self.hidden_size)
self.conv_layers = nn.ModuleList()
for i in range(self.num_layers):
self.conv_layers.append(
GatedConv(self.hidden_size, self.cnn_kernel_width,
self.dropout, True))
self.attn_layers = nn.ModuleList()
for i in range(self.num_layers):
self.attn_layers.append(
onmt.modules.ConvMultiStepAttention(self.hidden_size))
# CNNDecoder has its own attention mechanism.
# Set up a separated copy attention layer, if needed.
self._copy = False
if copy_attn:
self.copy_attn = onmt.modules.GlobalAttention(
hidden_size, attn_type=attn_type)
self._copy = True
def forward(self, input, context, state):
"""
Forward through the CNNDecoder.
Args:
input (LongTensor): a sequence of input tokens tensors
of size (len x batch x nfeats).
context (FloatTensor): output(tensor sequence) from the encoder
CNN of size (src_len x batch x hidden_size).
state (FloatTensor): hidden state from the encoder CNN for
initializing the decoder.
Returns:
outputs (FloatTensor): a Tensor sequence of output from the decoder
of shape (len x batch x hidden_size).
state (FloatTensor): final hidden state from the decoder.
attns (dict of (str, FloatTensor)): a dictionary of different
type of attention Tensor from the decoder
of shape (src_len x batch).
"""
# CHECKS
assert isinstance(state, CNNDecoderState)
input_len, input_batch, _ = input.size()
contxt_len, contxt_batch, _ = context.size()
aeq(input_batch, contxt_batch)
# END CHECKS
if state.previous_input is not None:
input = torch.cat([state.previous_input, input], 0)
# Initialize return variables.
outputs = []
attns = {"std": []}
assert not self._copy, "Copy mechanism not yet tested in conv2conv"
if self._copy:
attns["copy"] = []
emb = self.embeddings(input)
assert emb.dim() == 3 # len x batch x embedding_dim
tgt_emb = emb.transpose(0, 1).contiguous()
# The output of CNNEncoder.
src_context_t = context.transpose(0, 1).contiguous()
# The combination of output of CNNEncoder and source embeddings.
src_context_c = state.init_src.transpose(0, 1).contiguous()
# Run the forward pass of the CNNDecoder.
emb_reshape = tgt_emb.contiguous().view(
tgt_emb.size(0) * tgt_emb.size(1), -1)
linear_out = self.linear(emb_reshape)
x = linear_out.view(tgt_emb.size(0), tgt_emb.size(1), -1)
x = shape_transform(x)
pad = Variable(torch.zeros(x.size(0), x.size(1),
self.cnn_kernel_width - 1, 1))
pad = pad.type_as(x)
base_target_emb = x
for conv, attention in zip(self.conv_layers, self.attn_layers):
new_target_input = torch.cat([pad, x], 2)
out = conv(new_target_input)
c, attn = attention(base_target_emb, out,
src_context_t, src_context_c)
x = (x + (c + out) * SCALE_WEIGHT) * SCALE_WEIGHT
output = x.squeeze(3).transpose(1, 2)
# Process the result and update the attentions.
outputs = output.transpose(0, 1).contiguous()
if state.previous_input is not None:
outputs = outputs[state.previous_input.size(0):]
attn = attn[:, state.previous_input.size(0):].squeeze()
attn = torch.stack([attn])
attns["std"] = attn
if self._copy:
attns["copy"] = attn
# Update the state.
state.update_state(input)
return outputs, state, attns
def init_decoder_state(self, src, context, enc_hidden):
return CNNDecoderState(context, enc_hidden)
class CNNDecoderState(DecoderState):
def __init__(self, context, enc_hidden):
self.init_src = (context + enc_hidden) * SCALE_WEIGHT
self.previous_input = None
@property
def _all(self):
"""
Contains attributes that need to be updated in self.beam_update().
"""
return (self.previous_input,)
def update_state(self, input):
""" Called for every decoder forward pass. """
self.previous_input = input
def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
self.init_src = Variable(
self.init_src.data.repeat(1, beam_size, 1), volatile=True)
| 8,557 | 35.57265 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/GlobalAttention.py | import torch
import torch.nn as nn
from onmt.modules.UtilClass import BottleLinear
from onmt.Utils import aeq
class GlobalAttention(nn.Module):
"""
Luong Attention.
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
H_1 H_2 H_3 ... H_n
q q q q
| | | |
\ | | /
.....
\ | /
a
Constructs a unit mapping.
$$(H_1 + H_n, q) => (a)$$
Where H is of `batch x n x dim` and q is of `batch x dim`.
Luong Attention (dot, general):
The full function is
$$\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$.
* dot: $$score(h_t,{\overline{h}}_s) = h_t^T{\overline{h}}_s$$
* general: $$score(h_t,{\overline{h}}_s) = h_t^T W_a {\overline{h}}_s$$
Bahdanau Attention (mlp):
    $$c = \sum_{j=1}^{SeqLength} a_j h_j$$.
The Alignment-function $$a$$ computes an alignment as:
$$a_j = softmax(v_a^T \tanh(W_a q + U_a h_j) )$$.
"""
def __init__(self, dim, coverage=False, attn_type="dot"):
super(GlobalAttention, self).__init__()
self.dim = dim
self.attn_type = attn_type
assert (self.attn_type in ["dot", "general", "mlp"]), (
"Please select a valid attention type.")
if self.attn_type == "general":
self.linear_in = nn.Linear(dim, dim, bias=False)
elif self.attn_type == "mlp":
self.linear_context = BottleLinear(dim, dim, bias=False)
self.linear_query = nn.Linear(dim, dim, bias=True)
self.v = BottleLinear(dim, 1, bias=False)
# mlp wants it with bias
out_bias = self.attn_type == "mlp"
self.linear_out = nn.Linear(dim*2, dim, bias=out_bias)
self.sm = nn.Softmax()
self.tanh = nn.Tanh()
self.mask = None
if coverage:
self.linear_cover = nn.Linear(1, dim, bias=False)
def applyMask(self, mask):
self.mask = mask
def score(self, h_t, h_s):
"""
h_t (FloatTensor): batch x tgt_len x dim
h_s (FloatTensor): batch x src_len x dim
returns scores (FloatTensor): batch x tgt_len x src_len:
raw attention scores for each src index
"""
# Check input sizes
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ["general", "dot"]:
if self.attn_type == "general":
h_t_ = h_t.view(tgt_batch*tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
# (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
# (batch, t_len, s_len, d)
wquh = self.tanh(wq + uh)
return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
def forward(self, input, context, coverage=None):
"""
input (FloatTensor): batch x tgt_len x dim: decoder's rnn's output.
context (FloatTensor): batch x src_len x dim: src hidden states
coverage (FloatTensor): None (not supported yet)
"""
# one step input
if input.dim() == 2:
one_step = True
input = input.unsqueeze(1)
else:
one_step = False
batch, sourceL, dim = context.size()
batch_, targetL, dim_ = input.size()
aeq(batch, batch_)
aeq(dim, dim_)
aeq(self.dim, dim)
if coverage is not None:
batch_, sourceL_ = coverage.size()
aeq(batch, batch_)
aeq(sourceL, sourceL_)
if self.mask is not None:
beam_, batch_, sourceL_ = self.mask.size()
aeq(batch, batch_*beam_)
aeq(sourceL, sourceL_)
if coverage is not None:
cover = coverage.view(-1).unsqueeze(1)
context += self.linear_cover(cover).view_as(context)
context = self.tanh(context)
# compute attention scores, as in Luong et al.
align = self.score(input, context)
if self.mask is not None:
            mask_ = self.mask.view(batch, 1, sourceL)  # make it broadcastable
align.data.masked_fill_(mask_, -float('inf'))
# Softmax to normalize attention weights
align_vectors = self.sm(align.view(batch*targetL, sourceL))
align_vectors = align_vectors.view(batch, targetL, sourceL)
# each context vector c_t is the weighted average
# over all the source hidden states
c = torch.bmm(align_vectors, context)
# concatenate
concat_c = torch.cat([c, input], 2).view(batch*targetL, dim*2)
attn_h = self.linear_out(concat_c).view(batch, targetL, dim)
if self.attn_type in ["general", "dot"]:
attn_h = self.tanh(attn_h)
if one_step:
attn_h = attn_h.squeeze(1)
align_vectors = align_vectors.squeeze(1)
# Check output sizes
batch_, dim_ = attn_h.size()
aeq(batch, batch_)
aeq(dim, dim_)
batch_, sourceL_ = align_vectors.size()
aeq(batch, batch_)
aeq(sourceL, sourceL_)
else:
attn_h = attn_h.transpose(0, 1).contiguous()
align_vectors = align_vectors.transpose(0, 1).contiguous()
# Check output sizes
targetL_, batch_, dim_ = attn_h.size()
aeq(targetL, targetL_)
aeq(batch, batch_)
aeq(dim, dim_)
targetL_, batch_, sourceL_ = align_vectors.size()
aeq(targetL, targetL_)
aeq(batch, batch_)
aeq(sourceL, sourceL_)
return attn_h, align_vectors
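# A minimal usage sketch (illustrative only, not part of the original module);
# it assumes inputs wrapped in autograd Variables as elsewhere in this codebase:
#   attn = GlobalAttention(dim=256, attn_type="general")
#   query = Variable(torch.randn(4, 256))        # one decoder step: batch x dim
#   context = Variable(torch.randn(4, 7, 256))   # batch x src_len x dim
#   attn_h, align = attn(query, context)
#   # attn_h: (4, 256) attentional hidden state; align: (4, 7) attention weights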
| 6,419 | 32.968254 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/SRU.py | """
Implementation of "Training RNNs as Fast as CNNs".
TODO: turn to pytorch's implementation when it is available.
This implementation is adapted from the paper author's original code:
https://github.com/taolei87/sru/blob/master/cuda_functional.py.
"""
import subprocess
import platform
import os
import re
import argparse
import torch
import torch.nn as nn
from torch.autograd import Function, Variable
from collections import namedtuple
# For command-line option parsing
class CheckSRU(argparse.Action):
def __init__(self, option_strings, dest, **kwargs):
super(CheckSRU, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
if values == 'SRU':
check_sru_requirement(abort=True)
# Check pass, set the args.
setattr(namespace, self.dest, values)
# This SRU version implements its own cuda-level optimization,
# so it requires that:
# 1. `cupy` and `pynvrtc` python package installed.
# 2. pytorch is built with cuda support.
# 3. library path set: export LD_LIBRARY_PATH=<cuda lib path>.
def check_sru_requirement(abort=False):
"""
Return True if check pass; if check fails and abort is True,
    raise an Exception, otherwise return False.
"""
# Check 1.
try:
if platform.system() == 'Windows':
subprocess.check_output('pip freeze | findstr cupy', shell=True)
subprocess.check_output('pip freeze | findstr pynvrtc',
shell=True)
else: # Unix-like systems
subprocess.check_output('pip freeze | grep -w cupy', shell=True)
subprocess.check_output('pip freeze | grep -w pynvrtc',
shell=True)
except subprocess.CalledProcessError:
if not abort:
return False
raise AssertionError("Using SRU requires 'cupy' and 'pynvrtc' "
"python packages installed.")
# Check 2.
if torch.cuda.is_available() is False:
if not abort:
return False
raise AssertionError("Using SRU requires pytorch built with cuda.")
# Check 3.
pattern = re.compile(".*cuda/lib.*")
ld_path = os.getenv('LD_LIBRARY_PATH', "")
if re.match(pattern, ld_path) is None:
if not abort:
return False
raise AssertionError("Using SRU requires setting cuda lib path, e.g. "
"export LD_LIBRARY_PATH=/usr/local/cuda/lib64.")
return True
SRU_CODE = """
extern "C" {
__forceinline__ __device__ float sigmoidf(float x)
{
return 1.f / (1.f + expf(-x));
}
__forceinline__ __device__ float reluf(float x)
{
return (x > 0.f) ? x : 0.f;
}
__global__ void sru_fwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const int len, const int batch,
const int d, const int k,
float * __restrict__ h,
float * __restrict__ c,
const int activation_type)
{
assert ((k == 3) || (x == NULL));
int ncols = batch*d;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float bias1 = *(bias + (col%d));
const float bias2 = *(bias + (col%d) + d);
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float cur = *(init + col);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
float *cp = c + col;
float *hp = h + col;
for (int row = 0; row < len; ++row)
{
float g1 = sigmoidf((*(up+1))+bias1);
float g2 = sigmoidf((*(up+2))+bias2);
cur = (cur-(*up))*g1 + (*up);
*cp = cur;
float val = (activation_type == 1) ? tanh(cur) : (
(activation_type == 2) ? reluf(cur) : cur
);
*hp = (val*mask-(*xp))*g2 + (*xp);
up += ncols_u;
xp += ncols_x;
cp += ncols;
hp += ncols;
}
}
__global__ void sru_bwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const float * __restrict__ c,
const float * __restrict__ grad_h,
const float * __restrict__ grad_last,
const int len,
const int batch, const int d, const int k,
float * __restrict__ grad_u,
float * __restrict__ grad_x,
float * __restrict__ grad_bias,
float * __restrict__ grad_init,
int activation_type)
{
assert((k == 3) || (x == NULL));
assert((k == 3) || (grad_x == NULL));
int ncols = batch*d;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float bias1 = *(bias + (col%d));
const float bias2 = *(bias + (col%d) + d);
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float gbias1 = 0;
float gbias2 = 0;
float cur = *(grad_last + col);
const float *up = u + (col*k) + (len-1)*ncols_u;
const float *xp = (k == 3) ? (x + col + (len-1)*ncols) : (up + 3);
const float *cp = c + col + (len-1)*ncols;
const float *ghp = grad_h + col + (len-1)*ncols;
float *gup = grad_u + (col*k) + (len-1)*ncols_u;
float *gxp = (k == 3) ? (grad_x + col + (len-1)*ncols) : (gup + 3);
for (int row = len-1; row >= 0; --row)
{
const float g1 = sigmoidf((*(up+1))+bias1);
const float g2 = sigmoidf((*(up+2))+bias2);
const float c_val = (activation_type == 1) ? tanh(*cp) : (
(activation_type == 2) ? reluf(*cp) : (*cp)
);
const float x_val = *xp;
const float u_val = *up;
const float prev_c_val = (row>0) ? (*(cp-ncols)) : (*(init+col));
const float gh_val = *ghp;
// h = c*g2 + x*(1-g2) = (c-x)*g2 + x
// c = c'*g1 + g0*(1-g1) = (c'-g0)*g1 + g0
// grad wrt x
*gxp = gh_val*(1-g2);
// grad wrt g2, u2 and bias2
float gg2 = gh_val*(c_val*mask-x_val)*(g2*(1-g2));
*(gup+2) = gg2;
gbias2 += gg2;
// grad wrt c
const float tmp = (activation_type == 1) ? (g2*(1-c_val*c_val)) : (
((activation_type == 0) || (c_val > 0)) ? g2 : 0.f
);
const float gc = gh_val*mask*tmp + cur;
// grad wrt u0
*gup = gc*(1-g1);
// grad wrt g1, u1, and bias1
float gg1 = gc*(prev_c_val-u_val)*(g1*(1-g1));
*(gup+1) = gg1;
gbias1 += gg1;
// grad wrt c'
cur = gc*g1;
up -= ncols_u;
xp -= ncols_x;
cp -= ncols;
gup -= ncols_u;
gxp -= ncols_x;
ghp -= ncols;
}
*(grad_bias + col) = gbias1;
*(grad_bias + col + ncols) = gbias2;
*(grad_init +col) = cur;
}
__global__ void sru_bi_fwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const int len, const int batch,
const int d, const int k,
float * __restrict__ h,
float * __restrict__ c,
const int activation_type)
{
assert ((k == 3) || (x == NULL));
assert ((k == 3) || (k == 4));
int ncols = batch*d*2;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float cur = *(init + col);
const int d2 = d*2;
const bool flip = (col%d2) >= d;
const float bias1 = *(bias + (col%d2));
const float bias2 = *(bias + (col%d2) + d2);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
float *cp = c + col;
float *hp = h + col;
if (flip) {
up += (len-1)*ncols_u;
xp += (len-1)*ncols_x;
cp += (len-1)*ncols;
hp += (len-1)*ncols;
}
int ncols_u_ = flip ? -ncols_u : ncols_u;
int ncols_x_ = flip ? -ncols_x : ncols_x;
int ncols_ = flip ? -ncols : ncols;
for (int cnt = 0; cnt < len; ++cnt)
{
float g1 = sigmoidf((*(up+1))+bias1);
float g2 = sigmoidf((*(up+2))+bias2);
cur = (cur-(*up))*g1 + (*up);
*cp = cur;
float val = (activation_type == 1) ? tanh(cur) : (
(activation_type == 2) ? reluf(cur) : cur
);
*hp = (val*mask-(*xp))*g2 + (*xp);
up += ncols_u_;
xp += ncols_x_;
cp += ncols_;
hp += ncols_;
}
}
__global__ void sru_bi_bwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const float * __restrict__ c,
const float * __restrict__ grad_h,
const float * __restrict__ grad_last,
const int len, const int batch,
const int d, const int k,
float * __restrict__ grad_u,
float * __restrict__ grad_x,
float * __restrict__ grad_bias,
float * __restrict__ grad_init,
int activation_type)
{
assert((k == 3) || (x == NULL));
assert((k == 3) || (grad_x == NULL));
assert((k == 3) || (k == 4));
int ncols = batch*d*2;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float gbias1 = 0;
float gbias2 = 0;
float cur = *(grad_last + col);
const int d2 = d*2;
const bool flip = ((col%d2) >= d);
const float bias1 = *(bias + (col%d2));
const float bias2 = *(bias + (col%d2) + d2);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
const float *cp = c + col;
const float *ghp = grad_h + col;
float *gup = grad_u + (col*k);
float *gxp = (k == 3) ? (grad_x + col) : (gup + 3);
if (!flip) {
up += (len-1)*ncols_u;
xp += (len-1)*ncols_x;
cp += (len-1)*ncols;
ghp += (len-1)*ncols;
gup += (len-1)*ncols_u;
gxp += (len-1)*ncols_x;
}
int ncols_u_ = flip ? -ncols_u : ncols_u;
int ncols_x_ = flip ? -ncols_x : ncols_x;
int ncols_ = flip ? -ncols : ncols;
for (int cnt = 0; cnt < len; ++cnt)
{
const float g1 = sigmoidf((*(up+1))+bias1);
const float g2 = sigmoidf((*(up+2))+bias2);
const float c_val = (activation_type == 1) ? tanh(*cp) : (
(activation_type == 2) ? reluf(*cp) : (*cp)
);
const float x_val = *xp;
const float u_val = *up;
const float prev_c_val = (cnt<len-1)?(*(cp-ncols_)):(*(init+col));
const float gh_val = *ghp;
// h = c*g2 + x*(1-g2) = (c-x)*g2 + x
// c = c'*g1 + g0*(1-g1) = (c'-g0)*g1 + g0
// grad wrt x
*gxp = gh_val*(1-g2);
// grad wrt g2, u2 and bias2
float gg2 = gh_val*(c_val*mask-x_val)*(g2*(1-g2));
*(gup+2) = gg2;
gbias2 += gg2;
// grad wrt c
const float tmp = (activation_type == 1) ? (g2*(1-c_val*c_val)) : (
((activation_type == 0) || (c_val > 0)) ? g2 : 0.f
);
const float gc = gh_val*mask*tmp + cur;
// grad wrt u0
*gup = gc*(1-g1);
// grad wrt g1, u1, and bias1
float gg1 = gc*(prev_c_val-u_val)*(g1*(1-g1));
*(gup+1) = gg1;
gbias1 += gg1;
// grad wrt c'
cur = gc*g1;
up -= ncols_u_;
xp -= ncols_x_;
cp -= ncols_;
gup -= ncols_u_;
gxp -= ncols_x_;
ghp -= ncols_;
}
*(grad_bias + col) = gbias1;
*(grad_bias + col + ncols) = gbias2;
*(grad_init +col) = cur;
}
}
"""
if check_sru_requirement():
from cupy.cuda import function
from pynvrtc.compiler import Program
# This cuda() is important, it sets up device to use.
tmp_ = torch.rand(1, 1).cuda()
sru_prog = Program(SRU_CODE.encode('utf-8'),
'sru_prog.cu'.encode('utf-8'))
sru_ptx = sru_prog.compile()
sru_mod = function.Module()
sru_mod.load(bytes(sru_ptx.encode()))
SRU_FWD_FUNC = sru_mod.get_function('sru_fwd')
SRU_BWD_FUNC = sru_mod.get_function('sru_bwd')
SRU_BiFWD_FUNC = sru_mod.get_function('sru_bi_fwd')
SRU_BiBWD_FUNC = sru_mod.get_function('sru_bi_bwd')
stream = namedtuple('Stream', ['ptr'])
SRU_STREAM = stream(ptr=torch.cuda.current_stream().cuda_stream)
class SRU_Compute(Function):
def __init__(self, activation_type, d_out, bidirectional=False):
super(SRU_Compute, self).__init__()
self.activation_type = activation_type
self.d_out = d_out
self.bidirectional = bidirectional
def forward(self, u, x, bias, init=None, mask_h=None):
bidir = 2 if self.bidirectional else 1
length = x.size(0) if x.dim() == 3 else 1
batch = x.size(-2)
d = self.d_out
k = u.size(-1) // d
k_ = k // 2 if self.bidirectional else k
ncols = batch * d * bidir
thread_per_block = min(512, ncols)
num_block = (ncols-1) // thread_per_block+1
init_ = x.new(ncols).zero_() if init is None else init
size = (length, batch, d*bidir) if x.dim() == 3 else (batch, d*bidir)
c = x.new(*size)
h = x.new(*size)
FUNC = SRU_FWD_FUNC if not self.bidirectional else SRU_BiFWD_FUNC
FUNC(args=[
u.contiguous().data_ptr(),
x.contiguous().data_ptr() if k_ == 3 else 0,
bias.data_ptr(),
init_.contiguous().data_ptr(),
mask_h.data_ptr() if mask_h is not None else 0,
length,
batch,
d,
k_,
h.data_ptr(),
c.data_ptr(),
self.activation_type],
block=(thread_per_block, 1, 1), grid=(num_block, 1, 1),
stream=SRU_STREAM
)
self.save_for_backward(u, x, bias, init, mask_h)
self.intermediate = c
if x.dim() == 2:
last_hidden = c
elif self.bidirectional:
# -> directions x batch x dim
last_hidden = torch.stack((c[-1, :, :d], c[0, :, d:]))
else:
last_hidden = c[-1]
return h, last_hidden
def backward(self, grad_h, grad_last):
if self.bidirectional:
grad_last = torch.cat((grad_last[0], grad_last[1]), 1)
bidir = 2 if self.bidirectional else 1
u, x, bias, init, mask_h = self.saved_tensors
c = self.intermediate
length = x.size(0) if x.dim() == 3 else 1
batch = x.size(-2)
d = self.d_out
k = u.size(-1) // d
k_ = k//2 if self.bidirectional else k
ncols = batch*d*bidir
thread_per_block = min(512, ncols)
num_block = (ncols-1) // thread_per_block+1
init_ = x.new(ncols).zero_() if init is None else init
grad_u = u.new(*u.size())
grad_bias = x.new(2, batch, d*bidir)
grad_init = x.new(batch, d*bidir)
# For DEBUG
# size = (length, batch, x.size(-1)) \
# if x.dim() == 3 else (batch, x.size(-1))
# grad_x = x.new(*x.size()) if k_ == 3 else x.new(*size).zero_()
# Normal use
grad_x = x.new(*x.size()) if k_ == 3 else None
FUNC = SRU_BWD_FUNC if not self.bidirectional else SRU_BiBWD_FUNC
FUNC(args=[
u.contiguous().data_ptr(),
x.contiguous().data_ptr() if k_ == 3 else 0,
bias.data_ptr(),
init_.contiguous().data_ptr(),
mask_h.data_ptr() if mask_h is not None else 0,
c.data_ptr(),
grad_h.contiguous().data_ptr(),
grad_last.contiguous().data_ptr(),
length,
batch,
d,
k_,
grad_u.data_ptr(),
grad_x.data_ptr() if k_ == 3 else 0,
grad_bias.data_ptr(),
grad_init.data_ptr(),
self.activation_type],
block=(thread_per_block, 1, 1), grid=(num_block, 1, 1),
stream=SRU_STREAM
)
return grad_u, grad_x, grad_bias.sum(1).view(-1), grad_init, None
class SRUCell(nn.Module):
def __init__(self, n_in, n_out, dropout=0, rnn_dropout=0,
bidirectional=False, use_tanh=1, use_relu=0):
super(SRUCell, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.rnn_dropout = rnn_dropout
self.dropout = dropout
self.bidirectional = bidirectional
self.activation_type = 2 if use_relu else (1 if use_tanh else 0)
out_size = n_out*2 if bidirectional else n_out
k = 4 if n_in != out_size else 3
self.size_per_dir = n_out*k
self.weight = nn.Parameter(torch.Tensor(
n_in,
self.size_per_dir*2 if bidirectional else self.size_per_dir
))
self.bias = nn.Parameter(torch.Tensor(
n_out*4 if bidirectional else n_out*2
))
self.init_weight()
def init_weight(self):
val_range = (3.0/self.n_in)**0.5
self.weight.data.uniform_(-val_range, val_range)
self.bias.data.zero_()
def set_bias(self, bias_val=0):
n_out = self.n_out
if self.bidirectional:
self.bias.data[n_out*2:].zero_().add_(bias_val)
else:
self.bias.data[n_out:].zero_().add_(bias_val)
def forward(self, input, c0=None):
assert input.dim() == 2 or input.dim() == 3
n_in, n_out = self.n_in, self.n_out
batch = input.size(-2)
if c0 is None:
c0 = Variable(input.data.new(
batch, n_out if not self.bidirectional else n_out*2
).zero_())
if self.training and (self.rnn_dropout > 0):
mask = self.get_dropout_mask_((batch, n_in), self.rnn_dropout)
x = input * mask.expand_as(input)
else:
x = input
x_2d = x if x.dim() == 2 else x.contiguous().view(-1, n_in)
u = x_2d.mm(self.weight)
if self.training and (self.dropout > 0):
bidir = 2 if self.bidirectional else 1
mask_h = self.get_dropout_mask_((batch, n_out*bidir), self.dropout)
h, c = SRU_Compute(self.activation_type, n_out,
self.bidirectional)(
u, input, self.bias, c0, mask_h
)
else:
h, c = SRU_Compute(self.activation_type, n_out,
self.bidirectional)(
u, input, self.bias, c0
)
return h, c
def get_dropout_mask_(self, size, p):
w = self.weight.data
return Variable(w.new(*size).bernoulli_(1-p).div_(1-p))
class SRU(nn.Module):
def __init__(self, input_size, hidden_size,
num_layers=2, dropout=0, rnn_dropout=0,
bidirectional=False, use_tanh=1, use_relu=0):
# An entry check here, will catch on train side and translate side
# if requirements are not satisfied.
check_sru_requirement(abort=True)
super(SRU, self).__init__()
self.n_in = input_size
self.n_out = hidden_size
self.depth = num_layers
self.dropout = dropout
self.rnn_dropout = rnn_dropout
self.rnn_lst = nn.ModuleList()
self.bidirectional = bidirectional
self.out_size = hidden_size*2 if bidirectional else hidden_size
for i in range(num_layers):
sru_cell = SRUCell(
n_in=self.n_in if i == 0 else self.out_size,
n_out=self.n_out,
dropout=dropout if i+1 != num_layers else 0,
rnn_dropout=rnn_dropout,
bidirectional=bidirectional,
use_tanh=use_tanh,
use_relu=use_relu,
)
self.rnn_lst.append(sru_cell)
def set_bias(self, bias_val=0):
for l in self.rnn_lst:
l.set_bias(bias_val)
def forward(self, input, c0=None, return_hidden=True):
assert input.dim() == 3 # (len, batch, n_in)
dir_ = 2 if self.bidirectional else 1
if c0 is None:
zeros = Variable(input.data.new(
input.size(1), self.n_out*dir_
).zero_())
c0 = [zeros for i in range(self.depth)]
else:
if isinstance(c0, tuple):
# RNNDecoderState wraps hidden as a tuple.
c0 = c0[0]
assert c0.dim() == 3 # (depth, batch, dir_*n_out)
c0 = [h.squeeze(0) for h in c0.chunk(self.depth, 0)]
prevx = input
lstc = []
for i, rnn in enumerate(self.rnn_lst):
h, c = rnn(prevx, c0[i])
prevx = h
lstc.append(c)
if self.bidirectional:
# fh -> (layers*directions) x batch x dim
fh = torch.cat(lstc)
else:
fh = torch.stack(lstc)
if return_hidden:
return prevx, fh
else:
return prevx
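# A minimal usage sketch (illustrative only, not part of the original module);
# it assumes the cupy/pynvrtc + CUDA setup checked above is in place:
#   rnn = SRU(input_size=256, hidden_size=512, num_layers=2)
#   x = Variable(torch.randn(35, 20, 256).cuda())   # (len, batch, n_in)
#   out, hidden = rnn(x)
#   # out: (35, 20, 512) top-layer states; hidden: (2, 20, 512) final states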
| 23,318 | 36.672052 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/WeightNorm.py | """
Implementation of "Weight Normalization: A Simple Reparameterization
to Accelerate Training of Deep Neural Networks"
As a reparameterization method, weight normalization serves the same purpose
as batch normalization, but it does not depend on the minibatch.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from torch.autograd import Variable
def get_var_maybe_avg(namespace, var_name, training, polyak_decay):
# utility for retrieving polyak averaged params
# Update average
v = getattr(namespace, var_name)
v_avg = getattr(namespace, var_name + '_avg')
v_avg -= (1 - polyak_decay) * (v_avg - v.data)
if training:
return v
else:
return Variable(v_avg)
def get_vars_maybe_avg(namespace, var_names, training, polyak_decay):
# utility for retrieving polyak averaged params
vars = []
for vn in var_names:
vars.append(get_var_maybe_avg(
namespace, vn, training, polyak_decay))
return vars
class WeightNormLinear(nn.Linear):
def __init__(self, in_features, out_features,
init_scale=1., polyak_decay=0.9995):
super(WeightNormLinear, self).__init__(
in_features, out_features, bias=True)
self.V = self.weight
self.g = Parameter(torch.Tensor(out_features))
self.b = self.bias
self.register_buffer(
'V_avg', torch.zeros(out_features, in_features))
self.register_buffer('g_avg', torch.zeros(out_features))
self.register_buffer('b_avg', torch.zeros(out_features))
self.init_scale = init_scale
self.polyak_decay = polyak_decay
self.reset_parameters()
def reset_parameters(self):
return
def forward(self, x, init=False):
if init is True:
# out_features * in_features
self.V.data.copy_(torch.randn(self.V.data.size()).type_as(
self.V.data) * 0.05)
# norm is out_features * 1
V_norm = self.V.data / \
self.V.data.norm(2, 1).expand_as(self.V.data)
# batch_size * out_features
x_init = F.linear(x, Variable(V_norm)).data
# out_features
m_init, v_init = x_init.mean(0).squeeze(
0), x_init.var(0).squeeze(0)
# out_features
scale_init = self.init_scale / \
torch.sqrt(v_init + 1e-10)
self.g.data.copy_(scale_init)
self.b.data.copy_(-m_init * scale_init)
x_init = scale_init.view(1, -1).expand_as(x_init) \
* (x_init - m_init.view(1, -1).expand_as(x_init))
self.V_avg.copy_(self.V.data)
self.g_avg.copy_(self.g.data)
self.b_avg.copy_(self.b.data)
return Variable(x_init)
else:
V, g, b = get_vars_maybe_avg(self, ['V', 'g', 'b'],
self.training,
polyak_decay=self.polyak_decay)
# batch_size * out_features
x = F.linear(x, V)
scalar = g / torch.norm(V, 2, 1).squeeze(1)
x = scalar.view(1, -1).expand_as(x) * x + \
b.view(1, -1).expand_as(x)
return x
class WeightNormConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, init_scale=1.,
polyak_decay=0.9995):
super(WeightNormConv2d, self).__init__(in_channels, out_channels,
kernel_size, stride, padding,
dilation, groups)
self.V = self.weight
self.g = Parameter(torch.Tensor(out_channels))
self.b = self.bias
self.register_buffer('V_avg', torch.zeros(self.V.size()))
self.register_buffer('g_avg', torch.zeros(out_channels))
self.register_buffer('b_avg', torch.zeros(out_channels))
self.init_scale = init_scale
self.polyak_decay = polyak_decay
self.reset_parameters()
def reset_parameters(self):
return
def forward(self, x, init=False):
if init is True:
# out_channels, in_channels // groups, * kernel_size
self.V.data.copy_(torch.randn(self.V.data.size()
).type_as(self.V.data) * 0.05)
V_norm = self.V.data / self.V.data.view(self.out_channels, -1)\
.norm(2, 1).view(self.out_channels, *(
[1] * (len(self.kernel_size) + 1))).expand_as(self.V.data)
x_init = F.conv2d(x, Variable(V_norm), None, self.stride,
self.padding, self.dilation, self.groups).data
t_x_init = x_init.transpose(0, 1).contiguous().view(
self.out_channels, -1)
m_init, v_init = t_x_init.mean(1).squeeze(
1), t_x_init.var(1).squeeze(1)
# out_features
scale_init = self.init_scale / \
torch.sqrt(v_init + 1e-10)
self.g.data.copy_(scale_init)
self.b.data.copy_(-m_init * scale_init)
scale_init_shape = scale_init.view(
1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
m_init_shape = m_init.view(
1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
x_init = scale_init_shape.expand_as(
x_init) * (x_init - m_init_shape.expand_as(x_init))
self.V_avg.copy_(self.V.data)
self.g_avg.copy_(self.g.data)
self.b_avg.copy_(self.b.data)
return Variable(x_init)
else:
V, g, b = get_vars_maybe_avg(
self, ['V', 'g', 'b'], self.training,
polyak_decay=self.polyak_decay)
scalar = torch.norm(V.view(self.out_channels, -1), 2, 1)
if len(scalar.size()) == 2:
scalar = g / scalar.squeeze(1)
else:
scalar = g / scalar
W = scalar.view(self.out_channels, *
([1] * (len(V.size()) - 1))).expand_as(V) * V
x = F.conv2d(x, W, b, self.stride,
self.padding, self.dilation, self.groups)
return x
class WeightNormConvTranspose2d(nn.ConvTranspose2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, output_padding=0, groups=1, init_scale=1.,
polyak_decay=0.9995):
super(WeightNormConvTranspose2d, self).__init__(
in_channels, out_channels,
kernel_size, stride,
padding, output_padding,
groups)
# in_channels, out_channels, *kernel_size
self.V = self.weight
self.g = Parameter(torch.Tensor(out_channels))
self.b = self.bias
self.register_buffer('V_avg', torch.zeros(self.V.size()))
self.register_buffer('g_avg', torch.zeros(out_channels))
self.register_buffer('b_avg', torch.zeros(out_channels))
self.init_scale = init_scale
self.polyak_decay = polyak_decay
self.reset_parameters()
def reset_parameters(self):
return
def forward(self, x, init=False):
if init is True:
# in_channels, out_channels, *kernel_size
self.V.data.copy_(torch.randn(self.V.data.size()).type_as(
self.V.data) * 0.05)
V_norm = self.V.data / self.V.data.transpose(0, 1).contiguous() \
.view(self.out_channels, -1).norm(2, 1).view(
self.in_channels, self.out_channels,
*([1] * len(self.kernel_size))).expand_as(self.V.data)
x_init = F.conv_transpose2d(
x, Variable(V_norm), None, self.stride,
self.padding, self.output_padding, self.groups).data
# self.out_channels, 1
            t_x_init = x_init.transpose(0, 1).contiguous().view(
self.out_channels, -1)
# out_features
m_init, v_init = t_x_init.mean(1).squeeze(
1), t_x_init.var(1).squeeze(1)
# out_features
scale_init = self.init_scale / \
torch.sqrt(v_init + 1e-10)
self.g.data.copy_(scale_init)
self.b.data.copy_(-m_init * scale_init)
scale_init_shape = scale_init.view(
1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
m_init_shape = m_init.view(
1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
x_init = scale_init_shape.expand_as(x_init)\
* (x_init - m_init_shape.expand_as(x_init))
self.V_avg.copy_(self.V.data)
self.g_avg.copy_(self.g.data)
self.b_avg.copy_(self.b.data)
return Variable(x_init)
else:
V, g, b = get_vars_maybe_avg(
self, ['V', 'g', 'b'], self.training,
polyak_decay=self.polyak_decay)
scalar = g / \
torch.norm(V.transpose(0, 1).contiguous().view(
self.out_channels, -1), 2, 1).squeeze(1)
W = scalar.view(self.in_channels, self.out_channels,
*([1] * (len(V.size()) - 2))).expand_as(V) * V
x = F.conv_transpose2d(x, W, b, self.stride,
self.padding, self.output_padding,
self.groups)
return x
| 9,574 | 39.231092 | 78 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/ImageEncoder.py | import torch.nn as nn
import torch.nn.functional as F
import torch
import torch.cuda
from torch.autograd import Variable
class ImageEncoder(nn.Module):
"""
Encoder recurrent neural network for Images.
"""
def __init__(self, num_layers, bidirectional, rnn_size, dropout):
"""
Args:
num_layers (int): number of encoder layers.
bidirectional (bool): bidirectional encoder.
rnn_size (int): size of hidden states of the rnn.
            dropout (float): dropout probability.
"""
super(ImageEncoder, self).__init__()
self.num_layers = num_layers
self.num_directions = 2 if bidirectional else 1
self.hidden_size = rnn_size
self.layer1 = nn.Conv2d(3, 64, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.layer2 = nn.Conv2d(64, 128, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.layer3 = nn.Conv2d(128, 256, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.layer4 = nn.Conv2d(256, 256, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.layer5 = nn.Conv2d(256, 512, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.layer6 = nn.Conv2d(512, 512, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.batch_norm1 = nn.BatchNorm2d(256)
self.batch_norm2 = nn.BatchNorm2d(512)
self.batch_norm3 = nn.BatchNorm2d(512)
input_size = 512
self.rnn = nn.LSTM(input_size, rnn_size,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional)
self.pos_lut = nn.Embedding(1000, input_size)
def load_pretrained_vectors(self, opt):
# Pass in needed options only when modify function definition.
pass
def forward(self, input, lengths=None):
batchSize = input.size(0)
# (batch_size, 64, imgH, imgW)
# layer 1
input = F.relu(self.layer1(input[:, :, :, :]-0.5), True)
# (batch_size, 64, imgH/2, imgW/2)
input = F.max_pool2d(input, kernel_size=(2, 2), stride=(2, 2))
# (batch_size, 128, imgH/2, imgW/2)
# layer 2
input = F.relu(self.layer2(input), True)
# (batch_size, 128, imgH/2/2, imgW/2/2)
input = F.max_pool2d(input, kernel_size=(2, 2), stride=(2, 2))
# (batch_size, 256, imgH/2/2, imgW/2/2)
# layer 3
# batch norm 1
input = F.relu(self.batch_norm1(self.layer3(input)), True)
# (batch_size, 256, imgH/2/2, imgW/2/2)
# layer4
input = F.relu(self.layer4(input), True)
# (batch_size, 256, imgH/2/2/2, imgW/2/2)
input = F.max_pool2d(input, kernel_size=(1, 2), stride=(1, 2))
# (batch_size, 512, imgH/2/2/2, imgW/2/2)
# layer 5
# batch norm 2
input = F.relu(self.batch_norm2(self.layer5(input)), True)
# (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
input = F.max_pool2d(input, kernel_size=(2, 1), stride=(2, 1))
# (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
input = F.relu(self.batch_norm3(self.layer6(input)), True)
# # (batch_size, 512, H, W)
# # (batch_size, H, W, 512)
all_outputs = []
for row in range(input.size(2)):
inp = input[:, :, row, :].transpose(0, 2)\
.transpose(1, 2)
pos_emb = self.pos_lut(
Variable(torch.cuda.LongTensor(batchSize).fill_(row)))
with_pos = torch.cat(
(pos_emb.view(1, pos_emb.size(0), pos_emb.size(1)), inp), 0)
outputs, hidden_t = self.rnn(with_pos)
all_outputs.append(outputs)
out = torch.cat(all_outputs, 0)
return hidden_t, out
| 3,998 | 36.373832 | 76 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Generation/seq2seq_translation_tutorial.py | # -*- coding: utf-8 -*-
"""
Translation with a Sequence to Sequence Network and Attention
*************************************************************
**Author**: `Sean Robertson <https://github.com/spro/practical-pytorch>`_
In this project we will be teaching a neural network to translate from
French to English.
::
[KEY: > input, = target, < output]
> il est en train de peindre un tableau .
= he is painting a picture .
< he is painting a picture .
> pourquoi ne pas essayer ce vin delicieux ?
= why not try that delicious wine ?
< why not try that delicious wine ?
> elle n est pas poete mais romanciere .
= she is not a poet but a novelist .
< she not not a poet but a novelist .
> vous etes trop maigre .
= you re too skinny .
< you re all alone .
... to varying degrees of success.
This is made possible by the simple but powerful idea of the `sequence
to sequence network <http://arxiv.org/abs/1409.3215>`__, in which two
recurrent neural networks work together to transform one sequence to
another. An encoder network condenses an input sequence into a vector,
and a decoder network unfolds that vector into a new sequence.
.. figure:: /_static/img/seq-seq-images/seq2seq.png
:alt:
To improve upon this model we'll use an `attention
mechanism <https://arxiv.org/abs/1409.0473>`__, which lets the decoder
learn to focus over a specific range of the input sequence.
**Recommended Reading:**
I assume you have at least installed PyTorch, know Python, and
understand Tensors:
- http://pytorch.org/ For installation instructions
- :doc:`/beginner/deep_learning_60min_blitz` to get started with PyTorch in general
- :doc:`/beginner/pytorch_with_examples` for a wide and deep overview
- :doc:`/beginner/former_torchies_tutorial` if you are former Lua Torch user
It would also be useful to know about Sequence to Sequence networks and
how they work:
- `Learning Phrase Representations using RNN Encoder-Decoder for
Statistical Machine Translation <http://arxiv.org/abs/1406.1078>`__
- `Sequence to Sequence Learning with Neural
Networks <http://arxiv.org/abs/1409.3215>`__
- `Neural Machine Translation by Jointly Learning to Align and
Translate <https://arxiv.org/abs/1409.0473>`__
- `A Neural Conversational Model <http://arxiv.org/abs/1506.05869>`__
You will also find the previous tutorials on
:doc:`/intermediate/char_rnn_classification_tutorial`
and :doc:`/intermediate/char_rnn_generation_tutorial`
helpful as those concepts are very similar to the Encoder and Decoder
models, respectively.
And for more, read the papers that introduced these topics:
- `Learning Phrase Representations using RNN Encoder-Decoder for
Statistical Machine Translation <http://arxiv.org/abs/1406.1078>`__
- `Sequence to Sequence Learning with Neural
Networks <http://arxiv.org/abs/1409.3215>`__
- `Neural Machine Translation by Jointly Learning to Align and
Translate <https://arxiv.org/abs/1409.0473>`__
- `A Neural Conversational Model <http://arxiv.org/abs/1506.05869>`__
**Requirements**
"""
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
use_cuda = torch.cuda.is_available()
######################################################################
# Loading data files
# ==================
#
# The data for this project is a set of many thousands of English to
# French translation pairs.
#
# `This question on Open Data Stack
# Exchange <http://opendata.stackexchange.com/questions/3888/dataset-of-sentences-translated-into-many-languages>`__
# pointed me to the open translation site http://tatoeba.org/ which has
# downloads available at http://tatoeba.org/eng/downloads - and better
# yet, someone did the extra work of splitting language pairs into
# individual text files here: http://www.manythings.org/anki/
#
# The English to French pairs are too big to include in the repo, so
# download to ``data/eng-fra.txt`` before continuing. The file is a tab
# separated list of translation pairs:
#
# ::
#
# I am cold. Je suis froid.
#
# .. Note::
# Download the data from
# `here <https://download.pytorch.org/tutorial/data.zip>`_
# and extract it to the current directory.
######################################################################
# Similar to the character encoding used in the character-level RNN
# tutorials, we will be representing each word in a language as a one-hot
# vector, or giant vector of zeros except for a single one (at the index
# of the word). Compared to the dozens of characters that might exist in a
# language, there are many many more words, so the encoding vector is much
# larger. We will however cheat a bit and trim the data to only use a few
# thousand words per language.
#
# .. figure:: /_static/img/seq-seq-images/word-encoding.png
# :alt:
#
#
######################################################################
# We'll need a unique index per word to use as the inputs and targets of
# the networks later. To keep track of all this we will use a helper class
# called ``Lang`` which has word → index (``word2index``) and index → word
# (``index2word``) dictionaries, as well as a count of each word
# ``word2count`` to use to later replace rare words.
#
SOS_token = 0
EOS_token = 1
class Lang:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}
self.n_words = 2 # Count SOS and EOS
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
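# A quick illustration (added, not part of the original tutorial): indexing a
# tiny "corpus".
#   lang = Lang("eng")
#   lang.addSentence("he is cold .")
#   lang.word2index   # -> {'he': 2, 'is': 3, 'cold': 4, '.': 5}
#   lang.n_words      # -> 6, since indices 0 and 1 are reserved for SOS/EOS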
######################################################################
# The files are all in Unicode, to simplify we will turn Unicode
# characters to ASCII, make everything lowercase, and trim most
# punctuation.
#
# Turn a Unicode string to plain ASCII, thanks to
# http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
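# For example (added note): normalizeString("Êtes-vous fou?!") returns
# "etes vous fou ? !" -- accents stripped, punctuation spaced out, lowercased.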
######################################################################
# To read the data file we will split the file into lines, and then split
# lines into pairs. The files are all English → Other Language, so if we
# want to translate from Other Language → English I added the ``reverse``
# flag to reverse the pairs.
#
def readLangs(lang1, lang2, reverse=False):
print("Reading lines...")
# Read the file and split into lines
lines = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8').\
read().strip().split('\n')
# Split every line into pairs and normalize
pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
# Reverse pairs, make Lang instances
if reverse:
pairs = [list(reversed(p)) for p in pairs]
input_lang = Lang(lang2)
output_lang = Lang(lang1)
else:
input_lang = Lang(lang1)
output_lang = Lang(lang2)
return input_lang, output_lang, pairs
######################################################################
# Since there are a *lot* of example sentences and we want to train
# something quickly, we'll trim the data set to only relatively short and
# simple sentences. Here the maximum length is 10 words (that includes
# ending punctuation) and we're filtering to sentences that translate to
# the form "I am" or "He is" etc. (accounting for apostrophes replaced
# earlier).
#
MAX_LENGTH = 10
eng_prefixes = (
"i am ", "i m ",
"he is", "he s ",
"she is", "she s",
"you are", "you re ",
"we are", "we re ",
"they are", "they re "
)
def filterPair(p):
return len(p[0].split(' ')) < MAX_LENGTH and \
len(p[1].split(' ')) < MAX_LENGTH and \
p[1].startswith(eng_prefixes)
def filterPairs(pairs):
return [pair for pair in pairs if filterPair(pair)]
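# For example (added note, assuming the reversed fra->eng pairs built below):
# ["j ai froid .", "i am cold ."] passes the filter -- both sides are short
# and the English side starts with "i am " -- while a pair whose English side
# starts with, say, "the weather" would be dropped.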
######################################################################
# The full process for preparing the data is:
#
# - Read text file and split into lines, split lines into pairs
# - Normalize text, filter by length and content
# - Make word lists from sentences in pairs
#
def prepareData(lang1, lang2, reverse=False):
input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
print("Read %s sentence pairs" % len(pairs))
pairs = filterPairs(pairs)
print("Trimmed to %s sentence pairs" % len(pairs))
print("Counting words...")
for pair in pairs:
input_lang.addSentence(pair[0])
output_lang.addSentence(pair[1])
print("Counted words:")
print(input_lang.name, input_lang.n_words)
print(output_lang.name, output_lang.n_words)
return input_lang, output_lang, pairs
input_lang, output_lang, pairs = prepareData('eng', 'fra', True)
print(random.choice(pairs))
######################################################################
# The Seq2Seq Model
# =================
#
# A Recurrent Neural Network, or RNN, is a network that operates on a
# sequence and uses its own output as input for subsequent steps.
#
# A `Sequence to Sequence network <http://arxiv.org/abs/1409.3215>`__, or
# seq2seq network, or `Encoder Decoder
# network <https://arxiv.org/pdf/1406.1078v3.pdf>`__, is a model
# consisting of two RNNs called the encoder and decoder. The encoder reads
# an input sequence and outputs a single vector, and the decoder reads
# that vector to produce an output sequence.
#
# .. figure:: /_static/img/seq-seq-images/seq2seq.png
# :alt:
#
# Unlike sequence prediction with a single RNN, where every input
# corresponds to an output, the seq2seq model frees us from sequence
# length and order, which makes it ideal for translation between two
# languages.
#
# Consider the sentence "Je ne suis pas le chat noir" → "I am not the
# black cat". Most of the words in the input sentence have a direct
# translation in the output sentence, but are in slightly different
# orders, e.g. "chat noir" and "black cat". Because of the "ne/pas"
# construction there is also one more word in the input sentence. It would
# be difficult to produce a correct translation directly from the sequence
# of input words.
#
# With a seq2seq model the encoder creates a single vector which, in the
# ideal case, encodes the "meaning" of the input sequence into a single
# vector — a single point in some N dimensional space of sentences.
#
######################################################################
# The Encoder
# -----------
#
# The encoder of a seq2seq network is a RNN that outputs some value for
# every word from the input sentence. For every input word the encoder
# outputs a vector and a hidden state, and uses the hidden state for the
# next input word.
#
# .. figure:: /_static/img/seq-seq-images/encoder-network.png
# :alt:
#
#
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size, n_layers=1):
super(EncoderRNN, self).__init__()
self.n_layers = n_layers
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
def forward(self, input, hidden):
embedded = self.embedding(input).view(1, 1, -1)
output = embedded
for i in range(self.n_layers):
output, hidden = self.gru(output, hidden)
return output, hidden
def initHidden(self):
result = Variable(torch.zeros(1, 1, self.hidden_size))
if use_cuda:
return result.cuda()
else:
return result
######################################################################
# The Decoder
# -----------
#
# The decoder is another RNN that takes the encoder output vector(s) and
# outputs a sequence of words to create the translation.
#
######################################################################
# Simple Decoder
# ^^^^^^^^^^^^^^
#
# In the simplest seq2seq decoder we use only last output of the encoder.
# This last output is sometimes called the *context vector* as it encodes
# context from the entire sequence. This context vector is used as the
# initial hidden state of the decoder.
#
# At every step of decoding, the decoder is given an input token and
# hidden state. The initial input token is the start-of-string ``<SOS>``
# token, and the first hidden state is the context vector (the encoder's
# last hidden state).
#
# .. figure:: /_static/img/seq-seq-images/decoder-network.png
# :alt:
#
#
class DecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, n_layers=1):
super(DecoderRNN, self).__init__()
self.n_layers = n_layers
self.hidden_size = hidden_size
self.embedding = nn.Embedding(output_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
self.softmax = nn.LogSoftmax()
def forward(self, input, hidden):
output = self.embedding(input).view(1, 1, -1)
for i in range(self.n_layers):
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = self.softmax(self.out(output[0]))
return output, hidden
def initHidden(self):
result = Variable(torch.zeros(1, 1, self.hidden_size))
if use_cuda:
return result.cuda()
else:
return result
######################################################################
# I encourage you to train and observe the results of this model, but to
# save space we'll be going straight for the gold and introducing the
# Attention Mechanism.
#
######################################################################
# Attention Decoder
# ^^^^^^^^^^^^^^^^^
#
# If only the context vector is passed between the encoder and decoder,
# that single vector carries the burden of encoding the entire sentence.
#
# Attention allows the decoder network to "focus" on a different part of
# the encoder's outputs for every step of the decoder's own outputs. First
# we calculate a set of *attention weights*. These will be multiplied by
# the encoder output vectors to create a weighted combination. The result
# (called ``attn_applied`` in the code) should contain information about
# that specific part of the input sequence, and thus help the decoder
# choose the right output words.
#
# .. figure:: https://i.imgur.com/1152PYf.png
# :alt:
#
# Calculating the attention weights is done with another feed-forward
# layer ``attn``, using the decoder's input and hidden state as inputs.
# Because there are sentences of all sizes in the training data, to
# actually create and train this layer we have to choose a maximum
# sentence length (input length, for encoder outputs) that it can apply
# to. Sentences of the maximum length will use all the attention weights,
# while shorter sentences will only use the first few.
#
# .. figure:: /_static/img/seq-seq-images/attention-decoder-network.png
# :alt:
#
#
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, n_layers=1, dropout_p=0.1, max_length=MAX_LENGTH):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout_p = dropout_p
self.max_length = max_length
self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.dropout = nn.Dropout(self.dropout_p)
self.gru = nn.GRU(self.hidden_size, self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.output_size)
def forward(self, input, hidden, encoder_output, encoder_outputs):
embedded = self.embedding(input).view(1, 1, -1)
embedded = self.dropout(embedded)
attn_weights = F.softmax(
self.attn(torch.cat((embedded[0], hidden[0]), 1)))
attn_applied = torch.bmm(attn_weights.unsqueeze(0),
encoder_outputs.unsqueeze(0))
output = torch.cat((embedded[0], attn_applied[0]), 1)
output = self.attn_combine(output).unsqueeze(0)
for i in range(self.n_layers):
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = F.log_softmax(self.out(output[0]))
return output, hidden, attn_weights
def initHidden(self):
result = Variable(torch.zeros(1, 1, self.hidden_size))
if use_cuda:
return result.cuda()
else:
return result
######################################################################
# .. note:: There are other forms of attention that work around the length
# limitation by using a relative position approach. Read about "local
# attention" in `Effective Approaches to Attention-based Neural Machine
# Translation <https://arxiv.org/abs/1508.04025>`__.
#
# Training
# ========
#
# Preparing Training Data
# -----------------------
#
# To train, for each pair we will need an input tensor (indexes of the
# words in the input sentence) and target tensor (indexes of the words in
# the target sentence). While creating these vectors we will append the
# EOS token to both sequences.
#
def indexesFromSentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
def variableFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
result = Variable(torch.LongTensor(indexes).view(-1, 1))
if use_cuda:
return result.cuda()
else:
return result
def variablesFromPair(pair):
input_variable = variableFromSentence(input_lang, pair[0])
target_variable = variableFromSentence(output_lang, pair[1])
return (input_variable, target_variable)
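# For instance (added note): a four-word sentence such as "i am cold ."
# becomes a (5, 1) LongTensor -- its four word indices followed by EOS_token.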
######################################################################
# Training the Model
# ------------------
#
# To train we run the input sentence through the encoder, and keep track
# of every output and the latest hidden state. Then the decoder is given
# the ``<SOS>`` token as its first input, and the last hidden state of the
# encoder as its first hidden state.
#
# "Teacher forcing" is the concept of using the real target outputs as
# each next input, instead of using the decoder's guess as the next input.
# Using teacher forcing causes it to converge faster but `when the trained
# network is exploited, it may exhibit
# instability <http://minds.jacobs-university.de/sites/default/files/uploads/papers/ESNTutorialRev.pdf>`__.
#
# You can observe outputs of teacher-forced networks that read with
# coherent grammar but wander far from the correct translation -
# intuitively it has learned to represent the output grammar and can "pick
# up" the meaning once the teacher tells it the first few words, but it
# has not properly learned how to create the sentence from the translation
# in the first place.
#
# Because of the freedom PyTorch's autograd gives us, we can randomly
# choose to use teacher forcing or not with a simple if statement. Turn
# ``teacher_forcing_ratio`` up to use more of it.
#
teacher_forcing_ratio = 0.5
def train(input_variable, target_variable, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
encoder_hidden = encoder.initHidden()
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
input_length = input_variable.size()[0]
target_length = target_variable.size()[0]
encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
loss = 0
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_variable[ei], encoder_hidden)
encoder_outputs[ei] = encoder_output[0][0]
decoder_input = Variable(torch.LongTensor([[SOS_token]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
decoder_hidden = encoder_hidden
use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
if use_teacher_forcing:
# Teacher forcing: Feed the target as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_output, encoder_outputs)
loss += criterion(decoder_output, target_variable[di])
decoder_input = target_variable[di] # Teacher forcing
else:
# Without teacher forcing: use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_output, encoder_outputs)
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
loss += criterion(decoder_output, target_variable[di])
if ni == EOS_token:
break
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
return loss.data[0] / target_length
######################################################################
# This is a helper function to print time elapsed and estimated time
# remaining given the current time and progress %.
#
import time
import math
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
######################################################################
# The whole training process looks like this:
#
# - Start a timer
# - Initialize optimizers and criterion
# - Create set of training pairs
# - Start empty losses array for plotting
#
# Then we call ``train`` many times and occasionally print the progress (%
# of examples, time so far, estimated time) and average loss.
#
def trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
training_pairs = [variablesFromPair(random.choice(pairs))
for i in range(n_iters)]
criterion = nn.NLLLoss()
for iter in range(1, n_iters + 1):
training_pair = training_pairs[iter - 1]
input_variable = training_pair[0]
target_variable = training_pair[1]
loss = train(input_variable, target_variable, encoder,
decoder, encoder_optimizer, decoder_optimizer, criterion)
print_loss_total += loss
plot_loss_total += loss
if iter % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
iter, iter / n_iters * 100, print_loss_avg))
if iter % plot_every == 0:
plot_loss_avg = plot_loss_total / plot_every
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
showPlot(plot_losses)
######################################################################
# Plotting results
# ----------------
#
# Plotting is done with matplotlib, using the array of loss values
# ``plot_losses`` saved while training.
#
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
def showPlot(points):
plt.figure()
fig, ax = plt.subplots()
# this locator puts ticks at regular intervals
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
######################################################################
# Evaluation
# ==========
#
# Evaluation is mostly the same as training, but there are no targets so
# we simply feed the decoder's predictions back to itself for each step.
# Every time it predicts a word we add it to the output string, and if it
# predicts the EOS token we stop there. We also store the decoder's
# attention outputs for display later.
#
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
input_variable = variableFromSentence(input_lang, sentence)
input_length = input_variable.size()[0]
encoder_hidden = encoder.initHidden()
encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_variable[ei],
encoder_hidden)
encoder_outputs[ei] = encoder_outputs[ei] + encoder_output[0][0]
decoder_input = Variable(torch.LongTensor([[SOS_token]])) # SOS
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
decoder_hidden = encoder_hidden
decoded_words = []
decoder_attentions = torch.zeros(max_length, max_length)
for di in range(max_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_output, encoder_outputs)
decoder_attentions[di] = decoder_attention.data
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
if ni == EOS_token:
decoded_words.append('<EOS>')
break
else:
decoded_words.append(output_lang.index2word[ni])
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
return decoded_words, decoder_attentions[:di + 1]
######################################################################
# We can evaluate random sentences from the training set and print out the
# input, target, and output to make some subjective quality judgements:
#
def evaluateRandomly(encoder, decoder, n=10):
for i in range(n):
pair = random.choice(pairs)
print('>', pair[0])
print('=', pair[1])
output_words, attentions = evaluate(encoder, decoder, pair[0])
output_sentence = ' '.join(output_words)
print('<', output_sentence)
print('')
######################################################################
# Training and Evaluating
# =======================
#
# With all these helper functions in place (it looks like extra work, but
# it makes it easier to run multiple experiments) we can actually
# initialize a network and start training.
#
# Remember that the input sentences were heavily filtered. For this small
# dataset we can use relatively small networks of 256 hidden nodes and a
# single GRU layer. After about 40 minutes on a MacBook CPU we'll get some
# reasonable results.
#
# .. Note::
# If you run this notebook you can train, interrupt the kernel,
# evaluate, and continue training later. Comment out the lines where the
# encoder and decoder are initialized and run ``trainIters`` again.
#
hidden_size = 256
encoder1 = EncoderRNN(input_lang.n_words, hidden_size)
attn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words,
1, dropout_p=0.1)
if use_cuda:
encoder1 = encoder1.cuda()
attn_decoder1 = attn_decoder1.cuda()
trainIters(encoder1, attn_decoder1, 75000, print_every=5000)
######################################################################
#
evaluateRandomly(encoder1, attn_decoder1)
######################################################################
# Visualizing Attention
# ---------------------
#
# A useful property of the attention mechanism is its highly interpretable
# outputs. Because it is used to weight specific encoder outputs of the
# input sequence, we can imagine looking where the network is focused most
# at each time step.
#
# You could simply run ``plt.matshow(attentions)`` to see attention output
# displayed as a matrix, with the columns being input steps and rows being
# output steps:
#
output_words, attentions = evaluate(
encoder1, attn_decoder1, "je suis trop froid .")
plt.matshow(attentions.numpy())
######################################################################
# For a better viewing experience we will do the extra work of adding axes
# and labels:
#
def showAttention(input_sentence, output_words, attentions):
# Set up figure with colorbar
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(attentions.numpy(), cmap='bone')
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + input_sentence.split(' ') +
['<EOS>'], rotation=90)
ax.set_yticklabels([''] + output_words)
# Show label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
def evaluateAndShowAttention(input_sentence):
output_words, attentions = evaluate(
encoder1, attn_decoder1, input_sentence)
print('input =', input_sentence)
print('output =', ' '.join(output_words))
showAttention(input_sentence, output_words, attentions)
evaluateAndShowAttention("elle a cinq ans de moins que moi .")
evaluateAndShowAttention("elle est trop petit .")
evaluateAndShowAttention("je ne crains pas de mourir .")
evaluateAndShowAttention("c est un jeune directeur plein de talent .")
######################################################################
# Exercises
# =========
#
# - Try with a different dataset
#
# - Another language pair
# - Human → Machine (e.g. IOT commands)
# - Chat → Response
# - Question → Answer
#
# - Replace the embeddings with pre-trained word embeddings such as word2vec or
#    GloVe (see the minimal sketch after this list)
# - Try with more layers, more hidden units, and more sentences. Compare
# the training time and results.
# - If you use a translation file where pairs have two of the same phrase
# (``I am test \t I am test``), you can use this as an autoencoder. Try
# this:
#
# - Train as an autoencoder
# - Save only the Encoder network
# - Train a new Decoder for translation from there
#
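######################################################################
# A minimal sketch for the pre-trained embedding exercise above. The helper
# name ``load_pretrained_embeddings`` and the ``vectors`` argument (a dict
# mapping words to lists of floats of length ``hidden_size``, e.g. parsed from
# a GloVe text file) are illustrative assumptions, not part of the original
# tutorial.
#
def load_pretrained_embeddings(embedding_layer, lang, vectors):
    # Copy a pre-trained vector into each row whose word is known; words
    # without a pre-trained vector keep their random initialization.
    for word, idx in lang.word2index.items():
        if word in vectors and idx < embedding_layer.num_embeddings:
            embedding_layer.weight.data[idx] = torch.FloatTensor(vectors[word])
    return embedding_layer
# Possible usage: load_pretrained_embeddings(encoder1.embedding, input_lang, glove_vectors)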
| 31,375 | 33.939866 | 133 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Generation/Evaluation.py | # coding: utf-8
######################################################################
# Evaluation
# ==========
#
# Evaluation is mostly the same as training, but there are no targets so
# we simply feed the decoder's predictions back to itself for each step.
# Every time it predicts a word we add it to the output string, and if it
# predicts the EOS token we stop there. We also store the decoder's
# attention outputs for display later.
#
import random
import torch
from torch.autograd import Variable
from source.AuxiliaryTools.nn_tool import variable_from_sentence
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
SOS_token = 0
EOS_token = 1
teacher_forcing_ratio = 0.5
def evaluate(encoder, decoder, sentence, input_word_table, output_word_table, max_length, use_cuda):
input_variable = variable_from_sentence(input_word_table, sentence)
input_length = input_variable.size()[0]
encoder_hidden = encoder.initHidden()
encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_variable[ei],
encoder_hidden)
encoder_outputs[ei] = encoder_outputs[ei] + encoder_output[0][0]
decoder_input = Variable(torch.LongTensor([[SOS_token]])) # SOS
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
decoder_hidden = encoder_hidden
decoded_words = []
decoder_attentions = torch.zeros(max_length, max_length)
last_time_best = 0
for di in range(max_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_output, encoder_outputs)
decoder_attentions[di] = decoder_attention.data
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
if ni == EOS_token:
decoded_words.append('<EOS>')
break
else:
decoded_words.append(output_word_table.index2word[ni])
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
return decoded_words, decoder_attentions[:di + 1]
######################################################################
# We can evaluate random sentences from the training set and print out the
# input, target, and output to make some subjective quality judgements:
#
def evaluate_randomly(all_pairs, encoder, decoder, input_word_table, output_word_table, max_length, use_cuda, n=10):
for i in range(n):
pair = random.choice(all_pairs)
print('>', pair[0])
print('=', pair[1])
output_words, attentions = evaluate(encoder, decoder, pair[0],
input_word_table, output_word_table, max_length, use_cuda)
output_sentence = ' '.join(output_words)
print('<', output_sentence)
print('')
######################################################################
# For a better viewing experience we will do the extra work of adding axes
# and labels:
#
def show_attention(input_sentence, output_words, attentions):
# Set up figure with colorbar
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(attentions.numpy(), cmap='bone')
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + input_sentence.split(' ') +
['<EOS>'], rotation=90)
ax.set_yticklabels([''] + output_words)
# Show label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
def evaluate_and_show_attention(input_sentence, encoder, attn_decoder, input_word_table, output_word_table, max_length, use_cuda):
    output_words, attentions = evaluate(encoder, attn_decoder, input_sentence, input_word_table, output_word_table, max_length, use_cuda)
print('input =', input_sentence)
print('output =', ' '.join(output_words))
show_attention(input_sentence, output_words, attentions)
| 4,187 | 34.794872 | 116 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Generation/Seq2SeqModel.py | # coding: utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size, use_cuda, n_layers=1):
super(EncoderRNN, self).__init__()
self.n_layers = n_layers
self.hidden_size = hidden_size
self.use_cuda = use_cuda
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
def forward(self, input, hidden):
embedded = self.embedding(input).view(1, 1, -1)
output = embedded
for i in range(self.n_layers):
output, hidden = self.gru(output, hidden)
return output, hidden
def initHidden(self):
result = Variable(torch.zeros(1, 1, self.hidden_size))
if self.use_cuda:
return result.cuda()
else:
return result
class DecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, use_cuda, n_layers=1):
super(DecoderRNN, self).__init__()
self.n_layers = n_layers
self.hidden_size = hidden_size
self.use_cuda = use_cuda
self.embedding = nn.Embedding(output_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
self.softmax = nn.LogSoftmax()
def forward(self, input, hidden):
output = self.embedding(input).view(1, 1, -1)
for i in range(self.n_layers):
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = self.softmax(self.out(output[0]))
return output, hidden
def initHidden(self):
result = Variable(torch.zeros(1, 1, self.hidden_size))
if self.use_cuda:
return result.cuda()
else:
return result
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, max_length, use_cuda, n_layers=1, dropout_p=0.1):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout_p = dropout_p
self.max_length = max_length
self.use_cuda = use_cuda
self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.dropout = nn.Dropout(self.dropout_p)
self.gru = nn.GRU(self.hidden_size, self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.output_size)
def forward(self, input, hidden, encoder_output, encoder_outputs):
embedded = self.embedding(input).view(1, 1, -1)
embedded = self.dropout(embedded)
attn_weights = F.softmax(self.attn(torch.cat((embedded[0], hidden[0]), 1)))
# print('debug', embedded.size(), attn_weights.unsqueeze(0).size(), encoder_outputs.unsqueeze(0).size())
attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.unsqueeze(0))
output = torch.cat((embedded[0], attn_applied[0]), 1)
output = self.attn_combine(output).unsqueeze(0)
for i in range(self.n_layers):
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = F.log_softmax(self.out(output[0]))
return output, hidden, attn_weights
def initHidden(self):
result = Variable(torch.zeros(1, 1, self.hidden_size))
if self.use_cuda:
return result.cuda()
else:
return result
| 3,661 | 34.901961 | 112 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Generation/Training.py | import time
import math
import random
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
from source.AuxiliaryTools.nn_tool import show_plot, variables_from_pair
SOS_token = 0
EOS_token = 1
teacher_forcing_ratio = 0.5
def train(input_variable, target_variable, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion,
max_length, use_cuda):
encoder_hidden = encoder.initHidden()
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
input_length = input_variable.size()[0]
target_length = target_variable.size()[0]
encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
loss = 0
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_variable[ei], encoder_hidden)
encoder_outputs[ei] = encoder_output[0][0]
decoder_input = Variable(torch.LongTensor([[SOS_token]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
decoder_hidden = encoder_hidden
use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
if use_teacher_forcing:
# Teacher forcing: Feed the target as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_output, encoder_outputs)
loss += criterion(decoder_output, target_variable[di])
decoder_input = target_variable[di] # Teacher forcing
else:
# Without teacher forcing: use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_output, encoder_outputs)
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
loss += criterion(decoder_output, target_variable[di])
if ni == EOS_token:
break
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
return loss.data[0] / target_length
def as_minutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def time_since(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (as_minutes(s), as_minutes(rs))
def train_iters(encoder, decoder, n_iters, pairs, input_word_table, output_word_table, max_length, use_cuda,
print_every=1000, plot_every=100, learning_rate=0.01):
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
training_pairs = [variables_from_pair(random.choice(pairs), input_word_table, output_word_table)
for i in range(n_iters)]
criterion = nn.NLLLoss()
for iter in range(1, n_iters + 1):
training_pair = training_pairs[iter - 1]
input_variable = training_pair[0].cuda() if use_cuda else training_pair[0]
target_variable = training_pair[1].cuda() if use_cuda else training_pair[1]
loss = train(input_variable, target_variable, encoder,
decoder, encoder_optimizer, decoder_optimizer, criterion, max_length, use_cuda)
print_loss_total += loss
plot_loss_total += loss
if iter % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (time_since(start, iter / n_iters),
iter, iter / n_iters * 100, print_loss_avg))
if iter % plot_every == 0:
plot_loss_avg = plot_loss_total / plot_every
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
# show_plot(plot_losses)
| 4,278 | 33.788618 | 109 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/AuxiliaryTools/nn_tool.py | # coding: utf-8
from __future__ import unicode_literals, print_function, division
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
SOS_token = 0
EOS_token = 1
teacher_forcing_ratio = 0.5
MAX_LENGTH = 10
def show_plot(points):
plt.figure()
fig, ax = plt.subplots()
# this locator puts ticks at regular intervals
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
def indexes_from_sentence(word_table, sentence):
return [word_table.word2index[word] for word in sentence]
def variable_from_sentence(word_table, sentence):
indexes = indexes_from_sentence(word_table, sentence)
indexes.append(EOS_token)
result = Variable(torch.LongTensor(indexes).view(-1, 1))
return result
def variables_from_pair(pair, input_word_table, output_word_table):
input_variable = variable_from_sentence(input_word_table, pair[0])
target_variable = variable_from_sentence(output_word_table, pair[1])
return input_variable, target_variable
| 1,083 | 26.794872 | 72 | py |
EmpTransfo | EmpTransfo-master/train_full.py | # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree.
import os
import math
import logging
from pprint import pformat
from argparse import ArgumentParser
from collections import defaultdict
from itertools import chain
import torch
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss, MetricsLambda, RunningAverage
from ignite.contrib.handlers import ProgressBar, PiecewiseLinear
from config import Config
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger, OutputHandler, OptimizerParamsHandler
from pytorch_pretrained_bert import (OpenAIAdam, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,
GPT2DoubleHeadsModel, GPT2Tokenizer, WEIGHTS_NAME, CONFIG_NAME,
BertModel, BertTokenizer)
from utils import get_dataset, get_dataset_for_daily_dialog
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>",
"<no_emotion>", "<happiness>", "<surprise>", "<sadness>", "<disgust>", "<anger>", "<fear>",
"<directive>", "<inform>", "<commissive>", "<question>",
"<pad>"]
MODEL_INPUTS = ["input_ids", "mc_token_ids", "lm_labels", "mc_labels", "token_type_ids", "token_emotion_ids"]
PADDED_INPUTS = ["input_ids", "lm_labels", "token_type_ids", "token_emotion_ids"]
logger = logging.getLogger(__file__)
def average_distributed_scalar(scalar, config):
""" Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. """
if config.local_rank == -1:
return scalar
scalar_t = torch.tensor(scalar, dtype=torch.float, device=config.device) / torch.distributed.get_world_size()
torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM)
return scalar_t.item()
def pad_dataset(dataset, padding=0):
""" Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler. """
max_l = max(len(x) for x in dataset["input_ids"])
for name in PADDED_INPUTS:
dataset[name] = [x + [padding if name != "lm_labels" else -1] * (max_l - len(x)) for x in dataset[name]]
return dataset
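# A minimal sketch of the per-batch alternative mentioned in the docstring of
# ``pad_dataset`` above: pad every batch to its own longest sequence, e.g. from
# a DataLoader ``collate_fn``. ``pad_batch`` is an illustrative helper and is
# not used elsewhere in this script.
def pad_batch(sequences, padding=0):
    max_l = max(len(x) for x in sequences)
    return torch.tensor([x + [padding] * (max_l - len(x)) for x in sequences])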
def build_input_from_segments(history, emotions, reply, candidate_emotion, tokenizer, lm_labels=False, with_eos=True):
""" Build a sequence of input from 3 segments: persona, history and last reply """
bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:4])
instance = {}
#sequence = [[bos] + history[0] + list(chain(*history[1:]))] + [reply + ([eos] if with_eos else [])] #seq = [personas, history, reply] concatenate all persona sentences
sequence = [[bos] + history[0]] + history[1:] +[reply +([eos] if with_eos else [])]
sequence = [[speaker2 if (len(sequence)-i) % 2 else speaker1] + s for i, s in enumerate(sequence)]
all_emotions = emotions + [candidate_emotion]
sequence = [[all_emotions[i]] + s for i, s in enumerate(sequence)]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s] # the last for is for repeating the speaker1 and speaker2 for all tokens
instance["token_emotion_ids"] = [emotions[i] for i, s in enumerate(sequence[:-1]) for _ in s]+[candidate_emotion]*len(sequence[-1])
instance["mc_token_ids"] = len(instance["input_ids"]) - 1
instance["lm_labels"] = [-1] * len(instance["input_ids"])
if lm_labels:
instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:] #all -1 except for reply, reply is just the ids
return instance, sequence
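# Illustrative layout of the instance built above: every history utterance and
# the candidate reply becomes one segment of the form
#   <emotion_i> <speaker_x> token token ...
# and input_ids is the concatenation of these segments (<bos> opens the first
# one, <eos> closes the reply). token_type_ids carries one speaker id per
# token, token_emotion_ids one emotion id per token, mc_token_ids indexes the
# final position (used by the multiple-choice head), and lm_labels masks
# everything with -1 except the tail of the reply segment when lm_labels=True.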
def get_data_loaders(config, tokenizer):
""" Prepare the dataset for training and evaluation """
personachat = get_dataset_for_daily_dialog(tokenizer, config.dataset_path, config.dataset_cache, SPECIAL_TOKENS)
# personachat["train"] = personachat["train"][:100]
# personachat["valid"] = personachat["valid"][:10]
logger.info("Build inputs and labels")
datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
gpu_max_length = 310
for dataset_name, dataset in personachat.items():
num_candidates = len(dataset[0]["utterances"][0]["candidates"])
if config.num_candidates > 0 and dataset_name == 'train':
num_candidates = min(config.num_candidates, num_candidates)
for dialog in dataset:
for utterance in dialog["utterances"]:
history = utterance["history"][-(2*config.max_history+1):]
emotions = utterance["emotion"][-(2 * config.max_history + 1):]
for j, candidate in enumerate(utterance["candidates"][-num_candidates:]):
lm_labels = bool(j == num_candidates-1) #the true label is always the last one in list of candidates
candidate_emotion = utterance['candidates_emotions'][j]
instance, _ = build_input_from_segments(history, emotions, candidate, candidate_emotion, tokenizer, lm_labels)
#print(len(instance["input_ids"]))
if len(instance["input_ids"]) > gpu_max_length:
truncated_history = [hist[:10] for hist in history]
truncated_candidate = candidate[:10]
instance, _ = build_input_from_segments(truncated_history, emotions, truncated_candidate, candidate_emotion, tokenizer, lm_labels)
for input_name, input_array in instance.items():
datasets[dataset_name][input_name].append(input_array)
datasets[dataset_name]["mc_labels"].append(num_candidates - 1)
datasets[dataset_name]["n_candidates"] = num_candidates
logger.info("Pad inputs and convert to Tensor")
tensor_datasets = {"train": [], "valid": []}
for dataset_name, dataset in datasets.items():
dataset = pad_dataset(dataset, padding=tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-1]))
for input_name in MODEL_INPUTS:
tensor = torch.tensor(dataset[input_name])
if input_name != "mc_labels":
tensor = tensor.view((-1, datasets[dataset_name]["n_candidates"]) + tensor.shape[1:])
tensor_datasets[dataset_name].append(tensor)
logger.info("Build train and validation dataloaders")
train_dataset, valid_dataset = TensorDataset(*tensor_datasets["train"]), TensorDataset(*tensor_datasets["valid"])
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if config.distributed else None
valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if config.distributed else None
train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=config.train_batch_size, shuffle=False)
valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=config.valid_batch_size, shuffle=False)
logger.info("Train dataset (Batch, Candidates, Seq length): {}".format(train_dataset.tensors[0].shape))
logger.info("Valid dataset (Batch, Candidates, Seq length): {}".format(valid_dataset.tensors[0].shape))
return train_loader, valid_loader, train_sampler, valid_sampler
def train():
config_file = "configs/train_full_config.json"
config = Config.from_json_file(config_file)
# logging is set to INFO (resp. WARN) for main (resp. auxiliary) process. logger.info => log main process only, logger.warning => log all processes
logging.basicConfig(level=logging.INFO if config.local_rank in [-1, 0] else logging.WARN)
logger.warning("Running process %d", config.local_rank) # This is a logger.warning: it will be printed by all distributed processes
logger.info("Arguments: %s", pformat(config))
# Initialize distributed training if needed
config.distributed = (config.local_rank != -1)
if config.distributed:
torch.cuda.set_device(config.local_rank)
config.device = torch.device("cuda", config.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
logger.info("Prepare tokenizer, pretrained model and optimizer - add special tokens for fine-tuning")
tokenizer_class = GPT2Tokenizer if "gpt2" in config.model_checkpoint else OpenAIGPTTokenizer
tokenizer = tokenizer_class.from_pretrained(config.model_checkpoint)
model_class = GPT2DoubleHeadsModel if "gpt2" in config.model_checkpoint else OpenAIGPTDoubleHeadsModel
model = model_class.from_pretrained(config.model_checkpoint)
tokenizer.set_special_tokens(SPECIAL_TOKENS)
model.set_num_special_tokens(len(SPECIAL_TOKENS))
model.to(config.device)
optimizer = OpenAIAdam(model.parameters(), lr=config.lr)
# Prepare model for FP16 and distributed training if needed (order is important, distributed should be the last)
if config.fp16:
from apex import amp # Apex is only required if we use fp16 training
model, optimizer = amp.initialize(model, optimizer, opt_level=config.fp16)
if config.distributed:
model = DistributedDataParallel(model, device_ids=[config.local_rank], output_device=config.local_rank)
logger.info("Prepare datasets")
train_loader, val_loader, train_sampler, valid_sampler = get_data_loaders(config, tokenizer)
# Training function and trainer
def update(engine, batch):
model.train()
input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids, token_emotion_ids = tuple(input_tensor.to(config.device) for input_tensor in batch)
lm_loss, mc_loss = model(input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids, token_emotion_ids)
loss = (lm_loss * config.lm_coef + mc_loss * config.mc_coef) / config.gradient_accumulation_steps
if config.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.max_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), config.max_norm)
if engine.state.iteration % config.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return loss.item()
trainer = Engine(update)
# Evaluation function and evaluator (evaluator output is the input of the metrics)
def inference(engine, batch):
model.eval()
with torch.no_grad():
batch = tuple(input_tensor.to(config.device) for input_tensor in batch)
input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids, token_emotion_ids = batch
#logger.info(tokenizer.decode(input_ids[0, -1, :].tolist()))
model_outputs = model(input_ids, mc_token_ids, token_type_ids=token_type_ids, token_emotion_ids=token_emotion_ids)
lm_logits, mc_logits = model_outputs[0], model_outputs[1] # So we can also use GPT2 outputs
lm_logits_flat_shifted = lm_logits[..., :-1, :].contiguous().view(-1, lm_logits.size(-1))
lm_labels_flat_shifted = lm_labels[..., 1:].contiguous().view(-1)
return (lm_logits_flat_shifted, mc_logits), (lm_labels_flat_shifted, mc_labels)
evaluator = Engine(inference)
# Attach evaluation to trainer: we evaluate when we start the training and at the end of each epoch
trainer.add_event_handler(Events.EPOCH_COMPLETED, lambda _: evaluator.run(val_loader))
if config.n_epochs < 1:
trainer.add_event_handler(Events.COMPLETED, lambda _: evaluator.run(val_loader))
if config.eval_before_start:
trainer.add_event_handler(Events.STARTED, lambda _: evaluator.run(val_loader))
# Make sure distributed data samplers split the dataset nicely between the distributed processes
if config.distributed:
trainer.add_event_handler(Events.EPOCH_STARTED, lambda engine: train_sampler.set_epoch(engine.state.epoch))
evaluator.add_event_handler(Events.EPOCH_STARTED, lambda engine: valid_sampler.set_epoch(engine.state.epoch))
# Linearly decrease the learning rate from lr to zero
scheduler = PiecewiseLinear(optimizer, "lr", [(0, config.lr), (config.n_epochs * len(train_loader), 0.0)])
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
# Prepare metrics - note how we compute distributed metrics
RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
metrics = {"nll": Loss(torch.nn.CrossEntropyLoss(ignore_index=-1), output_transform=lambda x: (x[0][0], x[1][0])),
"accuracy": Accuracy(output_transform=lambda x: (x[0][1], x[1][1]))}
metrics.update({"average_nll": MetricsLambda(average_distributed_scalar, metrics["nll"], config),
"average_accuracy": MetricsLambda(average_distributed_scalar, metrics["accuracy"], config)})
metrics["average_ppl"] = MetricsLambda(math.exp, metrics["average_nll"])
for name, metric in metrics.items():
metric.attach(evaluator, name)
# On the main process: add progress bar, tensorboard, checkpoints and save model, configuration and tokenizer before we start to train
if config.local_rank in [-1, 0]:
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names=["loss"])
evaluator.add_event_handler(Events.COMPLETED, lambda _: pbar.log_message("Validation: %s" % pformat(evaluator.state.metrics)))
tb_logger = TensorboardLogger(log_dir=config.log_dir)
tb_logger.attach(trainer, log_handler=OutputHandler(tag="training", metric_names=["loss"]), event_name=Events.ITERATION_COMPLETED)
tb_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED)
tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metric_names=list(metrics.keys()), another_engine=trainer), event_name=Events.EPOCH_COMPLETED)
checkpoint_handler = ModelCheckpoint(tb_logger.writer.log_dir, 'checkpoint', save_interval=1, n_saved=3)
trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {'mymodel': getattr(model, 'module', model)}) # "getattr" take care of distributed encapsulation
torch.save(config, tb_logger.writer.log_dir + '/model_training_args.bin')
getattr(model, 'module', model).config.to_json_file(os.path.join(tb_logger.writer.log_dir, CONFIG_NAME))
tokenizer.save_vocabulary(tb_logger.writer.log_dir)
# Run the training
trainer.run(train_loader, max_epochs=config.n_epochs)
# On the main process: close tensorboard logger and rename the last checkpoint (for easy re-loading with OpenAIGPTModel.from_pretrained method)
if config.local_rank in [-1, 0] and config.n_epochs > 0:
os.rename(checkpoint_handler._saved[-1][1][-1], os.path.join(tb_logger.writer.log_dir, WEIGHTS_NAME)) # TODO: PR in ignite to have better access to saved file paths (cleaner)
tb_logger.close()
if __name__ == "__main__":
train()
| 15,365 | 59.496063 | 183 | py |
EmpTransfo | EmpTransfo-master/evaluate.py | # # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import random
from argparse import ArgumentParser
from itertools import chain
from pprint import pformat
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from config import InteractConfig
from pytorch_pretrained_bert import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2LMHeadModel, GPT2Tokenizer
from utils import download_pretrained_model, get_dataset, _bleu, _f1_score
def build_input_from_segments(persona, history, reply, tokenizer, SPECIAL_TOKENS, lm_labels=False, with_eos=True):
""" Build a sequence of input from 3 segments: persona, history and last reply """
bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])
instance = {}
sequence = [[bos] + list(chain(*persona))] + history + [
reply + ([eos] if with_eos else [])] # seq = [personas, history, reply] concatenate all persona sentences
sequence = [sequence[0]] + [[speaker2 if (len(sequence) - i) % 2 else speaker1] + s for i, s in
enumerate(sequence[1:])]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in
s] # the last for is for repeating the speaker1 and speaker2 for all tokens
instance["mc_token_ids"] = len(instance["input_ids"]) - 1
instance["lm_labels"] = [-1] * len(instance["input_ids"])
if lm_labels:
instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:] # all -1 except for reply, reply is just the ids
return instance, sequence
def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (..., vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
threshold: a minimal threshold to keep logits
"""
top_k = min(top_k, logits.size(-1))
if top_k > 0:
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits
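# Illustrative usage (hypothetical 1-D ``logits`` of shape (vocab_size,)):
#   logits = top_filtering(logits, top_k=0, top_p=0.9)
#   next_token = torch.multinomial(F.softmax(logits, dim=-1), num_samples=1)
# Filtered positions are set to -inf and therefore get zero probability after
# the softmax; ``calculate_metrics`` below applies the same pattern.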
def get_emotions(dataset):
for data in tqdm(dataset['valid']):
utterances = data['utterances']
for utterance in utterances:
true_emotion = utterance["emotion"]
def calculate_metrics(args, model, tokenizer, dataset, special_tokens):
special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
all_blues = []
all_f1_scores = []
all_true_sentences = []
all_predicted_sentences = []
for data in tqdm(dataset['valid']):
personality = data['personality']
utterances = data['utterances']
        #utterance = utterances[-1] #only the longest conversation
for utterance in utterances:
true_label = utterance['candidates'][-1]
history = utterance['history']
predicted_output = []
for i in range(args.max_length):
instance, _ = build_input_from_segments(personality, history, predicted_output, tokenizer, special_tokens, with_eos=False)
try:
if len(instance["input_ids"]) > 310:
truncated_history = [hist[:5] for hist in history]
instance, _ = build_input_from_segments(personality, truncated_history, predicted_output, tokenizer, special_tokens, with_eos=False)
input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)
logits = model(input_ids, token_type_ids=token_type_ids)
except:
print("exception")
continue
if "gpt2" == args.model:
logits = logits[0]
logits = logits[0, -1, :] / args.temperature
logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
probs = F.softmax(logits, dim=-1)
prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
# if i < args.min_length and prev.item() in special_tokens_ids:
# k=0
# while prev.item() in special_tokens_ids and k < 100:
# prev = torch.multinomial(probs, num_samples=1)
# k+=1
if i < args.min_length:
prev = torch.multinomial(probs, num_samples=1)
# if prev.item() in special_tokens_ids:
# break
predicted_output.append(prev.item())
predicted_sentence = tokenizer.decode(predicted_output, skip_special_tokens=True)
true_sentence = tokenizer.decode(true_label, skip_special_tokens=True)
#looks like zero gives the best results
all_predicted_sentences.append(predicted_sentence)
all_true_sentences.append(true_sentence)
bleus = [_bleu(predicted_sentence, [true_sentence], method="method"+str(i)) for i in [0,1,2,3,5]]
#bleu = _bleu(predicted_sentence, [true_sentence])
f1_score = _f1_score(predicted_sentence, [true_sentence])
#print(f1_score)
all_blues.append(bleus)
all_f1_scores.append(f1_score)
#compare predicted and label with bleu
print("avg bleu", np.array(all_blues).mean(axis=0))
print("avg f1 score", np.mean(all_f1_scores))
print("max bleu", np.array(all_blues).max(axis=0))
def run():
config_file = "configs/interact_config.json"
config = InteractConfig.from_json_file(config_file)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
logger.info(pformat(config))
if config.model_checkpoint == "":
config.model_checkpoint = download_pretrained_model()
random.seed(config.seed)
torch.random.manual_seed(config.seed)
torch.cuda.manual_seed(config.seed)
logger.info("Get pretrained model and tokenizer")
tokenizer_class = GPT2Tokenizer if "gpt2" == config.model else OpenAIGPTTokenizer
tokenizer = tokenizer_class.from_pretrained(config.model_checkpoint)
model_class = GPT2LMHeadModel if "gpt2" == config.model else OpenAIGPTLMHeadModel
model = model_class.from_pretrained(config.model_checkpoint)
model.to(config.device)
model.eval()
dataset = get_dataset(tokenizer, config.dataset_path, config.dataset_cache)
special_tokens = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]
calculate_metrics(config, model, tokenizer, dataset, special_tokens)
if __name__ == "__main__":
run()
| 8,472 | 42.229592 | 156 | py |
EmpTransfo | EmpTransfo-master/utils.py | # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import os
import tarfile
import tempfile
import re
import torch
from pytorch_pretrained_bert import cached_path
from collections import Counter
try:
from nltk.translate import bleu_score as nltkbleu
except ImportError:
# User doesn't have nltk installed, so we can't use it for bleu
# We'll just turn off things, but we might want to warn the user
nltkbleu = None
PERSONACHAT_URL = "https://s3.amazonaws.com/datasets.huggingface.co/personachat/personachat_self_original.json"
HF_FINETUNED_MODEL = "https://s3.amazonaws.com/models.huggingface.co/transfer-learning-chatbot/finetuned_chatbot_gpt.tar.gz"
logger = logging.getLogger(__file__)
re_art = re.compile(r'\b(a|an|the)\b')
re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re_art.sub(' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
return re_punc.sub(' ', text) # convert punctuation to spaces
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def download_pretrained_model():
""" Download and extract finetuned model from S3 """
resolved_archive_file = cached_path(HF_FINETUNED_MODEL)
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
return tempdir
def get_dataset(tokenizer, dataset_path, dataset_cache=None):
""" Get PERSONACHAT from S3 """
dataset_path = dataset_path or PERSONACHAT_URL
    dataset_cache = dataset_cache + '_' + type(tokenizer).__name__  # To avoid using the GPT cache for GPT-2 and vice-versa
if dataset_cache and os.path.isfile(dataset_cache):
logger.info("Load tokenized dataset from cache at %s", dataset_cache)
dataset = torch.load(dataset_cache)
else:
logger.info("Download dataset from %s", dataset_path)
personachat_file = cached_path(dataset_path)
with open(personachat_file, "r", encoding="utf-8") as f:
dataset = json.loads(f.read())
logger.info("Tokenize and encode the dataset")
def tokenize(obj):
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize(o)) for n, o in obj.items())
return list(tokenize(o) for o in obj)
dataset = tokenize(dataset)
if dataset_cache:
torch.save(dataset, dataset_cache)
return dataset
def get_dataset_for_daily_dialog(tokenizer, dataset_path, dataset_cache=None, special_tokens=None):
""" Get PERSONACHAT from S3 """
dataset_path = dataset_path or PERSONACHAT_URL
    dataset_cache = dataset_cache + '_' + type(tokenizer).__name__  # To avoid using the GPT cache for GPT-2 and vice-versa
if dataset_cache and os.path.isfile(dataset_cache):
logger.info("Load tokenized dataset from cache at %s", dataset_cache)
dataset = torch.load(dataset_cache)
else:
logger.info("Download dataset from %s", dataset_path)
personachat_file = cached_path(dataset_path)
with open(personachat_file, "r", encoding="utf-8") as f:
dataset = json.loads(f.read())
logger.info("Tokenize and encode the dataset")
def tokenize(obj):
if isinstance(obj, str):
if obj in special_tokens:
return tokenizer.convert_tokens_to_ids(obj)
else:
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize(o)) for n, o in obj.items())
return list(tokenize(o) for o in obj)
dataset = tokenize(dataset)
if dataset_cache:
torch.save(dataset, dataset_cache)
return dataset
def get_dataset_personalities(tokenizer, dataset_path, dataset_cache=None):
""" Get personalities from PERSONACHAT """
dataset_path = dataset_path or PERSONACHAT_URL
    dataset_cache = dataset_cache + '_' + type(tokenizer).__name__  # To avoid using the GPT cache for GPT-2 and vice-versa
if os.path.isfile(dataset_cache):
logger.info("Load tokenized dataset from cache at %s", dataset_cache)
personachat = torch.load(dataset_cache)
else:
logger.info("Download PERSONACHAT dataset from %s", dataset_path)
personachat_file = cached_path(dataset_path)
with open(personachat_file, "r", encoding="utf-8") as f:
personachat = json.loads(f.read())
logger.info("Tokenize and encode the dataset")
def tokenize(obj):
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize(o)) for n, o in obj.items())
return list(tokenize(o) for o in obj)
personachat = tokenize(personachat)
torch.save(personachat, dataset_cache)
logger.info("Filter personalities")
personalities = []
for dataset in personachat.values():
for dialog in dataset:
personalities.append(dialog["personality"])
logger.info("Gathered {} personalities".format(len(personalities)))
return personalities
def _prec_recall_f1_score(pred_items, gold_items):
"""
Compute precision, recall and f1 given a set of gold and prediction items.
:param pred_items: iterable of predicted values
:param gold_items: iterable of gold values
:return: tuple (p, r, f1) for precision, recall, f1
"""
common = Counter(gold_items) & Counter(pred_items)
num_same = sum(common.values())
if num_same == 0:
return 0, 0, 0
precision = 1.0 * num_same / len(pred_items)
recall = 1.0 * num_same / len(gold_items)
f1 = (2 * precision * recall) / (precision + recall)
return precision, recall, f1
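# Worked example (hypothetical token lists): with pred_items = ["a", "cat", "sat"]
# and gold_items = ["the", "cat", "sat", "down"], two tokens overlap, so
# precision = 2/3, recall = 2/4 and f1 = 2 * (2/3) * (1/2) / (2/3 + 1/2) ≈ 0.571.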
def _f1_score(guess, answers):
"""Return the max F1 score between the guess and *any* answer."""
if guess is None or answers is None:
return 0
g_tokens = normalize_answer(guess).split()
scores = [
        _prec_recall_f1_score(g_tokens, normalize_answer(a).split()) for a in answers
]
return max(f1 for p, r, f1 in scores)
def _bleu(guess, answers, method=None):
"""Compute approximate BLEU score between guess and a set of answers."""
if nltkbleu is None:
# bleu library not installed, just return a default value
return None
# Warning: BLEU calculation *should* include proper tokenization and
# punctuation etc. We're using the normalize_answer for everything though,
# so we're over-estimating our BLEU scores. Also note that NLTK's bleu is
# going to be slower than fairseq's (which is written in C), but fairseq's
# requires that everything be in arrays of ints (i.e. as tensors). NLTK's
# works with strings, which is better suited for this module.
if method == "method0":
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method0
elif method == "method1":
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method1
elif method == "method2":
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method2
elif method == "method3":
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method3
elif method == "method4":
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method4
elif method == "method5":
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method5
elif method == "method6":
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method6
elif method == "method7":
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method7
else:
smoothing_func = nltkbleu.SmoothingFunction(epsilon=1e-12).method3
return nltkbleu.sentence_bleu(
[normalize_answer(a).split(" ") for a in answers],
normalize_answer(guess).split(" "),
smoothing_function=smoothing_func,
)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
| 8,740 | 37.676991 | 124 | py |
EmpTransfo | EmpTransfo-master/train_emotion_recognition.py | # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree.
import os
import math
import logging
from pprint import pformat
from argparse import ArgumentParser
from collections import defaultdict
from itertools import chain
import torch
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Recall, Loss, MetricsLambda, RunningAverage, Precision, ConfusionMatrix
from ignite.contrib.handlers import ProgressBar, PiecewiseLinear
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger, OutputHandler, OptimizerParamsHandler
from config import Config
from pytorch_pretrained_bert import (OpenAIAdam, OpenAIGPTDoubleHeadLMEmotionRecognitionModel, OpenAIGPTTokenizer,
GPT2DoubleHeadsModel, GPT2Tokenizer, WEIGHTS_NAME, CONFIG_NAME)
from utils import get_dataset, get_dataset_for_daily_dialog
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>",
"<no_emotion>", "<happiness>", "<surprise>", "<sadness>", "<disgust>", "<anger>", "<fear>",
"<directive>", "<inform>", "<commissive>", "<question>",
"<pad>"]
MODEL_INPUTS = ["input_ids", "mc_token_ids", "lm_labels", "mc_labels", "token_type_ids", "token_emotion_ids"]
PADDED_INPUTS = ["input_ids", "lm_labels", "token_type_ids", "token_emotion_ids"]
logger = logging.getLogger(__file__)
def average_distributed_scalar(scalar, config):
""" Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. """
if config.local_rank == -1:
return scalar
scalar_t = torch.tensor(scalar, dtype=torch.float, device=config.device) / torch.distributed.get_world_size()
torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM)
return scalar_t.item()
def pad_dataset(dataset, padding=0):
""" Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler. """
max_l = max(len(x) for x in dataset["input_ids"])
for name in PADDED_INPUTS:
dataset[name] = [x + [padding if name != "lm_labels" else -1] * (max_l - len(x)) for x in dataset[name]]
return dataset
def get_emotion_label(tokenizer, candidate_emotion):
_, _, _, _, no_emotion_id, happiness_id, surprise_id, sadness_id, disgust_id, anger_id, fear_id, _, _, _, _, _ = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
if candidate_emotion == happiness_id:
return 0
elif candidate_emotion == surprise_id:
return 1
elif candidate_emotion == sadness_id:
return 2
elif candidate_emotion == disgust_id:
return 3
elif candidate_emotion == anger_id:
return 4
elif candidate_emotion == fear_id:
return 5
elif candidate_emotion == no_emotion_id:
return 6
def build_input_from_segments(history, emotions, reply, true_emotion, tokenizer, with_eos=True):
""" Build a sequence of input from 3 segments: persona, history and last reply """
bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:4])
#tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-1])
instance = {}
# sequence = [[bos] + history[0] + list(chain(*history[1:]))] + [reply + ([eos] if with_eos else [])] #seq = [personas, history, reply] concatenate all persona sentences
sequence = [[bos] + history[0]] + history[1:] + [reply + ([eos] if with_eos else [])]
sequence = [[speaker2 if (len(sequence)-i) % 2 else speaker1] + s for i, s in enumerate(sequence)]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s] # the last for is for repeating the speaker1 and speaker2 for all tokens
#instance["token_emotion_ids"] = [emotions[i] for i, s in enumerate(sequence[:-1]) for _ in s] + [true_emotion] * len(sequence[-1])
instance["token_emotion_ids"] = [emotions[i] for i, s in enumerate(sequence[:-1]) for _ in s]
instance["mc_token_ids"] = len(instance["input_ids"]) - 1
instance["mc_labels"] = get_emotion_label(tokenizer, true_emotion)
instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:] #all -1 except for reply, reply is just the ids
return instance, sequence
def get_data_loaders(config, tokenizer):
""" Prepare the dataset for training and evaluation """
personachat = get_dataset_for_daily_dialog(tokenizer, config.dataset_path, config.dataset_cache, SPECIAL_TOKENS)
# personachat["train"] = personachat["train"][:100]
# personachat["valid"] = personachat["valid"][:10]
logger.info("Build inputs and labels")
datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
gpu_max_length = 310
for dataset_name, dataset in personachat.items():
        num_candidates = 2  # len(dataset[0]["utterances"][0]["candidates"])
if config.num_candidates > 0 and dataset_name == 'train':
num_candidates = min(config.num_candidates, num_candidates)
for dialog in dataset:
for utterance in dialog["utterances"]:
history = utterance["history"][-(2 * config.max_history + 1):]
emotions = utterance["emotion"][-(2 * config.max_history + 1):]
reply = utterance["candidates"][-1]
true_emotion = utterance['candidates_emotions'][-1]
if true_emotion == tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)[4]:
continue
instance, _ = build_input_from_segments(history,
emotions,
reply,
true_emotion,
tokenizer)
if len(instance["input_ids"]) > gpu_max_length:
truncated_history = [hist[:10] for hist in history]
truncated_candidate = reply[:10]
true_emotion = utterance['candidates_emotions'][-1]
instance, _ = build_input_from_segments(truncated_history,
emotions,
truncated_candidate,
true_emotion,
tokenizer)
for input_name, input_array in instance.items():
datasets[dataset_name][input_name].append(input_array)
datasets[dataset_name]["n_candidates"] = num_candidates
logger.info("Pad inputs and convert to Tensor")
tensor_datasets = {"train": [], "valid": []}
for dataset_name, dataset in datasets.items():
dataset = pad_dataset(dataset, padding=tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-1]))
for input_name in MODEL_INPUTS:
tensor = torch.tensor(dataset[input_name])
#if input_name != "mc_labels":
# tensor = tensor.view((-1, datasets[dataset_name]["n_candidates"]) + tensor.shape[1:])
tensor_datasets[dataset_name].append(tensor)
logger.info("Build train and validation dataloaders")
train_dataset, valid_dataset = TensorDataset(*tensor_datasets["train"]), TensorDataset(*tensor_datasets["valid"])
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if config.distributed else None
valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if config.distributed else None
train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=config.train_batch_size, shuffle=False)
valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=config.valid_batch_size, shuffle=False)
logger.info("Train dataset (Batch, Candidates, Seq length): {}".format(train_dataset.tensors[0].shape))
logger.info("Valid dataset (Batch, Candidates, Seq length): {}".format(valid_dataset.tensors[0].shape))
return train_loader, valid_loader, train_sampler, valid_sampler
def train():
config_file = "configs/train_emotion_recognition_config.json"
config = Config.from_json_file(config_file)
# logging is set to INFO (resp. WARN) for main (resp. auxiliary) process. logger.info => log main process only, logger.warning => log all processes
logging.basicConfig(level=logging.INFO if config.local_rank in [-1, 0] else logging.WARN)
logger.warning("Running process %d", config.local_rank) # This is a logger.warning: it will be printed by all distributed processes
logger.info("Arguments: %s", pformat(config))
# Initialize distributed training if needed
config.distributed = (config.local_rank != -1)
if config.distributed:
torch.cuda.set_device(config.local_rank)
config.device = torch.device("cuda", config.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
logger.info("Prepare tokenizer, pretrained model and optimizer - add special tokens for fine-tuning")
tokenizer_class = GPT2Tokenizer if "gpt2" in config.model_checkpoint else OpenAIGPTTokenizer
tokenizer = tokenizer_class.from_pretrained(config.model_checkpoint)
model_class = OpenAIGPTDoubleHeadLMEmotionRecognitionModel
model = model_class.from_pretrained(config.model_checkpoint)
tokenizer.set_special_tokens(SPECIAL_TOKENS)
model.set_num_special_tokens(len(SPECIAL_TOKENS))
model.to(config.device)
optimizer = OpenAIAdam(model.parameters(), lr=config.lr)
# Prepare model for FP16 and distributed training if needed (order is important, distributed should be the last)
if config.fp16:
from apex import amp # Apex is only required if we use fp16 training
model, optimizer = amp.initialize(model, optimizer, opt_level=config.fp16)
if config.distributed:
model = DistributedDataParallel(model, device_ids=[config.local_rank], output_device=config.local_rank)
logger.info("Prepare datasets")
train_loader, val_loader, train_sampler, valid_sampler = get_data_loaders(config, tokenizer)
# Training function and trainer
def update(engine, batch):
model.train()
input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids, token_emotion_ids = tuple(input_tensor.to(config.device) for input_tensor in batch)
#token_emotion_ids = None
lm_loss, mc_loss = model(input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids, token_emotion_ids)
loss = (lm_loss * config.lm_coef + mc_loss * config.mc_coef) / config.gradient_accumulation_steps
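        # The loss above is pre-divided by gradient_accumulation_steps; optimizer.step() below only
        # runs every gradient_accumulation_steps iterations, so the accumulated gradients match a
        # single update over the effectively larger batch.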
if config.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.max_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), config.max_norm)
if engine.state.iteration % config.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return loss.item()
trainer = Engine(update)
# Evaluation function and evaluator (evaluator output is the input of the metrics)
def inference(engine, batch):
model.eval()
with torch.no_grad():
batch = tuple(input_tensor.to(config.device) for input_tensor in batch)
input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids, token_emotion_ids = batch
#token_emotion_ids = None
model_outputs = model(input_ids, mc_token_ids, token_type_ids=token_type_ids, token_emotion_ids=token_emotion_ids)
lm_logits, mc_logits = model_outputs[0], model_outputs[1] # So we can also use GPT2 outputs
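            # Shift so that tokens < n predict token n: drop the last logit and the first label,
            # then flatten for CrossEntropyLoss (ignore_index=-1 masks history and padding positions).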
lm_logits_flat_shifted = lm_logits[..., :-1, :].contiguous().view(-1, lm_logits.size(-1))
lm_labels_flat_shifted = lm_labels[..., 1:].contiguous().view(-1)
return (lm_logits_flat_shifted, mc_logits), (lm_labels_flat_shifted, mc_labels)
evaluator = Engine(inference)
# Attach evaluation to trainer: we evaluate when we start the training and at the end of each epoch
trainer.add_event_handler(Events.EPOCH_COMPLETED, lambda _: evaluator.run(val_loader))
if config.n_epochs < 1:
trainer.add_event_handler(Events.COMPLETED, lambda _: evaluator.run(val_loader))
if config.eval_before_start:
trainer.add_event_handler(Events.STARTED, lambda _: evaluator.run(val_loader))
# Make sure distributed data samplers split the dataset nicely between the distributed processes
if config.distributed:
trainer.add_event_handler(Events.EPOCH_STARTED, lambda engine: train_sampler.set_epoch(engine.state.epoch))
evaluator.add_event_handler(Events.EPOCH_STARTED, lambda engine: valid_sampler.set_epoch(engine.state.epoch))
# Linearly decrease the learning rate from lr to zero
scheduler = PiecewiseLinear(optimizer, "lr", [(0, config.lr), (config.n_epochs * len(train_loader), 0.0)])
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
# Prepare metrics - note how we compute distributed metrics
RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
metrics = {"nll": Loss(torch.nn.CrossEntropyLoss(ignore_index=-1), output_transform=lambda x: (x[0][0], x[1][0])),
"accuracy": Accuracy(output_transform=lambda x: (x[0][1], x[1][1]))}
metrics.update({"precision": Precision(output_transform=lambda x: (x[0][1], x[1][1])),
"recall": Recall(output_transform=lambda x: (x[0][1], x[1][1]))})
metrics.update({"average_nll": MetricsLambda(average_distributed_scalar, metrics["nll"], config),
"average_accuracy": MetricsLambda(average_distributed_scalar, metrics["accuracy"], config)})
metrics.update({"confusion_matrix": ConfusionMatrix(num_classes=6, output_transform=lambda x: (x[0][1], x[1][1]))})
metrics["average_ppl"] = MetricsLambda(math.exp, metrics["average_nll"])
for name, metric in metrics.items():
metric.attach(evaluator, name)
# On the main process: add progress bar, tensorboard, checkpoints and save model, configuration and tokenizer before we start to train
if config.local_rank in [-1, 0]:
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names=["loss"])
evaluator.add_event_handler(Events.COMPLETED, lambda _: pbar.log_message("Validation: %s" % pformat(evaluator.state.metrics)))
tb_logger = TensorboardLogger(log_dir=config.log_dir)
tb_logger.attach(trainer, log_handler=OutputHandler(tag="training", metric_names=["loss"]), event_name=Events.ITERATION_COMPLETED)
tb_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED)
tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metric_names=list(metrics.keys()), another_engine=trainer), event_name=Events.EPOCH_COMPLETED)
checkpoint_handler = ModelCheckpoint(tb_logger.writer.log_dir, 'checkpoint', save_interval=1, n_saved=3)
        trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {'mymodel': getattr(model, 'module', model)}) # "getattr" takes care of distributed encapsulation
torch.save(config, tb_logger.writer.log_dir + '/model_training_args.bin')
getattr(model, 'module', model).config.to_json_file(os.path.join(tb_logger.writer.log_dir, CONFIG_NAME))
tokenizer.save_vocabulary(tb_logger.writer.log_dir)
# Run the training
trainer.run(train_loader, max_epochs=config.n_epochs)
# On the main process: close tensorboard logger and rename the last checkpoint (for easy re-loading with OpenAIGPTModel.from_pretrained method)
if config.local_rank in [-1, 0] and config.n_epochs > 0:
os.rename(checkpoint_handler._saved[-1][1][-1], os.path.join(tb_logger.writer.log_dir, WEIGHTS_NAME)) # TODO: PR in ignite to have better access to saved file paths (cleaner)
tb_logger.close()
if __name__ == "__main__":
train()
| 16,552 | 56.675958 | 183 | py |
EmpTransfo | EmpTransfo-master/interact.py | # # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import random
from argparse import ArgumentParser
from itertools import chain
from pprint import pformat
import torch
import torch.nn.functional as F
from config import InteractConfig
from pytorch_pretrained_bert import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2LMHeadModel, GPT2Tokenizer, \
BertTokenizer
from pytorch_pretrained_bert.modeling import BertLMHeadModel
from utils import get_dataset_personalities, download_pretrained_model, get_dataset
def build_input_from_segments(history, reply, tokenizer, SPECIAL_TOKENS, lm_labels=False, with_eos=True):
""" Build a sequence of input from 3 segments: persona, history and last reply """
bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])
persona = []
instance = {}
sequence = [[bos] + list(chain(*persona))] + history + [
reply + ([eos] if with_eos else [])] # seq = [personas, history, reply] concatenate all persona sentences
sequence = [sequence[0]] + [[speaker2 if (len(sequence) - i) % 2 else speaker1] + s for i, s in
enumerate(sequence[1:])]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in
s] # the last for is for repeating the speaker1 and speaker2 for all tokens
instance["mc_token_ids"] = len(instance["input_ids"]) - 1
instance["lm_labels"] = [-1] * len(instance["input_ids"])
if lm_labels:
instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:] # all -1 except for reply, reply is just the ids
return instance, sequence
def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (..., vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
threshold: a minimal threshold to keep logits
"""
top_k = min(top_k, logits.size(-1))
if top_k > 0:
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits
def sample_sequence(history, tokenizer, model, args, SPECIAL_TOKENS, current_output=None):
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
if current_output is None:
current_output = []
for i in range(args.max_length):
instance, sequence = build_input_from_segments(history, current_output, tokenizer, SPECIAL_TOKENS,
with_eos=False)
input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)
logits = model(input_ids, token_type_ids=token_type_ids)
if "gpt2" == args.model:
logits = logits[0]
logits = logits[0, -1, :] / args.temperature
logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
probs = F.softmax(logits, dim=-1)
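        # greedy decoding (top-1) when no_sample is set, otherwise sample from the filtered distribution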
prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
        if i < args.min_length and prev.item() in special_tokens_ids:
            while prev.item() in special_tokens_ids:
                if probs.max().item() == 1:
                    break  # the distribution has collapsed onto a special token; avoid looping forever
                prev = torch.multinomial(probs, num_samples=1)
if prev.item() in special_tokens_ids:
break
current_output.append(prev.item())
return current_output
def run():
config_file = "configs/interact_config.json"
config = InteractConfig.from_json_file(config_file)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
logger.info(pformat(config))
if config.model_checkpoint == "":
config.model_checkpoint = download_pretrained_model()
torch.random.manual_seed(config.seed)
torch.cuda.manual_seed(config.seed)
logger.info("Get pretrained model and tokenizer")
if config.model == "bert":
tokenizer_class = BertTokenizer
model_class = BertLMHeadModel
elif config.model == "gpt2":
tokenizer_class = GPT2Tokenizer
model_class = GPT2LMHeadModel
else:
tokenizer_class = OpenAIGPTTokenizer
model_class = OpenAIGPTLMHeadModel
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]
tokenizer = tokenizer_class.from_pretrained(config.model_checkpoint)
model = model_class.from_pretrained(config.model_checkpoint)
model.to(config.device)
model.eval()
history = []
while True:
raw_text = input(">>> ")
while not raw_text:
print('Prompt should not be empty!')
raw_text = input(">>> ")
history.append(tokenizer.encode(raw_text))
with torch.no_grad():
out_ids = sample_sequence(history, tokenizer, model, config, SPECIAL_TOKENS)
history.append(out_ids)
history = history[-(2 * config.max_history + 1):]
out_text = tokenizer.decode(out_ids, skip_special_tokens=True)
print(out_text)
if __name__ == "__main__":
run()
| 6,871 | 41.419753 | 151 | py |
EmpTransfo | EmpTransfo-master/train.py | # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree.
import os
import math
import logging
from pprint import pformat
from argparse import ArgumentParser
from collections import defaultdict
from itertools import chain
from config import Config
import torch
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss, MetricsLambda, RunningAverage
from ignite.contrib.handlers import ProgressBar, PiecewiseLinear
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger, OutputHandler, OptimizerParamsHandler
from pytorch_pretrained_bert import (OpenAIAdam, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,
GPT2DoubleHeadsModel, GPT2Tokenizer, WEIGHTS_NAME, CONFIG_NAME,
BertModel, BertTokenizer)
from utils import get_dataset
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]
MODEL_INPUTS = ["input_ids", "mc_token_ids", "lm_labels", "mc_labels", "token_type_ids"]
PADDED_INPUTS = ["input_ids", "lm_labels", "token_type_ids"]
logger = logging.getLogger(__file__)
def average_distributed_scalar(scalar, config):
""" Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. """
if config.local_rank == -1:
return scalar
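    # Divide by the world size first and then all-reduce with SUM, which yields the mean of the
    # scalar across the distributed processes.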
scalar_t = torch.tensor(scalar, dtype=torch.float, device=config.device) / torch.distributed.get_world_size()
torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM)
return scalar_t.item()
def pad_dataset(dataset, padding=0):
""" Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler. """
max_l = max(len(x) for x in dataset["input_ids"])
for name in PADDED_INPUTS:
dataset[name] = [x + [padding if name != "lm_labels" else -1] * (max_l - len(x)) for x in dataset[name]]
return dataset
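# A minimal sketch (not used by this script) of the per-batch alternative mentioned in the docstring
# above: padding inside a DataLoader collate_fn so each batch is padded only to its own longest example.
# It assumes `examples` is a list of dicts holding the PADDED_INPUTS fields as plain Python lists.
def pad_batch_collate(examples, padding=0):
    max_l = max(len(ex["input_ids"]) for ex in examples)
    batch = {}
    for name in PADDED_INPUTS:
        pad_value = -1 if name == "lm_labels" else padding
        batch[name] = torch.tensor([ex[name] + [pad_value] * (max_l - len(ex[name])) for ex in examples])
    return batch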
def build_input_from_segments(history, reply, tokenizer, lm_labels=False, with_eos=True):
""" Build a sequence of input from 3 segments: persona, history and last reply """
bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])
instance = {}
    sequence = [[bos] + history[0]] + history[1:] + [reply + ([eos] if with_eos else [])]
sequence = [sequence[0]] + [[speaker2 if (len(sequence)-i) % 2 else speaker1] + s for i, s in enumerate(sequence[1:])]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s] # the last for is for repeating the speaker1 and speaker2 for all tokens
instance["mc_token_ids"] = len(instance["input_ids"]) - 1
instance["lm_labels"] = [-1] * len(instance["input_ids"])
if lm_labels:
instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:] #all -1 except for reply, reply is just the ids
return instance, sequence
def get_data_loaders(config, tokenizer):
""" Prepare the dataset for training and evaluation """
personachat = get_dataset(tokenizer, config.dataset_path, config.dataset_cache)
logger.info("Build inputs and labels")
datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
    gpu_max_length = 310  # depends on the GPU memory size; with a larger GPU you can increase this to include longer inputs
for dataset_name, dataset in personachat.items():
num_candidates = len(dataset[0]["utterances"][0]["candidates"])
if config.num_candidates > 0 and dataset_name == 'train':
num_candidates = min(config.num_candidates, num_candidates)
for dialog in dataset:
for utterance in dialog["utterances"]:
history = utterance["history"][-(2*config.max_history+1):]
for j, candidate in enumerate(utterance["candidates"][-num_candidates:]):
                    lm_labels = bool(j == num_candidates - 1)  # the true label is always the last one in the list of candidates
instance, _ = build_input_from_segments(history, candidate, tokenizer, lm_labels)
#print(len(instance["input_ids"]))
##
if len(instance["input_ids"]) > gpu_max_length:
truncated_history = [hist[:10] for hist in history]
truncated_candidate = candidate[:10]
instance, _ = build_input_from_segments(truncated_history, truncated_candidate, tokenizer, lm_labels)
for input_name, input_array in instance.items():
datasets[dataset_name][input_name].append(input_array)
datasets[dataset_name]["mc_labels"].append(num_candidates - 1)
datasets[dataset_name]["n_candidates"] = num_candidates
logger.info("Pad inputs and convert to Tensor")
tensor_datasets = {"train": [], "valid": []}
for dataset_name, dataset in datasets.items():
dataset = pad_dataset(dataset, padding=tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-1]))
for input_name in MODEL_INPUTS:
tensor = torch.tensor(dataset[input_name])
if input_name != "mc_labels":
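                # group the flat per-candidate rows into (n_examples, n_candidates, seq_len) so each example keeps its distractors together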
tensor = tensor.view((-1, datasets[dataset_name]["n_candidates"]) + tensor.shape[1:])
tensor_datasets[dataset_name].append(tensor)
logger.info("Build train and validation dataloaders")
train_dataset, valid_dataset = TensorDataset(*tensor_datasets["train"]), TensorDataset(*tensor_datasets["valid"])
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if config.distributed else None
valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if config.distributed else None
train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=config.train_batch_size, shuffle=False)
valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=config.valid_batch_size, shuffle=False)
logger.info("Train dataset (Batch, Candidates, Seq length): {}".format(train_dataset.tensors[0].shape))
logger.info("Valid dataset (Batch, Candidates, Seq length): {}".format(valid_dataset.tensors[0].shape))
return train_loader, valid_loader, train_sampler, valid_sampler
def train():
config_file = "configs/train_full_config.json"
config = Config.from_json_file(config_file)
# logging is set to INFO (resp. WARN) for main (resp. auxiliary) process. logger.info => log main process only, logger.warning => log all processes
logging.basicConfig(level=logging.INFO if config.local_rank in [-1, 0] else logging.WARN)
logger.warning("Running process %d", config.local_rank) # This is a logger.warning: it will be printed by all distributed processes
logger.info("Arguments: %s", pformat(config))
# Initialize distributed training if needed
config.distributed = (config.local_rank != -1)
if config.distributed:
torch.cuda.set_device(config.local_rank)
config.device = torch.device("cuda", config.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
logger.info("Prepare tokenizer, pretrained model and optimizer - add special tokens for fine-tuning")
tokenizer_class = GPT2Tokenizer if "gpt2" in config.model_checkpoint else OpenAIGPTTokenizer
tokenizer = tokenizer_class.from_pretrained(config.model_checkpoint)
model_class = GPT2DoubleHeadsModel if "gpt2" in config.model_checkpoint else OpenAIGPTDoubleHeadsModel
model = model_class.from_pretrained(config.model_checkpoint)
tokenizer.set_special_tokens(SPECIAL_TOKENS)
model.set_num_special_tokens(len(SPECIAL_TOKENS))
model.to(config.device)
optimizer = OpenAIAdam(model.parameters(), lr=config.lr)
# Prepare model for FP16 and distributed training if needed (order is important, distributed should be the last)
if config.fp16:
from apex import amp # Apex is only required if we use fp16 training
model, optimizer = amp.initialize(model, optimizer, opt_level=config.fp16)
if config.distributed:
model = DistributedDataParallel(model, device_ids=[config.local_rank], output_device=config.local_rank)
logger.info("Prepare datasets")
train_loader, val_loader, train_sampler, valid_sampler = get_data_loaders(config, tokenizer)
# Training function and trainer
def update(engine, batch):
model.train()
batch = tuple(input_tensor.to(config.device) for input_tensor in batch)
lm_loss, mc_loss = model(*batch)
loss = (lm_loss * config.lm_coef + mc_loss * config.mc_coef) / config.gradient_accumulation_steps
if config.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.max_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), config.max_norm)
if engine.state.iteration % config.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return loss.item()
trainer = Engine(update)
# Evaluation function and evaluator (evaluator output is the input of the metrics)
def inference(engine, batch):
model.eval()
with torch.no_grad():
batch = tuple(input_tensor.to(config.device) for input_tensor in batch)
input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids = batch
#logger.info(tokenizer.decode(input_ids[0, -1, :].tolist()))
model_outputs = model(input_ids, mc_token_ids, token_type_ids=token_type_ids)
lm_logits, mc_logits = model_outputs[0], model_outputs[1] # So we can also use GPT2 outputs
lm_logits_flat_shifted = lm_logits[..., :-1, :].contiguous().view(-1, lm_logits.size(-1))
lm_labels_flat_shifted = lm_labels[..., 1:].contiguous().view(-1)
return (lm_logits_flat_shifted, mc_logits), (lm_labels_flat_shifted, mc_labels)
evaluator = Engine(inference)
# Attach evaluation to trainer: we evaluate when we start the training and at the end of each epoch
trainer.add_event_handler(Events.EPOCH_COMPLETED, lambda _: evaluator.run(val_loader))
if config.n_epochs < 1:
trainer.add_event_handler(Events.COMPLETED, lambda _: evaluator.run(val_loader))
if config.eval_before_start:
trainer.add_event_handler(Events.STARTED, lambda _: evaluator.run(val_loader))
# Make sure distributed data samplers split the dataset nicely between the distributed processes
if config.distributed:
trainer.add_event_handler(Events.EPOCH_STARTED, lambda engine: train_sampler.set_epoch(engine.state.epoch))
evaluator.add_event_handler(Events.EPOCH_STARTED, lambda engine: valid_sampler.set_epoch(engine.state.epoch))
# Linearly decrease the learning rate from lr to zero
scheduler = PiecewiseLinear(optimizer, "lr", [(0, config.lr), (config.n_epochs * len(train_loader), 0.0)])
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
# Prepare metrics - note how we compute distributed metrics
RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
metrics = {"nll": Loss(torch.nn.CrossEntropyLoss(ignore_index=-1), output_transform=lambda x: (x[0][0], x[1][0])),
"accuracy": Accuracy(output_transform=lambda x: (x[0][1], x[1][1]))}
metrics.update({"average_nll": MetricsLambda(average_distributed_scalar, metrics["nll"], config),
"average_accuracy": MetricsLambda(average_distributed_scalar, metrics["accuracy"], config)})
metrics["average_ppl"] = MetricsLambda(math.exp, metrics["average_nll"])
for name, metric in metrics.items():
metric.attach(evaluator, name)
# On the main process: add progress bar, tensorboard, checkpoints and save model, configuration and tokenizer before we start to train
if config.local_rank in [-1, 0]:
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names=["loss"])
evaluator.add_event_handler(Events.COMPLETED, lambda _: pbar.log_message("Validation: %s" % pformat(evaluator.state.metrics)))
tb_logger = TensorboardLogger(log_dir=config.log_dir)
tb_logger.attach(trainer, log_handler=OutputHandler(tag="training", metric_names=["loss"]), event_name=Events.ITERATION_COMPLETED)
tb_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED)
tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metric_names=list(metrics.keys()), another_engine=trainer), event_name=Events.EPOCH_COMPLETED)
checkpoint_handler = ModelCheckpoint(tb_logger.writer.log_dir, 'checkpoint', save_interval=1, n_saved=3)
        trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {'mymodel': getattr(model, 'module', model)}) # "getattr" takes care of distributed encapsulation
torch.save(config, tb_logger.writer.log_dir + '/model_training_args.bin')
getattr(model, 'module', model).config.to_json_file(os.path.join(tb_logger.writer.log_dir, CONFIG_NAME))
tokenizer.save_vocabulary(tb_logger.writer.log_dir)
# Run the training
trainer.run(train_loader, max_epochs=config.n_epochs)
# On the main process: close tensorboard logger and rename the last checkpoint (for easy re-loading with OpenAIGPTModel.from_pretrained method)
if config.local_rank in [-1, 0] and config.n_epochs > 0:
os.rename(checkpoint_handler._saved[-1][1][-1], os.path.join(tb_logger.writer.log_dir, WEIGHTS_NAME)) # TODO: PR in ignite to have better access to saved file paths (cleaner)
tb_logger.close()
if __name__ == "__main__":
train()
| 14,215 | 58.233333 | 183 | py |
EmpTransfo | EmpTransfo-master/train_multihead.py | # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree.
import os
import math
import logging
from pprint import pformat
from argparse import ArgumentParser
from collections import defaultdict
from itertools import chain
import torch
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss, MetricsLambda, RunningAverage
from ignite.contrib.handlers import ProgressBar, PiecewiseLinear
from config import Config
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger, OutputHandler, OptimizerParamsHandler
from pytorch_pretrained_bert import (OpenAIAdam, OpenAIGPTMultiHeadModel, OpenAIGPTTokenizer,
GPT2DoubleHeadsModel, GPT2Tokenizer, WEIGHTS_NAME, CONFIG_NAME,
BertModel, BertTokenizer)
from utils import get_dataset, get_dataset_for_daily_dialog
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>",
"<no_emotion>", "<happiness>", "<surprise>", "<sadness>", "<disgust>", "<anger>", "<fear>",
"<work>", "<finance>", "<relationship>", "<attitude_and_emotion>", "<culture_and_education>",
"<school_life>", "<tourism>", "<ordinary_life>", "<politics>", "<health>",
"<directive>", "<inform>", "<commissive>", "<question>",
"<pad>"]
MODEL_INPUTS = ["input_ids", "ec_token_ids", "sc_token_ids", "lm_labels", "ec_labels", "sc_labels",
"token_type_ids", "token_emotion_ids", "token_action_ids"]
PADDED_INPUTS = ["input_ids", "lm_labels", "token_type_ids", "token_emotion_ids", "token_action_ids"]
logger = logging.getLogger(__file__)
def average_distributed_scalar(scalar, config):
""" Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. """
if config.local_rank == -1:
return scalar
scalar_t = torch.tensor(scalar, dtype=torch.float, device=config.device) / torch.distributed.get_world_size()
torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM)
return scalar_t.item()
def pad_dataset(dataset, padding=0):
""" Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler. """
max_l = max(len(x) for x in dataset["input_ids"])
for name in PADDED_INPUTS:
dataset[name] = [x + [padding if name != "lm_labels" else -1] * (max_l - len(x)) for x in dataset[name]]
return dataset
def get_emotion_label(tokenizer, candidate_emotion):
no_emotion_id, happiness_id, surprise_id, sadness_id, disgust_id, anger_id, fear_id = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[4:11])
if candidate_emotion == no_emotion_id:
return 0
elif candidate_emotion == happiness_id:
return 1
elif candidate_emotion == surprise_id:
return 2
elif candidate_emotion == sadness_id:
return 3
elif candidate_emotion == disgust_id:
return 4
elif candidate_emotion == anger_id:
return 5
elif candidate_emotion == fear_id:
return 6
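# Equivalent sketch: because the labels above follow the order of SPECIAL_TOKENS[4:11], the same
# mapping could also be built as a dict lookup, e.g.
#   emotion_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[4:11])
#   label = {tok: idx for idx, tok in enumerate(emotion_ids)}[candidate_emotion]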
def build_input_from_segments(topic, history, emotions, actions, reply, candidate_emotion, candidate_act, tokenizer, lm_labels=False, with_eos=True):
""" Build a sequence of input from 3 segments: persona, history and last reply """
bos, eos, speaker1, speaker2, no_emotion = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:5])
inform = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-4])
emotions = [no_emotion] + emotions
actions = [inform] + actions
instance = {}
sequence = [[bos] + [topic]] + history + [reply + ([eos] if with_eos else [])]
sequence = [[speaker2 if (len(sequence) - i) % 2 else speaker1] + s for i, s in enumerate(sequence)]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in
s] # the last for is for repeating the speaker1 and speaker2 for all tokens
instance["token_emotion_ids"] = [emotions[i] for i, s in enumerate(sequence[:-1]) for _ in s] + [
candidate_emotion] * len(sequence[-1])
instance["token_action_ids"] = [actions[i] for i, s in enumerate(sequence[:-1]) for _ in s] + [canidate_act] * len(
sequence[-1])
instance["ec_token_ids"] = len(instance["input_ids"]) - 1
instance["sc_token_ids"] = len(instance["input_ids"]) - 2
instance["ec_labels"] = -1
instance["lm_labels"] = [-1] * len(instance["input_ids"])
if lm_labels:
instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][
1:] # all -1 except for reply, reply is just the ids
instance["ec_labels"] = get_emotion_label(tokenizer, candidate_emotion)
return instance, sequence
def get_data_loaders(config, tokenizer):
""" Prepare the dataset for training and evaluation """
personachat = get_dataset_for_daily_dialog(tokenizer, config.dataset_path, config.dataset_cache, SPECIAL_TOKENS)
logger.info("Build inputs and labels")
datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
gpu_max_length = 310
for dataset_name, dataset in personachat.items():
num_candidates = len(dataset[0]["utterances"][0]["candidates"])
if config.num_candidates > 0 and dataset_name == 'train':
num_candidates = min(config.num_candidates, num_candidates)
for dialog in dataset:
topic = dialog["topic"]
for utterance in dialog["utterances"]:
history = utterance["history"][-(2 * config.max_history + 1):]
emotions = utterance["emotion"][-(2 * config.max_history + 1):]
actions = utterance["act"][-(2 * config.max_history + 1):]
for j, candidate in enumerate(utterance["candidates"][-num_candidates:]):
lm_labels = bool(
j == num_candidates - 1) # the true label is always the last one in list of candidates
candidate_emotion = utterance['candidates_emotions'][j]
candidate_act = utterance['candidates_acts'][j]
instance, _ = build_input_from_segments(topic, history, emotions, actions, candidate,
candidate_emotion, candidate_act, tokenizer, lm_labels)
if len(instance["input_ids"]) > gpu_max_length:
truncated_history = [hist[:10] for hist in history]
truncated_candidate = candidate[:10]
instance, _ = build_input_from_segments(topic, truncated_history, emotions, actions,
truncated_candidate,
candidate_emotion, candidate_act, tokenizer, lm_labels)
for input_name, input_array in instance.items():
datasets[dataset_name][input_name].append(input_array)
datasets[dataset_name]["sc_labels"].append(num_candidates - 1)
datasets[dataset_name]["n_candidates"] = num_candidates
logger.info("Pad inputs and convert to Tensor")
tensor_datasets = {"train": [], "valid": []}
for dataset_name, dataset in datasets.items():
dataset = pad_dataset(dataset, padding=tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-1]))
for input_name in MODEL_INPUTS:
tensor = torch.tensor(dataset[input_name])
if input_name != "sc_labels":
tensor = tensor.view((-1, datasets[dataset_name]["n_candidates"]) + tensor.shape[1:])
tensor_datasets[dataset_name].append(tensor)
logger.info("Build train and validation dataloaders")
train_dataset, valid_dataset = TensorDataset(*tensor_datasets["train"]), TensorDataset(*tensor_datasets["valid"])
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if config.distributed else None
valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if config.distributed else None
train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=config.train_batch_size, shuffle=False)
valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=config.valid_batch_size, shuffle=False)
logger.info("Train dataset (Batch, Candidates, Seq length): {}".format(train_dataset.tensors[0].shape))
logger.info("Valid dataset (Batch, Candidates, Seq length): {}".format(valid_dataset.tensors[0].shape))
return train_loader, valid_loader, train_sampler, valid_sampler
def train():
config_file = "configs/train_multihead_config.json"
config = Config.from_json_file(config_file)
ec_coef = 1
sc_coef = 1
# logging is set to INFO (resp. WARN) for main (resp. auxiliary) process. logger.info => log main process only, logger.warning => log all processes
logging.basicConfig(level=logging.INFO if config.local_rank in [-1, 0] else logging.WARN)
logger.warning("Running process %d",
config.local_rank) # This is a logger.warning: it will be printed by all distributed processes
logger.info("Arguments: %s", pformat(config))
# Initialize distributed training if needed
config.distributed = (config.local_rank != -1)
if config.distributed:
torch.cuda.set_device(config.local_rank)
config.device = torch.device("cuda", config.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
logger.info("Prepare tokenizer, pretrained model and optimizer - add special tokens for fine-tuning")
tokenizer_class = OpenAIGPTTokenizer
tokenizer = tokenizer_class.from_pretrained(config.model_checkpoint)
model_class = OpenAIGPTMultiHeadModel
model = model_class.from_pretrained(config.model_checkpoint)
tokenizer.set_special_tokens(SPECIAL_TOKENS)
model.set_num_special_tokens(len(SPECIAL_TOKENS))
model.to(config.device)
optimizer = OpenAIAdam(model.parameters(), lr=config.lr)
# Prepare model for FP16 and distributed training if needed (order is important, distributed should be the last)
if config.fp16:
from apex import amp # Apex is only required if we use fp16 training
model, optimizer = amp.initialize(model, optimizer, opt_level=config.fp16)
if config.distributed:
model = DistributedDataParallel(model, device_ids=[config.local_rank], output_device=config.local_rank)
logger.info("Prepare datasets")
train_loader, val_loader, train_sampler, valid_sampler = get_data_loaders(config, tokenizer)
# Training function and trainer
def update(engine, batch):
model.train()
# input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids, token_emotion_ids, token_action_ids = tuple(input_tensor.to(config.device) for input_tensor in batch)
input_ids, ec_token_ids, sc_token_ids, lm_labels, ec_labels, sc_labels, token_type_ids, token_emotion_ids, token_action_ids = tuple(
input_tensor.to(config.device) for input_tensor in batch)
lm_loss, emotion_loss, sentence_loss = model(input_ids, ec_token_ids, sc_token_ids,
lm_labels, ec_labels, sc_labels, token_type_ids,
token_emotion_ids, token_action_ids)
loss = (lm_loss * config.lm_coef + emotion_loss * ec_coef + sentence_loss * sc_coef) / config.gradient_accumulation_steps
if config.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.max_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), config.max_norm)
if engine.state.iteration % config.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return loss.item()
trainer = Engine(update)
# Evaluation function and evaluator (evaluator output is the input of the metrics)
def inference(engine, batch):
model.eval()
with torch.no_grad():
batch = tuple(input_tensor.to(config.device) for input_tensor in batch)
input_ids, ec_token_ids, sc_token_ids, lm_labels, ec_labels, \
sc_labels, token_type_ids, token_emotion_ids, token_action_ids = batch
# logger.info(tokenizer.decode(input_ids[0, -1, :].tolist()))
model_outputs = model(input_ids, ec_token_ids, sc_token_ids, token_type_ids=token_type_ids,
token_emotion_ids=token_emotion_ids,
token_action_ids=token_action_ids)
lm_logits, mc_logits = model_outputs[0], model_outputs[2] # So we can also use GPT2 outputs
lm_logits_flat_shifted = lm_logits[..., :-1, :].contiguous().view(-1, lm_logits.size(-1))
lm_labels_flat_shifted = lm_labels[..., 1:].contiguous().view(-1)
return (lm_logits_flat_shifted, mc_logits), (lm_labels_flat_shifted, sc_labels)
evaluator = Engine(inference)
# Attach evaluation to trainer: we evaluate when we start the training and at the end of each epoch
trainer.add_event_handler(Events.EPOCH_COMPLETED, lambda _: evaluator.run(val_loader))
if config.n_epochs < 1:
trainer.add_event_handler(Events.COMPLETED, lambda _: evaluator.run(val_loader))
if config.eval_before_start:
trainer.add_event_handler(Events.STARTED, lambda _: evaluator.run(val_loader))
# Make sure distributed data samplers split the dataset nicely between the distributed processes
if config.distributed:
trainer.add_event_handler(Events.EPOCH_STARTED, lambda engine: train_sampler.set_epoch(engine.state.epoch))
evaluator.add_event_handler(Events.EPOCH_STARTED, lambda engine: valid_sampler.set_epoch(engine.state.epoch))
# Linearly decrease the learning rate from lr to zero
scheduler = PiecewiseLinear(optimizer, "lr", [(0, config.lr), (config.n_epochs * len(train_loader), 0.0)])
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
# Prepare metrics - note how we compute distributed metrics
RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
metrics = {"nll": Loss(torch.nn.CrossEntropyLoss(ignore_index=-1), output_transform=lambda x: (x[0][0], x[1][0])),
"accuracy": Accuracy(output_transform=lambda x: (x[0][1], x[1][1]))}
metrics.update({"average_nll": MetricsLambda(average_distributed_scalar, metrics["nll"], config),
"average_accuracy": MetricsLambda(average_distributed_scalar, metrics["accuracy"], config)})
metrics["average_ppl"] = MetricsLambda(math.exp, metrics["average_nll"])
for name, metric in metrics.items():
metric.attach(evaluator, name)
# On the main process: add progress bar, tensorboard, checkpoints and save model, configuration and tokenizer before we start to train
if config.local_rank in [-1, 0]:
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names=["loss"])
evaluator.add_event_handler(Events.COMPLETED,
lambda _: pbar.log_message("Validation: %s" % pformat(evaluator.state.metrics)))
tb_logger = TensorboardLogger(log_dir=config.log_dir)
tb_logger.attach(trainer, log_handler=OutputHandler(tag="training", metric_names=["loss"]),
event_name=Events.ITERATION_COMPLETED)
tb_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED)
tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metric_names=list(metrics.keys()),
another_engine=trainer),
event_name=Events.EPOCH_COMPLETED)
checkpoint_handler = ModelCheckpoint(tb_logger.writer.log_dir, 'checkpoint', save_interval=1, n_saved=3)
trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {
            'mymodel': getattr(model, 'module', model)})  # "getattr" takes care of distributed encapsulation
torch.save(config, tb_logger.writer.log_dir + '/model_training_args.bin')
getattr(model, 'module', model).config.to_json_file(os.path.join(tb_logger.writer.log_dir, CONFIG_NAME))
tokenizer.save_vocabulary(tb_logger.writer.log_dir)
# Run the training
trainer.run(train_loader, max_epochs=config.n_epochs)
# On the main process: close tensorboard logger and rename the last checkpoint (for easy re-loading with OpenAIGPTModel.from_pretrained method)
if config.local_rank in [-1, 0] and config.n_epochs > 0:
os.rename(checkpoint_handler._saved[-1][1][-1], os.path.join(tb_logger.writer.log_dir,
WEIGHTS_NAME)) # TODO: PR in ignite to have better access to saved file paths (cleaner)
tb_logger.close()
if __name__ == "__main__":
train()
| 17,664 | 55.800643 | 174 | py |
EmpTransfo | EmpTransfo-master/eval_emotion_recognition.py | # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree.
import logging
from pprint import pformat
from collections import defaultdict
from itertools import chain
import torch
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from config import Config
from pytorch_pretrained_bert import (OpenAIAdam, OpenAIGPTDoubleHeadLMEmotionRecognitionModel, OpenAIGPTTokenizer,
GPT2DoubleHeadsModel, GPT2Tokenizer, WEIGHTS_NAME, CONFIG_NAME,
BertModel, BertTokenizer)
from utils import get_dataset, get_dataset_for_daily_dialog
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>",
"<no_emotion>", "<happiness>", "<surprise>", "<sadness>", "<disgust>", "<anger>", "<fear>",
"<directive>", "<inform>", "<commissive>", "<question>",
"<pad>"]
MODEL_INPUTS = ["input_ids", "mc_token_ids", "lm_labels", "mc_labels", "token_type_ids", "token_emotion_ids"]
PADDED_INPUTS = ["input_ids", "lm_labels", "token_type_ids", "token_emotion_ids"]
logger = logging.getLogger(__file__)
def average_distributed_scalar(scalar, config):
""" Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. """
if config.local_rank == -1:
return scalar
scalar_t = torch.tensor(scalar, dtype=torch.float, device=config.device) / torch.distributed.get_world_size()
torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM)
return scalar_t.item()
def pad_dataset(dataset, padding=0):
""" Pad the dataset. This could be optimized by defining a Dataset class and padd only batches but this is simpler. """
max_l = max(len(x) for x in dataset["input_ids"])
for name in PADDED_INPUTS:
dataset[name] = [x + [padding if name != "lm_labels" else -1] * (max_l - len(x)) for x in dataset[name]]
return dataset
def get_emotion_label(tokenizer, candidate_emotion):
_, _, _, _, no_emotion_id, happiness_id, surprise_id, sadness_id, disgust_id, anger_id, fear_id, _, _, _, _, _ = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
if candidate_emotion == happiness_id:
return 0
elif candidate_emotion == surprise_id:
return 1
elif candidate_emotion == sadness_id:
return 2
elif candidate_emotion == disgust_id:
return 3
elif candidate_emotion == anger_id:
return 4
elif candidate_emotion == fear_id:
return 5
elif candidate_emotion == no_emotion_id:
return 6
def build_input_from_segments(history, emotions, reply, true_emotion, tokenizer, with_eos=True):
""" Build a sequence of input from 3 segments: persona, history and last reply """
bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:4])
#tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-1])
instance = {}
# sequence = [[bos] + history[0] + list(chain(*history[1:]))] + [reply + ([eos] if with_eos else [])] #seq = [personas, history, reply] concatenate all persona sentences
sequence = [[bos] + history[0]] + history[1:] + [reply + ([eos] if with_eos else [])]
sequence = [[speaker2 if (len(sequence)-i) % 2 else speaker1] + s for i, s in enumerate(sequence)]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s] # the last for is for repeating the speaker1 and speaker2 for all tokens
#instance["token_emotion_ids"] = [emotions[i] for i, s in enumerate(sequence[:-1]) for _ in s] + [true_emotion] * len(sequence[-1])
instance["token_emotion_ids"] = [emotions[i] for i, s in enumerate(sequence[:-1]) for _ in s]
instance["mc_token_ids"] = len(instance["input_ids"]) - 1
instance["mc_labels"] = get_emotion_label(tokenizer, true_emotion)
instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:] #all -1 except for reply, reply is just the ids
return instance, sequence
def get_data_loaders(config, tokenizer):
""" Prepare the dataset for training and evaluation """
personachat = get_dataset_for_daily_dialog(tokenizer, config.dataset_path, config.dataset_cache, SPECIAL_TOKENS)
#personachat["train"] = personachat["train"][:100]
#personachat["valid"] = personachat["valid"][:10]
logger.info("Build inputs and labels")
datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
c = 0
for dataset_name, dataset in personachat.items():
        num_candidates = 2  # len(dataset[0]["utterances"][0]["candidates"])
if config.num_candidates > 0 and dataset_name == 'train':
num_candidates = min(config.num_candidates, num_candidates)
for dialog in dataset:
for utterance in dialog["utterances"]:
history = utterance["history"][-(2 * config.max_history + 1):]
emotions = utterance["emotion"][-(2 * config.max_history + 1):]
reply = utterance["candidates"][-1]
true_emotion = utterance['candidates_emotions'][-1]
if true_emotion == tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)[4]:
continue
instance, _ = build_input_from_segments(history,
emotions,
reply,
true_emotion,
tokenizer)
if len(instance["input_ids"]) > 310:
truncated_history = [hist[:10] for hist in history]
truncated_candidate = reply[:10]
true_emotion = utterance['candidates_emotions'][-1]
instance, _ = build_input_from_segments(truncated_history,
emotions,
truncated_candidate,
true_emotion,
tokenizer)
c+=1
for input_name, input_array in instance.items():
datasets[dataset_name][input_name].append(input_array)
#datasets[dataset_name]["mc_labels"].append(num_candidates - 1)
datasets[dataset_name]["n_candidates"] = num_candidates
print(c)
logger.info("Pad inputs and convert to Tensor")
tensor_datasets = {"train": [], "valid": []}
for dataset_name, dataset in datasets.items():
dataset = pad_dataset(dataset, padding=tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-1]))
for input_name in MODEL_INPUTS:
tensor = torch.tensor(dataset[input_name])
#if input_name != "mc_labels":
# tensor = tensor.view((-1, datasets[dataset_name]["n_candidates"]) + tensor.shape[1:])
tensor_datasets[dataset_name].append(tensor)
logger.info("Build train and validation dataloaders")
train_dataset, valid_dataset = TensorDataset(*tensor_datasets["train"]), TensorDataset(*tensor_datasets["valid"])
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if config.distributed else None
valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if config.distributed else None
train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=config.train_batch_size, shuffle=False)
valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=config.valid_batch_size, shuffle=False)
logger.info("Train dataset (Batch, Candidates, Seq length): {}".format(train_dataset.tensors[0].shape))
logger.info("Valid dataset (Batch, Candidates, Seq length): {}".format(valid_dataset.tensors[0].shape))
return train_loader, valid_loader, train_sampler, valid_sampler
def train():
config_file = "configs/train_full_pipeline_config.json"
config = Config.from_json_file(config_file)
# logging is set to INFO (resp. WARN) for main (resp. auxiliary) process. logger.info => log main process only, logger.warning => log all processes
logging.basicConfig(level=logging.INFO if config.local_rank in [-1, 0] else logging.WARN)
logger.warning("Running process %d", config.local_rank) # This is a logger.warning: it will be printed by all distributed processes
logger.info("Arguments: %s", pformat(config))
# Initialize distributed training if needed
config.distributed = (config.local_rank != -1)
if config.distributed:
torch.cuda.set_device(config.local_rank)
config.device = torch.device("cuda", config.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
logger.info("Prepare tokenizer, pretrained model and optimizer - add special tokens for fine-tuning")
tokenizer_class = GPT2Tokenizer if "gpt2" in config.model_checkpoint else OpenAIGPTTokenizer
tokenizer = tokenizer_class.from_pretrained(config.model_checkpoint)
model_class = GPT2DoubleHeadsModel if "gpt2" in config.model_checkpoint else OpenAIGPTDoubleHeadLMEmotionRecognitionModel
model = model_class.from_pretrained(config.model_checkpoint)
tokenizer.set_special_tokens(SPECIAL_TOKENS)
model.set_num_special_tokens(len(SPECIAL_TOKENS))
model.to(config.device)
optimizer = OpenAIAdam(model.parameters(), lr=config.lr)
# Prepare model for FP16 and distributed training if needed (order is important, distributed should be the last)
if config.fp16:
from apex import amp # Apex is only required if we use fp16 training
model, optimizer = amp.initialize(model, optimizer, opt_level=config.fp16)
if config.distributed:
model = DistributedDataParallel(model, device_ids=[config.local_rank], output_device=config.local_rank)
logger.info("Prepare datasets")
train_loader, val_loader, train_sampler, valid_sampler = get_data_loaders(config, tokenizer)
# Evaluation function and evaluator (evaluator output is the input of the metrics)
model.eval()
num_correct = 0
    num_all = len(val_loader.dataset)  # count examples rather than batches so the printed accuracy is correct for any batch size
for batch in val_loader:
with torch.no_grad():
batch = tuple(input_tensor.to(config.device) for input_tensor in batch)
input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids, token_emotion_ids = batch
model_outputs = model(input_ids, mc_token_ids, token_type_ids=token_type_ids, token_emotion_ids=token_emotion_ids)
lm_logits, mc_logits = model_outputs[0], model_outputs[1] # So we can also use GPT2 outputs
indices = torch.argmax(mc_logits, dim=1)
correct = torch.eq(indices, mc_labels).view(-1)
num_correct += torch.sum(correct).item()
print(num_correct / num_all)
if __name__ == "__main__":
train()
| 11,203 | 52.607656 | 182 | py |