python_code | repo_name | file_path
---|---|---|
from setuptools import find_packages, setup
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
# version.py defines the VERSION and VERSION_SHORT variables.
# We use exec here so we don't import allennlp whilst setting up.
VERSION = {} # type: ignore
with open("allennlp/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
setup(
name="allennlp",
version=VERSION["VERSION"],
description="An open-source NLP research library, built on PyTorch.",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="allennlp NLP deep learning machine reading",
url="https://github.com/allenai/allennlp",
author="Allen Institute for Artificial Intelligence",
author_email="[email protected]",
license="Apache",
packages=find_packages(
exclude=[
"*.tests",
"*.tests.*",
"tests.*",
"tests",
"test_fixtures",
"test_fixtures.*",
"benchmarks",
"benchmarks.*",
]
),
install_requires=[
"torch>=1.6.0,<1.8.0",
"jsonnet>=0.10.0 ; sys.platform != 'win32'",
"overrides==3.1.0",
"nltk",
"spacy>=2.1.0,<2.4",
"numpy",
"tensorboardX>=1.2",
"boto3>=1.14,<2.0",
"requests>=2.18",
"tqdm>=4.19",
"h5py",
"scikit-learn",
"scipy",
"pytest",
"transformers>=4.0,<4.1",
"sentencepiece",
"jsonpickle",
"dataclasses;python_version<'3.7'",
"filelock>=3.0,<3.1",
],
entry_points={"console_scripts": ["allennlp=allennlp.__main__:run"]},
include_package_data=True,
python_requires=">=3.6.1",
zip_safe=False,
)
| allennlp-master | setup.py |
| allennlp-master | test_fixtures/__init__.py |
from d.d import D
| allennlp-master | test_fixtures/plugins/d/__init__.py |
import argparse
from overrides import overrides
from allennlp.commands import Subcommand
def do_nothing(_):
pass
@Subcommand.register("d")
class D(Subcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
subparser = parser.add_parser(self.name, description="fake", help="fake help")
subparser.set_defaults(func=do_nothing)
return subparser
| allennlp-master | test_fixtures/plugins/d/d.py |
import os
_MAJOR = "1"
_MINOR = "3"
# On master and in a nightly release the patch should be one ahead of the last
# released build.
_PATCH = "0"
# This is mainly for nightly builds which have the suffix ".dev$DATE". See
# https://semver.org/#is-v123-a-semantic-version for the semantics.
_SUFFIX = os.environ.get("ALLENNLP_VERSION_SUFFIX", "")
VERSION_SHORT = "{0}.{1}".format(_MAJOR, _MINOR)
VERSION = "{0}.{1}.{2}{3}".format(_MAJOR, _MINOR, _PATCH, _SUFFIX)
| allennlp-master | allennlp/version.py |
# Make sure that allennlp is running on Python 3.6.1 or later
# (to avoid running into this bug: https://bugs.python.org/issue29246)
import sys
if sys.version_info < (3, 6, 1):
raise RuntimeError("AllenNLP requires Python 3.6.1 or later")
# We get a lot of these spurious warnings,
# see https://github.com/ContinuumIO/anaconda-issues/issues/6678
import warnings # noqa
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
try:
# On some systems this prevents the dreaded
# ImportError: dlopen: cannot load any more object with static TLS
import transformers, spacy, torch, numpy # noqa
except ModuleNotFoundError:
print(
"Using AllenNLP requires the python packages Spacy, "
"Pytorch and Numpy to be installed. Please see "
"https://github.com/allenai/allennlp for installation instructions."
)
raise
from allennlp.version import VERSION as __version__ # noqa
| allennlp-master | allennlp/__init__.py |
#!/usr/bin/env python
import logging
import os
import sys
if os.environ.get("ALLENNLP_DEBUG"):
LEVEL = logging.DEBUG
else:
level_name = os.environ.get("ALLENNLP_LOG_LEVEL", "INFO")
LEVEL = logging._nameToLevel.get(level_name, logging.INFO)
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=LEVEL)
# filelock emits too many messages, so tell it to be quiet unless it has something
# important to say.
logging.getLogger("filelock").setLevel(logging.WARNING)
# transformers emits an annoying log message every time it's imported, so we filter that
# one message out specifically.
def _transformers_log_filter(record):
if record.msg.startswith("PyTorch version"):
return False
return True
logging.getLogger("transformers.file_utils").addFilter(_transformers_log_filter)
from allennlp.commands import main # noqa
def run():
main(prog="allennlp")
if __name__ == "__main__":
run()
| allennlp-master | allennlp/__main__.py |
| allennlp-master | allennlp/tools/__init__.py |
import os
from allennlp.common.file_utils import CACHE_DIRECTORY
from allennlp.common.file_utils import filename_to_url
def main():
print(f"Looking for datasets in {CACHE_DIRECTORY}...")
    if not os.path.exists(CACHE_DIRECTORY):
        print("Directory does not exist.")
        print("No cached datasets found.")
        return
    cached_files = os.listdir(CACHE_DIRECTORY)
    if not cached_files:
        print("Directory is empty.")
        print("No cached datasets found.")
        return
for filename in cached_files:
if not filename.endswith("json"):
url, etag = filename_to_url(filename)
print("Filename: %s" % filename)
print("Url: %s" % url)
print("ETag: %s" % etag)
print()
if __name__ == "__main__":
main()
| allennlp-master | allennlp/tools/inspect_cache.py |
#! /usr/bin/env python
"""
Helper script for modifying config.json files that are locked inside
model.tar.gz archives. This is useful if you need to rename things or
add or remove values, usually because of changes to the library.
This script will untar the archive to a temp directory, launch an editor
to modify the config.json, and then re-tar everything to a new archive.
If your $EDITOR environment variable is not set, you'll have to explicitly
specify which editor to use.
"""
import argparse
import atexit
import logging
import os
import shutil
import subprocess
import tempfile
import tarfile
from allennlp.common.file_utils import cached_path
from allennlp.models.archival import CONFIG_NAME
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
def main():
parser = argparse.ArgumentParser(
description="Perform surgery on a model.tar.gz archive",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("--input-file", required=True, help="path to input file")
parser.add_argument(
"--editor",
default=os.environ.get("EDITOR"),
help="editor to launch, whose default value is `$EDITOR` the environment variable",
)
output = parser.add_mutually_exclusive_group()
output.add_argument("--output-file", help="path to output file")
output.add_argument(
"--inplace",
action="store_true",
help="overwrite the input file with the modified configuration",
)
parser.add_argument(
"-f", "--force", action="store_true", help="overwrite the output file if it exists"
)
args = parser.parse_args()
if args.editor is None:
raise RuntimeError("please specify an editor or set the $EDITOR environment variable")
if not args.inplace and os.path.exists(args.output_file) and not args.force:
raise ValueError("output file already exists, use --force to override")
archive_file = cached_path(args.input_file)
if not os.path.exists(archive_file):
raise ValueError("input file doesn't exist")
if args.inplace:
output_file = archive_file
else:
output_file = args.output_file
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
with tarfile.open(archive_file, "r:gz") as archive:
archive.extractall(tempdir)
atexit.register(lambda: shutil.rmtree(tempdir))
config_path = os.path.join(tempdir, CONFIG_NAME)
subprocess.run([args.editor, config_path], check=False)
with tarfile.open(output_file, "w:gz") as tar:
tar.add(tempdir, arcname=os.path.sep)
if __name__ == "__main__":
main()
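# A hypothetical invocation for illustration (the paths and editor below are
# placeholders, not part of the original script):
#
#   python archive_surgery.py --input-file /path/to/model.tar.gz \
#       --output-file /path/to/edited-model.tar.gz --editor vim
#
# This opens the archive's config.json in the chosen editor and writes the
# re-tarred result to the output path; pass --inplace instead of --output-file
# to overwrite the input archive.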
| allennlp-master | allennlp/tools/archive_surgery.py |
import argparse
import gzip
import os
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.data import Token, Vocabulary
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
from allennlp.data.vocabulary import DEFAULT_OOV_TOKEN
from allennlp.modules.elmo import _ElmoCharacterEncoder
def main(
vocab_path: str,
elmo_config_path: str,
elmo_weights_path: str,
output_dir: str,
batch_size: int,
device: int,
use_custom_oov_token: bool = False,
):
"""
Creates ELMo word representations from a vocabulary file. These
word representations are _independent_ - they are the result of running
the CNN and Highway layers of the ELMo model, but not the Bidirectional LSTM.
ELMo requires 2 additional tokens: <S> and </S>. The first token
in this file is assumed to be an unknown token.
This script produces two artifacts: A new vocabulary file
with the <S> and </S> tokens inserted and a glove formatted embedding
file containing word : vector pairs, one per line, with all values
separated by a space.
"""
# Load the vocabulary words and convert to char ids
with open(vocab_path, "r") as vocab_file:
tokens = vocab_file.read().strip().split("\n")
# Insert the sentence boundary tokens which elmo uses at positions 1 and 2.
if tokens[0] != DEFAULT_OOV_TOKEN and not use_custom_oov_token:
raise ConfigurationError("ELMo embeddings require the use of a OOV token.")
tokens = [tokens[0]] + ["<S>", "</S>"] + tokens[1:]
indexer = ELMoTokenCharactersIndexer()
indices = indexer.tokens_to_indices([Token(token) for token in tokens], Vocabulary())["tokens"]
sentences = []
for k in range((len(indices) // 50) + 1):
sentences.append(
indexer.as_padded_tensor_dict(
indices[(k * 50) : ((k + 1) * 50)], padding_lengths={"tokens": 50}
)
)
last_batch_remainder = 50 - (len(indices) % 50)
if device != -1:
elmo_token_embedder = _ElmoCharacterEncoder(elmo_config_path, elmo_weights_path).cuda(
device
)
else:
elmo_token_embedder = _ElmoCharacterEncoder(elmo_config_path, elmo_weights_path)
all_embeddings = []
for i in range((len(sentences) // batch_size) + 1):
batch = torch.stack(sentences[i * batch_size : (i + 1) * batch_size])
if device != -1:
batch = batch.cuda(device)
token_embedding = elmo_token_embedder(batch)["token_embedding"].data
# Reshape back to a list of words of shape (batch_size * 50, encoding_dim)
# We also need to remove the <S>, </S> tokens appended by the encoder.
per_word_embeddings = (
token_embedding[:, 1:-1, :].contiguous().view(-1, token_embedding.size(-1))
)
all_embeddings.append(per_word_embeddings)
# Remove the embeddings associated with padding in the last batch.
all_embeddings[-1] = all_embeddings[-1][:-last_batch_remainder, :]
embedding_weight = torch.cat(all_embeddings, 0).cpu().numpy()
# Write out the embedding in a glove format.
os.makedirs(output_dir, exist_ok=True)
with gzip.open(os.path.join(output_dir, "elmo_embeddings.txt.gz"), "wb") as embeddings_file:
for i, word in enumerate(tokens):
string_array = " ".join(str(x) for x in list(embedding_weight[i, :]))
embeddings_file.write(f"{word} {string_array}\n".encode("utf-8"))
# Write out the new vocab with the <S> and </S> tokens.
_, vocab_file_name = os.path.split(vocab_path)
with open(os.path.join(output_dir, vocab_file_name), "w") as new_vocab_file:
for word in tokens:
new_vocab_file.write(f"{word}\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate CNN representations for a vocabulary using ELMo",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--vocab_path",
type=str,
help="A path to a vocabulary file to generate representations for.",
)
parser.add_argument(
"--elmo_config", type=str, help="The path to a directory containing an ELMo config file."
)
parser.add_argument(
"--elmo_weights", type=str, help="The path to a directory containing an ELMo weight file."
)
parser.add_argument(
"--output_dir", type=str, help="The output directory to store the serialised embeddings."
)
parser.add_argument("--batch_size", type=int, default=64, help="The batch size to use.")
parser.add_argument("--device", type=int, default=-1, help="The device to run on.")
parser.add_argument(
"--use_custom_oov_token",
type=bool,
default=False,
help="AllenNLP requires a particular OOV token."
"To generate embeddings with a custom OOV token,"
"add this flag.",
)
args = parser.parse_args()
main(
args.vocab_path,
args.elmo_config,
args.elmo_weights,
args.output_dir,
args.batch_size,
args.device,
args.use_custom_oov_token,
)
| allennlp-master | allennlp/tools/create_elmo_embeddings_from_vocab.py |
"""
Assorted utilities for working with neural networks in AllenNLP.
"""
import copy
import json
import logging
from collections import defaultdict
from typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
import math
import numpy
import torch
from allennlp.common.checks import ConfigurationError
logger = logging.getLogger(__name__)
T = TypeVar("T")
def has_tensor(obj) -> bool:
"""
Given a possibly complex data structure,
check if it has any torch.Tensors in it.
"""
if isinstance(obj, torch.Tensor):
return True
elif isinstance(obj, dict):
return any(has_tensor(value) for value in obj.values())
elif isinstance(obj, (list, tuple)):
return any(has_tensor(item) for item in obj)
else:
return False
def move_to_device(obj, cuda_device: Union[torch.device, int]):
"""
Given a structure (possibly) containing Tensors on the CPU,
move all the Tensors to the specified GPU (or do nothing, if they should be on the CPU).
"""
from allennlp.common.util import int_to_device
cuda_device = int_to_device(cuda_device)
if cuda_device == torch.device("cpu") or not has_tensor(obj):
return obj
elif isinstance(obj, torch.Tensor):
return obj.cuda(cuda_device)
elif isinstance(obj, dict):
return {key: move_to_device(value, cuda_device) for key, value in obj.items()}
elif isinstance(obj, list):
return [move_to_device(item, cuda_device) for item in obj]
elif isinstance(obj, tuple) and hasattr(obj, "_fields"):
# This is the best way to detect a NamedTuple, it turns out.
return obj.__class__(*(move_to_device(item, cuda_device) for item in obj))
elif isinstance(obj, tuple):
return tuple(move_to_device(item, cuda_device) for item in obj)
else:
return obj
def clamp_tensor(tensor, minimum, maximum):
"""
Supports sparse and dense tensors.
Returns a tensor with values clamped between the provided minimum and maximum,
without modifying the original tensor.
"""
if tensor.is_sparse:
coalesced_tensor = tensor.coalesce()
coalesced_tensor._values().clamp_(minimum, maximum)
return coalesced_tensor
else:
return tensor.clamp(minimum, maximum)
def batch_tensor_dicts(
tensor_dicts: List[Dict[str, torch.Tensor]], remove_trailing_dimension: bool = False
) -> Dict[str, torch.Tensor]:
"""
Takes a list of tensor dictionaries, where each dictionary is assumed to have matching keys,
and returns a single dictionary with all tensors with the same key batched together.
# Parameters
tensor_dicts : `List[Dict[str, torch.Tensor]]`
The list of tensor dictionaries to batch.
remove_trailing_dimension : `bool`
If `True`, we will check for a trailing dimension of size 1 on the tensors that are being
batched, and remove it if we find it.
"""
key_to_tensors: Dict[str, List[torch.Tensor]] = defaultdict(list)
for tensor_dict in tensor_dicts:
for key, tensor in tensor_dict.items():
key_to_tensors[key].append(tensor)
batched_tensors = {}
for key, tensor_list in key_to_tensors.items():
batched_tensor = torch.stack(tensor_list)
if remove_trailing_dimension and all(tensor.size(-1) == 1 for tensor in tensor_list):
batched_tensor = batched_tensor.squeeze(-1)
batched_tensors[key] = batched_tensor
return batched_tensors
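# Illustrative sketch (not part of the upstream AllenNLP source): the `_example_*`
# helper below shows `batch_tensor_dicts` stacking per-instance tensor dicts with
# matching keys into batched tensors.
def _example_batch_tensor_dicts():
    instance_1 = {"tokens": torch.tensor([1, 2, 3]), "mask": torch.tensor([1, 1, 1])}
    instance_2 = {"tokens": torch.tensor([4, 5, 0]), "mask": torch.tensor([1, 1, 0])}
    batched = batch_tensor_dicts([instance_1, instance_2])
    # Each value is stacked along a new batch dimension: shape (2, 3) for both keys.
    return batched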
def get_lengths_from_binary_sequence_mask(mask: torch.BoolTensor) -> torch.LongTensor:
"""
Compute sequence lengths for each batch element in a tensor using a
binary mask.
# Parameters
mask : `torch.BoolTensor`, required.
A 2D binary mask of shape (batch_size, sequence_length) to
calculate the per-batch sequence lengths from.
# Returns
`torch.LongTensor`
A torch.LongTensor of shape (batch_size,) representing the lengths
of the sequences in the batch.
"""
return mask.sum(-1)
def get_mask_from_sequence_lengths(
sequence_lengths: torch.Tensor, max_length: int
) -> torch.BoolTensor:
"""
Given a variable of shape `(batch_size,)` that represents the sequence lengths of each batch
element, this function returns a `(batch_size, max_length)` mask variable. For example, if
our input was `[2, 2, 3]`, with a `max_length` of 4, we'd return
`[[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]`.
We require `max_length` here instead of just computing it from the input `sequence_lengths`
because it lets us avoid finding the max, then copying that value from the GPU to the CPU so
that we can use it to construct a new tensor.
"""
# (batch_size, max_length)
ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)
range_tensor = ones.cumsum(dim=1)
return sequence_lengths.unsqueeze(1) >= range_tensor
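# Illustrative sketch (not part of the upstream AllenNLP source), reproducing the
# `[2, 2, 3]` example from the docstring above.
def _example_get_mask_from_sequence_lengths():
    lengths = torch.tensor([2, 2, 3])
    mask = get_mask_from_sequence_lengths(lengths, max_length=4)
    # mask == [[True, True, False, False],
    #          [True, True, False, False],
    #          [True, True, True, False]]
    return mask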
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor):
"""
Sort a batch first tensor by some specified lengths.
# Parameters
tensor : `torch.FloatTensor`, required.
A batch first Pytorch tensor.
sequence_lengths : `torch.LongTensor`, required.
A tensor representing the lengths of some dimension of the tensor which
we want to sort by.
# Returns
sorted_tensor : `torch.FloatTensor`
The original tensor sorted along the batch dimension with respect to sequence_lengths.
sorted_sequence_lengths : `torch.LongTensor`
The original sequence_lengths sorted by decreasing size.
restoration_indices : `torch.LongTensor`
Indices into the sorted_tensor such that
`sorted_tensor.index_select(0, restoration_indices) == original_tensor`
permutation_index : `torch.LongTensor`
The indices used to sort the tensor. This is useful if you want to sort many
tensors using the same ordering.
"""
if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor):
raise ConfigurationError("Both the tensor and sequence lengths must be torch.Tensors.")
sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True)
sorted_tensor = tensor.index_select(0, permutation_index)
index_range = torch.arange(0, len(sequence_lengths), device=sequence_lengths.device)
# This is the equivalent of zipping with index, sorting by the original
# sequence lengths and returning the now sorted indices.
_, reverse_mapping = permutation_index.sort(0, descending=False)
restoration_indices = index_range.index_select(0, reverse_mapping)
return sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index
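# Illustrative sketch (not part of the upstream AllenNLP source), showing how the
# restoration indices undo the length-based sort.
def _example_sort_batch_by_length():
    tensor = torch.rand(3, 5, 7)  # (batch_size, num_timesteps, dim)
    lengths = torch.tensor([3, 5, 2])
    sorted_tensor, sorted_lengths, restoration_indices, _ = sort_batch_by_length(tensor, lengths)
    # sorted_lengths is [5, 3, 2]; selecting with restoration_indices recovers the input order.
    assert torch.equal(sorted_tensor.index_select(0, restoration_indices), tensor)
    return sorted_tensor, sorted_lengths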
def get_final_encoder_states(
encoder_outputs: torch.Tensor, mask: torch.BoolTensor, bidirectional: bool = False
) -> torch.Tensor:
"""
Given the output from a `Seq2SeqEncoder`, with shape `(batch_size, sequence_length,
encoding_dim)`, this method returns the final hidden state for each element of the batch,
giving a tensor of shape `(batch_size, encoding_dim)`. This is not as simple as
`encoder_outputs[:, -1]`, because the sequences could have different lengths. We use the
mask (which has shape `(batch_size, sequence_length)`) to find the final state for each batch
instance.
Additionally, if `bidirectional` is `True`, we will split the final dimension of the
`encoder_outputs` into two and assume that the first half is for the forward direction of the
encoder and the second half is for the backward direction. We will concatenate the last state
for each encoder dimension, giving `encoder_outputs[:, -1, :encoding_dim/2]` concatenated with
`encoder_outputs[:, 0, encoding_dim/2:]`.
"""
# These are the indices of the last words in the sequences (i.e. length sans padding - 1). We
# are assuming sequences are right padded.
# Shape: (batch_size,)
last_word_indices = mask.sum(1) - 1
batch_size, _, encoder_output_dim = encoder_outputs.size()
expanded_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)
# Shape: (batch_size, 1, encoder_output_dim)
final_encoder_output = encoder_outputs.gather(1, expanded_indices)
final_encoder_output = final_encoder_output.squeeze(1) # (batch_size, encoder_output_dim)
if bidirectional:
final_forward_output = final_encoder_output[:, : (encoder_output_dim // 2)]
final_backward_output = encoder_outputs[:, 0, (encoder_output_dim // 2) :]
final_encoder_output = torch.cat([final_forward_output, final_backward_output], dim=-1)
return final_encoder_output
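# Illustrative sketch (not part of the upstream AllenNLP source), showing that the
# last *unmasked* timestep is selected for each batch element.
def _example_get_final_encoder_states():
    encoder_outputs = torch.rand(2, 4, 6)  # (batch_size, sequence_length, encoding_dim)
    mask = torch.tensor([[True, True, True, False], [True, True, False, False]])
    final_states = get_final_encoder_states(encoder_outputs, mask)
    # final_states[0] == encoder_outputs[0, 2] and final_states[1] == encoder_outputs[1, 1];
    # the result has shape (2, 6).
    return final_states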
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.Tensor):
"""
Computes and returns an element-wise dropout mask for a given tensor, where
each element in the mask is dropped out with probability dropout_probability.
Note that the mask is NOT applied to the tensor - the tensor is passed to retain
the correct CUDA tensor type for the mask.
# Parameters
dropout_probability : `float`, required.
Probability of dropping a dimension of the input.
tensor_for_masking : `torch.Tensor`, required.
# Returns
`torch.FloatTensor`
A torch.FloatTensor consisting of the binary mask scaled by 1/ (1 - dropout_probability).
This scaling ensures expected values and variances of the output of applying this mask
and the original tensor are the same.
"""
binary_mask = (torch.rand(tensor_for_masking.size()) > dropout_probability).to(
tensor_for_masking.device
)
# Scale mask by 1/keep_prob to preserve output statistics.
dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
return dropout_mask
def masked_softmax(
vector: torch.Tensor,
mask: torch.BoolTensor,
dim: int = -1,
memory_efficient: bool = False,
) -> torch.Tensor:
"""
`torch.nn.functional.softmax(vector)` does not work if some elements of `vector` should be
masked. This performs a softmax on just the non-masked portions of `vector`. Passing
`None` in for the mask is also acceptable; you'll just get a regular softmax.
`vector` can have an arbitrary number of dimensions; the only requirement is that `mask` is
broadcastable to `vector's` shape. If `mask` has fewer dimensions than `vector`, we will
unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
do it yourself before passing the mask into this function.
If `memory_efficient` is set to true, we will simply use a very large negative number for those
masked positions so that the probabilities of those positions would be approximately 0.
    This is not exact mathematically, but it works for most cases and consumes less memory.
In the case that the input vector is completely masked and `memory_efficient` is false, this function
returns an array of `0.0`. This behavior may cause `NaN` if this is used as the last layer of
a model that uses categorical cross-entropy loss. Instead, if `memory_efficient` is true, this function
will treat every element as equal, and do softmax over equal numbers.
"""
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
if not memory_efficient:
# To limit numerical errors from large vector elements outside the mask, we zero these out.
result = torch.nn.functional.softmax(vector * mask, dim=dim)
result = result * mask
result = result / (
result.sum(dim=dim, keepdim=True) + tiny_value_of_dtype(result.dtype)
)
else:
masked_vector = vector.masked_fill(~mask, min_value_of_dtype(vector.dtype))
result = torch.nn.functional.softmax(masked_vector, dim=dim)
return result
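# Illustrative sketch (not part of the upstream AllenNLP source), showing that masked
# positions receive (approximately) zero probability.
def _example_masked_softmax():
    vector = torch.tensor([[1.0, 2.0, 3.0]])
    mask = torch.tensor([[True, True, False]])
    result = masked_softmax(vector, mask)
    # result is roughly [[0.269, 0.731, 0.0]]; the unmasked entries sum to 1.
    return result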
def masked_log_softmax(vector: torch.Tensor, mask: torch.BoolTensor, dim: int = -1) -> torch.Tensor:
"""
`torch.nn.functional.log_softmax(vector)` does not work if some elements of `vector` should be
masked. This performs a log_softmax on just the non-masked portions of `vector`. Passing
`None` in for the mask is also acceptable; you'll just get a regular log_softmax.
`vector` can have an arbitrary number of dimensions; the only requirement is that `mask` is
broadcastable to `vector's` shape. If `mask` has fewer dimensions than `vector`, we will
unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
do it yourself before passing the mask into this function.
In the case that the input vector is completely masked, the return value of this function is
arbitrary, but not `nan`. You should be masking the result of whatever computation comes out
of this in that case, anyway, so the specific values returned shouldn't matter. Also, the way
that we deal with this case relies on having single-precision floats; mixing half-precision
floats with fully-masked vectors will likely give you `nans`.
If your logits are all extremely negative (i.e., the max value in your logit vector is -50 or
lower), the way we handle masking here could mess you up. But if you've got logit values that
extreme, you've got bigger problems than this.
"""
if mask is not None:
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
# vector + mask.log() is an easy way to zero out masked elements in logspace, but it
# results in nans when the whole vector is masked. We need a very small value instead of a
# zero in the mask for these cases.
vector = vector + (mask + tiny_value_of_dtype(vector.dtype)).log()
return torch.nn.functional.log_softmax(vector, dim=dim)
def masked_max(
vector: torch.Tensor,
mask: torch.BoolTensor,
dim: int,
keepdim: bool = False,
) -> torch.Tensor:
"""
    To calculate the max along certain dimensions of a tensor, ignoring masked values.
# Parameters
vector : `torch.Tensor`
        The vector to calculate the max over.
mask : `torch.BoolTensor`
The mask of the vector. It must be broadcastable with vector.
dim : `int`
The dimension to calculate max
keepdim : `bool`
Whether to keep dimension
# Returns
`torch.Tensor`
        A `torch.Tensor` containing the maximum values.
"""
replaced_vector = vector.masked_fill(~mask, min_value_of_dtype(vector.dtype))
max_value, _ = replaced_vector.max(dim=dim, keepdim=keepdim)
return max_value
def masked_mean(
vector: torch.Tensor, mask: torch.BoolTensor, dim: int, keepdim: bool = False
) -> torch.Tensor:
"""
    To calculate the mean along certain dimensions of a tensor, ignoring masked values.
# Parameters
vector : `torch.Tensor`
The vector to calculate mean.
mask : `torch.BoolTensor`
The mask of the vector. It must be broadcastable with vector.
dim : `int`
The dimension to calculate mean
keepdim : `bool`
Whether to keep dimension
# Returns
`torch.Tensor`
        A `torch.Tensor` containing the mean values.
"""
replaced_vector = vector.masked_fill(~mask, 0.0)
value_sum = torch.sum(replaced_vector, dim=dim, keepdim=keepdim)
value_count = torch.sum(mask, dim=dim, keepdim=keepdim)
return value_sum / value_count.float().clamp(min=tiny_value_of_dtype(torch.float))
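# Illustrative sketch (not part of the upstream AllenNLP source), showing that masked
# entries are excluded from the mean.
def _example_masked_mean():
    vector = torch.tensor([[1.0, 2.0, 100.0]])
    mask = torch.tensor([[True, True, False]])
    # The masked 100.0 is ignored, so this returns tensor([1.5]).
    return masked_mean(vector, mask, dim=-1)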
def masked_flip(padded_sequence: torch.Tensor, sequence_lengths: List[int]) -> torch.Tensor:
"""
Flips a padded tensor along the time dimension without affecting masked entries.
# Parameters
padded_sequence : `torch.Tensor`
The tensor to flip along the time dimension.
Assumed to be of dimensions (batch size, num timesteps, ...)
    sequence_lengths : `List[int]`
A list containing the lengths of each unpadded sequence in the batch.
# Returns
`torch.Tensor`
A `torch.Tensor` of the same shape as padded_sequence.
"""
assert padded_sequence.size(0) == len(
sequence_lengths
), f"sequence_lengths length ${len(sequence_lengths)} does not match batch size ${padded_sequence.size(0)}"
num_timesteps = padded_sequence.size(1)
flipped_padded_sequence = torch.flip(padded_sequence, [1])
sequences = [
flipped_padded_sequence[i, num_timesteps - length :]
for i, length in enumerate(sequence_lengths)
]
return torch.nn.utils.rnn.pad_sequence(sequences, batch_first=True)
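# Illustrative sketch (not part of the upstream AllenNLP source), showing that each
# sequence is reversed while its padding stays at the end.
def _example_masked_flip():
    padded = torch.tensor([[[1.0], [2.0], [3.0]], [[4.0], [5.0], [0.0]]])  # lengths 3 and 2
    flipped = masked_flip(padded, [3, 2])
    # flipped[0] is [[3.], [2.], [1.]] and flipped[1] is [[5.], [4.], [0.]].
    return flipped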
def viterbi_decode(
tag_sequence: torch.Tensor,
transition_matrix: torch.Tensor,
tag_observations: Optional[List[int]] = None,
allowed_start_transitions: torch.Tensor = None,
allowed_end_transitions: torch.Tensor = None,
top_k: int = None,
):
"""
Perform Viterbi decoding in log space over a sequence given a transition matrix
specifying pairwise (transition) potentials between tags and a matrix of shape
(sequence_length, num_tags) specifying unary potentials for possible tags per
timestep.
# Parameters
tag_sequence : `torch.Tensor`, required.
A tensor of shape (sequence_length, num_tags) representing scores for
a set of tags over a given sequence.
transition_matrix : `torch.Tensor`, required.
A tensor of shape (num_tags, num_tags) representing the binary potentials
for transitioning between a given pair of tags.
tag_observations : `Optional[List[int]]`, optional, (default = `None`)
A list of length `sequence_length` containing the class ids of observed
elements in the sequence, with unobserved elements being set to -1. Note that
it is possible to provide evidence which results in degenerate labelings if
the sequences of tags you provide as evidence cannot transition between each
other, or those transitions are extremely unlikely. In this situation we log a
warning, but the responsibility for providing self-consistent evidence ultimately
lies with the user.
allowed_start_transitions : `torch.Tensor`, optional, (default = `None`)
An optional tensor of shape (num_tags,) describing which tags the START token
may transition *to*. If provided, additional transition constraints will be used for
determining the start element of the sequence.
allowed_end_transitions : `torch.Tensor`, optional, (default = `None`)
An optional tensor of shape (num_tags,) describing which tags may transition *to* the
end tag. If provided, additional transition constraints will be used for determining
the end element of the sequence.
top_k : `int`, optional, (default = `None`)
        Optional integer specifying how many of the top paths to return. For top_k >= 1, returns
        a tuple of two lists: top_k_paths, top_k_scores. For top_k == None, returns a flattened
        tuple with just the top path and its score (not in lists, for backwards compatibility).
# Returns
viterbi_path : `List[int]`
The tag indices of the maximum likelihood tag sequence.
viterbi_score : `torch.Tensor`
The score of the viterbi path.
"""
if top_k is None:
top_k = 1
flatten_output = True
elif top_k >= 1:
flatten_output = False
else:
raise ValueError(f"top_k must be either None or an integer >=1. Instead received {top_k}")
sequence_length, num_tags = list(tag_sequence.size())
has_start_end_restrictions = (
allowed_end_transitions is not None or allowed_start_transitions is not None
)
if has_start_end_restrictions:
if allowed_end_transitions is None:
allowed_end_transitions = torch.zeros(num_tags)
if allowed_start_transitions is None:
allowed_start_transitions = torch.zeros(num_tags)
num_tags = num_tags + 2
new_transition_matrix = torch.zeros(num_tags, num_tags)
new_transition_matrix[:-2, :-2] = transition_matrix
# Start and end transitions are fully defined, but cannot transition between each other.
allowed_start_transitions = torch.cat(
[allowed_start_transitions, torch.tensor([-math.inf, -math.inf])]
)
allowed_end_transitions = torch.cat(
[allowed_end_transitions, torch.tensor([-math.inf, -math.inf])]
)
# First define how we may transition FROM the start and end tags.
new_transition_matrix[-2, :] = allowed_start_transitions
# We cannot transition from the end tag to any tag.
new_transition_matrix[-1, :] = -math.inf
new_transition_matrix[:, -1] = allowed_end_transitions
# We cannot transition to the start tag from any tag.
new_transition_matrix[:, -2] = -math.inf
transition_matrix = new_transition_matrix
if tag_observations:
if len(tag_observations) != sequence_length:
raise ConfigurationError(
"Observations were provided, but they were not the same length "
"as the sequence. Found sequence of length: {} and evidence: {}".format(
sequence_length, tag_observations
)
)
else:
tag_observations = [-1 for _ in range(sequence_length)]
if has_start_end_restrictions:
tag_observations = [num_tags - 2] + tag_observations + [num_tags - 1]
zero_sentinel = torch.zeros(1, num_tags)
extra_tags_sentinel = torch.ones(sequence_length, 2) * -math.inf
tag_sequence = torch.cat([tag_sequence, extra_tags_sentinel], -1)
tag_sequence = torch.cat([zero_sentinel, tag_sequence, zero_sentinel], 0)
sequence_length = tag_sequence.size(0)
path_scores = []
path_indices = []
if tag_observations[0] != -1:
one_hot = torch.zeros(num_tags)
one_hot[tag_observations[0]] = 100000.0
path_scores.append(one_hot.unsqueeze(0))
else:
path_scores.append(tag_sequence[0, :].unsqueeze(0))
# Evaluate the scores for all possible paths.
for timestep in range(1, sequence_length):
# Add pairwise potentials to current scores.
summed_potentials = path_scores[timestep - 1].unsqueeze(2) + transition_matrix
summed_potentials = summed_potentials.view(-1, num_tags)
# Best pairwise potential path score from the previous timestep.
max_k = min(summed_potentials.size()[0], top_k)
scores, paths = torch.topk(summed_potentials, k=max_k, dim=0)
# If we have an observation for this timestep, use it
# instead of the distribution over tags.
observation = tag_observations[timestep]
# Warn the user if they have passed
# invalid/extremely unlikely evidence.
if tag_observations[timestep - 1] != -1 and observation != -1:
if transition_matrix[tag_observations[timestep - 1], observation] < -10000:
logger.warning(
"The pairwise potential between tags you have passed as "
"observations is extremely unlikely. Double check your evidence "
"or transition potentials!"
)
if observation != -1:
one_hot = torch.zeros(num_tags)
one_hot[observation] = 100000.0
path_scores.append(one_hot.unsqueeze(0))
else:
path_scores.append(tag_sequence[timestep, :] + scores)
path_indices.append(paths.squeeze())
# Construct the most likely sequence backwards.
path_scores_v = path_scores[-1].view(-1)
max_k = min(path_scores_v.size()[0], top_k)
viterbi_scores, best_paths = torch.topk(path_scores_v, k=max_k, dim=0)
viterbi_paths = []
for i in range(max_k):
viterbi_path = [best_paths[i]]
for backward_timestep in reversed(path_indices):
viterbi_path.append(int(backward_timestep.view(-1)[viterbi_path[-1]]))
# Reverse the backward path.
viterbi_path.reverse()
if has_start_end_restrictions:
viterbi_path = viterbi_path[1:-1]
# Viterbi paths uses (num_tags * n_permutations) nodes; therefore, we need to modulo.
viterbi_path = [j % num_tags for j in viterbi_path]
viterbi_paths.append(viterbi_path)
if flatten_output:
return viterbi_paths[0], viterbi_scores[0]
return viterbi_paths, viterbi_scores
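# Illustrative sketch (not part of the upstream AllenNLP source): a tiny two-tag
# decoding problem in which transition penalties override the per-timestep scores.
def _example_viterbi_decode():
    tag_sequence = torch.tensor([[5.0, 4.0], [4.0, 5.0], [5.0, 4.0]])  # (sequence_length, num_tags)
    transition_matrix = torch.tensor([[0.0, -10.0], [-10.0, 0.0]])  # switching tags is costly
    path, score = viterbi_decode(tag_sequence, transition_matrix)
    # Although tag 1 scores higher at the middle timestep, the transition penalty keeps
    # the best path on tag 0 throughout, with a total score of 14.0.
    return path, score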
def get_text_field_mask(
text_field_tensors: Dict[str, Dict[str, torch.Tensor]],
num_wrapping_dims: int = 0,
padding_id: int = 0,
) -> torch.BoolTensor:
"""
Takes the dictionary of tensors produced by a `TextField` and returns a mask
with 0 where the tokens are padding, and 1 otherwise. `padding_id` specifies the id of padding tokens.
We also handle `TextFields` wrapped by an arbitrary number of `ListFields`, where the number of wrapping
`ListFields` is given by `num_wrapping_dims`.
If `num_wrapping_dims == 0`, the returned mask has shape `(batch_size, num_tokens)`.
If `num_wrapping_dims > 0` then the returned mask has `num_wrapping_dims` extra
dimensions, so the shape will be `(batch_size, ..., num_tokens)`.
There could be several entries in the tensor dictionary with different shapes (e.g., one for
word ids, one for character ids). In order to get a token mask, we use the tensor in
the dictionary with the lowest number of dimensions. After subtracting `num_wrapping_dims`,
if this tensor has two dimensions we assume it has shape `(batch_size, ..., num_tokens)`,
and use it for the mask. If instead it has three dimensions, we assume it has shape
`(batch_size, ..., num_tokens, num_features)`, and sum over the last dimension to produce
the mask. Most frequently this will be a character id tensor, but it could also be a
featurized representation of each token, etc.
If the input `text_field_tensors` contains the "mask" key, this is returned instead of inferring the mask.
"""
masks = []
for indexer_name, indexer_tensors in text_field_tensors.items():
if "mask" in indexer_tensors:
masks.append(indexer_tensors["mask"].bool())
if len(masks) == 1:
return masks[0]
elif len(masks) > 1:
# TODO(mattg): My guess is this will basically never happen, so I'm not writing logic to
# handle it. Should be straightforward to handle, though. If you see this error in
# practice, open an issue on github.
raise ValueError("found two mask outputs; not sure which to use!")
tensor_dims = [
(tensor.dim(), tensor)
for indexer_output in text_field_tensors.values()
for tensor in indexer_output.values()
]
tensor_dims.sort(key=lambda x: x[0])
smallest_dim = tensor_dims[0][0] - num_wrapping_dims
if smallest_dim == 2:
token_tensor = tensor_dims[0][1]
return token_tensor != padding_id
elif smallest_dim == 3:
character_tensor = tensor_dims[0][1]
return (character_tensor != padding_id).any(dim=-1)
else:
raise ValueError("Expected a tensor with dimension 2 or 3, found {}".format(smallest_dim))
def get_token_ids_from_text_field_tensors(
text_field_tensors: Dict[str, Dict[str, torch.Tensor]],
) -> torch.Tensor:
"""
Our `TextFieldTensors` are complex output structures, because they try to handle a lot of
potential variation. Sometimes, you just want to grab the token ids from this data structure,
and that's not trivial without hard-coding assumptions about your data processing, which defeats
the entire purpose of that generality. This method tries to let you get the token ids out of the
data structure in your model without hard-coding any assumptions.
"""
for indexer_name, indexer_tensors in text_field_tensors.items():
for argument_name, tensor in indexer_tensors.items():
if argument_name in ["tokens", "token_ids", "input_ids"]:
return tensor
raise NotImplementedError(
"Our heuristic for guessing the right token ids failed. Please open an issue on "
"github with more detail on how you got this error, so we can implement more robust "
"logic in this method."
)
def weighted_sum(matrix: torch.Tensor, attention: torch.Tensor) -> torch.Tensor:
"""
Takes a matrix of vectors and a set of weights over the rows in the matrix (which we call an
"attention" vector), and returns a weighted sum of the rows in the matrix. This is the typical
computation performed after an attention mechanism.
Note that while we call this a "matrix" of vectors and an attention "vector", we also handle
higher-order tensors. We always sum over the second-to-last dimension of the "matrix", and we
assume that all dimensions in the "matrix" prior to the last dimension are matched in the
"vector". Non-matched dimensions in the "vector" must be `directly after the batch dimension`.
For example, say I have a "matrix" with dimensions `(batch_size, num_queries, num_words,
embedding_dim)`. The attention "vector" then must have at least those dimensions, and could
have more. Both:
- `(batch_size, num_queries, num_words)` (distribution over words for each query)
- `(batch_size, num_documents, num_queries, num_words)` (distribution over words in a
query for each document)
are valid input "vectors", producing tensors of shape:
`(batch_size, num_queries, embedding_dim)` and
`(batch_size, num_documents, num_queries, embedding_dim)` respectively.
"""
# We'll special-case a few settings here, where there are efficient (but poorly-named)
# operations in pytorch that already do the computation we need.
if attention.dim() == 2 and matrix.dim() == 3:
return attention.unsqueeze(1).bmm(matrix).squeeze(1)
if attention.dim() == 3 and matrix.dim() == 3:
return attention.bmm(matrix)
if matrix.dim() - 1 < attention.dim():
expanded_size = list(matrix.size())
for i in range(attention.dim() - matrix.dim() + 1):
matrix = matrix.unsqueeze(1)
expanded_size.insert(i + 1, attention.size(i + 1))
matrix = matrix.expand(*expanded_size)
intermediate = attention.unsqueeze(-1).expand_as(matrix) * matrix
return intermediate.sum(dim=-2)
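# Illustrative sketch (not part of the upstream AllenNLP source), showing the common
# attention case of a 2D weight vector applied to a 3D matrix.
def _example_weighted_sum():
    matrix = torch.rand(2, 5, 7)  # (batch_size, num_words, embedding_dim)
    attention = torch.softmax(torch.rand(2, 5), dim=-1)  # (batch_size, num_words)
    summed = weighted_sum(matrix, attention)
    # summed has shape (2, 7): an attention-weighted average over the num_words dimension.
    return summed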
def sequence_cross_entropy_with_logits(
logits: torch.FloatTensor,
targets: torch.LongTensor,
weights: Union[torch.FloatTensor, torch.BoolTensor],
average: str = "batch",
label_smoothing: float = None,
gamma: float = None,
alpha: Union[float, List[float], torch.FloatTensor] = None,
) -> torch.FloatTensor:
"""
Computes the cross entropy loss of a sequence, weighted with respect to
some user provided weights. Note that the weighting here is not the same as
in the `torch.nn.CrossEntropyLoss()` criterion, which is weighting
classes; here we are weighting the loss contribution from particular elements
in the sequence. This allows loss computations for models which use padding.
# Parameters
logits : `torch.FloatTensor`, required.
A `torch.FloatTensor` of size (batch_size, sequence_length, num_classes)
which contains the unnormalized probability for each class.
targets : `torch.LongTensor`, required.
A `torch.LongTensor` of size (batch, sequence_length) which contains the
index of the true class for each corresponding step.
weights : `Union[torch.FloatTensor, torch.BoolTensor]`, required.
A `torch.FloatTensor` of size (batch, sequence_length)
average: `str`, optional (default = `"batch"`)
If "batch", average the loss across the batches. If "token", average
the loss across each item in the input. If `None`, return a vector
of losses per batch element.
label_smoothing : `float`, optional (default = `None`)
Whether or not to apply label smoothing to the cross-entropy loss.
For example, with a label smoothing value of 0.2, a 4 class classification
target would look like `[0.05, 0.05, 0.85, 0.05]` if the 3rd class was
the correct label.
gamma : `float`, optional (default = `None`)
        Focal loss[*] focusing parameter `gamma` reduces the relative loss for
        well-classified examples and puts more focus on hard, misclassified examples.
        The greater the value of `gamma`, the more focus is put on hard examples.
alpha : `Union[float, List[float]]`, optional (default = `None`)
Focal loss[*] weighting factor `alpha` to balance between classes. Can be
used independently with `gamma`. If a single `float` is provided, it
is assumed binary case using `alpha` and `1 - alpha` for positive and
negative respectively. If a list of `float` is provided, with the same
length as the number of classes, the weights will match the classes.
[*] T. Lin, P. Goyal, R. Girshick, K. He and P. Dollár, "Focal Loss for
Dense Object Detection," 2017 IEEE International Conference on Computer
Vision (ICCV), Venice, 2017, pp. 2999-3007.
# Returns
`torch.FloatTensor`
A torch.FloatTensor representing the cross entropy loss.
If `average=="batch"` or `average=="token"`, the returned loss is a scalar.
If `average is None`, the returned loss is a vector of shape (batch_size,).
"""
if average not in {None, "token", "batch"}:
raise ValueError("Got average f{average}, expected one of None, 'token', or 'batch'")
# make sure weights are float
weights = weights.to(logits.dtype)
# sum all dim except batch
non_batch_dims = tuple(range(1, len(weights.shape)))
# shape : (batch_size,)
weights_batch_sum = weights.sum(dim=non_batch_dims)
# shape : (batch * sequence_length, num_classes)
logits_flat = logits.view(-1, logits.size(-1))
# shape : (batch * sequence_length, num_classes)
log_probs_flat = torch.nn.functional.log_softmax(logits_flat, dim=-1)
# shape : (batch * max_len, 1)
targets_flat = targets.view(-1, 1).long()
# focal loss coefficient
if gamma:
# shape : (batch * sequence_length, num_classes)
probs_flat = log_probs_flat.exp()
# shape : (batch * sequence_length,)
probs_flat = torch.gather(probs_flat, dim=1, index=targets_flat)
# shape : (batch * sequence_length,)
focal_factor = (1.0 - probs_flat) ** gamma
# shape : (batch, sequence_length)
focal_factor = focal_factor.view(*targets.size())
weights = weights * focal_factor
if alpha is not None:
# shape : () / (num_classes,)
if isinstance(alpha, (float, int)):
# shape : (2,)
alpha_factor = torch.tensor(
[1.0 - float(alpha), float(alpha)], dtype=weights.dtype, device=weights.device
)
elif isinstance(alpha, (list, numpy.ndarray, torch.Tensor)):
# shape : (c,)
alpha_factor = torch.tensor(alpha, dtype=weights.dtype, device=weights.device)
if not alpha_factor.size():
# shape : (1,)
alpha_factor = alpha_factor.view(1)
# shape : (2,)
alpha_factor = torch.cat([1 - alpha_factor, alpha_factor])
else:
raise TypeError(
("alpha must be float, list of float, or torch.FloatTensor, {} provided.").format(
type(alpha)
)
)
# shape : (batch, max_len)
alpha_factor = torch.gather(alpha_factor, dim=0, index=targets_flat.view(-1)).view(
*targets.size()
)
weights = weights * alpha_factor
if label_smoothing is not None and label_smoothing > 0.0:
num_classes = logits.size(-1)
smoothing_value = label_smoothing / num_classes
# Fill all the correct indices with 1 - smoothing value.
one_hot_targets = torch.zeros_like(log_probs_flat).scatter_(
-1, targets_flat, 1.0 - label_smoothing
)
smoothed_targets = one_hot_targets + smoothing_value
negative_log_likelihood_flat = -log_probs_flat * smoothed_targets
negative_log_likelihood_flat = negative_log_likelihood_flat.sum(-1, keepdim=True)
else:
# Contribution to the negative log likelihood only comes from the exact indices
# of the targets, as the target distributions are one-hot. Here we use torch.gather
# to extract the indices of the num_classes dimension which contribute to the loss.
# shape : (batch * sequence_length, 1)
negative_log_likelihood_flat = -torch.gather(log_probs_flat, dim=1, index=targets_flat)
# shape : (batch, sequence_length)
negative_log_likelihood = negative_log_likelihood_flat.view(*targets.size())
# shape : (batch, sequence_length)
negative_log_likelihood = negative_log_likelihood * weights
if average == "batch":
# shape : (batch_size,)
per_batch_loss = negative_log_likelihood.sum(non_batch_dims) / (
weights_batch_sum + tiny_value_of_dtype(negative_log_likelihood.dtype)
)
num_non_empty_sequences = (weights_batch_sum > 0).sum() + tiny_value_of_dtype(
negative_log_likelihood.dtype
)
return per_batch_loss.sum() / num_non_empty_sequences
elif average == "token":
return negative_log_likelihood.sum() / (
weights_batch_sum.sum() + tiny_value_of_dtype(negative_log_likelihood.dtype)
)
else:
# shape : (batch_size,)
per_batch_loss = negative_log_likelihood.sum(non_batch_dims) / (
weights_batch_sum + tiny_value_of_dtype(negative_log_likelihood.dtype)
)
return per_batch_loss
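# Illustrative sketch (not part of the upstream AllenNLP source), showing zero-weighted
# padding positions being excluded from the loss.
def _example_sequence_cross_entropy_with_logits():
    batch_size, sequence_length, num_classes = 2, 4, 5
    logits = torch.rand(batch_size, sequence_length, num_classes)
    targets = torch.randint(num_classes, (batch_size, sequence_length))
    weights = torch.tensor([[1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0]])  # 0.0 marks padding
    loss = sequence_cross_entropy_with_logits(logits, targets, weights)
    # With the default average="batch", loss is a scalar tensor.
    return loss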
def replace_masked_values(
tensor: torch.Tensor, mask: torch.BoolTensor, replace_with: float
) -> torch.Tensor:
"""
Replaces all masked values in `tensor` with `replace_with`. `mask` must be broadcastable
to the same shape as `tensor`. We require that `tensor.dim() == mask.dim()`, as otherwise we
won't know which dimensions of the mask to unsqueeze.
This just does `tensor.masked_fill()`, except the pytorch method fills in things with a mask
value of 1, where we want the opposite. You can do this in your own code with
`tensor.masked_fill(~mask, replace_with)`.
"""
if tensor.dim() != mask.dim():
raise ConfigurationError(
"tensor.dim() (%d) != mask.dim() (%d)" % (tensor.dim(), mask.dim())
)
return tensor.masked_fill(~mask, replace_with)
def tensors_equal(tensor1: torch.Tensor, tensor2: torch.Tensor, tolerance: float = 1e-12) -> bool:
"""
A check for tensor equality (by value). We make sure that the tensors have the same shape,
then check all of the entries in the tensor for equality. We additionally allow the input
tensors to be lists or dictionaries, where we then do the above check on every position in the
list / item in the dictionary. If we find objects that aren't tensors as we're doing that, we
just defer to their equality check.
This is kind of a catch-all method that's designed to make implementing `__eq__` methods
easier, in a way that's really only intended to be useful for tests.
"""
if isinstance(tensor1, (list, tuple)):
if not isinstance(tensor2, (list, tuple)) or len(tensor1) != len(tensor2):
return False
return all(tensors_equal(t1, t2, tolerance) for t1, t2 in zip(tensor1, tensor2))
elif isinstance(tensor1, dict):
if not isinstance(tensor2, dict):
return False
if tensor1.keys() != tensor2.keys():
return False
return all(tensors_equal(tensor1[key], tensor2[key], tolerance) for key in tensor1)
elif isinstance(tensor1, torch.Tensor):
if not isinstance(tensor2, torch.Tensor):
return False
if tensor1.size() != tensor2.size():
return False
# Special case for bools since they don't support subtraction
if tensor1.dtype == torch.bool or tensor2.dtype == torch.bool:
return (tensor1 == tensor2).all()
return ((tensor1 - tensor2).abs().float() < tolerance).all()
else:
try:
return tensor1 == tensor2
except RuntimeError:
print(type(tensor1), type(tensor2))
raise
def device_mapping(cuda_device: int):
"""
In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
you have to supply a `map_location` function. Call this with
the desired `cuda_device` to get the function that `torch.load()` needs.
"""
def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage:
if cuda_device >= 0:
return storage.cuda(cuda_device)
else:
return storage
return inner_device_mapping
def combine_tensors(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:
"""
Combines a list of tensors using element-wise operations and concatenation, specified by a
`combination` string. The string refers to (1-indexed) positions in the input tensor list,
and looks like `"1,2,1+2,3-1"`.
We allow the following kinds of combinations : `x`, `x*y`, `x+y`, `x-y`, and `x/y`,
where `x` and `y` are positive integers less than or equal to `len(tensors)`. Each of
the binary operations is performed elementwise. You can give as many combinations as you want
in the `combination` string. For example, for the input string `"1,2,1*2"`, the result
would be `[1;2;1*2]`, as you would expect, where `[;]` is concatenation along the last
dimension.
If you have a fixed, known way to combine tensors that you use in a model, you should probably
just use something like `torch.cat([x_tensor, y_tensor, x_tensor * y_tensor])`. This
function adds some complexity that is only necessary if you want the specific combination used
to be `configurable`.
If you want to do any element-wise operations, the tensors involved in each element-wise
operation must have the same shape.
This function also accepts `x` and `y` in place of `1` and `2` in the combination
string.
"""
if len(tensors) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace("x", "1").replace("y", "2")
to_concatenate = [_get_combination(piece, tensors) for piece in combination.split(",")]
return torch.cat(to_concatenate, dim=-1)
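# Illustrative sketch (not part of the upstream AllenNLP source), showing a
# configurable "x,y,x*y" combination.
def _example_combine_tensors():
    x = torch.rand(2, 3, 4)
    y = torch.rand(2, 3, 4)
    combined = combine_tensors("x,y,x*y", [x, y])
    # combined has shape (2, 3, 12): x, y and their elementwise product concatenated
    # along the last dimension, the same as torch.cat([x, y, x * y], dim=-1).
    return combined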
def _rindex(sequence: Sequence[T], obj: T) -> int:
"""
Return zero-based index in the sequence of the last item whose value is equal to obj. Raises a
ValueError if there is no such item.
# Parameters
sequence : `Sequence[T]`
obj : `T`
# Returns
`int`
zero-based index associated to the position of the last item equal to obj
"""
for i in range(len(sequence) - 1, -1, -1):
if sequence[i] == obj:
return i
raise ValueError(f"Unable to find {obj} in sequence {sequence}.")
def _get_combination(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:
if combination.isdigit():
index = int(combination) - 1
return tensors[index]
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor = _get_combination(combination[0], tensors)
second_tensor = _get_combination(combination[2], tensors)
operation = combination[1]
if operation == "*":
return first_tensor * second_tensor
elif operation == "/":
return first_tensor / second_tensor
elif operation == "+":
return first_tensor + second_tensor
elif operation == "-":
return first_tensor - second_tensor
else:
raise ConfigurationError("Invalid operation: " + operation)
def combine_tensors_and_multiply(
combination: str, tensors: List[torch.Tensor], weights: torch.nn.Parameter
) -> torch.Tensor:
"""
Like [`combine_tensors`](./util.md#combine_tensors), but does a weighted (linear)
multiplication while combining. This is a separate function from `combine_tensors`
because we try to avoid instantiating large intermediate tensors during the combination,
which is possible because we know that we're going to be multiplying by a weight vector in the end.
# Parameters
combination : `str`
Same as in `combine_tensors`
tensors : `List[torch.Tensor]`
A list of tensors to combine, where the integers in the `combination` are (1-indexed)
positions in this list of tensors. These tensors are all expected to have either three or
four dimensions, with the final dimension being an embedding. If there are four
dimensions, one of them must have length 1.
weights : `torch.nn.Parameter`
A vector of weights to use for the combinations. This should have shape (combined_dim,),
as calculated by `get_combined_dim`.
"""
if len(tensors) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace("x", "1").replace("y", "2")
pieces = combination.split(",")
tensor_dims = [tensor.size(-1) for tensor in tensors]
combination_dims = [_get_combination_dim(piece, tensor_dims) for piece in pieces]
dims_so_far = 0
to_sum = []
for piece, combination_dim in zip(pieces, combination_dims):
weight = weights[dims_so_far : (dims_so_far + combination_dim)]
dims_so_far += combination_dim
to_sum.append(_get_combination_and_multiply(piece, tensors, weight))
result = to_sum[0]
for result_piece in to_sum[1:]:
result = result + result_piece
return result
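# Illustrative sketch (not part of the upstream AllenNLP source), in the style of a
# matrix-attention module: the inputs are unsqueezed to 4D so that the "x*y" piece
# scores every pair of rows. The shapes below are arbitrary example values.
def _example_combine_tensors_and_multiply():
    batch_size, num_rows_1, num_rows_2, dim = 2, 3, 5, 4
    matrix_1 = torch.rand(batch_size, num_rows_1, dim)
    matrix_2 = torch.rand(batch_size, num_rows_2, dim)
    combination = "x,y,x*y"
    weights = torch.nn.Parameter(torch.rand(get_combined_dim(combination, [dim, dim])))
    similarities = combine_tensors_and_multiply(
        combination, [matrix_1.unsqueeze(2), matrix_2.unsqueeze(1)], weights
    )
    # similarities has shape (batch_size, num_rows_1, num_rows_2), computed without
    # materialising the full concatenated combination tensor.
    return similarities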
def _get_combination_and_multiply(
combination: str, tensors: List[torch.Tensor], weight: torch.nn.Parameter
) -> torch.Tensor:
if combination.isdigit():
index = int(combination) - 1
return torch.matmul(tensors[index], weight)
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor = _get_combination(combination[0], tensors)
second_tensor = _get_combination(combination[2], tensors)
operation = combination[1]
if operation == "*":
if first_tensor.dim() > 4 or second_tensor.dim() > 4:
raise ValueError("Tensors with dim > 4 not currently supported")
desired_dim = max(first_tensor.dim(), second_tensor.dim()) - 1
if first_tensor.dim() == 4:
expanded_dim = _rindex(first_tensor.size(), 1)
first_tensor = first_tensor.squeeze(expanded_dim)
if second_tensor.dim() == 4:
expanded_dim = _rindex(second_tensor.size(), 1)
second_tensor = second_tensor.squeeze(expanded_dim)
intermediate = first_tensor * weight
result = torch.matmul(intermediate, second_tensor.transpose(-1, -2))
if result.dim() == desired_dim + 1:
result = result.squeeze(-1)
return result
elif operation == "/":
if first_tensor.dim() > 4 or second_tensor.dim() > 4:
raise ValueError("Tensors with dim > 4 not currently supported")
desired_dim = max(first_tensor.dim(), second_tensor.dim()) - 1
if first_tensor.dim() == 4:
expanded_dim = _rindex(first_tensor.size(), 1)
first_tensor = first_tensor.squeeze(expanded_dim)
if second_tensor.dim() == 4:
expanded_dim = _rindex(second_tensor.size(), 1)
second_tensor = second_tensor.squeeze(expanded_dim)
intermediate = first_tensor * weight
result = torch.matmul(intermediate, second_tensor.pow(-1).transpose(-1, -2))
if result.dim() == desired_dim + 1:
result = result.squeeze(-1)
return result
elif operation == "+":
return torch.matmul(first_tensor, weight) + torch.matmul(second_tensor, weight)
elif operation == "-":
return torch.matmul(first_tensor, weight) - torch.matmul(second_tensor, weight)
else:
raise ConfigurationError("Invalid operation: " + operation)
def get_combined_dim(combination: str, tensor_dims: List[int]) -> int:
"""
For use with [`combine_tensors`](./util.md#combine_tensors).
This function computes the resultant dimension when calling `combine_tensors(combination, tensors)`,
when the tensor dimension is known. This is necessary for knowing the sizes of weight matrices
when building models that use `combine_tensors`.
# Parameters
combination : `str`
A comma-separated list of combination pieces, like `"1,2,1*2"`, specified identically to
`combination` in `combine_tensors`.
tensor_dims : `List[int]`
A list of tensor dimensions, where each dimension is from the `last axis` of the tensors
that will be input to `combine_tensors`.
"""
if len(tensor_dims) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace("x", "1").replace("y", "2")
return sum(_get_combination_dim(piece, tensor_dims) for piece in combination.split(","))
def _get_combination_dim(combination: str, tensor_dims: List[int]) -> int:
if combination.isdigit():
index = int(combination) - 1
return tensor_dims[index]
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor_dim = _get_combination_dim(combination[0], tensor_dims)
second_tensor_dim = _get_combination_dim(combination[2], tensor_dims)
operation = combination[1]
if first_tensor_dim != second_tensor_dim:
raise ConfigurationError('Tensor dims must match for operation "{}"'.format(operation))
return first_tensor_dim
def logsumexp(tensor: torch.Tensor, dim: int = -1, keepdim: bool = False) -> torch.Tensor:
"""
A numerically stable computation of logsumexp. This is mathematically equivalent to
    `tensor.exp().sum(dim, keepdim=keepdim).log()`. This function is typically used for summing log
probabilities.
# Parameters
tensor : `torch.FloatTensor`, required.
A tensor of arbitrary size.
dim : `int`, optional (default = `-1`)
The dimension of the tensor to apply the logsumexp to.
keepdim: `bool`, optional (default = `False`)
Whether to retain a dimension of size one at the dimension we reduce over.
"""
max_score, _ = tensor.max(dim, keepdim=keepdim)
if keepdim:
stable_vec = tensor - max_score
else:
stable_vec = tensor - max_score.unsqueeze(dim)
return max_score + (stable_vec.exp().sum(dim, keepdim=keepdim)).log()
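# --- Editor's illustrative sketch (not part of the original allennlp source).
# Checks that `logsumexp` matches the naive computation on well-behaved inputs
# and stays finite on large log values where the naive version overflows.
def _example_logsumexp():
    import torch
    log_probs = torch.log(torch.tensor([[0.1, 0.2, 0.7]]))
    naive = log_probs.exp().sum(dim=-1).log()
    assert torch.allclose(naive, logsumexp(log_probs, dim=-1), atol=1e-6)
    big = torch.tensor([[1000.0, 1000.0]])
    assert torch.isinf(big.exp().sum(dim=-1).log()).all()  # naive version overflows
    assert torch.isfinite(logsumexp(big, dim=-1)).all()  # stable version stays finite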
def get_device_of(tensor: torch.Tensor) -> int:
"""
Returns the device of the tensor.
"""
if not tensor.is_cuda:
return -1
else:
return tensor.get_device()
def flatten_and_batch_shift_indices(indices: torch.Tensor, sequence_length: int) -> torch.Tensor:
"""
This is a subroutine for [`batched_index_select`](./util.md#batched_index_select).
The given `indices` of size `(batch_size, d_1, ..., d_n)` indexes into dimension 2 of a
target tensor, which has size `(batch_size, sequence_length, embedding_size)`. This
function returns a vector that correctly indexes into the flattened target. The sequence
length of the target must be provided to compute the appropriate offsets.
```python
indices = torch.ones([2,3], dtype=torch.long)
# Sequence length of the target tensor.
sequence_length = 10
shifted_indices = flatten_and_batch_shift_indices(indices, sequence_length)
# Indices into the second element in the batch are correctly shifted
# to take into account that the target tensor will be flattened before
# the indices are applied.
    assert shifted_indices.tolist() == [1, 1, 1, 11, 11, 11]
```
# Parameters
indices : `torch.LongTensor`, required.
sequence_length : `int`, required.
The length of the sequence the indices index into.
This must be the second dimension of the tensor.
# Returns
offset_indices : `torch.LongTensor`
"""
# Shape: (batch_size)
if torch.max(indices) >= sequence_length or torch.min(indices) < 0:
raise ConfigurationError(
f"All elements in indices should be in range (0, {sequence_length - 1})"
)
offsets = get_range_vector(indices.size(0), get_device_of(indices)) * sequence_length
for _ in range(len(indices.size()) - 1):
offsets = offsets.unsqueeze(1)
# Shape: (batch_size, d_1, ..., d_n)
offset_indices = indices + offsets
# Shape: (batch_size * d_1 * ... * d_n)
offset_indices = offset_indices.view(-1)
return offset_indices
def batched_index_select(
target: torch.Tensor,
indices: torch.LongTensor,
flattened_indices: Optional[torch.LongTensor] = None,
) -> torch.Tensor:
"""
The given `indices` of size `(batch_size, d_1, ..., d_n)` indexes into the sequence
dimension (dimension 2) of the target, which has size `(batch_size, sequence_length,
embedding_size)`.
This function returns selected values in the target with respect to the provided indices, which
have size `(batch_size, d_1, ..., d_n, embedding_size)`. This can use the optionally
precomputed `flattened_indices` with size `(batch_size * d_1 * ... * d_n)` if given.
An example use case of this function is looking up the start and end indices of spans in a
sequence tensor. This is used in the
[CoreferenceResolver](https://docs.allennlp.org/models/master/models/coref/models/coref/)
model to select contextual word representations corresponding to the start and end indices of
mentions.
The key reason this can't be done with basic torch functions is that we want to be able to use look-up
tensors with an arbitrary number of dimensions (for example, in the coref model, we don't know
a-priori how many spans we are looking up).
# Parameters
target : `torch.Tensor`, required.
A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).
This is the tensor to be indexed.
indices : `torch.LongTensor`
A tensor of shape (batch_size, ...), where each element is an index into the
`sequence_length` dimension of the `target` tensor.
flattened_indices : `Optional[torch.Tensor]`, optional (default = `None`)
An optional tensor representing the result of calling `flatten_and_batch_shift_indices`
on `indices`. This is helpful in the case that the indices can be flattened once and
cached for many batch lookups.
# Returns
selected_targets : `torch.Tensor`
A tensor with shape [indices.size(), target.size(-1)] representing the embedded indices
extracted from the batch flattened target tensor.
"""
if flattened_indices is None:
# Shape: (batch_size * d_1 * ... * d_n)
flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))
# Shape: (batch_size * sequence_length, embedding_size)
flattened_target = target.view(-1, target.size(-1))
# Shape: (batch_size * d_1 * ... * d_n, embedding_size)
flattened_selected = flattened_target.index_select(0, flattened_indices)
selected_shape = list(indices.size()) + [target.size(-1)]
# Shape: (batch_size, d_1, ..., d_n, embedding_size)
selected_targets = flattened_selected.view(*selected_shape)
return selected_targets
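# --- Editor's illustrative sketch (not part of the original allennlp source).
# Gathers per-token embeddings at arbitrary positions, e.g. span endpoints.
def _example_batched_index_select():
    import torch
    batch_size, sequence_length, embedding_size = 2, 5, 3
    target = torch.arange(batch_size * sequence_length * embedding_size, dtype=torch.float)
    target = target.view(batch_size, sequence_length, embedding_size)
    # Pick tokens 0 and 4 from the first sequence, tokens 1 and 2 from the second.
    indices = torch.tensor([[0, 4], [1, 2]])
    selected = batched_index_select(target, indices)
    assert selected.shape == (batch_size, 2, embedding_size)
    assert torch.equal(selected[0, 1], target[0, 4])
    assert torch.equal(selected[1, 0], target[1, 1])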
def masked_index_fill(
target: torch.Tensor, indices: torch.LongTensor, mask: torch.BoolTensor, fill_value: int = 1
) -> torch.Tensor:
"""
    The given `indices` in `target` will be filled with `fill_value`, given a `mask`.
# Parameters
target : `torch.Tensor`, required.
A 2 dimensional tensor of shape (batch_size, sequence_length).
This is the tensor to be filled.
indices : `torch.LongTensor`, required
A 2 dimensional tensor of shape (batch_size, num_indices),
These are the indices that will be filled in the original tensor.
mask : `torch.Tensor`, required.
A 2 dimensional tensor of shape (batch_size, num_indices), mask.sum() == `nonzero_indices`.
fill_value : `int`, optional (default = `1`)
The value we fill the tensor with.
# Returns
filled_target : `torch.Tensor`
A tensor with shape (batch_size, sequence_length) where 'indices' are filled with `fill_value`
"""
mask = mask.bool()
prev_shape = target.size()
# Shape: (batch_size * num_indices)
flattened_indices = flatten_and_batch_shift_indices(indices * mask, target.size(1))
    # Shape: (batch_size * num_indices,)
mask = mask.view(-1)
# Shape: (batch_size * sequence_length, 1)
flattened_target = target.view(-1, 1)
# Shape: (nonzero_indices, 1)
unmasked_indices = flattened_indices[mask].unsqueeze(-1)
flattened_target = flattened_target.scatter(0, unmasked_indices, fill_value)
filled_target = flattened_target.reshape(prev_shape)
return filled_target
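# --- Editor's illustrative sketch (not part of the original allennlp source).
# Fills the given positions of a (batch_size, sequence_length) tensor with
# `fill_value`, ignoring positions whose mask entry is False.
def _example_masked_index_fill():
    import torch
    target = torch.zeros(2, 5, dtype=torch.long)
    indices = torch.tensor([[1, 3], [2, 0]])
    # The second index of the second row is padding and should be ignored.
    mask = torch.tensor([[True, True], [True, False]])
    filled = masked_index_fill(target, indices, mask)
    assert filled.tolist() == [[0, 1, 0, 1, 0], [0, 0, 1, 0, 0]]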
def masked_index_replace(
target: torch.Tensor,
indices: torch.LongTensor,
mask: torch.BoolTensor,
replace: torch.Tensor,
) -> torch.Tensor:
"""
    The given `indices` in `target` will be replaced with the corresponding vectors
    from the `replace` tensor, given a `mask`.
# Parameters
target : `torch.Tensor`, required.
A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_dim).
This is the tensor to be replaced into.
indices : `torch.LongTensor`, required
A 2 dimensional tensor of shape (batch_size, num_indices),
These are the indices that will be replaced in the original tensor.
mask : `torch.Tensor`, required.
A 2 dimensional tensor of shape (batch_size, num_indices), mask.sum() == `nonzero_indices`.
replace : `torch.Tensor`, required.
A 3 dimensional tensor of shape (batch_size, num_indices, embedding_dim),
The tensor to perform scatter from.
# Returns
replaced_target : `torch.Tensor`
A tensor with shape (batch_size, sequence_length, embedding_dim) where 'indices'
        are replaced with the corresponding vector from `replace`
"""
target = target.clone()
mask = mask.bool()
prev_shape = target.size()
# Shape: (batch_size * num_indices)
flattened_indices = flatten_and_batch_shift_indices(indices * mask, target.size(1))
# Shape: (batch_size * sequence_length, embedding_size)
flattened_target = target.view(-1, target.size(-1))
    # Shape: (batch_size * num_indices,)
mask = mask.view(-1)
flattened_target[flattened_indices[mask]] = replace.view(-1, replace.size(-1))[mask]
# Shape: (batch_size, sequence_length, embedding_dim)
replaced_target = flattened_target.reshape(prev_shape)
return replaced_target
def batched_span_select(
    target: torch.Tensor, spans: torch.LongTensor
) -> Tuple[torch.Tensor, torch.BoolTensor]:
"""
The given `spans` of size `(batch_size, num_spans, 2)` indexes into the sequence
dimension (dimension 2) of the target, which has size `(batch_size, sequence_length,
embedding_size)`.
This function returns segmented spans in the target with respect to the provided span indices.
# Parameters
target : `torch.Tensor`, required.
A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).
This is the tensor to be indexed.
    spans : `torch.LongTensor`
A 3 dimensional tensor of shape (batch_size, num_spans, 2) representing start and end
indices (both inclusive) into the `sequence_length` dimension of the `target` tensor.
# Returns
span_embeddings : `torch.Tensor`
        A tensor with shape (batch_size, num_spans, max_batch_span_width, embedding_size)
representing the embedded spans extracted from the batch flattened target tensor.
span_mask: `torch.BoolTensor`
A tensor with shape (batch_size, num_spans, max_batch_span_width) representing the mask on
the returned span embeddings.
"""
# both of shape (batch_size, num_spans, 1)
span_starts, span_ends = spans.split(1, dim=-1)
# shape (batch_size, num_spans, 1)
# These span widths are off by 1, because the span ends are `inclusive`.
span_widths = span_ends - span_starts
# We need to know the maximum span width so we can
# generate indices to extract the spans from the sequence tensor.
# These indices will then get masked below, such that if the length
# of a given span is smaller than the max, the rest of the values
# are masked.
max_batch_span_width = span_widths.max().item() + 1
# Shape: (1, 1, max_batch_span_width)
max_span_range_indices = get_range_vector(max_batch_span_width, get_device_of(target)).view(
1, 1, -1
)
# Shape: (batch_size, num_spans, max_batch_span_width)
# This is a broadcasted comparison - for each span we are considering,
# we are creating a range vector of size max_span_width, but masking values
# which are greater than the actual length of the span.
#
# We're using <= here (and for the mask below) because the span ends are
# inclusive, so we want to include indices which are equal to span_widths rather
# than using it as a non-inclusive upper bound.
span_mask = max_span_range_indices <= span_widths
raw_span_indices = span_starts + max_span_range_indices
    # We also don't want to include span indices which are greater than the sequence_length,
# which happens because some spans near the end of the sequence
# have a start index + max_batch_span_width > sequence_length, so we add this to the mask here.
span_mask = span_mask & (raw_span_indices < target.size(1)) & (0 <= raw_span_indices)
span_indices = raw_span_indices * span_mask
# Shape: (batch_size, num_spans, max_batch_span_width, embedding_dim)
span_embeddings = batched_index_select(target, span_indices)
return span_embeddings, span_mask
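# --- Editor's illustrative sketch (not part of the original allennlp source).
# Extracts padded, masked span representations for (inclusive) start/end pairs.
def _example_batched_span_select():
    import torch
    target = torch.randn(1, 6, 4)  # (batch_size, sequence_length, embedding_size)
    # One span over tokens 1..3 and one single-token span over token 5.
    spans = torch.tensor([[[1, 3], [5, 5]]])
    span_embeddings, span_mask = batched_span_select(target, spans)
    # max_batch_span_width is 3, so the second span is padded and masked out.
    assert span_embeddings.shape == (1, 2, 3, 4)
    assert span_mask.tolist() == [[[True, True, True], [True, False, False]]]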
def flattened_index_select(target: torch.Tensor, indices: torch.LongTensor) -> torch.Tensor:
"""
The given `indices` of size `(set_size, subset_size)` specifies subsets of the `target`
that each of the set_size rows should select. The `target` has size
`(batch_size, sequence_length, embedding_size)`, and the resulting selected tensor has size
`(batch_size, set_size, subset_size, embedding_size)`.
# Parameters
target : `torch.Tensor`, required.
A Tensor of shape (batch_size, sequence_length, embedding_size).
indices : `torch.LongTensor`, required.
A LongTensor of shape (set_size, subset_size). All indices must be < sequence_length
as this tensor is an index into the sequence_length dimension of the target.
# Returns
selected : `torch.Tensor`, required.
A Tensor of shape (batch_size, set_size, subset_size, embedding_size).
"""
if indices.dim() != 2:
raise ConfigurationError(
"Indices passed to flattened_index_select had shape {} but "
"only 2 dimensional inputs are supported.".format(indices.size())
)
# Shape: (batch_size, set_size * subset_size, embedding_size)
flattened_selected = target.index_select(1, indices.view(-1))
# Shape: (batch_size, set_size, subset_size, embedding_size)
selected = flattened_selected.view(target.size(0), indices.size(0), indices.size(1), -1)
return selected
def get_range_vector(size: int, device: int) -> torch.Tensor:
"""
    Returns a range vector with the desired size, starting at 0. The CUDA implementation
    is meant to avoid copying data from CPU to GPU.
"""
if device > -1:
return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1
else:
return torch.arange(0, size, dtype=torch.long)
def bucket_values(
distances: torch.Tensor, num_identity_buckets: int = 4, num_total_buckets: int = 10
) -> torch.Tensor:
"""
    Places the given values (designed for distances) into `num_total_buckets` semi-logscale
buckets, with `num_identity_buckets` of these capturing single values.
The default settings will bucket values into the following buckets:
[0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
# Parameters
distances : `torch.Tensor`, required.
A Tensor of any size, to be bucketed.
num_identity_buckets: `int`, optional (default = `4`).
The number of identity buckets (those only holding a single value).
num_total_buckets : `int`, (default = `10`)
The total number of buckets to bucket values into.
# Returns
`torch.Tensor`
A tensor of the same shape as the input, containing the indices of the buckets
the values were placed in.
"""
# Chunk the values into semi-logscale buckets using .floor().
# This is a semi-logscale bucketing because we divide by log(2) after taking the log.
# We do this to make the buckets more granular in the initial range, where we expect
# most values to fall. We then add (num_identity_buckets - 1) because we want these indices
# to start _after_ the fixed number of buckets which we specified would only hold single values.
logspace_index = (distances.float().log() / math.log(2)).floor().long() + (
num_identity_buckets - 1
)
# create a mask for values which will go into single number buckets (i.e not a range).
use_identity_mask = (distances <= num_identity_buckets).long()
use_buckets_mask = 1 + (-1 * use_identity_mask)
# Use the original values if they are less than num_identity_buckets, otherwise
# use the logspace indices.
combined_index = use_identity_mask * distances + use_buckets_mask * logspace_index
# Clamp to put anything > num_total_buckets into the final bucket.
return combined_index.clamp(0, num_total_buckets - 1)
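# --- Editor's illustrative sketch (not part of the original allennlp source).
# With the defaults, small distances land in identity buckets and larger
# distances fall into the semi-logscale buckets described in the docstring.
def _example_bucket_values():
    import torch
    distances = torch.tensor([1, 4, 5, 8, 64, 1000])
    # Buckets: [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+]
    assert bucket_values(distances).tolist() == [1, 4, 5, 6, 9, 9]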
def add_sentence_boundary_token_ids(
tensor: torch.Tensor, mask: torch.BoolTensor, sentence_begin_token: Any, sentence_end_token: Any
) -> Tuple[torch.Tensor, torch.BoolTensor]:
"""
Add begin/end of sentence tokens to the batch of sentences.
Given a batch of sentences with size `(batch_size, timesteps)` or
`(batch_size, timesteps, dim)` this returns a tensor of shape
`(batch_size, timesteps + 2)` or `(batch_size, timesteps + 2, dim)` respectively.
Returns both the new tensor and updated mask.
# Parameters
tensor : `torch.Tensor`
A tensor of shape `(batch_size, timesteps)` or `(batch_size, timesteps, dim)`
mask : `torch.BoolTensor`
A tensor of shape `(batch_size, timesteps)`
sentence_begin_token: `Any`
Can be anything that can be broadcast in torch for assignment.
For 2D input, a scalar with the `<S>` id. For 3D input, a tensor with length dim.
sentence_end_token: `Any`
Can be anything that can be broadcast in torch for assignment.
For 2D input, a scalar with the `</S>` id. For 3D input, a tensor with length dim.
# Returns
tensor_with_boundary_tokens : `torch.Tensor`
The tensor with the appended and prepended boundary tokens. If the input was 2D,
it has shape (batch_size, timesteps + 2) and if the input was 3D, it has shape
(batch_size, timesteps + 2, dim).
new_mask : `torch.BoolTensor`
The new mask for the tensor, taking into account the appended tokens
marking the beginning and end of the sentence.
"""
sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()
tensor_shape = list(tensor.data.shape)
new_shape = list(tensor_shape)
new_shape[1] = tensor_shape[1] + 2
tensor_with_boundary_tokens = tensor.new_zeros(*new_shape, device=tensor.device)
if len(tensor_shape) == 2:
tensor_with_boundary_tokens[:, 1:-1] = tensor
tensor_with_boundary_tokens[:, 0] = sentence_begin_token
for i, j in enumerate(sequence_lengths):
tensor_with_boundary_tokens[i, j + 1] = sentence_end_token
new_mask = tensor_with_boundary_tokens != 0
elif len(tensor_shape) == 3:
tensor_with_boundary_tokens[:, 1:-1, :] = tensor
sentence_begin_token = sentence_begin_token.detach().to(tensor.device)
sentence_end_token = sentence_end_token.detach().to(tensor.device)
for i, j in enumerate(sequence_lengths):
tensor_with_boundary_tokens[i, 0, :] = sentence_begin_token
tensor_with_boundary_tokens[i, j + 1, :] = sentence_end_token
new_mask = (tensor_with_boundary_tokens > 0).sum(dim=-1) > 0
else:
raise ValueError("add_sentence_boundary_token_ids only accepts 2D and 3D input")
return tensor_with_boundary_tokens, new_mask
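# --- Editor's illustrative sketch (not part of the original allennlp source).
# Wraps each sequence in a batch with hypothetical <S>/</S> ids (1 and 2 here
# are made-up values) and returns the updated mask.
def _example_add_sentence_boundary_token_ids():
    import torch
    tensor = torch.tensor([[7, 8, 9], [5, 6, 0]])  # lengths 3 and 2, padded with 0
    mask = tensor != 0
    with_boundaries, new_mask = add_sentence_boundary_token_ids(tensor, mask, 1, 2)
    assert with_boundaries.tolist() == [[1, 7, 8, 9, 2], [1, 5, 6, 2, 0]]
    assert new_mask.tolist() == [[True] * 5, [True, True, True, True, False]]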
def remove_sentence_boundaries(
tensor: torch.Tensor, mask: torch.BoolTensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Remove begin/end of sentence embeddings from the batch of sentences.
Given a batch of sentences with size `(batch_size, timesteps, dim)`
this returns a tensor of shape `(batch_size, timesteps - 2, dim)` after removing
the beginning and end sentence markers. The sentences are assumed to be padded on the right,
with the beginning of each sentence assumed to occur at index 0 (i.e., `mask[:, 0]` is assumed
to be 1).
Returns both the new tensor and updated mask.
This function is the inverse of `add_sentence_boundary_token_ids`.
# Parameters
tensor : `torch.Tensor`
A tensor of shape `(batch_size, timesteps, dim)`
mask : `torch.BoolTensor`
A tensor of shape `(batch_size, timesteps)`
# Returns
tensor_without_boundary_tokens : `torch.Tensor`
The tensor after removing the boundary tokens of shape `(batch_size, timesteps - 2, dim)`
new_mask : `torch.BoolTensor`
The new mask for the tensor of shape `(batch_size, timesteps - 2)`.
"""
sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()
tensor_shape = list(tensor.data.shape)
new_shape = list(tensor_shape)
new_shape[1] = tensor_shape[1] - 2
tensor_without_boundary_tokens = tensor.new_zeros(*new_shape)
new_mask = tensor.new_zeros((new_shape[0], new_shape[1]), dtype=torch.bool)
for i, j in enumerate(sequence_lengths):
if j > 2:
tensor_without_boundary_tokens[i, : (j - 2), :] = tensor[i, 1 : (j - 1), :]
new_mask[i, : (j - 2)] = True
return tensor_without_boundary_tokens, new_mask
def add_positional_features(
tensor: torch.Tensor, min_timescale: float = 1.0, max_timescale: float = 1.0e4
):
"""
Implements the frequency-based positional encoding described
in [Attention is All you Need][0].
Adds sinusoids of different frequencies to a `Tensor`. A sinusoid of a
different frequency and phase is added to each dimension of the input `Tensor`.
This allows the attention heads to use absolute and relative positions.
The number of timescales is equal to hidden_dim / 2 within the range
(min_timescale, max_timescale). For each timescale, the two sinusoidal
signals sin(timestep / timescale) and cos(timestep / timescale) are
generated and concatenated along the hidden_dim dimension.
[0]: https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077
# Parameters
tensor : `torch.Tensor`
a Tensor with shape (batch_size, timesteps, hidden_dim).
min_timescale : `float`, optional (default = `1.0`)
The smallest timescale to use.
max_timescale : `float`, optional (default = `1.0e4`)
The largest timescale to use.
# Returns
`torch.Tensor`
The input tensor augmented with the sinusoidal frequencies.
""" # noqa
_, timesteps, hidden_dim = tensor.size()
timestep_range = get_range_vector(timesteps, get_device_of(tensor)).data.float()
# We're generating both cos and sin frequencies,
# so half for each.
num_timescales = hidden_dim // 2
timescale_range = get_range_vector(num_timescales, get_device_of(tensor)).data.float()
log_timescale_increments = math.log(float(max_timescale) / float(min_timescale)) / float(
num_timescales - 1
)
inverse_timescales = min_timescale * torch.exp(timescale_range * -log_timescale_increments)
# Broadcasted multiplication - shape (timesteps, num_timescales)
scaled_time = timestep_range.unsqueeze(1) * inverse_timescales.unsqueeze(0)
# shape (timesteps, 2 * num_timescales)
sinusoids = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 1)
if hidden_dim % 2 != 0:
# if the number of dimensions is odd, the cos and sin
# timescales had size (hidden_dim - 1) / 2, so we need
# to add a row of zeros to make up the difference.
sinusoids = torch.cat([sinusoids, sinusoids.new_zeros(timesteps, 1)], 1)
return tensor + sinusoids.unsqueeze(0)
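# --- Editor's illustrative sketch (not part of the original allennlp source).
# The positional features depend only on position and hidden dimension, so the
# same offsets are added to every sequence in the batch.
def _example_add_positional_features():
    import torch
    tensor = torch.zeros(2, 5, 8)  # (batch_size, timesteps, hidden_dim)
    augmented = add_positional_features(tensor)
    # With a zero input the output is exactly the sinusoidal encoding,
    # identical across the batch.
    assert torch.allclose(augmented[0], augmented[1])
    # At position 0 the sin half is 0 and the cos half is 1.
    assert torch.allclose(augmented[0, 0, 4:], torch.ones(4))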
def clone(module: torch.nn.Module, num_copies: int) -> torch.nn.ModuleList:
"""Produce N identical layers."""
return torch.nn.ModuleList(copy.deepcopy(module) for _ in range(num_copies))
def combine_initial_dims(tensor: torch.Tensor) -> torch.Tensor:
"""
Given a (possibly higher order) tensor of ids with shape
(d1, ..., dn, sequence_length)
Return a view that's (d1 * ... * dn, sequence_length).
If original tensor is 1-d or 2-d, return it as is.
"""
if tensor.dim() <= 2:
return tensor
else:
return tensor.view(-1, tensor.size(-1))
def uncombine_initial_dims(tensor: torch.Tensor, original_size: torch.Size) -> torch.Tensor:
"""
Given a tensor of embeddings with shape
(d1 * ... * dn, sequence_length, embedding_dim)
and the original shape
(d1, ..., dn, sequence_length),
return the reshaped tensor of embeddings with shape
(d1, ..., dn, sequence_length, embedding_dim).
If original size is 1-d or 2-d, return it as is.
"""
if len(original_size) <= 2:
return tensor
else:
view_args = list(original_size) + [tensor.size(-1)]
return tensor.view(*view_args)
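# --- Editor's illustrative sketch (not part of the original allennlp source).
# Round-trips a higher-order id tensor through an embedding-like step using
# `combine_initial_dims` / `uncombine_initial_dims`.
def _example_combine_and_uncombine_initial_dims():
    import torch
    ids = torch.randint(0, 10, (2, 3, 4))  # e.g. (batch, num_sentences, sequence_length)
    flat_ids = combine_initial_dims(ids)
    assert flat_ids.shape == (6, 4)
    # A stand-in for an embedding lookup that adds a trailing embedding dimension.
    embedded = torch.nn.functional.one_hot(flat_ids, num_classes=10).float()
    restored = uncombine_initial_dims(embedded, ids.size())
    assert restored.shape == (2, 3, 4, 10)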
def inspect_parameters(module: torch.nn.Module, quiet: bool = False) -> Dict[str, Any]:
"""
Inspects the model/module parameters and their tunability. The output is structured
in a nested dict so that parameters in same sub-modules are grouped together.
This can be helpful to setup module path based regex, for example in initializer.
    It prints the results by default (pass `quiet=True` to disable printing) and returns the
    inspection dict. E.g. output::
{
"_text_field_embedder": {
"token_embedder_tokens": {
"_projection": {
"bias": "tunable",
"weight": "tunable"
},
"weight": "frozen"
}
}
}
"""
results: Dict[str, Any] = {}
for name, param in sorted(module.named_parameters()):
keys = name.split(".")
write_to = results
for key in keys[:-1]:
if key not in write_to:
write_to[key] = {}
write_to = write_to[key]
write_to[keys[-1]] = "tunable" if param.requires_grad else "frozen"
if not quiet:
print(json.dumps(results, indent=4))
return results
def find_text_field_embedder(model: torch.nn.Module) -> torch.nn.Module:
"""
Takes a `Model` and returns the `Module` that is a `TextFieldEmbedder`. We return just the
first one, as it's very rare to have more than one. If there isn't a `TextFieldEmbedder` in the
given `Model`, we raise a `ValueError`.
"""
from allennlp.modules.text_field_embedders.text_field_embedder import TextFieldEmbedder
for module in model.modules():
if isinstance(module, TextFieldEmbedder):
return module
raise ValueError("Couldn't find TextFieldEmbedder!")
def find_embedding_layer(model: torch.nn.Module) -> torch.nn.Module:
"""
Takes a model (typically an AllenNLP `Model`, but this works for any `torch.nn.Module`) and
makes a best guess about which module is the embedding layer. For typical AllenNLP models,
this often is the `TextFieldEmbedder`, but if you're using a pre-trained contextualizer, we
really want layer 0 of that contextualizer, not the output. So there are a bunch of hacks in
here for specific pre-trained contextualizers.
"""
# We'll look for a few special cases in a first pass, then fall back to just finding a
# TextFieldEmbedder in a second pass if we didn't find a special case.
from transformers.models.gpt2.modeling_gpt2 import GPT2Model
from transformers.models.bert.modeling_bert import BertEmbeddings
from transformers.models.albert.modeling_albert import AlbertEmbeddings
from transformers.models.roberta.modeling_roberta import RobertaEmbeddings
from allennlp.modules.text_field_embedders.text_field_embedder import TextFieldEmbedder
from allennlp.modules.text_field_embedders.basic_text_field_embedder import (
BasicTextFieldEmbedder,
)
from allennlp.modules.token_embedders.embedding import Embedding
for module in model.modules():
if isinstance(module, BertEmbeddings):
return module.word_embeddings
if isinstance(module, RobertaEmbeddings):
return module.word_embeddings
if isinstance(module, AlbertEmbeddings):
return module.word_embeddings
if isinstance(module, GPT2Model):
return module.wte
for module in model.modules():
if isinstance(module, TextFieldEmbedder):
if isinstance(module, BasicTextFieldEmbedder):
# We'll have a check for single Embedding cases, because we can be more efficient
# in cases like this. If this check fails, then for something like hotflip we need
# to actually run the text field embedder and construct a vector for each token.
if len(module._token_embedders) == 1:
embedder = list(module._token_embedders.values())[0]
if isinstance(embedder, Embedding):
if embedder._projection is None:
# If there's a projection inside the Embedding, then we need to return
# the whole TextFieldEmbedder, because there's more computation that
# needs to be done than just multiply by an embedding matrix.
return embedder
return module
raise RuntimeError("No embedding module found!")
def get_token_offsets_from_text_field_inputs(
text_field_inputs: List[Any],
) -> Optional[torch.Tensor]:
"""
Given a list of inputs to a TextFieldEmbedder, tries to find token offsets from those inputs, if
there are any. You will have token offsets if you are using a mismatched token embedder; if
you're not, the return value from this function should be None. This function is intended to be
called from a `forward_hook` attached to a `TextFieldEmbedder`, so the inputs are formatted just
as a list.
It's possible in theory that you could have multiple offsets as inputs to a single call to a
`TextFieldEmbedder`, but that's an extremely rare use case (I can't really imagine anyone
wanting to do that). In that case, we'll only return the first one. If you need different
behavior for your model, open an issue on github describing what you're doing.
"""
for input_index, text_field_input in enumerate(text_field_inputs):
if not isinstance(text_field_input, dict):
continue
for input_value in text_field_input.values():
if not isinstance(input_value, dict):
continue
for embedder_arg_name, embedder_arg_value in input_value.items():
if embedder_arg_name == "offsets":
return embedder_arg_value
return None
def extend_layer(layer: torch.nn.Module, new_dim: int) -> None:
valid_layers = [torch.nn.Linear, torch.nn.Bilinear]
if not any([isinstance(layer, i) for i in valid_layers]):
raise ConfigurationError("Inappropriate layer type")
extend_dim = new_dim - layer.out_features
if not extend_dim:
return layer
if isinstance(layer, torch.nn.Linear):
new_weight = torch.FloatTensor(extend_dim, layer.in_features)
elif isinstance(layer, torch.nn.Bilinear):
new_weight = torch.FloatTensor(extend_dim, layer.in1_features, layer.in2_features)
new_bias = torch.FloatTensor(extend_dim)
torch.nn.init.xavier_uniform_(new_weight)
torch.nn.init.zeros_(new_bias)
device = layer.weight.device
layer.weight = torch.nn.Parameter(
torch.cat([layer.weight.data, new_weight.to(device)], dim=0),
requires_grad=layer.weight.requires_grad,
)
layer.bias = torch.nn.Parameter(
torch.cat([layer.bias.data, new_bias.to(device)], dim=0),
requires_grad=layer.bias.requires_grad,
)
layer.out_features = new_dim
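# --- Editor's illustrative sketch (not part of the original allennlp source).
# Grows the output dimension of a linear layer in place while preserving the
# existing weights.
def _example_extend_layer():
    import torch
    layer = torch.nn.Linear(4, 3)
    old_weight = layer.weight.data.clone()
    extend_layer(layer, 5)
    assert layer.out_features == 5
    assert layer.weight.shape == (5, 4) and layer.bias.shape == (5,)
    # The original rows are untouched; two freshly initialized rows were appended.
    assert torch.equal(layer.weight.data[:3], old_weight)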
def masked_topk(
input_: torch.FloatTensor,
mask: torch.BoolTensor,
k: Union[int, torch.LongTensor],
dim: int = -1,
) -> Tuple[torch.FloatTensor, torch.BoolTensor, torch.LongTensor]:
"""
Extracts the top-k items along a certain dimension. This is similar to `torch.topk` except:
    (1) we allow a `mask` that makes the function not consider certain elements;
(2) the returned top input, mask, and indices are sorted in their original order in the input;
    (3) you may use the same `k` for all dimensions, or a different `k` for each.
# Parameters
input_ : `torch.FloatTensor`, required.
A tensor containing the items that we want to prune.
mask : `torch.BoolTensor`, required.
A tensor with the same shape as `input_` that makes the function not consider masked out
(i.e. False) elements.
k : `Union[int, torch.LongTensor]`, required.
If a tensor of shape as `input_` except without dimension `dim`, specifies the number of
items to keep for each dimension.
If an int, keep the same number of items for all dimensions.
# Returns
top_input : `torch.FloatTensor`
The values of the top-k scoring items.
Has the same shape as `input_` except dimension `dim` has value `k` when it's an `int`
or `k.max()` when it's a tensor.
top_mask : `torch.BoolTensor`
The corresponding mask for `top_input`.
        Has the same shape as `top_input`.
    top_indices : `torch.LongTensor`
The indices of the top-k scoring items into the original `input_`
tensor. This is returned because it can be useful to retain pointers to
the original items, if each item is being scored by multiple distinct
scorers, for instance.
        Has the same shape as `top_input`.
"""
if input_.size() != mask.size():
raise ValueError("`input_` and `mask` must have the same shape.")
if not -input_.dim() <= dim < input_.dim():
raise ValueError("`dim` must be in `[-input_.dim(), input_.dim())`")
dim = (dim + input_.dim()) % input_.dim()
max_k = k if isinstance(k, int) else k.max()
# We put the dim in question to the last dimension by permutation, and squash all leading dims.
# [0, 1, ..., dim - 1, dim + 1, ..., input.dim() - 1, dim]
permutation = list(range(input_.dim()))
permutation.pop(dim)
permutation += [dim]
# [0, 1, ..., dim - 1, -1, dim, ..., input.dim() - 2]; for restoration
reverse_permutation = list(range(input_.dim() - 1))
reverse_permutation.insert(dim, -1)
other_dims_size = list(input_.size())
other_dims_size.pop(dim)
permuted_size = other_dims_size + [max_k] # for restoration
# If an int was given for number of items to keep, construct tensor by repeating the value.
if isinstance(k, int):
# Put the tensor on same device as the mask.
k = k * torch.ones(*other_dims_size, dtype=torch.long, device=mask.device)
else:
if list(k.size()) != other_dims_size:
raise ValueError(
"`k` must have the same shape as `input_` with dimension `dim` removed."
)
num_items = input_.size(dim)
# (batch_size, num_items) -- "batch_size" refers to all other dimensions stacked together
input_ = input_.permute(*permutation).reshape(-1, num_items)
mask = mask.permute(*permutation).reshape(-1, num_items)
k = k.reshape(-1)
# Make sure that we don't select any masked items by setting their scores to be very
# negative.
input_ = replace_masked_values(input_, mask, min_value_of_dtype(input_.dtype))
# Shape: (batch_size, max_k)
_, top_indices = input_.topk(max_k, 1)
# Mask based on number of items to keep for each sentence.
# Shape: (batch_size, max_k)
top_indices_mask = get_mask_from_sequence_lengths(k, max_k).bool()
# Fill all masked indices with largest "top" index for that sentence, so that all masked
# indices will be sorted to the end.
# Shape: (batch_size, 1)
fill_value, _ = top_indices.max(dim=1, keepdim=True)
# Shape: (batch_size, max_num_items_to_keep)
top_indices = torch.where(top_indices_mask, top_indices, fill_value)
# Now we order the selected indices in increasing order with
# respect to their indices (and hence, with respect to the
# order they originally appeared in the `embeddings` tensor).
top_indices, _ = top_indices.sort(1)
# Combine the masks on spans that are out-of-bounds, and the mask on spans that are outside
# the top k for each sentence.
# Shape: (batch_size, max_k)
sequence_mask = mask.gather(1, top_indices)
top_mask = top_indices_mask & sequence_mask
# Shape: (batch_size, max_k)
top_input = input_.gather(1, top_indices)
return (
top_input.reshape(*permuted_size).permute(*reverse_permutation),
top_mask.reshape(*permuted_size).permute(*reverse_permutation),
top_indices.reshape(*permuted_size).permute(*reverse_permutation),
)
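# --- Editor's illustrative sketch (not part of the original allennlp source).
# Keeps the top-2 scores per row while ignoring masked positions, returning
# them in their original order within each row.
def _example_masked_topk():
    import torch
    scores = torch.tensor([[0.1, 0.9, 0.5, 0.7], [0.8, 0.2, 0.6, 0.4]])
    mask = torch.tensor([[True, True, True, False], [True, True, True, True]])
    top_scores, top_mask, top_indices = masked_topk(scores, mask, k=2)
    # 0.7 is masked out in the first row, so 0.9 and 0.5 win there.
    assert top_indices.tolist() == [[1, 2], [0, 2]]
    assert top_mask.all()
    assert torch.allclose(top_scores, torch.tensor([[0.9, 0.5], [0.8, 0.6]]))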
def info_value_of_dtype(dtype: torch.dtype):
"""
Returns the `finfo` or `iinfo` object of a given PyTorch data type. Does not allow torch.bool.
"""
if dtype == torch.bool:
raise TypeError("Does not support torch.bool")
elif dtype.is_floating_point:
return torch.finfo(dtype)
else:
return torch.iinfo(dtype)
def min_value_of_dtype(dtype: torch.dtype):
"""
Returns the minimum value of a given PyTorch data type. Does not allow torch.bool.
"""
return info_value_of_dtype(dtype).min
def max_value_of_dtype(dtype: torch.dtype):
"""
Returns the maximum value of a given PyTorch data type. Does not allow torch.bool.
"""
return info_value_of_dtype(dtype).max
def tiny_value_of_dtype(dtype: torch.dtype):
"""
Returns a moderately tiny value for a given PyTorch data type that is used to avoid numerical
issues such as division by zero.
    This is different from `info_value_of_dtype(dtype).tiny` because that value can cause some NaN bugs.
Only supports floating point dtypes.
"""
if not dtype.is_floating_point:
raise TypeError("Only supports floating point dtypes.")
if dtype == torch.float or dtype == torch.double:
return 1e-13
elif dtype == torch.half:
return 1e-4
else:
raise TypeError("Does not support dtype " + str(dtype))
| allennlp-master | allennlp/nn/util.py |
"""
An `Activation` is just a function
that takes some parameters and returns an element-wise activation function.
For the most part we just use
[PyTorch activations](https://pytorch.org/docs/master/nn.html#non-linear-activations).
Here we provide a thin wrapper to allow registering them and instantiating them `from_params`.
The available activation functions are
* "linear"
* ["mish"](https://arxiv.org/abs/1908.08681)
* ["swish"](https://arxiv.org/abs/1710.05941)
* ["relu"](https://pytorch.org/docs/master/nn.html#torch.nn.ReLU)
* ["relu6"](https://pytorch.org/docs/master/nn.html#torch.nn.ReLU6)
* ["elu"](https://pytorch.org/docs/master/nn.html#torch.nn.ELU)
* ["prelu"](https://pytorch.org/docs/master/nn.html#torch.nn.PReLU)
* ["leaky_relu"](https://pytorch.org/docs/master/nn.html#torch.nn.LeakyReLU)
* ["threshold"](https://pytorch.org/docs/master/nn.html#torch.nn.Threshold)
* ["hardtanh"](https://pytorch.org/docs/master/nn.html#torch.nn.Hardtanh)
* ["sigmoid"](https://pytorch.org/docs/master/nn.html#torch.nn.Sigmoid)
* ["tanh"](https://pytorch.org/docs/master/nn.html#torch.nn.Tanh)
* ["log_sigmoid"](https://pytorch.org/docs/master/nn.html#torch.nn.LogSigmoid)
* ["softplus"](https://pytorch.org/docs/master/nn.html#torch.nn.Softplus)
* ["softshrink"](https://pytorch.org/docs/master/nn.html#torch.nn.Softshrink)
* ["softsign"](https://pytorch.org/docs/master/nn.html#torch.nn.Softsign)
* ["tanhshrink"](https://pytorch.org/docs/master/nn.html#torch.nn.Tanhshrink)
* ["selu"](https://pytorch.org/docs/master/nn.html#torch.nn.SELU)
"""
from typing import Callable
import torch
from overrides import overrides
from allennlp.common import Registrable
class Activation(torch.nn.Module, Registrable):
"""
Pytorch has a number of built-in activation functions. We group those here under a common
type, just to make it easier to configure and instantiate them `from_params` using
`Registrable`.
Note that we're only including element-wise activation functions in this list. You really need
to think about masking when you do a softmax or other similar activation function, so it
requires a different API.
"""
def __call__(self, tensor: torch.Tensor) -> torch.Tensor:
"""
This function is here just to make mypy happy. We expect activation functions to follow
this API; the builtin pytorch activation functions follow this just fine, even though they
don't subclass `Activation`. We're just making it explicit here, so mypy knows that
activations are callable like this.
"""
raise NotImplementedError
class _ActivationLambda(torch.nn.Module):
"""Wrapper around non PyTorch, lambda based activations to display them as modules whenever printing model."""
def __init__(self, func: Callable[[torch.Tensor], torch.Tensor], name: str):
super().__init__()
self._name = name
self._func = func
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._func(x)
@overrides
def _get_name(self):
return self._name
# There are no classes to decorate, so we hack these into Registrable._registry.
# If you want to instantiate it, you can do like this:
# Activation.by_name('relu')()
Registrable._registry[Activation] = {
"linear": (lambda: _ActivationLambda(lambda x: x, "Linear"), None), # type: ignore
"mish": ( # type: ignore
lambda: _ActivationLambda(
lambda x: x * torch.tanh(torch.nn.functional.softplus(x)), "Mish"
),
None,
),
"swish": (lambda: _ActivationLambda(lambda x: x * torch.sigmoid(x), "Swish"), None), # type: ignore
"relu": (torch.nn.ReLU, None),
"relu6": (torch.nn.ReLU6, None),
"elu": (torch.nn.ELU, None),
"prelu": (torch.nn.PReLU, None),
"leaky_relu": (torch.nn.LeakyReLU, None),
"threshold": (torch.nn.Threshold, None),
"hardtanh": (torch.nn.Hardtanh, None),
"sigmoid": (torch.nn.Sigmoid, None),
"tanh": (torch.nn.Tanh, None),
"log_sigmoid": (torch.nn.LogSigmoid, None),
"softplus": (torch.nn.Softplus, None),
"softshrink": (torch.nn.Softshrink, None),
"softsign": (torch.nn.Softsign, None),
"tanhshrink": (torch.nn.Tanhshrink, None),
"selu": (torch.nn.SELU, None),
"gelu": (torch.nn.GELU, None),
}
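# --- Editor's illustrative sketch (not part of the original allennlp source).
# Activations are looked up by name and instantiated with no arguments (except
# entries like "threshold" whose underlying module requires constructor args).
def _example_activation_by_name():
    import torch
    relu = Activation.by_name("relu")()
    mish = Activation.by_name("mish")()
    x = torch.tensor([-1.0, 0.0, 2.0])
    assert torch.equal(relu(x), torch.tensor([0.0, 0.0, 2.0]))
    assert mish(x).shape == x.shape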
| allennlp-master | allennlp/nn/activations.py |
from allennlp.nn.activations import Activation
from allennlp.nn.initializers import Initializer, InitializerApplicator
from allennlp.nn.regularizers import RegularizerApplicator
| allennlp-master | allennlp/nn/__init__.py |
from inspect import signature
from typing import List, Callable, Tuple, Dict, cast, TypeVar
import warnings
from overrides import overrides
import torch
from allennlp.common import FromParams, Registrable
from allennlp.common.checks import ConfigurationError
from allennlp.nn.util import min_value_of_dtype
StateType = Dict[str, torch.Tensor]
StepFunctionTypeWithTimestep = Callable[
[torch.Tensor, StateType, int], Tuple[torch.Tensor, StateType]
]
StepFunctionTypeNoTimestep = Callable[[torch.Tensor, StateType], Tuple[torch.Tensor, StateType]]
StepFunctionType = TypeVar(
"StepFunctionType", StepFunctionTypeWithTimestep, StepFunctionTypeNoTimestep
)
"""
The type of step function that can be passed to [`BeamSearch.search`](#search).
This can either be [`StepFunctionTypeWithTimestep`](#stepfunctiontypewithtimestep)
or [`StepFunctionTypeNoTimestep`](#stepfunctiontypenotimestep).
"""
class Sampler(Registrable):
"""
An abstract class that can be used to sample candidates (either nodes or beams)
within `BeamSearch`.
A `Sampler` just has three methods, `init_state()`, `sample_nodes()` and `sample_beams()`.
`init_state()` takes three arguments:
    - a tensor of starting log probs with shape `(batch_size, num_classes)`,
- the batch size, an int,
- and the number of classes, also an int.
It returns a state dictionary with any state tensors needed for subsequent
calls to `sample_nodes()` and `sample_beams()`.
By default this method just returns an empty dictionary.
Both `sample_nodes()` and `sample_beams()` should take three arguments:
- tensor of normalized log probabilities with shape `(batch_size, num_examples)`,
- an integer representing the number of samples to take for each example in the batch,
- and a state dictionary which could contain any tensors needed for the `Sampler` to keep
track of state.
For `sample_nodes()`, `num_examples = num_classes`, but for `sample_beams`,
`num_examples = beam_size * per_node_beam_size`.
The return value should be a tuple containing:
- a tensor of log probabilities of the sampled examples with shape `(batch_size, num_samples)`,
- a tensor of indices of the sampled examples with shape `(batch_size, num_samples)`,
- and the updated state dictionary.
A default implementation of `sample_beams` is provided, which just deterministically
picks the `k` examples with highest log probability.
"""
default_implementation = "deterministic"
def init_state(
self, start_class_log_probabilities: torch.Tensor, batch_size: int, num_classes: int
) -> StateType:
return {}
def sample_nodes(
self, log_probs: torch.Tensor, per_node_beam_size: int, state: StateType
) -> Tuple[torch.Tensor, torch.Tensor, StateType]:
raise NotImplementedError
def sample_beams(
self, log_probs: torch.Tensor, beam_size: int, state: StateType
) -> Tuple[torch.Tensor, torch.Tensor, StateType]:
selected_log_probs, selected_indices = torch.topk(log_probs, beam_size, dim=-1)
return selected_log_probs, selected_indices, {}
@Sampler.register("deterministic")
class DeterministicSampler(Sampler):
"""
A `Sampler` that just deterministically returns the `k` nodes or beams with highest
log probability.
"""
@overrides
def sample_nodes(
self, log_probs: torch.Tensor, per_node_beam_size: int, state: StateType
) -> Tuple[torch.Tensor, torch.Tensor, StateType]:
selected_log_probs, selected_indices = torch.topk(log_probs, per_node_beam_size, dim=-1)
return selected_log_probs, selected_indices, {}
@Sampler.register("multinomial")
class MultinomialSampler(Sampler):
"""
A `Sampler` which samples nodes from the given multinomial distribution. Beams are sampled
    in the default, deterministic way.
# Parameters
temperature : `float`, optional (default = `1.0`)
A `temperature` below 1.0 produces a sharper probability distribution and a `temperature` above 1.0
produces a flatter probability distribution.
with_replacement : `bool`, optional (default = `False`)
Whether to sample with replacement.
"""
def __init__(
self,
temperature: float = 1.0,
with_replacement: bool = False,
) -> None:
self.temperature = temperature
self.with_replacement = with_replacement
@overrides
def sample_nodes(
self, log_probs: torch.Tensor, per_node_beam_size: int, state: StateType
) -> Tuple[torch.Tensor, torch.Tensor, StateType]:
if self.temperature != 1.0:
_probabilities = torch.nn.functional.softmax(log_probs / self.temperature, dim=-1)
else:
_probabilities = log_probs.exp()
selected_indices = torch.multinomial(
_probabilities, per_node_beam_size, replacement=self.with_replacement
)
return torch.gather(log_probs, 1, selected_indices), selected_indices, state
@Sampler.register("top-k")
class TopKSampler(Sampler):
"""
A `Sampler` which redistributes the probability mass function for nodes among the
top `k` choices, then samples from that subset after re-normalizing the probabilities.
Beams are sampled in the default, deterministic way.
# Parameters
k : `int`, optional (default = `1`)
The number of top choices to be selected from.
temperature : `float`, optional (default = `1.0`)
A `temperature` below 1.0 produces a sharper probability distribution and a `temperature`
above 1.0 produces a flatter probability distribution.
with_replacement: `bool`, optional, (default = `False`)
If set to `True`, samples will be selected with replacement from the top k choices.
"""
def __init__(
self,
k: int = 1,
temperature: float = 1.0,
with_replacement: bool = False,
):
self.k = k
self.temperature = temperature or 1.0
self.with_replacement = with_replacement
@overrides
def sample_nodes(
self, log_probs: torch.Tensor, per_node_beam_size: int, state: StateType
) -> Tuple[torch.Tensor, torch.Tensor, StateType]:
if not per_node_beam_size <= self.k <= log_probs.size()[1]:
raise ValueError(
"k must be a postive integer no less than per_node_beam_size and no greater than vocabulary size"
)
# shape (both): (batch_size, k)
top_k_log_probs, top_k_indices = log_probs.topk(self.k, dim=-1)
# Apply temperature if necessary.
# shape: (batch_size, k)
if self.temperature != 1.0:
top_k_log_probs = top_k_log_probs / self.temperature
# Re-normalize the subset.
# shape: (batch_size, k)
normalized_top_k_probs = torch.nn.functional.softmax(top_k_log_probs, dim=-1)
# Sample from the re-normalized subset.
# NOTE: These indices are not indices into `log_probs`, they are indices into `top_k_log_probs`.
# shape: (batch_size, per_node_beam_size)
sampled_indices = torch.multinomial(
normalized_top_k_probs, per_node_beam_size, replacement=self.with_replacement
)
# Convert `sampled_indices` back to indices in the original `log_probs` tensor.
# shape: (batch_size, per_node_beam_size)
indices = top_k_indices.gather(-1, sampled_indices)
return log_probs.gather(1, indices), indices, state
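# --- Editor's illustrative sketch (not part of the original allennlp source).
# Samples two candidate next tokens per node, restricted to each row's three
# highest-probability classes.
def _example_top_k_sampler():
    import torch
    log_probs = torch.nn.functional.log_softmax(torch.randn(4, 10), dim=-1)
    sampled_log_probs, sampled_indices, _ = TopKSampler(k=3).sample_nodes(log_probs, 2, {})
    assert sampled_indices.shape == (4, 2)
    # Every sampled index comes from that row's top 3 classes.
    top3 = log_probs.topk(3, dim=-1).indices
    for row in range(4):
        for index in sampled_indices[row].tolist():
            assert index in top3[row].tolist()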
@Sampler.register("top-p")
class TopPSampler(Sampler):
"""
A `Sampler` which redistributes the probability mass function for nodes among
the top choices with a cumulative probability of at least `p`, then samples from that subset
after re-normalizing the probabilities.
Beams are sampled in the default, deterministic way.
# Parameters
p : `float`, optional (default = `0.9`)
The cumulative probability cutoff threshold. A higher value of `p` will result in more possible
        examples to sample from. If `with_replacement` is `False` and the number of possible samples is
        insufficient to sample from without replacement when calling `sample_nodes`, then the top
        `per_node_beam_size` examples will be chosen.
temperature : `float`, optional (default = `1.0`)
A `temperature` below 1.0 produces a sharper probability distribution and a `temperature`
above 1.0 produces a flatter probability distribution.
with_replacement : `bool`, optional, (default = `False`)
If set to `True`, samples will be selected with replacement from the top choices.
"""
def __init__(
self,
p: float = 0.9,
temperature: float = 1.0,
with_replacement: bool = False,
):
if p < 0.0 or p > 1.0:
raise ValueError("p must be a positive float no greater than 1.0")
self.p = p
self.temperature = temperature or 1.0
self.with_replacement = with_replacement
@overrides
def sample_nodes(
self, log_probs: torch.Tensor, per_node_beam_size: int, state: StateType
) -> Tuple[torch.Tensor, torch.Tensor, StateType]:
if not per_node_beam_size <= log_probs.size()[1]:
raise ValueError("per_node_beam_size cannot be greater than vocabulary size")
# First apply temperature coefficient:
if self.temperature != 1.0:
_log_probs = torch.nn.functional.log_softmax(log_probs / self.temperature, dim=-1)
else:
_log_probs = log_probs
# Sort the probabilities in descending order to then find cumulative sum
log_probs_descending, sorting_indices = torch.sort(_log_probs, descending=True)
# shape: (batch_size, num_classes)
probabilities_descending = log_probs_descending.exp()
probabilities_summed = torch.cumsum(probabilities_descending, dim=-1)
# Create a mask for filtering out probabilities that don't make the top `p`.
# shape: (batch_size, num_classes)
exclusion_mask = probabilities_summed >= self.p
# We want to include the first index where probabilities_summed >= p, so we shift over one.
exclusion_mask[..., 1:] = exclusion_mask[..., :-1].clone()
exclusion_mask[..., 0] = False
# Make sure there's at least `per_node_beam_size` options to be selected.
if not self.with_replacement:
exclusion_mask[..., :per_node_beam_size] = False
log_probs_descending[exclusion_mask] = min_value_of_dtype(log_probs.dtype)
        # Now re-normalize the included log probs.
# shape: (batch_size, num_classes)
filtered_probabilities = torch.nn.functional.softmax(log_probs_descending, dim=-1)
# Sample from the re-normalized subset.
# NOTE: These indices are not indices into `log_probs`, they are indices into `log_probs_descending`.
# shape: (batch_size, per_node_beam_size)
sampled_indices = torch.multinomial(
filtered_probabilities, per_node_beam_size, replacement=self.with_replacement
)
# Convert `sampled_indices` back to indices in the original `log_probs` tensor.
# shape: (batch_size, per_node_beam_size)
selected_indices = sorting_indices.gather(-1, sampled_indices)
# Return (selected log probabilities, selected classes)
# shape: (len(log_probs),1) , (len(log_probs), 1)
return torch.gather(log_probs, 1, selected_indices), selected_indices, state
@Sampler.register("gumbel")
class GumbelSampler(Sampler):
"""
A `Sampler` which uses the Gumbel-Top-K trick to sample without replacement. See
[*Stochastic Beams and Where to Find Them: The Gumbel-Top-k Trick for Sampling
    Sequences Without Replacement*, W Kool, H Van Hoof and M Welling, 2019]
(https://api.semanticscholar.org/CorpusID:76662039).
# Parameters
temperature : `float`, optional (default = `1.0`)
A `temperature` below 1.0 produces a sharper probability distribution and a `temperature`
above 1.0 produces a flatter probability distribution.
"""
def __init__(self, temperature: float = 1.0):
self.temperature = temperature
@overrides
def init_state(
self, start_class_log_probabilities: torch.Tensor, batch_size: int, num_classes: int
) -> StateType:
# shape: (batch_size, num_classes)
zeros = start_class_log_probabilities.new_zeros((batch_size, num_classes))
# shape: (batch_size, num_classes)
G_phi_S = self.gumbel_with_max(start_class_log_probabilities, zeros)
return {"G_phi_S": G_phi_S}
@overrides
def sample_nodes(
self,
log_probs: torch.Tensor,
per_node_beam_size: int,
state: StateType,
) -> Tuple[torch.Tensor, torch.Tensor, StateType]:
# First apply temperature coefficient:
# shape: (batch_size * beam_size, num_classes)
if self.temperature != 1.0:
_log_probs = torch.nn.functional.log_softmax(log_probs / self.temperature, dim=-1)
else:
_log_probs = log_probs
# shape: (group_size,)
phi_S = state["phi_S"]
# shape: (group_size, num_classes)
phi_S = phi_S.unsqueeze(-1).expand_as(_log_probs)
# shape: (group_size, num_classes)
phi_S_new = phi_S + _log_probs
# shape: (group_size, 1)
G_phi_S = state["G_phi_S"].unsqueeze(-1)
# shape: (group_size, num_classes)
G_phi_S_new = self.gumbel_with_max(phi_S_new, G_phi_S)
# Replace NaNs with very negative number.
# shape: (group_size, num_classes)
# G_phi_S_new[G_phi_S_new.isnan()] = min_value_of_dtype(G_phi_S_new.dtype)
# shape (both): (group_size, per_node_beam_size)
top_G_phi_S_new, top_indices = torch.topk(G_phi_S_new, per_node_beam_size, dim=-1)
# shape: (group_size, per_node_beam_size)
top_log_probs = log_probs.gather(1, top_indices)
return top_log_probs, top_indices, {"G_phi_S": top_G_phi_S_new}
@overrides
def sample_beams(
self,
log_probs: torch.Tensor,
beam_size: int,
state: StateType,
) -> Tuple[torch.Tensor, torch.Tensor, StateType]:
"""
Returns the beams with the highest perturbed log probabilities.
"""
# shape (log_probs): (batch_size, beam_size * per_node_beam_size)
batch_size = log_probs.size()[0]
# shape: (batch_size * beam_size, per_node_beam_size)
G_phi_S = state["G_phi_S"]
# shape: (batch_size, beam_size * per_node_beam_size)
G_phi_S = G_phi_S.reshape_as(log_probs)
# shape (both): (batch_size, beam_size)
G_phi_S_new, selected_indices = torch.topk(G_phi_S, beam_size, dim=-1)
# shape: (batch_size, beam_size)
selected_log_probs = log_probs.gather(1, selected_indices)
# Now sort the selected beams by their true log prob.
# shape (all): (batch_size, beam_size)
selected_log_probs, sort_indices = selected_log_probs.sort(dim=-1, descending=True)
selected_indices = selected_indices.gather(1, sort_indices)
G_phi_S_new = G_phi_S_new.gather(1, sort_indices)
# shape: (batch_size * beam_size,)
G_phi_S_new = G_phi_S_new.reshape(batch_size * beam_size)
# shape: (batch_size * beam_size,)
phi_S = selected_log_probs.reshape(batch_size * beam_size)
return selected_log_probs, selected_indices, {"G_phi_S": G_phi_S_new, "phi_S": phi_S}
def gumbel(self, phi) -> torch.Tensor:
"""
Sample `Gumbel(phi)`.
`phi` should have shape `(batch_size, num_classes)`.
"""
return -torch.log(-torch.log(torch.rand_like(phi))) + phi
def gumbel_with_max(self, phi, T) -> torch.Tensor:
"""
Sample `Gumbel(phi)` conditioned on the maximum value being equal to `T`.
`phi` should have shape `(batch_size, num_classes)` and `T` should have
shape `(batch_size, 1)`.
"""
# Shape: (batch_size, num_classes)
G_phi = self.gumbel(phi)
# Now we find the maximum from these samples.
# Shape: (batch_size, )
Z, _ = G_phi.max(dim=-1)
# Shape: (batch_size, num_classes)
v = T - G_phi + torch.log1p(-torch.exp(G_phi - Z.unsqueeze(-1)))
# Shape: (batch_size, num_classes)
return T - torch.nn.functional.relu(v) - torch.log1p(torch.exp(-v.abs()))
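# --- Editor's illustrative sketch (not part of the original allennlp source).
# The Gumbel trick perturbs log probabilities so that a top-k over the
# perturbed scores is a sample without replacement; unlike the deterministic
# sampler, repeated draws typically differ.
def _example_gumbel_vs_deterministic_sampling():
    import torch
    log_probs = torch.nn.functional.log_softmax(torch.randn(1, 50), dim=-1)
    deterministic = DeterministicSampler()
    assert torch.equal(
        deterministic.sample_beams(log_probs, 5, {})[1],
        deterministic.sample_beams(log_probs, 5, {})[1],
    )
    gumbel = GumbelSampler()
    state = gumbel.init_state(log_probs, batch_size=1, num_classes=50)
    _, gumbel_indices, _ = gumbel.sample_beams(log_probs, 5, state)
    assert gumbel_indices.shape == (1, 5)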
class BeamSearch(FromParams):
"""
Implements the beam search algorithm for decoding the most likely sequences.
# Parameters
end_index : `int`
The index of the "stop" or "end" token in the target vocabulary.
max_steps : `int`, optional (default = `50`)
The maximum number of decoding steps to take, i.e. the maximum length
of the predicted sequences.
beam_size : `int`, optional (default = `10`)
The width of the beam used.
per_node_beam_size : `int`, optional (default = `beam_size`)
The maximum number of candidates to consider per node, at each step in the search.
If not given, this just defaults to `beam_size`. Setting this parameter
to a number smaller than `beam_size` may give better results, as it can introduce
more diversity into the search. See
[*Beam Search Strategies for Neural Machine Translation*, Freitag and Al-Onaizan, 2017]
(https://api.semanticscholar.org/CorpusID:2229477).
sampler : `Sampler`, optional (default = `None`)
An optional `Sampler` which is used to pick next candidate nodes and beams.
If not specified, `DeterministicSampler` will be used, which just takes the
`per_node_beam_size` most likely nodes and the `beam_size` most likely beams.
Using the [`GumbelSampler`](#gumbelsampler), on the other hand, will give you
[Stochastic Beam Search](https://api.semanticscholar.org/CorpusID:76662039).
"""
def __init__(
self,
end_index: int,
max_steps: int = 50,
beam_size: int = 10,
per_node_beam_size: int = None,
sampler: Sampler = None,
) -> None:
if not max_steps > 0:
raise ValueError("max_steps must be positive")
if not beam_size > 0:
raise ValueError("beam_size must be positive")
if per_node_beam_size is not None and not per_node_beam_size > 0:
raise ValueError("per_node_beam_size must be positive")
self._end_index = end_index
self.max_steps = max_steps
self.beam_size = beam_size
self.per_node_beam_size = per_node_beam_size or beam_size
self.sampler = sampler or DeterministicSampler()
@staticmethod
def _reconstruct_sequences(predictions, backpointers):
# Reconstruct the sequences.
# shape: [(batch_size, beam_size, 1)]
reconstructed_predictions = [predictions[-1].unsqueeze(2)]
if not backpointers:
return reconstructed_predictions
# shape: (batch_size, beam_size)
cur_backpointers = backpointers[-1]
for timestep in range(len(predictions) - 2, 0, -1):
# shape: (batch_size, beam_size, 1)
cur_preds = predictions[timestep].gather(1, cur_backpointers).unsqueeze(2)
reconstructed_predictions.append(cur_preds)
# shape: (batch_size, beam_size)
cur_backpointers = backpointers[timestep - 1].gather(1, cur_backpointers)
# shape: (batch_size, beam_size, 1)
final_preds = predictions[0].gather(1, cur_backpointers).unsqueeze(2)
reconstructed_predictions.append(final_preds)
return reconstructed_predictions
@torch.no_grad()
def search(
self,
start_predictions: torch.Tensor,
start_state: StateType,
step: StepFunctionType,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Given a starting state and a step function, apply beam search to find the
most likely target sequences.
# Notes
If your step function returns `-inf` for some log probabilities
(like if you're using a masked log-softmax) then some of the "best"
        sequences returned may also have `-inf` log probability. Specifically
        this happens when the beam size is larger than the number of actions
        with finite log probability (non-zero probability) returned by the step function.
Therefore if you're using a mask you may want to check the results from `search`
and potentially discard sequences with non-finite log probability.
# Parameters
start_predictions : `torch.Tensor`
A tensor containing the initial predictions with shape `(batch_size,)`.
Usually the initial predictions are just the index of the "start" token
in the target vocabulary.
start_state : `StateType`
The initial state passed to the `step` function. Each value of the state dict
should be a tensor of shape `(batch_size, *)`, where `*` means any other
number of dimensions.
step : `StepFunctionType`
A function that is responsible for computing the next most likely tokens,
given the current state and the predictions from the last time step.
The function should accept two or three arguments:
- a tensor of shape `(group_size,)` representing the index of the predicted
tokens from the last time step,
- the current state, a `StateType`, and
- optionally, the timestep, an `int`.
The `group_size` will be `batch_size * beam_size`, except in the initial
step, for which it will just be `batch_size`.
The function is expected to return a tuple, where the first element
is a tensor of shape `(group_size, target_vocab_size)` containing
the log probabilities of the tokens for the next step, and the second
element is the updated state. The tensor in the state should have shape
`(group_size, *)`, where `*` means any other number of dimensions.
# Returns
`Tuple[torch.Tensor, torch.Tensor]`
Tuple of `(predictions, log_probabilities)`, where `predictions`
has shape `(batch_size, beam_size, max_steps)` and `log_probabilities`
has shape `(batch_size, beam_size)`.
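        # Example

        A minimal sketch of calling `search`; the step function, vocabulary size, and
        indices below are purely illustrative and not part of the library:

        ```python
        def step(last_predictions, state, timestep):
            # Uniform log-probabilities over a toy vocabulary of size 5; the
            # (empty) state dict is passed through unchanged.
            log_probs = torch.log(torch.full((last_predictions.size(0), 5), 0.2))
            return log_probs, state

        beam_search = BeamSearch(end_index=4, max_steps=10, beam_size=3)
        start_predictions = torch.zeros(2, dtype=torch.long)  # batch_size = 2
        top_k_predictions, log_probabilities = beam_search.search(start_predictions, {}, step)
        # top_k_predictions: (2, 3, num_decoded_steps), log_probabilities: (2, 3)
        ```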
"""
step_signature = signature(step)
if len(step_signature.parameters) < 3:
# If the step function we're given does not take the time step argument, wrap it
# in one that does.
old_step = cast(StepFunctionTypeNoTimestep, step)
def new_step(
last_predictions: torch.Tensor, state: Dict[str, torch.Tensor], time_step: int
):
return old_step(last_predictions, state)
return self._search(start_predictions, start_state, new_step)
else:
return self._search(
start_predictions, start_state, cast(StepFunctionTypeWithTimestep, step)
)
def _search(
self,
start_predictions: torch.Tensor,
start_state: StateType,
step: StepFunctionTypeWithTimestep,
) -> Tuple[torch.Tensor, torch.Tensor]:
batch_size = start_predictions.size()[0]
# List of (batch_size, beam_size) tensors. One for each time step. Does not
# include the start symbols, which are implicit.
predictions: List[torch.Tensor] = []
# List of (batch_size, beam_size) tensors. One for each time step. None for
# the first. Stores the index n for the parent prediction, i.e.
# predictions[t-1][i][n], that it came from.
backpointers: List[torch.Tensor] = []
# Calculate the first timestep. This is done outside the main loop
# because we are going from a single decoder input (the output from the
# encoder) to the top `beam_size` decoder outputs. On the other hand,
# within the main loop we are going from the `beam_size` elements of the
# beam to `beam_size`^2 candidates from which we will select the top
# `beam_size` elements for the next iteration.
# shape: (batch_size, num_classes)
start_class_log_probabilities, state = step(start_predictions, start_state, 0)
num_classes = start_class_log_probabilities.size()[1]
# Make sure `per_node_beam_size` is not larger than `num_classes`.
if self.per_node_beam_size > num_classes:
raise ConfigurationError(
f"Target vocab size ({num_classes:d}) too small "
f"relative to per_node_beam_size ({self.per_node_beam_size:d}).\n"
f"Please decrease beam_size or per_node_beam_size."
)
sampler_state = self.sampler.init_state(
start_class_log_probabilities, batch_size, num_classes
)
        # Get the initial predicted classes and their log probabilities.
# shape: (batch_size, beam_size), (batch_size, beam_size)
(
start_top_log_probabilities,
start_predicted_classes,
sampler_state,
) = self.sampler.sample_beams(start_class_log_probabilities, self.beam_size, sampler_state)
if self.beam_size == 1 and (start_predicted_classes == self._end_index).all():
warnings.warn(
"Empty sequences predicted. You may want to increase the beam size or ensure "
"your step function is working properly.",
RuntimeWarning,
)
return start_predicted_classes.unsqueeze(-1), start_top_log_probabilities
# The log probabilities for the last time step.
# shape: (batch_size, beam_size)
last_log_probabilities = start_top_log_probabilities
# shape: [(batch_size, beam_size)]
predictions.append(start_predicted_classes)
# Log probability tensor that mandates that the end token is selected.
# shape: (batch_size * beam_size, num_classes)
log_probs_after_end = start_class_log_probabilities.new_full(
(batch_size * self.beam_size, num_classes), float("-inf")
)
log_probs_after_end[:, self._end_index] = 0.0
# Set the same state for each element in the beam.
self._update_initial_state(state, batch_size)
for timestep in range(self.max_steps - 1):
# shape: (batch_size * beam_size,)
last_predictions = predictions[-1].reshape(batch_size * self.beam_size)
# If every predicted token from the last step is `self._end_index`,
# then we can stop early.
if (last_predictions == self._end_index).all():
break
            # Take a step. This gets the predicted log probs of the next classes
# and updates the state.
# shape: (batch_size * beam_size, num_classes)
class_log_probabilities, state = step(last_predictions, state, timestep + 1)
# shape: (batch_size * beam_size, num_classes)
last_predictions_expanded = last_predictions.unsqueeze(-1).expand(
batch_size * self.beam_size, num_classes
)
# Here we are finding any beams where we predicted the end token in
# the previous timestep and replacing the distribution with a
# one-hot distribution, forcing the beam to predict the end token
# this timestep as well.
# shape: (batch_size * beam_size, num_classes)
cleaned_log_probabilities = torch.where(
last_predictions_expanded == self._end_index,
log_probs_after_end,
class_log_probabilities,
)
# shape (both): (batch_size * beam_size, per_node_beam_size)
top_log_probabilities, predicted_classes, sampler_state = self.sampler.sample_nodes(
cleaned_log_probabilities, self.per_node_beam_size, sampler_state
)
# Here we expand the last log probabilities to (batch_size * beam_size, per_node_beam_size)
# so that we can add them to the current log probs for this timestep.
# This lets us maintain the log probability of each element on the beam.
# shape: (batch_size * beam_size, per_node_beam_size)
expanded_last_log_probabilities = (
last_log_probabilities.unsqueeze(2)
.expand(batch_size, self.beam_size, self.per_node_beam_size)
.reshape(batch_size * self.beam_size, self.per_node_beam_size)
)
# shape: (batch_size * beam_size, per_node_beam_size)
summed_top_log_probabilities = top_log_probabilities + expanded_last_log_probabilities
# shape: (batch_size, beam_size * per_node_beam_size)
reshaped_summed = summed_top_log_probabilities.reshape(
batch_size, self.beam_size * self.per_node_beam_size
)
# shape: (batch_size, beam_size * per_node_beam_size)
reshaped_predicted_classes = predicted_classes.reshape(
batch_size, self.beam_size * self.per_node_beam_size
)
# Keep only the top `beam_size` beam indices.
# shape (both): (batch_size, beam_size)
(
restricted_beam_log_probs,
restricted_beam_indices,
sampler_state,
) = self.sampler.sample_beams(reshaped_summed, self.beam_size, sampler_state)
# Use the beam indices to extract the corresponding classes.
# shape: (batch_size, beam_size)
restricted_predicted_classes = reshaped_predicted_classes.gather(
1, restricted_beam_indices
)
predictions.append(restricted_predicted_classes)
# shape: (batch_size, beam_size)
last_log_probabilities = restricted_beam_log_probs
# The beam indices come from a `beam_size * per_node_beam_size` dimension where the
# indices with a common ancestor are grouped together. Hence
# dividing by per_node_beam_size gives the ancestor. (Note that this is integer
# division as the tensor is a LongTensor.)
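            # For example (numbers here are purely illustrative): with beam_size = 2 and
            # per_node_beam_size = 3, flattened candidate index 4 comes from ancestor
            # beam 4 // 3 == 1.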
# shape: (batch_size, beam_size)
backpointer = restricted_beam_indices // self.per_node_beam_size
backpointers.append(backpointer)
# Keep only the pieces of the state tensors corresponding to the
# ancestors created this iteration.
self._update_state(state, backpointer)
if not torch.isfinite(last_log_probabilities).all():
warnings.warn(
"Infinite log probabilities encountered. Some final sequences may not make sense. "
"This can happen when the beam size is larger than the number of valid (non-zero "
"probability) transitions that the step function produces.",
RuntimeWarning,
)
reconstructed_predictions = self._reconstruct_sequences(predictions, backpointers)
# shape: (batch_size, beam_size, max_steps)
all_predictions = torch.cat(list(reversed(reconstructed_predictions)), 2)
return all_predictions, last_log_probabilities
@staticmethod
def _is_multilayer_rnn_decoder(key: str, state_tensor: torch.Tensor) -> bool:
return state_tensor.dim() == 3 and key in {
"decoder_hidden",
"decoder_context",
}
def _update_initial_state(self, state: StateType, batch_size: int):
"""
Expand tensors in a state dictionary from `(batch_size, *)` to `(batch_size * beam_size, *)`.
"""
for key, state_tensor in state.items():
if state_tensor is None:
continue
multilayer_rnn_decoder = self._is_multilayer_rnn_decoder(key, state_tensor)
if multilayer_rnn_decoder:
# shape: (num_layers, batch_size * beam_size, *)
num_layers, _, *last_dims = state_tensor.size()
state[key] = (
state_tensor.unsqueeze(2)
.expand(num_layers, batch_size, self.beam_size, *last_dims)
.reshape(num_layers, batch_size * self.beam_size, *last_dims)
)
else:
# shape: (batch_size * beam_size, *)
_, *last_dims = state_tensor.size()
state[key] = (
state_tensor.unsqueeze(1)
.expand(batch_size, self.beam_size, *last_dims)
.reshape(batch_size * self.beam_size, *last_dims)
)
def _update_state(self, state: StateType, backpointer: torch.Tensor):
batch_size = backpointer.size()[0]
for key, state_tensor in state.items():
if state_tensor is None:
continue
multilayer_rnn_decoder = self._is_multilayer_rnn_decoder(key, state_tensor)
if multilayer_rnn_decoder:
# shape: (num_layers, batch_size * beam_size, *)
num_layers, _, *last_dims = state_tensor.size()
expanded_backpointer = backpointer.view(
batch_size, self.beam_size, *([1] * len(last_dims))
).expand(batch_size, self.beam_size, *last_dims)
expanded_backpointer = expanded_backpointer.unsqueeze(0).repeat(num_layers, 1, 1, 1)
# shape: (num_layers, batch_size * beam_size, *)
state[key] = (
state_tensor.reshape(num_layers, batch_size, self.beam_size, *last_dims)
.gather(2, expanded_backpointer)
.reshape(num_layers, batch_size * self.beam_size, *last_dims)
)
else:
_, *last_dims = state_tensor.size()
# shape: (batch_size, beam_size, *)
expanded_backpointer = backpointer.view(
batch_size, self.beam_size, *([1] * len(last_dims))
).expand(batch_size, self.beam_size, *last_dims)
# shape: (batch_size * beam_size, *)
state[key] = (
state_tensor.reshape(batch_size, self.beam_size, *last_dims)
.gather(1, expanded_backpointer)
.reshape(batch_size * self.beam_size, *last_dims)
)
| allennlp-master | allennlp/nn/beam_search.py |
"""
An initializer is just a PyTorch function.
Here we implement a proxy class that allows us
to register them and supply any additional function arguments
(for example, the `mean` and `std` of a normal initializer)
as named arguments to the constructor.
The available initialization functions are
* ["normal"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.normal_)
* ["uniform"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.uniform_)
* ["constant"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.constant_)
* ["eye"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.eye_)
* ["dirac"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.dirac_)
* ["xavier_uniform"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.xavier_uniform_)
* ["xavier_normal"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.xavier_normal_)
* ["kaiming_uniform"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.kaiming_uniform_)
* ["kaiming_normal"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.kaiming_normal_)
* ["orthogonal"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.orthogonal_)
* ["sparse"](https://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.sparse_)
* ["block_orthogonal"](./initializers.md#block_orthogonal)
* ["uniform_unit_scaling"](./initializers.md#uniform_unit_scaling)
* ["pretrained"](./initializers.md#PretrainedModelInitializer)
"""
import logging
import re
import math
from typing import Callable, List, Tuple, Dict
import itertools
from overrides import overrides
import tarfile
import torch
import torch.nn.init
from allennlp.common import FromParams, Registrable
from allennlp.common.checks import ConfigurationError
logger = logging.getLogger(__name__)
class Initializer(Registrable):
"""
An initializer is really just a bare pytorch function. This class
is a proxy that allows us to implement `Registrable` for those functions.
"""
default_implementation = "normal"
def __call__(self, tensor: torch.Tensor, **kwargs) -> None:
"""
This function is here just to make mypy happy. We expect initialization functions to
follow this API; the builtin pytorch initialization functions follow this just fine, even
        though they don't subclass `Initializer`. We're just making it explicit here, so mypy
knows that initializers are callable like this.
"""
raise NotImplementedError
def uniform_unit_scaling(tensor: torch.Tensor, nonlinearity: str = "linear") -> torch.Tensor:
"""
    An initialiser which preserves output variance for approximately Gaussian
distributed inputs. This boils down to initialising layers using a uniform
distribution in the range `(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)`, where
`dim[0]` is equal to the input dimension of the parameter and the `scale`
is a constant scaling factor which depends on the non-linearity used.
    See [Random Walk Initialisation for Training Very Deep Feedforward Networks](https://www.semanticscholar.org/paper/Random-Walk-Initialization-for-Training-Very-Deep-Sussillo-Abbott/be9728a0728b6acf7a485225b1e41592176eda0b)
    for more information.
# Parameters
tensor : `torch.Tensor`, required.
The tensor to initialise.
nonlinearity : `str`, optional (default = `"linear"`)
The non-linearity which is performed after the projection that this
tensor is involved in. This must be the name of a function contained
in the `torch.nn.functional` package.
# Returns
The initialised tensor.
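    # Example

    A minimal sketch; the layer and its sizes are illustrative:

    ```python
    linear = torch.nn.Linear(in_features=128, out_features=64)
    uniform_unit_scaling(linear.weight, nonlinearity="linear")
    ```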
"""
size = 1.0
# Estimate the input size. This won't work perfectly,
# but it covers almost all use cases where this initialiser
    # would be expected to be useful, i.e. in large linear and
# convolutional layers, as the last dimension will almost
# always be the output size.
for dimension in list(tensor.size())[:-1]:
size *= dimension
activation_scaling = torch.nn.init.calculate_gain(nonlinearity, tensor)
max_value = math.sqrt(3 / size) * activation_scaling
return tensor.data.uniform_(-max_value, max_value)
def block_orthogonal(tensor: torch.Tensor, split_sizes: List[int], gain: float = 1.0) -> None:
"""
An initializer which allows initializing model parameters in "blocks". This is helpful
in the case of recurrent models which use multiple gates applied to linear projections,
which can be computed efficiently if they are concatenated together. However, they are
separate parameters which should be initialized independently.
# Parameters
tensor : `torch.Tensor`, required.
A tensor to initialize.
split_sizes : `List[int]`, required.
A list of length `tensor.ndim()` specifying the size of the
blocks along that particular dimension. E.g. `[10, 20]` would
result in the tensor being split into chunks of size 10 along the
first dimension and 20 along the second.
gain : `float`, optional (default = `1.0`)
The gain (scaling) applied to the orthogonal initialization.
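    # Example

    A sketch with made-up sizes: a `(400, 100)` weight split into four `(100, 100)`
    blocks, each initialized orthogonally:

    ```python
    weight = torch.empty(400, 100)
    block_orthogonal(weight, split_sizes=[100, 100])
    ```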
"""
data = tensor.data
sizes = list(tensor.size())
if any(a % b != 0 for a, b in zip(sizes, split_sizes)):
raise ConfigurationError(
"tensor dimensions must be divisible by their respective "
"split_sizes. Found size: {} and split_sizes: {}".format(sizes, split_sizes)
)
indexes = [list(range(0, max_size, split)) for max_size, split in zip(sizes, split_sizes)]
# Iterate over all possible blocks within the tensor.
for block_start_indices in itertools.product(*indexes):
# A list of tuples containing the index to start at for this block
        # and the appropriate step size (i.e. split_sizes[i] for dimension i).
index_and_step_tuples = zip(block_start_indices, split_sizes)
# This is a tuple of slices corresponding to:
# tensor[index: index + step_size, ...]. This is
# required because we could have an arbitrary number
# of dimensions. The actual slices we need are the
# start_index: start_index + step for each dimension in the tensor.
block_slice = tuple(
slice(start_index, start_index + step) for start_index, step in index_and_step_tuples
)
data[block_slice] = torch.nn.init.orthogonal_(tensor[block_slice].contiguous(), gain=gain)
def zero(tensor: torch.Tensor) -> None:
return tensor.data.zero_()
def lstm_hidden_bias(tensor: torch.Tensor) -> None:
"""
Initialize the biases of the forget gate to 1, and all other gates to 0,
following Jozefowicz et al., An Empirical Exploration of Recurrent Network Architectures
"""
# gates are (b_hi|b_hf|b_hg|b_ho) of shape (4*hidden_size)
tensor.data.zero_()
hidden_size = tensor.shape[0] // 4
tensor.data[hidden_size : (2 * hidden_size)] = 1.0
class _InitializerWrapper(Initializer):
def __init__(self, init_function: Callable[..., None], **kwargs):
self._init_function = init_function
self._kwargs = kwargs
def __call__(self, tensor: torch.Tensor, **kwargs) -> None:
self._init_function(tensor, **self._kwargs)
def __repr__(self):
return "Init: %s, with params: %s" % (self._init_function, self._kwargs)
@Initializer.register("normal")
class NormalInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "normal".
"""
def __init__(self, mean: float = 0.0, std: float = 0.1):
super().__init__(init_function=torch.nn.init.normal_, mean=mean, std=std)
@Initializer.register("orthogonal")
class OrthogonalInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "orthogonal".
"""
def __init__(self, gain: float = 1.0):
super().__init__(init_function=torch.nn.init.orthogonal_, gain=gain)
@Initializer.register("uniform")
class UniformInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "uniform".
"""
def __init__(self, a: float = 0.0, b: float = 1.0):
super().__init__(init_function=torch.nn.init.uniform_, a=a, b=b)
@Initializer.register("constant")
class ConstantInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "constant".
"""
def __init__(self, val: float):
super().__init__(init_function=torch.nn.init.constant_, val=val)
@Initializer.register("dirac")
class DiracInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "dirac".
"""
def __init__(self):
super().__init__(init_function=torch.nn.init.dirac_)
@Initializer.register("xavier_uniform")
class XavierUniformInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "xavir_uniform".
"""
def __init__(self, gain: float = 1.0):
super().__init__(init_function=torch.nn.init.xavier_uniform_, gain=gain)
@Initializer.register("xavier_normal")
class XavierNormalInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "xavier_normal".
"""
def __init__(self, gain: float = 1.0):
super().__init__(init_function=torch.nn.init.xavier_normal_, gain=gain)
@Initializer.register("kaiming_uniform")
class KaimingUniformInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "kaiming_uniform".
"""
def __init__(self, a: float = 0.0, mode: str = "fan_in", nonlinearity: str = "leaky_relu"):
super().__init__(
init_function=torch.nn.init.kaiming_uniform_, a=a, mode=mode, nonlinearity=nonlinearity
)
@Initializer.register("kaiming_normal")
class KaimingNormalInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "kaiming_normal".
"""
def __init__(self, a: float = 0.0, mode: str = "fan_in", nonlinearity: str = "leaky_relu"):
super().__init__(
init_function=torch.nn.init.kaiming_normal_, a=a, mode=mode, nonlinearity=nonlinearity
)
@Initializer.register("sparse")
class SparseInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "sparse".
"""
def __init__(self, sparsity: float, std: float = 0.01):
super().__init__(init_function=torch.nn.init.sparse_, sparsity=sparsity, std=std)
@Initializer.register("eye")
class EyeInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "eye".
"""
def __init__(self):
super().__init__(init_function=torch.nn.init.eye_)
@Initializer.register("block_orthogonal")
class BlockOrthogonalInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "block_orthogonal".
"""
def __init__(self, split_sizes: List[int], gain: float = 1.0):
super().__init__(init_function=block_orthogonal, split_sizes=split_sizes, gain=gain)
@Initializer.register("uniform_unit_scaling")
class UniformUnitScalingInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "uniform_unit_scaling".
"""
def __init__(self, nonlinearity: str = "linear"):
super().__init__(init_function=uniform_unit_scaling, nonlinearity=nonlinearity)
@Initializer.register("zero")
class ZeroInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "zero".
"""
def __init__(self):
super().__init__(init_function=zero)
@Initializer.register("lstm_hidden_bias")
class LstmHiddenBiasInitializer(_InitializerWrapper):
"""
Registered as an `Initializer` with name "lstm_hidden_bias".
"""
def __init__(self):
super().__init__(init_function=lstm_hidden_bias)
@Initializer.register("pretrained")
class PretrainedModelInitializer(Initializer):
"""
An initializer which allows initializing parameters using a pretrained model. The
initializer will load all of the weights from the `weights_file_path` and use the
name of the new parameters to index into the pretrained parameters. Therefore,
by default, the names of the new and pretrained parameters must be the same.
However, this behavior can be overridden using the `parameter_name_overrides`,
which remaps the name of the new parameter to the key which should be used
to index into the pretrained parameters.
The initializer will load all of the weights from the `weights_file_path`
regardless of which parameters will actually be used to initialize the new model.
So, if you need to initialize several parameters using a pretrained model, the most
memory-efficient way to do this is to use one `PretrainedModelInitializer` per
weights file and use a regex to match all of the new parameters which need to be
initialized.
If you are using a configuration file to instantiate this object, the below entry
in the `InitializerApplicator` parameters will initialize `linear_1.weight` and
`linear_2.weight` using a pretrained model. `linear_1.weight` will be initialized
to the pretrained parameters called `linear_1.weight`, but `linear_2.weight` will
    be initialized to the pretrained parameters called `linear_3.weight`:
```
["linear_1.weight|linear_2.weight",
{
"type": "pretrained",
"weights_file_path": "best.th",
"parameter_name_overrides": {
"linear_2.weight": "linear_3.weight"
}
}
]
```
To initialize weights for all the parameters from a pretrained model (assuming their names
remain unchanged), use the following instead:
```
[".*",
{
"type": "pretrained",
"weights_file_path": "best.th",
"parameter_name_overrides": {}
}
]
```
Registered as an `Initializer` with name "pretrained".
# Parameters
weights_file_path : `str`, required
The path to the weights file which has the pretrained model parameters.
parameter_name_overrides : `Dict[str, str]`, optional (default = `None`)
The mapping from the new parameter name to the name which should be used
to index into the pretrained model parameters. If a parameter name is not
specified, the initializer will use the parameter's default name as the key.
"""
def __init__(
self, weights_file_path: str, parameter_name_overrides: Dict[str, str] = None
) -> None:
from allennlp.models.archival import (
extracted_archive,
get_weights_path,
) # import here to avoid circular imports
self.weights: Dict[str, torch.Tensor]
if tarfile.is_tarfile(weights_file_path):
with extracted_archive(weights_file_path) as extraction_path:
self.weights = torch.load(get_weights_path(extraction_path), map_location="cpu")
else:
self.weights = torch.load(weights_file_path, map_location="cpu")
self.parameter_name_overrides = parameter_name_overrides or {}
@overrides
def __call__(self, tensor: torch.Tensor, parameter_name: str, **kwargs) -> None: # type: ignore
# Select the new parameter name if it's being overridden
if parameter_name in self.parameter_name_overrides:
parameter_name = self.parameter_name_overrides[parameter_name]
# If the size of the source and destination tensors are not the
# same, then we need to raise an error
source_weights = self.weights[parameter_name]
if tensor.data.size() != source_weights.size():
raise ConfigurationError(
"Incompatible sizes found for parameter %s. "
"Found %s and %s" % (parameter_name, tensor.data.size(), source_weights.size())
)
# Copy the parameters from the source to the destination
tensor.data.copy_(source_weights.data)
class InitializerApplicator(FromParams):
"""
Applies initializers to the parameters of a Module based on regex matches. Any parameter not
explicitly matching a regex will not be initialized, instead using whatever the default
initialization was in the module's code.
If you are instantiating this object from a config file, an example configuration is as
follows:
```json
{
"regexes": [
["parameter_regex_match1",
{
"type": "normal"
"mean": 0.01
"std": 0.1
}
],
["parameter_regex_match2", "uniform"]
],
"prevent_regexes": ["prevent_init_regex"]
}
```
where the first item in each tuple under the `regexes` parameters is the regex that matches to
parameters, and the second item specifies an `Initializer.` These values can either be strings,
in which case they correspond to the names of initializers, or dictionaries, in which case they
must contain the "type" key, corresponding to the name of an initializer. In addition, they may
contain auxiliary named parameters which will be fed to the initializer itself. To determine
valid auxiliary parameters, please refer to the torch.nn.init documentation.
# Parameters
regexes : `List[Tuple[str, Initializer]]`, optional (default = `[]`)
A list mapping parameter regexes to initializers. We will check each parameter against
each regex in turn, and apply the initializer paired with the first matching regex, if
any.
    prevent_regexes : `List[str]`, optional (default = `None`)
Any parameter name matching one of these regexes will not be initialized, regardless of
whether it matches one of the regexes passed in the `regexes` parameter.
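    When constructing the applicator directly in code rather than from a config file, a
    sketch (the regexes and the `module` below are illustrative) looks like:

    ```python
    applicator = InitializerApplicator(
        regexes=[("linear.*weight", XavierUniformInitializer())],
        prevent_regexes=["bias"],
    )
    applicator(module)  # `module` is any torch.nn.Module you have constructed
    ```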
"""
def __init__(
self, regexes: List[Tuple[str, Initializer]] = None, prevent_regexes: List[str] = None
) -> None:
self._initializers = regexes or []
self._prevent_regex = None
if prevent_regexes:
self._prevent_regex = "(" + ")|(".join(prevent_regexes) + ")"
def __call__(self, module: torch.nn.Module) -> None:
"""
Applies an initializer to all parameters in a module that match one of the regexes we were
given in this object's constructor. Does nothing to parameters that do not match.
# Parameters
module : `torch.nn.Module`, required.
The Pytorch module to apply the initializers to.
"""
logger.info("Initializing parameters")
unused_regexes = {initializer[0] for initializer in self._initializers}
uninitialized_parameters = set()
# Store which initializers were applied to which parameters.
for name, parameter in module.named_parameters():
for initializer_regex, initializer in self._initializers:
allow = self._prevent_regex is None or not bool(
re.search(self._prevent_regex, name)
)
if allow and re.search(initializer_regex, name):
logger.info("Initializing %s using %s initializer", name, initializer_regex)
initializer(parameter, parameter_name=name)
unused_regexes.discard(initializer_regex)
break
else: # no break
uninitialized_parameters.add(name)
for regex in unused_regexes:
logger.warning("Did not use initialization regex that was passed: %s", regex)
logger.info(
"Done initializing parameters; the following parameters are using their "
"default initialization from their code"
)
uninitialized_parameter_list = list(uninitialized_parameters)
uninitialized_parameter_list.sort()
for name in uninitialized_parameter_list:
logger.info(" %s", name)
| allennlp-master | allennlp/nn/initializers.py |
from typing import List, Set, Tuple, Dict
import numpy
from allennlp.common.checks import ConfigurationError
def decode_mst(
energy: numpy.ndarray, length: int, has_labels: bool = True
) -> Tuple[numpy.ndarray, numpy.ndarray]:
"""
Note: Counter to typical intuition, this function decodes the _maximum_
spanning tree.
Decode the optimal MST tree with the Chu-Liu-Edmonds algorithm for
maximum spanning arborescences on graphs.
# Parameters
energy : `numpy.ndarray`, required.
A tensor with shape (num_labels, timesteps, timesteps)
containing the energy of each edge. If has_labels is `False`,
the tensor should have shape (timesteps, timesteps) instead.
length : `int`, required.
The length of this sequence, as the energy may have come
from a padded batch.
has_labels : `bool`, optional, (default = `True`)
Whether the graph has labels or not.
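    # Example

    A sketch on a small unlabeled graph with random scores (for illustration only):

    ```python
    energy = numpy.random.rand(5, 5)
    heads, _ = decode_mst(energy, length=5, has_labels=False)
    # heads[i] is the predicted head (parent) of node i; node 0 acts as the root.
    ```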
"""
if has_labels and energy.ndim != 3:
raise ConfigurationError("The dimension of the energy array is not equal to 3.")
elif not has_labels and energy.ndim != 2:
raise ConfigurationError("The dimension of the energy array is not equal to 2.")
input_shape = energy.shape
max_length = input_shape[-1]
# Our energy matrix might have been batched -
# here we clip it to contain only non padded tokens.
if has_labels:
energy = energy[:, :length, :length]
# get best label for each edge.
label_id_matrix = energy.argmax(axis=0)
energy = energy.max(axis=0)
else:
energy = energy[:length, :length]
label_id_matrix = None
# get original score matrix
original_score_matrix = energy
# initialize score matrix to original score matrix
score_matrix = numpy.array(original_score_matrix, copy=True)
old_input = numpy.zeros([length, length], dtype=numpy.int32)
old_output = numpy.zeros([length, length], dtype=numpy.int32)
current_nodes = [True for _ in range(length)]
representatives: List[Set[int]] = []
for node1 in range(length):
original_score_matrix[node1, node1] = 0.0
score_matrix[node1, node1] = 0.0
representatives.append({node1})
for node2 in range(node1 + 1, length):
old_input[node1, node2] = node1
old_output[node1, node2] = node2
old_input[node2, node1] = node2
old_output[node2, node1] = node1
final_edges: Dict[int, int] = {}
# The main algorithm operates inplace.
chu_liu_edmonds(
length, score_matrix, current_nodes, final_edges, old_input, old_output, representatives
)
heads = numpy.zeros([max_length], numpy.int32)
if has_labels:
head_type = numpy.ones([max_length], numpy.int32)
else:
head_type = None
for child, parent in final_edges.items():
heads[child] = parent
if has_labels:
head_type[child] = label_id_matrix[parent, child]
return heads, head_type
def chu_liu_edmonds(
length: int,
score_matrix: numpy.ndarray,
current_nodes: List[bool],
final_edges: Dict[int, int],
old_input: numpy.ndarray,
old_output: numpy.ndarray,
representatives: List[Set[int]],
):
"""
Applies the chu-liu-edmonds algorithm recursively
to a graph with edge weights defined by score_matrix.
Note that this function operates in place, so variables
will be modified.
# Parameters
length : `int`, required.
The number of nodes.
score_matrix : `numpy.ndarray`, required.
The score matrix representing the scores for pairs
of nodes.
current_nodes : `List[bool]`, required.
The nodes which are representatives in the graph.
        A representative at its most basic represents a node,
but as the algorithm progresses, individual nodes will
represent collapsed cycles in the graph.
final_edges : `Dict[int, int]`, required.
An empty dictionary which will be populated with the
nodes which are connected in the maximum spanning tree.
old_input : `numpy.ndarray`, required.
old_output : `numpy.ndarray`, required.
representatives : `List[Set[int]]`, required.
A list containing the nodes that a particular node
is representing at this iteration in the graph.
# Returns
Nothing - all variables are modified in place.
"""
# Set the initial graph to be the greedy best one.
parents = [-1]
for node1 in range(1, length):
parents.append(0)
if current_nodes[node1]:
max_score = score_matrix[0, node1]
for node2 in range(1, length):
if node2 == node1 or not current_nodes[node2]:
continue
new_score = score_matrix[node2, node1]
if new_score > max_score:
max_score = new_score
parents[node1] = node2
# Check if this solution has a cycle.
has_cycle, cycle = _find_cycle(parents, length, current_nodes)
# If there are no cycles, find all edges and return.
if not has_cycle:
final_edges[0] = -1
for node in range(1, length):
if not current_nodes[node]:
continue
parent = old_input[parents[node], node]
child = old_output[parents[node], node]
final_edges[child] = parent
return
# Otherwise, we have a cycle so we need to remove an edge.
# From here until the recursive call is the contraction stage of the algorithm.
cycle_weight = 0.0
# Find the weight of the cycle.
index = 0
for node in cycle:
index += 1
cycle_weight += score_matrix[parents[node], node]
# For each node in the graph, find the maximum weight incoming
# and outgoing edge into the cycle.
cycle_representative = cycle[0]
for node in range(length):
if not current_nodes[node] or node in cycle:
continue
in_edge_weight = float("-inf")
in_edge = -1
out_edge_weight = float("-inf")
out_edge = -1
for node_in_cycle in cycle:
if score_matrix[node_in_cycle, node] > in_edge_weight:
in_edge_weight = score_matrix[node_in_cycle, node]
in_edge = node_in_cycle
# Add the new edge score to the cycle weight
# and subtract the edge we're considering removing.
score = (
cycle_weight
+ score_matrix[node, node_in_cycle]
- score_matrix[parents[node_in_cycle], node_in_cycle]
)
if score > out_edge_weight:
out_edge_weight = score
out_edge = node_in_cycle
score_matrix[cycle_representative, node] = in_edge_weight
old_input[cycle_representative, node] = old_input[in_edge, node]
old_output[cycle_representative, node] = old_output[in_edge, node]
score_matrix[node, cycle_representative] = out_edge_weight
old_output[node, cycle_representative] = old_output[node, out_edge]
old_input[node, cycle_representative] = old_input[node, out_edge]
# For the next recursive iteration, we want to consider the cycle as a
# single node. Here we collapse the cycle into the first node in the
    # cycle (first node is arbitrary), and set all the other nodes to not be
# considered in the next iteration. We also keep track of which
# representatives we are considering this iteration because we need
# them below to check if we're done.
considered_representatives: List[Set[int]] = []
for i, node_in_cycle in enumerate(cycle):
considered_representatives.append(set())
if i > 0:
# We need to consider at least one
# node in the cycle, arbitrarily choose
# the first.
current_nodes[node_in_cycle] = False
for node in representatives[node_in_cycle]:
considered_representatives[i].add(node)
if i > 0:
representatives[cycle_representative].add(node)
chu_liu_edmonds(
length, score_matrix, current_nodes, final_edges, old_input, old_output, representatives
)
# Expansion stage.
# check each node in cycle, if one of its representatives
# is a key in the final_edges, it is the one we need.
found = False
key_node = -1
for i, node in enumerate(cycle):
for cycle_rep in considered_representatives[i]:
if cycle_rep in final_edges:
key_node = node
found = True
break
if found:
break
previous = parents[key_node]
while previous != key_node:
child = old_output[parents[previous], previous]
parent = old_input[parents[previous], previous]
final_edges[child] = parent
previous = parents[previous]
def _find_cycle(
parents: List[int], length: int, current_nodes: List[bool]
) -> Tuple[bool, List[int]]:
added = [False for _ in range(length)]
added[0] = True
cycle = set()
has_cycle = False
for i in range(1, length):
if has_cycle:
break
# don't redo nodes we've already
# visited or aren't considering.
if added[i] or not current_nodes[i]:
continue
# Initialize a new possible cycle.
this_cycle = set()
this_cycle.add(i)
added[i] = True
has_cycle = True
next_node = i
while parents[next_node] not in this_cycle:
next_node = parents[next_node]
# If we see a node we've already processed,
# we can stop, because the node we are
# processing would have been in that cycle.
if added[next_node]:
has_cycle = False
break
added[next_node] = True
this_cycle.add(next_node)
if has_cycle:
original = next_node
cycle.add(original)
next_node = parents[original]
while next_node != original:
cycle.add(next_node)
next_node = parents[next_node]
break
return has_cycle, list(cycle)
| allennlp-master | allennlp/nn/chu_liu_edmonds.py |
import re
from typing import List, Tuple
import torch
from allennlp.common import FromParams
from allennlp.nn.regularizers.regularizer import Regularizer
class RegularizerApplicator(FromParams):
"""
Applies regularizers to the parameters of a Module based on regex matches.
"""
def __init__(self, regexes: List[Tuple[str, Regularizer]] = None) -> None:
"""
# Parameters
regexes : `List[Tuple[str, Regularizer]]`, optional (default = `None`)
A sequence of pairs (regex, Regularizer), where each Regularizer
applies to the parameters its regex matches (and that haven't previously
been matched).
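        A sketch of direct construction (the regex and alpha value are illustrative):

        ```python
        from allennlp.nn.regularizers import L2Regularizer

        applicator = RegularizerApplicator(regexes=[("weight", L2Regularizer(alpha=1e-4))])
        penalty = applicator(model)  # `model` is any torch.nn.Module; add `penalty` to the loss
        ```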
"""
self._regularizers = regexes or []
def __call__(self, module: torch.nn.Module) -> torch.Tensor:
"""
# Parameters
module : `torch.nn.Module`, required
The module to regularize.
"""
accumulator = 0.0
for name, parameter in module.named_parameters():
# We first check if the parameter needs gradient updates or not
if parameter.requires_grad:
# For each parameter find the first matching regex.
for regex, regularizer in self._regularizers:
if re.search(regex, name):
penalty = regularizer(parameter)
accumulator = accumulator + penalty
break
return accumulator
| allennlp-master | allennlp/nn/regularizers/regularizer_applicator.py |
"""
This module contains classes representing regularization schemes
as well as a class for applying regularization to parameters.
"""
from allennlp.nn.regularizers.regularizer import Regularizer
from allennlp.nn.regularizers.regularizers import L1Regularizer
from allennlp.nn.regularizers.regularizers import L2Regularizer
from allennlp.nn.regularizers.regularizer_applicator import RegularizerApplicator
| allennlp-master | allennlp/nn/regularizers/__init__.py |
import torch
from allennlp.nn.regularizers.regularizer import Regularizer
@Regularizer.register("l1")
class L1Regularizer(Regularizer):
"""
Represents a penalty proportional to the sum of the absolute values of the parameters
Registered as a `Regularizer` with name "l1".
"""
def __init__(self, alpha: float = 0.01) -> None:
self.alpha = alpha
def __call__(self, parameter: torch.Tensor) -> torch.Tensor:
return self.alpha * torch.sum(torch.abs(parameter))
@Regularizer.register("l2")
class L2Regularizer(Regularizer):
"""
Represents a penalty proportional to the sum of squared values of the parameters
Registered as a `Regularizer` with name "l2".
"""
def __init__(self, alpha: float = 0.01) -> None:
self.alpha = alpha
def __call__(self, parameter: torch.Tensor) -> torch.Tensor:
return self.alpha * torch.sum(torch.pow(parameter, 2))
| allennlp-master | allennlp/nn/regularizers/regularizers.py |
import torch
from allennlp.common import Registrable
class Regularizer(Registrable):
"""
An abstract class representing a regularizer. It must implement
call, returning a scalar tensor.
"""
default_implementation = "l2"
def __call__(self, parameter: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
| allennlp-master | allennlp/nn/regularizers/regularizer.py |
from typing import Optional, Iterable, Dict, Any
from allennlp.common.checks import ConfigurationError
class MetricTracker:
"""
This class tracks a metric during training for the dual purposes of early stopping
and for knowing whether the current value is the best so far. It mimics the PyTorch
`state_dict` / `load_state_dict` interface, so that it can be checkpointed along with
your model and optimizer.
Some metrics improve by increasing; others by decreasing. Here you can either explicitly
supply `should_decrease`, or you can provide a `metric_name` in which case "should decrease"
is inferred from the first character, which must be "+" or "-".
# Parameters
patience : `int`, optional (default = `None`)
If provided, then `should_stop_early()` returns True if we go this
many epochs without seeing a new best value.
metric_name : `str`, optional (default = `None`)
If provided, it's used to infer whether we expect the metric values to
increase (if it starts with "+") or decrease (if it starts with "-").
It's an error if it doesn't start with one of those. If it's not provided,
you should specify `should_decrease` instead.
    should_decrease : `bool`, optional (default = `None`)
If `metric_name` isn't provided (in which case we can't infer `should_decrease`),
then you have to specify it here.
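    # Example

    A sketch of tracking a decreasing validation loss with a patience of 3 epochs:

    ```python
    tracker = MetricTracker(patience=3, metric_name="-loss")
    tracker.add_metric(0.75)
    if tracker.is_best_so_far():
        ...  # e.g. save a checkpoint
    if tracker.should_stop_early():
        ...  # stop training
    ```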
"""
def __init__(
self, patience: Optional[int] = None, metric_name: str = None, should_decrease: bool = None
) -> None:
self._best_so_far: Optional[float] = None
self._patience = patience
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self.best_epoch_metrics: Dict[str, float] = {}
self._epoch_number = 0
self.best_epoch: Optional[int] = None
# If the metric name starts with "+", we want it to increase.
# If the metric name starts with "-", we want it to decrease.
# We also allow you to not specify a metric name and just set `should_decrease` directly.
if should_decrease is not None and metric_name is not None:
raise ConfigurationError(
"must specify either `should_decrease` or `metric_name` (but not both)"
)
elif metric_name is not None:
if metric_name[0] == "-":
self._should_decrease = True
elif metric_name[0] == "+":
self._should_decrease = False
else:
raise ConfigurationError("metric_name must start with + or -")
elif should_decrease is not None:
self._should_decrease = should_decrease
else:
raise ConfigurationError(
"must specify either `should_decrease` or `metric_name` (but not both)"
)
def clear(self) -> None:
"""
Clears out the tracked metrics, but keeps the patience and should_decrease settings.
"""
self._best_so_far = None
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self._epoch_number = 0
self.best_epoch = None
def state_dict(self) -> Dict[str, Any]:
"""
A `Trainer` can use this to serialize the state of the metric tracker.
"""
return {
"best_so_far": self._best_so_far,
"patience": self._patience,
"epochs_with_no_improvement": self._epochs_with_no_improvement,
"is_best_so_far": self._is_best_so_far,
"should_decrease": self._should_decrease,
"best_epoch_metrics": self.best_epoch_metrics,
"epoch_number": self._epoch_number,
"best_epoch": self.best_epoch,
}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
"""
A `Trainer` can use this to hydrate a metric tracker from a serialized state.
"""
self._best_so_far = state_dict["best_so_far"]
self._patience = state_dict["patience"]
self._epochs_with_no_improvement = state_dict["epochs_with_no_improvement"]
self._is_best_so_far = state_dict["is_best_so_far"]
self._should_decrease = state_dict["should_decrease"]
self.best_epoch_metrics = state_dict["best_epoch_metrics"]
self._epoch_number = state_dict["epoch_number"]
self.best_epoch = state_dict["best_epoch"]
def add_metric(self, metric: float) -> None:
"""
Record a new value of the metric and update the various things that depend on it.
"""
new_best = (
(self._best_so_far is None)
or (self._should_decrease and metric < self._best_so_far)
or (not self._should_decrease and metric > self._best_so_far)
)
if new_best:
self.best_epoch = self._epoch_number
self._is_best_so_far = True
self._best_so_far = metric
self._epochs_with_no_improvement = 0
else:
self._is_best_so_far = False
self._epochs_with_no_improvement += 1
self._epoch_number += 1
def add_metrics(self, metrics: Iterable[float]) -> None:
"""
Helper to add multiple metrics at once.
"""
for metric in metrics:
self.add_metric(metric)
def is_best_so_far(self) -> bool:
"""
Returns true if the most recent value of the metric is the best so far.
"""
return self._is_best_so_far
def should_stop_early(self) -> bool:
"""
Returns true if improvement has stopped for long enough.
"""
if self._patience is None:
return False
else:
return self._epochs_with_no_improvement >= self._patience
| allennlp-master | allennlp/training/metric_tracker.py |
import os
from contextlib import contextmanager
from typing import Any, Dict, Iterator, Tuple
from allennlp.models import Model
from allennlp.training.checkpointer import Checkpointer
from allennlp.training.trainer import Trainer
@Trainer.register("no_op")
class NoOpTrainer(Trainer):
"""
Registered as a `Trainer` with name "no_op".
"""
def __init__(self, serialization_dir: str, model: Model) -> None:
"""
A trivial trainer to assist in making model archives for models that do not actually
require training. For instance, a majority class baseline.
In a typical AllenNLP configuration file, neither the `serialization_dir` nor the `model`
arguments would need an entry.
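        For example, the relevant part of such a config might look like (other keys elided):

        ```json
        "trainer": {"type": "no_op"}
        ```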
"""
super().__init__(serialization_dir, cuda_device=-1)
self.model = model
def train(self) -> Dict[str, Any]:
assert self._serialization_dir is not None
self.model.vocab.save_to_files(os.path.join(self._serialization_dir, "vocabulary"))
checkpointer = Checkpointer(self._serialization_dir)
checkpointer.save_checkpoint(epoch=0, trainer=self, is_best_so_far=True)
return {}
@contextmanager
def get_checkpoint_state(self) -> Iterator[Tuple[Dict[str, Any], Dict[str, Any]]]:
yield self.model.state_dict(), {}
| allennlp-master | allennlp/training/no_op_trainer.py |
"""
Helper functions for Trainers
"""
import datetime
import logging
import os
import shutil
import json
from os import PathLike
from typing import Any, Dict, Iterable, Optional, Union, Tuple, Set, List, TYPE_CHECKING
from collections import Counter
import torch
# import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.nn.utils import clip_grad_norm_
from allennlp.common.checks import check_for_gpu, ConfigurationError
from allennlp.common.params import Params
from allennlp.common.tqdm import Tqdm
from allennlp.common.util import dump_metrics, sanitize
from allennlp.data import Instance, Vocabulary, Batch
from allennlp.data.dataset_readers import DatasetReader
from allennlp.models.archival import CONFIG_NAME
from allennlp.models.model import Model
from allennlp.nn import util as nn_util
if TYPE_CHECKING:
from allennlp.data import AllennlpDataset
from allennlp.data import AllennlpLazyDataset
logger = logging.getLogger(__name__)
# We want to warn people that tqdm ignores metrics that start with underscores
# exactly once. This variable keeps track of whether we have.
class HasBeenWarned:
tqdm_ignores_underscores = False
def move_optimizer_to_cuda(optimizer):
"""
Move the optimizer state to GPU, if necessary.
After calling, any parameter specific state in the optimizer
will be located on the same device as the parameter.
"""
for param_group in optimizer.param_groups:
for param in param_group["params"]:
if param.is_cuda:
param_state = optimizer.state[param]
for k in param_state.keys():
if isinstance(param_state[k], torch.Tensor):
param_state[k] = param_state[k].cuda(device=param.get_device())
def get_batch_size(batch: Union[Dict, torch.Tensor]) -> int:
"""
Returns the size of the batch dimension. Assumes a well-formed batch,
returns 0 otherwise.
"""
if isinstance(batch, torch.Tensor):
return batch.size(0) # type: ignore
elif isinstance(batch, Dict):
return get_batch_size(next(iter(batch.values())))
else:
return 0
def time_to_str(timestamp: int) -> str:
"""
Convert seconds past Epoch to human readable string.
"""
datetimestamp = datetime.datetime.fromtimestamp(timestamp)
return "{:04d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}".format(
datetimestamp.year,
datetimestamp.month,
datetimestamp.day,
datetimestamp.hour,
datetimestamp.minute,
datetimestamp.second,
)
def str_to_time(time_str: str) -> datetime.datetime:
"""
Convert human readable string to datetime.datetime.
"""
pieces: Any = [int(piece) for piece in time_str.split("-")]
return datetime.datetime(*pieces)
def read_all_datasets(
train_data_path: str,
dataset_reader: DatasetReader,
validation_dataset_reader: DatasetReader = None,
validation_data_path: str = None,
test_data_path: str = None,
) -> Dict[str, Union["AllennlpDataset", "AllennlpLazyDataset"]]:
"""
Reads all datasets (perhaps lazily, if the corresponding dataset readers are lazy) and returns a
dictionary mapping dataset name ("train", "validation" or "test") to the iterable resulting from
`reader.read(filename)`.
"""
logger.info("Reading training data from %s", train_data_path)
train_data = dataset_reader.read(train_data_path)
datasets = {"train": train_data}
validation_dataset_reader = validation_dataset_reader or dataset_reader
if validation_data_path is not None:
logger.info("Reading validation data from %s", validation_data_path)
validation_data = validation_dataset_reader.read(validation_data_path)
datasets["validation"] = validation_data
if test_data_path is not None:
logger.info("Reading test data from %s", test_data_path)
test_data = validation_dataset_reader.read(test_data_path)
datasets["test"] = test_data
return datasets
def datasets_from_params(
params: Params,
train: bool = True,
validation: bool = True,
test: bool = True,
serialization_dir: Optional[Union[str, PathLike]] = None,
) -> Dict[str, Union["AllennlpDataset", "AllennlpLazyDataset"]]:
"""
Load datasets specified by the config.
"""
datasets: Dict[str, Union["AllennlpDataset", "AllennlpLazyDataset"]] = {}
train = train and ("train_data_path" in params)
validation = validation and ("validation_data_path" in params)
test = test and ("test_data_path" in params)
if not any((train, validation, test)):
# Return early so don't unnecessarily initialize the train data reader.
return datasets
dataset_reader_params = params.pop("dataset_reader")
dataset_reader = DatasetReader.from_params(
dataset_reader_params, serialization_dir=serialization_dir
)
if train:
train_data_path = params.pop("train_data_path")
logger.info("Reading training data from %s", train_data_path)
train_data = dataset_reader.read(train_data_path)
datasets["train"] = train_data
if not validation and not test:
# Return early so we don't unnecessarily initialize the validation/test data
# reader.
return datasets
validation_and_test_dataset_reader: DatasetReader = dataset_reader
validation_dataset_reader_params = params.pop("validation_dataset_reader", None)
if validation_dataset_reader_params is not None:
logger.info("Using a separate dataset reader to load validation and test data.")
validation_and_test_dataset_reader = DatasetReader.from_params(
validation_dataset_reader_params, serialization_dir=serialization_dir
)
if validation:
validation_data_path = params.pop("validation_data_path")
logger.info("Reading validation data from %s", validation_data_path)
validation_data = validation_and_test_dataset_reader.read(validation_data_path)
datasets["validation"] = validation_data
if test:
test_data_path = params.pop("test_data_path")
logger.info("Reading test data from %s", test_data_path)
test_data = validation_and_test_dataset_reader.read(test_data_path)
datasets["test"] = test_data
return datasets
def create_serialization_dir(
params: Params, serialization_dir: Union[str, PathLike], recover: bool, force: bool
) -> None:
"""
This function creates the serialization directory if it doesn't exist. If it already exists
and is non-empty, then it verifies that we're recovering from a training with an identical configuration.
# Parameters
params : `Params`
A parameter object specifying an AllenNLP Experiment.
serialization_dir : `str`
The directory in which to save results and logs.
recover : `bool`
If `True`, we will try to recover from an existing serialization directory, and crash if
the directory doesn't exist, or doesn't match the configuration we're given.
force : `bool`
If `True`, we will overwrite the serialization directory if it already exists.
"""
if recover and force:
raise ConfigurationError("Illegal arguments: both force and recover are true.")
if os.path.exists(serialization_dir) and force:
shutil.rmtree(serialization_dir)
if os.path.exists(serialization_dir) and os.listdir(serialization_dir):
if not recover:
raise ConfigurationError(
f"Serialization directory ({serialization_dir}) already exists and is "
f"not empty. Specify --recover to recover from an existing output folder."
)
logger.info(f"Recovering from prior training at {serialization_dir}.")
recovered_config_file = os.path.join(serialization_dir, CONFIG_NAME)
if not os.path.exists(recovered_config_file):
raise ConfigurationError(
"The serialization directory already exists but doesn't "
"contain a config.json. You probably gave the wrong directory."
)
loaded_params = Params.from_file(recovered_config_file)
# Check whether any of the training configuration differs from the configuration we are
# resuming. If so, warn the user that training may fail.
fail = False
flat_params = params.as_flat_dict()
flat_loaded = loaded_params.as_flat_dict()
for key in flat_params.keys() - flat_loaded.keys():
logger.error(
f"Key '{key}' found in training configuration but not in the serialization "
f"directory we're recovering from."
)
fail = True
for key in flat_loaded.keys() - flat_params.keys():
logger.error(
f"Key '{key}' found in the serialization directory we're recovering from "
f"but not in the training config."
)
fail = True
for key in flat_params.keys():
if flat_params.get(key) != flat_loaded.get(key):
logger.error(
f"Value for '{key}' in training configuration does not match that the value in "
f"the serialization directory we're recovering from: "
f"{flat_params[key]} != {flat_loaded[key]}"
)
fail = True
if fail:
raise ConfigurationError(
"Training configuration does not match the configuration we're recovering from."
)
else:
if recover:
raise ConfigurationError(
f"--recover specified but serialization_dir ({serialization_dir}) "
"does not exist. There is nothing to recover from."
)
os.makedirs(serialization_dir, exist_ok=True)
def enable_gradient_clipping(model: Model, grad_clipping: Optional[float]) -> None:
if grad_clipping is not None:
for parameter in model.parameters():
if parameter.requires_grad:
parameter.register_hook(
lambda grad: nn_util.clamp_tensor(
grad, minimum=-grad_clipping, maximum=grad_clipping
)
)
def rescale_gradients(model: Model, grad_norm: Optional[float] = None) -> Optional[float]:
"""
Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled.
"""
if grad_norm:
parameters_to_clip = [p for p in model.parameters() if p.grad is not None]
return clip_grad_norm_(parameters_to_clip, grad_norm)
return None
def get_metrics(
model: Model,
total_loss: float,
total_reg_loss: Optional[float],
batch_loss: Optional[float],
batch_reg_loss: Optional[float],
num_batches: int,
reset: bool = False,
world_size: int = 1,
cuda_device: Union[int, torch.device] = torch.device("cpu"),
) -> Dict[str, float]:
"""
Gets the metrics but sets `"loss"` to
the total loss divided by the `num_batches` so that
the `"loss"` metric is "average loss per batch".
Returns the `"batch_loss"` separately.
"""
metrics = model.get_metrics(reset=reset)
if batch_loss is not None:
metrics["batch_loss"] = batch_loss
metrics["loss"] = float(total_loss / num_batches) if num_batches > 0 else 0.0
if total_reg_loss is not None:
if batch_reg_loss is not None:
metrics["batch_reg_loss"] = batch_reg_loss
metrics["reg_loss"] = float(total_reg_loss / num_batches) if num_batches > 0 else 0.0
return metrics
def evaluate(
model: Model,
data_loader: DataLoader,
cuda_device: int = -1,
batch_weight_key: str = None,
output_file: str = None,
predictions_output_file: str = None,
) -> Dict[str, Any]:
"""
# Parameters
model : `Model`
The model to evaluate
data_loader : `DataLoader`
The `DataLoader` that will iterate over the evaluation data (data loaders already contain
their data).
cuda_device : `int`, optional (default=`-1`)
The cuda device to use for this evaluation. The model is assumed to already be using this
device; this parameter is only used for moving the input data to the correct device.
batch_weight_key : `str`, optional (default=`None`)
If given, this is a key in the output dictionary for each batch that specifies how to weight
the loss for that batch. If this is not given, we use a weight of 1 for every batch.
    output_file : `str`, optional (default=`None`)
Optional path to write the final metrics to.
predictions_output_file : `str`, optional (default=`None`)
Optional path to write the predictions to.
# Returns
`Dict[str, Any]`
The final metrics.
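    # Example

    A sketch of a typical call (construction of the model and data loader is elided):

    ```python
    metrics = evaluate(model, data_loader, cuda_device=-1, output_file="metrics.json")
    ```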
"""
check_for_gpu(cuda_device)
predictions_file = (
None if predictions_output_file is None else open(predictions_output_file, "w")
)
with torch.no_grad():
model.eval()
iterator = iter(data_loader)
logger.info("Iterating over dataset")
generator_tqdm = Tqdm.tqdm(iterator)
# Number of batches processed so far.
batch_count = 0
# Number of batches where the model produces a loss.
loss_count = 0
# Cumulative weighted loss
total_loss = 0.0
# Cumulative weight across all batches.
total_weight = 0.0
for batch in generator_tqdm:
batch_count += 1
batch = nn_util.move_to_device(batch, cuda_device)
output_dict = model(**batch)
loss = output_dict.get("loss")
metrics = model.get_metrics()
if loss is not None:
loss_count += 1
if batch_weight_key:
weight = output_dict[batch_weight_key].item()
else:
weight = 1.0
total_weight += weight
total_loss += loss.item() * weight
# Report the average loss so far.
metrics["loss"] = total_loss / total_weight
if not HasBeenWarned.tqdm_ignores_underscores and any(
metric_name.startswith("_") for metric_name in metrics
):
logger.warning(
'Metrics with names beginning with "_" will '
"not be logged to the tqdm progress bar."
)
HasBeenWarned.tqdm_ignores_underscores = True
description = (
", ".join(
[
"%s: %.2f" % (name, value)
for name, value in metrics.items()
if not name.startswith("_")
]
)
+ " ||"
)
generator_tqdm.set_description(description, refresh=False)
if predictions_file is not None:
predictions = json.dumps(sanitize(model.make_output_human_readable(output_dict)))
predictions_file.write(predictions + "\n")
if predictions_file is not None:
predictions_file.close()
final_metrics = model.get_metrics(reset=True)
if loss_count > 0:
# Sanity check
if loss_count != batch_count:
raise RuntimeError(
"The model you are trying to evaluate only sometimes produced a loss!"
)
final_metrics["loss"] = total_loss / total_weight
if output_file is not None:
dump_metrics(output_file, final_metrics, log=True)
return final_metrics
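# Illustrative sketch (not part of the original API): calling `evaluate` directly from code.
# Assumes `model` is already on the target device and `data_loader` wraps the held-out data;
# the two output paths are placeholders.
def _example_evaluate(model: Model, data_loader: DataLoader) -> Dict[str, Any]:
    return evaluate(
        model,
        data_loader,
        cuda_device=-1,  # CPU; pass a GPU id if the model lives on a GPU
        output_file="metrics.json",  # final metrics are written here via dump_metrics
        predictions_output_file="predictions.jsonl",  # one sanitized JSON prediction per line
    )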
def description_from_metrics(metrics: Dict[str, float]) -> str:
if not HasBeenWarned.tqdm_ignores_underscores and any(
metric_name.startswith("_") for metric_name in metrics
):
logger.warning(
'Metrics with names beginning with "_" will ' "not be logged to the tqdm progress bar."
)
HasBeenWarned.tqdm_ignores_underscores = True
return (
", ".join(
[
"%s: %.4f" % (name, value)
for name, value in metrics.items()
if not name.startswith("_")
]
)
+ " ||"
)
def make_vocab_from_params(
params: Params, serialization_dir: Union[str, PathLike], print_statistics: bool = False
) -> Vocabulary:
vocab_params = params.pop("vocabulary", {})
os.makedirs(serialization_dir, exist_ok=True)
vocab_dir = os.path.join(serialization_dir, "vocabulary")
if os.path.isdir(vocab_dir) and os.listdir(vocab_dir):
raise ConfigurationError(
"The 'vocabulary' directory in the provided serialization directory is non-empty"
)
datasets_for_vocab_creation: Optional[List[str]] = params.pop(
"datasets_for_vocab_creation", None
)
# Do a quick sanity check here. There's no need to load any datasets if the vocab
# type is "empty".
if datasets_for_vocab_creation is None and vocab_params.get("type") in ("empty", "from_files"):
datasets_for_vocab_creation = []
datasets: Dict[str, Union["AllennlpDataset", "AllennlpLazyDataset"]]
if datasets_for_vocab_creation is None:
# If `datasets_for_vocab_creation` was not specified, we'll use all datasets
# from the config.
datasets = datasets_from_params(params, serialization_dir=serialization_dir)
else:
for dataset_name in datasets_for_vocab_creation:
data_path = f"{dataset_name}_data_path"
if data_path not in params:
raise ConfigurationError(f"invalid 'datasets_for_vocab_creation' {dataset_name}")
datasets = datasets_from_params(
params,
serialization_dir=serialization_dir,
train=("train" in datasets_for_vocab_creation),
validation=("validation" in datasets_for_vocab_creation),
test=("test" in datasets_for_vocab_creation),
)
instances: Iterable[Instance] = (
instance
for key, dataset in datasets.items()
if datasets_for_vocab_creation is None or key in datasets_for_vocab_creation
for instance in dataset
)
if print_statistics:
instances = list(instances)
vocab = Vocabulary.from_params(vocab_params, instances=instances)
logger.info(f"writing the vocabulary to {vocab_dir}.")
vocab.save_to_files(vocab_dir)
logger.info("done creating vocab")
if print_statistics:
dataset = Batch(instances)
dataset.index_instances(vocab)
dataset.print_statistics()
vocab.print_statistics()
return vocab
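# Illustrative sketch (not part of the original API): the rough shape of the `Params` this
# helper expects. The reader type, data path, and vocabulary settings below are placeholders
# and depend entirely on your experiment configuration.
def _example_make_vocab(serialization_dir: str) -> Vocabulary:
    params = Params(
        {
            "dataset_reader": {"type": "text_classification_json"},
            "train_data_path": "/path/to/train.jsonl",
            "datasets_for_vocab_creation": ["train"],
            "vocabulary": {"type": "from_instances", "min_count": {"tokens": 1}},
        }
    )
    return make_vocab_from_params(params, serialization_dir, print_statistics=True)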
def ngrams(
tensor: torch.LongTensor, ngram_size: int, exclude_indices: Set[int]
) -> Dict[Tuple[int, ...], int]:
ngram_counts: Dict[Tuple[int, ...], int] = Counter()
if ngram_size > tensor.size(-1):
return ngram_counts
for start_position in range(ngram_size):
for tensor_slice in tensor[start_position:].split(ngram_size, dim=-1):
if tensor_slice.size(-1) < ngram_size:
break
ngram = tuple(x.item() for x in tensor_slice)
if any(x in exclude_indices for x in ngram):
continue
ngram_counts[ngram] += 1
return ngram_counts
def get_valid_tokens_mask(tensor: torch.LongTensor, exclude_indices: Set[int]) -> torch.ByteTensor:
valid_tokens_mask = torch.ones_like(tensor, dtype=torch.bool)
for index in exclude_indices:
valid_tokens_mask &= tensor != index
return valid_tokens_mask
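# Illustrative sketch (not part of the original API): what the two helpers above compute on a
# toy 1-D tensor. The token ids and the excluded padding id (0) are made up.
def _example_ngrams_and_mask() -> None:
    tokens = torch.LongTensor([2, 3, 2, 3, 0])  # last position is padding
    exclude = {0}
    bigrams = ngrams(tokens, ngram_size=2, exclude_indices=exclude)
    # bigrams == {(2, 3): 2, (3, 2): 1}; any n-gram containing the padding id is skipped
    mask = get_valid_tokens_mask(tokens, exclude)
    # mask == tensor([True, True, True, True, False])
    assert sum(bigrams.values()) == 3 and int(mask.sum()) == 4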
| allennlp-master | allennlp/training/util.py |
from typing import Iterable, Tuple, Optional
import torch
from allennlp.common.registrable import Registrable
NamedParameter = Tuple[str, torch.Tensor]
class MovingAverage(Registrable):
"""
Tracks a moving average of model parameters.
"""
default_implementation = "exponential"
def __init__(self, parameters: Iterable[NamedParameter]) -> None:
self._parameters = list(parameters)
self._shadows = {name: parameter.data.clone() for name, parameter in self._parameters}
self._backups = {name: parameter.data.clone() for name, parameter in self._parameters}
def apply(self, num_updates: Optional[int] = None):
"""
Update the moving averages based on the latest values of the parameters.
"""
raise NotImplementedError
def assign_average_value(self) -> None:
"""
Replace all the parameter values with the averages.
Save the current parameter values to restore later.
"""
for name, parameter in self._parameters:
self._backups[name].copy_(parameter.data)
parameter.data.copy_(self._shadows[name])
def restore(self) -> None:
"""
Restore the backed-up (non-average) parameter values.
"""
for name, parameter in self._parameters:
parameter.data.copy_(self._backups[name])
@MovingAverage.register("exponential")
class ExponentialMovingAverage(MovingAverage):
"""
Create shadow variables and maintain exponential moving average for model parameters.
Registered as a `MovingAverage` with name "exponential".
# Parameters
parameters : `Iterable[Tuple[str, Parameter]]`, required
The parameters whose averages we'll be tracking.
In a typical AllenNLP configuration file, this argument does not get an entry under the
"moving_average", it gets passed in separately.
decay : `float`, optional (default = `0.9999`)
The decay rate that will be used if `num_updates` is not passed
(and that will be used as an upper bound if `num_updates` is passed).
numerator : `float`, optional (default = `1.0`)
The numerator used to compute the decay rate if `num_updates` is passed.
denominator : `float`, optional (default = `10.0`)
The denominator used to compute the decay rate if `num_updates` is passed.
"""
def __init__(
self,
parameters: Iterable[NamedParameter],
decay: float = 0.9999,
numerator: float = 1.0,
denominator: float = 10.0,
) -> None:
super().__init__(parameters)
self._decay = decay
self._numerator = numerator
self._denominator = denominator
def apply(self, num_updates: Optional[int] = None) -> None:
"""
Update the exponential moving average of every tracked parameter.
The optional `num_updates` parameter allows one to tweak the decay rate
dynamically. If passed, the actual decay rate used is:
`min(decay, (numerator + num_updates) / (denominator + num_updates))`
(This logic is based on the Tensorflow exponential moving average
<https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage>)
"""
if num_updates is not None:
decay = min(
self._decay, (self._numerator + num_updates) / (self._denominator + num_updates)
)
else:
decay = self._decay
for name, parameter in self._parameters:
self._shadows[name].mul_(decay).add_((1 - decay) * parameter.data)
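# Illustrative sketch (not part of the original API): the intended apply / assign / restore
# cycle around validation. The model, the training step, and the update count are placeholders.
def _example_ema_cycle(model: torch.nn.Module) -> None:
    ema = ExponentialMovingAverage(model.named_parameters(), decay=0.999)
    for step in range(1, 101):
        # ... run one optimizer step on `model` here ...
        ema.apply(num_updates=step)  # effective decay is min(0.999, (1 + step) / (10 + step))
    ema.assign_average_value()  # swap in the averaged weights for evaluation
    # ... run validation here ...
    ema.restore()  # put the raw weights back before training resumes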
| allennlp-master | allennlp/training/moving_average.py |
from typing import Union, Dict, Any, List, Tuple, Optional
import logging
import os
import re
import shutil
import time
import torch
import allennlp
from allennlp.common import Registrable
from allennlp.nn import util as nn_util
from allennlp.training import util as training_util
logger = logging.getLogger(__name__)
class Checkpointer(Registrable):
"""
This class implements the functionality for checkpointing your model and trainer state
during training. It is agnostic as to what those states look like (they are typed as
Dict[str, Any]), but they will be fed to `torch.save` so they should be serializable
in that sense. They will also be restored as Dict[str, Any], which means the calling
code is responsible for knowing what to do with them.
# Parameters
num_serialized_models_to_keep : `int`, optional (default=`2`)
Number of previous model checkpoints to retain. Default is to keep 2 checkpoints.
A value of None or -1 means all checkpoints will be kept.
In a typical AllenNLP configuration file, this argument does not get an entry under the
"checkpointer", it gets passed in separately.
keep_serialized_model_every_num_seconds : `int`, optional (default=`None`)
If num_serialized_models_to_keep is not None, then occasionally it's useful to
save models at a given interval in addition to the last num_serialized_models_to_keep.
To do so, specify keep_serialized_model_every_num_seconds as the number of seconds
between permanently saved checkpoints. Note that this option is only used if
num_serialized_models_to_keep is not None, otherwise all checkpoints are kept.
model_save_interval : `float`, optional (default=`None`)
If provided, then serialize models every `model_save_interval`
seconds within single epochs. In all cases, models are also saved
at the end of every epoch if `serialization_dir` is provided.
"""
default_implementation = "default"
def __init__(
self,
serialization_dir: str,
keep_serialized_model_every_num_seconds: int = None,
num_serialized_models_to_keep: int = 2,
model_save_interval: float = None,
) -> None:
self._serialization_dir = serialization_dir
self._keep_serialized_model_every_num_seconds = keep_serialized_model_every_num_seconds
self._num_serialized_models_to_keep = num_serialized_models_to_keep
self._model_save_interval = model_save_interval
self._last_permanent_saved_checkpoint_time = time.time()
self._serialized_paths: List[Tuple[float, str, str]] = []
self._last_save_time = time.time()
def maybe_save_checkpoint(
self, trainer: "allennlp.training.trainer.Trainer", epoch: int, batches_this_epoch: int
) -> None:
"""
Given amount of time lapsed between the last save and now (tracked internally), the
current epoch, and the number of batches seen so far this epoch, this method decides whether
to save a checkpoint or not. If we decide to save a checkpoint, we grab whatever state we
need out of the `Trainer` and save it.
This function is intended to be called at the end of each batch in an epoch (perhaps because
your data is large enough that you don't really have "epochs"). The default implementation
only looks at time, not batch or epoch number, though those parameters are available to you
if you want to customize the behavior of this function.
"""
if self._model_save_interval is None:
return
if time.time() - self._last_save_time < self._model_save_interval:
return
self._last_save_time = time.time()
epoch_str = f"{epoch}.{training_util.time_to_str(int(self._last_save_time))}"
self.save_checkpoint(epoch_str, trainer)
def save_checkpoint(
self,
epoch: Union[int, str],
trainer: "allennlp.training.trainer.Trainer",
is_best_so_far: bool = False,
save_model_only=False,
) -> None:
if self._serialization_dir is not None:
with trainer.get_checkpoint_state() as state:
model_state, training_states = state
model_path = os.path.join(
self._serialization_dir, "model_state_epoch_{}.th".format(epoch)
)
if not os.path.isfile(model_path):
torch.save(model_state, model_path)
if save_model_only:
return
training_path = os.path.join(
self._serialization_dir, "training_state_epoch_{}.th".format(epoch)
)
if not os.path.isfile(training_path):
torch.save({**training_states, "epoch": epoch}, training_path)
# The main checkpointing logic is now done, this is just shuffling files around, to keep
# track of best weights, and to remove old checkpoints, if desired.
if is_best_so_far:
logger.info(
"Best validation performance so far. Copying weights to '%s/best.th'.",
self._serialization_dir,
)
shutil.copyfile(model_path, os.path.join(self._serialization_dir, "best.th"))
if (
self._num_serialized_models_to_keep is not None
and self._num_serialized_models_to_keep >= 0
):
self._serialized_paths.append((time.time(), model_path, training_path))
if len(self._serialized_paths) > self._num_serialized_models_to_keep:
paths_to_remove = self._serialized_paths.pop(0)
# Check to see if we should keep this checkpoint, if it has been longer
# then self._keep_serialized_model_every_num_seconds since the last
# kept checkpoint.
remove_path = True
if self._keep_serialized_model_every_num_seconds is not None:
save_time = paths_to_remove[0]
time_since_checkpoint_kept = (
save_time - self._last_permanent_saved_checkpoint_time
)
if (
time_since_checkpoint_kept
> self._keep_serialized_model_every_num_seconds
):
# We want to keep this checkpoint.
remove_path = False
self._last_permanent_saved_checkpoint_time = save_time
if remove_path:
for fname in paths_to_remove[1:]:
if os.path.isfile(fname):
os.remove(fname)
def find_latest_checkpoint(self) -> Optional[Tuple[str, str]]:
"""
Return the location of the latest model and training state files.
If there isn't a valid checkpoint then return None.
"""
have_checkpoint = self._serialization_dir is not None and any(
"model_state_epoch_" in x for x in os.listdir(self._serialization_dir)
)
if not have_checkpoint:
return None
serialization_files = os.listdir(self._serialization_dir)
model_checkpoints = [x for x in serialization_files if "model_state_epoch" in x]
# Get the last checkpoint file. Epochs are specified as either an
# int (for end of epoch files) or with epoch and timestamp for
# within epoch checkpoints, e.g. 5.2018-02-02-15-33-42
found_epochs = [
re.search(r"model_state_epoch_([0-9\.\-]+)\.th", x).group(1) for x in model_checkpoints # type: ignore
]
int_epochs: Any = []
for epoch in found_epochs:
pieces = epoch.split(".")
if len(pieces) == 1:
# Just a single epoch without timestamp
int_epochs.append([int(pieces[0]), "0"])
else:
# has a timestamp
int_epochs.append([int(pieces[0]), pieces[1]])
last_epoch = sorted(int_epochs, reverse=True)[0]
if last_epoch[1] == "0":
epoch_to_load = str(last_epoch[0])
else:
epoch_to_load = "{0}.{1}".format(last_epoch[0], last_epoch[1])
model_path = os.path.join(
self._serialization_dir, "model_state_epoch_{}.th".format(epoch_to_load)
)
training_state_path = os.path.join(
self._serialization_dir, "training_state_epoch_{}.th".format(epoch_to_load)
)
return (model_path, training_state_path)
def restore_checkpoint(self) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
Restores a model from a serialization_dir to the last saved checkpoint.
This includes a training state (typically consisting of an epoch count and optimizer state),
which is serialized separately from model parameters. This function should only be used to
continue training - if you wish to load a model for inference/load parts of a model into a new
computation graph, you should use the native Pytorch functions:
`model.load_state_dict(torch.load("/path/to/model/weights.th"))`
If `self._serialization_dir` does not exist or does not contain any checkpointed weights,
this function will do nothing and return empty dicts.
# Returns
states : `Tuple[Dict[str, Any], Dict[str, Any]]`
The model state and the training state.
"""
latest_checkpoint = self.find_latest_checkpoint()
if latest_checkpoint is None:
# No checkpoint to restore, start at 0
return {}, {}
model_path, training_state_path = latest_checkpoint
# Load the parameters onto CPU, then transfer to GPU.
# This avoids potential OOM on GPU for large models that
# load parameters onto GPU then make a new GPU copy into the parameter
# buffer. The GPU transfer happens implicitly in load_state_dict.
model_state = torch.load(model_path, map_location=nn_util.device_mapping(-1))
training_state = torch.load(training_state_path, map_location=nn_util.device_mapping(-1))
return model_state, training_state
def best_model_state(self) -> Dict[str, Any]:
if self._serialization_dir:
logger.info("loading best weights")
best_model_state_path = os.path.join(self._serialization_dir, "best.th")
return torch.load(best_model_state_path, map_location=nn_util.device_mapping(-1))
else:
logger.info(
"cannot load best weights without `serialization_dir`, "
"so you're just getting the last weights"
)
return {}
Checkpointer.register("default")(Checkpointer)
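# Illustrative sketch (not part of the original API): resuming from the newest checkpoint in a
# serialization directory without going through a full `Trainer`. The directory path and the
# number of checkpoints to keep are placeholders.
def _example_restore_latest(serialization_dir: str) -> None:
    checkpointer = Checkpointer(serialization_dir, num_serialized_models_to_keep=2)
    if checkpointer.find_latest_checkpoint() is None:
        logger.info("no checkpoint found in %s; starting fresh", serialization_dir)
        return
    model_state, training_state = checkpointer.restore_checkpoint()
    # `model_state` is what you feed to model.load_state_dict(...); `training_state` holds
    # whatever the trainer saved alongside the weights (epoch count, optimizer state, ...).
    logger.info("resuming from epoch %s", training_state.get("epoch"))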
| allennlp-master | allennlp/training/checkpointer.py |
from allennlp.training.checkpointer import Checkpointer
from allennlp.training.tensorboard_writer import TensorboardWriter
from allennlp.training.no_op_trainer import NoOpTrainer
from allennlp.training.trainer import (
Trainer,
GradientDescentTrainer,
BatchCallback,
EpochCallback,
TrainerCallback,
TrackEpochCallback,
)
| allennlp-master | allennlp/training/__init__.py |
"""
AllenNLP just uses
[PyTorch optimizers](https://pytorch.org/docs/master/optim.html),
with a thin wrapper to allow registering them and instantiating them `from_params`.
The available optimizers are
* [adadelta](https://pytorch.org/docs/master/optim.html#torch.optim.Adadelta)
* [adagrad](https://pytorch.org/docs/master/optim.html#torch.optim.Adagrad)
* [adam](https://pytorch.org/docs/master/optim.html#torch.optim.Adam)
* [adamw](https://pytorch.org/docs/master/optim.html#torch.optim.AdamW)
* [huggingface_adamw](https://huggingface.co/transformers/main_classes/optimizer_schedules.html#transformers.AdamW)
* [sparse_adam](https://pytorch.org/docs/master/optim.html#torch.optim.SparseAdam)
* [sgd](https://pytorch.org/docs/master/optim.html#torch.optim.SGD)
* [rmsprop](https://pytorch.org/docs/master/optim.html#torch.optim.RMSprop)
* [adamax](https://pytorch.org/docs/master/optim.html#torch.optim.Adamax)
* [averaged_sgd](https://pytorch.org/docs/master/optim.html#torch.optim.ASGD)
"""
import logging
import re
import math
from typing import Any, Dict, List, Tuple, Union
import torch
import transformers
from allennlp.common import Params, Registrable
logger = logging.getLogger(__name__)
def make_parameter_groups(
model_parameters: List[Tuple[str, torch.nn.Parameter]],
groups: List[Tuple[List[str], Dict[str, Any]]] = None,
) -> Union[List[Dict[str, Any]], List[torch.nn.Parameter]]:
"""
Takes a list of model parameters with associated names (typically coming from something like
`model.parameters`), along with a grouping (as specified below), and prepares them to be passed
to the `__init__` function of a `torch.Optimizer`. This means separating the parameters into
groups with the given regexes, and prepping whatever keyword arguments are given for those
regexes in `groups`.
`groups` contains something like:
```
[
(["regex1", "regex2"], {"lr": 1e-3}),
(["regex3"], {"lr": 1e-4})
]
```
All of the key-value pairs specified in each of these dictionaries will be passed as-is
to the optimizer, with the exception of dictionaries that specify `requires_grad` to be `False`:
```
[
...
(["regex"], {"requires_grad": False})
]
```
When a parameter group has `{"requires_grad": False}`, the gradient on all matching parameters
will be disabled and that group will be dropped so that it's not actually passed to the optimizer.
Ultimately, the return value of this function is in the right format to be passed directly
as the `params` argument to a pytorch `Optimizer`.
If there are multiple groups specified, this is a list of dictionaries, where each
dict contains a "parameter group" and group-specific options, e.g., {'params': [list of
parameters], 'lr': 1e-3, ...}. Any config option not specified in the additional options (e.g.
for the default group) is inherited from the top level arguments given in the constructor. See:
<https://pytorch.org/docs/0.3.0/optim.html?#per-parameter-options>. See also our
`test_optimizer_parameter_groups` test for an example of how this works in this code.
The values in each group dictionary are typed as `Any`, because they can be a `List[torch.nn.Parameter]`
(for the "params" key) or anything else (typically a float) for the other keys.
"""
if groups:
# In addition to any parameters that match group specific regex,
# we also need a group for the remaining "default" group.
# Those will be included in the last entry of parameter_groups.
parameter_groups: Union[List[Dict[str, Any]], List[torch.nn.Parameter]] = [
{"params": []} for _ in range(len(groups) + 1)
]
# add the group specific kwargs
for k in range(len(groups)):
parameter_groups[k].update(groups[k][1])
regex_use_counts: Dict[str, int] = {}
parameter_group_names: List[set] = [set() for _ in range(len(groups) + 1)]
for name, param in model_parameters:
# Determine the group for this parameter.
group_index = None
for k, group_regexes in enumerate(groups):
for regex in group_regexes[0]:
if regex not in regex_use_counts:
regex_use_counts[regex] = 0
if re.search(regex, name):
if group_index is not None and group_index != k:
raise ValueError(
"{} was specified in two separate parameter groups".format(name)
)
group_index = k
regex_use_counts[regex] += 1
if group_index is not None:
parameter_groups[group_index]["params"].append(param)
parameter_group_names[group_index].add(name)
else:
# the default group
parameter_groups[-1]["params"].append(param)
parameter_group_names[-1].add(name)
# find and remove any groups with 'requires_grad = False'
no_grad_group_indices: List[int] = []
for k, (names, group) in enumerate(zip(parameter_group_names, parameter_groups)):
if group.get("requires_grad") is False:
no_grad_group_indices.append(k)
logger.info("Disabling gradient for the following parameters: %s", names)
for param in group["params"]:
param.requires_grad_(False)
# warn about any other unused options in that group.
unused_options = {
key: val for key, val in group.items() if key not in ("params", "requires_grad")
}
if unused_options:
logger.warning("Ignoring unused options %s for %s", unused_options, names)
parameter_group_names = [
names
for (k, names) in enumerate(parameter_group_names)
if k not in no_grad_group_indices
]
parameter_groups = [
group for (k, group) in enumerate(parameter_groups) if k not in no_grad_group_indices
]
# log the remaining parameter groups
logger.info("Done constructing parameter groups.")
for k in range(len(parameter_groups)):
group_options = {
key: val for key, val in parameter_groups[k].items() if key != "params"
}
logger.info("Group %s: %s, %s", k, list(parameter_group_names[k]), group_options)
# check for unused regex
for regex, count in regex_use_counts.items():
if count == 0:
logger.warning(
"When constructing parameter groups, %s does not match any parameter name",
regex,
)
else:
parameter_groups = [param for name, param in model_parameters]
# Log the number of parameters to optimize
num_parameters = 0
for parameter_group in parameter_groups:
if isinstance(parameter_group, dict):
num_parameters += sum(parameter.numel() for parameter in parameter_group["params"])
else:
num_parameters += parameter_group.numel() # type: ignore
logger.info("Number of trainable parameters: %s", num_parameters)
return parameter_groups
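# Illustrative sketch (not part of the original API): building parameter groups for a toy
# two-layer module. The regexes and per-group options are placeholders.
def _example_parameter_groups() -> Union[List[Dict[str, Any]], List[torch.nn.Parameter]]:
    model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Linear(4, 2))
    groups = [
        ([r"0\.weight", r"0\.bias"], {"lr": 1e-3}),  # the first layer gets its own learning rate
        ([r"1\.bias"], {"requires_grad": False}),  # freeze the second layer's bias; this group is dropped
    ]
    # Anything unmatched (here "1.weight") lands in the trailing default group.
    return make_parameter_groups(list(model.named_parameters()), groups)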
class Optimizer(torch.optim.Optimizer, Registrable):
"""
This class just allows us to implement `Registrable` for Pytorch Optimizers. We do something a
little bit different with `Optimizers`, because they are implemented as classes in PyTorch, and
we want to use those classes. To make things easy, we just inherit from those classes, using
multiple inheritance to also inherit from `Optimizer`. The only reason we do this is to make
type inference on parameters possible, so we can construct these objects using our configuration
framework. If you are writing your own script, you can safely ignore these classes and just use
the `torch.optim` classes directly.
If you are implementing one of these classes, the `model_parameters` and `parameter_groups`
arguments to `__init__` are important, and should always be present. The trainer will pass
the trainable parameters in the model to the optimizer using the name `model_parameters`, so if
you use a different name, your code will crash. Nothing will technically crash if you use a
name other than `parameter_groups` for your second argument, it will just be annoyingly
inconsistent.
Most subclasses of `Optimizer` take both a `model_parameters` and a `parameter_groups`
constructor argument. The `model_parameters` argument does not get an entry in a typical
AllenNLP configuration file, but the `parameter_groups` argument does (if you want a non-default
value). See the documentation for the `make_parameter_groups` function for more information on
how the `parameter_groups` argument should be specified.
"""
default_implementation = "adam"
@staticmethod
def default(model_parameters: List) -> "Optimizer":
return Optimizer.from_params(model_parameters=model_parameters, params=Params({}))
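# Illustrative sketch (not part of the original API): two ways to build an optimizer for a
# model's trainable parameters. The "adam" type and the learning rate are placeholders.
def _example_build_optimizer(model: torch.nn.Module) -> Optimizer:
    model_parameters = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
    # 1) the config-driven route, resolving the registered name "adam" to AdamOptimizer
    optimizer = Optimizer.from_params(
        model_parameters=model_parameters, params=Params({"type": "adam", "lr": 1e-3})
    )
    # 2) or fall back to the default implementation (Adam with its default settings)
    _unused_default = Optimizer.default(model_parameters)
    return optimizer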
@Optimizer.register("adam")
class AdamOptimizer(Optimizer, torch.optim.Adam):
"""
Registered as an `Optimizer` with name "adam".
"""
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 0.001,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-08,
weight_decay: float = 0.0,
amsgrad: bool = False,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
amsgrad=amsgrad,
)
@Optimizer.register("sparse_adam")
class SparseAdamOptimizer(Optimizer, torch.optim.SparseAdam):
"""
Registered as an `Optimizer` with name "sparse_adam".
"""
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 0.001,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-08,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
betas=betas,
eps=eps,
)
@Optimizer.register("adamax")
class AdamaxOptimizer(Optimizer, torch.optim.Adamax):
"""
Registered as an `Optimizer` with name "adamax".
"""
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 0.002,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-08,
weight_decay: float = 0.0,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
)
@Optimizer.register("adamw")
class AdamWOptimizer(Optimizer, torch.optim.AdamW):
"""
Registered as an `Optimizer` with name "adamw".
"""
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 0.001,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-08,
weight_decay: float = 0.01,
amsgrad: bool = False,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
amsgrad=amsgrad,
)
@Optimizer.register("huggingface_adamw")
class HuggingfaceAdamWOptimizer(Optimizer, transformers.AdamW):
"""
Registered as an `Optimizer` with name "huggingface_adamw".
"""
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 1e-5,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-08,
weight_decay: float = 0.0,
correct_bias: bool = True,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
correct_bias=correct_bias,
)
@Optimizer.register("adagrad")
class AdagradOptimizer(Optimizer, torch.optim.Adagrad):
"""
Registered as an `Optimizer` with name "adagrad".
"""
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 0.01,
lr_decay: float = 0.0,
weight_decay: float = 0.0,
initial_accumulator_value: float = 0.0,
eps: float = 1e-10,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
lr_decay=lr_decay,
weight_decay=weight_decay,
initial_accumulator_value=initial_accumulator_value,
eps=eps,
)
@Optimizer.register("adadelta")
class AdadeltaOptimizer(Optimizer, torch.optim.Adadelta):
"""
Registered as an `Optimizer` with name "adadelta".
"""
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 1.0,
rho: float = 0.9,
eps: float = 1e-06,
weight_decay: float = 0.0,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
rho=rho,
eps=eps,
weight_decay=weight_decay,
)
@Optimizer.register("sgd")
class SgdOptimizer(Optimizer, torch.optim.SGD):
"""
Registered as an `Optimizer` with name "sgd".
"""
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
lr: float,
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
momentum: float = 0.0,
dampening: float = 0,
weight_decay: float = 0.0,
nesterov: bool = False,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
)
@Optimizer.register("rmsprop")
class RmsPropOptimizer(Optimizer, torch.optim.RMSprop):
"""
Registered as an `Optimizer` with name "rmsprop".
"""
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 0.01,
alpha: float = 0.99,
eps: float = 1e-08,
weight_decay: float = 0.0,
momentum: float = 0.0,
centered: bool = False,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
alpha=alpha,
eps=eps,
weight_decay=weight_decay,
momentum=momentum,
centered=centered,
)
@Optimizer.register("averaged_sgd")
class AveragedSgdOptimizer(Optimizer, torch.optim.ASGD):
"""
Registered as an `Optimizer` with name "averaged_sgd".
"""
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 0.01,
lambd: float = 0.0001,
alpha: float = 0.75,
t0: float = 1000000.0,
weight_decay: float = 0.0,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
lambd=lambd,
alpha=alpha,
t0=t0,
weight_decay=weight_decay,
)
@Optimizer.register("dense_sparse_adam")
class DenseSparseAdam(Optimizer, torch.optim.Optimizer):
"""
NOTE: This class has been copied verbatim from the separate Dense and
Sparse versions of Adam in Pytorch.
Implements Adam algorithm with dense & sparse gradients.
It has been proposed in Adam: A Method for Stochastic Optimization.
Registered as an `Optimizer` with name "dense_sparse_adam".
# Parameters
model_parameters : `List[Tuple[str, torch.nn.Parameter]]`
The named parameters to optimize, optionally split into groups via `parameter_groups`.
lr : `float`, optional (default = `1e-3`)
The learning rate.
betas : `Tuple[float, float]`, optional (default = `(0.9, 0.999)`)
coefficients used for computing running averages of gradient
and its square.
eps : `float`, optional, (default = `1e-8`)
A term added to the denominator to improve numerical stability.
"""
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps)
super().__init__(make_parameter_groups(model_parameters, parameter_groups), defaults)
def step(self, closure=None):
"""
Performs a single optimization step.
# Parameters
closure : `callable`, optional.
A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
state["step"] += 1
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
if grad.is_sparse:
grad = grad.coalesce() # the update is non-linear so indices must be unique
grad_indices = grad._indices()
grad_values = grad._values()
size = grad.size()
def make_sparse(values):
constructor = grad.new
if grad_indices.dim() == 0 or values.dim() == 0:
return constructor().resize_as_(grad)
return constructor(grad_indices, values, size)
# Decay the first and second moment running average coefficient
# old <- b * old + (1 - b) * new
# <==> old += (1 - b) * (new - old)
old_exp_avg_values = exp_avg.sparse_mask(grad)._values()
exp_avg_update_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1)
exp_avg.add_(make_sparse(exp_avg_update_values))
old_exp_avg_sq_values = exp_avg_sq.sparse_mask(grad)._values()
exp_avg_sq_update_values = (
grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2)
)
exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values))
# Dense addition again is intended, avoiding another sparse_mask
numer = exp_avg_update_values.add_(old_exp_avg_values)
exp_avg_sq_update_values.add_(old_exp_avg_sq_values)
denom = exp_avg_sq_update_values.sqrt_().add_(group["eps"])
del exp_avg_update_values, exp_avg_sq_update_values
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
p.data.add_(make_sparse(-step_size * numer.div_(denom)))
else:
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
denom = exp_avg_sq.sqrt().add_(group["eps"])
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(exp_avg, denom, value=-step_size)
return loss
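# Illustrative sketch (not part of the original API): `DenseSparseAdam` updating a model that
# mixes sparse gradients (from a `sparse=True` embedding) with ordinary dense gradients.
def _example_dense_sparse_adam_step() -> None:
    embedding = torch.nn.Embedding(100, 8, sparse=True)  # produces sparse gradients
    projection = torch.nn.Linear(8, 2)  # produces dense gradients
    model = torch.nn.Sequential(embedding, projection)
    optimizer = DenseSparseAdam(list(model.named_parameters()), lr=1e-3)
    loss = model(torch.LongTensor([[1, 2, 3]])).sum()
    loss.backward()
    optimizer.step()  # both the sparse and the dense branch of `step` are exercised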
| allennlp-master | allennlp/training/optimizers.py |
from typing import Dict, Any
import torch
class Scheduler:
"""
A `Scheduler` is a generalization of PyTorch learning rate schedulers.
A scheduler can be used to update any field in an optimizer's parameter groups,
not just the learning rate.
During training using the AllenNLP `Trainer`, this is the API and calling
sequence for `step` and `step_batch`::
scheduler = ... # creates scheduler
batch_num_total = 0
for epoch in range(num_epochs):
for batch in batchs_in_epoch:
# compute loss, update parameters with current learning rates
# call step_batch AFTER updating parameters
batch_num_total += 1
scheduler.step_batch(batch_num_total)
# call step() at the END of each epoch
scheduler.step(validation_metrics, epoch)
"""
def __init__(
self, optimizer: torch.optim.Optimizer, param_group_field: str, last_epoch: int = -1
) -> None:
self.optimizer = optimizer
self.param_group_field = param_group_field
self._initial_param_group_field = f"initial_{param_group_field}"
if last_epoch == -1:
for i, group in enumerate(self.optimizer.param_groups):
if param_group_field not in group:
raise KeyError(f"{param_group_field} missing from param_groups[{i}]")
group.setdefault(self._initial_param_group_field, group[param_group_field])
else:
for i, group in enumerate(self.optimizer.param_groups):
if self._initial_param_group_field not in group:
raise KeyError(
f"{self._initial_param_group_field} missing from param_groups[{i}]"
)
self.base_values = [
group[self._initial_param_group_field] for group in self.optimizer.param_groups
]
self.last_epoch = last_epoch
def state_dict(self) -> Dict[str, Any]:
"""
Returns the state of the scheduler as a `dict`.
"""
return {key: value for key, value in self.__dict__.items() if key != "optimizer"}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
"""
Load the schedulers state.
# Parameters
state_dict : `Dict[str, Any]`
Scheduler state. Should be an object returned from a call to `state_dict`.
"""
self.__dict__.update(state_dict)
def get_values(self):
raise NotImplementedError
def step(self, metric: float = None) -> None:
self.last_epoch += 1
self.metric = metric
for param_group, value in zip(self.optimizer.param_groups, self.get_values()):
param_group[self.param_group_field] = value
def step_batch(self, batch_num_total: int = None) -> None:
"""
By default, a scheduler is assumed to only update every epoch, not every batch.
So this does nothing unless it's overridden.
"""
return
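# Illustrative sketch (not part of the original API): a minimal concrete `Scheduler` that decays
# the tracked field by a constant factor on every call to `step`. The decay value is a placeholder.
class _ExampleConstantDecayScheduler(Scheduler):
    def __init__(self, optimizer: torch.optim.Optimizer, decay: float = 0.9) -> None:
        super().__init__(optimizer, param_group_field="lr", last_epoch=-1)
        self.decay = decay
    def get_values(self):
        # After the k-th call to `step`, `last_epoch` is k - 1, so each group uses base * decay**k.
        return [base * self.decay ** (self.last_epoch + 1) for base in self.base_values]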
| allennlp-master | allennlp/training/scheduler.py |
import datetime
import logging
import math
import os
import re
import time
import traceback
from contextlib import contextmanager
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Type, Union
from allennlp.common.util import int_to_device
import torch
import torch.distributed as dist
from torch.cuda import amp
import torch.optim.lr_scheduler
from torch.nn.parallel import DistributedDataParallel
from torch.nn.utils import clip_grad_norm_
from allennlp.common import Lazy, Registrable, Tqdm
from allennlp.common import util as common_util
from allennlp.common.checks import ConfigurationError, check_for_gpu
from allennlp.data import DataLoader
from allennlp.data.dataloader import TensorDict
from allennlp.models.model import Model
from allennlp.nn import util as nn_util
from allennlp.training import util as training_util
from allennlp.training.checkpointer import Checkpointer
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.metric_tracker import MetricTracker
from allennlp.training.momentum_schedulers import MomentumScheduler
from allennlp.training.moving_average import MovingAverage
from allennlp.training.optimizers import Optimizer
from allennlp.training.tensorboard_writer import TensorboardWriter
logger = logging.getLogger(__name__)
class Trainer(Registrable):
"""
The base class for an AllenNLP trainer. It can do pretty much
anything you want. Your subclass should implement `train`
and also probably `from_params`.
"""
default_implementation = "gradient_descent"
def __init__(
self,
serialization_dir: str = None,
cuda_device: Optional[Union[int, torch.device]] = None,
distributed: bool = False,
local_rank: int = 0,
world_size: int = 1,
) -> None:
if cuda_device is None:
from torch import cuda
if cuda.device_count() > 0:
cuda_device = 0
else:
cuda_device = -1
check_for_gpu(cuda_device)
self._serialization_dir = serialization_dir
if isinstance(cuda_device, list):
raise ConfigurationError(
"In allennlp 1.0, the Trainer can only be assigned a single `cuda_device`. "
"Instead, we use torch's DistributedDataParallel at the command level, meaning "
"our Trainer always uses a single GPU per process."
)
if distributed and world_size <= 1:
raise ConfigurationError(
"Distributed training can be performed only with more than 1 device. Check "
"`cuda_device` key in the experiment configuration."
)
self.cuda_device = int_to_device(cuda_device)
self._distributed = distributed
self._rank = local_rank
self._master = self._rank == 0
self._world_size = world_size
def train(self) -> Dict[str, Any]:
"""
Train a model and return the results.
"""
raise NotImplementedError
@contextmanager
def get_checkpoint_state(self) -> Iterator[Tuple[Dict[str, Any], Dict[str, Any]]]:
"""
Returns a tuple of (model state, training state), where training state could have several
internal components (e.g., an optimizer, a learning rate scheduler, etc.).
This is a context manager, and should be called as `with trainer.get_checkpoint_state() as
state:`, so that the trainer has the opportunity to change and restore its internal state
for checkpointing. This is used, e.g., for moving averages of model weights.
"""
raise NotImplementedError
class BatchCallback(Registrable):
"""
An optional callback that you can pass to the `GradientDescentTrainer` that will be called at
the end of every batch, during both training and validation. The default implementation
does nothing. You can implement your own callback and do whatever you want, such as saving
predictions to disk or extra logging.
"""
def __call__(
self,
trainer: "GradientDescentTrainer",
batch_inputs: List[List[TensorDict]],
batch_outputs: List[Dict[str, Any]],
batch_metrics: Dict[str, Any],
epoch: int,
batch_number: int,
is_training: bool,
is_master: bool,
) -> None:
pass
@BatchCallback.register("tensorboard-memory-usage")
class TensoboardBatchMemoryUsage(BatchCallback):
"""
Logs the CPU and GPU memory usage to tensorboard on every batch.
This is mainly used for debugging as it can cause a significant slowdown in training.
"""
def __call__(
self,
trainer: "GradientDescentTrainer",
batch_inputs: List[List[TensorDict]],
batch_outputs: List[Dict[str, Any]],
batch_metrics: Dict[str, Any],
epoch: int,
batch_number: int,
is_training: bool,
is_master: bool,
) -> None:
# In the distributed case we need to call this from every worker, since every
# worker reports its own memory usage.
cpu_memory_usage = common_util.peak_cpu_memory()
gpu_memory_usage = common_util.peak_gpu_memory()
# But we only want to log from the master process.
if is_master:
trainer._tensorboard.log_memory_usage(cpu_memory_usage, gpu_memory_usage)
BatchCallback.register("null")(BatchCallback)
class EpochCallback(Registrable):
"""
An optional callback that you can pass to the `GradientDescentTrainer` that will be called at
the end of every epoch (and before the start of training, with `epoch=-1`). The default
implementation does nothing. You can implement your own callback and do whatever you want, such
as additional modifications of the trainer's state in between epochs.
"""
def __call__(
self,
trainer: "GradientDescentTrainer",
metrics: Dict[str, Any],
epoch: int,
is_master: bool,
) -> None:
pass
EpochCallback.register("null")(EpochCallback)
@EpochCallback.register("track_epoch_callback")
class TrackEpochCallback:
"""
A callback that you can pass to the `GradientDescentTrainer` to access the current epoch number
in your model during training. This callback sets `model.epoch`, which can be read inside of
`model.forward()`. Since the EpochCallback passes `epoch=-1`
at the start of training, we set `model.epoch = epoch + 1`, so it denotes the number of
epochs completed at any point during training.
"""
def __init__(self):
super().__init__()
def __call__(
self,
trainer: "GradientDescentTrainer",
metrics: Dict[str, Any],
epoch: int,
is_master: bool,
) -> None:
trainer.model.epoch = epoch + 1
_BasicCallback = Union[BatchCallback, EpochCallback]
class _TrainerCallbackMeta(type):
def __new__(cls, name, bases, dct):
"""
Add subclasses that wrap the `TrainerCallback` into other interfaces.
"""
subtype = super().__new__(cls, name, bases, dct)
# These subtypes wrap the `TrainerCallback` into the `_BasicCallback` interfaces.
subtype.Batch = cls._make_callback_type(BatchCallback, subtype.on_batch)
subtype.Epoch = cls._make_callback_type(EpochCallback, subtype.on_epoch)
subtype.End = cls._make_callback_type(EpochCallback, subtype.on_end)
return subtype
@classmethod
def _make_callback_type(
cls,
call_type: Type[_BasicCallback],
call: Callable[[], None],
) -> Type[_BasicCallback]: # type: ignore
class _Wrapper(call_type): # type: ignore
def __init__(self, trainer_callback: "TrainerCallback"):
self.trainer_callback = trainer_callback
def __call__(self, trainer: "GradientDescentTrainer", *args, **kwargs):
call(self.trainer_callback, trainer, *args, **kwargs) # type: ignore
return _Wrapper
class TrainerCallback(Registrable, metaclass=_TrainerCallbackMeta):
"""
A general callback object that wraps all three types of callbacks into one.
Rather than a `__call__` method, this class has `on_batch`, `on_epoch`, and `on_end` methods, corresponding to
each callback type. Each one receives the state of the wrapper object as `self`. This enables easier state
sharing between related callbacks.
Under the hood, this is a metaclass that creates wrapping subclasses each time a subclass is created.
"""
def on_batch(
self,
trainer: "GradientDescentTrainer",
batch_inputs: List[List[TensorDict]],
batch_outputs: List[Dict[str, Any]],
batch_metrics: Dict[str, Any],
epoch: int,
batch_number: int,
is_training: bool,
is_master: bool,
) -> None:
"""
This callback hook is called after the end of each batch. This is equivalent to `BatchCallback`.
"""
pass
def on_epoch(
self,
trainer: "GradientDescentTrainer",
metrics: Dict[str, Any],
epoch: int,
is_master: bool,
) -> None:
"""
This callback hook is called after the end of each epoch. This is equivalent to `EpochCallback`.
"""
pass
def on_end(
self,
trainer: "GradientDescentTrainer",
metrics: Dict[str, Any],
epoch: int,
is_master: bool,
) -> None:
"""
This callback hook is called after the final training epoch. The `epoch` is passed as an argument.
"""
pass
def batch(self):
"""
Construct a `BatchCallback` wrapper for this `TrainCallback`.
The `cls.Batch` type is created by the metaclass.
"""
return self.Batch(self)
def epoch(self):
"""
Construct an `EpochCallback` wrapper for this instance.
The `cls.Epoch` type is created by the metaclass.
"""
return self.Epoch(self)
def end(self):
"""
Construct an `EpochCallback` wrapping the `on_end` end-of-training hook.
The `cls.End` type is created by the metaclass.
"""
return self.End(self)
TrainerCallback.register("null")(TrainerCallback)
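# Illustrative sketch (not part of the original API): a `TrainerCallback` that records how many
# training batches each epoch produced. The registered name "count-batches" is a placeholder.
@TrainerCallback.register("count-batches")
class _ExampleBatchCountingCallback(TrainerCallback):
    def __init__(self) -> None:
        self.batches_per_epoch: Dict[int, int] = {}
    def on_batch(
        self, trainer, batch_inputs, batch_outputs, batch_metrics, epoch, batch_number, is_training, is_master
    ) -> None:
        if is_training:
            self.batches_per_epoch[epoch] = batch_number
    def on_end(self, trainer, metrics, epoch, is_master) -> None:
        if is_master:
            logger.info("Training batches per epoch: %s", self.batches_per_epoch)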
@Trainer.register("gradient_descent", constructor="from_partial_objects")
class GradientDescentTrainer(Trainer):
"""
A trainer for doing supervised learning with gradient descent. It just takes a labeled dataset
and a `DataLoader`, and uses the supplied `Optimizer` to learn the weights for your model over
some fixed number of epochs. You can also pass in a validation dataloader and enable early
stopping. There are many other bells and whistles as well.
Registered as a `Trainer` with the name "gradient_descent" (and is also the default `Trainer`).
The constructor that is registered is `from_partial_objects` - see the arguments to that
function for the exact keys that should be used, if you are using a configuration file. They
largely match the arguments to `__init__`, and we don't repeat their docstrings in
`from_partial_objects`.
[0]: https://tinyurl.com/y5mv44fw
# Parameters
model : `Model`, required.
An AllenNLP model to be optimized. Pytorch Modules can also be optimized if
their `forward` method returns a dictionary with a "loss" key, containing a
scalar tensor representing the loss function to be optimized.
If you are training your model using GPUs, your model should already be
on the correct device. (If you are using our `train` command this will be
handled for you.)
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
optimizer : `torch.nn.Optimizer`, required.
An instance of a Pytorch Optimizer, instantiated with the parameters of the
model to be optimized.
data_loader : `DataLoader`, required.
A `DataLoader` containing your `Dataset`, yielding padded indexed batches.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
patience : `Optional[int] > 0`, optional (default=`None`)
Number of epochs to be patient before early stopping: the training is stopped
after `patience` epochs with no improvement. If given, it must be `> 0`.
If None, early stopping is disabled.
validation_metric : `str`, optional (default=`"-loss"`)
Validation metric to measure for whether to stop training using patience
and whether to serialize an `is_best` model each epoch. The metric name
must be prepended with either "+" or "-", which specifies whether the metric
is an increasing or decreasing function.
validation_data_loader : `DataLoader`, optional (default=`None`)
A `DataLoader` to use for the validation set. If `None`, then
use the training `DataLoader` with the validation data.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
num_epochs : `int`, optional (default = `20`)
Number of training epochs.
serialization_dir : `str`, optional (default=`None`)
Path to directory for saving and loading model files. Models will not be saved if
this parameter is not passed.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
checkpointer : `Checkpointer`, optional (default=`None`)
A `Checkpointer` is responsible for periodically saving model weights. If none is given
here, we will construct one with default parameters.
cuda_device : `int`, optional (default = `-1`)
An integer specifying the CUDA device(s) to use for this process. If -1, the CPU is used.
Data parallelism is controlled at the allennlp train level, so each trainer will have a single
GPU.
grad_norm : `float`, optional, (default = `None`).
If provided, gradient norms will be rescaled to have a maximum of this value.
grad_clipping : `float`, optional (default = `None`).
If provided, gradients will be clipped `during the backward pass` to have an (absolute)
maximum of this value. If you are getting `NaNs` in your gradients during training
that are not solved by using `grad_norm`, you may need this.
learning_rate_scheduler : `LearningRateScheduler`, optional (default = `None`)
If specified, the learning rate will be decayed with respect to
this schedule at the end of each epoch (or batch, if the scheduler implements
the `step_batch` method). If you use `torch.optim.lr_scheduler.ReduceLROnPlateau`,
this will use the `validation_metric` provided to determine if learning has plateaued.
To support updating the learning rate on every batch, this can optionally implement
`step_batch(batch_num_total)` which updates the learning rate given the batch number.
momentum_scheduler : `MomentumScheduler`, optional (default = `None`)
If specified, the momentum will be updated at the end of each batch or epoch
according to the schedule.
tensorboard_writer : `TensorboardWriter`, optional
If this is not provided, we will construct a `TensorboardWriter` with default
parameters and use that.
moving_average : `MovingAverage`, optional, (default = `None`)
If provided, we will maintain moving averages for all parameters. During training, we
employ a shadow variable for each parameter, which maintains the moving average. During
evaluation, we backup the original parameters and assign the moving averages to corresponding
parameters. Be careful that when saving the checkpoint, we will save the moving averages of
parameters. This is necessary because we want the saved model to perform as well as the validated
model if we load it later. But this may cause problems if you restart training from a checkpoint.
batch_callbacks : `List[BatchCallback]`, optional (default = `None`)
A list of callbacks that will be called at the end of every batch, during both train and
validation.
epoch_callbacks : `List[EpochCallback]`, optional (default = `None`)
A list of callbacks that will be called at the end of every epoch, and at the start of
training (with epoch = -1).
end_callbacks : `List[EpochCallback]`, optional (default = `None`)
A list of callbacks that will be called after the final epoch at the end of training. The type of the
callbacks is the same as `epoch_callbacks`.
trainer_callbacks : `List[TrainerCallback]`, optional (default = `None`)
A list of callbacks that will be called at each batch, epoch, and at the start and end of training.
distributed : `bool`, optional, (default = `False`)
If set, PyTorch's `DistributedDataParallel` is used to train the model in multiple GPUs. This also
requires `world_size` to be greater than 1.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately (you need a top-level "distributed" key, next to
the "trainer" entry, that specifies a list of "cuda_devices").
local_rank : `int`, optional, (default = `0`)
This is the unique identifier of the `Trainer` in a distributed process group. The GPU device id is
used as the rank.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
world_size : `int`, (default = `1`)
The number of `Trainer` workers participating in the distributed training.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
num_gradient_accumulation_steps : `int`, optional, (default = `1`)
Gradients are accumulated for the given number of steps before doing an optimizer step. This can
be useful to accommodate batches that are larger than the RAM size. Refer [Thomas Wolf's
post][0] for details on Gradient Accumulation.
use_amp : `bool`, optional, (default = `False`)
If `True`, we'll train using [Automatic Mixed Precision](https://pytorch.org/docs/stable/amp.html).
"""
def __init__(
self,
model: Model,
optimizer: torch.optim.Optimizer,
data_loader: DataLoader,
patience: Optional[int] = None,
validation_metric: str = "-loss",
validation_data_loader: DataLoader = None,
num_epochs: int = 20,
serialization_dir: Optional[str] = None,
checkpointer: Checkpointer = None,
cuda_device: Optional[Union[int, torch.device]] = None,
grad_norm: Optional[float] = None,
grad_clipping: Optional[float] = None,
learning_rate_scheduler: Optional[LearningRateScheduler] = None,
momentum_scheduler: Optional[MomentumScheduler] = None,
tensorboard_writer: TensorboardWriter = None,
moving_average: Optional[MovingAverage] = None,
batch_callbacks: List[BatchCallback] = None,
epoch_callbacks: List[EpochCallback] = None,
end_callbacks: List[EpochCallback] = None,
trainer_callbacks: List[TrainerCallback] = None,
distributed: bool = False,
local_rank: int = 0,
world_size: int = 1,
num_gradient_accumulation_steps: int = 1,
use_amp: bool = False,
) -> None:
super().__init__(serialization_dir, cuda_device, distributed, local_rank, world_size)
# I am not calling move_to_gpu here, because if the model is
# not already on the GPU then the optimizer is going to be wrong.
self.model = model
self.data_loader = data_loader
self._validation_data_loader = validation_data_loader
self.optimizer = optimizer
if patience is None: # no early stopping
if validation_data_loader is not None:
logger.warning(
"You provided a validation dataset but patience was set to None, "
"meaning that early stopping is disabled"
)
elif (not isinstance(patience, int)) or patience <= 0:
raise ConfigurationError(
'{} is an invalid value for "patience": it must be a positive integer '
"or None (if you want to disable early stopping)".format(patience)
)
# For tracking is_best_so_far and should_stop_early
self._metric_tracker = MetricTracker(patience, validation_metric)
# Get rid of + or -
self._validation_metric = validation_metric[1:]
self._num_epochs = num_epochs
self._checkpointer: Optional[Checkpointer] = checkpointer
if checkpointer is None and serialization_dir is not None:
self._checkpointer = Checkpointer(serialization_dir)
self._grad_norm = grad_norm
self._grad_clipping = grad_clipping
self._learning_rate_scheduler = learning_rate_scheduler
self._momentum_scheduler = momentum_scheduler
self._moving_average = moving_average
self._batch_callbacks = batch_callbacks or []
self._epoch_callbacks = epoch_callbacks or []
self._end_callbacks = end_callbacks or []
for callback in trainer_callbacks or []:
self._batch_callbacks.append(callback.batch())
self._epoch_callbacks.append(callback.epoch())
self._end_callbacks.append(callback.end())
# We keep the total batch number as an instance variable because it
# is used inside a closure for the hook which logs activations in
# `_enable_activation_logging`.
self._batch_num_total = 0
self._tensorboard = tensorboard_writer or TensorboardWriter(serialization_dir)
self._tensorboard.get_batch_num_total = lambda: self._batch_num_total
self._tensorboard.enable_activation_logging(self.model)
self._last_log = 0.0 # time of last logging
self._num_gradient_accumulation_steps = num_gradient_accumulation_steps
# Enable automatic mixed precision training.
self._scaler: Optional[amp.GradScaler] = None
self._use_amp = use_amp
if self._use_amp:
if self.cuda_device == torch.device("cpu"):
raise ValueError("Using AMP requires a cuda device")
self._scaler = amp.GradScaler()
# Using `DistributedDataParallel`(ddp) brings in a quirk wrt AllenNLP's `Model` interface and its
# usage. A `Model` object is wrapped by `ddp`, but assigning the wrapped model to `self.model`
# will break the usages such as `Model.get_regularization_penalty`, `Model.get_metrics`, etc.
#
        # Hence we keep a reference to the DDP-wrapped object in the distributed case, and to the
        # plain `Model` otherwise. This reference is only used in these places:
        # `model.__call__`, `model.train` and `model.eval`.
if self._distributed:
self._pytorch_model = DistributedDataParallel(
self.model,
device_ids=None if self.cuda_device == torch.device("cpu") else [self.cuda_device],
find_unused_parameters=True,
)
else:
self._pytorch_model = self.model
def rescale_gradients(self) -> float:
"""
Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled.
Returns the norm of the gradients.
"""
parameters_to_clip = [p for p in self.model.parameters() if p.grad is not None]
if self._grad_norm:
if self._scaler is not None:
# Need to first unscale gradients in order to clip as usual.
self._scaler.unscale_(self.optimizer)
return clip_grad_norm_(parameters_to_clip, self._grad_norm)
else:
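            # No gradient rescaling was requested; just compute the total gradient norm for logging.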
return torch.norm(
torch.stack([torch.norm(p.grad.detach()) for p in parameters_to_clip])
)
def batch_outputs(self, batch: TensorDict, for_training: bool) -> Dict[str, torch.Tensor]:
"""
Does a forward pass on the given batch and returns the output dictionary that the model
returns, after adding any specified regularization penalty to the loss (if training).
"""
batch = nn_util.move_to_device(batch, self.cuda_device)
output_dict = self._pytorch_model(**batch)
if for_training:
try:
assert "loss" in output_dict
regularization_penalty = self.model.get_regularization_penalty()
if regularization_penalty is not None:
output_dict["reg_loss"] = regularization_penalty
output_dict["loss"] += regularization_penalty
except AssertionError:
if for_training:
raise RuntimeError(
"The model you are trying to optimize does not contain a"
" 'loss' key in the output of model.forward(inputs)."
)
return output_dict
def _train_epoch(self, epoch: int) -> Dict[str, float]:
"""
Trains one epoch and returns metrics.
"""
logger.info("Epoch %d/%d", epoch, self._num_epochs - 1)
cpu_memory_usage = []
for worker, memory in common_util.peak_cpu_memory().items():
cpu_memory_usage.append((worker, memory))
logger.info(f"Worker {worker} memory usage: {common_util.format_size(memory)}")
gpu_memory_usage = []
for gpu, memory in common_util.peak_gpu_memory().items():
gpu_memory_usage.append((gpu, memory))
logger.info(f"GPU {gpu} memory usage: {common_util.format_size(memory)}")
regularization_penalty = self.model.get_regularization_penalty()
train_loss = 0.0
batch_loss = 0.0
train_reg_loss = None if regularization_penalty is None else 0.0
batch_reg_loss = None if regularization_penalty is None else 0.0
# Set the model to "train" mode.
self._pytorch_model.train()
# Get tqdm for the training batches
batch_generator = iter(self.data_loader)
batch_group_generator = common_util.lazy_groups_of(
batch_generator, self._num_gradient_accumulation_steps
)
logger.info("Training")
num_training_batches: Union[int, float]
try:
len_data_loader = len(self.data_loader)
num_training_batches = math.ceil(
len_data_loader / self._num_gradient_accumulation_steps
)
except TypeError:
num_training_batches = float("inf")
# Having multiple tqdm bars in case of distributed training will be a mess. Hence only the master's
# progress is shown
if self._master:
batch_group_generator_tqdm = Tqdm.tqdm(
batch_group_generator, total=num_training_batches
)
else:
batch_group_generator_tqdm = batch_group_generator
self._last_log = time.time()
batches_this_epoch = 0
if self._batch_num_total is None:
self._batch_num_total = 0
done_early = False
for batch_group in batch_group_generator_tqdm:
if self._distributed:
# Check whether the other workers have stopped already (due to differing amounts of
# data in each). If so, we can't proceed because we would hang when we hit the
                # barrier implicit in Model.forward. We use an IntTensor instead of a BoolTensor
# here because NCCL process groups apparently don't support BoolTensor.
done = torch.tensor(0, device=self.cuda_device)
torch.distributed.all_reduce(done, torch.distributed.ReduceOp.SUM)
if done.item() > 0:
done_early = True
logger.warning(
f"Worker {torch.distributed.get_rank()} finishing training early! "
"This implies that there is an imbalance in your training "
"data across the workers and that some amount of it will be "
"ignored. A small amount of this is fine, but a major imbalance "
"should be avoided. Note: This warning will appear unless your "
"data is perfectly balanced."
)
break
batches_this_epoch += 1
self._batch_num_total += 1
batch_num_total = self._batch_num_total
# Zero gradients.
# NOTE: this is actually more efficient than calling `self.optimizer.zero_grad()`
# because it avoids a read op when the gradients are first updated below.
for param_group in self.optimizer.param_groups:
for p in param_group["params"]:
p.grad = None
batch_loss = 0.0
batch_group_outputs = []
for batch in batch_group:
with amp.autocast(self._use_amp):
batch_outputs = self.batch_outputs(batch, for_training=True)
batch_group_outputs.append(batch_outputs)
loss = batch_outputs["loss"]
reg_loss = batch_outputs.get("reg_loss")
if torch.isnan(loss):
raise ValueError("nan loss encountered")
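                    # Average over the accumulation group so that the accumulated gradients match
                    # what a single large batch would have produced.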
loss = loss / len(batch_group)
batch_loss += loss.item()
if reg_loss is not None:
reg_loss = reg_loss / len(batch_group)
batch_reg_loss = reg_loss.item()
train_reg_loss += batch_reg_loss # type: ignore
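                # With AMP, the loss is scaled before backprop to avoid gradient underflow in
                # float16; the scaler unscales the gradients again before the optimizer step.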
if self._scaler is not None:
self._scaler.scale(loss).backward()
else:
loss.backward()
train_loss += batch_loss
batch_grad_norm = self.rescale_gradients()
# This does nothing if batch_num_total is None or you are using a
# scheduler which doesn't update per batch.
if self._learning_rate_scheduler:
self._learning_rate_scheduler.step_batch(batch_num_total)
if self._momentum_scheduler:
self._momentum_scheduler.step_batch(batch_num_total)
param_updates = None
if self._tensorboard.should_log_histograms_this_batch() and self._master:
# Get the magnitude of parameter updates for logging. We need to do some
# computation before and after the optimizer step, and it's expensive because of
# GPU/CPU copies (necessary for large models, and for shipping to tensorboard), so
# we don't do this every batch, only when it's requested.
param_updates = {
name: param.detach().cpu().clone()
for name, param in self.model.named_parameters()
}
if self._scaler is not None:
self._scaler.step(self.optimizer)
self._scaler.update()
else:
self.optimizer.step()
for name, param in self.model.named_parameters():
param_updates[name].sub_(param.detach().cpu())
else:
if self._scaler is not None:
self._scaler.step(self.optimizer)
self._scaler.update()
else:
self.optimizer.step()
# Update moving averages
if self._moving_average is not None:
self._moving_average.apply(batch_num_total)
# Update the description with the latest metrics
metrics = training_util.get_metrics(
self.model,
train_loss,
train_reg_loss,
batch_loss,
batch_reg_loss,
batches_this_epoch,
world_size=self._world_size,
cuda_device=self.cuda_device,
)
if self._master:
# Updating tqdm only for the master as the trainers wouldn't have one
description = training_util.description_from_metrics(metrics)
batch_group_generator_tqdm.set_description(description, refresh=False)
self._tensorboard.log_batch(
self.model,
self.optimizer,
batch_grad_norm,
metrics,
batch_group,
param_updates,
)
if self._checkpointer is not None:
self._checkpointer.maybe_save_checkpoint(self, epoch, batches_this_epoch)
for callback in self._batch_callbacks:
callback(
self,
batch_group,
batch_group_outputs,
metrics,
epoch,
batches_this_epoch,
is_training=True,
is_master=self._master,
)
if self._distributed and not done_early:
logger.warning(
f"Worker {torch.distributed.get_rank()} completed its entire epoch (training)."
)
# Indicate that we're done so that any workers that have remaining data stop the epoch early.
done = torch.tensor(1, device=self.cuda_device)
torch.distributed.all_reduce(done, torch.distributed.ReduceOp.SUM)
assert done.item()
# Let all workers finish their epoch before computing
# the final statistics for the epoch.
if self._distributed:
dist.barrier()
metrics = training_util.get_metrics(
self.model,
train_loss,
train_reg_loss,
batch_loss=None,
batch_reg_loss=None,
num_batches=batches_this_epoch,
reset=True,
world_size=self._world_size,
cuda_device=self.cuda_device,
)
for (worker, memory) in cpu_memory_usage:
metrics["worker_" + str(worker) + "_memory_MB"] = memory / (1024 * 1024)
for (gpu_num, memory) in gpu_memory_usage:
metrics["gpu_" + str(gpu_num) + "_memory_MB"] = memory / (1024 * 1024)
return metrics
def _validation_loss(self, epoch: int) -> Tuple[float, Optional[float], int]:
"""
Computes the validation loss. Returns it and the number of batches.
"""
logger.info("Validating")
self._pytorch_model.eval()
# Replace parameter values with the shadow values from the moving averages.
if self._moving_average is not None:
self._moving_average.assign_average_value()
if self._validation_data_loader is not None:
validation_data_loader = self._validation_data_loader
else:
raise ConfigurationError(
"Validation results cannot be calculated without a validation_data_loader"
)
regularization_penalty = self.model.get_regularization_penalty()
# Having multiple tqdm bars in case of distributed training will be a mess. Hence only the master's
# progress is shown
if self._master:
val_generator_tqdm = Tqdm.tqdm(validation_data_loader)
else:
val_generator_tqdm = validation_data_loader
batches_this_epoch = 0
val_loss = 0.0
val_batch_loss = 0.0
val_reg_loss = None if regularization_penalty is None else 0.0
val_batch_reg_loss = None if regularization_penalty is None else 0.0
done_early = False
for batch in val_generator_tqdm:
if self._distributed:
# Check whether the other workers have stopped already (due to differing amounts of
# data in each). If so, we can't proceed because we would hang when we hit the
                # barrier implicit in Model.forward. We use an IntTensor instead of a BoolTensor
# here because NCCL process groups apparently don't support BoolTensor.
done = torch.tensor(0, device=self.cuda_device)
torch.distributed.all_reduce(done, torch.distributed.ReduceOp.SUM)
if done.item() > 0:
done_early = True
logger.warning(
f"Worker {torch.distributed.get_rank()} finishing validation early! "
"This implies that there is an imbalance in your validation "
"data across the workers and that some amount of it will be "
"ignored. A small amount of this is fine, but a major imbalance "
"should be avoided. Note: This warning will appear unless your "
"data is perfectly balanced."
)
break
with amp.autocast(self._use_amp):
batch_outputs = self.batch_outputs(batch, for_training=False)
loss = batch_outputs.get("loss")
reg_loss = batch_outputs.get("reg_loss")
if loss is not None:
# You shouldn't necessarily have to compute a loss for validation, so we allow for
# `loss` to be None. We need to be careful, though - `batches_this_epoch` is
# currently only used as the divisor for the loss function, so we can safely only
# count those batches for which we actually have a loss. If this variable ever
# gets used for something else, we might need to change things around a bit.
batches_this_epoch += 1
val_batch_loss = loss.item()
val_loss += val_batch_loss
if reg_loss is not None:
val_batch_reg_loss = reg_loss.item()
val_reg_loss += val_batch_reg_loss # type: ignore
# Update the description with the latest metrics
val_metrics = training_util.get_metrics(
self.model,
val_loss,
val_reg_loss,
val_batch_loss,
val_batch_reg_loss,
batches_this_epoch,
world_size=self._world_size,
cuda_device=self.cuda_device,
)
description = training_util.description_from_metrics(val_metrics)
if self._master:
val_generator_tqdm.set_description(description, refresh=False)
for callback in self._batch_callbacks:
callback(
self,
[batch],
[batch_outputs],
val_metrics,
epoch,
batches_this_epoch,
is_training=False,
is_master=self._master,
)
if self._distributed and not done_early:
logger.warning(
f"Worker {torch.distributed.get_rank()} completed its entire epoch (validation)."
)
# Indicate that we're done so that any workers that have remaining data stop validation early.
done = torch.tensor(1, device=self.cuda_device)
torch.distributed.all_reduce(done, torch.distributed.ReduceOp.SUM)
assert done.item()
# Now restore the original parameter values.
if self._moving_average is not None:
self._moving_average.restore()
return val_loss, val_reg_loss, batches_this_epoch
def train(self) -> Dict[str, Any]:
"""
Trains the supplied model with the supplied parameters.
"""
try:
return self._try_train()
finally:
# make sure pending events are flushed to disk and files are closed properly
self._tensorboard.close()
def _try_train(self) -> Dict[str, Any]:
try:
epoch_counter = self._restore_checkpoint()
except RuntimeError:
traceback.print_exc()
raise ConfigurationError(
"Could not recover training from the checkpoint. Did you mean to output to "
"a different serialization directory or delete the existing serialization "
"directory?"
)
training_util.enable_gradient_clipping(self.model, self._grad_clipping)
logger.info("Beginning training.")
val_metrics: Dict[str, float] = {}
this_epoch_val_metric: float = 0.0
metrics: Dict[str, Any] = {}
epochs_trained = 0
training_start_time = time.time()
metrics["best_epoch"] = self._metric_tracker.best_epoch
for key, value in self._metric_tracker.best_epoch_metrics.items():
metrics["best_validation_" + key] = value
for callback in self._epoch_callbacks:
callback(self, metrics={}, epoch=-1, is_master=self._master)
for epoch in range(epoch_counter, self._num_epochs):
epoch_start_time = time.time()
train_metrics = self._train_epoch(epoch)
if self._master and self._checkpointer is not None:
self._checkpointer.save_checkpoint(epoch, self, save_model_only=True)
# Wait for the master to finish saving the model checkpoint
if self._distributed:
dist.barrier()
# get peak of memory usage
for key, value in train_metrics.items():
if key.startswith("gpu_") and key.endswith("_memory_MB"):
metrics["peak_" + key] = max(metrics.get("peak_" + key, 0), value)
elif key.startswith("worker_") and key.endswith("_memory_MB"):
metrics["peak_" + key] = max(metrics.get("peak_" + key, 0), value)
if self._validation_data_loader is not None:
with torch.no_grad():
# We have a validation set, so compute all the metrics on it.
val_loss, val_reg_loss, num_batches = self._validation_loss(epoch)
# It is safe again to wait till the validation is done. This is
# important to get the metrics right.
if self._distributed:
dist.barrier()
val_metrics = training_util.get_metrics(
self.model,
val_loss,
val_reg_loss,
batch_loss=None,
batch_reg_loss=None,
num_batches=num_batches,
reset=True,
world_size=self._world_size,
cuda_device=self.cuda_device,
)
# Check validation metric for early stopping
this_epoch_val_metric = val_metrics[self._validation_metric]
self._metric_tracker.add_metric(this_epoch_val_metric)
if self._metric_tracker.should_stop_early():
logger.info("Ran out of patience. Stopping training.")
break
if self._master:
self._tensorboard.log_metrics(
train_metrics, val_metrics=val_metrics, log_to_console=True, epoch=epoch + 1
) # +1 because tensorboard doesn't like 0
# Create overall metrics dict
training_elapsed_time = time.time() - training_start_time
metrics["training_duration"] = str(datetime.timedelta(seconds=training_elapsed_time))
metrics["training_start_epoch"] = epoch_counter
metrics["training_epochs"] = epochs_trained
metrics["epoch"] = epoch
for key, value in train_metrics.items():
metrics["training_" + key] = value
for key, value in val_metrics.items():
metrics["validation_" + key] = value
if self._metric_tracker.is_best_so_far():
# Update all the best_ metrics.
# (Otherwise they just stay the same as they were.)
metrics["best_epoch"] = epoch
for key, value in val_metrics.items():
metrics["best_validation_" + key] = value
self._metric_tracker.best_epoch_metrics = val_metrics
if self._serialization_dir and self._master:
common_util.dump_metrics(
os.path.join(self._serialization_dir, f"metrics_epoch_{epoch}.json"),
metrics,
)
# The Scheduler API is agnostic to whether your schedule requires a validation metric -
# if it doesn't, the validation metric passed here is ignored.
if self._learning_rate_scheduler:
self._learning_rate_scheduler.step(this_epoch_val_metric)
if self._momentum_scheduler:
self._momentum_scheduler.step(this_epoch_val_metric)
if self._master and self._checkpointer is not None:
self._checkpointer.save_checkpoint(
epoch, self, is_best_so_far=self._metric_tracker.is_best_so_far()
)
# Wait for the master to finish saving the checkpoint
if self._distributed:
dist.barrier()
for callback in self._epoch_callbacks:
callback(self, metrics=metrics, epoch=epoch, is_master=self._master)
epoch_elapsed_time = time.time() - epoch_start_time
logger.info("Epoch duration: %s", datetime.timedelta(seconds=epoch_elapsed_time))
if epoch < self._num_epochs - 1:
training_elapsed_time = time.time() - training_start_time
estimated_time_remaining = training_elapsed_time * (
(self._num_epochs - epoch_counter) / float(epoch - epoch_counter + 1) - 1
)
formatted_time = str(datetime.timedelta(seconds=int(estimated_time_remaining)))
logger.info("Estimated training time remaining: %s", formatted_time)
epochs_trained += 1
for callback in self._end_callbacks:
callback(self, metrics=metrics, epoch=epoch, is_master=self._master)
# Load the best model state before returning
best_model_state = (
None if self._checkpointer is None else self._checkpointer.best_model_state()
)
if best_model_state:
self.model.load_state_dict(best_model_state)
return metrics
@contextmanager
def get_checkpoint_state(self) -> Iterator[Tuple[Dict[str, Any], Dict[str, Any]]]:
if self._moving_average is not None:
# Assigning average value to model parameters. The checkpointer will call
# `restore_state_after_checkpointing` when it is done to put this back to what it was.
self._moving_average.assign_average_value()
model_state = self.model.state_dict()
# These are the training states we need to persist.
training_states = {
"metric_tracker": self._metric_tracker.state_dict(),
"optimizer": self.optimizer.state_dict(),
"batch_num_total": self._batch_num_total,
}
# If we have a learning rate or momentum scheduler, we should persist them too.
if self._learning_rate_scheduler is not None:
training_states["learning_rate_scheduler"] = self._learning_rate_scheduler.state_dict()
if self._momentum_scheduler is not None:
training_states["momentum_scheduler"] = self._momentum_scheduler.state_dict()
try:
yield model_state, training_states
finally:
if self._moving_average is not None:
self._moving_average.restore()
def _restore_checkpoint(self) -> int:
"""
Restores the model and training state from the last saved checkpoint.
This includes an epoch count and optimizer state, which is serialized separately
from model parameters. This function should only be used to continue training -
if you wish to load a model for inference/load parts of a model into a new
computation graph, you should use the native Pytorch functions:
        `model.load_state_dict(torch.load("/path/to/model/weights.th"))`
If `self._serialization_dir` does not exist or does not contain any checkpointed weights,
this function will do nothing and return 0.
# Returns
epoch: `int`
The epoch at which to resume training, which should be one after the epoch
in the saved training state.
"""
if self._checkpointer is None:
return 0
model_state, training_state = self._checkpointer.restore_checkpoint()
if not training_state:
# No checkpoint to restore, start at 0
return 0
self.model.load_state_dict(model_state)
self.optimizer.load_state_dict(training_state["optimizer"])
if (
self._learning_rate_scheduler is not None
and "learning_rate_scheduler" in training_state
):
self._learning_rate_scheduler.load_state_dict(training_state["learning_rate_scheduler"])
if self._momentum_scheduler is not None and "momentum_scheduler" in training_state:
self._momentum_scheduler.load_state_dict(training_state["momentum_scheduler"])
training_util.move_optimizer_to_cuda(self.optimizer)
# Currently the `training_state` contains a serialized `MetricTracker`.
if "metric_tracker" in training_state:
self._metric_tracker.load_state_dict(training_state["metric_tracker"])
# It used to be the case that we tracked `val_metric_per_epoch`.
elif "val_metric_per_epoch" in training_state:
self._metric_tracker.clear()
self._metric_tracker.add_metrics(training_state["val_metric_per_epoch"])
# And before that we didn't track anything.
else:
self._metric_tracker.clear()
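        # Mid-epoch checkpoints store the epoch as a string of the form "<epoch>.<suffix>",
        # in which case we resume from the start of the following epoch.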
if isinstance(training_state["epoch"], int):
epoch_to_return = training_state["epoch"] + 1
else:
epoch_to_return = int(training_state["epoch"].split(".")[0]) + 1
# For older checkpoints with batch_num_total missing, default to old behavior where
# it is unchanged.
batch_num_total = training_state.get("batch_num_total")
if batch_num_total is not None:
self._batch_num_total = batch_num_total
return epoch_to_return
@classmethod
def from_partial_objects(
cls,
model: Model,
serialization_dir: str,
data_loader: DataLoader,
validation_data_loader: DataLoader = None,
local_rank: int = 0,
patience: int = None,
validation_metric: str = "-loss",
num_epochs: int = 20,
cuda_device: Optional[Union[int, torch.device]] = None,
grad_norm: float = None,
grad_clipping: float = None,
distributed: bool = False,
world_size: int = 1,
num_gradient_accumulation_steps: int = 1,
use_amp: bool = False,
no_grad: List[str] = None,
optimizer: Lazy[Optimizer] = Lazy(Optimizer.default),
learning_rate_scheduler: Lazy[LearningRateScheduler] = None,
momentum_scheduler: Lazy[MomentumScheduler] = None,
tensorboard_writer: Lazy[TensorboardWriter] = Lazy(TensorboardWriter),
moving_average: Lazy[MovingAverage] = None,
checkpointer: Lazy[Checkpointer] = Lazy(Checkpointer),
batch_callbacks: List[BatchCallback] = None,
epoch_callbacks: List[EpochCallback] = None,
end_callbacks: List[EpochCallback] = None,
trainer_callbacks: List[TrainerCallback] = None,
) -> "Trainer":
"""
This method exists so that we can have a documented method to construct this class using
`FromParams`. If you are not using `FromParams` or config files, you can safely ignore this
method.
The reason we can't just use `__init__` with `FromParams` here is because there are
sequential dependencies to this class's arguments. Anything that has a `Lazy[]` type
annotation needs something from one of the non-`Lazy` arguments. The `Optimizer` needs to
have the parameters from the `Model` before it's constructed, and the `Schedulers` need to
have the `Optimizer`. Because of this, the typical way we construct things `FromParams`
doesn't work, so we use `Lazy` to allow for constructing the objects sequentially.
If you're not using `FromParams`, you can just construct these arguments in the right order
yourself in your code and call the constructor directly.
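        As a rough sketch, a configuration might contain something like
        `"trainer": {"optimizer": {"type": "adam", "lr": 1e-3}, "num_epochs": 20}`;
        the `optimizer` entry can only be constructed here, once the model's parameters
        are available.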
"""
if cuda_device is None:
from torch import cuda
if cuda.device_count() > 0:
cuda_device = 0
else:
cuda_device = -1
check_for_gpu(cuda_device)
if cuda_device >= 0:
# Moving model to GPU here so that the optimizer state gets constructed on
# the right device.
model = model.cuda(cuda_device)
if no_grad:
for name, parameter in model.named_parameters():
if any(re.search(regex, name) for regex in no_grad):
parameter.requires_grad_(False)
parameters = [[n, p] for n, p in model.named_parameters() if p.requires_grad]
optimizer_ = optimizer.construct(model_parameters=parameters)
common_util.log_frozen_and_tunable_parameter_names(model)
batches_per_epoch: Optional[int]
try:
batches_per_epoch = len(data_loader)
batches_per_epoch = math.ceil(batches_per_epoch / num_gradient_accumulation_steps)
except TypeError:
batches_per_epoch = None
moving_average_ = (
None if moving_average is None else moving_average.construct(parameters=parameters)
)
learning_rate_scheduler_ = (
None
if learning_rate_scheduler is None
else learning_rate_scheduler.construct(
optimizer=optimizer_, num_epochs=num_epochs, num_steps_per_epoch=batches_per_epoch
)
)
momentum_scheduler_ = (
None
if momentum_scheduler is None
else momentum_scheduler.construct(optimizer=optimizer_)
)
checkpointer_ = checkpointer.construct(serialization_dir=serialization_dir)
tensorboard_writer_ = tensorboard_writer.construct(serialization_dir=serialization_dir)
return cls(
model,
optimizer_,
data_loader,
patience=patience,
validation_metric=validation_metric,
validation_data_loader=validation_data_loader,
num_epochs=num_epochs,
serialization_dir=serialization_dir,
cuda_device=cuda_device,
grad_norm=grad_norm,
grad_clipping=grad_clipping,
learning_rate_scheduler=learning_rate_scheduler_,
momentum_scheduler=momentum_scheduler_,
tensorboard_writer=tensorboard_writer_,
checkpointer=checkpointer_,
moving_average=moving_average_,
batch_callbacks=batch_callbacks,
epoch_callbacks=epoch_callbacks,
end_callbacks=end_callbacks,
trainer_callbacks=trainer_callbacks,
distributed=distributed,
local_rank=local_rank,
world_size=world_size,
num_gradient_accumulation_steps=num_gradient_accumulation_steps,
use_amp=use_amp,
)
| allennlp-master | allennlp/training/trainer.py |
from typing import Any, Callable, Dict, List, Optional, Set
import logging
import os
from tensorboardX import SummaryWriter
import torch
from allennlp.common.from_params import FromParams
from allennlp.data.dataloader import TensorDict
from allennlp.nn import util as nn_util
from allennlp.training.optimizers import Optimizer
from allennlp.training import util as training_util
from allennlp.models.model import Model
logger = logging.getLogger(__name__)
class TensorboardWriter(FromParams):
"""
Class that handles Tensorboard (and other) logging.
# Parameters
serialization_dir : `str`, optional (default = `None`)
If provided, this is where the Tensorboard logs will be written.
        In a typical AllenNLP configuration file, this parameter does not get an entry under the
        "tensorboard_writer"; it gets passed in separately.
summary_interval : `int`, optional (default = `100`)
Most statistics will be written out only every this many batches.
histogram_interval : `int`, optional (default = `None`)
If provided, activation histograms will be written out every this many batches.
If None, activation histograms will not be written out.
When this parameter is specified, the following additional logging is enabled:
* Histograms of model parameters
* The ratio of parameter update norm to parameter norm
* Histogram of layer activations
We log histograms of the parameters returned by
`model.get_parameters_for_histogram_tensorboard_logging`.
The layer activations are logged for any modules in the `Model` that have
the attribute `should_log_activations` set to `True`. Logging
histograms requires a number of GPU-CPU copies during training and is typically
slow, so we recommend logging histograms relatively infrequently.
Note: only Modules that return tensors, tuples of tensors or dicts
with tensors as values currently support activation logging.
batch_size_interval : `int`, optional, (default = `None`)
If defined, how often to log the average batch size.
should_log_parameter_statistics : `bool`, optional (default = `True`)
Whether to log parameter statistics (mean and standard deviation of parameters and
gradients).
should_log_learning_rate : `bool`, optional (default = `False`)
Whether to log (parameter-specific) learning rate.
get_batch_num_total : `Callable[[], int]`, optional (default = `None`)
A thunk that returns the number of batches so far. Most likely this will
be a closure around an instance variable in your `Trainer` class. Because of circular
dependencies in constructing this object and the `Trainer`, this is typically `None` when
you construct the object, but it gets set inside the constructor of our `Trainer`.
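        For example, our `Trainer` sets this right after construction, roughly as
        `tensorboard_writer.get_batch_num_total = lambda: self._batch_num_total`.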
"""
def __init__(
self,
serialization_dir: Optional[str] = None,
summary_interval: int = 100,
histogram_interval: int = None,
batch_size_interval: Optional[int] = None,
should_log_parameter_statistics: bool = True,
should_log_learning_rate: bool = False,
get_batch_num_total: Callable[[], int] = None,
) -> None:
if serialization_dir is not None:
# Create log directories prior to creating SummaryWriter objects
# in order to avoid race conditions during distributed training.
train_ser_dir = os.path.join(serialization_dir, "log", "train")
os.makedirs(train_ser_dir, exist_ok=True)
self._train_log = SummaryWriter(train_ser_dir)
val_ser_dir = os.path.join(serialization_dir, "log", "validation")
os.makedirs(val_ser_dir, exist_ok=True)
self._validation_log = SummaryWriter(val_ser_dir)
else:
self._train_log = self._validation_log = None
self._summary_interval = summary_interval
self._histogram_interval = histogram_interval
self._batch_size_interval = batch_size_interval
self._should_log_parameter_statistics = should_log_parameter_statistics
self._should_log_learning_rate = should_log_learning_rate
self.get_batch_num_total = get_batch_num_total
self._cumulative_batch_group_size = 0
self._batches_this_epoch = 0
self._histogram_parameters: Optional[Set[str]] = None
@staticmethod
def _item(value: Any):
if hasattr(value, "item"):
val = value.item()
else:
val = value
return val
def log_memory_usage(self, cpu_memory_usage: Dict[int, int], gpu_memory_usage: Dict[int, int]):
cpu_memory_usage_total = 0.0
for worker, mem_bytes in cpu_memory_usage.items():
memory = mem_bytes / (1024 * 1024)
self.add_train_scalar(f"memory_usage/worker_{worker}_cpu", memory)
cpu_memory_usage_total += memory
self.add_train_scalar("memory_usage/cpu", cpu_memory_usage_total)
for gpu, mem_bytes in gpu_memory_usage.items():
memory = mem_bytes / (1024 * 1024)
self.add_train_scalar(f"memory_usage/gpu_{gpu}", memory)
def log_batch(
self,
model: Model,
optimizer: Optimizer,
batch_grad_norm: Optional[float],
metrics: Dict[str, float],
batch_group: List[List[TensorDict]],
param_updates: Optional[Dict[str, torch.Tensor]],
) -> None:
if self.should_log_this_batch():
self.log_parameter_and_gradient_statistics(model, batch_grad_norm)
self.log_learning_rates(model, optimizer)
self.add_train_scalar("loss/loss_train", metrics["loss"])
self.log_metrics({"epoch_metrics/" + k: v for k, v in metrics.items()})
if self.should_log_histograms_this_batch():
assert param_updates is not None
self.log_histograms(model)
self.log_gradient_updates(model, param_updates)
if self._batch_size_interval:
# We're assuming here that `log_batch` will get called every batch, and only every
# batch. This is true with our current usage of this code (version 1.0); if that
# assumption becomes wrong, this code will break.
batch_group_size = sum(training_util.get_batch_size(batch) for batch in batch_group) # type: ignore
self._batches_this_epoch += 1
self._cumulative_batch_group_size += batch_group_size
if (self._batches_this_epoch - 1) % self._batch_size_interval == 0:
average = self._cumulative_batch_group_size / self._batches_this_epoch
logger.info(f"current batch size: {batch_group_size} mean batch size: {average}")
self.add_train_scalar("current_batch_size", batch_group_size)
self.add_train_scalar("mean_batch_size", average)
def reset_epoch(self) -> None:
self._cumulative_batch_group_size = 0
self._batches_this_epoch = 0
def should_log_this_batch(self) -> bool:
assert self.get_batch_num_total is not None
return self.get_batch_num_total() % self._summary_interval == 0
def should_log_histograms_this_batch(self) -> bool:
assert self.get_batch_num_total is not None
return (
self._histogram_interval is not None
and self.get_batch_num_total() % self._histogram_interval == 0
)
def add_train_scalar(self, name: str, value: float, timestep: int = None) -> None:
assert self.get_batch_num_total is not None
timestep = timestep or self.get_batch_num_total()
# get the scalar
if self._train_log is not None:
self._train_log.add_scalar(name, self._item(value), timestep)
def add_train_histogram(self, name: str, values: torch.Tensor) -> None:
assert self.get_batch_num_total is not None
if self._train_log is not None:
if isinstance(values, torch.Tensor):
values_to_write = values.cpu().data.numpy().flatten()
self._train_log.add_histogram(name, values_to_write, self.get_batch_num_total())
def add_validation_scalar(self, name: str, value: float, timestep: int = None) -> None:
assert self.get_batch_num_total is not None
timestep = timestep or self.get_batch_num_total()
if self._validation_log is not None:
self._validation_log.add_scalar(name, self._item(value), timestep)
def log_parameter_and_gradient_statistics(
self, model: Model, batch_grad_norm: float = None
) -> None:
"""
Send the mean and std of all parameters and gradients to tensorboard, as well
as logging the average gradient norm.
"""
if self._should_log_parameter_statistics:
# Log parameter values to Tensorboard
for name, param in model.named_parameters():
if param.data.numel() > 0:
self.add_train_scalar("parameter_mean/" + name, param.data.mean().item())
if param.data.numel() > 1:
self.add_train_scalar("parameter_std/" + name, param.data.std().item())
if param.grad is not None:
if param.grad.is_sparse:
grad_data = param.grad.data._values()
else:
grad_data = param.grad.data
# skip empty gradients
if torch.prod(torch.tensor(grad_data.shape)).item() > 0:
self.add_train_scalar("gradient_mean/" + name, grad_data.mean())
if grad_data.numel() > 1:
self.add_train_scalar("gradient_std/" + name, grad_data.std())
else:
# no gradient for a parameter with sparse gradients
logger.info("No gradient for %s, skipping tensorboard logging.", name)
# norm of gradients
if batch_grad_norm is not None:
self.add_train_scalar("gradient_norm", batch_grad_norm)
def log_learning_rates(self, model: Model, optimizer: Optimizer):
"""
Send current parameter specific learning rates to tensorboard
"""
if self._should_log_learning_rate:
# optimizer stores lr info keyed by parameter tensor
# we want to log with parameter name
names = {param: name for name, param in model.named_parameters()}
for group in optimizer.param_groups:
if "lr" not in group:
continue
rate = group["lr"]
for param in group["params"]:
                    # Frozen parameters (requires_grad == False) are logged with an effective rate of 0.
effective_rate = rate * float(param.requires_grad)
self.add_train_scalar("learning_rate/" + names[param], effective_rate)
def log_histograms(self, model: Model) -> None:
"""
Send histograms of parameters to tensorboard.
"""
if not self._histogram_parameters:
# Avoiding calling this every batch. If we ever use two separate models with a single
# writer, this is wrong, but I doubt that will ever happen.
self._histogram_parameters = set(
model.get_parameters_for_histogram_tensorboard_logging()
)
for name, param in model.named_parameters():
if name in self._histogram_parameters:
self.add_train_histogram("parameter_histogram/" + name, param)
def log_gradient_updates(self, model: Model, param_updates: Dict[str, torch.Tensor]) -> None:
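        # Log the ratio of the update norm to the parameter norm for each parameter; the tiny
        # epsilon in the denominator avoids dividing by zero for all-zero parameters.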
for name, param in model.named_parameters():
update_norm = torch.norm(param_updates[name].view(-1))
param_norm = torch.norm(param.view(-1)).cpu()
self.add_train_scalar(
"gradient_update/" + name,
update_norm / (param_norm + nn_util.tiny_value_of_dtype(param_norm.dtype)),
)
def log_metrics(
self,
train_metrics: dict,
val_metrics: dict = None,
epoch: int = None,
log_to_console: bool = False,
) -> None:
"""
Sends all of the train metrics (and validation metrics, if provided) to tensorboard.
"""
metric_names = set(train_metrics.keys())
if val_metrics is not None:
metric_names.update(val_metrics.keys())
val_metrics = val_metrics or {}
# For logging to the console
if log_to_console:
dual_message_template = "%s | %8.3f | %8.3f"
no_val_message_template = "%s | %8.3f | %8s"
no_train_message_template = "%s | %8s | %8.3f"
header_template = "%s | %-10s"
name_length = max(len(x) for x in metric_names)
logger.info(header_template, "Training".rjust(name_length + 13), "Validation")
for name in sorted(metric_names):
# Log to tensorboard
train_metric = train_metrics.get(name)
if train_metric is not None:
self.add_train_scalar(name, train_metric, timestep=epoch)
val_metric = val_metrics.get(name)
if val_metric is not None:
self.add_validation_scalar(name, val_metric, timestep=epoch)
# And maybe log to console
if log_to_console and val_metric is not None and train_metric is not None:
logger.info(
dual_message_template, name.ljust(name_length), train_metric, val_metric
)
elif log_to_console and val_metric is not None:
logger.info(no_train_message_template, name.ljust(name_length), "N/A", val_metric)
elif log_to_console and train_metric is not None:
logger.info(no_val_message_template, name.ljust(name_length), train_metric, "N/A")
def enable_activation_logging(self, model: Model) -> None:
if self._histogram_interval is not None:
            # To log activation histograms for the forward pass, we register
            # forward hooks that capture the output tensors.
# This uses a closure to determine whether to log the activations,
# since we don't want them on every call.
for _, module in model.named_modules():
if not getattr(module, "should_log_activations", False):
# skip it
continue
def hook(module_, inputs, outputs):
log_prefix = "activation_histogram/{0}".format(module_.__class__)
if self.should_log_histograms_this_batch():
self.log_activation_histogram(outputs, log_prefix)
module.register_forward_hook(hook)
def log_activation_histogram(self, outputs, log_prefix: str) -> None:
if isinstance(outputs, torch.Tensor):
log_name = log_prefix
self.add_train_histogram(log_name, outputs)
elif isinstance(outputs, (list, tuple)):
for i, output in enumerate(outputs):
log_name = "{0}_{1}".format(log_prefix, i)
self.add_train_histogram(log_name, output)
elif isinstance(outputs, dict):
for k, tensor in outputs.items():
log_name = "{0}_{1}".format(log_prefix, k)
self.add_train_histogram(log_name, tensor)
else:
# skip it
pass
def close(self) -> None:
"""
Calls the `close` method of the `SummaryWriter` s which makes sure that pending
scalars are flushed to disk and the tensorboard event files are closed properly.
"""
if self._train_log is not None:
self._train_log.close()
if self._validation_log is not None:
self._validation_log.close()
| allennlp-master | allennlp/training/tensorboard_writer.py |
from typing import Optional
from overrides import overrides
import torch
import torch.distributed as dist
import scipy.stats as stats
from allennlp.common.util import is_distributed
from allennlp.training.metrics.metric import Metric
@Metric.register("spearman_correlation")
class SpearmanCorrelation(Metric):
"""
This `Metric` calculates the sample Spearman correlation coefficient (r)
between two tensors. Each element in the two tensors is assumed to be
a different observation of the variable (i.e., the input tensors are
implicitly flattened into vectors and the correlation is calculated
between the vectors).
<https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>
"""
def __init__(self) -> None:
super().__init__()
self.total_predictions = torch.zeros(0)
self.total_gold_labels = torch.zeros(0)
def __call__(
self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
):
"""
# Parameters
predictions : `torch.Tensor`, required.
A tensor of predictions of shape (batch_size, ...).
gold_labels : `torch.Tensor`, required.
A tensor of the same shape as `predictions`.
mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of the same shape as `predictions`.
"""
predictions, gold_labels, mask = self.detach_tensors(predictions, gold_labels, mask)
# Flatten predictions, gold_labels, and mask. We calculate the Spearman correlation between
# the vectors, since each element in the predictions and gold_labels tensor is assumed
# to be a separate observation.
predictions = predictions.reshape(-1)
gold_labels = gold_labels.reshape(-1)
self.total_predictions = self.total_predictions.to(predictions.device)
self.total_gold_labels = self.total_gold_labels.to(gold_labels.device)
if mask is not None:
mask = mask.reshape(-1)
self.total_predictions = torch.cat((self.total_predictions, predictions * mask), 0)
self.total_gold_labels = torch.cat((self.total_gold_labels, gold_labels * mask), 0)
else:
self.total_predictions = torch.cat((self.total_predictions, predictions), 0)
self.total_gold_labels = torch.cat((self.total_gold_labels, gold_labels), 0)
if is_distributed():
world_size = dist.get_world_size()
device = gold_labels.device
# Check if batch lengths are equal.
_all_batch_lengths = [torch.tensor(0) for i in range(world_size)]
dist.all_gather(
_all_batch_lengths, torch.tensor(self.total_predictions.shape[0], device=device)
)
_all_batch_lengths = [batch_length.item() for batch_length in _all_batch_lengths]
if len(set(_all_batch_lengths)) > 1:
# Subsequent dist.all_gather() calls currently do not handle tensors of different length.
raise RuntimeError(
"Distributed aggregation for SpearmanCorrelation is currently not supported "
"for batches of unequal length."
)
_total_predictions = [
torch.zeros(self.total_predictions.shape, device=device) for i in range(world_size)
]
_total_gold_labels = [
torch.zeros(self.total_gold_labels.shape, device=device) for i in range(world_size)
]
dist.all_gather(_total_predictions, self.total_predictions)
dist.all_gather(_total_gold_labels, self.total_gold_labels)
self.total_predictions = torch.cat(_total_predictions, dim=0)
self.total_gold_labels = torch.cat(_total_gold_labels, dim=0)
@overrides
def get_metric(self, reset: bool = False):
"""
# Returns
The accumulated sample Spearman correlation.
"""
spearman_correlation = stats.spearmanr(
self.total_predictions.cpu().numpy(), self.total_gold_labels.cpu().numpy()
)
if reset:
self.reset()
return spearman_correlation[0]
@overrides
def reset(self):
self.total_predictions = torch.zeros(0)
self.total_gold_labels = torch.zeros(0)
| allennlp-master | allennlp/training/metrics/spearman_correlation.py |
from typing import Optional
from overrides import overrides
import torch
import torch.distributed as dist
from allennlp.common.util import is_distributed
from allennlp.training.metrics.metric import Metric
@Metric.register("mean_absolute_error")
class MeanAbsoluteError(Metric):
"""
This `Metric` calculates the mean absolute error (MAE) between two tensors.
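    For example, predictions `[2.0, 0.5]` against gold labels `[1.0, 1.0]` give absolute
    errors `[1.0, 0.5]` and hence `mae = 0.75`.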
"""
def __init__(self) -> None:
self._absolute_error = 0.0
self._total_count = 0.0
def __call__(
self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
):
"""
# Parameters
predictions : `torch.Tensor`, required.
A tensor of predictions of shape (batch_size, ...).
gold_labels : `torch.Tensor`, required.
A tensor of the same shape as `predictions`.
mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of the same shape as `predictions`.
"""
predictions, gold_labels, mask = self.detach_tensors(predictions, gold_labels, mask)
device = gold_labels.device
absolute_errors = torch.abs(predictions - gold_labels)
if mask is not None:
absolute_errors *= mask
_total_count = torch.sum(mask)
else:
_total_count = gold_labels.numel()
_absolute_error = torch.sum(absolute_errors)
if is_distributed():
absolute_error = torch.tensor(_absolute_error, device=device)
total_count = torch.tensor(_total_count, device=device)
dist.all_reduce(absolute_error, op=dist.ReduceOp.SUM)
dist.all_reduce(total_count, op=dist.ReduceOp.SUM)
_absolute_error = absolute_error.item()
_total_count = total_count.item()
self._absolute_error += _absolute_error
self._total_count += _total_count
def get_metric(self, reset: bool = False):
"""
# Returns
The accumulated mean absolute error.
"""
mean_absolute_error = self._absolute_error / self._total_count
if reset:
self.reset()
return {"mae": mean_absolute_error}
@overrides
def reset(self):
self._absolute_error = 0.0
self._total_count = 0.0
| allennlp-master | allennlp/training/metrics/mean_absolute_error.py |
from typing import Dict
from allennlp.training.metrics.metric import Metric
from allennlp.training.metrics.fbeta_measure import FBetaMeasure
@Metric.register("f1")
class F1Measure(FBetaMeasure):
"""
Computes Precision, Recall and F1 with respect to a given `positive_label`.
For example, for a BIO tagging scheme, you would pass the classification index of
the tag you are interested in, resulting in the Precision, Recall and F1 score being
calculated for this tag only.
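    A minimal sketch of typical usage (the tensor names are illustrative):

    ```python
    f1 = F1Measure(positive_label=2)
    f1(predictions=class_logits, gold_labels=label_ids, mask=mask)
    f1.get_metric(reset=True)  # {"precision": ..., "recall": ..., "f1": ...}
    ```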
"""
def __init__(self, positive_label: int) -> None:
super().__init__(beta=1, labels=[positive_label])
self._positive_label = positive_label
def get_metric(self, reset: bool = False) -> Dict[str, float]:
"""
# Returns
precision : `float`
recall : `float`
f1-measure : `float`
"""
metric = super().get_metric(reset=reset)
# Because we just care about the class `positive_label`
# there is just one item in `precision`, `recall`, `fscore`
precision = metric["precision"][0]
recall = metric["recall"][0]
f1 = metric["fscore"][0]
return {"precision": precision, "recall": recall, "f1": f1}
@property
def _true_positives(self):
# When this metric is never called, `self._true_positive_sum` is None,
# under which case we return 0.0 for backward compatibility.
if self._true_positive_sum is None:
return 0.0
else:
return self._true_positive_sum[self._positive_label]
@property
def _true_negatives(self):
# When this metric is never called, `self._true_negative_sum` is None,
# under which case we return 0.0 for backward compatibility.
if self._true_negative_sum is None:
return 0.0
else:
return self._true_negative_sum[self._positive_label]
@property
def _false_positives(self):
# When this metric is never called, `self._pred_sum` is None,
# under which case we return 0.0 for backward compatibility.
if self._pred_sum is None:
return 0.0
else:
# `self._pred_sum` is the total number of instances under each _predicted_ class,
# including true positives and false positives.
return self._pred_sum[self._positive_label] - self._true_positives
@property
def _false_negatives(self):
# When this metric is never called, `self._true_sum` is None,
# under which case we return 0.0 for backward compatibility.
if self._true_sum is None:
return 0.0
else:
# `self._true_sum` is the total number of instances under each _true_ class,
# including true positives and false negatives.
return self._true_sum[self._positive_label] - self._true_positives
| allennlp-master | allennlp/training/metrics/f1_measure.py |
import logging
from typing import Optional
import math
import numpy as np
from overrides import overrides
import torch
# import torch.distributed as dist
from allennlp.common.util import is_distributed
from allennlp.training.metrics.covariance import Covariance
from allennlp.training.metrics.metric import Metric
logger = logging.getLogger(__name__)
@Metric.register("pearson_correlation")
class PearsonCorrelation(Metric):
"""
This `Metric` calculates the sample Pearson correlation coefficient (r)
between two tensors. Each element in the two tensors is assumed to be
a different observation of the variable (i.e., the input tensors are
implicitly flattened into vectors and the correlation is calculated
between the vectors).
This implementation is mostly modeled after the streaming_pearson_correlation function in Tensorflow. See
<https://github.com/tensorflow/tensorflow/blob/v1.10.1/tensorflow/contrib/metrics/python/ops/metric_ops.py#L3267>.
This metric delegates to the Covariance metric the tracking of three [co]variances:
- `covariance(predictions, labels)`, i.e. covariance
- `covariance(predictions, predictions)`, i.e. variance of `predictions`
- `covariance(labels, labels)`, i.e. variance of `labels`
If we have these values, the sample Pearson correlation coefficient is simply:
r = covariance / (sqrt(predictions_variance) * sqrt(labels_variance))
if predictions_variance or labels_variance is 0, r is 0
"""
def __init__(self) -> None:
self._predictions_labels_covariance = Covariance()
self._predictions_variance = Covariance()
self._labels_variance = Covariance()
def __call__(
self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
):
"""
# Parameters
predictions : `torch.Tensor`, required.
A tensor of predictions of shape (batch_size, ...).
gold_labels : `torch.Tensor`, required.
A tensor of the same shape as `predictions`.
mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of the same shape as `predictions`.
"""
predictions, gold_labels, mask = self.detach_tensors(predictions, gold_labels, mask)
if not is_distributed():
self._predictions_labels_covariance(predictions, gold_labels, mask)
self._predictions_variance(predictions, predictions, mask)
self._labels_variance(gold_labels, gold_labels, mask)
def get_metric(self, reset: bool = False):
"""
# Returns
The accumulated sample Pearson correlation.
"""
if is_distributed():
raise RuntimeError(
"Distributed aggregation for PearsonCorrelation is currently not supported."
)
covariance = self._predictions_labels_covariance.get_metric(reset=reset)
predictions_variance = self._predictions_variance.get_metric(reset=reset)
labels_variance = self._labels_variance.get_metric(reset=reset)
denominator = math.sqrt(predictions_variance) * math.sqrt(labels_variance)
if reset:
self.reset()
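        # If either variance is numerically zero the correlation is undefined, so we return 0.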
if np.around(denominator, decimals=5) == 0:
pearson_r = 0
else:
pearson_r = covariance / denominator
return pearson_r
@overrides
def reset(self):
self._predictions_labels_covariance.reset()
self._predictions_variance.reset()
self._labels_variance.reset()
| allennlp-master | allennlp/training/metrics/pearson_correlation.py |
from collections import Counter
import math
from typing import Iterable, Tuple, Dict, Set
from overrides import overrides
import torch
import torch.distributed as dist
from allennlp.common.util import is_distributed
from allennlp.training.metrics.metric import Metric
@Metric.register("bleu")
class BLEU(Metric):
"""
Bilingual Evaluation Understudy (BLEU).
BLEU is a common metric used for evaluating the quality of machine translations
against a set of reference translations. See
[Papineni et. al., "BLEU: a method for automatic evaluation of machine translation", 2002][1].
# Parameters
ngram_weights : `Iterable[float]`, optional (default = `(0.25, 0.25, 0.25, 0.25)`)
Weights to assign to scores for each ngram size.
exclude_indices : `Set[int]`, optional (default = `None`)
Indices to exclude when calculating ngrams. This should usually include
the indices of the start, end, and pad tokens.
# Notes
We chose to implement this from scratch instead of wrapping an existing implementation
    (such as `nltk.translate.bleu_score`) for two reasons. First, so that we could
pass tensors directly to this metric instead of first converting the tensors to lists of strings.
And second, because functions like `nltk.translate.bleu_score.corpus_bleu()` are
meant to be called once over the entire corpus, whereas it is more efficient
in our use case to update the running precision counts every batch.
This implementation only considers a reference set of size 1, i.e. a single
gold target sequence for each predicted sequence.
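    # Example

    A minimal sketch of updating and querying the metric (the token id tensors are
    illustrative):

    ```python
    bleu = BLEU(exclude_indices={0})
    bleu(predictions=predicted_token_ids, gold_targets=gold_token_ids)
    bleu.get_metric(reset=True)  # {"BLEU": ...}
    ```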
[1]: https://www.semanticscholar.org/paper/8ff93cfd37dced279134c9d642337a2085b31f59/
"""
def __init__(
self,
ngram_weights: Iterable[float] = (0.25, 0.25, 0.25, 0.25),
exclude_indices: Set[int] = None,
) -> None:
self._ngram_weights = ngram_weights
self._exclude_indices = exclude_indices or set()
self._precision_matches: Dict[int, int] = Counter()
self._precision_totals: Dict[int, int] = Counter()
self._prediction_lengths = 0
self._reference_lengths = 0
@overrides
def reset(self) -> None:
self._precision_matches = Counter()
self._precision_totals = Counter()
self._prediction_lengths = 0
self._reference_lengths = 0
def _get_modified_precision_counts(
self,
predicted_tokens: torch.LongTensor,
reference_tokens: torch.LongTensor,
ngram_size: int,
) -> Tuple[int, int]:
"""
Compare the predicted tokens to the reference (gold) tokens at the desired
ngram size and calculate the numerator and denominator for a modified
form of precision.
The numerator is the number of ngrams in the predicted sentences that match
with an ngram in the corresponding reference sentence, clipped by the total
count of that ngram in the reference sentence. The denominator is just
the total count of predicted ngrams.
"""
clipped_matches = 0
total_predicted = 0
from allennlp.training.util import ngrams
for predicted_row, reference_row in zip(predicted_tokens, reference_tokens):
predicted_ngram_counts = ngrams(predicted_row, ngram_size, self._exclude_indices)
reference_ngram_counts = ngrams(reference_row, ngram_size, self._exclude_indices)
for ngram, count in predicted_ngram_counts.items():
clipped_matches += min(count, reference_ngram_counts[ngram])
total_predicted += count
return clipped_matches, total_predicted
def _get_brevity_penalty(self) -> float:
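        # Standard BLEU brevity penalty: 1 when the predictions are at least as long as the
        # references, otherwise exp(1 - reference_length / prediction_length).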
if self._prediction_lengths > self._reference_lengths:
return 1.0
if self._reference_lengths == 0 or self._prediction_lengths == 0:
return 0.0
return math.exp(1.0 - self._reference_lengths / self._prediction_lengths)
@overrides
def __call__(
self, # type: ignore
predictions: torch.LongTensor,
gold_targets: torch.LongTensor,
) -> None:
"""
Update precision counts.
# Parameters
predictions : `torch.LongTensor`, required
Batched predicted tokens of shape `(batch_size, max_sequence_length)`.
        gold_targets : `torch.LongTensor`, required
Batched reference (gold) translations with shape `(batch_size, max_gold_sequence_length)`.
# Returns
None
"""
predictions, gold_targets = self.detach_tensors(predictions, gold_targets)
device = gold_targets.device
if is_distributed():
world_size = dist.get_world_size()
for ngram_size, _ in enumerate(self._ngram_weights, start=1):
precision_matches, precision_totals = self._get_modified_precision_counts(
predictions, gold_targets, ngram_size
)
if is_distributed():
_precision_matches = torch.tensor(precision_matches, device=device)
_precision_totals = torch.tensor(precision_totals, device=device)
dist.all_reduce(_precision_matches, op=dist.ReduceOp.SUM)
dist.all_reduce(_precision_totals, op=dist.ReduceOp.SUM)
precision_matches = _precision_matches.item() / world_size
precision_totals = _precision_totals.item() / world_size
self._precision_matches[ngram_size] += precision_matches
self._precision_totals[ngram_size] += precision_totals
if not self._exclude_indices:
_prediction_lengths = predictions.size(0) * predictions.size(1)
_reference_lengths = gold_targets.size(0) * gold_targets.size(1)
else:
from allennlp.training.util import get_valid_tokens_mask
valid_predictions_mask = get_valid_tokens_mask(predictions, self._exclude_indices)
valid_gold_targets_mask = get_valid_tokens_mask(gold_targets, self._exclude_indices)
_prediction_lengths = valid_predictions_mask.sum().item()
_reference_lengths = valid_gold_targets_mask.sum().item()
if is_distributed():
prediction_lengths = torch.tensor(_prediction_lengths, device=device)
reference_lengths = torch.tensor(_reference_lengths, device=device)
dist.all_reduce(prediction_lengths, op=dist.ReduceOp.SUM)
dist.all_reduce(reference_lengths, op=dist.ReduceOp.SUM)
_prediction_lengths = prediction_lengths.item()
_reference_lengths = reference_lengths.item()
self._prediction_lengths += _prediction_lengths
self._reference_lengths += _reference_lengths
@overrides
def get_metric(self, reset: bool = False) -> Dict[str, float]:
brevity_penalty = self._get_brevity_penalty()
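        # Weighted geometric mean of the modified ngram precisions, computed in log space;
        # the small epsilon keeps the logs finite when an ngram order has zero matches.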
ngram_scores = (
weight
* (
math.log(self._precision_matches[n] + 1e-13)
- math.log(self._precision_totals[n] + 1e-13)
)
for n, weight in enumerate(self._ngram_weights, start=1)
)
bleu = brevity_penalty * math.exp(sum(ngram_scores))
if reset:
self.reset()
return {"BLEU": bleu}
| allennlp-master | allennlp/training/metrics/bleu.py |
from typing import Optional
from overrides import overrides
import torch
import torch.distributed as dist
from allennlp.common.util import is_distributed
from allennlp.training.metrics.metric import Metric
@Metric.register("boolean_accuracy")
class BooleanAccuracy(Metric):
"""
Just checks batch-equality of two tensors and computes an accuracy metric based on that.
That is, if your prediction has shape (batch_size, dim_1, ..., dim_n), this metric considers that
as a set of `batch_size` predictions and checks that each is *entirely* correct across the remaining dims.
This means the denominator in the accuracy computation is `batch_size`, with the caveat that predictions
that are totally masked are ignored (in which case the denominator is the number of predictions that have
at least one unmasked element).
This is similar to [`CategoricalAccuracy`](./categorical_accuracy.md), if you've already done a `.max()`
on your predictions. If you have categorical output, though, you should typically just use
`CategoricalAccuracy`. The reason you might want to use this instead is if you've done
some kind of constrained inference and don't have a prediction tensor that matches the API of
`CategoricalAccuracy`, which assumes a final dimension of size `num_classes`.
"""
def __init__(self) -> None:
self._correct_count = 0.0
self._total_count = 0.0
def __call__(
self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
):
"""
# Parameters
predictions : `torch.Tensor`, required.
A tensor of predictions of shape (batch_size, ...).
gold_labels : `torch.Tensor`, required.
A tensor of the same shape as `predictions`.
mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of the same shape as `predictions`.
"""
predictions, gold_labels, mask = self.detach_tensors(predictions, gold_labels, mask)
# Some sanity checks.
if gold_labels.size() != predictions.size():
raise ValueError(
f"gold_labels must have shape == predictions.size() but "
f"found tensor of shape: {gold_labels.size()}"
)
if mask is not None and mask.size() != predictions.size():
raise ValueError(
f"mask must have shape == predictions.size() but "
f"found tensor of shape: {mask.size()}"
)
batch_size = predictions.size(0)
if mask is not None:
# We can multiply by the mask up front, because we're just checking equality below, and
# this way everything that's masked will be equal.
predictions = predictions * mask
gold_labels = gold_labels * mask
# We want to skip predictions that are completely masked;
# so we'll keep predictions that aren't.
keep = mask.view(batch_size, -1).max(dim=1)[0]
else:
keep = torch.ones(batch_size, device=predictions.device).bool()
predictions = predictions.view(batch_size, -1)
gold_labels = gold_labels.view(batch_size, -1)
# At this point, predictions is (batch_size, rest_of_dims_combined),
# so .eq -> .prod will be 1 if every element of the instance prediction is correct
# and 0 if at least one element of the instance prediction is wrong.
# Because of how we're handling masking, masked positions are automatically "correct".
correct = predictions.eq(gold_labels).prod(dim=1).float()
# Since masked positions are correct, we need to explicitly exclude instance predictions
# where the entire prediction is masked (because they look "correct").
_correct_count = (correct * keep).sum()
_total_count = keep.sum()
if is_distributed():
dist.all_reduce(_correct_count, op=dist.ReduceOp.SUM)
dist.all_reduce(_total_count, op=dist.ReduceOp.SUM)
self._correct_count += _correct_count.item()
self._total_count += _total_count.item()
def get_metric(self, reset: bool = False):
"""
# Returns
The accumulated accuracy.
"""
if self._total_count > 0:
accuracy = float(self._correct_count) / float(self._total_count)
else:
accuracy = 0.0
if reset:
self.reset()
return accuracy
@overrides
def reset(self):
self._correct_count = 0.0
self._total_count = 0.0
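# --- Usage sketch ----------------------------------------------------------
# An illustrative, single-process example with made-up values: the first
# instance matches the gold tensor exactly, the second does not, so the
# accumulated accuracy is 0.5.
if __name__ == "__main__":
    predictions = torch.tensor([[1, 2, 3], [4, 5, 6]])
    gold_labels = torch.tensor([[1, 2, 3], [4, 5, 7]])
    metric = BooleanAccuracy()
    metric(predictions, gold_labels)
    print(metric.get_metric(reset=True))  # -> 0.5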
| allennlp-master | allennlp/training/metrics/boolean_accuracy.py |
from typing import List, Optional, Union
import torch
import torch.distributed as dist
from overrides import overrides
from allennlp.common.util import is_distributed
from allennlp.common.checks import ConfigurationError
from allennlp.training.metrics.metric import Metric
@Metric.register("fbeta")
class FBetaMeasure(Metric):
"""Compute precision, recall, F-measure and support for each class.
The precision is the ratio `tp / (tp + fp)` where `tp` is the number of
true positives and `fp` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio `tp / (tp + fn)` where `tp` is the number of
true positives and `fn` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
If we have precision and recall, the F-beta score is simply:
`F-beta = (1 + beta ** 2) * precision * recall / (beta ** 2 * precision + recall)`
The F-beta score weights recall more than precision by a factor of
`beta`. `beta == 1.0` means recall and precision are equally important.
The support is the number of occurrences of each class in `y_true`.
# Parameters
beta : `float`, optional (default = `1.0`)
The strength of recall versus precision in the F-score.
average : `str`, optional (default = `None`)
If `None`, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
`'micro'`:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
`'macro'`:
Calculate metrics for each label, and find their unweighted mean.
This does not take label imbalance into account.
`'weighted'`:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
labels: `list`, optional
The set of labels to include and their order if `average is None`.
Labels present in the data can be excluded, for example to calculate a
multi-class average ignoring a majority negative class. Labels not present
in the data will result in 0 components in a macro or weighted average.
"""
def __init__(self, beta: float = 1.0, average: str = None, labels: List[int] = None) -> None:
average_options = {None, "micro", "macro", "weighted"}
if average not in average_options:
raise ConfigurationError(f"`average` has to be one of {average_options}.")
if beta <= 0:
raise ConfigurationError("`beta` should be >0 in the F-beta score.")
if labels is not None and len(labels) == 0:
raise ConfigurationError("`labels` cannot be an empty list.")
self._beta = beta
self._average = average
self._labels = labels
# statistics
# the total number of true positive instances under each class
# Shape: (num_classes, )
self._true_positive_sum: Union[None, torch.Tensor] = None
# the total number of instances
# Shape: (num_classes, )
self._total_sum: Union[None, torch.Tensor] = None
# the total number of instances under each _predicted_ class,
# including true positives and false positives
# Shape: (num_classes, )
self._pred_sum: Union[None, torch.Tensor] = None
# the total number of instances under each _true_ class,
# including true positives and false negatives
# Shape: (num_classes, )
self._true_sum: Union[None, torch.Tensor] = None
@overrides
def __call__(
self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
):
"""
# Parameters
predictions : `torch.Tensor`, required.
A tensor of predictions of shape (batch_size, ..., num_classes).
gold_labels : `torch.Tensor`, required.
A tensor of integer class label of shape (batch_size, ...). It must be the same
shape as the `predictions` tensor without the `num_classes` dimension.
mask : `torch.BoolTensor`, optional (default = `None`).
A masking tensor the same size as `gold_labels`.
"""
predictions, gold_labels, mask = self.detach_tensors(predictions, gold_labels, mask)
device = gold_labels.device
# Calculate true_positive_sum, true_negative_sum, pred_sum, true_sum
num_classes = predictions.size(-1)
if (gold_labels >= num_classes).any():
raise ConfigurationError(
"A gold label passed to FBetaMeasure contains "
f"an id >= {num_classes}, the number of classes."
)
        # `self._true_positive_sum` is None the first time this metric is called,
        # so initialize the running counts here.
if self._true_positive_sum is None:
self._true_positive_sum = torch.zeros(num_classes, device=predictions.device)
self._true_sum = torch.zeros(num_classes, device=predictions.device)
self._pred_sum = torch.zeros(num_classes, device=predictions.device)
self._total_sum = torch.zeros(num_classes, device=predictions.device)
if mask is None:
mask = torch.ones_like(gold_labels).bool()
gold_labels = gold_labels.float()
# If the prediction tensor is all zeros, the record is not classified to any of the labels.
pred_mask = predictions.sum(dim=-1) != 0
argmax_predictions = predictions.max(dim=-1)[1].float()
true_positives = (gold_labels == argmax_predictions) & mask & pred_mask
true_positives_bins = gold_labels[true_positives]
        # If there are no true positives at all, every per-class count is zero.
if true_positives_bins.shape[0] == 0:
true_positive_sum = torch.zeros(num_classes, device=device)
else:
true_positive_sum = torch.bincount(
true_positives_bins.long(), minlength=num_classes
).float()
pred_bins = argmax_predictions[mask & pred_mask].long()
# Watch it:
# When the `mask` is all 0, we will get an _empty_ tensor.
if pred_bins.shape[0] != 0:
pred_sum = torch.bincount(pred_bins, minlength=num_classes).float()
else:
pred_sum = torch.zeros(num_classes, device=device)
gold_labels_bins = gold_labels[mask].long()
if gold_labels.shape[0] != 0:
true_sum = torch.bincount(gold_labels_bins, minlength=num_classes).float()
else:
true_sum = torch.zeros(num_classes, device=predictions.device)
self._total_sum += mask.sum().to(torch.float)
if is_distributed():
true_positive_sum = torch.tensor(true_positive_sum, device=device)
dist.all_reduce(true_positive_sum, op=dist.ReduceOp.SUM)
dist.all_reduce(pred_sum, op=dist.ReduceOp.SUM)
dist.all_reduce(true_sum, op=dist.ReduceOp.SUM)
self._true_positive_sum += true_positive_sum
self._pred_sum += pred_sum
self._true_sum += true_sum
@overrides
def get_metric(self, reset: bool = False):
"""
# Returns
precisions : `List[float]`
recalls : `List[float]`
f1-measures : `List[float]`
!!! Note
If `self.average` is not `None`, you will get `float` instead of `List[float]`.
"""
if self._true_positive_sum is None:
            raise RuntimeError("You have never called this metric before.")
else:
tp_sum = self._true_positive_sum
pred_sum = self._pred_sum
true_sum = self._true_sum
if self._labels is not None:
# Retain only selected labels and order them
tp_sum = tp_sum[self._labels]
pred_sum = pred_sum[self._labels] # type: ignore
true_sum = true_sum[self._labels] # type: ignore
if self._average == "micro":
tp_sum = tp_sum.sum()
pred_sum = pred_sum.sum() # type: ignore
true_sum = true_sum.sum() # type: ignore
beta2 = self._beta ** 2
# Finally, we have all our sufficient statistics.
precision = _prf_divide(tp_sum, pred_sum)
recall = _prf_divide(tp_sum, true_sum)
fscore = (1 + beta2) * precision * recall / (beta2 * precision + recall)
fscore[tp_sum == 0] = 0.0
if self._average == "macro":
precision = precision.mean()
recall = recall.mean()
fscore = fscore.mean()
elif self._average == "weighted":
weights = true_sum
weights_sum = true_sum.sum() # type: ignore
precision = _prf_divide((weights * precision).sum(), weights_sum)
recall = _prf_divide((weights * recall).sum(), weights_sum)
fscore = _prf_divide((weights * fscore).sum(), weights_sum)
if reset:
self.reset()
if self._average is None:
return {
"precision": precision.tolist(),
"recall": recall.tolist(),
"fscore": fscore.tolist(),
}
else:
return {"precision": precision.item(), "recall": recall.item(), "fscore": fscore.item()}
@overrides
def reset(self) -> None:
self._true_positive_sum = None
self._pred_sum = None
self._true_sum = None
self._total_sum = None
@property
def _true_negative_sum(self):
if self._total_sum is None:
return None
else:
true_negative_sum = (
self._total_sum - self._pred_sum - self._true_sum + self._true_positive_sum
)
return true_negative_sum
def _prf_divide(numerator, denominator):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero.
"""
result = numerator / denominator
mask = denominator == 0.0
if not mask.any():
return result
# remove nan
result[mask] = 0.0
return result
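# --- Usage sketch ----------------------------------------------------------
# An illustrative, single-process example: class scores of shape
# (batch_size, num_classes) and integer gold labels of shape (batch_size,).
# The numbers are made up; with `average="micro"` the returned values are
# plain floats rather than per-class lists.
if __name__ == "__main__":
    predictions = torch.tensor([[0.2, 0.8], [0.6, 0.4], [0.9, 0.1]])
    gold_labels = torch.tensor([1, 0, 1])
    metric = FBetaMeasure(average="micro")
    metric(predictions, gold_labels)
    print(metric.get_metric(reset=True))  # -> {"precision": ..., "recall": ..., "fscore": ...}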
| allennlp-master | allennlp/training/metrics/fbeta_measure.py |
from overrides import overrides
import math
from allennlp.training.metrics.average import Average
from allennlp.training.metrics.metric import Metric
@Metric.register("perplexity")
class Perplexity(Average):
"""
Perplexity is a common metric used for evaluating how well a language model
predicts a sample.
Notes
-----
Assumes negative log likelihood loss of each batch (base e). Provides the
average perplexity of the batches.
"""
@overrides
def get_metric(self, reset: bool = False):
"""
# Returns
The accumulated perplexity.
"""
average_loss = super().get_metric(reset)
if average_loss == 0:
return 0.0
# Exponentiate the loss to compute perplexity
return math.exp(average_loss)
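# --- Usage sketch ----------------------------------------------------------
# Illustrative only: record the per-batch negative log likelihood loss
# (base e) and read back the exponentiated average. The loss values are made up.
if __name__ == "__main__":
    metric = Perplexity()
    metric(2.0)  # NLL of batch 1
    metric(4.0)  # NLL of batch 2
    print(metric.get_metric(reset=True))  # -> exp(3.0) ~= 20.09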
| allennlp-master | allennlp/training/metrics/perplexity.py |
import logging
from typing import Optional
from overrides import overrides
import torch
# import torch.distributed as dist
from allennlp.common.util import is_distributed
from allennlp.training.metrics.metric import Metric
logger = logging.getLogger(__name__)
@Metric.register("covariance")
class Covariance(Metric):
"""
This `Metric` calculates the unbiased sample covariance between two tensors.
Each element in the two tensors is assumed to be a different observation of the
variable (i.e., the input tensors are implicitly flattened into vectors and the
covariance is calculated between the vectors).
This implementation is mostly modeled after the streaming_covariance function in Tensorflow. See:
<https://github.com/tensorflow/tensorflow/blob/v1.10.1/tensorflow/contrib/metrics/python/ops/metric_ops.py#L3127>
The following is copied from the Tensorflow documentation:
The algorithm used for this online computation is described in
<https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online>.
Specifically, the formula used to combine two sample comoments is
`C_AB = C_A + C_B + (E[x_A] - E[x_B]) * (E[y_A] - E[y_B]) * n_A * n_B / n_AB`
The comoment for a single batch of data is simply `sum((x - E[x]) * (y - E[y]))`, optionally masked.
"""
def __init__(self) -> None:
self._total_prediction_mean = 0.0
self._total_label_mean = 0.0
self._total_co_moment = 0.0
self._total_count = 0.0
def __call__(
self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
):
"""
# Parameters
predictions : `torch.Tensor`, required.
A tensor of predictions of shape (batch_size, ...).
gold_labels : `torch.Tensor`, required.
A tensor of the same shape as `predictions`.
mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of the same shape as `predictions`.
"""
predictions, gold_labels, mask = self.detach_tensors(predictions, gold_labels, mask)
# Flatten predictions, gold_labels, and mask. We calculate the covariance between
# the vectors, since each element in the predictions and gold_labels tensor is assumed
# to be a separate observation.
predictions = predictions.view(-1)
gold_labels = gold_labels.view(-1)
if mask is not None:
mask = mask.view(-1)
predictions = predictions * mask
gold_labels = gold_labels * mask
num_batch_items = torch.sum(mask).item()
else:
num_batch_items = gold_labels.numel()
# Note that self._total_count must be a float or int at all times
# If it is a 1-dimension Tensor, the previous count will equal the updated_count.
        # The same applies for previous_total_prediction_mean and
# previous_total_label_mean below -- we handle this in the code by
# calling .item() judiciously.
previous_count = self._total_count
updated_count = previous_count + num_batch_items
batch_mean_prediction = torch.sum(predictions) / num_batch_items
delta_mean_prediction = (
(batch_mean_prediction - self._total_prediction_mean) * num_batch_items
) / updated_count
previous_total_prediction_mean = self._total_prediction_mean
batch_mean_label = torch.sum(gold_labels) / num_batch_items
delta_mean_label = (
(batch_mean_label - self._total_label_mean) * num_batch_items
) / updated_count
previous_total_label_mean = self._total_label_mean
batch_coresiduals = (predictions - batch_mean_prediction) * (gold_labels - batch_mean_label)
if mask is not None:
batch_co_moment = torch.sum(batch_coresiduals * mask)
else:
batch_co_moment = torch.sum(batch_coresiduals)
delta_co_moment = batch_co_moment + (
previous_total_prediction_mean - batch_mean_prediction
) * (previous_total_label_mean - batch_mean_label) * (
previous_count * num_batch_items / updated_count
)
# Due to the online computation of covariance, the following code can
# still lead to nan values in later iterations. To be revisited.
# if is_distributed():
# # Note: this gives an approximate aggregation of the covariance.
# device = gold_labels.device
# delta_mean_prediction = torch.tensor(delta_mean_prediction, device=device)
# delta_mean_label = torch.tensor(delta_mean_label, device=device)
# delta_co_moment = torch.tensor(delta_co_moment, device=device)
# _total_count = torch.tensor(updated_count, device=device)
# dist.all_reduce(delta_mean_prediction, op=dist.ReduceOp.SUM)
# dist.all_reduce(delta_mean_label, op=dist.ReduceOp.SUM)
# dist.all_reduce(delta_co_moment, op=dist.ReduceOp.SUM)
# dist.all_reduce(_total_count, op=dist.ReduceOp.SUM)
# updated_count = _total_count.item()
self._total_prediction_mean += delta_mean_prediction.item()
self._total_label_mean += delta_mean_label.item()
self._total_co_moment += delta_co_moment.item()
self._total_count = updated_count
def get_metric(self, reset: bool = False):
"""
# Returns
The accumulated covariance.
"""
if is_distributed():
raise RuntimeError("Distributed aggregation for Covariance is currently not supported.")
covariance = self._total_co_moment / (self._total_count - 1)
if reset:
self.reset()
return covariance
@overrides
def reset(self):
self._total_prediction_mean = 0.0
self._total_label_mean = 0.0
self._total_co_moment = 0.0
self._total_count = 0.0
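# --- Usage sketch ----------------------------------------------------------
# An illustrative, single-process example with made-up values: when the
# predictions equal the labels, the unbiased sample covariance reduces to the
# sample variance.
if __name__ == "__main__":
    predictions = torch.tensor([1.0, 2.0, 3.0])
    gold_labels = torch.tensor([1.0, 2.0, 3.0])
    metric = Covariance()
    metric(predictions, gold_labels)
    print(metric.get_metric(reset=True))  # -> 1.0 (sample variance of [1, 2, 3])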
| allennlp-master | allennlp/training/metrics/covariance.py |
from collections import defaultdict
from typing import Tuple, Dict, Set
from overrides import overrides
import torch
import torch.distributed as dist
from allennlp.common.util import is_distributed
from allennlp.training.metrics.metric import Metric
@Metric.register("rouge")
class ROUGE(Metric):
"""
Recall-Oriented Understudy for Gisting Evaluation (ROUGE)
ROUGE is a metric for measuring the quality of summaries. It is based on calculating the recall
between ngrams in the predicted summary and a set of reference summaries. See [Lin,
"ROUGE: A Package For Automatic Evaluation Of Summaries", 2004]
(https://api.semanticscholar.org/CorpusID:964287).
# Parameters
ngram_size : `int`, optional (default = `2`)
        ROUGE scores are calculated for ROUGE-1 .. ROUGE-`ngram_size`
exclude_indices : `Set[int]`, optional (default = `None`)
Indices to exclude when calculating ngrams. This should usually include
the indices of the start, end, and pad tokens.
"""
def __init__(
self,
ngram_size: int = 2,
exclude_indices: Set[int] = None,
) -> None:
self._ngram_size = ngram_size
self._exclude_indices = exclude_indices or set()
self._total_rouge_n_recalls: Dict[int, float] = defaultdict(lambda: 0.0)
self._total_rouge_n_precisions: Dict[int, float] = defaultdict(lambda: 0.0)
self._total_rouge_n_f1s: Dict[int, float] = defaultdict(lambda: 0.0)
self._total_rouge_l_f1 = 0.0
self._total_sequence_count = 0
@overrides
def reset(self) -> None:
self._total_rouge_n_recalls = defaultdict(lambda: 0.0)
self._total_rouge_n_precisions = defaultdict(lambda: 0.0)
self._total_rouge_n_f1s = defaultdict(lambda: 0.0)
self._total_rouge_l_f1 = 0.0
self._total_sequence_count = 0
def _longest_common_subsequence(self, seq_1: torch.LongTensor, seq_2: torch.LongTensor):
"""
Computes the longest common subsequences between `seq_1` and `seq_2`, ignoring `self._exclude_indices`.
"""
m = len(seq_1)
n = len(seq_2)
# Slightly lower memory usage by iterating over the longer sequence in outer loop
# and storing previous lcs for the shorter sequence
if m < n:
seq_1, seq_2 = seq_2, seq_1
m, n = n, m
prev_lcs = torch.zeros(n + 1, dtype=torch.long)
for i in range(m - 1, -1, -1):
# Make sure we don't count special tokens as part of the subsequences
if seq_1[i].item() in self._exclude_indices:
continue
cur_lcs = torch.zeros_like(prev_lcs)
for j in range(n - 1, -1, -1):
if seq_1[i] == seq_2[j]:
cur_lcs[j] = 1 + prev_lcs[j + 1]
else:
cur_lcs[j] = max(cur_lcs[j + 1], prev_lcs[j])
prev_lcs = cur_lcs
return prev_lcs[0].item()
def _get_rouge_l_score(
self, predicted_tokens: torch.LongTensor, reference_tokens: torch.LongTensor
) -> float:
"""
Compute sum of F1 scores given batch of predictions and references.
"""
total_f1 = 0.0
for predicted_seq, reference_seq in zip(predicted_tokens, reference_tokens):
from allennlp.training.util import get_valid_tokens_mask
m = get_valid_tokens_mask(reference_seq, self._exclude_indices).sum().item()
n = get_valid_tokens_mask(predicted_seq, self._exclude_indices).sum().item()
lcs = self._longest_common_subsequence(reference_seq, predicted_seq)
# This also rules out the case that m or n are 0, so we don't worry about it later
if lcs == 0:
continue
recall_lcs = lcs / m
precision_lcs = lcs / n
f1 = 2 * recall_lcs * precision_lcs / (recall_lcs + precision_lcs)
total_f1 += f1
if is_distributed():
device = predicted_tokens.device
_total_f1 = torch.tensor(total_f1, device=device)
dist.all_reduce(_total_f1, op=dist.ReduceOp.SUM)
total_f1 = _total_f1.item()
return total_f1
def _get_rouge_n_stats(
self,
predicted_tokens: torch.LongTensor,
reference_tokens: torch.LongTensor,
ngram_size: int,
) -> Tuple[float, float, float]:
"""
Compare the predicted tokens to the reference (gold) tokens at the desired
ngram size and compute recall, precision and f1 sums
"""
total_recall = 0.0
total_precision = 0.0
total_f1 = 0.0
for predicted_seq, reference_seq in zip(predicted_tokens, reference_tokens):
from allennlp.training.util import ngrams
predicted_ngram_counts = ngrams(predicted_seq, ngram_size, self._exclude_indices)
reference_ngram_counts = ngrams(reference_seq, ngram_size, self._exclude_indices)
matches = 0
total_reference_ngrams = 0
for ngram, count in reference_ngram_counts.items():
matches += min(predicted_ngram_counts[ngram], count)
total_reference_ngrams += count
total_predicted_ngrams = sum(predicted_ngram_counts.values())
if total_reference_ngrams == 0 or total_predicted_ngrams == 0 or matches == 0:
continue
recall = matches / total_reference_ngrams
precision = matches / total_predicted_ngrams
f1 = 2.0 * recall * precision / (recall + precision)
# Accumulate stats
total_recall += recall
total_precision += precision
total_f1 += f1
if is_distributed():
device = predicted_tokens.device
_total_recall = torch.tensor(total_recall, device=device)
_total_precision = torch.tensor(total_precision, device=device)
_total_f1 = torch.tensor(total_f1, device=device)
dist.all_reduce(_total_recall, op=dist.ReduceOp.SUM)
dist.all_reduce(_total_precision, op=dist.ReduceOp.SUM)
dist.all_reduce(_total_f1, op=dist.ReduceOp.SUM)
total_recall = _total_recall.item()
total_precision = _total_precision.item()
total_f1 = _total_f1.item()
return total_recall, total_precision, total_f1
@overrides
def __call__(
self, # type: ignore
predictions: torch.LongTensor,
gold_targets: torch.LongTensor,
) -> None:
"""
Update recall counts.
# Parameters
predictions : `torch.LongTensor`
Batched predicted tokens of shape `(batch_size, max_sequence_length)`.
        gold_targets : `torch.LongTensor`
Batched reference (gold) sequences with shape `(batch_size, max_gold_sequence_length)`.
# Returns
None
"""
# ROUGE-N
predictions, gold_targets = self.detach_tensors(predictions, gold_targets)
for n in range(1, self._ngram_size + 1):
recall, precision, f1 = self._get_rouge_n_stats(predictions, gold_targets, n)
self._total_rouge_n_recalls[n] += recall
self._total_rouge_n_precisions[n] += precision
self._total_rouge_n_f1s[n] += f1
# ROUGE-L
self._total_rouge_l_f1 += self._get_rouge_l_score(predictions, gold_targets)
sequence_count = len(predictions)
if is_distributed():
device = predictions.device
_sequence_count = torch.tensor(sequence_count, device=device)
dist.all_reduce(_sequence_count, op=dist.ReduceOp.SUM)
sequence_count = _sequence_count.item()
self._total_sequence_count += sequence_count
def _metric_mean(self, metric_sum):
if self._total_sequence_count == 0:
return 0.0
return metric_sum / self._total_sequence_count
@overrides
def get_metric(self, reset: bool = False) -> Dict[str, float]:
"""
# Parameters
reset : `bool`, optional (default = `False`)
Reset any accumulators or internal state.
# Returns
Dict[str, float]:
A dictionary containing `ROUGE-1` .. `ROUGE-ngram_size` scores.
"""
metrics = {}
# ROUGE-N
# Recall
metrics.update(
{
f"ROUGE-{i}_R": self._metric_mean(self._total_rouge_n_recalls[i])
for i in range(1, self._ngram_size + 1)
}
)
# Precision
metrics.update(
{
f"ROUGE-{i}_P": self._metric_mean(self._total_rouge_n_precisions[i])
for i in range(1, self._ngram_size + 1)
}
)
# F1
metrics.update(
{
f"ROUGE-{i}_F1": self._metric_mean(self._total_rouge_n_f1s[i])
for i in range(1, self._ngram_size + 1)
}
)
# ROUGE-L
# F1
metrics["ROUGE-L"] = self._metric_mean(self._total_rouge_l_f1)
if reset:
self.reset()
return metrics
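# --- Usage sketch ----------------------------------------------------------
# An illustrative, single-process example with made-up token ids; index 0 is
# treated as padding and excluded from the n-gram counts.
if __name__ == "__main__":
    predictions = torch.tensor([[1, 2, 3, 0]])
    gold_targets = torch.tensor([[1, 2, 4, 0]])
    metric = ROUGE(ngram_size=2, exclude_indices={0})
    metric(predictions, gold_targets)
    print(metric.get_metric(reset=True))  # -> {"ROUGE-1_R": ..., ..., "ROUGE-L": ...}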
| allennlp-master | allennlp/training/metrics/rouge.py |
"""
A `~allennlp.training.metrics.metric.Metric` is some quantity or quantities
that can be accumulated during training or evaluation; for example,
accuracy or F1 score.
"""
from allennlp.training.metrics.attachment_scores import AttachmentScores
from allennlp.training.metrics.average import Average
from allennlp.training.metrics.boolean_accuracy import BooleanAccuracy
from allennlp.training.metrics.bleu import BLEU
from allennlp.training.metrics.rouge import ROUGE
from allennlp.training.metrics.categorical_accuracy import CategoricalAccuracy
from allennlp.training.metrics.covariance import Covariance
from allennlp.training.metrics.entropy import Entropy
from allennlp.training.metrics.evalb_bracketing_scorer import (
EvalbBracketingScorer,
DEFAULT_EVALB_DIR,
)
from allennlp.training.metrics.fbeta_measure import FBetaMeasure
from allennlp.training.metrics.fbeta_multi_label_measure import FBetaMultiLabelMeasure
from allennlp.training.metrics.f1_measure import F1Measure
from allennlp.training.metrics.mean_absolute_error import MeanAbsoluteError
from allennlp.training.metrics.metric import Metric
from allennlp.training.metrics.pearson_correlation import PearsonCorrelation
from allennlp.training.metrics.spearman_correlation import SpearmanCorrelation
from allennlp.training.metrics.perplexity import Perplexity
from allennlp.training.metrics.sequence_accuracy import SequenceAccuracy
from allennlp.training.metrics.span_based_f1_measure import SpanBasedF1Measure
from allennlp.training.metrics.unigram_recall import UnigramRecall
from allennlp.training.metrics.auc import Auc
| allennlp-master | allennlp/training/metrics/__init__.py |
from typing import Optional
import sys
from overrides import overrides
import torch
import torch.distributed as dist
from allennlp.common.util import is_distributed
from allennlp.common.checks import ConfigurationError
from allennlp.training.metrics.metric import Metric
@Metric.register("unigram_recall")
class UnigramRecall(Metric):
"""
Unigram top-K recall. This does not take word order into account. Assumes
integer labels, with each item to be classified having a single correct
class.
"""
def __init__(self) -> None:
self.correct_count = 0.0
self.total_count = 0.0
def __call__(
self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
end_index: int = sys.maxsize,
):
"""
# Parameters
predictions : `torch.Tensor`, required.
A tensor of predictions of shape (batch_size, k, sequence_length).
gold_labels : `torch.Tensor`, required.
A tensor of integer class label of shape (batch_size, sequence_length).
mask : `torch.BoolTensor`, optional (default = `None`).
A masking tensor the same size as `gold_labels`.
"""
predictions, gold_labels, mask = self.detach_tensors(predictions, gold_labels, mask)
device = predictions.device
# Some sanity checks.
if gold_labels.dim() != predictions.dim() - 1:
raise ConfigurationError(
"gold_labels must have dimension == predictions.dim() - 1 but "
"found tensor of shape: {}".format(gold_labels.size())
)
if mask is not None and mask.size() != gold_labels.size():
raise ConfigurationError(
"mask must have the same size as predictions but "
"found tensor of shape: {}".format(mask.size())
)
batch_size = predictions.size()[0]
correct = 0.0
for i in range(batch_size):
beams = predictions[i]
cur_gold = gold_labels[i]
if mask is not None:
masked_gold = cur_gold * mask[i]
else:
masked_gold = cur_gold
cleaned_gold = [x for x in masked_gold if x not in (0, end_index)]
retval = 0.0
for word in cleaned_gold:
stillsearch = True
for beam in beams:
# word is from cleaned gold which doesn't have 0 or
# end_index, so we don't need to explicitly remove those
# from beam.
if stillsearch and word in beam:
retval += 1 / len(cleaned_gold)
stillsearch = False
correct += retval
_correct_count = correct
_total_count = predictions.size()[0]
if is_distributed():
correct_count = torch.tensor(_correct_count, device=device)
total_count = torch.tensor(_total_count, device=device)
dist.all_reduce(correct_count, op=dist.ReduceOp.SUM)
dist.all_reduce(total_count, op=dist.ReduceOp.SUM)
_correct_count = correct_count.item()
_total_count = total_count.item()
self.correct_count += _correct_count
self.total_count += _total_count
def get_metric(self, reset: bool = False):
"""
# Returns
The accumulated recall.
"""
recall = self.correct_count / self.total_count if self.total_count > 0 else 0
if reset:
self.reset()
return {"unigram_recall": recall}
@overrides
def reset(self):
self.correct_count = 0.0
self.total_count = 0.0
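# --- Usage sketch ----------------------------------------------------------
# An illustrative, single-process example: one instance with k=2 candidate
# sequences. Token ids are made up; two of the three gold unigrams appear in
# some candidate, giving a recall of 2/3.
if __name__ == "__main__":
    predictions = torch.tensor([[[1, 2, 3], [4, 5, 6]]])  # (batch_size, k, sequence_length)
    gold_labels = torch.tensor([[2, 3, 7]])               # (batch_size, sequence_length)
    metric = UnigramRecall()
    metric(predictions, gold_labels)
    print(metric.get_metric(reset=True))  # -> {"unigram_recall": 0.666...}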
| allennlp-master | allennlp/training/metrics/unigram_recall.py |
from typing import List, Optional
import torch
import torch.distributed as dist
from overrides import overrides
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import is_distributed
from allennlp.training.metrics import FBetaMeasure
from allennlp.training.metrics.metric import Metric
@Metric.register("fbeta_multi_label")
class FBetaMultiLabelMeasure(FBetaMeasure):
"""Compute precision, recall, F-measure and support for multi-label classification.
The precision is the ratio `tp / (tp + fp)` where `tp` is the number of
true positives and `fp` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio `tp / (tp + fn)` where `tp` is the number of
true positives and `fn` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
If we have precision and recall, the F-beta score is simply:
`F-beta = (1 + beta ** 2) * precision * recall / (beta ** 2 * precision + recall)`
The F-beta score weights recall more than precision by a factor of
`beta`. `beta == 1.0` means recall and precision are equally important.
The support is the number of occurrences of each class in `y_true`.
# Parameters
beta : `float`, optional (default = `1.0`)
The strength of recall versus precision in the F-score.
average : `str`, optional (default = `None`)
If `None`, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
`'micro'`:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
`'macro'`:
Calculate metrics for each label, and find their unweighted mean.
This does not take label imbalance into account.
`'weighted'`:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
labels: `list`, optional
The set of labels to include and their order if `average is None`.
Labels present in the data can be excluded, for example to calculate a
multi-class average ignoring a majority negative class. Labels not present
in the data will result in 0 components in a macro or weighted average.
threshold: `float`, optional (default = `0.5`)
Logits over this threshold will be considered predictions for the corresponding class.
"""
def __init__(
self,
beta: float = 1.0,
average: str = None,
labels: List[int] = None,
threshold: float = 0.5,
) -> None:
super().__init__(beta, average, labels)
self._threshold = threshold
@overrides
def __call__(
self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
):
"""
# Parameters
predictions : `torch.Tensor`, required.
A tensor of predictions of shape (batch_size, ..., num_classes).
gold_labels : `torch.Tensor`, required.
A tensor of integer class label of shape (batch_size, ...). It must be the same
shape as the `predictions` tensor without the `num_classes` dimension.
mask : `torch.BoolTensor`, optional (default = `None`).
A masking tensor the same size as `gold_labels`.
"""
predictions, gold_labels, mask = self.detach_tensors(predictions, gold_labels, mask)
device = gold_labels.device
# Calculate true_positive_sum, true_negative_sum, pred_sum, true_sum
num_classes = predictions.size(-1)
if (gold_labels >= num_classes).any():
raise ConfigurationError(
"A gold label passed to FBetaMeasure contains "
f"an id >= {num_classes}, the number of classes."
)
        # `self._true_positive_sum` is None the first time this metric is called,
        # so initialize the running counts here.
if self._true_positive_sum is None:
self._true_positive_sum = torch.zeros(num_classes, device=predictions.device)
self._true_sum = torch.zeros(num_classes, device=predictions.device)
self._pred_sum = torch.zeros(num_classes, device=predictions.device)
self._total_sum = torch.zeros(num_classes, device=predictions.device)
if mask is None:
mask = torch.ones_like(gold_labels).bool()
gold_labels = gold_labels.float()
# If the prediction tensor is all zeros, the record is not classified to any of the labels.
pred_mask = (predictions.sum(dim=-1) != 0).unsqueeze(-1)
threshold_predictions = (predictions >= self._threshold).float()
class_indices = (
torch.arange(num_classes, device=predictions.device)
.unsqueeze(0)
.repeat(gold_labels.size(0), 1)
)
true_positives = (gold_labels * threshold_predictions).bool() & mask & pred_mask
true_positives_bins = class_indices[true_positives]
        # If there are no true positives at all, every per-class count is zero.
if true_positives_bins.shape[0] == 0:
true_positive_sum = torch.zeros(num_classes, device=predictions.device)
else:
true_positive_sum = torch.bincount(
true_positives_bins.long(), minlength=num_classes
).float()
pred_bins = class_indices[threshold_predictions.bool() & mask & pred_mask]
# Watch it:
# When the `mask` is all 0, we will get an _empty_ tensor.
if pred_bins.shape[0] != 0:
pred_sum = torch.bincount(pred_bins, minlength=num_classes).float()
else:
pred_sum = torch.zeros(num_classes, device=predictions.device)
gold_labels_bins = class_indices[gold_labels.bool() & mask]
if gold_labels_bins.shape[0] != 0:
true_sum = torch.bincount(gold_labels_bins, minlength=num_classes).float()
else:
true_sum = torch.zeros(num_classes, device=predictions.device)
self._total_sum += mask.expand_as(gold_labels).sum().to(torch.float)
if is_distributed():
true_positive_sum = torch.tensor(true_positive_sum, device=device)
pred_sum = torch.tensor(pred_sum, device=device)
true_sum = torch.tensor(true_sum, device=device)
dist.all_reduce(true_positive_sum, op=dist.ReduceOp.SUM)
dist.all_reduce(pred_sum, op=dist.ReduceOp.SUM)
dist.all_reduce(true_sum, op=dist.ReduceOp.SUM)
self._true_positive_sum += true_positive_sum
self._pred_sum += pred_sum
self._true_sum += true_sum
@property
def _true_negative_sum(self):
if self._total_sum is None:
return None
else:
true_negative_sum = (
self._total_sum[0] / self._true_positive_sum.size(0)
- self._pred_sum
- self._true_sum
+ self._true_positive_sum
)
return true_negative_sum
@Metric.register("f1_multi_label")
class F1MultiLabelMeasure(FBetaMultiLabelMeasure):
def __init__(
self, average: str = None, labels: List[int] = None, threshold: float = 0.5
) -> None:
super().__init__(1.0, average, labels, threshold)
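# --- Usage sketch ----------------------------------------------------------
# An illustrative, single-process example: per-class probabilities and
# multi-hot gold labels, both of shape (batch_size, num_classes). The values
# are made up; scores above the 0.5 threshold count as positive predictions.
if __name__ == "__main__":
    predictions = torch.tensor([[0.8, 0.3, 0.6], [0.2, 0.9, 0.1]])
    gold_labels = torch.tensor([[1, 0, 1], [0, 1, 1]])
    metric = F1MultiLabelMeasure(average="micro")
    metric(predictions, gold_labels)
    print(metric.get_metric(reset=True))  # -> {"precision": 1.0, "recall": 0.75, "fscore": ...}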
| allennlp-master | allennlp/training/metrics/fbeta_multi_label_measure.py |
from typing import Optional, List
from overrides import overrides
import torch
import torch.distributed as dist
from allennlp.common.util import is_distributed
from allennlp.training.metrics.metric import Metric
@Metric.register("attachment_scores")
class AttachmentScores(Metric):
"""
Computes labeled and unlabeled attachment scores for a
dependency parse, as well as sentence level exact match
for both labeled and unlabeled trees. Note that the input
to this metric is the sampled predictions, not the distribution
itself.
# Parameters
ignore_classes : `List[int]`, optional (default = `None`)
A list of label ids to ignore when computing metrics.
"""
def __init__(self, ignore_classes: List[int] = None) -> None:
self._labeled_correct = 0.0
self._unlabeled_correct = 0.0
self._exact_labeled_correct = 0.0
self._exact_unlabeled_correct = 0.0
self._total_words = 0.0
self._total_sentences = 0.0
self._ignore_classes: List[int] = ignore_classes or []
def __call__( # type: ignore
self,
predicted_indices: torch.Tensor,
predicted_labels: torch.Tensor,
gold_indices: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
):
"""
# Parameters
predicted_indices : `torch.Tensor`, required.
A tensor of head index predictions of shape (batch_size, timesteps).
predicted_labels : `torch.Tensor`, required.
A tensor of arc label predictions of shape (batch_size, timesteps).
gold_indices : `torch.Tensor`, required.
A tensor of the same shape as `predicted_indices`.
gold_labels : `torch.Tensor`, required.
A tensor of the same shape as `predicted_labels`.
mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of the same shape as `predicted_indices`.
"""
detached = self.detach_tensors(
predicted_indices, predicted_labels, gold_indices, gold_labels, mask
)
predicted_indices, predicted_labels, gold_indices, gold_labels, mask = detached
device = predicted_indices.device
if mask is None:
mask = torch.ones_like(predicted_indices).bool()
predicted_indices = predicted_indices.long()
predicted_labels = predicted_labels.long()
gold_indices = gold_indices.long()
gold_labels = gold_labels.long()
# Multiply by a mask denoting locations of
# gold labels which we should ignore.
for label in self._ignore_classes:
label_mask = gold_labels.eq(label)
mask = mask & ~label_mask
correct_indices = predicted_indices.eq(gold_indices).long() * mask
unlabeled_exact_match = (correct_indices + ~mask).prod(dim=-1)
correct_labels = predicted_labels.eq(gold_labels).long() * mask
correct_labels_and_indices = correct_indices * correct_labels
labeled_exact_match = (correct_labels_and_indices + ~mask).prod(dim=-1)
total_sentences = correct_indices.size(0)
total_words = correct_indices.numel() - (~mask).sum()
if is_distributed():
dist.all_reduce(correct_indices, op=dist.ReduceOp.SUM)
dist.all_reduce(unlabeled_exact_match, op=dist.ReduceOp.SUM)
dist.all_reduce(correct_labels_and_indices, op=dist.ReduceOp.SUM)
dist.all_reduce(labeled_exact_match, op=dist.ReduceOp.SUM)
total_sentences = torch.tensor(total_sentences, device=device)
total_words = torch.tensor(total_words, device=device)
dist.all_reduce(total_sentences, op=dist.ReduceOp.SUM)
dist.all_reduce(total_words, op=dist.ReduceOp.SUM)
total_sentences = total_sentences.item()
total_words = total_words.item()
self._unlabeled_correct += correct_indices.sum()
self._exact_unlabeled_correct += unlabeled_exact_match.sum()
self._labeled_correct += correct_labels_and_indices.sum()
self._exact_labeled_correct += labeled_exact_match.sum()
self._total_sentences += total_sentences
self._total_words += total_words
def get_metric(
self,
reset: bool = False,
):
"""
# Returns
The accumulated metrics as a dictionary.
"""
unlabeled_attachment_score = 0.0
labeled_attachment_score = 0.0
unlabeled_exact_match = 0.0
labeled_exact_match = 0.0
if self._total_words > 0.0:
unlabeled_attachment_score = float(self._unlabeled_correct) / float(self._total_words)
labeled_attachment_score = float(self._labeled_correct) / float(self._total_words)
if self._total_sentences > 0:
unlabeled_exact_match = float(self._exact_unlabeled_correct) / float(
self._total_sentences
)
labeled_exact_match = float(self._exact_labeled_correct) / float(self._total_sentences)
if reset:
self.reset()
metrics = {
"UAS": unlabeled_attachment_score,
"LAS": labeled_attachment_score,
"UEM": unlabeled_exact_match,
"LEM": labeled_exact_match,
}
return metrics
@overrides
def reset(self):
self._labeled_correct = 0.0
self._unlabeled_correct = 0.0
self._exact_labeled_correct = 0.0
self._exact_unlabeled_correct = 0.0
self._total_words = 0.0
self._total_sentences = 0.0
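# --- Usage sketch ----------------------------------------------------------
# An illustrative, single-process example: one sentence of three words where
# one head is attached incorrectly. All indices and label ids are made up.
if __name__ == "__main__":
    predicted_indices = torch.tensor([[0, 1, 1]])
    predicted_labels = torch.tensor([[1, 2, 3]])
    gold_indices = torch.tensor([[0, 1, 2]])
    gold_labels = torch.tensor([[1, 2, 3]])
    metric = AttachmentScores()
    metric(predicted_indices, predicted_labels, gold_indices, gold_labels)
    print(metric.get_metric(reset=True))  # -> UAS and LAS of 0.66..., UEM and LEM of 0.0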
| allennlp-master | allennlp/training/metrics/attachment_scores.py |
from typing import Dict, Iterable, Optional, Any
import torch
from allennlp.common.registrable import Registrable
class Metric(Registrable):
"""
A very general abstract class representing a metric which can be
accumulated.
"""
supports_distributed = False
def __call__(
self, predictions: torch.Tensor, gold_labels: torch.Tensor, mask: Optional[torch.BoolTensor]
):
"""
# Parameters
predictions : `torch.Tensor`, required.
A tensor of predictions.
gold_labels : `torch.Tensor`, required.
A tensor corresponding to some gold label to evaluate against.
mask : `torch.BoolTensor`, optional (default = `None`).
A mask can be passed, in order to deal with metrics which are
computed over potentially padded elements, such as sequence labels.
"""
raise NotImplementedError
def get_metric(self, reset: bool) -> Dict[str, Any]:
"""
Compute and return the metric. Optionally also call `self.reset`.
"""
raise NotImplementedError
def reset(self) -> None:
"""
Reset any accumulators or internal state.
"""
raise NotImplementedError
@staticmethod
def detach_tensors(*tensors: torch.Tensor) -> Iterable[torch.Tensor]:
"""
If you actually passed gradient-tracking Tensors to a Metric, there will be
a huge memory leak, because it will prevent garbage collection for the computation
graph. This method ensures the tensors are detached.
"""
# Check if it's actually a tensor in case something else was passed.
return (x.detach() if isinstance(x, torch.Tensor) else x for x in tensors)
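# --- Subclassing sketch -----------------------------------------------------
# An illustrative, minimal subclass showing the three methods a concrete
# metric implements. `HitRate` and its registered name are made up for this
# sketch and are not part of the library.
if __name__ == "__main__":
    @Metric.register("hypothetical-hit-rate")
    class HitRate(Metric):
        def __init__(self) -> None:
            self._hits = 0
            self._total = 0
        def __call__(self, predictions, gold_labels, mask=None):
            predictions, gold_labels, mask = self.detach_tensors(predictions, gold_labels, mask)
            self._hits += (predictions == gold_labels).sum().item()
            self._total += gold_labels.numel()
        def get_metric(self, reset: bool = False):
            value = {"hit_rate": self._hits / self._total if self._total else 0.0}
            if reset:
                self.reset()
            return value
        def reset(self) -> None:
            self._hits = 0
            self._total = 0
    metric = HitRate()
    metric(torch.tensor([1, 2, 3]), torch.tensor([1, 2, 4]))
    print(metric.get_metric(reset=True))  # -> {"hit_rate": 0.666...}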
| allennlp-master | allennlp/training/metrics/metric.py |
from overrides import overrides
import torch
import torch.distributed as dist
from allennlp.common.util import is_distributed
from allennlp.training.metrics.metric import Metric
@Metric.register("average")
class Average(Metric):
"""
This [`Metric`](./metric.md) breaks with the typical `Metric` API and just stores values that were
computed in some fashion outside of a `Metric`. If you have some external code that computes
the metric for you, for instance, you can use this to report the average result using our
`Metric` API.
"""
def __init__(self) -> None:
self._total_value = 0.0
self._count = 0
@overrides
def __call__(self, value):
"""
# Parameters
value : `float`
The value to average.
"""
_total_value = list(self.detach_tensors(value))[0]
_count = 1
if is_distributed():
device = torch.device("cuda" if dist.get_backend() == "nccl" else "cpu")
count = torch.tensor(_count, device=device)
total_value = torch.tensor(_total_value, device=device)
dist.all_reduce(count, op=dist.ReduceOp.SUM)
dist.all_reduce(total_value, op=dist.ReduceOp.SUM)
_count = count.item()
_total_value = total_value.item()
self._count += _count
self._total_value += _total_value
@overrides
def get_metric(self, reset: bool = False):
"""
# Returns
The average of all values that were passed to `__call__`.
"""
average_value = self._total_value / self._count if self._count > 0 else 0.0
if reset:
self.reset()
return float(average_value)
@overrides
def reset(self):
self._total_value = 0.0
self._count = 0
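# --- Usage sketch ----------------------------------------------------------
# Illustrative only: values computed elsewhere are pushed in one at a time
# and averaged on read. The numbers are made up.
if __name__ == "__main__":
    metric = Average()
    metric(2.0)
    metric(4.0)
    print(metric.get_metric(reset=True))  # -> 3.0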
| allennlp-master | allennlp/training/metrics/average.py |
from typing import Optional
from overrides import overrides
import torch
import torch.distributed as dist
from allennlp.common.util import is_distributed
from allennlp.common.checks import ConfigurationError
from allennlp.training.metrics.metric import Metric
@Metric.register("categorical_accuracy")
class CategoricalAccuracy(Metric):
"""
Categorical Top-K accuracy. Assumes integer labels, with
each item to be classified having a single correct class.
Tie break enables equal distribution of scores among the
classes with same maximum predicted scores.
"""
supports_distributed = True
def __init__(self, top_k: int = 1, tie_break: bool = False) -> None:
if top_k > 1 and tie_break:
raise ConfigurationError(
"Tie break in Categorical Accuracy can be done only for maximum (top_k = 1)"
)
if top_k <= 0:
raise ConfigurationError("top_k passed to Categorical Accuracy must be > 0")
self._top_k = top_k
self._tie_break = tie_break
self.correct_count = 0.0
self.total_count = 0.0
def __call__(
self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
):
"""
# Parameters
predictions : `torch.Tensor`, required.
A tensor of predictions of shape (batch_size, ..., num_classes).
gold_labels : `torch.Tensor`, required.
A tensor of integer class label of shape (batch_size, ...). It must be the same
shape as the `predictions` tensor without the `num_classes` dimension.
mask : `torch.BoolTensor`, optional (default = `None`).
A masking tensor the same size as `gold_labels`.
"""
predictions, gold_labels, mask = self.detach_tensors(predictions, gold_labels, mask)
# Some sanity checks.
num_classes = predictions.size(-1)
if gold_labels.dim() != predictions.dim() - 1:
raise ConfigurationError(
"gold_labels must have dimension == predictions.size() - 1 but "
"found tensor of shape: {}".format(predictions.size())
)
if (gold_labels >= num_classes).any():
raise ConfigurationError(
"A gold label passed to Categorical Accuracy contains an id >= {}, "
"the number of classes.".format(num_classes)
)
predictions = predictions.view((-1, num_classes))
gold_labels = gold_labels.view(-1).long()
if not self._tie_break:
# Top K indexes of the predictions (or fewer, if there aren't K of them).
# Special case topk == 1, because it's common and .max() is much faster than .topk().
if self._top_k == 1:
top_k = predictions.max(-1)[1].unsqueeze(-1)
else:
_, sorted_indices = predictions.sort(dim=-1, descending=True)
top_k = sorted_indices[..., : min(self._top_k, predictions.shape[-1])]
# This is of shape (batch_size, ..., top_k).
correct = top_k.eq(gold_labels.unsqueeze(-1)).float()
else:
# prediction is correct if gold label falls on any of the max scores. distribute score by tie_counts
max_predictions = predictions.max(-1)[0]
max_predictions_mask = predictions.eq(max_predictions.unsqueeze(-1))
# max_predictions_mask is (rows X num_classes) and gold_labels is (batch_size)
# ith entry in gold_labels points to index (0-num_classes) for ith row in max_predictions
            # For each row, check whether the index pointed to by gold_label was 1
            # (i.e. among the max-scored classes).
correct = max_predictions_mask[
torch.arange(gold_labels.numel(), device=gold_labels.device).long(), gold_labels
].float()
tie_counts = max_predictions_mask.sum(-1)
correct /= tie_counts.float()
correct.unsqueeze_(-1)
if mask is not None:
correct *= mask.view(-1, 1)
_total_count = mask.sum()
else:
_total_count = torch.tensor(gold_labels.numel())
_correct_count = correct.sum()
if is_distributed():
device = torch.device("cuda" if dist.get_backend() == "nccl" else "cpu")
_correct_count = _correct_count.to(device)
_total_count = _total_count.to(device)
dist.all_reduce(_correct_count, op=dist.ReduceOp.SUM)
dist.all_reduce(_total_count, op=dist.ReduceOp.SUM)
self.correct_count += _correct_count.item()
self.total_count += _total_count.item()
def get_metric(self, reset: bool = False):
"""
# Returns
The accumulated accuracy.
"""
if self.total_count > 1e-12:
accuracy = float(self.correct_count) / float(self.total_count)
else:
accuracy = 0.0
if reset:
self.reset()
return accuracy
@overrides
def reset(self):
self.correct_count = 0.0
self.total_count = 0.0
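# --- Usage sketch ----------------------------------------------------------
# An illustrative, single-process example: class scores of shape
# (batch_size, num_classes) and integer gold labels of shape (batch_size,).
# The values are made up; only the first prediction picks the gold class.
if __name__ == "__main__":
    predictions = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
    gold_labels = torch.tensor([1, 1])
    metric = CategoricalAccuracy(top_k=1)
    metric(predictions, gold_labels)
    print(metric.get_metric(reset=True))  # -> 0.5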
| allennlp-master | allennlp/training/metrics/categorical_accuracy.py |
from typing import List
import logging
import os
import tempfile
import subprocess
import shutil
from overrides import overrides
from nltk import Tree
import torch
import torch.distributed as dist
from allennlp.common.util import is_distributed
from allennlp.common.checks import ConfigurationError
from allennlp.training.metrics.metric import Metric
logger = logging.getLogger(__name__)
DEFAULT_EVALB_DIR = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, "tools", "EVALB"
)
)
@Metric.register("evalb")
class EvalbBracketingScorer(Metric):
"""
This class uses the external EVALB software for computing a broad range of metrics
on parse trees. Here, we use it to compute the Precision, Recall and F1 metrics.
You can download the source for EVALB from here: <https://nlp.cs.nyu.edu/evalb/>.
Note that this software is 20 years old. In order to compile it on modern hardware,
you may need to remove an `include <malloc.h>` statement in `evalb.c` before it
will compile.
AllenNLP contains the EVALB software, but you will need to compile it yourself
before using it because the binary it generates is system dependent. To build it,
run `make` inside the `allennlp/tools/EVALB` directory.
Note that this metric reads and writes from disk quite a bit. You probably don't
want to include it in your training loop; instead, you should calculate this on
a validation set only.
# Parameters
evalb_directory_path : `str`, required.
The directory containing the EVALB executable.
evalb_param_filename : `str`, optional (default = `"COLLINS.prm"`)
The relative name of the EVALB configuration file used when scoring the trees.
By default, this uses the COLLINS.prm configuration file which comes with EVALB.
This configuration ignores POS tags and some punctuation labels.
    evalb_num_errors_to_kill : `int`, optional (default = `10`)
The number of errors to tolerate from EVALB before terminating evaluation.
"""
def __init__(
self,
evalb_directory_path: str = DEFAULT_EVALB_DIR,
evalb_param_filename: str = "COLLINS.prm",
evalb_num_errors_to_kill: int = 10,
) -> None:
self._evalb_directory_path = evalb_directory_path
self._evalb_program_path = os.path.join(evalb_directory_path, "evalb")
self._evalb_param_path = os.path.join(evalb_directory_path, evalb_param_filename)
self._evalb_num_errors_to_kill = evalb_num_errors_to_kill
self._header_line = [
"ID",
"Len.",
"Stat.",
"Recal",
"Prec.",
"Bracket",
"gold",
"test",
"Bracket",
"Words",
"Tags",
"Accracy",
]
self._correct_predicted_brackets = 0.0
self._gold_brackets = 0.0
self._predicted_brackets = 0.0
@overrides
def __call__(self, predicted_trees: List[Tree], gold_trees: List[Tree]) -> None: # type: ignore
"""
# Parameters
predicted_trees : `List[Tree]`
A list of predicted NLTK Trees to compute score for.
gold_trees : `List[Tree]`
A list of gold NLTK Trees to use as a reference.
"""
if not os.path.exists(self._evalb_program_path):
logger.warning(
f"EVALB not found at {self._evalb_program_path}. Attempting to compile it."
)
EvalbBracketingScorer.compile_evalb(self._evalb_directory_path)
# If EVALB executable still doesn't exist, raise an error.
if not os.path.exists(self._evalb_program_path):
compile_command = (
f"python -c 'from allennlp.training.metrics import EvalbBracketingScorer; "
f'EvalbBracketingScorer.compile_evalb("{self._evalb_directory_path}")\''
)
raise ConfigurationError(
f"EVALB still not found at {self._evalb_program_path}. "
"You must compile the EVALB scorer before using it."
" Run 'make' in the '{}' directory or run: {}".format(
                    self._evalb_directory_path, compile_command
)
)
tempdir = tempfile.mkdtemp()
gold_path = os.path.join(tempdir, "gold.txt")
predicted_path = os.path.join(tempdir, "predicted.txt")
with open(gold_path, "w") as gold_file:
for tree in gold_trees:
gold_file.write(f"{tree.pformat(margin=1000000)}\n")
with open(predicted_path, "w") as predicted_file:
for tree in predicted_trees:
predicted_file.write(f"{tree.pformat(margin=1000000)}\n")
command = [
self._evalb_program_path,
"-p",
self._evalb_param_path,
"-e",
str(self._evalb_num_errors_to_kill),
gold_path,
predicted_path,
]
completed_process = subprocess.run(
command, stdout=subprocess.PIPE, universal_newlines=True, check=True
)
_correct_predicted_brackets = 0.0
_gold_brackets = 0.0
_predicted_brackets = 0.0
for line in completed_process.stdout.split("\n"):
stripped = line.strip().split()
if len(stripped) == 12 and stripped != self._header_line:
# This line contains results for a single tree.
numeric_line = [float(x) for x in stripped]
_correct_predicted_brackets += numeric_line[5]
_gold_brackets += numeric_line[6]
_predicted_brackets += numeric_line[7]
shutil.rmtree(tempdir)
if is_distributed():
device = torch.device("cuda" if dist.get_backend() == "nccl" else "cpu")
correct_predicted_brackets = torch.tensor(_correct_predicted_brackets, device=device)
predicted_brackets = torch.tensor(_predicted_brackets, device=device)
gold_brackets = torch.tensor(_gold_brackets, device=device)
dist.all_reduce(correct_predicted_brackets, op=dist.ReduceOp.SUM)
dist.all_reduce(predicted_brackets, op=dist.ReduceOp.SUM)
dist.all_reduce(gold_brackets, op=dist.ReduceOp.SUM)
_correct_predicted_brackets = correct_predicted_brackets.item()
_predicted_brackets = predicted_brackets.item()
_gold_brackets = gold_brackets.item()
self._correct_predicted_brackets += _correct_predicted_brackets
self._gold_brackets += _gold_brackets
self._predicted_brackets += _predicted_brackets
@overrides
def get_metric(self, reset: bool = False):
"""
# Returns
The average precision, recall and f1.
"""
recall = (
self._correct_predicted_brackets / self._gold_brackets
if self._gold_brackets > 0
else 0.0
)
precision = (
self._correct_predicted_brackets / self._predicted_brackets
            if self._predicted_brackets > 0
else 0.0
)
f1_measure = (
2 * (precision * recall) / (precision + recall) if precision + recall > 0 else 0
)
if reset:
self.reset()
return {
"evalb_recall": recall,
"evalb_precision": precision,
"evalb_f1_measure": f1_measure,
}
@overrides
def reset(self):
self._correct_predicted_brackets = 0.0
self._gold_brackets = 0.0
self._predicted_brackets = 0.0
@staticmethod
def compile_evalb(evalb_directory_path: str = DEFAULT_EVALB_DIR):
logger.info(f"Compiling EVALB by running make in {evalb_directory_path}.")
os.system("cd {} && make && cd ../../../".format(evalb_directory_path))
@staticmethod
def clean_evalb(evalb_directory_path: str = DEFAULT_EVALB_DIR):
os.system("rm {}".format(os.path.join(evalb_directory_path, "evalb")))
| allennlp-master | allennlp/training/metrics/evalb_bracketing_scorer.py |
from typing import Optional
from overrides import overrides
import torch
import torch.distributed as dist
from allennlp.common.util import is_distributed
from allennlp.training.metrics.metric import Metric
@Metric.register("entropy")
class Entropy(Metric):
def __init__(self) -> None:
self._entropy = 0.0
self._count = 0
@overrides
def __call__(
self, # type: ignore
logits: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
):
"""
# Parameters
logits : `torch.Tensor`, required.
A tensor of unnormalized log probabilities of shape (batch_size, ..., num_classes).
mask : `torch.BoolTensor`, optional (default = `None`).
A masking tensor of shape (batch_size, ...).
"""
logits, mask = self.detach_tensors(logits, mask)
device = logits.device
if mask is None:
mask = torch.ones(logits.size()[:-1], device=logits.device).bool()
log_probs = torch.nn.functional.log_softmax(logits, dim=-1)
probabilities = torch.exp(log_probs) * mask.unsqueeze(-1)
weighted_negative_likelihood = -log_probs * probabilities
entropy = weighted_negative_likelihood.sum(-1)
_entropy = entropy.sum() / mask.sum()
_count = 1
if is_distributed():
count = torch.tensor(_count, device=device)
dist.all_reduce(_entropy, op=dist.ReduceOp.SUM)
dist.all_reduce(count, op=dist.ReduceOp.SUM)
_count = count.item()
self._entropy += _entropy.item()
self._count += _count
@overrides
def get_metric(self, reset: bool = False):
"""
# Returns
The scalar average entropy.
"""
average_value = self._entropy / self._count if self._count > 0 else 0
if reset:
self.reset()
return {"entropy": average_value}
@overrides
def reset(self):
self._entropy = 0.0
self._count = 0
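# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original AllenNLP
# source). It relies only on the public `Entropy` API defined above: call the
# metric with a (batch_size, ..., num_classes) logits tensor plus an optional
# boolean mask, then read the accumulated value back with `get_metric()`.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    metric = Entropy()
    logits = torch.randn(2, 5, 4)  # (batch_size, sequence_length, num_classes)
    mask = torch.ones(2, 5, dtype=torch.bool)
    mask[1, 3:] = False  # pretend the second sequence is padded after 3 tokens
    metric(logits, mask)
    print(metric.get_metric(reset=True))  # e.g. {'entropy': 1.3...}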
| allennlp-master | allennlp/training/metrics/entropy.py |
from typing import Optional
from overrides import overrides
import torch
import torch.distributed as dist
from allennlp.common.util import is_distributed
from allennlp.common.checks import ConfigurationError
from allennlp.training.metrics.metric import Metric
@Metric.register("sequence_accuracy")
class SequenceAccuracy(Metric):
"""
Sequence Top-K accuracy. Assumes integer labels, with
each item to be classified having a single correct class.
"""
def __init__(self) -> None:
self.correct_count = 0.0
self.total_count = 0.0
def __call__(
self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
):
"""
# Parameters
predictions : `torch.Tensor`, required.
A tensor of predictions of shape (batch_size, k, sequence_length).
gold_labels : `torch.Tensor`, required.
            A tensor of integer class labels of shape (batch_size, sequence_length).
mask : `torch.BoolTensor`, optional (default = `None`).
A masking tensor the same size as `gold_labels`.
"""
predictions, gold_labels, mask = self.detach_tensors(predictions, gold_labels, mask)
device = gold_labels.device
# Some sanity checks.
if gold_labels.dim() != predictions.dim() - 1:
raise ConfigurationError(
"gold_labels must have dimension == predictions.dim() - 1 but "
"found tensor of shape: {}".format(gold_labels.size())
)
if mask is not None and mask.size() != gold_labels.size():
raise ConfigurationError(
"mask must have the same size as predictions but "
"found tensor of shape: {}".format(mask.size())
)
k = predictions.size()[1]
expanded_size = list(gold_labels.size())
expanded_size.insert(1, k)
expanded_gold = gold_labels.unsqueeze(1).expand(expanded_size)
if mask is not None:
expanded_mask = mask.unsqueeze(1).expand(expanded_size)
masked_gold = expanded_mask * expanded_gold
masked_predictions = expanded_mask * predictions
else:
masked_gold = expanded_gold
masked_predictions = predictions
eqs = masked_gold.eq(masked_predictions)
matches_per_question = eqs.min(dim=2)[0]
some_match = matches_per_question.max(dim=1)[0]
correct = some_match.sum().item()
_total_count = predictions.size()[0]
_correct_count = correct
if is_distributed():
correct_count = torch.tensor(_correct_count, device=device)
total_count = torch.tensor(_total_count, device=device)
dist.all_reduce(correct_count, op=dist.ReduceOp.SUM)
dist.all_reduce(total_count, op=dist.ReduceOp.SUM)
_correct_count = correct_count.item()
_total_count = total_count.item()
self.correct_count += _correct_count
self.total_count += _total_count
def get_metric(self, reset: bool = False):
"""
# Returns
The accumulated accuracy.
"""
if self.total_count > 0:
accuracy = self.correct_count / self.total_count
else:
accuracy = 0
if reset:
self.reset()
return {"accuracy": accuracy}
@overrides
def reset(self):
self.correct_count = 0.0
self.total_count = 0.0
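# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original AllenNLP
# source). `predictions` holds k candidate sequences per example; an example
# counts as correct if any candidate matches the gold sequence exactly.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    metric = SequenceAccuracy()
    gold = torch.tensor([[1, 2, 3], [4, 5, 6]])
    # k = 2 candidates per example; only the first example has an exact match.
    predictions = torch.tensor(
        [
            [[1, 2, 3], [1, 2, 4]],
            [[4, 5, 0], [4, 0, 6]],
        ]
    )
    metric(predictions, gold)
    print(metric.get_metric(reset=True))  # {'accuracy': 0.5}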
| allennlp-master | allennlp/training/metrics/sequence_accuracy.py |
from typing import Dict, List, Optional, Set, Callable
from collections import defaultdict
import torch
from allennlp.common.util import is_distributed
from allennlp.common.checks import ConfigurationError
from allennlp.nn.util import get_lengths_from_binary_sequence_mask
from allennlp.data.vocabulary import Vocabulary
from allennlp.training.metrics.metric import Metric
from allennlp.data.dataset_readers.dataset_utils.span_utils import (
bio_tags_to_spans,
bioul_tags_to_spans,
iob1_tags_to_spans,
bmes_tags_to_spans,
TypedStringSpan,
)
TAGS_TO_SPANS_FUNCTION_TYPE = Callable[[List[str], Optional[List[str]]], List[TypedStringSpan]]
@Metric.register("span_f1")
class SpanBasedF1Measure(Metric):
"""
The Conll SRL metrics are based on exact span matching. This metric
implements span-based precision and recall metrics for a BIO tagging
scheme. It will produce precision, recall and F1 measures per tag, as
well as overall statistics. Note that the implementation of this metric
is not exactly the same as the perl script used to evaluate the CONLL 2005
data - particularly, it does not consider continuations or reference spans
as constituents of the original span. However, it is a close proxy, which
can be helpful for judging model performance during training. This metric
works properly when the spans are unlabeled (i.e., your labels are
simply "B", "I", "O" if using the "BIO" label encoding).
"""
def __init__(
self,
vocabulary: Vocabulary,
tag_namespace: str = "tags",
ignore_classes: List[str] = None,
label_encoding: Optional[str] = "BIO",
tags_to_spans_function: Optional[TAGS_TO_SPANS_FUNCTION_TYPE] = None,
) -> None:
"""
# Parameters
vocabulary : `Vocabulary`, required.
A vocabulary containing the tag namespace.
tag_namespace : `str`, required.
This metric assumes that a BIO format is used in which the
labels are of the format: ["B-LABEL", "I-LABEL"].
ignore_classes : `List[str]`, optional.
Span labels which will be ignored when computing span metrics.
A "span label" is the part that comes after the BIO label, so it
would be "ARG1" for the tag "B-ARG1". For example by passing:
`ignore_classes=["V"]`
the following sequence would not consider the "V" span at index (2, 3)
when computing the precision, recall and F1 metrics.
["O", "O", "B-V", "I-V", "B-ARG1", "I-ARG1"]
        This is helpful, for instance, to avoid computing metrics for "V"
        spans in a BIO tagging scheme, which are typically not included.
label_encoding : `str`, optional (default = `"BIO"`)
The encoding used to specify label span endpoints in the sequence.
Valid options are "BIO", "IOB1", "BIOUL" or "BMES".
tags_to_spans_function : `Callable`, optional (default = `None`)
If `label_encoding` is `None`, `tags_to_spans_function` will be
used to generate spans.
"""
if label_encoding and tags_to_spans_function:
raise ConfigurationError(
"Both label_encoding and tags_to_spans_function are provided. "
'Set "label_encoding=None" explicitly to enable tags_to_spans_function.'
)
if label_encoding:
if label_encoding not in ["BIO", "IOB1", "BIOUL", "BMES"]:
raise ConfigurationError(
"Unknown label encoding - expected 'BIO', 'IOB1', 'BIOUL', 'BMES'."
)
elif tags_to_spans_function is None:
raise ConfigurationError(
"At least one of the (label_encoding, tags_to_spans_function) should be provided."
)
self._label_encoding = label_encoding
self._tags_to_spans_function = tags_to_spans_function
self._label_vocabulary = vocabulary.get_index_to_token_vocabulary(tag_namespace)
self._ignore_classes: List[str] = ignore_classes or []
# These will hold per label span counts.
self._true_positives: Dict[str, int] = defaultdict(int)
self._false_positives: Dict[str, int] = defaultdict(int)
self._false_negatives: Dict[str, int] = defaultdict(int)
def __call__(
self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
prediction_map: Optional[torch.Tensor] = None,
):
"""
# Parameters
predictions : `torch.Tensor`, required.
A tensor of predictions of shape (batch_size, sequence_length, num_classes).
gold_labels : `torch.Tensor`, required.
A tensor of integer class label of shape (batch_size, sequence_length). It must be the same
shape as the `predictions` tensor without the `num_classes` dimension.
mask : `torch.BoolTensor`, optional (default = `None`).
A masking tensor the same size as `gold_labels`.
prediction_map : `torch.Tensor`, optional (default = `None`).
A tensor of size (batch_size, num_classes) which provides a mapping from the index of predictions
to the indices of the label vocabulary. If provided, the output label at each timestep will be
`vocabulary.get_index_to_token_vocabulary(prediction_map[batch, argmax(predictions[batch, t]))`,
rather than simply `vocabulary.get_index_to_token_vocabulary(argmax(predictions[batch, t]))`.
This is useful in cases where each Instance in the dataset is associated with a different possible
            subset of labels from a large label-space (e.g., FrameNet, where each frame has a different set of
possible roles associated with it).
"""
if mask is None:
mask = torch.ones_like(gold_labels).bool()
predictions, gold_labels, mask, prediction_map = self.detach_tensors(
predictions, gold_labels, mask, prediction_map
)
num_classes = predictions.size(-1)
if (gold_labels >= num_classes).any():
raise ConfigurationError(
"A gold label passed to SpanBasedF1Measure contains an "
"id >= {}, the number of classes.".format(num_classes)
)
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
argmax_predictions = predictions.max(-1)[1]
if prediction_map is not None:
argmax_predictions = torch.gather(prediction_map, 1, argmax_predictions)
gold_labels = torch.gather(prediction_map, 1, gold_labels.long())
argmax_predictions = argmax_predictions.float()
# Iterate over timesteps in batch.
batch_size = gold_labels.size(0)
for i in range(batch_size):
sequence_prediction = argmax_predictions[i, :]
sequence_gold_label = gold_labels[i, :]
length = sequence_lengths[i]
if length == 0:
# It is possible to call this metric with sequences which are
# completely padded. These contribute nothing, so we skip these rows.
continue
predicted_string_labels = [
self._label_vocabulary[label_id]
for label_id in sequence_prediction[:length].tolist()
]
gold_string_labels = [
self._label_vocabulary[label_id]
for label_id in sequence_gold_label[:length].tolist()
]
tags_to_spans_function: TAGS_TO_SPANS_FUNCTION_TYPE
# `label_encoding` is empty and `tags_to_spans_function` is provided.
if self._label_encoding is None and self._tags_to_spans_function:
tags_to_spans_function = self._tags_to_spans_function
# Search by `label_encoding`.
elif self._label_encoding == "BIO":
tags_to_spans_function = bio_tags_to_spans
elif self._label_encoding == "IOB1":
tags_to_spans_function = iob1_tags_to_spans
elif self._label_encoding == "BIOUL":
tags_to_spans_function = bioul_tags_to_spans
elif self._label_encoding == "BMES":
tags_to_spans_function = bmes_tags_to_spans
else:
raise ValueError(f"Unexpected label encoding scheme '{self._label_encoding}'")
predicted_spans = tags_to_spans_function(predicted_string_labels, self._ignore_classes)
gold_spans = tags_to_spans_function(gold_string_labels, self._ignore_classes)
predicted_spans = self._handle_continued_spans(predicted_spans)
gold_spans = self._handle_continued_spans(gold_spans)
for span in predicted_spans:
if span in gold_spans:
self._true_positives[span[0]] += 1
gold_spans.remove(span)
else:
self._false_positives[span[0]] += 1
# These spans weren't predicted.
for span in gold_spans:
self._false_negatives[span[0]] += 1
@staticmethod
def _handle_continued_spans(spans: List[TypedStringSpan]) -> List[TypedStringSpan]:
"""
The official CONLL 2012 evaluation script for SRL treats continued spans (i.e spans which
have a `C-` prepended to another valid tag) as part of the span that they are continuing.
This is basically a massive hack to allow SRL models which produce a linear sequence of
predictions to do something close to structured prediction. However, this means that to
compute the metric, these continuation spans need to be merged into the span to which
they refer. The way this is done is to simply consider the span for the continued argument
to start at the start index of the first occurrence of the span and end at the end index
of the last occurrence of the span. Handling this is important, because predicting continued
        spans is difficult and typically will affect the overall average F1 score by ~2 points.
# Parameters
spans : `List[TypedStringSpan]`, required.
A list of (label, (start, end)) spans.
# Returns
A `List[TypedStringSpan]` with continued arguments replaced with a single span.
"""
span_set: Set[TypedStringSpan] = set(spans)
continued_labels: List[str] = [
label[2:] for (label, span) in span_set if label.startswith("C-")
]
for label in continued_labels:
continued_spans = {span for span in span_set if label in span[0]}
span_start = min(span[1][0] for span in continued_spans)
span_end = max(span[1][1] for span in continued_spans)
replacement_span: TypedStringSpan = (label, (span_start, span_end))
span_set.difference_update(continued_spans)
span_set.add(replacement_span)
return list(span_set)
def get_metric(self, reset: bool = False):
"""
# Returns
`Dict[str, float]`
A Dict per label containing following the span based metrics:
- precision : `float`
- recall : `float`
- f1-measure : `float`
Additionally, an `overall` key is included, which provides the precision,
recall and f1-measure for all spans.
"""
if is_distributed():
raise RuntimeError(
"Distributed aggregation for SpanBasedF1Measure is currently not supported."
)
all_tags: Set[str] = set()
all_tags.update(self._true_positives.keys())
all_tags.update(self._false_positives.keys())
all_tags.update(self._false_negatives.keys())
all_metrics = {}
for tag in all_tags:
precision, recall, f1_measure = self._compute_metrics(
self._true_positives[tag], self._false_positives[tag], self._false_negatives[tag]
)
precision_key = "precision" + "-" + tag
recall_key = "recall" + "-" + tag
f1_key = "f1-measure" + "-" + tag
all_metrics[precision_key] = precision
all_metrics[recall_key] = recall
all_metrics[f1_key] = f1_measure
# Compute the precision, recall and f1 for all spans jointly.
precision, recall, f1_measure = self._compute_metrics(
sum(self._true_positives.values()),
sum(self._false_positives.values()),
sum(self._false_negatives.values()),
)
all_metrics["precision-overall"] = precision
all_metrics["recall-overall"] = recall
all_metrics["f1-measure-overall"] = f1_measure
if reset:
self.reset()
return all_metrics
@staticmethod
def _compute_metrics(true_positives: int, false_positives: int, false_negatives: int):
precision = true_positives / (true_positives + false_positives + 1e-13)
recall = true_positives / (true_positives + false_negatives + 1e-13)
f1_measure = 2.0 * (precision * recall) / (precision + recall + 1e-13)
return precision, recall, f1_measure
def reset(self):
self._true_positives = defaultdict(int)
self._false_positives = defaultdict(int)
self._false_negatives = defaultdict(int)
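# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original AllenNLP
# source). It builds a tiny BIO tag vocabulary by hand, turns a predicted tag
# sequence into one-hot "logits", and scores it against a gold sequence. The
# tag names below are made up purely for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    vocab = Vocabulary()
    for tag in ["O", "B-ARG1", "I-ARG1", "B-V"]:
        vocab.add_token_to_namespace(tag, namespace="tags")
    metric = SpanBasedF1Measure(vocab, tag_namespace="tags", label_encoding="BIO")
    def encode(tag_sequence):
        return torch.tensor(
            [vocab.get_token_index(tag, namespace="tags") for tag in tag_sequence]
        )
    gold = encode(["B-ARG1", "I-ARG1", "O", "B-V"]).unsqueeze(0)
    # Predict exactly the gold tags, expressed as one-hot scores over the namespace.
    predictions = torch.nn.functional.one_hot(
        gold, num_classes=vocab.get_vocab_size("tags")
    ).float()
    metric(predictions, gold)
    print(metric.get_metric(reset=True)["f1-measure-overall"])  # ~1.0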
| allennlp-master | allennlp/training/metrics/span_based_f1_measure.py |
from typing import Optional
from overrides import overrides
import torch
import torch.distributed as dist
from sklearn import metrics
from allennlp.common.util import is_distributed
from allennlp.common.checks import ConfigurationError
from allennlp.training.metrics.metric import Metric
@Metric.register("auc")
class Auc(Metric):
"""
The AUC Metric measures the area under the receiver-operating characteristic
(ROC) curve for binary classification problems.
"""
def __init__(self, positive_label=1):
super().__init__()
self._positive_label = positive_label
self._all_predictions = torch.FloatTensor()
self._all_gold_labels = torch.LongTensor()
def __call__(
self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.BoolTensor] = None,
):
"""
# Parameters
predictions : `torch.Tensor`, required.
A one-dimensional tensor of prediction scores of shape (batch_size).
gold_labels : `torch.Tensor`, required.
A one-dimensional label tensor of shape (batch_size), with {1, 0}
entries for positive and negative class. If it's not binary,
`positive_label` should be passed in the initialization.
mask : `torch.BoolTensor`, optional (default = `None`).
A one-dimensional label tensor of shape (batch_size).
"""
predictions, gold_labels, mask = self.detach_tensors(predictions, gold_labels, mask)
# Sanity checks.
if gold_labels.dim() != 1:
raise ConfigurationError(
"gold_labels must be one-dimensional, "
"but found tensor of shape: {}".format(gold_labels.size())
)
if predictions.dim() != 1:
raise ConfigurationError(
"predictions must be one-dimensional, "
"but found tensor of shape: {}".format(predictions.size())
)
unique_gold_labels = torch.unique(gold_labels)
if unique_gold_labels.numel() > 2:
raise ConfigurationError(
"AUC can be used for binary tasks only. gold_labels has {} unique labels, "
"expected at maximum 2.".format(unique_gold_labels.numel())
)
gold_labels_is_binary = set(unique_gold_labels.tolist()) <= {0, 1}
if not gold_labels_is_binary and self._positive_label not in unique_gold_labels:
raise ConfigurationError(
"gold_labels should be binary with 0 and 1 or initialized positive_label "
"{} should be present in gold_labels".format(self._positive_label)
)
if mask is None:
batch_size = gold_labels.shape[0]
mask = torch.ones(batch_size, device=gold_labels.device).bool()
self._all_predictions = self._all_predictions.to(predictions.device)
self._all_gold_labels = self._all_gold_labels.to(gold_labels.device)
self._all_predictions = torch.cat(
[self._all_predictions, torch.masked_select(predictions, mask).float()], dim=0
)
self._all_gold_labels = torch.cat(
[self._all_gold_labels, torch.masked_select(gold_labels, mask).long()], dim=0
)
if is_distributed():
world_size = dist.get_world_size()
device = gold_labels.device
# Check if batch lengths are equal.
_all_batch_lengths = [torch.tensor(0) for i in range(world_size)]
dist.all_gather(
_all_batch_lengths, torch.tensor(len(self._all_predictions), device=device)
)
_all_batch_lengths = [batch_length.item() for batch_length in _all_batch_lengths]
if len(set(_all_batch_lengths)) > 1:
# Subsequent dist.all_gather() calls currently do not handle tensors of different length.
raise RuntimeError(
"Distributed aggregation for AUC is currently not supported for batches of unequal length."
)
_all_predictions = [
torch.zeros(self._all_predictions.shape, device=device) for i in range(world_size)
]
_all_gold_labels = [
torch.zeros(self._all_gold_labels.shape, device=device, dtype=torch.long)
for i in range(world_size)
]
dist.all_gather(_all_predictions, self._all_predictions)
dist.all_gather(_all_gold_labels, self._all_gold_labels)
self._all_predictions = torch.cat(_all_predictions, dim=0)
self._all_gold_labels = torch.cat(_all_gold_labels, dim=0)
def get_metric(self, reset: bool = False):
if self._all_gold_labels.shape[0] == 0:
return 0.5
false_positive_rates, true_positive_rates, _ = metrics.roc_curve(
self._all_gold_labels.cpu().numpy(),
self._all_predictions.cpu().numpy(),
pos_label=self._positive_label,
)
auc = metrics.auc(false_positive_rates, true_positive_rates)
if reset:
self.reset()
return auc
@overrides
def reset(self):
self._all_predictions = torch.FloatTensor()
self._all_gold_labels = torch.LongTensor()
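# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original AllenNLP
# source). It feeds one batch of scores and binary gold labels to the metric
# and prints the area under the ROC curve (0.75 for this classic toy example).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    metric = Auc()
    scores = torch.tensor([0.1, 0.4, 0.35, 0.8])
    labels = torch.tensor([0, 0, 1, 1])
    metric(scores, labels)
    print(metric.get_metric(reset=True))  # 0.75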
| allennlp-master | allennlp/training/metrics/auc.py |
from overrides import overrides
import torch
from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import LearningRateScheduler
@LearningRateScheduler.register("polynomial_decay")
class PolynomialDecay(LearningRateScheduler):
"""
Implements polynomial decay Learning rate scheduling. The learning rate is first
linearly increased for the first `warmup_steps` training steps. Then it is decayed for
`total_steps` - `warmup_steps` from the initial learning rate to `end_learning_rate` using a polynomial
of degree `power`.
Formally,
    `lr` = (`initial_lr` - `end_learning_rate`) *
    ((`total_steps` - `steps`)/(`total_steps` - `warmup_steps`)) ** `power` + `end_learning_rate`
    # Parameters
    num_epochs : `int`, required
        The number of epochs the schedule is applied for. Together with `num_steps_per_epoch`
        this determines `total_steps = num_epochs * num_steps_per_epoch`.
    num_steps_per_epoch : `int`, required
        The number of steps (batches) in each training epoch.
    warmup_steps : `int`, optional (default = `0`)
        The number of steps to linearly increase the learning rate.
    power : `float`, optional (default = `1.0`)
        The power of the polynomial used for decaying.
    end_learning_rate : `float`, optional (default = `0.0`)
        Final learning rate to decay towards.
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
num_epochs: int,
num_steps_per_epoch: int,
power=1.0,
warmup_steps=0,
end_learning_rate=0.0,
last_epoch: int = -1,
):
super().__init__(optimizer, last_epoch)
self.power = power
self.warmup_steps = warmup_steps
self.total_steps = num_epochs * num_steps_per_epoch
self.end_learning_rate = end_learning_rate
self.steps = 0
self.step_batch(0)
@overrides
def get_values(self):
if self.warmup_steps > 0 and self.steps < self.warmup_steps:
f = self.steps / self.warmup_steps
return [f * lr for lr in self.base_values]
if self.steps >= self.total_steps:
return [self.end_learning_rate for _ in self.base_values]
current_decay_steps = self.total_steps - self.steps
total_decay_steps = self.total_steps - self.warmup_steps
f = (current_decay_steps / total_decay_steps) ** self.power
return [
f * (lr - self.end_learning_rate) + self.end_learning_rate for lr in self.base_values
]
@overrides
def step(self, metric: float = None) -> None:
pass
@overrides
def step_batch(self, batch_num_total: int = None) -> None:
if batch_num_total is None:
self.steps += 1
else:
self.steps = batch_num_total
for param_group, lr in zip(self.optimizer.param_groups, self.get_values()):
param_group[self.param_group_field] = lr
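# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original AllenNLP
# source). It wraps a plain SGD optimizer and prints the learning rate over a
# short, made-up schedule (2 epochs x 5 steps with 3 warmup steps) so that the
# warmup-then-decay shape is visible.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = PolynomialDecay(
        optimizer, num_epochs=2, num_steps_per_epoch=5, power=2.0, warmup_steps=3
    )
    for step in range(10):
        scheduler.step_batch(step)
        print(step, optimizer.param_groups[0]["lr"])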
| allennlp-master | allennlp/training/learning_rate_schedulers/polynomial_decay.py |
import logging
from overrides import overrides
import numpy as np
import torch
from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import LearningRateScheduler
logger = logging.getLogger(__name__)
@LearningRateScheduler.register("cosine")
class CosineWithRestarts(LearningRateScheduler):
"""
Cosine annealing with restarts.
This is described in the paper https://arxiv.org/abs/1608.03983. Note that early
stopping should typically be avoided when using this schedule.
Registered as a `LearningRateScheduler` with name "cosine".
# Parameters
optimizer : `torch.optim.Optimizer`
This argument does not get an entry in a configuration file for the object.
t_initial : `int`
The number of iterations (epochs) within the first cycle.
t_mul : `float`, optional (default=`1`)
Determines the number of iterations (epochs) in the i-th decay cycle,
which is the length of the last cycle multiplied by `t_mul`.
eta_min : `float`, optional (default=`0`)
The minimum learning rate.
eta_mul : `float`, optional (default=`1`)
Determines the initial learning rate for the i-th decay cycle, which is the
        last initial learning rate multiplied by `eta_mul`.
last_epoch : `int`, optional (default=`-1`)
The index of the last epoch. This is used when restarting.
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
t_initial: int,
t_mul: float = 1.0,
eta_min: float = 0.0,
eta_mul: float = 1.0,
last_epoch: int = -1,
) -> None:
assert t_initial > 0
assert eta_min >= 0
if t_initial == 1 and t_mul == 1 and eta_mul == 1:
logger.warning(
"Cosine annealing scheduler will have no effect on the learning "
"rate since t_initial = t_mul = eta_mul = 1."
)
self.t_initial = t_initial
self.t_mul = t_mul
self.eta_min = eta_min
self.eta_mul = eta_mul
self._last_restart: int = 0
self._cycle_counter: int = 0
self._cycle_len: int = t_initial
self._n_restarts: int = 0
super().__init__(optimizer, last_epoch)
@overrides
def get_values(self):
"""Get updated learning rate."""
if self.last_epoch == -1:
return self.base_values
step = self.last_epoch + 1
self._cycle_counter = step - self._last_restart
if self._cycle_counter % self._cycle_len == 0:
self._n_restarts += 1
self._cycle_counter = 0
self._last_restart = step
base_lrs = [lr * self.eta_mul ** self._n_restarts for lr in self.base_values]
self._cycle_len = int(self.t_initial * self.t_mul ** self._n_restarts)
lrs = [
self.eta_min
+ ((lr - self.eta_min) / 2)
* (np.cos(np.pi * (self._cycle_counter % self._cycle_len) / self._cycle_len) + 1)
for lr in base_lrs
]
return lrs
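# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original AllenNLP
# source). With t_initial=4 and the default t_mul=1, the cosine curve anneals
# the learning rate over 4 epochs and then restarts.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = CosineWithRestarts(optimizer, t_initial=4)
    for epoch in range(8):
        scheduler.step()  # call once at the end of each epoch
        print(epoch, optimizer.param_groups[0]["lr"])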
| allennlp-master | allennlp/training/learning_rate_schedulers/cosine.py |
import logging
from typing import List, Optional
from overrides import overrides
import torch
from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import LearningRateScheduler
logger = logging.getLogger(__name__)
@LearningRateScheduler.register("slanted_triangular")
class SlantedTriangular(LearningRateScheduler):
"""
Implements the Slanted Triangular Learning Rate schedule with optional gradual
unfreezing and discriminative fine-tuning. The schedule corresponds to first
linearly increasing the learning rate over some number of epochs, and then linearly
decreasing it over the remaining epochs.
If we gradually unfreeze, then in the first epoch of training, only the top
layer is trained; in the second epoch, the top two layers are trained, etc.
During freezing, the learning rate is increased and annealed over one epoch.
    After freezing has finished, the learning rate is increased and annealed over
the remaining training iterations.
Note that with this schedule, early stopping should typically be avoided.
Registered as a `LearningRateScheduler` with name "slanted_triangular".
# Parameters
optimizer : `torch.optim.Optimizer`
This argument does not get an entry in a configuration file for the object.
num_epochs : `int`, required.
The total number of epochs for which the model should be trained.
num_steps_per_epoch : `Optional[int]`, optional (default = `None`)
The number of steps (updates, batches) per training epoch.
cut_frac : `float`, optional (default = `0.1`).
The fraction of the steps to increase the learning rate.
    ratio : `int`, optional (default = `32`).
The ratio of the smallest to the (largest) base learning rate.
gradual_unfreezing : `bool`, optional (default = `False`).
Whether gradual unfreezing should be used.
discriminative_fine_tuning : `bool`, optional (default = `False`).
Whether discriminative fine-tuning (different learning rates per layer)
are used.
decay_factor : `float`, optional (default = `0.38`).
The decay factor by which the learning rate is reduced with
discriminative fine-tuning when going a layer deeper.
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
num_epochs: int,
num_steps_per_epoch: Optional[int] = None,
cut_frac: float = 0.1,
ratio: int = 32,
last_epoch: int = -1,
gradual_unfreezing: bool = False,
discriminative_fine_tuning: bool = False,
decay_factor: float = 0.38,
) -> None:
self.num_epochs = num_epochs
self.num_steps_per_epoch = num_steps_per_epoch
self.cut_frac = cut_frac
self.ratio = ratio
self.gradual_unfreezing = gradual_unfreezing
self.freezing_current = self.gradual_unfreezing
self.is_first_epoch = True
# track the actual number of steps for each epoch
self.batch_num_total_epoch_end: List[int] = []
if self.gradual_unfreezing:
assert not optimizer.param_groups[-1]["params"], "The default group should be empty."
if self.gradual_unfreezing or discriminative_fine_tuning:
assert len(optimizer.param_groups) > 2, (
"There should be at least 3 param_groups (2 + empty default group)"
" for gradual unfreezing / discriminative fine-tuning to make sense."
)
super().__init__(optimizer, last_epoch)
self.step()
if discriminative_fine_tuning:
            # skip the last param_group if it has no parameters
exponent = 0
for i in range(len(self.base_values) - 1, -1, -1):
param_group = optimizer.param_groups[i]
if param_group["params"]:
param_group["lr"] = self.base_values[i] * decay_factor ** exponent
self.base_values[i] = param_group["lr"]
exponent += 1
# set up for the first batch
self.last_batch_num_total = -1
self.step_batch(0)
@overrides
def step(self, metric: float = None) -> None:
self.last_epoch += 1
if len(self.batch_num_total_epoch_end) == 0:
self.batch_num_total_epoch_end.append(0)
else:
self.batch_num_total_epoch_end.append(self.last_batch_num_total)
if self.gradual_unfreezing:
# the method is called once when initialising before the
# first epoch (epoch -1) and then always at the end of each
# epoch; so the first time, with epoch id -1, we want to set
# up for epoch #1; the second time, with epoch id 0,
# we want to set up for epoch #2, etc.
if self.is_first_epoch:
num_layers_to_unfreeze = 1
self.is_first_epoch = False
else:
# `last_epoch` has now been incremented, so it's set to the index of
# the current epoch. So, if we're now on epoch index 1 (the 2nd epoch),
                # and we want to unfreeze the top 2 layers, we set
# `num_layers_to_unfreeze = 2 = last_epoch + 1`.
num_layers_to_unfreeze = self.last_epoch + 1
if num_layers_to_unfreeze >= len(self.optimizer.param_groups) - 1:
logger.info("Gradual unfreezing finished. Training all layers.")
self.freezing_current = False
else:
logger.info(
f"Gradual unfreezing. Training only the top {num_layers_to_unfreeze} layers."
)
for i, param_group in enumerate(reversed(self.optimizer.param_groups)):
for param in param_group["params"]:
# i = 0 is the default group; we care about i > 0
param.requires_grad = bool(i <= num_layers_to_unfreeze)
def step_batch(self, batch_num_total: int = None):
if batch_num_total is None:
batch_num_total = self.last_batch_num_total + 1
self.last_batch_num_total = batch_num_total
for param_group, learning_rate in zip(self.optimizer.param_groups, self.get_values()):
param_group["lr"] = learning_rate
def get_values(self):
# get the actual number of batches per epoch seen in training
if len(self.batch_num_total_epoch_end) > 1:
# have finished an epoch
actual_num_steps_per_epoch = int(
self.batch_num_total_epoch_end[-1] / (len(self.batch_num_total_epoch_end) - 1)
)
else:
actual_num_steps_per_epoch = max(
self.num_steps_per_epoch or 1, self.last_batch_num_total
)
if self.freezing_current:
# if we are still freezing layers, we restrict the schedule to the current epoch
num_steps = actual_num_steps_per_epoch
step = min(self.last_batch_num_total - self.batch_num_total_epoch_end[-1], num_steps)
else:
# otherwise we use the schedule for the rest of training
if not self.gradual_unfreezing:
frozen_steps = 0
else:
num_frozen_epochs = len(self.optimizer.param_groups) - 2
frozen_steps = self.batch_num_total_epoch_end[num_frozen_epochs]
num_steps = self.num_epochs * actual_num_steps_per_epoch - frozen_steps
step = min(self.last_batch_num_total - frozen_steps, num_steps)
cut = int(num_steps * self.cut_frac)
prop = step / cut if step < cut else 1 - (step - cut) / (num_steps - cut)
return [lr * (1 + prop * (self.ratio - 1)) / self.ratio for lr in self.base_values]
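# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original AllenNLP
# source). Gradual unfreezing and discriminative fine-tuning are left off, so
# a plain single-group optimizer is enough; `step_batch()` is called once per
# batch and `step()` once per epoch, mirroring what the trainer does.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = SlantedTriangular(optimizer, num_epochs=2, num_steps_per_epoch=10)
    for epoch in range(2):
        for _ in range(10):
            scheduler.step_batch()
            print(epoch, optimizer.param_groups[0]["lr"])
        scheduler.step()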
| allennlp-master | allennlp/training/learning_rate_schedulers/slanted_triangular.py |
"""
AllenNLP uses most
`PyTorch learning rate schedulers <https://pytorch.org/docs/master/optim.html#how-to-adjust-learning-rate>`_,
with a thin wrapper to allow registering them and instantiating them `from_params`.
The available learning rate schedulers from PyTorch are
* `"step" <https://pytorch.org/docs/master/optim.html#torch.optim.lr_scheduler.StepLR>`_
* `"multi_step" <https://pytorch.org/docs/master/optim.html#torch.optim.lr_scheduler.MultiStepLR>`_
* `"exponential" <https://pytorch.org/docs/master/optim.html#torch.optim.lr_scheduler.ExponentialLR>`_
* `"reduce_on_plateau" <https://pytorch.org/docs/master/optim.html#torch.optim.lr_scheduler.ReduceLROnPlateau>`_
In addition, AllenNLP also provides `cosine with restarts <https://arxiv.org/abs/1608.03983>`_,
a Noam schedule, a slanted triangular schedule, polynomial decay, linear warmup followed by
linear decay, and a scheduler that chains several schedules together, which are registered as
"cosine", "noam", "slanted_triangular", "polynomial_decay", "linear_with_warmup", and
"combined", respectively.
"""
from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import (
LearningRateScheduler,
StepLearningRateScheduler,
MultiStepLearningRateScheduler,
ExponentialLearningRateScheduler,
ReduceOnPlateauLearningRateScheduler,
)
from allennlp.training.learning_rate_schedulers.combined import CombinedLearningRateScheduler
from allennlp.training.learning_rate_schedulers.cosine import CosineWithRestarts
from allennlp.training.learning_rate_schedulers.noam import NoamLR
from allennlp.training.learning_rate_schedulers.slanted_triangular import SlantedTriangular
from allennlp.training.learning_rate_schedulers.polynomial_decay import PolynomialDecay
from allennlp.training.learning_rate_schedulers.linear_with_warmup import LinearWithWarmup
| allennlp-master | allennlp/training/learning_rate_schedulers/__init__.py |
from overrides import overrides
import torch
from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import LearningRateScheduler
@LearningRateScheduler.register("noam")
class NoamLR(LearningRateScheduler):
"""
Implements the Noam Learning rate schedule. This corresponds to increasing the learning rate
linearly for the first `warmup_steps` training steps, and decreasing it thereafter proportionally
to the inverse square root of the step number, scaled by the inverse square root of the
dimensionality of the model. Time will tell if this is just madness or it's actually important.
Registered as a `LearningRateScheduler` with name "noam".
# Parameters
optimizer : `torch.optim.Optimizer`
This argument does not get an entry in a configuration file for the object.
model_size : `int`, required.
The hidden size parameter which dominates the number of parameters in your model.
warmup_steps : `int`, required.
The number of steps to linearly increase the learning rate.
factor : `float`, optional (default = `1.0`).
The overall scale factor for the learning rate decay.
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
model_size: int,
warmup_steps: int,
factor: float = 1.0,
last_epoch: int = -1,
) -> None:
self.warmup_steps = warmup_steps
self.factor = factor
self.model_size = model_size
super().__init__(optimizer, last_epoch=last_epoch)
@overrides
def step(self, metric: float = None) -> None:
pass
def step_batch(self, batch_num_total: int = None) -> None:
if batch_num_total is None:
self.last_epoch += 1 # type: ignore
else:
self.last_epoch = batch_num_total
for param_group, learning_rate in zip(self.optimizer.param_groups, self.get_values()):
param_group["lr"] = learning_rate
def get_values(self):
step = max(self.last_epoch, 1)
scale = self.factor * (
self.model_size ** (-0.5) * min(step ** (-0.5), step * self.warmup_steps ** (-1.5))
)
return [scale for _ in range(len(self.base_values))]
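# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original AllenNLP
# source). The base learning rate set on the optimizer is ignored by this
# schedule; the value printed below rises linearly for `warmup_steps` batches
# and then decays with the inverse square root of the step number.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.Adam(model.parameters(), lr=1.0)
    scheduler = NoamLR(optimizer, model_size=512, warmup_steps=8)
    for step in range(1, 25):
        scheduler.step_batch(step)
        print(step, optimizer.param_groups[0]["lr"])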
| allennlp-master | allennlp/training/learning_rate_schedulers/noam.py |
from typing import Any, Dict, List, Union
from overrides import overrides
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.common.registrable import Registrable
from allennlp.training.scheduler import Scheduler
from allennlp.training.optimizers import Optimizer
class LearningRateScheduler(Scheduler, Registrable):
def __init__(self, optimizer: torch.optim.Optimizer, last_epoch: int = -1) -> None:
super().__init__(optimizer, "lr", last_epoch)
@overrides
def get_values(self):
raise NotImplementedError
class _PyTorchLearningRateSchedulerWrapper(LearningRateScheduler):
def __init__(self, lr_scheduler: torch.optim.lr_scheduler._LRScheduler) -> None:
self.lr_scheduler = lr_scheduler
def get_values(self):
return self.lr_scheduler.get_last_lr()
@overrides
def step(self, metric: float = None) -> None:
self.lr_scheduler.step()
@overrides
def state_dict(self) -> Dict[str, Any]:
return self.lr_scheduler.state_dict()
@overrides
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
self.lr_scheduler.load_state_dict(state_dict)
class _PyTorchLearningRateSchedulerWithMetricsWrapper(_PyTorchLearningRateSchedulerWrapper):
@overrides
def step(self, metric: float = None) -> None:
if metric is None:
raise ConfigurationError(
"This learning rate scheduler requires "
"a validation metric to compute the schedule and therefore "
"must be used with a validation dataset."
)
self.lr_scheduler.step(metric)
@LearningRateScheduler.register("step")
class StepLearningRateScheduler(_PyTorchLearningRateSchedulerWrapper):
"""
Registered as a `LearningRateScheduler` with name "step". The "optimizer" argument does not get
an entry in a configuration file for the object.
"""
def __init__(
self, optimizer: Optimizer, step_size: int, gamma: float = 0.1, last_epoch: int = -1
) -> None:
lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer=optimizer, step_size=step_size, gamma=gamma, last_epoch=last_epoch
)
super().__init__(lr_scheduler)
@LearningRateScheduler.register("multi_step")
class MultiStepLearningRateScheduler(_PyTorchLearningRateSchedulerWrapper):
"""
Registered as a `LearningRateScheduler` with name "multi_step". The "optimizer" argument does
not get an entry in a configuration file for the object.
"""
def __init__(
self, optimizer: Optimizer, milestones: List[int], gamma: float = 0.1, last_epoch: int = -1
) -> None:
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer=optimizer, milestones=milestones, gamma=gamma, last_epoch=last_epoch
)
super().__init__(lr_scheduler)
@LearningRateScheduler.register("exponential")
class ExponentialLearningRateScheduler(_PyTorchLearningRateSchedulerWrapper):
"""
Registered as a `LearningRateScheduler` with name "exponential". The "optimizer" argument does
not get an entry in a configuration file for the object.
"""
def __init__(self, optimizer: Optimizer, gamma: float = 0.1, last_epoch: int = -1) -> None:
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
optimizer=optimizer, gamma=gamma, last_epoch=last_epoch
)
super().__init__(lr_scheduler)
@LearningRateScheduler.register("reduce_on_plateau")
class ReduceOnPlateauLearningRateScheduler(_PyTorchLearningRateSchedulerWithMetricsWrapper):
"""
Registered as a `LearningRateScheduler` with name "reduce_on_plateau". The "optimizer" argument
does not get an entry in a configuration file for the object.
"""
def __init__(
self,
optimizer: Optimizer,
mode: str = "min",
factor: float = 0.1,
patience: int = 10,
verbose: bool = False,
threshold_mode: str = "rel",
threshold: float = 1e-4,
cooldown: int = 0,
min_lr: Union[float, List[float]] = 0,
eps: float = 1e-8,
) -> None:
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer=optimizer,
mode=mode,
factor=factor,
patience=patience,
verbose=verbose,
threshold_mode=threshold_mode,
threshold=threshold,
cooldown=cooldown,
min_lr=min_lr,
eps=eps,
)
super().__init__(lr_scheduler)
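# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original AllenNLP
# source). The wrappers above delegate to the corresponding PyTorch scheduler;
# here a "step" schedule halves the learning rate every 2 calls to `step()`
# (i.e. every 2 epochs).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    model = torch.nn.Linear(2, 2)
    sgd = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = StepLearningRateScheduler(sgd, step_size=2, gamma=0.5)
    for epoch in range(6):
        print(epoch, scheduler.get_values()[0])
        scheduler.step()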
| allennlp-master | allennlp/training/learning_rate_schedulers/learning_rate_scheduler.py |
from typing import Dict, Any, List, Tuple, Optional
from overrides import overrides
import torch
from allennlp.common.lazy import Lazy
from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import LearningRateScheduler
@LearningRateScheduler.register("combined")
class CombinedLearningRateScheduler(LearningRateScheduler):
"""
This `LearningRateScheduler` can be used to apply an arbitrary number of other schedulers
one after the other.
These schedulers are defined though the `schedulers` parameter, which takes a list
of `Tuple[int, Lazy[LearningRateScheduler]]`. The first field of the tuple, the `int`,
specifies how many epochs the corresponding scheduler will be used before the next
scheduler takes its place.
While it usually makes sense for the sum
```python
sum(n_epochs for (n_epochs, _) in schedulers)
```
to equal the total number of training epochs, it is not a requirement.
If training continues beyond the last defined scheduler, both `step()` and `step_batch()`
will be a no-op.
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
schedulers: List[Tuple[int, Lazy[LearningRateScheduler]]],
num_steps_per_epoch: Optional[int] = None,
last_epoch: int = -1,
) -> None:
super().__init__(optimizer, last_epoch=last_epoch)
self.num_steps_per_epoch = num_steps_per_epoch
self.schedulers = schedulers
# This is used to know when we need to update `self._current_scheduler`
# by comparing it to `self.last_epoch`, and so to start with it needs to
# not equal `self.last_epoch`.
self._last_epoch_updated = -2
self._current_scheduler: Optional[LearningRateScheduler] = None
self._current_scheduler_first_epoch: Optional[int] = None
# We call this here in order to initialize the current scheduler now, since some schedulers
# modify the LR when they are initialized.
self.current_scheduler
@property
def current_scheduler(self) -> Optional[LearningRateScheduler]:
if self._last_epoch_updated != self.last_epoch:
current_epoch = self.last_epoch + 1
scheduler_first_epoch, scheduler_last_epoch = 0, -1
for scheduler_epochs, lazy_scheduler in self.schedulers:
scheduler_last_epoch += scheduler_epochs
# Is it time for a new scheduler?
if current_epoch == scheduler_first_epoch or (
self._current_scheduler_first_epoch != scheduler_first_epoch
and scheduler_first_epoch <= current_epoch <= scheduler_last_epoch
):
# Reset the base values of the LR to whatever they're currently at.
for group in self.optimizer.param_groups:
group[self._initial_param_group_field] = group[self.param_group_field]
self._current_scheduler = lazy_scheduler.construct(
optimizer=self.optimizer,
num_epochs=scheduler_epochs,
num_steps_per_epoch=self.num_steps_per_epoch,
)
self._current_scheduler_first_epoch = scheduler_first_epoch
break
scheduler_first_epoch = scheduler_last_epoch + 1
else:
# If we didn't break out of the loop, then we might have trained past
# the last defined scheduler, so we're not going to use a scheduler anymore.
if current_epoch > scheduler_last_epoch:
self._current_scheduler = None
self._last_epoch_updated = self.last_epoch
return self._current_scheduler
@overrides
def state_dict(self) -> Dict[str, Any]:
current_scheduler = self.current_scheduler
return {
"last_epoch": self.last_epoch,
"num_steps_per_epoch": self.num_steps_per_epoch,
"current_scheduler": None
if current_scheduler is None
else current_scheduler.state_dict(),
}
@overrides
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
self.last_epoch = state_dict["last_epoch"]
self.num_steps_per_epoch = state_dict["num_steps_per_epoch"]
if self.current_scheduler is not None:
assert state_dict["current_scheduler"] is not None
self.current_scheduler.load_state_dict(state_dict["current_scheduler"])
@overrides
def get_values(self):
"""
This should never be called directly.
"""
raise NotImplementedError
@overrides
def step_batch(self, batch_num_total: int = None) -> None:
if self.current_scheduler is not None:
self.current_scheduler.step_batch(batch_num_total)
@overrides
def step(self, metric: float = None) -> None:
self.last_epoch += 1
self.metric = metric
if self.current_scheduler is not None:
self.current_scheduler.step(metric)
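# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original AllenNLP
# source). In a config file the sub-schedulers are built from params; here we
# assume `Lazy` can wrap a constructor directly (its `construct(**kwargs)`
# method simply forwards keyword arguments), which lets us chain 2 epochs of
# `LinearWithWarmup` followed by 3 epochs of `PolynomialDecay`.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from allennlp.training.learning_rate_schedulers import LinearWithWarmup, PolynomialDecay
    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = CombinedLearningRateScheduler(
        optimizer,
        schedulers=[(2, Lazy(LinearWithWarmup)), (3, Lazy(PolynomialDecay))],
        num_steps_per_epoch=10,
    )
    for epoch in range(5):
        for _ in range(10):
            scheduler.step_batch()
        scheduler.step()
        print(epoch, optimizer.param_groups[0]["lr"])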
| allennlp-master | allennlp/training/learning_rate_schedulers/combined.py |
import torch
from allennlp.training.learning_rate_schedulers import PolynomialDecay
from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import LearningRateScheduler
@LearningRateScheduler.register("linear_with_warmup")
class LinearWithWarmup(PolynomialDecay):
"""
Implements a learning rate scheduler that increases the learning rate to `lr` during the first
`warmup_steps` steps, and then decreases it to zero over the rest of the training steps.
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
num_epochs: int,
num_steps_per_epoch: int,
warmup_steps: int = 100,
last_epoch: int = -1,
) -> None:
super().__init__(
optimizer,
num_epochs,
num_steps_per_epoch,
power=1.0,
warmup_steps=warmup_steps,
end_learning_rate=0.0,
last_epoch=last_epoch,
)
| allennlp-master | allennlp/training/learning_rate_schedulers/linear_with_warmup.py |
import torch
from allennlp.training.momentum_schedulers.momentum_scheduler import MomentumScheduler
@MomentumScheduler.register("inverted_triangular")
class InvertedTriangular(MomentumScheduler):
"""
Adjust momentum during training according to an inverted triangle-like schedule.
The momentum starts off high, then decreases linearly for `cool_down` epochs,
until reaching `1 / ratio` th of the original value. Then the momentum increases
linearly for `warm_up` epochs until reaching its original value again. If there
are still more epochs left over to train, the momentum will stay flat at the original
value.
Registered as a `MomentumScheduler` with name "inverted_triangular". The "optimizer" argument
does not get an entry in a configuration file for the object.
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
cool_down: int,
warm_up: int,
ratio: int = 10,
last_epoch: int = -1,
) -> None:
self.cool_down = cool_down
self.warm_up = warm_up
self.ratio = ratio
super().__init__(optimizer, last_epoch)
def get_values(self):
step = self.last_epoch + 1
if step <= self.cool_down:
values = [m - (m - m / self.ratio) * (step / self.cool_down) for m in self.base_values]
elif step <= self.cool_down + self.warm_up:
values = [
(m / self.ratio) + (m - m / self.ratio) * (step - self.cool_down) / self.warm_up
for m in self.base_values
]
else:
values = self.base_values
return values
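# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original AllenNLP
# source). The optimizer must expose a "momentum" entry in its param groups
# (e.g. SGD with momentum); `step()` is called once per epoch, and the printed
# momentum traces the inverted triangle: down to 0.09 after 3 epochs, back up
# to 0.9 over the next 3, then flat.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    scheduler = InvertedTriangular(optimizer, cool_down=3, warm_up=3)
    for epoch in range(8):
        scheduler.step()
        print(epoch, optimizer.param_groups[0]["momentum"])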
| allennlp-master | allennlp/training/momentum_schedulers/inverted_triangular.py |
import torch
from allennlp.common.registrable import Registrable
from allennlp.training.scheduler import Scheduler
class MomentumScheduler(Scheduler, Registrable):
def __init__(self, optimizer: torch.optim.Optimizer, last_epoch: int = -1) -> None:
super().__init__(optimizer, "momentum", last_epoch)
def get_values(self) -> None:
raise NotImplementedError
| allennlp-master | allennlp/training/momentum_schedulers/momentum_scheduler.py |
from allennlp.training.momentum_schedulers.momentum_scheduler import MomentumScheduler
from allennlp.training.momentum_schedulers.inverted_triangular import InvertedTriangular
| allennlp-master | allennlp/training/momentum_schedulers/__init__.py |
from typing import List, Iterator, Dict, Tuple, Any, Type, Union
import logging
import json
import re
from contextlib import contextmanager
from pathlib import Path
import numpy
import torch
from torch.utils.hooks import RemovableHandle
from torch import Tensor
from torch import backends
from allennlp.common import Registrable, plugins
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import DatasetReader, Instance
from allennlp.data.batch import Batch
from allennlp.models import Model
from allennlp.models.archival import Archive, load_archive
from allennlp.nn import util
logger = logging.getLogger(__name__)
class Predictor(Registrable):
"""
    A `Predictor` is a thin wrapper around an AllenNLP model that handles JSON -> JSON predictions
that can be used for serving models through the web API or making predictions in bulk.
"""
def __init__(self, model: Model, dataset_reader: DatasetReader, frozen: bool = True) -> None:
if frozen:
model.eval()
self._model = model
self._dataset_reader = dataset_reader
self.cuda_device = next(self._model.named_parameters())[1].get_device()
self._token_offsets: List[Tensor] = []
def load_line(self, line: str) -> JsonDict:
"""
If your inputs are not in JSON-lines format (e.g. you have a CSV)
you can override this function to parse them correctly.
"""
return json.loads(line)
def dump_line(self, outputs: JsonDict) -> str:
"""
If you don't want your outputs in JSON-lines format
you can override this function to output them differently.
"""
return json.dumps(outputs) + "\n"
def predict_json(self, inputs: JsonDict) -> JsonDict:
instance = self._json_to_instance(inputs)
return self.predict_instance(instance)
def json_to_labeled_instances(self, inputs: JsonDict) -> List[Instance]:
"""
        Converts incoming JSON to an [`Instance`](../data/instance.md),
runs the model on the newly created instance, and adds labels to the
`Instance`s given by the model's output.
# Returns
`List[instance]`
A list of `Instance`'s.
"""
instance = self._json_to_instance(inputs)
outputs = self._model.forward_on_instance(instance)
new_instances = self.predictions_to_labeled_instances(instance, outputs)
return new_instances
def get_gradients(self, instances: List[Instance]) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
Gets the gradients of the logits with respect to the model inputs.
# Parameters
instances : `List[Instance]`
# Returns
`Tuple[Dict[str, Any], Dict[str, Any]]`
The first item is a Dict of gradient entries for each input.
The keys have the form `{grad_input_1: ..., grad_input_2: ... }`
up to the number of inputs given. The second item is the model's output.
# Notes
Takes a `JsonDict` representing the inputs of the model and converts
        them to [`Instances`](../data/instance.md), sends these through
the model [`forward`](../models/model.md#forward) function after registering hooks on the embedding
layer of the model. Calls `backward` on the logits and then removes the
hooks.
"""
# set requires_grad to true for all parameters, but save original values to
# restore them later
original_param_name_to_requires_grad_dict = {}
for param_name, param in self._model.named_parameters():
original_param_name_to_requires_grad_dict[param_name] = param.requires_grad
param.requires_grad = True
embedding_gradients: List[Tensor] = []
hooks: List[RemovableHandle] = self._register_embedding_gradient_hooks(embedding_gradients)
dataset = Batch(instances)
dataset.index_instances(self._model.vocab)
dataset_tensor_dict = util.move_to_device(dataset.as_tensor_dict(), self.cuda_device)
# To bypass "RuntimeError: cudnn RNN backward can only be called in training mode"
with backends.cudnn.flags(enabled=False):
outputs = self._model.make_output_human_readable(
self._model.forward(**dataset_tensor_dict) # type: ignore
)
predicted_logit = outputs["logits"].squeeze(0)[int(torch.argmax(outputs["probs"]))]
# Zero gradients.
# NOTE: this is actually more efficient than calling `self._model.zero_grad()`
# because it avoids a read op when the gradients are first updated below.
for p in self._model.parameters():
p.grad = None
predicted_logit.backward()
for hook in hooks:
hook.remove()
grad_dict = dict()
for idx, grad in enumerate(embedding_gradients):
key = "grad_input_" + str(idx + 1)
grad_dict[key] = grad.detach().cpu().numpy()
# restore the original requires_grad values of the parameters
for param_name, param in self._model.named_parameters():
param.requires_grad = original_param_name_to_requires_grad_dict[param_name]
return grad_dict, outputs
def get_interpretable_layer(self) -> torch.nn.Module:
"""
Returns the input/embedding layer of the model.
If the predictor wraps around a non-AllenNLP model,
this function should be overridden to specify the correct input/embedding layer.
For the cases where the input layer _is_ an embedding layer, this should be the
layer 0 of the embedder.
"""
try:
return util.find_embedding_layer(self._model)
except RuntimeError:
raise RuntimeError(
"If the model does not use `TextFieldEmbedder`, please override "
"`get_interpretable_layer` in your predictor to specify the embedding layer."
)
def get_interpretable_text_field_embedder(self) -> torch.nn.Module:
"""
Returns the first `TextFieldEmbedder` of the model.
If the predictor wraps around a non-AllenNLP model,
this function should be overridden to specify the correct embedder.
"""
try:
return util.find_text_field_embedder(self._model)
except RuntimeError:
raise RuntimeError(
"If the model does not use `TextFieldEmbedder`, please override "
"`get_interpretable_text_field_embedder` in your predictor to specify "
"the embedding layer."
)
def _register_embedding_gradient_hooks(self, embedding_gradients):
"""
Registers a backward hook on the embedding layer of the model. Used to save the gradients
of the embeddings for use in get_gradients()
When there are multiple inputs (e.g., a passage and question), the hook
will be called multiple times. We append all the embeddings gradients
to a list.
We additionally add a hook on the _forward_ pass of the model's `TextFieldEmbedder` to save
token offsets, if there are any. Having token offsets means that you're using a mismatched
token indexer, so we need to aggregate the gradients across wordpieces in a token. We do
that with a simple sum.
"""
def hook_layers(module, grad_in, grad_out):
grads = grad_out[0]
if self._token_offsets:
# If you have a mismatched indexer with multiple TextFields, it's quite possible
# that the order we deal with the gradients is wrong. We'll just take items from
# the list one at a time, and try to aggregate the gradients. If we got the order
# wrong, we should crash, so you'll know about it. If you get an error because of
# that, open an issue on github, and we'll see what we can do. The intersection of
# multiple TextFields and mismatched indexers is pretty small (currently empty, that
# I know of), so we'll ignore this corner case until it's needed.
offsets = self._token_offsets.pop(0)
span_grads, span_mask = util.batched_span_select(grads.contiguous(), offsets)
span_mask = span_mask.unsqueeze(-1)
span_grads *= span_mask # zero out paddings
span_grads_sum = span_grads.sum(2)
span_grads_len = span_mask.sum(2)
# Shape: (batch_size, num_orig_tokens, embedding_size)
grads = span_grads_sum / torch.clamp_min(span_grads_len, 1)
# All the places where the span length is zero, write in zeros.
grads[(span_grads_len == 0).expand(grads.shape)] = 0
embedding_gradients.append(grads)
def get_token_offsets(module, inputs, outputs):
offsets = util.get_token_offsets_from_text_field_inputs(inputs)
if offsets is not None:
self._token_offsets.append(offsets)
hooks = []
text_field_embedder = self.get_interpretable_text_field_embedder()
hooks.append(text_field_embedder.register_forward_hook(get_token_offsets))
embedding_layer = self.get_interpretable_layer()
hooks.append(embedding_layer.register_backward_hook(hook_layers))
return hooks
@contextmanager
def capture_model_internals(self, module_regex: str = ".*") -> Iterator[dict]:
"""
Context manager that captures the internal-module outputs of
this predictor's model. The idea is that you could use it as follows:
```
with predictor.capture_model_internals() as internals:
outputs = predictor.predict_json(inputs)
return {**outputs, "model_internals": internals}
```
"""
results = {}
hooks = []
# First we'll register hooks to add the outputs of each module to the results dict.
def add_output(idx: int):
def _add_output(mod, _, outputs):
results[idx] = {"name": str(mod), "output": sanitize(outputs)}
return _add_output
regex = re.compile(module_regex)
for idx, (name, module) in enumerate(self._model.named_modules()):
if regex.fullmatch(name) and module != self._model:
hook = module.register_forward_hook(add_output(idx))
hooks.append(hook)
# If you capture the return value of the context manager, you get the results dict.
yield results
# And then when you exit the context we remove all the hooks.
for hook in hooks:
hook.remove()
def predict_instance(self, instance: Instance) -> JsonDict:
outputs = self._model.forward_on_instance(instance)
return sanitize(outputs)
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
"""
This function takes a model's outputs for an Instance, and it labels that instance according
to the output. For example, in classification this function labels the instance according
        to the class with the highest probability. This function is used to compute gradients
of what the model predicted. The return type is a list because in some tasks there are
multiple predictions in the output (e.g., in NER a model predicts multiple spans). In this
case, each instance in the returned list of Instances contains an individual
entity prediction as the label.
"""
raise RuntimeError("implement this method for model interpretations or attacks")
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
        Converts a JSON object into an [`Instance`](../data/instance.md)
        that can be fed to the wrapped model. Subclasses must implement this
        so that `predict_json` and `predict_batch_json` know how to build model inputs.
"""
raise NotImplementedError
def predict_batch_json(self, inputs: List[JsonDict]) -> List[JsonDict]:
instances = self._batch_json_to_instances(inputs)
return self.predict_batch_instance(instances)
def predict_batch_instance(self, instances: List[Instance]) -> List[JsonDict]:
outputs = self._model.forward_on_instances(instances)
return sanitize(outputs)
def _batch_json_to_instances(self, json_dicts: List[JsonDict]) -> List[Instance]:
"""
Converts a list of JSON objects into a list of `Instance`s.
By default, this expects that a "batch" consists of a list of JSON blobs which would
individually be predicted by `predict_json`. In order to use this method for
batch prediction, `_json_to_instance` should be implemented by the subclass, or
if the instances have some dependency on each other, this method should be overridden
directly.
"""
instances = []
for json_dict in json_dicts:
instances.append(self._json_to_instance(json_dict))
return instances
@classmethod
def from_path(
cls,
archive_path: Union[str, Path],
predictor_name: str = None,
cuda_device: int = -1,
dataset_reader_to_load: str = "validation",
frozen: bool = True,
import_plugins: bool = True,
overrides: Union[str, Dict[str, Any]] = "",
) -> "Predictor":
"""
Instantiate a `Predictor` from an archive path.
If you need more detailed configuration options, such as overrides,
please use `from_archive`.
# Parameters
archive_path : `Union[str, Path]`
The path to the archive.
predictor_name : `str`, optional (default=`None`)
Name that the predictor is registered as, or None to use the
predictor associated with the model.
cuda_device : `int`, optional (default=`-1`)
If `cuda_device` is >= 0, the model will be loaded onto the
corresponding GPU. Otherwise it will be loaded onto the CPU.
dataset_reader_to_load : `str`, optional (default=`"validation"`)
Which dataset reader to load from the archive, either "train" or
"validation".
frozen : `bool`, optional (default=`True`)
If we should call `model.eval()` when building the predictor.
import_plugins : `bool`, optional (default=`True`)
If `True`, we attempt to import plugins before loading the predictor.
This comes with additional overhead, but means you don't need to explicitly
import the modules that your predictor depends on as long as those modules
can be found by `allennlp.common.plugins.import_plugins()`.
overrides : `Union[str, Dict[str, Any]]`, optional (default = `""`)
JSON overrides to apply to the unarchived `Params` object.
# Returns
`Predictor`
A Predictor instance.
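
        # Example

        A rough usage sketch (the archive path here is hypothetical, and the JSON keys
        expected by `predict_json` depend on the predictor that gets loaded):

        ```python
        predictor = Predictor.from_path("/path/to/model.tar.gz")
        result = predictor.predict_json({"sentence": "AllenNLP is a library."})
        ```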
"""
if import_plugins:
plugins.import_plugins()
return Predictor.from_archive(
load_archive(archive_path, cuda_device=cuda_device, overrides=overrides),
predictor_name,
dataset_reader_to_load=dataset_reader_to_load,
frozen=frozen,
)
@classmethod
def from_archive(
cls,
archive: Archive,
predictor_name: str = None,
dataset_reader_to_load: str = "validation",
frozen: bool = True,
) -> "Predictor":
"""
Instantiate a `Predictor` from an [`Archive`](../models/archival.md);
that is, from the result of training a model. Optionally specify which `Predictor`
        subclass; otherwise, we use the `default_predictor` registered on the model class, or if
        none is set, the base class (i.e. `Predictor`) will be used. Optionally specify
which [`DatasetReader`](../data/dataset_readers/dataset_reader.md) should be loaded;
otherwise, the validation one will be used if it exists followed by the training dataset reader.
Optionally specify if the loaded model should be frozen, meaning `model.eval()` will be called.
"""
# Duplicate the config so that the config inside the archive doesn't get consumed
config = archive.config.duplicate()
if not predictor_name:
model_type = config.get("model").get("type")
model_class, _ = Model.resolve_class_name(model_type)
predictor_name = model_class.default_predictor
predictor_class: Type[Predictor] = (
Predictor.by_name(predictor_name) if predictor_name is not None else cls # type: ignore
)
if dataset_reader_to_load == "validation":
dataset_reader = archive.validation_dataset_reader
else:
dataset_reader = archive.dataset_reader
model = archive.model
if frozen:
model.eval()
return predictor_class(model, dataset_reader)
| allennlp-master | allennlp/predictors/predictor.py |
"""
A `Predictor` is
a wrapper for an AllenNLP `Model`
that makes JSON predictions using JSON inputs. If you
want to serve up a model through the web service
(or using `allennlp.commands.predict`), you'll need
a `Predictor` that wraps it.
"""
from allennlp.predictors.predictor import Predictor
from allennlp.predictors.sentence_tagger import SentenceTaggerPredictor
from allennlp.predictors.text_classifier import TextClassifierPredictor
| allennlp-master | allennlp/predictors/__init__.py |
from typing import List, Dict
from overrides import overrides
import numpy
from allennlp.common.util import JsonDict
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import FlagField, TextField, SequenceLabelField
from allennlp.data.tokenizers.spacy_tokenizer import SpacyTokenizer
from allennlp.models import Model
from allennlp.predictors.predictor import Predictor
@Predictor.register("sentence_tagger")
class SentenceTaggerPredictor(Predictor):
"""
Predictor for any model that takes in a sentence and returns
a single set of tags for it. In particular, it can be used with
the [`CrfTagger`](https://docs.allennlp.org/models/master/models/tagging/models/crf_tagger/)
model and also the [`SimpleTagger`](../models/simple_tagger.md) model.
Registered as a `Predictor` with name "sentence_tagger".
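
    # Example

    A minimal usage sketch (the archive path here is hypothetical):

    ```python
    from allennlp.predictors import Predictor

    predictor = Predictor.from_path("/path/to/tagger.tar.gz", "sentence_tagger")
    output = predictor.predict(sentence="Mary went to Seattle.")
    # `output` is a JSON-serializable dict which typically includes keys such as
    # "tags" and "words".
    ```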
"""
def __init__(
self, model: Model, dataset_reader: DatasetReader, language: str = "en_core_web_sm"
) -> None:
super().__init__(model, dataset_reader)
self._tokenizer = SpacyTokenizer(language=language, pos_tags=True)
def predict(self, sentence: str) -> JsonDict:
return self.predict_json({"sentence": sentence})
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like `{"sentence": "..."}`.
        Tokenizes the sentence with spaCy and converts it into an `Instance` using the dataset reader.
"""
sentence = json_dict["sentence"]
tokens = self._tokenizer.tokenize(sentence)
return self._dataset_reader.text_to_instance(tokens)
@overrides
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
"""
This function currently only handles BIOUL tags.
Imagine an NER model predicts three named entities (each one with potentially
multiple tokens). For each individual entity, we create a new Instance that has
the label set to only that entity and the rest of the tokens are labeled as outside.
We then return a list of those Instances.
For example:
```text
Mary went to Seattle to visit Microsoft Research
U-Per O O U-Loc O O B-Org L-Org
```
We create three instances.
```text
Mary went to Seattle to visit Microsoft Research
U-Per O O O O O O O
Mary went to Seattle to visit Microsoft Research
O O O U-LOC O O O O
Mary went to Seattle to visit Microsoft Research
O O O O O O B-Org L-Org
```
We additionally add a flag to these instances to tell the model to only compute loss on
non-O tags, so that we get gradients that are specific to the particular span prediction
that each instance represents.
"""
predicted_tags = outputs["tags"]
predicted_spans = []
i = 0
while i < len(predicted_tags):
tag = predicted_tags[i]
            # if it's a U, add it to the list
if tag[0] == "U":
current_tags = [t if idx == i else "O" for idx, t in enumerate(predicted_tags)]
predicted_spans.append(current_tags)
            # if it's a B, keep going until you hit an L.
elif tag[0] == "B":
begin_idx = i
while tag[0] != "L":
i += 1
tag = predicted_tags[i]
end_idx = i
current_tags = [
t if begin_idx <= idx <= end_idx else "O"
for idx, t in enumerate(predicted_tags)
]
predicted_spans.append(current_tags)
i += 1
# Creates a new instance for each contiguous tag
instances = []
for labels in predicted_spans:
new_instance = instance.duplicate()
text_field: TextField = instance["tokens"] # type: ignore
new_instance.add_field(
"tags", SequenceLabelField(labels, text_field), self._model.vocab
)
new_instance.add_field("ignore_loss_on_o_tags", FlagField(True))
instances.append(new_instance)
return instances
| allennlp-master | allennlp/predictors/sentence_tagger.py |
from typing import List, Dict
from overrides import overrides
import numpy
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
from allennlp.data.fields import LabelField
from allennlp.data.tokenizers.spacy_tokenizer import SpacyTokenizer
@Predictor.register("text_classifier")
class TextClassifierPredictor(Predictor):
"""
Predictor for any model that takes in a sentence and returns
a single class for it. In particular, it can be used with
the [`BasicClassifier`](../models/basic_classifier.md) model.
Registered as a `Predictor` with name "text_classifier".
"""
def predict(self, sentence: str) -> JsonDict:
return self.predict_json({"sentence": sentence})
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like `{"sentence": "..."}`.
        Converts the sentence into an `Instance` for the dataset reader,
        tokenizing it first if the reader does not have its own tokenizer.
"""
sentence = json_dict["sentence"]
reader_has_tokenizer = (
getattr(self._dataset_reader, "tokenizer", None) is not None
or getattr(self._dataset_reader, "_tokenizer", None) is not None
)
if not reader_has_tokenizer:
tokenizer = SpacyTokenizer()
sentence = tokenizer.tokenize(sentence)
return self._dataset_reader.text_to_instance(sentence)
@overrides
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
new_instance = instance.duplicate()
label = numpy.argmax(outputs["probs"])
new_instance.add_field("label", LabelField(int(label), skip_indexing=True))
return [new_instance]
| allennlp-master | allennlp/predictors/text_classifier.py |
from typing import Dict, Optional, List, Any
import numpy
from overrides import overrides
import torch
from torch.nn.modules.linear import Linear
import torch.nn.functional as F
from allennlp.common.checks import check_dimensions_match, ConfigurationError
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.training.metrics import CategoricalAccuracy, SpanBasedF1Measure
@Model.register("simple_tagger")
class SimpleTagger(Model):
"""
This `SimpleTagger` simply encodes a sequence of text with a stacked `Seq2SeqEncoder`, then
predicts a tag for each token in the sequence.
Registered as a `Model` with name "simple_tagger".
# Parameters
vocab : `Vocabulary`, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : `TextFieldEmbedder`, required
Used to embed the `tokens` `TextField` we get as input to the model.
encoder : `Seq2SeqEncoder`
The encoder (with its own internal stacking) that we will use in between embedding tokens
and predicting output tags.
calculate_span_f1 : `bool`, optional (default=`None`)
Calculate span-level F1 metrics during training. If this is `True`, then
`label_encoding` is required. If `None` and
label_encoding is specified, this is set to `True`.
If `None` and label_encoding is not specified, it defaults
to `False`.
label_encoding : `str`, optional (default=`None`)
Label encoding to use when calculating span f1.
Valid options are "BIO", "BIOUL", "IOB1", "BMES".
Required if `calculate_span_f1` is true.
label_namespace : `str`, optional (default=`labels`)
This is needed to compute the SpanBasedF1Measure metric, if desired.
Unless you did something unusual, the default value should be what you want.
verbose_metrics : `bool`, optional (default = `False`)
If true, metrics will be returned per label class in addition
to the overall statistics.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
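
    # Example

    A rough construction sketch (the module choices and sizes here are illustrative,
    not prescriptive):

    ```python
    from allennlp.data import Vocabulary
    from allennlp.modules.seq2seq_encoders import LstmSeq2SeqEncoder
    from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
    from allennlp.modules.token_embedders import Embedding

    vocab = Vocabulary()  # normally built from your training instances
    vocab.add_tokens_to_namespace(["O", "U-Per", "U-Loc"], namespace="labels")
    embedder = BasicTextFieldEmbedder(
        {"tokens": Embedding(embedding_dim=50, num_embeddings=vocab.get_vocab_size("tokens"))}
    )
    model = SimpleTagger(vocab, embedder, LstmSeq2SeqEncoder(input_size=50, hidden_size=100))
    ```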
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
calculate_span_f1: bool = None,
label_encoding: Optional[str] = None,
label_namespace: str = "labels",
verbose_metrics: bool = False,
initializer: InitializerApplicator = InitializerApplicator(),
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self.label_namespace = label_namespace
self.text_field_embedder = text_field_embedder
self.num_classes = self.vocab.get_vocab_size(label_namespace)
self.encoder = encoder
self._verbose_metrics = verbose_metrics
self.tag_projection_layer = TimeDistributed(
Linear(self.encoder.get_output_dim(), self.num_classes)
)
check_dimensions_match(
text_field_embedder.get_output_dim(),
encoder.get_input_dim(),
"text field embedding dim",
"encoder input dim",
)
self.metrics = {
"accuracy": CategoricalAccuracy(),
"accuracy3": CategoricalAccuracy(top_k=3),
}
# We keep calculate_span_f1 as a constructor argument for API consistency with
        # the CrfTagger, even though it is redundant in this class
# (label_encoding serves the same purpose).
if calculate_span_f1 is None:
calculate_span_f1 = label_encoding is not None
self.calculate_span_f1 = calculate_span_f1
self._f1_metric: Optional[SpanBasedF1Measure] = None
if calculate_span_f1:
if not label_encoding:
raise ConfigurationError(
"calculate_span_f1 is True, but no label_encoding was specified."
)
self._f1_metric = SpanBasedF1Measure(
vocab, tag_namespace=label_namespace, label_encoding=label_encoding
)
initializer(self)
@overrides
def forward(
self, # type: ignore
tokens: TextFieldTensors,
tags: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
ignore_loss_on_o_tags: bool = False,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
tokens : `TextFieldTensors`, required
The output of `TextField.as_array()`, which should typically be passed directly to a
`TextFieldEmbedder`. This output is a dictionary mapping keys to `TokenIndexer`
tensors. At its most basic, using a `SingleIdTokenIndexer` this is : `{"tokens":
Tensor(batch_size, num_tokens)}`. This dictionary will have the same keys as were used
for the `TokenIndexers` when you created the `TextField` representing your
sequence. The dictionary is designed to be passed directly to a `TextFieldEmbedder`,
which knows how to combine different word representations into a single vector per
token in your input.
tags : `torch.LongTensor`, optional (default = `None`)
A torch tensor representing the sequence of integer gold class labels of shape
`(batch_size, num_tokens)`.
metadata : `List[Dict[str, Any]]`, optional, (default = `None`)
metadata containing the original words in the sentence to be tagged under a 'words' key.
ignore_loss_on_o_tags : `bool`, optional (default = `False`)
If True, we compute the loss only for actual spans in `tags`, and not on `O` tokens.
This is useful for computing gradients of the loss on a _single span_, for
interpretation / attacking.
# Returns
An output dictionary consisting of:
- `logits` (`torch.FloatTensor`) :
A tensor of shape `(batch_size, num_tokens, tag_vocab_size)` representing
unnormalised log probabilities of the tag classes.
- `class_probabilities` (`torch.FloatTensor`) :
A tensor of shape `(batch_size, num_tokens, tag_vocab_size)` representing
a distribution of the tag classes per word.
- `loss` (`torch.FloatTensor`, optional) :
A scalar loss to be optimised.
"""
embedded_text_input = self.text_field_embedder(tokens)
batch_size, sequence_length, _ = embedded_text_input.size()
mask = get_text_field_mask(tokens)
encoded_text = self.encoder(embedded_text_input, mask)
logits = self.tag_projection_layer(encoded_text)
reshaped_log_probs = logits.view(-1, self.num_classes)
class_probabilities = F.softmax(reshaped_log_probs, dim=-1).view(
[batch_size, sequence_length, self.num_classes]
)
output_dict = {"logits": logits, "class_probabilities": class_probabilities}
if tags is not None:
if ignore_loss_on_o_tags:
o_tag_index = self.vocab.get_token_index("O", namespace=self.label_namespace)
tag_mask = mask & (tags != o_tag_index)
else:
tag_mask = mask
loss = sequence_cross_entropy_with_logits(logits, tags, tag_mask)
for metric in self.metrics.values():
metric(logits, tags, mask)
if self.calculate_span_f1:
self._f1_metric(logits, tags, mask) # type: ignore
output_dict["loss"] = loss
if metadata is not None:
output_dict["words"] = [x["words"] for x in metadata]
return output_dict
@overrides
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Does a simple position-wise argmax over each token, converts indices to string labels, and
adds a `"tags"` key to the dictionary with the result.
"""
all_predictions = output_dict["class_probabilities"]
all_predictions = all_predictions.cpu().data.numpy()
if all_predictions.ndim == 3:
predictions_list = [all_predictions[i] for i in range(all_predictions.shape[0])]
else:
predictions_list = [all_predictions]
all_tags = []
for predictions in predictions_list:
argmax_indices = numpy.argmax(predictions, axis=-1)
tags = [
self.vocab.get_token_from_index(x, namespace=self.label_namespace)
for x in argmax_indices
]
all_tags.append(tags)
output_dict["tags"] = all_tags
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metrics_to_return = {
metric_name: metric.get_metric(reset) for metric_name, metric in self.metrics.items()
}
if self.calculate_span_f1:
f1_dict = self._f1_metric.get_metric(reset) # type: ignore
if self._verbose_metrics:
metrics_to_return.update(f1_dict)
else:
metrics_to_return.update({x: y for x, y in f1_dict.items() if "overall" in x})
return metrics_to_return
default_predictor = "sentence_tagger"
| allennlp-master | allennlp/models/simple_tagger.py |
"""
These submodules contain the classes for AllenNLP models,
all of which are subclasses of `Model`.
"""
from allennlp.models.model import Model
from allennlp.models.archival import archive_model, load_archive, Archive
from allennlp.models.simple_tagger import SimpleTagger
from allennlp.models.basic_classifier import BasicClassifier
| allennlp-master | allennlp/models/__init__.py |
"""
`Model` is an abstract class representing
an AllenNLP model.
"""
import logging
import os
from os import PathLike
import re
from typing import Dict, List, Set, Type, Optional, Union
import numpy
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params, remove_keys_from_params
from allennlp.common.registrable import Registrable
from allennlp.data import Instance, Vocabulary
from allennlp.data.batch import Batch
from allennlp.nn import util
from allennlp.nn.regularizers import RegularizerApplicator
logger = logging.getLogger(__name__)
# When training a model, many sets of weights are saved. By default we want to
# save/load this set of weights.
_DEFAULT_WEIGHTS = "best.th"
class Model(torch.nn.Module, Registrable):
"""
This abstract class represents a model to be trained. Rather than relying completely
    on the PyTorch `Module`, we modify the output spec of `forward` to be a dictionary.
    Models built using this API are still compatible with other PyTorch models and can
be used naturally as modules within other models - outputs are dictionaries, which
can be unpacked and passed into other layers. One caveat to this is that if you
wish to use an AllenNLP model inside a Container (such as nn.Sequential), you must
interleave the models with a wrapper module which unpacks the dictionary into
a list of tensors.
In order for your model to be trained using the [`Trainer`](../training/trainer.md)
api, the output dictionary of your Model must include a "loss" key, which will be
optimised during the training process.
Finally, you can optionally implement :func:`Model.get_metrics` in order to make use
of early stopping and best-model serialization based on a validation metric in
`Trainer`. Metrics that begin with "_" will not be logged
to the progress bar by `Trainer`.
The `from_archive` method on this class is registered as a `Model` with name "from_archive".
So, if you are using a configuration file, you can specify a model as `{"type": "from_archive",
"archive_file": "/path/to/archive.tar.gz"}`, which will pull out the model from the given
location and return it.
# Parameters
vocab: `Vocabulary`
There are two typical use-cases for the `Vocabulary` in a `Model`: getting vocabulary sizes
when constructing embedding matrices or output classifiers (as the vocabulary holds the
number of classes in your output, also), and translating model output into human-readable
form.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"model", it gets specified as a top-level parameter, then is passed in to the model
separately.
regularizer: `RegularizerApplicator`, optional
If given, the `Trainer` will use this to regularize model parameters.
serialization_dir: `str`, optional
The directory in which the training output is saved to, or the directory the model is loaded from.
"""
_warn_for_unseparable_batches: Set[str] = set()
default_predictor: Optional[str] = None
def __init__(
self,
vocab: Vocabulary,
regularizer: RegularizerApplicator = None,
serialization_dir: Optional[str] = None,
) -> None:
super().__init__()
self.vocab = vocab
self._regularizer = regularizer
self.serialization_dir = serialization_dir
def get_regularization_penalty(self) -> Optional[torch.Tensor]:
"""
Computes the regularization penalty for the model.
Returns None if the model was not configured to use regularization.
"""
if self._regularizer is None:
regularization_penalty = None
else:
try:
regularization_penalty = self._regularizer(self)
if isinstance(regularization_penalty, float):
assert regularization_penalty == 0.0
regularization_penalty = torch.tensor(regularization_penalty)
except AssertionError:
raise RuntimeError("The regularizer cannot be a non-zero float.")
return regularization_penalty
def get_parameters_for_histogram_tensorboard_logging(self) -> List[str]:
"""
Returns the name of model parameters used for logging histograms to tensorboard.
"""
return [name for name, _ in self.named_parameters()]
def forward(self, *inputs) -> Dict[str, torch.Tensor]:
"""
Defines the forward pass of the model. In addition, to facilitate easy training,
this method is designed to compute a loss function defined by a user.
The input is comprised of everything required to perform a
training update, `including` labels - you define the signature here!
It is down to the user to ensure that inference can be performed
without the presence of these labels. Hence, any inputs not available at
inference time should only be used inside a conditional block.
The intended sketch of this method is as follows::
def forward(self, input1, input2, targets=None):
....
....
output1 = self.layer1(input1)
output2 = self.layer2(input2)
output_dict = {"output1": output1, "output2": output2}
if targets is not None:
# Function returning a scalar torch.Tensor, defined by the user.
loss = self._compute_loss(output1, output2, targets)
output_dict["loss"] = loss
return output_dict
# Parameters
*inputs : `Any`
Tensors comprising everything needed to perform a training update, `including` labels,
which should be optional (i.e have a default value of `None`). At inference time,
simply pass the relevant inputs, not including the labels.
# Returns
output_dict : `Dict[str, torch.Tensor]`
The outputs from the model. In order to train a model using the
`Trainer` api, you must provide a "loss" key pointing to a
scalar `torch.Tensor` representing the loss to be optimized.
"""
raise NotImplementedError
def forward_on_instance(self, instance: Instance) -> Dict[str, numpy.ndarray]:
"""
Takes an [`Instance`](../data/instance.md), which typically has raw text in it, converts
that text into arrays using this model's [`Vocabulary`](../data/vocabulary.md), passes those
arrays through `self.forward()` and `self.make_output_human_readable()` (which by default
does nothing) and returns the result. Before returning the result, we convert any
`torch.Tensors` into numpy arrays and remove the batch dimension.
"""
return self.forward_on_instances([instance])[0]
def forward_on_instances(self, instances: List[Instance]) -> List[Dict[str, numpy.ndarray]]:
"""
Takes a list of `Instances`, converts that text into arrays using this model's `Vocabulary`,
passes those arrays through `self.forward()` and `self.make_output_human_readable()` (which
by default does nothing) and returns the result. Before returning the result, we convert
any `torch.Tensors` into numpy arrays and separate the batched output into a list of
individual dicts per instance. Note that typically this will be faster on a GPU (and
conditionally, on a CPU) than repeated calls to `forward_on_instance`.
# Parameters
instances : `List[Instance]`, required
The instances to run the model on.
# Returns
A list of the models output for each instance.
"""
batch_size = len(instances)
with torch.no_grad():
cuda_device = self._get_prediction_device()
dataset = Batch(instances)
dataset.index_instances(self.vocab)
model_input = util.move_to_device(dataset.as_tensor_dict(), cuda_device)
outputs = self.make_output_human_readable(self(**model_input))
instance_separated_output: List[Dict[str, numpy.ndarray]] = [
{} for _ in dataset.instances
]
for name, output in list(outputs.items()):
if isinstance(output, torch.Tensor):
# NOTE(markn): This is a hack because 0-dim pytorch tensors are not iterable.
# This occurs with batch size 1, because we still want to include the loss in that case.
if output.dim() == 0:
output = output.unsqueeze(0)
if output.size(0) != batch_size:
self._maybe_warn_for_unseparable_batches(name)
continue
output = output.detach().cpu().numpy()
elif len(output) != batch_size:
self._maybe_warn_for_unseparable_batches(name)
continue
for instance_output, batch_element in zip(instance_separated_output, output):
instance_output[name] = batch_element
return instance_separated_output
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Takes the result of `forward` and makes it human readable. Most of the time, the only thing
this method does is convert tokens / predicted labels from tensors to strings that humans
        might actually understand. Sometimes you'll also do an argmax or something in here, too, but
that most often happens in `Model.forward`, before you compute your metrics.
This method `modifies` the input dictionary, and also `returns` the same dictionary.
By default in the base class we do nothing.
"""
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
"""
Returns a dictionary of metrics. This method will be called by
`allennlp.training.Trainer` in order to compute and use model metrics for early
stopping and model serialization. We return an empty dictionary here rather than raising
as it is not required to implement metrics for a new model. A boolean `reset` parameter is
passed, as frequently a metric accumulator will have some state which should be reset
between epochs. This is also compatible with [`Metric`s](../training/metrics/metric.md). Metrics
should be populated during the call to `forward`, with the `Metric` handling the accumulation of
the metric until this method is called.
"""
return {}
def _get_prediction_device(self) -> int:
"""
This method checks the device of the model parameters to determine the cuda_device
this model should be run on for predictions. If there are no parameters, it returns -1.
# Returns
The cuda device this model should run on for predictions.
"""
devices = {util.get_device_of(param) for param in self.parameters()}
if len(devices) > 1:
devices_string = ", ".join(str(x) for x in devices)
raise ConfigurationError(f"Parameters have mismatching cuda_devices: {devices_string}")
elif len(devices) == 1:
return devices.pop()
else:
return -1
def _maybe_warn_for_unseparable_batches(self, output_key: str):
"""
This method warns once if a user implements a model which returns a dictionary with
values which we are unable to split back up into elements of the batch. This is controlled
        by a class attribute `_warn_for_unseparable_batches` because it would be extremely verbose
otherwise.
"""
if output_key not in self._warn_for_unseparable_batches:
logger.warning(
f"Encountered the {output_key} key in the model's return dictionary which "
"couldn't be split by the batch size. Key will be ignored."
)
# We only want to warn once for this key,
# so we set this to false so we don't warn again.
self._warn_for_unseparable_batches.add(output_key)
@classmethod
def _load(
cls,
config: Params,
serialization_dir: Union[str, PathLike],
weights_file: Optional[Union[str, PathLike]] = None,
cuda_device: int = -1,
) -> "Model":
"""
Instantiates an already-trained model, based on the experiment
configuration and some optional overrides.
"""
weights_file = weights_file or os.path.join(serialization_dir, _DEFAULT_WEIGHTS)
# Load vocabulary from file
vocab_dir = os.path.join(serialization_dir, "vocabulary")
# If the config specifies a vocabulary subclass, we need to use it.
vocab_params = config.get("vocabulary", Params({}))
vocab_choice = vocab_params.pop_choice("type", Vocabulary.list_available(), True)
vocab_class, _ = Vocabulary.resolve_class_name(vocab_choice)
vocab = vocab_class.from_files(
vocab_dir, vocab_params.get("padding_token"), vocab_params.get("oov_token")
)
model_params = config.get("model")
# The experiment config tells us how to _train_ a model, including where to get pre-trained
# embeddings/weights from. We're now _loading_ the model, so those weights will already be
# stored in our model. We don't need any pretrained weight file or initializers anymore,
# and we don't want the code to look for it, so we remove it from the parameters here.
remove_keys_from_params(model_params)
model = Model.from_params(
vocab=vocab, params=model_params, serialization_dir=serialization_dir
)
# Force model to cpu or gpu, as appropriate, to make sure that the embeddings are
# in sync with the weights
if cuda_device >= 0:
model.cuda(cuda_device)
else:
model.cpu()
# If vocab+embedding extension was done, the model initialized from from_params
# and one defined by state dict in weights_file might not have same embedding shapes.
# Eg. when model embedder module was transferred along with vocab extension, the
# initialized embedding weight shape would be smaller than one in the state_dict.
# So calling model embedding extension is required before load_state_dict.
# If vocab and model embeddings are in sync, following would be just a no-op.
model.extend_embedder_vocab()
# Load state dict. We pass `strict=False` so PyTorch doesn't raise a RuntimeError
# if the state dict is missing keys because we handle this case below.
model_state = torch.load(weights_file, map_location=util.device_mapping(cuda_device))
missing_keys, unexpected_keys = model.load_state_dict(model_state, strict=False)
# Modules might define a class variable called `authorized_missing_keys`,
# a list of regex patterns, that tells us to ignore missing keys that match
# any of the patterns.
# We sometimes need this in order to load older models with newer versions of AllenNLP.
def filter_out_authorized_missing_keys(module, prefix=""):
nonlocal missing_keys
for pat in getattr(module.__class__, "authorized_missing_keys", None) or []:
missing_keys = [
k
for k in missing_keys
if k.startswith(prefix) and re.search(pat[len(prefix) :], k) is None
]
for name, child in module._modules.items():
if child is not None:
filter_out_authorized_missing_keys(child, prefix + name + ".")
filter_out_authorized_missing_keys(model)
if unexpected_keys or missing_keys:
raise RuntimeError(
f"Error loading state dict for {model.__class__.__name__}\n\t"
f"Missing keys: {missing_keys}\n\t"
f"Unexpected keys: {unexpected_keys}"
)
return model
@classmethod
def load(
cls,
config: Params,
serialization_dir: Union[str, PathLike],
weights_file: Optional[Union[str, PathLike]] = None,
cuda_device: int = -1,
) -> "Model":
"""
Instantiates an already-trained model, based on the experiment
configuration and some optional overrides.
# Parameters
config : `Params`
The configuration that was used to train the model. It should definitely
have a `model` section, and should probably have a `trainer` section
as well.
serialization_dir: `str = None`
The directory containing the serialized weights, parameters, and vocabulary
of the model.
weights_file: `str = None`
By default we load the weights from `best.th` in the serialization
directory, but you can override that value here.
cuda_device: `int = -1`
By default we load the model on the CPU, but if you want to load it
for GPU usage you can specify the id of your GPU here
# Returns
model : `Model`
The model specified in the configuration, loaded with the serialized
vocabulary and the trained weights.
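
        # Example

        A rough sketch of loading a trained model back from its serialization directory
        (the paths here are hypothetical):

        ```python
        from allennlp.common import Params
        from allennlp.models import Model

        config = Params.from_file("/path/to/serialization_dir/config.json")
        model = Model.load(config, serialization_dir="/path/to/serialization_dir")
        ```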
"""
        # Peek at the class of the model.
model_type = (
config["model"] if isinstance(config["model"], str) else config["model"]["type"]
)
# Load using an overridable _load method.
# This allows subclasses of Model to override _load.
model_class: Type[Model] = cls.by_name(model_type) # type: ignore
if not isinstance(model_class, type):
# If you're using from_archive to specify your model (e.g., for fine tuning), then you
# can't currently override the behavior of _load; we just use the default Model._load.
# If we really need to change this, we would need to implement a recursive
# get_model_class method, that recurses whenever it finds a from_archive model type.
model_class = Model
return model_class._load(config, serialization_dir, weights_file, cuda_device)
def extend_embedder_vocab(self, embedding_sources_mapping: Dict[str, str] = None) -> None:
"""
        Iterates through all embedding modules in the model and ensures they can embed
        with the extended vocab. This is required in fine-tuning or transfer learning
        scenarios where the model was trained with the original vocabulary, but during
        fine-tuning or transfer learning it needs to work with an extended vocabulary
        (original + new-data vocabulary).
# Parameters
embedding_sources_mapping : `Dict[str, str]`, optional (default = `None`)
Mapping from model_path to pretrained-file path of the embedding
modules. If pretrained-file used at time of embedding initialization
isn't available now, user should pass this mapping. Model path is
            path traversing the model attributes up to this embedding module.
Eg. "_text_field_embedder.token_embedder_tokens".
"""
# self.named_modules() gives all sub-modules (including nested children)
# The path nesting is already separated by ".": eg. parent_module_name.child_module_name
embedding_sources_mapping = embedding_sources_mapping or {}
for model_path, module in self.named_modules():
if hasattr(module, "extend_vocab"):
pretrained_file = embedding_sources_mapping.get(model_path)
module.extend_vocab(
self.vocab,
extension_pretrained_file=pretrained_file,
model_path=model_path,
)
@classmethod
def from_archive(cls, archive_file: str, vocab: Vocabulary = None) -> "Model":
"""
Loads a model from an archive file. This basically just calls
`return archival.load_archive(archive_file).model`. It exists as a method here for
convenience, and so that we can register it for easy use for fine tuning an existing model
from a config file.
If `vocab` is given, we will extend the loaded model's vocabulary using the passed vocab
object (including calling `extend_embedder_vocab`, which extends embedding layers).
"""
from allennlp.models.archival import load_archive # here to avoid circular imports
model = load_archive(archive_file).model
if vocab:
model.vocab.extend_from_vocab(vocab)
model.extend_embedder_vocab()
return model
# We can't decorate `Model` with `Model.register()`, because `Model` hasn't been defined yet. So we
# put this down here.
Model.register("from_archive", constructor="from_archive")(Model)
def remove_weights_related_keys_from_params(
params: Params, keys: List[str] = ["pretrained_file", "initializer"]
):
remove_keys_from_params(params, keys)
def remove_pretrained_embedding_params(params: Params):
"""This function only exists for backwards compatibility.
Please use `remove_weights_related_keys_from_params()` instead."""
remove_keys_from_params(params, ["pretrained_file"])
| allennlp-master | allennlp/models/model.py |
from typing import Dict, Optional
from overrides import overrides
import torch
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Seq2SeqEncoder, Seq2VecEncoder, TextFieldEmbedder
from allennlp.nn import InitializerApplicator, util
from allennlp.nn.util import get_text_field_mask
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("basic_classifier")
class BasicClassifier(Model):
"""
This `Model` implements a basic text classifier. After embedding the text into
a text field, we will optionally encode the embeddings with a `Seq2SeqEncoder`. The
resulting sequence is pooled using a `Seq2VecEncoder` and then passed to
a linear classification layer, which projects into the label space. If a
`Seq2SeqEncoder` is not provided, we will pass the embedded text directly to the
`Seq2VecEncoder`.
Registered as a `Model` with name "basic_classifier".
# Parameters
vocab : `Vocabulary`
text_field_embedder : `TextFieldEmbedder`
Used to embed the input text into a `TextField`
seq2seq_encoder : `Seq2SeqEncoder`, optional (default=`None`)
Optional Seq2Seq encoder layer for the input text.
seq2vec_encoder : `Seq2VecEncoder`
Required Seq2Vec encoder layer. If `seq2seq_encoder` is provided, this encoder
will pool its output. Otherwise, this encoder will operate directly on the output
of the `text_field_embedder`.
feedforward : `FeedForward`, optional, (default = `None`)
An optional feedforward layer to apply after the seq2vec_encoder.
dropout : `float`, optional (default = `None`)
Dropout percentage to use.
num_labels : `int`, optional (default = `None`)
Number of labels to project to in classification layer. By default, the classification layer will
project to the size of the vocabulary namespace corresponding to labels.
label_namespace : `str`, optional (default = `"labels"`)
Vocabulary namespace corresponding to labels. By default, we use the "labels" namespace.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
If provided, will be used to initialize the model parameters.
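
    # Example

    A rough construction sketch (the module choices and sizes here are illustrative,
    not prescriptive):

    ```python
    from allennlp.data import Vocabulary
    from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
    from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
    from allennlp.modules.token_embedders import Embedding

    vocab = Vocabulary()  # normally built from your training instances
    vocab.add_tokens_to_namespace(["pos", "neg"], namespace="labels")
    embedder = BasicTextFieldEmbedder(
        {"tokens": Embedding(embedding_dim=50, num_embeddings=vocab.get_vocab_size("tokens"))}
    )
    model = BasicClassifier(vocab, embedder, BagOfEmbeddingsEncoder(embedding_dim=50))
    ```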
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
seq2vec_encoder: Seq2VecEncoder,
seq2seq_encoder: Seq2SeqEncoder = None,
feedforward: Optional[FeedForward] = None,
dropout: float = None,
num_labels: int = None,
label_namespace: str = "labels",
namespace: str = "tokens",
initializer: InitializerApplicator = InitializerApplicator(),
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self._text_field_embedder = text_field_embedder
self._seq2seq_encoder = seq2seq_encoder
self._seq2vec_encoder = seq2vec_encoder
self._feedforward = feedforward
if feedforward is not None:
self._classifier_input_dim = feedforward.get_output_dim()
else:
self._classifier_input_dim = self._seq2vec_encoder.get_output_dim()
if dropout:
self._dropout = torch.nn.Dropout(dropout)
else:
self._dropout = None
self._label_namespace = label_namespace
self._namespace = namespace
if num_labels:
self._num_labels = num_labels
else:
self._num_labels = vocab.get_vocab_size(namespace=self._label_namespace)
self._classification_layer = torch.nn.Linear(self._classifier_input_dim, self._num_labels)
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward( # type: ignore
self, tokens: TextFieldTensors, label: torch.IntTensor = None
) -> Dict[str, torch.Tensor]:
"""
# Parameters
tokens : `TextFieldTensors`
From a `TextField`
label : `torch.IntTensor`, optional (default = `None`)
From a `LabelField`
# Returns
An output dictionary consisting of:
- `logits` (`torch.FloatTensor`) :
A tensor of shape `(batch_size, num_labels)` representing
unnormalized log probabilities of the label.
- `probs` (`torch.FloatTensor`) :
A tensor of shape `(batch_size, num_labels)` representing
probabilities of the label.
- `loss` : (`torch.FloatTensor`, optional) :
A scalar loss to be optimised.
"""
embedded_text = self._text_field_embedder(tokens)
mask = get_text_field_mask(tokens)
if self._seq2seq_encoder:
embedded_text = self._seq2seq_encoder(embedded_text, mask=mask)
embedded_text = self._seq2vec_encoder(embedded_text, mask=mask)
if self._dropout:
embedded_text = self._dropout(embedded_text)
if self._feedforward is not None:
embedded_text = self._feedforward(embedded_text)
logits = self._classification_layer(embedded_text)
probs = torch.nn.functional.softmax(logits, dim=-1)
output_dict = {"logits": logits, "probs": probs}
output_dict["token_ids"] = util.get_token_ids_from_text_field_tensors(tokens)
if label is not None:
loss = self._loss(logits, label.long().view(-1))
output_dict["loss"] = loss
self._accuracy(logits, label)
return output_dict
@overrides
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
        Does a simple argmax over the probabilities, converts the index to a string label, and
        adds a `"label"` key to the dictionary with the result.
"""
predictions = output_dict["probs"]
if predictions.dim() == 2:
predictions_list = [predictions[i] for i in range(predictions.shape[0])]
else:
predictions_list = [predictions]
classes = []
for prediction in predictions_list:
label_idx = prediction.argmax(dim=-1).item()
label_str = self.vocab.get_index_to_token_vocabulary(self._label_namespace).get(
label_idx, str(label_idx)
)
classes.append(label_str)
output_dict["label"] = classes
tokens = []
for instance_tokens in output_dict["token_ids"]:
tokens.append(
[
self.vocab.get_token_from_index(token_id.item(), namespace=self._namespace)
for token_id in instance_tokens
]
)
output_dict["tokens"] = tokens
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metrics = {"accuracy": self._accuracy.get_metric(reset)}
return metrics
default_predictor = "text_classifier"
| allennlp-master | allennlp/models/basic_classifier.py |
"""
Helper functions for archiving models and restoring archived models.
"""
from os import PathLike
from typing import NamedTuple, Union, Dict, Any, List, Optional
import logging
import os
import tempfile
import tarfile
import shutil
from pathlib import Path
from contextlib import contextmanager
import glob
from torch.nn import Module
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.common.params import Params
from allennlp.data.dataset_readers import DatasetReader
from allennlp.models.model import Model, _DEFAULT_WEIGHTS
logger = logging.getLogger(__name__)
class Archive(NamedTuple):
""" An archive comprises a Model and its experimental config"""
model: Model
config: Params
dataset_reader: DatasetReader
validation_dataset_reader: DatasetReader
def extract_module(self, path: str, freeze: bool = True) -> Module:
"""
This method can be used to load a module from the pretrained model archive.
        It is also used implicitly in FromParams-based construction. So instead of using standard
        params to construct a module, you can load a pretrained module from the model
        archive directly. For example, instead of using params like {"type": "module_type", ...}, you
can use the following template::
{
"_pretrained": {
"archive_file": "../path/to/model.tar.gz",
"path": "path.to.module.in.model",
"freeze": False
}
}
        If you use this feature with FromParams, take care of the following caveat: a call to
        initializer(self) at the end of the model constructor can potentially wipe the transferred
        parameters by reinitializing them. This can happen if you have set up an initializer regex
        that also matches parameters of the transferred module. To safeguard against this, you can
        either update your initializer regex to prevent a conflicting match or add an extra
        initializer::
[
[".*transferred_module_name.*", "prevent"]]
]
# Parameters
path : `str`, required
Path of target module to be loaded from the model.
Eg. "_textfield_embedder.token_embedder_tokens"
freeze : `bool`, optional (default=`True`)
Whether to freeze the module parameters or not.
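
        # Example

        A direct (non-FromParams) usage sketch; the module path here follows the example
        above and is hypothetical, since it depends on how the archived model names its
        submodules:

        ```python
        archive = load_archive("/path/to/model.tar.gz")
        embedder = archive.extract_module("_textfield_embedder.token_embedder_tokens", freeze=True)
        ```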
"""
modules_dict = {path: module for path, module in self.model.named_modules()}
module = modules_dict.get(path)
if not module:
raise ConfigurationError(
f"You asked to transfer module at path {path} from "
f"the model {type(self.model)}. But it's not present."
)
if not isinstance(module, Module):
raise ConfigurationError(
f"The transferred object from model {type(self.model)} at path "
f"{path} is not a PyTorch Module."
)
for parameter in module.parameters(): # type: ignore
parameter.requires_grad_(not freeze)
return module
# We archive a model by creating a tar.gz file with its weights, config, and vocabulary.
#
# These constants are the *known names* under which we archive them.
CONFIG_NAME = "config.json"
_WEIGHTS_NAME = "weights.th"
def verify_include_in_archive(include_in_archive: Optional[List[str]] = None):
if include_in_archive is None:
return
saved_names = [CONFIG_NAME, _WEIGHTS_NAME, _DEFAULT_WEIGHTS, "vocabulary"]
for archival_target in include_in_archive:
if archival_target in saved_names:
raise ConfigurationError(
f"{', '.join(saved_names)} are saved names and cannot be used for include_in_archive."
)
def archive_model(
serialization_dir: Union[str, PathLike],
weights: str = _DEFAULT_WEIGHTS,
archive_path: Union[str, PathLike] = None,
include_in_archive: Optional[List[str]] = None,
) -> None:
"""
Archive the model weights, its training configuration, and its vocabulary to `model.tar.gz`.
# Parameters
serialization_dir : `str`
The directory where the weights and vocabulary are written out.
weights : `str`, optional (default=`_DEFAULT_WEIGHTS`)
Which weights file to include in the archive. The default is `best.th`.
archive_path : `str`, optional, (default = `None`)
A full path to serialize the model to. The default is "model.tar.gz" inside the
serialization_dir. If you pass a directory here, we'll serialize the model
to "model.tar.gz" inside the directory.
include_in_archive : `List[str]`, optional, (default = `None`)
Paths relative to `serialization_dir` that should be archived in addition to the default ones.
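
    # Example

    A usage sketch (the directory and extra file name here are hypothetical):

    ```python
    archive_model("/path/to/serialization_dir", include_in_archive=["metrics.json"])
    ```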
"""
weights_file = os.path.join(serialization_dir, weights)
if not os.path.exists(weights_file):
logger.error("weights file %s does not exist, unable to archive model", weights_file)
return
config_file = os.path.join(serialization_dir, CONFIG_NAME)
if not os.path.exists(config_file):
logger.error("config file %s does not exist, unable to archive model", config_file)
if archive_path is not None:
archive_file = archive_path
if os.path.isdir(archive_file):
archive_file = os.path.join(archive_file, "model.tar.gz")
else:
archive_file = os.path.join(serialization_dir, "model.tar.gz")
logger.info("archiving weights and vocabulary to %s", archive_file)
with tarfile.open(archive_file, "w:gz") as archive:
archive.add(config_file, arcname=CONFIG_NAME)
archive.add(weights_file, arcname=_WEIGHTS_NAME)
archive.add(os.path.join(serialization_dir, "vocabulary"), arcname="vocabulary")
if include_in_archive is not None:
for archival_target in include_in_archive:
archival_target_path = os.path.join(serialization_dir, archival_target)
for path in glob.glob(archival_target_path):
if os.path.exists(path):
arcname = path[len(os.path.join(serialization_dir, "")) :]
archive.add(path, arcname=arcname)
def load_archive(
archive_file: Union[str, Path],
cuda_device: int = -1,
overrides: Union[str, Dict[str, Any]] = "",
weights_file: str = None,
) -> Archive:
"""
Instantiates an Archive from an archived `tar.gz` file.
# Parameters
archive_file : `Union[str, Path]`
The archive file to load the model from.
cuda_device : `int`, optional (default = `-1`)
If `cuda_device` is >= 0, the model will be loaded onto the
corresponding GPU. Otherwise it will be loaded onto the CPU.
overrides : `Union[str, Dict[str, Any]]`, optional (default = `""`)
JSON overrides to apply to the unarchived `Params` object.
weights_file : `str`, optional (default = `None`)
The weights file to use. If unspecified, weights.th in the archive_file will be used.
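
    # Example

    A usage sketch (the archive path here is hypothetical):

    ```python
    archive = load_archive("/path/to/model.tar.gz", cuda_device=-1)
    model, config = archive.model, archive.config
    ```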
"""
# redirect to the cache, if necessary
resolved_archive_file = cached_path(archive_file)
if resolved_archive_file == archive_file:
logger.info(f"loading archive file {archive_file}")
else:
logger.info(f"loading archive file {archive_file} from cache at {resolved_archive_file}")
tempdir = None
try:
if os.path.isdir(resolved_archive_file):
serialization_dir = resolved_archive_file
else:
with extracted_archive(resolved_archive_file, cleanup=False) as tempdir:
serialization_dir = tempdir
if weights_file:
weights_path = weights_file
else:
weights_path = get_weights_path(serialization_dir)
# Load config
config = Params.from_file(os.path.join(serialization_dir, CONFIG_NAME), overrides)
# Instantiate model and dataset readers. Use a duplicate of the config, as it will get consumed.
dataset_reader, validation_dataset_reader = _load_dataset_readers(
config.duplicate(), serialization_dir
)
model = _load_model(config.duplicate(), weights_path, serialization_dir, cuda_device)
finally:
if tempdir is not None:
logger.info(f"removing temporary unarchived model dir at {tempdir}")
shutil.rmtree(tempdir, ignore_errors=True)
return Archive(
model=model,
config=config,
dataset_reader=dataset_reader,
validation_dataset_reader=validation_dataset_reader,
)
def _load_dataset_readers(config, serialization_dir):
dataset_reader_params = config.get("dataset_reader")
# Try to use the validation dataset reader if there is one - otherwise fall back
# to the default dataset_reader used for both training and validation.
validation_dataset_reader_params = config.get(
"validation_dataset_reader", dataset_reader_params.duplicate()
)
dataset_reader = DatasetReader.from_params(
dataset_reader_params, serialization_dir=serialization_dir
)
validation_dataset_reader = DatasetReader.from_params(
validation_dataset_reader_params, serialization_dir=serialization_dir
)
return dataset_reader, validation_dataset_reader
def _load_model(config, weights_path, serialization_dir, cuda_device):
return Model.load(
config,
weights_file=weights_path,
serialization_dir=serialization_dir,
cuda_device=cuda_device,
)
def get_weights_path(serialization_dir):
weights_path = os.path.join(serialization_dir, _WEIGHTS_NAME)
    # Fall back to the training weights file for serialization directories that haven't been archived.
if not os.path.exists(weights_path):
weights_path = os.path.join(serialization_dir, _DEFAULT_WEIGHTS)
return weights_path
@contextmanager
def extracted_archive(resolved_archive_file, cleanup=True):
tempdir = None
try:
tempdir = tempfile.mkdtemp()
logger.info(f"extracting archive file {resolved_archive_file} to temp dir {tempdir}")
with tarfile.open(resolved_archive_file, "r:gz") as archive:
archive.extractall(tempdir)
yield tempdir
finally:
if tempdir is not None and cleanup:
logger.info(f"removing temporary unarchived model dir at {tempdir}")
shutil.rmtree(tempdir, ignore_errors=True)
| allennlp-master | allennlp/models/archival.py |
"""
# Plugin management.
AllenNLP supports loading "plugins" dynamically. A plugin is just a Python package that
provides custom registered classes or additional `allennlp` subcommands.
In order for AllenNLP to find your plugins, you have to create either a local plugins
file named `.allennlp_plugins` in the directory where the `allennlp` command is run, or a global
plugins file at `~/.allennlp/plugins`. The file should list the plugin modules that you want to
be loaded, one per line.
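
For example, a local `.allennlp_plugins` file containing the (hypothetical) lines
`my_project.custom_dataset_readers` and `my_project.custom_models` would cause those two
modules to be imported. Plugins declared this way are picked up when the `allennlp`
command runs; in your own scripts you can trigger the same behavior manually:

```python
from allennlp.common.plugins import import_plugins

import_plugins()
```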
"""
import importlib
import logging
import os
from pathlib import Path
import sys
from typing import Iterable, Set
from allennlp.common.util import push_python_path, import_module_and_submodules
logger = logging.getLogger(__name__)
LOCAL_PLUGINS_FILENAME = ".allennlp_plugins"
"""
Local plugin files should have this name.
"""
GLOBAL_PLUGINS_FILENAME = str(Path.home() / ".allennlp" / "plugins")
"""
The global plugins file will be found here.
"""
DEFAULT_PLUGINS = ("allennlp_models", "allennlp_semparse", "allennlp_server")
"""
Default plugins do not need to be declared in a plugins file. They will always
be imported when they are installed in the current Python environment.
"""
def discover_file_plugins(plugins_filename: str = LOCAL_PLUGINS_FILENAME) -> Iterable[str]:
"""
Returns an iterable of the plugins found, declared within a file whose path is `plugins_filename`.
"""
with open(plugins_filename) as file_:
for module_name in file_.readlines():
module_name = module_name.strip()
if module_name:
yield module_name
def discover_plugins() -> Iterable[str]:
"""
Returns an iterable of the plugins found.
"""
plugins: Set[str] = set()
if os.path.isfile(LOCAL_PLUGINS_FILENAME):
with push_python_path("."):
for plugin in discover_file_plugins(LOCAL_PLUGINS_FILENAME):
if plugin in plugins:
continue
yield plugin
plugins.add(plugin)
if os.path.isfile(GLOBAL_PLUGINS_FILENAME):
for plugin in discover_file_plugins(GLOBAL_PLUGINS_FILENAME):
if plugin in plugins:
continue
yield plugin
plugins.add(plugin)
def import_plugins() -> None:
"""
Imports the plugins found with `discover_plugins()`.
"""
# Workaround for a presumed Python issue where spawned processes can't find modules in the current directory.
cwd = os.getcwd()
if cwd not in sys.path:
sys.path.append(cwd)
for module_name in DEFAULT_PLUGINS:
try:
# For default plugins we recursively import everything.
import_module_and_submodules(module_name)
logger.info("Plugin %s available", module_name)
except ModuleNotFoundError as e:
if e.name != module_name:
logger.error(f"Plugin {module_name} could not be loaded: {e}")
for module_name in discover_plugins():
try:
importlib.import_module(module_name)
logger.info("Plugin %s available", module_name)
except ModuleNotFoundError as e:
logger.error(f"Plugin {module_name} could not be loaded: {e}")
| allennlp-master | allennlp/common/plugins.py |
import logging
from logging import Filter
import os
from os import PathLike
from typing import Union
import sys
class AllenNlpLogger(logging.Logger):
"""
    A custom subclass of `logging.Logger` that keeps a set of messages to
    implement `{debug,info,etc.}_once()` methods.
"""
def __init__(self, name):
super().__init__(name)
self._seen_msgs = set()
def debug_once(self, msg, *args, **kwargs):
if msg not in self._seen_msgs:
self.debug(msg, *args, **kwargs)
self._seen_msgs.add(msg)
def info_once(self, msg, *args, **kwargs):
if msg not in self._seen_msgs:
self.info(msg, *args, **kwargs)
self._seen_msgs.add(msg)
def warning_once(self, msg, *args, **kwargs):
if msg not in self._seen_msgs:
self.warning(msg, *args, **kwargs)
self._seen_msgs.add(msg)
def error_once(self, msg, *args, **kwargs):
if msg not in self._seen_msgs:
self.error(msg, *args, **kwargs)
self._seen_msgs.add(msg)
def critical_once(self, msg, *args, **kwargs):
if msg not in self._seen_msgs:
self.critical(msg, *args, **kwargs)
self._seen_msgs.add(msg)
logging.setLoggerClass(AllenNlpLogger)
logger = logging.getLogger(__name__)
FILE_FRIENDLY_LOGGING: bool = False
"""
If this flag is set to `True`, we add newlines to tqdm output, even on an interactive terminal, and we slow
down tqdm's output to only once every 10 seconds.
By default, it is set to `False`.
"""
class ErrorFilter(Filter):
"""
Filters out everything that is at the ERROR level or higher. This is meant to be used
with a stdout handler when a stderr handler is also configured. That way ERROR
messages aren't duplicated.
"""
def filter(self, record):
return record.levelno < logging.ERROR
def prepare_global_logging(
serialization_dir: Union[str, PathLike],
rank: int = 0,
world_size: int = 1,
) -> None:
root_logger = logging.getLogger()
# create handlers
if world_size == 1:
log_file = os.path.join(serialization_dir, "out.log")
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(name)s - %(message)s")
else:
log_file = os.path.join(serialization_dir, f"out_worker{rank}.log")
formatter = logging.Formatter(
f"{rank} | %(asctime)s - %(levelname)s - %(name)s - %(message)s"
)
file_handler = logging.FileHandler(log_file)
stdout_handler = logging.StreamHandler(sys.stdout)
stderr_handler = logging.StreamHandler(sys.stderr)
handler: logging.Handler
for handler in [file_handler, stdout_handler, stderr_handler]:
handler.setFormatter(formatter)
# Remove the already set handlers in root logger.
# Not doing this will result in duplicate log messages
root_logger.handlers.clear()
if os.environ.get("ALLENNLP_DEBUG"):
LEVEL = logging.DEBUG
else:
level_name = os.environ.get("ALLENNLP_LOG_LEVEL", "INFO")
LEVEL = logging._nameToLevel.get(level_name, logging.INFO)
file_handler.setLevel(LEVEL)
stdout_handler.setLevel(LEVEL)
stdout_handler.addFilter(ErrorFilter()) # Make sure errors only go to stderr
stderr_handler.setLevel(logging.ERROR)
root_logger.setLevel(LEVEL)
# put all the handlers on the root logger
root_logger.addHandler(file_handler)
if rank == 0:
root_logger.addHandler(stdout_handler)
root_logger.addHandler(stderr_handler)
# write uncaught exceptions to the logs
def excepthook(exctype, value, traceback):
# For a KeyboardInterrupt, call the original exception handler.
if issubclass(exctype, KeyboardInterrupt):
sys.__excepthook__(exctype, value, traceback)
return
root_logger.critical("Uncaught exception", exc_info=(exctype, value, traceback))
sys.excepthook = excepthook
# also log tqdm
from allennlp.common.tqdm import logger as tqdm_logger
tqdm_logger.addHandler(file_handler)
| allennlp-master | allennlp/common/logging.py |
import copy
import json
import logging
import os
import zlib
from collections import OrderedDict
from collections.abc import MutableMapping
from os import PathLike
from typing import Any, Dict, List, Union, Optional
from overrides import overrides
# _jsonnet doesn't work on Windows, so we have to use fakes.
try:
from _jsonnet import evaluate_file, evaluate_snippet
except ImportError:
def evaluate_file(filename: str, **_kwargs) -> str:
logger.warning(
f"error loading _jsonnet (this is expected on Windows), treating {filename} as plain json"
)
with open(filename, "r") as evaluation_file:
return evaluation_file.read()
def evaluate_snippet(_filename: str, expr: str, **_kwargs) -> str:
logger.warning(
"error loading _jsonnet (this is expected on Windows), treating snippet as plain json"
)
return expr
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
logger = logging.getLogger(__name__)
def infer_and_cast(value: Any):
"""
In some cases we'll be feeding params dicts to functions we don't own;
for example, PyTorch optimizers. In that case we can't use `pop_int`
or similar to force casts (which means you can't specify `int` parameters
using environment variables). This function takes something that looks JSON-like
and recursively casts things that look like (bool, int, float) to (bool, int, float).
"""
if isinstance(value, (int, float, bool)):
# Already one of our desired types, so leave as is.
return value
elif isinstance(value, list):
# Recursively call on each list element.
return [infer_and_cast(item) for item in value]
elif isinstance(value, dict):
# Recursively call on each dict value.
return {key: infer_and_cast(item) for key, item in value.items()}
elif isinstance(value, str):
# If it looks like a bool, make it a bool.
if value.lower() == "true":
return True
elif value.lower() == "false":
return False
else:
# See if it could be an int.
try:
return int(value)
except ValueError:
pass
# See if it could be a float.
try:
return float(value)
except ValueError:
# Just return it as a string.
return value
else:
raise ValueError(f"cannot infer type of {value}")
def _is_encodable(value: str) -> bool:
"""
We need to filter out environment variables that can't
be unicode-encoded to avoid a "surrogates not allowed"
error in jsonnet.
"""
    # Idiomatically you'd just check the truthiness of the encoded value,
    # but mypy doesn't like that.
return (value == "") or (value.encode("utf-8", "ignore") != b"")
def _environment_variables() -> Dict[str, str]:
"""
Wraps `os.environ` to filter out non-encodable values.
"""
return {key: value for key, value in os.environ.items() if _is_encodable(value)}
def unflatten(flat_dict: Dict[str, Any]) -> Dict[str, Any]:
"""
Given a "flattened" dict with compound keys, e.g.
{"a.b": 0}
unflatten it:
{"a": {"b": 0}}
"""
unflat: Dict[str, Any] = {}
for compound_key, value in flat_dict.items():
curr_dict = unflat
parts = compound_key.split(".")
for key in parts[:-1]:
curr_value = curr_dict.get(key)
if key not in curr_dict:
curr_dict[key] = {}
curr_dict = curr_dict[key]
elif isinstance(curr_value, dict):
curr_dict = curr_value
else:
raise ConfigurationError("flattened dictionary is invalid")
if not isinstance(curr_dict, dict) or parts[-1] in curr_dict:
raise ConfigurationError("flattened dictionary is invalid")
curr_dict[parts[-1]] = value
return unflat
def with_fallback(preferred: Dict[str, Any], fallback: Dict[str, Any]) -> Dict[str, Any]:
"""
Deep merge two dicts, preferring values from `preferred`.
"""
def merge(preferred_value: Any, fallback_value: Any) -> Any:
if isinstance(preferred_value, dict) and isinstance(fallback_value, dict):
return with_fallback(preferred_value, fallback_value)
elif isinstance(preferred_value, dict) and isinstance(fallback_value, list):
# treat preferred_value as a sparse list, where each key is an index to be overridden
merged_list = fallback_value
for elem_key, preferred_element in preferred_value.items():
try:
index = int(elem_key)
merged_list[index] = merge(preferred_element, fallback_value[index])
except ValueError:
raise ConfigurationError(
"could not merge dicts - the preferred dict contains "
f"invalid keys (key {elem_key} is not a valid list index)"
)
except IndexError:
raise ConfigurationError(
"could not merge dicts - the preferred dict contains "
f"invalid keys (key {index} is out of bounds)"
)
return merged_list
else:
return copy.deepcopy(preferred_value)
preferred_keys = set(preferred.keys())
fallback_keys = set(fallback.keys())
common_keys = preferred_keys & fallback_keys
merged: Dict[str, Any] = {}
for key in preferred_keys - fallback_keys:
merged[key] = copy.deepcopy(preferred[key])
for key in fallback_keys - preferred_keys:
merged[key] = copy.deepcopy(fallback[key])
for key in common_keys:
preferred_value = preferred[key]
fallback_value = fallback[key]
merged[key] = merge(preferred_value, fallback_value)
return merged
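# Illustrative sketch (editor's addition): values from `preferred` win, and a dict keyed by
# string indices overrides individual elements of a list in `fallback`.
#
#     with_fallback(preferred={"lr": 0.1}, fallback={"lr": 0.01, "epochs": 3})
#     # -> {"epochs": 3, "lr": 0.1}  (key order may vary)
#     with_fallback(preferred={"layers": {"0": 300}}, fallback={"layers": [100, 200]})
#     # -> {"layers": [300, 200]}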
def parse_overrides(serialized_overrides: str) -> Dict[str, Any]:
if serialized_overrides:
ext_vars = _environment_variables()
return unflatten(json.loads(evaluate_snippet("", serialized_overrides, ext_vars=ext_vars)))
else:
return {}
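# Illustrative sketch (editor's addition): overrides are written with dotted keys and then
# unflattened into nested dicts before being merged over the config file.
#
#     unflatten({"model.embedding_dim": 10, "trainer.cuda_device": -1})
#     # -> {"model": {"embedding_dim": 10}, "trainer": {"cuda_device": -1}}
#     parse_overrides('{"model.embedding_dim": 10}')
#     # -> {"model": {"embedding_dim": 10}}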
def _is_dict_free(obj: Any) -> bool:
"""
Returns False if obj is a dict, or if it's a list with an element that _has_dict.
"""
if isinstance(obj, dict):
return False
elif isinstance(obj, list):
return all(_is_dict_free(item) for item in obj)
else:
return True
class Params(MutableMapping):
"""
Represents a parameter dictionary with a history, and contains other functionality around
parameter passing and validation for AllenNLP.
There are currently two benefits of a `Params` object over a plain dictionary for parameter
passing:
1. We handle a few kinds of parameter validation, including making sure that parameters
representing discrete choices actually have acceptable values, and making sure no extra
parameters are passed.
2. We log all parameter reads, including default values. This gives a more complete
specification of the actual parameters used than is given in a JSON file, because
those may not specify what default values were used, whereas this will log them.
!!! Consumption
The convention for using a `Params` object in AllenNLP is that you will consume the parameters
as you read them, so that there are none left when you've read everything you expect. This
lets us easily validate that you didn't pass in any `extra` parameters, just by making sure
that the parameter dictionary is empty. You should do this when you're done handling
parameters, by calling `Params.assert_empty`.
"""
# This allows us to check for the presence of "None" as a default argument,
# which we require because we make a distinction between passing a value of "None"
# and passing no value to the default parameter of "pop".
DEFAULT = object()
def __init__(self, params: Dict[str, Any], history: str = "") -> None:
self.params = _replace_none(params)
self.history = history
@overrides
def pop(self, key: str, default: Any = DEFAULT, keep_as_dict: bool = False) -> Any:
"""
Performs the functionality associated with dict.pop(key), along with checking for
        returned dictionaries, replacing them with Params objects with an updated history
(unless keep_as_dict is True, in which case we leave them as dictionaries).
If `key` is not present in the dictionary, and no default was specified, we raise a
`ConfigurationError`, instead of the typical `KeyError`.
"""
if default is self.DEFAULT:
try:
value = self.params.pop(key)
except KeyError:
msg = f'key "{key}" is required'
if self.history:
msg += f' at location "{self.history}"'
raise ConfigurationError(msg)
else:
value = self.params.pop(key, default)
if keep_as_dict or _is_dict_free(value):
logger.info(f"{self.history}{key} = {value}")
return value
else:
return self._check_is_dict(key, value)
def pop_int(self, key: str, default: Any = DEFAULT) -> Optional[int]:
"""
Performs a pop and coerces to an int.
"""
value = self.pop(key, default)
if value is None:
return None
else:
return int(value)
def pop_float(self, key: str, default: Any = DEFAULT) -> Optional[float]:
"""
Performs a pop and coerces to a float.
"""
value = self.pop(key, default)
if value is None:
return None
else:
return float(value)
def pop_bool(self, key: str, default: Any = DEFAULT) -> Optional[bool]:
"""
Performs a pop and coerces to a bool.
"""
value = self.pop(key, default)
if value is None:
return None
elif isinstance(value, bool):
return value
elif value == "true":
return True
elif value == "false":
return False
else:
raise ValueError("Cannot convert variable to bool: " + value)
@overrides
def get(self, key: str, default: Any = DEFAULT):
"""
Performs the functionality associated with dict.get(key) but also checks for returned
dicts and returns a Params object in their place with an updated history.
"""
default = None if default is self.DEFAULT else default
value = self.params.get(key, default)
return self._check_is_dict(key, value)
def pop_choice(
self,
key: str,
choices: List[Any],
default_to_first_choice: bool = False,
allow_class_names: bool = True,
) -> Any:
"""
Gets the value of `key` in the `params` dictionary, ensuring that the value is one of
the given choices. Note that this `pops` the key from params, modifying the dictionary,
consistent with how parameters are processed in this codebase.
# Parameters
key: `str`
Key to get the value from in the param dictionary
choices: `List[Any]`
A list of valid options for values corresponding to `key`. For example, if you're
specifying the type of encoder to use for some part of your model, the choices might be
the list of encoder classes we know about and can instantiate. If the value we find in
the param dictionary is not in `choices`, we raise a `ConfigurationError`, because
the user specified an invalid value in their parameter file.
default_to_first_choice: `bool`, optional (default = `False`)
If this is `True`, we allow the `key` to not be present in the parameter
            dictionary. If the key is not present, we will use the first choice in the
            `choices` list as the return value. If this is `False`, we raise a
`ConfigurationError`, because specifying the `key` is required (e.g., you `have` to
specify your model class when running an experiment, but you can feel free to use
default settings for encoders if you want).
allow_class_names: `bool`, optional (default = `True`)
If this is `True`, then we allow unknown choices that look like fully-qualified class names.
This is to allow e.g. specifying a model type as my_library.my_model.MyModel
and importing it on the fly. Our check for "looks like" is extremely lenient
and consists of checking that the value contains a '.'.
"""
default = choices[0] if default_to_first_choice else self.DEFAULT
value = self.pop(key, default)
ok_because_class_name = allow_class_names and "." in value
if value not in choices and not ok_because_class_name:
key_str = self.history + key
message = (
f"{value} not in acceptable choices for {key_str}: {choices}. "
"You should either use the --include-package flag to make sure the correct module "
"is loaded, or use a fully qualified class name in your config file like "
"""{"model": "my_module.models.MyModel"} to have it imported automatically."""
)
raise ConfigurationError(message)
return value
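    # Illustrative sketch (editor's addition; the "type" key and choices here are hypothetical):
    #
    #     params = Params({"type": "lstm", "hidden_size": 10})
    #     params.pop_choice("type", choices=["lstm", "gru"])
    #     # -> "lstm", and "type" has been removed from `params`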
def as_dict(self, quiet: bool = False, infer_type_and_cast: bool = False):
"""
Sometimes we need to just represent the parameters as a dict, for instance when we pass
them to PyTorch code.
# Parameters
quiet: `bool`, optional (default = `False`)
Whether to log the parameters before returning them as a dict.
infer_type_and_cast: `bool`, optional (default = `False`)
If True, we infer types and cast (e.g. things that look like floats to floats).
"""
if infer_type_and_cast:
params_as_dict = infer_and_cast(self.params)
else:
params_as_dict = self.params
if quiet:
return params_as_dict
def log_recursively(parameters, history):
for key, value in parameters.items():
if isinstance(value, dict):
new_local_history = history + key + "."
log_recursively(value, new_local_history)
else:
logger.info(f"{history}{key} = {value}")
log_recursively(self.params, self.history)
return params_as_dict
def as_flat_dict(self) -> Dict[str, Any]:
"""
        Returns the parameters as a flat dictionary mapping compound keys to values.
        Nested structure is collapsed into compound keys joined by periods.
"""
flat_params = {}
def recurse(parameters, path):
for key, value in parameters.items():
newpath = path + [key]
if isinstance(value, dict):
recurse(value, newpath)
else:
flat_params[".".join(newpath)] = value
recurse(self.params, [])
return flat_params
def duplicate(self) -> "Params":
"""
Uses `copy.deepcopy()` to create a duplicate (but fully distinct)
copy of these Params.
"""
return copy.deepcopy(self)
def assert_empty(self, class_name: str):
"""
Raises a `ConfigurationError` if `self.params` is not empty. We take `class_name` as
an argument so that the error message gives some idea of where an error happened, if there
was one. `class_name` should be the name of the `calling` class, the one that got extra
parameters (if there are any).
"""
if self.params:
raise ConfigurationError(
"Extra parameters passed to {}: {}".format(class_name, self.params)
)
def __getitem__(self, key):
if key in self.params:
return self._check_is_dict(key, self.params[key])
else:
raise KeyError
def __setitem__(self, key, value):
self.params[key] = value
def __delitem__(self, key):
del self.params[key]
def __iter__(self):
return iter(self.params)
def __len__(self):
return len(self.params)
def _check_is_dict(self, new_history, value):
if isinstance(value, dict):
new_history = self.history + new_history + "."
return Params(value, history=new_history)
if isinstance(value, list):
value = [self._check_is_dict(f"{new_history}.{i}", v) for i, v in enumerate(value)]
return value
@classmethod
def from_file(
cls,
params_file: Union[str, PathLike],
params_overrides: Union[str, Dict[str, Any]] = "",
ext_vars: dict = None,
) -> "Params":
"""
Load a `Params` object from a configuration file.
# Parameters
params_file: `str`
The path to the configuration file to load.
params_overrides: `Union[str, Dict[str, Any]]`, optional (default = `""`)
            A dict of overrides that can be applied to the final object.
e.g. {"model.embedding_dim": 10}
ext_vars: `dict`, optional
Our config files are Jsonnet, which allows specifying external variables
for later substitution. Typically we substitute these using environment
variables; however, you can also specify them here, in which case they
take priority over environment variables.
e.g. {"HOME_DIR": "/Users/allennlp/home"}
"""
if ext_vars is None:
ext_vars = {}
# redirect to cache, if necessary
params_file = cached_path(params_file)
ext_vars = {**_environment_variables(), **ext_vars}
file_dict = json.loads(evaluate_file(params_file, ext_vars=ext_vars))
if isinstance(params_overrides, dict):
params_overrides = json.dumps(params_overrides)
overrides_dict = parse_overrides(params_overrides)
param_dict = with_fallback(preferred=overrides_dict, fallback=file_dict)
return cls(param_dict)
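    # Usage sketch (editor's addition; the file name and keys are hypothetical, and the
    # config file must exist):
    #
    #     params = Params.from_file(
    #         "experiment.jsonnet",
    #         params_overrides={"trainer.num_epochs": 5},
    #     )
    #     params["trainer"]["num_epochs"]   # -> 5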
def to_file(self, params_file: str, preference_orders: List[List[str]] = None) -> None:
with open(params_file, "w") as handle:
json.dump(self.as_ordered_dict(preference_orders), handle, indent=4)
def as_ordered_dict(self, preference_orders: List[List[str]] = None) -> OrderedDict:
"""
Returns Ordered Dict of Params from list of partial order preferences.
# Parameters
preference_orders: `List[List[str]]`, optional
            `preference_orders` is a list of partial preference orders. `["A", "B", "C"]` means
            "A" > "B" > "C". When multiple preference orders are given, earlier orders take
            precedence over later ones. Keys not found in any order are sorted last, in
            alphabetical order. Default preferences:
`[["dataset_reader", "iterator", "model", "train_data_path", "validation_data_path",
"test_data_path", "trainer", "vocabulary"], ["type"]]`
"""
params_dict = self.as_dict(quiet=True)
if not preference_orders:
preference_orders = []
preference_orders.append(
[
"dataset_reader",
"iterator",
"model",
"train_data_path",
"validation_data_path",
"test_data_path",
"trainer",
"vocabulary",
]
)
preference_orders.append(["type"])
def order_func(key):
# Makes a tuple to use for ordering. The tuple is an index into each of the `preference_orders`,
# followed by the key itself. This gives us integer sorting if you have a key in one of the
# `preference_orders`, followed by alphabetical ordering if not.
order_tuple = [
order.index(key) if key in order else len(order) for order in preference_orders
]
return order_tuple + [key]
def order_dict(dictionary, order_func):
# Recursively orders dictionary according to scoring order_func
result = OrderedDict()
for key, val in sorted(dictionary.items(), key=lambda item: order_func(item[0])):
result[key] = order_dict(val, order_func) if isinstance(val, dict) else val
return result
return order_dict(params_dict, order_func)
def get_hash(self) -> str:
"""
Returns a hash code representing the current state of this `Params` object. We don't
want to implement `__hash__` because that has deeper python implications (and this is a
mutable object), but this will give you a representation of the current state.
We use `zlib.adler32` instead of Python's builtin `hash` because the random seed for the
latter is reset on each new program invocation, as discussed here:
https://stackoverflow.com/questions/27954892/deterministic-hashing-in-python-3.
"""
dumped = json.dumps(self.params, sort_keys=True)
hashed = zlib.adler32(dumped.encode())
return str(hashed)
def __str__(self) -> str:
return f"{self.history}Params({self.params})"
def pop_choice(
params: Dict[str, Any],
key: str,
choices: List[Any],
default_to_first_choice: bool = False,
history: str = "?.",
allow_class_names: bool = True,
) -> Any:
"""
Performs the same function as `Params.pop_choice`, but is required in order to deal with
places that the Params object is not welcome, such as inside Keras layers. See the docstring
of that method for more detail on how this function works.
This method adds a `history` parameter, in the off-chance that you know it, so that we can
reproduce `Params.pop_choice` exactly. We default to using "?." if you don't know the
history, so you'll have to fix that in the log if you want to actually recover the logged
parameters.
"""
value = Params(params, history).pop_choice(
key, choices, default_to_first_choice, allow_class_names=allow_class_names
)
return value
def _replace_none(params: Any) -> Any:
if params == "None":
return None
elif isinstance(params, dict):
for key, value in params.items():
params[key] = _replace_none(value)
return params
elif isinstance(params, list):
return [_replace_none(value) for value in params]
return params
def remove_keys_from_params(params: Params, keys: List[str] = ["pretrained_file", "initializer"]):
if isinstance(params, Params): # The model could possibly be a string, for example.
param_keys = params.keys()
for key in keys:
if key in param_keys:
del params[key]
for value in params.values():
if isinstance(value, Params):
remove_keys_from_params(value, keys)
| allennlp-master | allennlp/common/params.py |
import logging
from typing import NamedTuple, Optional, Dict, Tuple
import transformers
from transformers import AutoModel
logger = logging.getLogger(__name__)
class TransformerSpec(NamedTuple):
model_name: str
override_weights_file: Optional[str] = None
override_weights_strip_prefix: Optional[str] = None
_model_cache: Dict[TransformerSpec, transformers.PreTrainedModel] = {}
def get(
model_name: str,
make_copy: bool,
override_weights_file: Optional[str] = None,
override_weights_strip_prefix: Optional[str] = None,
**kwargs,
) -> transformers.PreTrainedModel:
"""
Returns a transformer model from the cache.
# Parameters
model_name : `str`
The name of the transformer, for example `"bert-base-cased"`
make_copy : `bool`
If this is `True`, return a copy of the model instead of the cached model itself. If you want to modify the
parameters of the model, set this to `True`. If you want only part of the model, set this to `False`, but
make sure to `copy.deepcopy()` the bits you are keeping.
override_weights_file : `str`, optional
If set, this specifies a file from which to load alternate weights that override the
weights from huggingface. The file is expected to contain a PyTorch `state_dict`, created
with `torch.save()`.
override_weights_strip_prefix : `str`, optional
        If set, only keys in the state dict that start with this prefix are kept (with the
        prefix stripped) when loading it.
"""
global _model_cache
spec = TransformerSpec(model_name, override_weights_file, override_weights_strip_prefix)
transformer = _model_cache.get(spec, None)
if transformer is None:
if override_weights_file is not None:
from allennlp.common.file_utils import cached_path
import torch
override_weights_file = cached_path(override_weights_file)
override_weights = torch.load(override_weights_file)
if override_weights_strip_prefix is not None:
def strip_prefix(s):
if s.startswith(override_weights_strip_prefix):
return s[len(override_weights_strip_prefix) :]
else:
return s
valid_keys = {
k
for k in override_weights.keys()
if k.startswith(override_weights_strip_prefix)
}
if len(valid_keys) > 0:
logger.info(
"Loading %d tensors from %s", len(valid_keys), override_weights_file
)
else:
raise ValueError(
f"Specified prefix of '{override_weights_strip_prefix}' means no tensors "
f"will be loaded from {override_weights_file}."
)
override_weights = {strip_prefix(k): override_weights[k] for k in valid_keys}
transformer = AutoModel.from_pretrained(
model_name,
state_dict=override_weights,
**kwargs,
)
else:
transformer = AutoModel.from_pretrained(
model_name,
**kwargs,
)
_model_cache[spec] = transformer
if make_copy:
import copy
return copy.deepcopy(transformer)
else:
return transformer
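# Usage sketch (editor's addition; downloads the pretrained weights on first use):
#
#     from allennlp.common import cached_transformers
#
#     bert_a = cached_transformers.get("bert-base-cased", make_copy=False)
#     bert_b = cached_transformers.get("bert-base-cased", make_copy=False)
#     assert bert_a is bert_b   # the second call hits the cache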
_tokenizer_cache: Dict[Tuple[str, str], transformers.PreTrainedTokenizer] = {}
def get_tokenizer(model_name: str, **kwargs) -> transformers.PreTrainedTokenizer:
from allennlp.common.util import hash_object
cache_key = (model_name, hash_object(kwargs))
global _tokenizer_cache
tokenizer = _tokenizer_cache.get(cache_key, None)
if tokenizer is None:
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_name,
**kwargs,
)
_tokenizer_cache[cache_key] = tokenizer
return tokenizer
| allennlp-master | allennlp/common/cached_transformers.py |
"""
Various utilities that don't fit anywhere else.
"""
import hashlib
import io
import pickle
from datetime import timedelta
import importlib
import json
import logging
import os
import pkgutil
import random
import sys
from contextlib import contextmanager
from itertools import islice, zip_longest
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Generator,
Iterable,
Iterator,
List,
Optional,
Tuple,
TypeVar,
Union,
)
import numpy
import spacy
import torch
import torch.distributed as dist
from spacy.cli.download import download as spacy_download
from spacy.language import Language as SpacyModelType
from allennlp.common.checks import log_pytorch_version_info
from allennlp.common.params import Params
try:
import resource
except ImportError:
# resource doesn't exist on Windows systems
resource = None # type: ignore
logger = logging.getLogger(__name__)
JsonDict = Dict[str, Any]
# If you want to have start and/or end symbols for any reason in your code, we recommend you use
# these, to have a common place to import from. Also, it's important for some edge cases in how
# data is processed for these symbols to be lowercase, not uppercase (because we have code that
# will lowercase tokens for you in some circumstances, and we need this symbol to not change in
# those cases).
START_SYMBOL = "@start@"
END_SYMBOL = "@end@"
PathType = Union[os.PathLike, str]
T = TypeVar("T")
ContextManagerFunctionReturnType = Generator[T, None, None]
def sanitize(x: Any) -> Any:
"""
Sanitize turns PyTorch and Numpy types into basic Python types so they
can be serialized into JSON.
"""
# Import here to avoid circular references
from allennlp.data.tokenizers import Token
if isinstance(x, (str, float, int, bool)):
# x is already serializable
return x
elif isinstance(x, torch.Tensor):
# tensor needs to be converted to a list (and moved to cpu if necessary)
return x.cpu().tolist()
elif isinstance(x, numpy.ndarray):
# array needs to be converted to a list
return x.tolist()
elif isinstance(x, numpy.number):
# NumPy numbers need to be converted to Python numbers
return x.item()
elif isinstance(x, dict):
# Dicts need their values sanitized
return {key: sanitize(value) for key, value in x.items()}
elif isinstance(x, numpy.bool_):
# Numpy bool_ need to be converted to python bool.
return bool(x)
elif isinstance(x, (spacy.tokens.Token, Token)):
# Tokens get sanitized to just their text.
return x.text
elif isinstance(x, (list, tuple)):
# Lists and Tuples need their values sanitized
return [sanitize(x_i) for x_i in x]
elif x is None:
return "None"
elif hasattr(x, "to_json"):
return x.to_json()
else:
raise ValueError(
f"Cannot sanitize {x} of type {type(x)}. "
"If this is your own custom class, add a `to_json(self)` method "
"that returns a JSON-like object."
)
def group_by_count(iterable: List[Any], count: int, default_value: Any) -> List[List[Any]]:
"""
Takes a list and groups it into sublists of size `count`, using `default_value` to pad the
    list at the end if the list is not divisible by `count`.
For example:
```
>>> group_by_count([1, 2, 3, 4, 5, 6, 7], 3, 0)
[[1, 2, 3], [4, 5, 6], [7, 0, 0]]
```
This is a short method, but it's complicated and hard to remember as a one-liner, so we just
make a function out of it.
"""
return [list(x) for x in zip_longest(*[iter(iterable)] * count, fillvalue=default_value)]
A = TypeVar("A")
def lazy_groups_of(iterable: Iterable[A], group_size: int) -> Iterator[List[A]]:
"""
Takes an iterable and batches the individual instances into lists of the
specified size. The last list may be smaller if there are instances left over.
"""
iterator = iter(iterable)
while True:
s = list(islice(iterator, group_size))
if len(s) > 0:
yield s
else:
break
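# Illustrative sketch (editor's addition):
#
#     list(lazy_groups_of(range(5), 2))
#     # -> [[0, 1], [2, 3], [4]]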
def pad_sequence_to_length(
sequence: List,
desired_length: int,
default_value: Callable[[], Any] = lambda: 0,
padding_on_right: bool = True,
) -> List:
"""
    Takes a list of objects and pads it to the desired length, returning the padded list. The
original list is not modified.
# Parameters
sequence : `List`
A list of objects to be padded.
desired_length : `int`
Maximum length of each sequence. Longer sequences are truncated to this length, and
shorter ones are padded to it.
default_value: `Callable`, optional (default=`lambda: 0`)
Callable that outputs a default value (of any type) to use as padding values. This is
a lambda to avoid using the same object when the default value is more complex, like a
list.
padding_on_right : `bool`, optional (default=`True`)
When we add padding tokens (or truncate the sequence), should we do it on the right or
the left?
# Returns
padded_sequence : `List`
"""
# Truncates the sequence to the desired length.
if padding_on_right:
padded_sequence = sequence[:desired_length]
else:
padded_sequence = sequence[-desired_length:]
# Continues to pad with default_value() until we reach the desired length.
pad_length = desired_length - len(padded_sequence)
# This just creates the default value once, so if it's a list, and if it gets mutated
# later, it could cause subtle bugs. But the risk there is low, and this is much faster.
values_to_pad = [default_value()] * pad_length
if padding_on_right:
padded_sequence = padded_sequence + values_to_pad
else:
padded_sequence = values_to_pad + padded_sequence
return padded_sequence
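# Illustrative sketch (editor's addition):
#
#     pad_sequence_to_length([1, 2, 3], 5)                          # -> [1, 2, 3, 0, 0]
#     pad_sequence_to_length([1, 2, 3], 2)                          # -> [1, 2]
#     pad_sequence_to_length([1, 2, 3], 5, padding_on_right=False)  # -> [0, 0, 1, 2, 3]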
def add_noise_to_dict_values(dictionary: Dict[A, float], noise_param: float) -> Dict[A, float]:
"""
Returns a new dictionary with noise added to every key in `dictionary`. The noise is
uniformly distributed within `noise_param` percent of the value for every value in the
dictionary.
"""
new_dict = {}
for key, value in dictionary.items():
noise_value = value * noise_param
noise = random.uniform(-noise_value, noise_value)
new_dict[key] = value + noise
return new_dict
def namespace_match(pattern: str, namespace: str):
"""
Matches a namespace pattern against a namespace string. For example, `*tags` matches
`passage_tags` and `question_tags` and `tokens` matches `tokens` but not
`stemmed_tokens`.
"""
if pattern[0] == "*" and namespace.endswith(pattern[1:]):
return True
elif pattern == namespace:
return True
return False
def prepare_environment(params: Params):
"""
Sets random seeds for reproducible experiments. This may not work as expected
if you use this from within a python project in which you have already imported Pytorch.
    If you use the scripts/run_model.py entry point to train models with this library,
your experiments should be reasonably reproducible. If you are using this from your own
project, you will want to call this function before importing Pytorch. Complete determinism
is very difficult to achieve with libraries doing optimized linear algebra due to massively
parallel execution, which is exacerbated by using GPUs.
# Parameters
params: `Params`
A `Params` object or dict holding the json parameters.
"""
seed = params.pop_int("random_seed", 13370)
numpy_seed = params.pop_int("numpy_seed", 1337)
torch_seed = params.pop_int("pytorch_seed", 133)
if seed is not None:
random.seed(seed)
if numpy_seed is not None:
numpy.random.seed(numpy_seed)
if torch_seed is not None:
torch.manual_seed(torch_seed)
# Seed all GPUs with the same seed if available.
if torch.cuda.is_available():
torch.cuda.manual_seed_all(torch_seed)
log_pytorch_version_info()
LOADED_SPACY_MODELS: Dict[Tuple[str, bool, bool, bool], SpacyModelType] = {}
def get_spacy_model(
spacy_model_name: str, pos_tags: bool, parse: bool, ner: bool
) -> SpacyModelType:
"""
In order to avoid loading spacy models a whole bunch of times, we'll save references to them,
keyed by the options we used to create the spacy model, so any particular configuration only
gets loaded once.
"""
options = (spacy_model_name, pos_tags, parse, ner)
if options not in LOADED_SPACY_MODELS:
disable = ["vectors", "textcat"]
if not pos_tags:
disable.append("tagger")
if not parse:
disable.append("parser")
if not ner:
disable.append("ner")
try:
spacy_model = spacy.load(spacy_model_name, disable=disable)
except OSError:
logger.warning(
f"Spacy models '{spacy_model_name}' not found. Downloading and installing."
)
spacy_download(spacy_model_name)
# Import the downloaded model module directly and load from there
spacy_model_module = __import__(spacy_model_name)
spacy_model = spacy_model_module.load(disable=disable) # type: ignore
LOADED_SPACY_MODELS[options] = spacy_model
return LOADED_SPACY_MODELS[options]
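# Usage sketch (editor's addition; downloads the spacy model on first use if it isn't installed):
#
#     nlp = get_spacy_model("en_core_web_sm", pos_tags=False, parse=False, ner=False)
#     doc = nlp("AllenNLP is built on PyTorch.")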
@contextmanager
def pushd(new_dir: PathType, verbose: bool = False) -> ContextManagerFunctionReturnType[None]:
"""
    Changes the current directory to the given path.
    This method is intended to be used with `with`, so after its usage, the current directory will be
    restored to its previous value.
"""
previous_dir = os.getcwd()
if verbose:
logger.info(f"Changing directory to {new_dir}") # type: ignore
os.chdir(new_dir)
try:
yield
finally:
if verbose:
logger.info(f"Changing directory back to {previous_dir}")
os.chdir(previous_dir)
@contextmanager
def push_python_path(path: PathType) -> ContextManagerFunctionReturnType[None]:
"""
Prepends the given path to `sys.path`.
    This method is intended to be used with `with`, so after its usage, the path will be removed
    from `sys.path`.
"""
# In some environments, such as TC, it fails when sys.path contains a relative path, such as ".".
path = Path(path).resolve()
path = str(path)
sys.path.insert(0, path)
try:
yield
finally:
# Better to remove by value, in case `sys.path` was manipulated in between.
sys.path.remove(path)
def import_module_and_submodules(package_name: str) -> None:
"""
Import all submodules under the given package.
Primarily useful so that people using AllenNLP as a library
can specify their own custom packages and have their custom
classes get loaded and registered.
"""
importlib.invalidate_caches()
# For some reason, python doesn't always add this by default to your path, but you pretty much
# always want it when using `--include-package`. And if it's already there, adding it again at
# the end won't hurt anything.
with push_python_path("."):
# Import at top level
module = importlib.import_module(package_name)
path = getattr(module, "__path__", [])
path_string = "" if not path else path[0]
# walk_packages only finds immediate children, so need to recurse.
for module_finder, name, _ in pkgutil.walk_packages(path):
# Sometimes when you import third-party libraries that are on your path,
# `pkgutil.walk_packages` returns those too, so we need to skip them.
if path_string and module_finder.path != path_string:
continue
subpackage = f"{package_name}.{name}"
import_module_and_submodules(subpackage)
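# Usage sketch (editor's addition; "my_library" is a hypothetical package). This is what the
# --include-package flag does under the hood: importing the package recursively registers any
# custom Registrable subclasses it defines.
#
#     import_module_and_submodules("my_library")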
def peak_cpu_memory() -> Dict[int, int]:
"""
Get peak memory usage for each worker, as measured by max-resident-set size:
https://unix.stackexchange.com/questions/30940/getrusage-system-call-what-is-maximum-resident-set-size
    Only works on OSX and Linux; on other platforms the result will be 0 for every worker.
"""
if resource is None or sys.platform not in ("linux", "darwin"):
peak_bytes = 0
else:
peak = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if sys.platform == "darwin":
# On OSX the result is in bytes.
peak_bytes = peak
else:
# On Linux the result is in kilobytes.
peak_bytes = peak * 1_024
if is_distributed():
global_rank = dist.get_rank()
world_size = dist.get_world_size()
peak_bytes_tensor = torch.tensor([global_rank, peak_bytes])
# All of these tensors will be gathered into this list.
gather_results = [torch.tensor([0, 0]) for _ in range(world_size)]
# If the backend is 'nccl', this means we're training on GPUs, so these tensors
# need to be on GPU.
if dist.get_backend() == "nccl":
peak_bytes_tensor = peak_bytes_tensor.cuda()
gather_results = [x.cuda() for x in gather_results]
dist.all_gather(gather_results, peak_bytes_tensor)
results_dict: Dict[int, int] = {}
for peak_bytes_tensor in gather_results:
results_dict[int(peak_bytes_tensor[0])] = int(peak_bytes_tensor[1])
return results_dict
else:
return {0: peak_bytes}
def peak_gpu_memory() -> Dict[int, int]:
"""
Get the peak GPU memory usage in bytes by device.
# Returns
`Dict[int, int]`
Keys are device ids as integers.
Values are memory usage as integers in bytes.
Returns an empty `dict` if GPUs are not available.
"""
if not torch.cuda.is_available():
return {}
if is_distributed():
# If the backend is not 'nccl', we're training on CPU.
if dist.get_backend() != "nccl":
return {}
device = torch.cuda.current_device()
global_rank = dist.get_rank()
world_size = dist.get_world_size()
peak_bytes = torch.cuda.max_memory_allocated(device)
peak_bytes_tensor = torch.tensor([global_rank, peak_bytes], device=device)
# All of these tensors will be gathered into this list.
gather_results = [torch.tensor([0, 0], device=device) for _ in range(world_size)]
dist.all_gather(gather_results, peak_bytes_tensor)
results_dict: Dict[int, int] = {}
for peak_bytes_tensor in gather_results:
results_dict[int(peak_bytes_tensor[0])] = int(peak_bytes_tensor[1])
return results_dict
else:
return {0: torch.cuda.max_memory_allocated()}
def ensure_list(iterable: Iterable[A]) -> List[A]:
"""
An Iterable may be a list or a generator.
This ensures we get a list without making an unnecessary copy.
"""
if isinstance(iterable, list):
return iterable
else:
return list(iterable)
def is_lazy(iterable: Iterable[A]) -> bool:
"""
Checks if the given iterable is lazy,
which here just means it's not a list.
"""
return not isinstance(iterable, list)
def int_to_device(device: Union[int, torch.device]) -> torch.device:
if isinstance(device, torch.device):
return device
if device < 0:
return torch.device("cpu")
return torch.device(device)
def log_frozen_and_tunable_parameter_names(model: torch.nn.Module) -> None:
frozen_parameter_names, tunable_parameter_names = get_frozen_and_tunable_parameter_names(model)
logger.info("The following parameters are Frozen (without gradient):")
for name in frozen_parameter_names:
logger.info(name)
logger.info("The following parameters are Tunable (with gradient):")
for name in tunable_parameter_names:
logger.info(name)
def get_frozen_and_tunable_parameter_names(
model: torch.nn.Module,
) -> Tuple[Iterable[str], Iterable[str]]:
frozen_parameter_names = (
name for name, parameter in model.named_parameters() if not parameter.requires_grad
)
tunable_parameter_names = (
name for name, parameter in model.named_parameters() if parameter.requires_grad
)
return frozen_parameter_names, tunable_parameter_names
def dump_metrics(file_path: Optional[str], metrics: Dict[str, Any], log: bool = False) -> None:
metrics_json = json.dumps(metrics, indent=2)
if file_path:
with open(file_path, "w") as metrics_file:
metrics_file.write(metrics_json)
if log:
logger.info("Metrics: %s", metrics_json)
def flatten_filename(file_path: str) -> str:
return file_path.replace("/", "_SLASH_")
def is_distributed() -> bool:
"""
Checks if the distributed process group is available and has been initialized
"""
return dist.is_available() and dist.is_initialized()
def sanitize_wordpiece(wordpiece: str) -> str:
"""
Sanitizes wordpieces from BERT, RoBERTa or ALBERT tokenizers.
"""
if wordpiece.startswith("##"):
return wordpiece[2:]
elif wordpiece.startswith("Ġ"):
return wordpiece[1:]
elif wordpiece.startswith("▁"):
return wordpiece[1:]
else:
return wordpiece
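# Illustrative sketch (editor's addition):
#
#     sanitize_wordpiece("##ing")   # -> "ing"   (BERT-style wordpieces)
#     sanitize_wordpiece("Ġthe")    # -> "the"   (RoBERTa/GPT-2 byte-level BPE)
#     sanitize_wordpiece("▁the")    # -> "the"   (ALBERT/sentencepiece)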
def sanitize_ptb_tokenized_string(text: str) -> str:
"""
    Sanitizes a string that was tokenized using PTBTokenizer.
"""
tokens = text.split(" ")
if len(tokens) == 0:
return text
# Replace quotation marks and parentheses
token_map = {
"``": '"',
"''": '"',
"-lrb-": "(",
"-rrb-": ")",
"-lsb-": "[",
"-rsb-": "]",
"-lcb-": "{",
"-rcb-": "}",
"<s>": "",
"</s>": "",
}
# Merge punctuation with previous tokens
punct_forward = {"`", "$", "#"}
punct_backward = {".", ",", "!", "?", ":", ";", "%", "'"}
# Exact matches that get merged forward or backward
em_forward = {"(", "[", "{"}
em_backward = {"n't", "na", ")", "]", "}"}
new_tokens: List[str] = []
merge_fwd = False
for i, orig_token in enumerate(tokens):
tokens[i] = token_map[orig_token.lower()] if orig_token.lower() in token_map else orig_token
new_token = tokens[i].lower()
# merge_fwd was set by previous token, so it should be prepended to current token
if merge_fwd:
tokens[i] = tokens[i - 1] + tokens[i]
if len(tokens[i]) == 0:
continue
        # Special cases for `` and '': they tell us whether " is the start or end of a quotation.
        # Also, always merge tokens starting with ' backward, and don't merge backward if we just merged forward.
merge_bckwd = not merge_fwd and (
orig_token == "''"
or new_token in em_backward
or new_token.startswith("'")
or all(c in punct_backward for c in new_token)
)
merge_fwd = (
orig_token == "``"
or new_token in em_forward
or all(c in punct_forward for c in new_token)
)
if merge_bckwd and new_tokens:
new_tokens[-1] += tokens[i]
elif not new_tokens or not merge_fwd or i == len(tokens) - 1:
new_tokens.append(tokens[i])
return " ".join(new_tokens)
def find_open_port() -> int:
"""
Find a random open port on local host.
"""
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        # Passing 0 asks the OS to pick any open port.
# See https://stackoverflow.com/questions/1365265/on-localhost-how-do-i-pick-a-free-port-number
sock.bind(("", 0))
return sock.getsockname()[1]
def format_timedelta(td: timedelta) -> str:
"""
Format a timedelta for humans.
"""
if td.days > 1:
return f"{td.days} days"
elif td.days > 0:
return f"{td.days} day"
else:
hours, remainder = divmod(td.seconds, 3600)
minutes, _ = divmod(remainder, 60)
if hours > 1:
return f"{hours} hours"
elif hours > 0:
return f"{hours} hour, {minutes} mins"
else:
return f"{minutes} mins"
def format_size(size: int) -> str:
"""
Format a size (in bytes) for humans.
"""
GBs = size / (1024 * 1024 * 1024)
if GBs >= 10:
return f"{int(round(GBs, 0))}G"
if GBs >= 1:
return f"{round(GBs, 1):.1f}G"
MBs = size / (1024 * 1024)
if MBs >= 10:
return f"{int(round(MBs, 0))}M"
if MBs >= 1:
return f"{round(MBs, 1):.1f}M"
KBs = size / 1024
if KBs >= 10:
return f"{int(round(KBs, 0))}K"
if KBs >= 1:
return f"{round(KBs, 1):.1f}K"
return f"{size}B"
def hash_object(o: Any) -> str:
"""Returns a 32-character hash code of arbitrary Python objects."""
m = hashlib.blake2b()
with io.BytesIO() as buffer:
pickle.dump(o, buffer)
m.update(buffer.getbuffer())
return m.hexdigest()
| allennlp-master | allennlp/common/util.py |
import collections.abc
from copy import deepcopy
from pathlib import Path
from typing import (
Any,
Callable,
cast,
Dict,
Iterable,
List,
Mapping,
Set,
Tuple,
Type,
TypeVar,
Union,
)
import inspect
import logging
from allennlp.common.checks import ConfigurationError
from allennlp.common.lazy import Lazy
from allennlp.common.params import Params
logger = logging.getLogger(__name__)
T = TypeVar("T", bound="FromParams")
# If a function parameter has no default value specified,
# this is what the inspect module returns.
_NO_DEFAULT = inspect.Parameter.empty
def takes_arg(obj, arg: str) -> bool:
"""
Checks whether the provided obj takes a certain arg.
If it's a class, we're really checking whether its constructor does.
If it's a function or method, we're checking the object itself.
Otherwise, we raise an error.
"""
if inspect.isclass(obj):
signature = inspect.signature(obj.__init__)
elif inspect.ismethod(obj) or inspect.isfunction(obj):
signature = inspect.signature(obj)
else:
raise ConfigurationError(f"object {obj} is not callable")
return arg in signature.parameters
def takes_kwargs(obj) -> bool:
"""
    Checks whether the provided object takes any keyword arguments (`**kwargs`).
    As with `takes_arg`, if it's a class we check its constructor; if it's a function or
    method, we check the object itself. Otherwise, we raise an error.
"""
if inspect.isclass(obj):
signature = inspect.signature(obj.__init__)
elif inspect.ismethod(obj) or inspect.isfunction(obj):
signature = inspect.signature(obj)
else:
raise ConfigurationError(f"object {obj} is not callable")
return any(
p.kind == inspect.Parameter.VAR_KEYWORD # type: ignore
for p in signature.parameters.values()
)
def can_construct_from_params(type_: Type) -> bool:
if type_ in [str, int, float, bool]:
return True
origin = getattr(type_, "__origin__", None)
if origin == Lazy:
return True
elif origin:
if hasattr(type_, "from_params"):
return True
args = getattr(type_, "__args__")
return all(can_construct_from_params(arg) for arg in args)
return hasattr(type_, "from_params")
def is_base_registrable(cls) -> bool:
"""
    Checks whether this is a class that directly inherits from Registrable, i.e. a base class
    that concrete implementations get registered under, rather than one of those concrete
    subclasses.
"""
from allennlp.common.registrable import Registrable # import here to avoid circular imports
if not issubclass(cls, Registrable):
return False
method_resolution_order = inspect.getmro(cls)[1:]
for base_class in method_resolution_order:
if issubclass(base_class, Registrable) and base_class is not Registrable:
return False
return True
def remove_optional(annotation: type):
"""
Optional[X] annotations are actually represented as Union[X, NoneType].
For our purposes, the "Optional" part is not interesting, so here we
throw it away.
"""
origin = getattr(annotation, "__origin__", None)
args = getattr(annotation, "__args__", ())
if origin == Union:
return Union[tuple([arg for arg in args if arg != type(None)])] # noqa: E721
else:
return annotation
def infer_params(
cls: Type[T], constructor: Union[Callable[..., T], Callable[[T], None]] = None
) -> Dict[str, Any]:
if constructor is None:
constructor = cls.__init__
signature = inspect.signature(constructor)
parameters = dict(signature.parameters)
has_kwargs = False
var_positional_key = None
for param in parameters.values():
if param.kind == param.VAR_KEYWORD:
has_kwargs = True
elif param.kind == param.VAR_POSITIONAL:
var_positional_key = param.name
if var_positional_key:
del parameters[var_positional_key]
if not has_kwargs:
return parameters
# "mro" is "method resolution order". The first one is the current class, the next is the
# first superclass, and so on. We take the first superclass we find that inherits from
# FromParams.
super_class = None
for super_class_candidate in cls.mro()[1:]:
if issubclass(super_class_candidate, FromParams):
super_class = super_class_candidate
break
if super_class:
super_parameters = infer_params(super_class)
else:
super_parameters = {}
return {**super_parameters, **parameters} # Subclass parameters overwrite superclass ones
def create_kwargs(
constructor: Callable[..., T], cls: Type[T], params: Params, **extras
) -> Dict[str, Any]:
"""
Given some class, a `Params` object, and potentially other keyword arguments,
create a dict of keyword args suitable for passing to the class's constructor.
The function does this by finding the class's constructor, matching the constructor
arguments to entries in the `params` object, and instantiating values for the parameters
using the type annotation and possibly a from_params method.
Any values that are provided in the `extras` will just be used as is.
For instance, you might provide an existing `Vocabulary` this way.
"""
# Get the signature of the constructor.
kwargs: Dict[str, Any] = {}
parameters = infer_params(cls, constructor)
accepts_kwargs = False
# Iterate over all the constructor parameters and their annotations.
for param_name, param in parameters.items():
# Skip "self". You're not *required* to call the first parameter "self",
# so in theory this logic is fragile, but if you don't call the self parameter
# "self" you kind of deserve what happens.
if param_name == "self":
continue
if param.kind == param.VAR_KEYWORD:
# When a class takes **kwargs, we do two things: first, we assume that the **kwargs are
# getting passed to the super class, so we inspect super class constructors to get
# allowed arguments (that happens in `infer_params` above). Second, we store the fact
# that the method allows extra keys; if we get extra parameters, instead of crashing,
# we'll just pass them as-is to the constructor, and hope that you know what you're
# doing.
accepts_kwargs = True
continue
# If the annotation is a compound type like typing.Dict[str, int],
# it will have an __origin__ field indicating `typing.Dict`
# and an __args__ field indicating `(str, int)`. We capture both.
annotation = remove_optional(param.annotation)
explicitly_set = param_name in params
constructed_arg = pop_and_construct_arg(
cls.__name__, param_name, annotation, param.default, params, **extras
)
# If the param wasn't explicitly set in `params` and we just ended up constructing
# the default value for the parameter, we can just omit it.
# Leaving it in can cause issues with **kwargs in some corner cases, where you might end up
# with multiple values for a single parameter (e.g., the default value gives you lazy=False
# for a dataset reader inside **kwargs, but a particular dataset reader actually hard-codes
# lazy=True - the superclass sees both lazy=True and lazy=False in its constructor).
if explicitly_set or constructed_arg is not param.default:
kwargs[param_name] = constructed_arg
if accepts_kwargs:
kwargs.update(params)
else:
params.assert_empty(cls.__name__)
return kwargs
def create_extras(cls: Type[T], extras: Dict[str, Any]) -> Dict[str, Any]:
"""
Given a dictionary of extra arguments, returns a dictionary of
kwargs that actually are a part of the signature of the cls.from_params
(or cls) method.
"""
subextras: Dict[str, Any] = {}
if hasattr(cls, "from_params"):
from_params_method = cls.from_params # type: ignore
else:
# In some rare cases, we get a registered subclass that does _not_ have a
# from_params method (this happens with Activations, for instance, where we
# register pytorch modules directly). This is a bit of a hack to make those work,
# instead of adding a `from_params` method for them somehow. Then the extras
# in the class constructor are what we are looking for, to pass on.
from_params_method = cls
if takes_kwargs(from_params_method):
# If annotation.params accepts **kwargs, we need to pass them all along.
# For example, `BasicTextFieldEmbedder.from_params` requires a Vocabulary
# object, but `TextFieldEmbedder.from_params` does not.
subextras = extras
else:
# Otherwise, only supply the ones that are actual args; any additional ones
# will cause a TypeError.
subextras = {k: v for k, v in extras.items() if takes_arg(from_params_method, k)}
return subextras
def pop_and_construct_arg(
class_name: str, argument_name: str, annotation: Type, default: Any, params: Params, **extras
) -> Any:
"""
Does the work of actually constructing an individual argument for
[`create_kwargs`](./#create_kwargs).
Here we're in the inner loop of iterating over the parameters to a particular constructor,
trying to construct just one of them. The information we get for that parameter is its name,
its type annotation, and its default value; we also get the full set of `Params` for
constructing the object (which we may mutate), and any `extras` that the constructor might
need.
We take the type annotation and default value here separately, instead of using an
`inspect.Parameter` object directly, so that we can handle `Union` types using recursion on
this method, trying the different annotation types in the union in turn.
"""
from allennlp.models.archival import load_archive # import here to avoid circular imports
# We used `argument_name` as the method argument to avoid conflicts with 'name' being a key in
# `extras`, which isn't _that_ unlikely. Now that we are inside the method, we can switch back
# to using `name`.
name = argument_name
# Some constructors expect extra non-parameter items, e.g. vocab: Vocabulary.
# We check the provided `extras` for these and just use them if they exist.
if name in extras:
if name not in params:
return extras[name]
else:
logger.warning(
f"Parameter {name} for class {class_name} was found in both "
"**extras and in params. Using the specification found in params, "
"but you probably put a key in a config file that you didn't need, "
"and if it is different from what we get from **extras, you might "
"get unexpected behavior."
)
# Next case is when argument should be loaded from pretrained archive.
elif (
name in params
and isinstance(params.get(name), Params)
and "_pretrained" in params.get(name)
):
load_module_params = params.pop(name).pop("_pretrained")
archive_file = load_module_params.pop("archive_file")
module_path = load_module_params.pop("module_path")
freeze = load_module_params.pop("freeze", True)
archive = load_archive(archive_file)
result = archive.extract_module(module_path, freeze)
if not isinstance(result, annotation):
raise ConfigurationError(
f"The module from model at {archive_file} at path {module_path} "
f"was expected of type {annotation} but is of type {type(result)}"
)
return result
popped_params = params.pop(name, default) if default != _NO_DEFAULT else params.pop(name)
if popped_params is None:
return None
return construct_arg(class_name, name, popped_params, annotation, default, **extras)
def construct_arg(
class_name: str,
argument_name: str,
popped_params: Params,
annotation: Type,
default: Any,
**extras,
) -> Any:
"""
The first two parameters here are only used for logging if we encounter an error.
"""
origin = getattr(annotation, "__origin__", None)
args = getattr(annotation, "__args__", [])
# The parameter is optional if its default value is not the "no default" sentinel.
optional = default != _NO_DEFAULT
if hasattr(annotation, "from_params"):
if popped_params is default:
return default
elif popped_params is not None:
# Our params have an entry for this, so we use that.
subextras = create_extras(annotation, extras)
# In some cases we allow a string instead of a param dict, so
# we need to handle that case separately.
if isinstance(popped_params, str):
popped_params = Params({"type": popped_params})
elif isinstance(popped_params, dict):
popped_params = Params(popped_params)
return annotation.from_params(params=popped_params, **subextras)
elif not optional:
# Not optional and not supplied, that's an error!
raise ConfigurationError(f"expected key {argument_name} for {class_name}")
else:
return default
# If the parameter type is a Python primitive, just pop it off
# using the correct casting pop_xyz operation.
elif annotation in {int, bool}:
if type(popped_params) in {int, bool}:
return annotation(popped_params)
else:
raise TypeError(f"Expected {argument_name} to be a {annotation.__name__}.")
elif annotation == str:
# Strings are special because we allow casting from Path to str.
if type(popped_params) == str or isinstance(popped_params, Path):
return str(popped_params) # type: ignore
else:
raise TypeError(f"Expected {argument_name} to be a string.")
elif annotation == float:
# Floats are special because in Python, you can put an int wherever you can put a float.
# https://mypy.readthedocs.io/en/stable/duck_type_compatibility.html
if type(popped_params) in {int, float}:
return popped_params
else:
raise TypeError(f"Expected {argument_name} to be numeric.")
# This is special logic for handling types like Dict[str, TokenIndexer],
# List[TokenIndexer], Tuple[TokenIndexer, Tokenizer], and Set[TokenIndexer],
# which it creates by instantiating each value from_params and returning the resulting structure.
elif (
origin in {collections.abc.Mapping, Mapping, Dict, dict}
and len(args) == 2
and can_construct_from_params(args[-1])
):
value_cls = annotation.__args__[-1]
value_dict = {}
for key, value_params in popped_params.items():
value_dict[key] = construct_arg(
str(value_cls),
argument_name + "." + key,
value_params,
value_cls,
_NO_DEFAULT,
**extras,
)
return value_dict
elif origin in (Tuple, tuple) and all(can_construct_from_params(arg) for arg in args):
value_list = []
for i, (value_cls, value_params) in enumerate(zip(annotation.__args__, popped_params)):
value = construct_arg(
str(value_cls),
argument_name + f".{i}",
value_params,
value_cls,
_NO_DEFAULT,
**extras,
)
value_list.append(value)
return tuple(value_list)
elif origin in (Set, set) and len(args) == 1 and can_construct_from_params(args[0]):
value_cls = annotation.__args__[0]
value_set = set()
for i, value_params in enumerate(popped_params):
value = construct_arg(
str(value_cls),
argument_name + f".{i}",
value_params,
value_cls,
_NO_DEFAULT,
**extras,
)
value_set.add(value)
return value_set
elif origin == Union:
# Storing this so we can recover it later if we need to.
backup_params = deepcopy(popped_params)
# We'll try each of the given types in the union sequentially, returning the first one that
# succeeds.
for arg_annotation in args:
try:
return construct_arg(
str(arg_annotation),
argument_name,
popped_params,
arg_annotation,
default,
**extras,
)
except (ValueError, TypeError, ConfigurationError, AttributeError):
# Our attempt to construct the argument may have modified popped_params, so we
# restore it here.
popped_params = deepcopy(backup_params)
# If none of them succeeded, we crash.
raise ConfigurationError(
f"Failed to construct argument {argument_name} with type {annotation}"
)
elif origin == Lazy:
if popped_params is default:
return default
value_cls = args[0]
subextras = create_extras(value_cls, extras)
def constructor(**kwargs):
# If there are duplicate keys between subextras and kwargs, this will overwrite the ones
# in subextras with what's in kwargs. If an argument shows up twice, we should take it
# from what's passed to Lazy.construct() instead of what we got from create_extras().
# Almost certainly these will be identical objects, anyway.
# We do this by constructing a new dictionary, instead of mutating subextras, just in
# case this constructor is called multiple times.
constructor_extras = {**subextras, **kwargs}
return value_cls.from_params(params=deepcopy(popped_params), **constructor_extras)
return Lazy(constructor) # type: ignore
# For any other kind of iterable, we will just assume that a list is good enough, and treat
# it the same as List. This condition needs to be at the end, so we don't catch other kinds
# of Iterables with this branch.
elif (
origin in {collections.abc.Iterable, Iterable, List, list}
and len(args) == 1
and can_construct_from_params(args[0])
):
value_cls = annotation.__args__[0]
value_list = []
for i, value_params in enumerate(popped_params):
value = construct_arg(
str(value_cls),
argument_name + f".{i}",
value_params,
value_cls,
_NO_DEFAULT,
**extras,
)
value_list.append(value)
return value_list
else:
# Pass it on as is and hope for the best. ¯\_(ツ)_/¯
if isinstance(popped_params, Params):
return popped_params.as_dict()
return popped_params
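# Illustrative sketch (editor's addition; `Gaussian` is a hypothetical class): `create_kwargs`
# and `construct_arg` are what let a `FromParams` subclass (defined below) be built directly
# from a `Params` dict, matching keys to constructor arguments by name and casting by type
# annotation.
#
#     class Gaussian(FromParams):
#         def __init__(self, mean: float, variance: float = 1.0) -> None:
#             self.mean = mean
#             self.variance = variance
#
#     gaussian = Gaussian.from_params(Params({"mean": 0.5}))
#     # -> a Gaussian with mean == 0.5 and variance == 1.0 (the default)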
class FromParams:
"""
Mixin to give a from_params method to classes. We create a distinct base class for this
because sometimes we want non-Registrable classes to be instantiatable from_params.
"""
@classmethod
def from_params(
cls: Type[T],
params: Params,
constructor_to_call: Callable[..., T] = None,
constructor_to_inspect: Union[Callable[..., T], Callable[[T], None]] = None,
**extras,
) -> T:
"""
This is the automatic implementation of `from_params`. Any class that subclasses
`FromParams` (or `Registrable`, which itself subclasses `FromParams`) gets this
implementation for free. If you want your class to be instantiated from params in the
"obvious" way -- pop off parameters and hand them to your constructor with the same names --
this provides that functionality.
If you need more complex logic in your from `from_params` method, you'll have to implement
your own method that overrides this one.
The `constructor_to_call` and `constructor_to_inspect` arguments deal with a bit of
redirection that we do. We allow you to register particular `@classmethods` on a class as
the constructor to use for a registered name. This lets you, e.g., have a single
`Vocabulary` class that can be constructed in two different ways, with different names
registered to each constructor. In order to handle this, we need to know not just the class
we're trying to construct (`cls`), but also what method we should inspect to find its
arguments (`constructor_to_inspect`), and what method to call when we're done constructing
arguments (`constructor_to_call`). These two methods are the same when you've used a
`@classmethod` as your constructor, but they are `different` when you use the default
constructor (because you inspect `__init__`, but call `cls()`).
"""
from allennlp.common.registrable import Registrable # import here to avoid circular imports
logger.debug(
f"instantiating class {cls} from params {getattr(params, 'params', params)} "
f"and extras {set(extras.keys())}"
)
if params is None:
return None
if isinstance(params, str):
params = Params({"type": params})
if not isinstance(params, Params):
raise ConfigurationError(
"from_params was passed a `params` object that was not a `Params`. This probably "
"indicates malformed parameters in a configuration file, where something that "
"should have been a dictionary was actually a list, or something else. "
f"This happened when constructing an object of type {cls}."
)
registered_subclasses = Registrable._registry.get(cls)
if is_base_registrable(cls) and registered_subclasses is None:
# NOTE(mattg): There are some potential corner cases in this logic if you have nested
# Registrable types. We don't currently have any of those, but if we ever get them,
# adding some logic to check `constructor_to_call` should solve the issue. Not
# bothering to add that unnecessary complexity for now.
raise ConfigurationError(
"Tried to construct an abstract Registrable base class that has no registered "
"concrete types. This might mean that you need to use --include-package to get "
"your concrete classes actually registered."
)
if registered_subclasses is not None and not constructor_to_call:
# We know `cls` inherits from Registrable, so we'll use a cast to make mypy happy.
as_registrable = cast(Type[Registrable], cls)
default_to_first_choice = as_registrable.default_implementation is not None
choice = params.pop_choice(
"type",
choices=as_registrable.list_available(),
default_to_first_choice=default_to_first_choice,
)
subclass, constructor_name = as_registrable.resolve_class_name(choice)
# See the docstring for an explanation of what's going on here.
if not constructor_name:
constructor_to_inspect = subclass.__init__
constructor_to_call = subclass # type: ignore
else:
constructor_to_inspect = cast(Callable[..., T], getattr(subclass, constructor_name))
constructor_to_call = constructor_to_inspect
if hasattr(subclass, "from_params"):
# We want to call subclass.from_params.
extras = create_extras(subclass, extras)
# mypy can't follow the typing redirection that we do, so we explicitly cast here.
retyped_subclass = cast(Type[T], subclass)
return retyped_subclass.from_params(
params=params,
constructor_to_call=constructor_to_call,
constructor_to_inspect=constructor_to_inspect,
**extras,
)
else:
# In some rare cases, we get a registered subclass that does _not_ have a
# from_params method (this happens with Activations, for instance, where we
# register pytorch modules directly). This is a bit of a hack to make those work,
# instead of adding a `from_params` method for them somehow. We just trust that
# you've done the right thing in passing your parameters, and nothing else needs to
# be recursively constructed.
return subclass(**params) # type: ignore
else:
# This is not a base class, so convert our params and extras into a dict of kwargs.
# See the docstring for an explanation of what's going on here.
if not constructor_to_inspect:
constructor_to_inspect = cls.__init__
if not constructor_to_call:
constructor_to_call = cls
if constructor_to_inspect == object.__init__:
# This class does not have an explicit constructor, so don't give it any kwargs.
# Without this logic, create_kwargs will look at object.__init__ and see that
# it takes *args and **kwargs and look for those.
kwargs: Dict[str, Any] = {}
params.assert_empty(cls.__name__)
else:
# This class has a constructor, so create kwargs for it.
constructor_to_inspect = cast(Callable[..., T], constructor_to_inspect)
kwargs = create_kwargs(constructor_to_inspect, cls, params, **extras)
return constructor_to_call(**kwargs) # type: ignore
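# A minimal, illustrative sketch of the machinery above (the `_Greeter` class and its
# parameters are hypothetical, made up purely for this example): any class that subclasses
# `FromParams` can be built straight from a `Params` object, with constructor arguments
# popped off by name and unspecified arguments falling back to their defaults.
if __name__ == "__main__":
    class _Greeter(FromParams):
        def __init__(self, name: str, repeat: int = 1) -> None:
            self.name = name
            self.repeat = repeat
    # "repeat" is absent from the params, so the default of 1 is used.
    greeter = _Greeter.from_params(Params({"name": "world"}))
    print(greeter.name, greeter.repeat)  # -> world 1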
| allennlp-master | allennlp/common/from_params.py |
"""
Functions and exceptions for checking that
AllenNLP and its models are configured correctly.
"""
import logging
import re
import subprocess
from typing import List, Union
import torch
from torch import cuda
logger = logging.getLogger(__name__)
class ConfigurationError(Exception):
"""
The exception raised by any AllenNLP object when it's misconfigured
(e.g. missing properties, invalid properties, unknown properties).
"""
def __init__(self, message: str):
super().__init__()
self.message = message
def __str__(self):
# TODO(brendanr): Is there some reason why we need repr here? It
# produces horrible output for simple multi-line error messages.
return self.message
class ExperimentalFeatureWarning(RuntimeWarning):
"""
A warning that you are using an experimental feature
that may change or be deleted.
"""
pass
def log_pytorch_version_info():
import torch
logger.info("Pytorch version: %s", torch.__version__)
def check_dimensions_match(
dimension_1: int, dimension_2: int, dim_1_name: str, dim_2_name: str
) -> None:
if dimension_1 != dimension_2:
raise ConfigurationError(
f"{dim_1_name} must match {dim_2_name}, but got {dimension_1} "
f"and {dimension_2} instead"
)
def parse_cuda_device(cuda_device: Union[str, int, List[int]]) -> int:
"""
Disambiguates single GPU and multiple GPU settings for cuda_device param.
"""
message = """
In allennlp 1.0, the Trainer cannot be passed multiple cuda devices.
Instead, use the faster Distributed Data Parallel. For instance, if you previously had config like:
{
"trainer": {
"cuda_device": [0, 1, 2, 3],
"num_epochs": 20,
...
}
}
simply change it to:
{
"distributed": {
"cuda_devices": [0, 1, 2, 3],
},
"trainer": {
"num_epochs": 20,
...
}
}
"""
def from_list(strings):
if len(strings) > 1:
raise ConfigurationError(message)
elif len(strings) == 1:
return int(strings[0])
else:
return -1
if isinstance(cuda_device, str):
return from_list(re.split(r",\s*", cuda_device))
elif isinstance(cuda_device, int):
return cuda_device
elif isinstance(cuda_device, list):
return from_list(cuda_device)
else:
# TODO(brendanr): Determine why mypy can't tell that this matches the Union.
return int(cuda_device) # type: ignore
def check_for_gpu(device: Union[int, torch.device, List[Union[int, torch.device]]]):
if isinstance(device, list):
for did in device:
check_for_gpu(did)
elif device is None:
return
else:
from allennlp.common.util import int_to_device
device = int_to_device(device)
if device != torch.device("cpu"):
num_devices_available = cuda.device_count()
if num_devices_available == 0:
# Torch will give a more informative exception than ours, so we want to include
# that context as well if it's available. For example, if you try to run torch 1.5
# on a machine with CUDA10.1 you'll get the following:
#
# The NVIDIA driver on your system is too old (found version 10010).
#
torch_gpu_error = ""
try:
cuda._check_driver()
except Exception as e:
torch_gpu_error = "\n{0}".format(e)
raise ConfigurationError(
"Experiment specified a GPU but none is available;"
" if you want to run on CPU use the override"
" 'trainer.cuda_device=-1' in the json config file." + torch_gpu_error
)
elif device.index >= num_devices_available:
raise ConfigurationError(
f"Experiment specified GPU device {device.index}"
f" but there are only {num_devices_available} devices "
f" available."
)
def check_for_java() -> bool:
try:
java_version = subprocess.check_output(["java", "-version"], stderr=subprocess.STDOUT)
return "version" in java_version.decode()
except FileNotFoundError:
return False
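# A small, self-contained sketch of how the helpers above behave (the dimension names and
# values are made up for illustration): `parse_cuda_device` normalizes the accepted formats
# down to a single device id, and `check_dimensions_match` raises a `ConfigurationError`
# with a descriptive message when two dimensions disagree.
if __name__ == "__main__":
    assert parse_cuda_device("0") == 0
    assert parse_cuda_device([1]) == 1
    assert parse_cuda_device([]) == -1
    try:
        check_dimensions_match(256, 300, "encoder output dim", "attention input dim")
    except ConfigurationError as err:
        # -> encoder output dim must match attention input dim, but got 256 and 300 instead
        print(err)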
| allennlp-master | allennlp/common/checks.py |
from allennlp.common.from_params import FromParams
from allennlp.common.lazy import Lazy
from allennlp.common.params import Params
from allennlp.common.registrable import Registrable
from allennlp.common.tqdm import Tqdm
from allennlp.common.util import JsonDict
| allennlp-master | allennlp/common/__init__.py |
"""
`allennlp.common.registrable.Registrable` is a "mixin" for endowing
any base class with a named registry for its subclasses and a decorator
for registering them.
"""
from collections import defaultdict
from typing import TypeVar, Type, Callable, Dict, List, Optional, Tuple
import importlib
import logging
from allennlp.common.checks import ConfigurationError
from allennlp.common.from_params import FromParams
logger = logging.getLogger(__name__)
T = TypeVar("T", bound="Registrable")
class Registrable(FromParams):
"""
Any class that inherits from `Registrable` gains access to a named registry for its
subclasses. To register them, just decorate them with the classmethod
`@BaseClass.register(name)`.
After which you can call `BaseClass.list_available()` to get the keys for the
registered subclasses, and `BaseClass.by_name(name)` to get the corresponding subclass.
    Note that the registry stores the subclasses themselves, not class instances.
In most cases you would then call `from_params(params)` on the returned subclass.
You can specify a default by setting `BaseClass.default_implementation`.
If it is set, it will be the first element of `list_available()`.
Note that if you use this class to implement a new `Registrable` abstract class,
you must ensure that all subclasses of the abstract class are loaded when the module is
loaded, because the subclasses register themselves in their respective files. You can
achieve this by having the abstract class and all subclasses in the __init__.py of the
module in which they reside (as this causes any import of either the abstract class or
a subclass to load all other subclasses and the abstract class).
"""
_registry: Dict[Type, Dict[str, Tuple[Type, Optional[str]]]] = defaultdict(dict)
default_implementation: Optional[str] = None
@classmethod
def register(cls: Type[T], name: str, constructor: str = None, exist_ok: bool = False):
"""
Register a class under a particular name.
# Parameters
name : `str`
The name to register the class under.
constructor : `str`, optional (default=`None`)
The name of the method to use on the class to construct the object. If this is given,
we will use this method (which must be a `@classmethod`) instead of the default
constructor.
exist_ok : `bool`, optional (default=`False`)
If True, overwrites any existing models registered under `name`. Else,
throws an error if a model is already registered under `name`.
# Examples
To use this class, you would typically have a base class that inherits from `Registrable`:
```python
class Vocabulary(Registrable):
...
```
Then, if you want to register a subclass, you decorate it like this:
```python
@Vocabulary.register("my-vocabulary")
class MyVocabulary(Vocabulary):
def __init__(self, param1: int, param2: str):
...
```
Registering a class like this will let you instantiate a class from a config file, where you
give `"type": "my-vocabulary"`, and keys corresponding to the parameters of the `__init__`
method (note that for this to work, those parameters must have type annotations).
If you want to have the instantiation from a config file call a method other than the
constructor, either because you have several different construction paths that could be
taken for the same object (as we do in `Vocabulary`) or because you have logic you want to
happen before you get to the constructor (as we do in `Embedding`), you can register a
specific `@classmethod` as the constructor to use, like this:
```python
@Vocabulary.register("my-vocabulary-from-instances", constructor="from_instances")
@Vocabulary.register("my-vocabulary-from-files", constructor="from_files")
class MyVocabulary(Vocabulary):
def __init__(self, some_params):
...
@classmethod
def from_instances(cls, some_other_params) -> MyVocabulary:
... # construct some_params from instances
return cls(some_params)
@classmethod
def from_files(cls, still_other_params) -> MyVocabulary:
... # construct some_params from files
return cls(some_params)
```
"""
registry = Registrable._registry[cls]
def add_subclass_to_registry(subclass: Type[T]):
# Add to registry, raise an error if key has already been used.
if name in registry:
if exist_ok:
message = (
f"{name} has already been registered as {registry[name][0].__name__}, but "
f"exist_ok=True, so overwriting with {cls.__name__}"
)
logger.info(message)
else:
message = (
f"Cannot register {name} as {cls.__name__}; "
f"name already in use for {registry[name][0].__name__}"
)
raise ConfigurationError(message)
registry[name] = (subclass, constructor)
return subclass
return add_subclass_to_registry
@classmethod
def by_name(cls: Type[T], name: str) -> Callable[..., T]:
"""
Returns a callable function that constructs an argument of the registered class. Because
you can register particular functions as constructors for specific names, this isn't
necessarily the `__init__` method of some class.
"""
logger.debug(f"instantiating registered subclass {name} of {cls}")
subclass, constructor = cls.resolve_class_name(name)
if not constructor:
return subclass
else:
return getattr(subclass, constructor)
@classmethod
def resolve_class_name(cls: Type[T], name: str) -> Tuple[Type[T], Optional[str]]:
"""
Returns the subclass that corresponds to the given `name`, along with the name of the
method that was registered as a constructor for that `name`, if any.
This method also allows `name` to be a fully-specified module name, instead of a name that
was already added to the `Registry`. In that case, you cannot use a separate function as
a constructor (as you need to call `cls.register()` in order to tell us what separate
function to use).
"""
if name in Registrable._registry[cls]:
subclass, constructor = Registrable._registry[cls][name]
return subclass, constructor
elif "." in name:
# This might be a fully qualified class name, so we'll try importing its "module"
# and finding it there.
parts = name.split(".")
submodule = ".".join(parts[:-1])
class_name = parts[-1]
try:
module = importlib.import_module(submodule)
except ModuleNotFoundError:
raise ConfigurationError(
f"tried to interpret {name} as a path to a class "
f"but unable to import module {submodule}"
)
try:
subclass = getattr(module, class_name)
constructor = None
return subclass, constructor
except AttributeError:
raise ConfigurationError(
f"tried to interpret {name} as a path to a class "
f"but unable to find class {class_name} in {submodule}"
)
else:
# is not a qualified class name
raise ConfigurationError(
f"{name} is not a registered name for {cls.__name__}. "
"You probably need to use the --include-package flag "
"to load your custom code. Alternatively, you can specify your choices "
"""using fully-qualified paths, e.g. {"model": "my_module.models.MyModel"} """
"in which case they will be automatically imported correctly."
)
@classmethod
def list_available(cls) -> List[str]:
"""List default first if it exists"""
keys = list(Registrable._registry[cls].keys())
default = cls.default_implementation
if default is None:
return keys
elif default not in keys:
raise ConfigurationError(f"Default implementation {default} is not registered")
else:
return [default] + [k for k in keys if k != default]
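# An illustrative sketch (the `Muppet`/`Kermit` classes are hypothetical): a base class gains
# a registry simply by inheriting from `Registrable`, subclasses register themselves under a
# name, and `by_name` / `list_available` look them up again later.
if __name__ == "__main__":
    class Muppet(Registrable):
        pass
    @Muppet.register("frog")
    class Kermit(Muppet):
        pass
    assert Muppet.by_name("frog") is Kermit
    assert Muppet.list_available() == ["frog"]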
| allennlp-master | allennlp/common/registrable.py |
"""
`allennlp.common.tqdm.Tqdm` wraps tqdm so we can add configurable
global defaults for certain tqdm parameters.
"""
import logging
from allennlp.common import logging as common_logging
import sys
from time import time
from typing import Optional
try:
SHELL = str(type(get_ipython())) # type:ignore # noqa: F821
except: # noqa: E722
SHELL = ""
if "zmqshell.ZMQInteractiveShell" in SHELL:
from tqdm import tqdm_notebook as _tqdm
else:
from tqdm import tqdm as _tqdm
# This is necessary to stop tqdm from hanging
# when exceptions are raised inside iterators.
# It should have been fixed in 4.2.1, but it still
# occurs.
# TODO(Mark): Remove this once tqdm cleans up after itself properly.
# https://github.com/tqdm/tqdm/issues/469
_tqdm.monitor_interval = 0
logger = logging.getLogger("tqdm")
logger.propagate = False
def replace_cr_with_newline(message: str) -> str:
"""
TQDM and requests use carriage returns to get the training line to update for each batch
without adding more lines to the terminal output. Displaying those in a file won't work
    correctly, so we'll just make sure that each batch shows up on its own line.
"""
# In addition to carriage returns, nested progress-bars will contain extra new-line
# characters and this special control sequence which tells the terminal to move the
# cursor one line up.
message = message.replace("\r", "").replace("\n", "").replace("[A", "")
if message and message[-1] != "\n":
message += "\n"
return message
class TqdmToLogsWriter(object):
def __init__(self):
self.last_message_written_time = 0.0
def write(self, message):
file_friendly_message: Optional[str] = None
if common_logging.FILE_FRIENDLY_LOGGING:
file_friendly_message = replace_cr_with_newline(message)
if file_friendly_message.strip():
sys.stderr.write(file_friendly_message)
else:
sys.stderr.write(message)
# Every 10 seconds we also log the message.
now = time()
if now - self.last_message_written_time >= 10 or "100%" in message:
if file_friendly_message is None:
file_friendly_message = replace_cr_with_newline(message)
for message in file_friendly_message.split("\n"):
message = message.strip()
if len(message) > 0:
logger.info(message)
self.last_message_written_time = now
def flush(self):
sys.stderr.flush()
class Tqdm:
@staticmethod
def tqdm(*args, **kwargs):
# Use a slower interval when FILE_FRIENDLY_LOGGING is set.
default_mininterval = 2.0 if common_logging.FILE_FRIENDLY_LOGGING else 0.1
new_kwargs = {
"file": TqdmToLogsWriter(),
"mininterval": default_mininterval,
**kwargs,
}
return _tqdm(*args, **new_kwargs)
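# A minimal usage sketch: `Tqdm.tqdm` is intended as a drop-in replacement for `tqdm.tqdm`
# that routes progress output through the logging-friendly writer above; nothing beyond the
# wrapper defined in this module is assumed here.
if __name__ == "__main__":
    total = 0
    for i in Tqdm.tqdm(range(1000), desc="demo"):
        total += i
    print(total)  # -> 499500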
| allennlp-master | allennlp/common/tqdm.py |
"""
Utilities for working with the local dataset cache.
"""
import glob
import os
import logging
import tempfile
import json
from collections import defaultdict
from dataclasses import dataclass, asdict
from datetime import timedelta
from fnmatch import fnmatch
from os import PathLike
from urllib.parse import urlparse
from pathlib import Path
from typing import (
Optional,
Tuple,
Union,
IO,
Callable,
Set,
List,
Iterator,
Iterable,
Dict,
NamedTuple,
)
from hashlib import sha256
from functools import wraps
from zipfile import ZipFile, is_zipfile
import tarfile
import shutil
import time
import boto3
import botocore
from botocore.exceptions import ClientError, EndpointConnectionError
from filelock import FileLock
import requests
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError
from requests.packages.urllib3.util.retry import Retry
from allennlp.common.tqdm import Tqdm
logger = logging.getLogger(__name__)
CACHE_ROOT = Path(os.getenv("ALLENNLP_CACHE_ROOT", Path.home() / ".allennlp"))
CACHE_DIRECTORY = str(CACHE_ROOT / "cache")
DEPRECATED_CACHE_DIRECTORY = str(CACHE_ROOT / "datasets")
# This variable was deprecated in 0.7.2 since we use a single folder for caching
# all types of files (datasets, models, etc.)
DATASET_CACHE = CACHE_DIRECTORY
# Warn if the user is still using the deprecated cache directory.
if os.path.exists(DEPRECATED_CACHE_DIRECTORY):
logger.warning(
f"Deprecated cache directory found ({DEPRECATED_CACHE_DIRECTORY}). "
f"Please remove this directory from your system to free up space."
)
def _resource_to_filename(resource: str, etag: str = None) -> str:
"""
Convert a `resource` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the resource's, delimited
by a period.
"""
resource_bytes = resource.encode("utf-8")
resource_hash = sha256(resource_bytes)
filename = resource_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
return filename
def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:
"""
Return the url and etag (which may be `None`) stored for `filename`.
Raise `FileNotFoundError` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = CACHE_DIRECTORY
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise FileNotFoundError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise FileNotFoundError("file {} not found".format(meta_path))
with open(meta_path) as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag
def check_tarfile(tar_file: tarfile.TarFile):
"""Tar files can contain files outside of the extraction directory, or symlinks that point
    outside the extraction directory. We also don't want any block devices, fifos, or other
weird file types extracted. This checks for those issues and throws an exception if there
is a problem."""
base_path = os.path.join("tmp", "pathtest")
base_path = os.path.normpath(base_path)
def normalize_path(path: str) -> str:
path = path.rstrip("/")
path = path.replace("/", os.sep)
path = os.path.join(base_path, path)
path = os.path.normpath(path)
return path
for tarinfo in tar_file:
if not (
tarinfo.isreg()
or tarinfo.isdir()
or tarinfo.isfile()
or tarinfo.islnk()
or tarinfo.issym()
):
raise ValueError(
f"Tar file {str(tar_file.name)} contains invalid member {tarinfo.name}."
)
target_path = normalize_path(tarinfo.name)
if os.path.commonprefix([base_path, target_path]) != base_path:
raise ValueError(
f"Tar file {str(tar_file.name)} is trying to create a file outside of its extraction directory."
)
if tarinfo.islnk() or tarinfo.issym():
target_path = normalize_path(tarinfo.linkname)
if os.path.commonprefix([base_path, target_path]) != base_path:
raise ValueError(
f"Tar file {str(tar_file.name)} is trying to link to a file "
"outside of its extraction directory."
)
def cached_path(
url_or_filename: Union[str, PathLike],
cache_dir: Union[str, Path] = None,
extract_archive: bool = False,
force_extract: bool = False,
) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
# Parameters
url_or_filename : `Union[str, Path]`
A URL or local file to parse and possibly download.
cache_dir : `Union[str, Path]`, optional (default = `None`)
The directory to cache downloads.
extract_archive : `bool`, optional (default = `False`)
        If `True`, then zip or tar.gz archives will be automatically extracted,
        in which case the path to the extracted directory is returned.
force_extract : `bool`, optional (default = `False`)
If `True` and the file is an archive file, it will be extracted regardless
of whether or not the extracted directory already exists.
"""
if cache_dir is None:
cache_dir = CACHE_DIRECTORY
cache_dir = os.path.expanduser(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
if not isinstance(url_or_filename, str):
url_or_filename = str(url_or_filename)
file_path: str
# If we're using the /a/b/foo.zip!c/d/file.txt syntax, handle it here.
exclamation_index = url_or_filename.find("!")
if extract_archive and exclamation_index >= 0:
archive_path = url_or_filename[:exclamation_index]
file_name = url_or_filename[exclamation_index + 1 :]
# Call 'cached_path' recursively now to get the local path to the archive itself.
cached_archive_path = cached_path(archive_path, cache_dir, True, force_extract)
if not os.path.isdir(cached_archive_path):
raise ValueError(
f"{url_or_filename} uses the ! syntax, but does not specify an archive file."
)
# Now return the full path to the desired file within the extracted archive,
# provided it exists.
file_path = os.path.join(cached_archive_path, file_name)
if not os.path.exists(file_path):
raise FileNotFoundError(f"file {file_name} not found within {archive_path}")
return file_path
parsed = urlparse(url_or_filename)
extraction_path: Optional[str] = None
if parsed.scheme in ("http", "https", "s3"):
# URL, so get it from the cache (downloading if necessary)
file_path = get_from_cache(url_or_filename, cache_dir)
if extract_archive and (is_zipfile(file_path) or tarfile.is_tarfile(file_path)):
# This is the path the file should be extracted to.
# For example ~/.allennlp/cache/234234.21341 -> ~/.allennlp/cache/234234.21341-extracted
extraction_path = file_path + "-extracted"
else:
url_or_filename = os.path.expanduser(url_or_filename)
if os.path.exists(url_or_filename):
# File, and it exists.
file_path = url_or_filename
# Normalize the path.
url_or_filename = os.path.abspath(url_or_filename)
if extract_archive and (is_zipfile(file_path) or tarfile.is_tarfile(file_path)):
            # We'll use a unique directory within the cache root to extract the archive to.
            # The name of the directory is a hash of the resource file path and its modification
# time. That way, if the file changes, we'll know when to extract it again.
extraction_name = (
_resource_to_filename(url_or_filename, str(os.path.getmtime(file_path)))
+ "-extracted"
)
extraction_path = os.path.join(cache_dir, extraction_name)
elif parsed.scheme == "":
# File, but it doesn't exist.
raise FileNotFoundError(f"file {url_or_filename} not found")
else:
# Something unknown
raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")
if extraction_path is not None:
# If the extracted directory already exists (and is non-empty), then no
# need to extract again unless `force_extract=True`.
if os.path.isdir(extraction_path) and os.listdir(extraction_path) and not force_extract:
return extraction_path
# Extract it.
with FileLock(extraction_path + ".lock"):
logger.info("Extracting %s to %s", url_or_filename, extraction_path)
shutil.rmtree(extraction_path, ignore_errors=True)
# We extract first to a temporary directory in case something goes wrong
# during the extraction process so we don't end up with a corrupted cache.
tmp_extraction_dir = tempfile.mkdtemp(dir=os.path.split(extraction_path)[0])
try:
if is_zipfile(file_path):
with ZipFile(file_path, "r") as zip_file:
zip_file.extractall(tmp_extraction_dir)
zip_file.close()
else:
tar_file = tarfile.open(file_path)
check_tarfile(tar_file)
tar_file.extractall(tmp_extraction_dir)
tar_file.close()
# Extraction was successful, rename temp directory to final
# cache directory and dump the meta data.
os.replace(tmp_extraction_dir, extraction_path)
meta = _Meta(
resource=url_or_filename,
cached_path=extraction_path,
creation_time=time.time(),
extraction_dir=True,
size=_get_resource_size(extraction_path),
)
meta.to_file()
finally:
shutil.rmtree(tmp_extraction_dir, ignore_errors=True)
return extraction_path
return file_path
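# Illustrative calls (the URLs below are placeholders, not real resources):
#
#   # Download a remote file (or reuse a cached copy) and get back the local cache path:
#   local_path = cached_path("https://example.com/models/model.tar.gz")
#
#   # Download an archive, extract it, and get back the path to the extracted directory:
#   extracted_dir = cached_path("https://example.com/models/model.tar.gz", extract_archive=True)
#
#   # The "!" syntax selects a single file *inside* an archive:
#   weights_path = cached_path(
#       "https://example.com/models/model.tar.gz!weights.th", extract_archive=True
#   )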
def is_url_or_existing_file(url_or_filename: Union[str, Path, None]) -> bool:
"""
Given something that might be a URL (or might be a local path),
    determine whether it's a URL or an existing file path.
"""
if url_or_filename is None:
return False
url_or_filename = os.path.expanduser(str(url_or_filename))
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https", "s3") or os.path.exists(url_or_filename)
def _split_s3_path(url: str) -> Tuple[str, str]:
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def _s3_request(func: Callable):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(url))
else:
raise
return wrapper
def _get_s3_resource():
session = boto3.session.Session()
if session.get_credentials() is None:
# Use unsigned requests.
s3_resource = session.resource(
"s3", config=botocore.client.Config(signature_version=botocore.UNSIGNED)
)
else:
s3_resource = session.resource("s3")
return s3_resource
@_s3_request
def _s3_etag(url: str) -> Optional[str]:
"""Check ETag on S3 object."""
s3_resource = _get_s3_resource()
bucket_name, s3_path = _split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@_s3_request
def _s3_get(url: str, temp_file: IO) -> None:
"""Pull a file directly from S3."""
s3_resource = _get_s3_resource()
bucket_name, s3_path = _split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def _session_with_backoff() -> requests.Session:
"""
We ran into an issue where http requests to s3 were timing out,
possibly because we were making too many requests too quickly.
This helper function returns a requests session that has retry-with-backoff
built in. See
<https://stackoverflow.com/questions/23267409/how-to-implement-retry-mechanism-into-python-requests-library>.
"""
session = requests.Session()
retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
session.mount("http://", HTTPAdapter(max_retries=retries))
session.mount("https://", HTTPAdapter(max_retries=retries))
return session
def _http_etag(url: str) -> Optional[str]:
with _session_with_backoff() as session:
response = session.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError(
"HEAD request failed for url {} with status code {}".format(url, response.status_code)
)
return response.headers.get("ETag")
def _http_get(url: str, temp_file: IO) -> None:
with _session_with_backoff() as session:
req = session.get(url, stream=True)
content_length = req.headers.get("Content-Length")
total = int(content_length) if content_length is not None else None
progress = Tqdm.tqdm(unit="B", total=total, desc="downloading")
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def _find_latest_cached(url: str, cache_dir: Union[str, Path]) -> Optional[str]:
filename = _resource_to_filename(url)
cache_path = os.path.join(cache_dir, filename)
candidates: List[Tuple[str, float]] = []
for path in glob.glob(cache_path + "*"):
if path.endswith(".json") or path.endswith("-extracted") or path.endswith(".lock"):
continue
mtime = os.path.getmtime(path)
candidates.append((path, mtime))
# Sort candidates by modification time, newest first.
candidates.sort(key=lambda x: x[1], reverse=True)
if candidates:
return candidates[0][0]
return None
class CacheFile:
"""
This is a context manager that makes robust caching easier.
    On `__enter__`, an IO handle to a temporary file is returned, which can
    be treated as if it's the actual cache file.
    On `__exit__`, the temporary file is renamed to the cache file. If anything
goes wrong while writing to the temporary file, it will be removed.
"""
def __init__(
self, cache_filename: Union[Path, str], mode: str = "w+b", suffix: str = ".tmp"
) -> None:
self.cache_filename = (
cache_filename if isinstance(cache_filename, Path) else Path(cache_filename)
)
self.cache_directory = os.path.dirname(self.cache_filename)
self.mode = mode
self.temp_file = tempfile.NamedTemporaryFile(
self.mode, dir=self.cache_directory, delete=False, suffix=suffix
)
def __enter__(self):
return self.temp_file
def __exit__(self, exc_type, exc_value, traceback):
self.temp_file.close()
if exc_value is None:
# Success.
logger.debug(
"Renaming temp file %s to cache at %s", self.temp_file.name, self.cache_filename
)
# Rename the temp file to the actual cache filename.
os.replace(self.temp_file.name, self.cache_filename)
return True
# Something went wrong, remove the temp file.
logger.debug("removing temp file %s", self.temp_file.name)
os.remove(self.temp_file.name)
return False
@dataclass
class _Meta:
"""
Any resource that is downloaded to - or extracted in - the cache directory will
have a meta JSON file written next to it, which corresponds to an instance
of this class.
In older versions of AllenNLP, this meta document just had two fields: 'url' and
'etag'. The 'url' field is now the more general 'resource' field, but these old
meta files are still compatible when a `_Meta` is instantiated with the `.from_path()`
class method.
"""
resource: str
"""
URL or normalized path to the resource.
"""
cached_path: str
"""
Path to the corresponding cached version of the resource.
"""
creation_time: float
"""
The unix timestamp of when the corresponding resource was cached or extracted.
"""
size: int = 0
"""
The size of the corresponding resource, in bytes.
"""
etag: Optional[str] = None
"""
Optional ETag associated with the current cached version of the resource.
"""
extraction_dir: bool = False
"""
    Does this meta correspond to an extraction directory?
"""
def to_file(self) -> None:
with open(self.cached_path + ".json", "w") as meta_file:
json.dump(asdict(self), meta_file)
@classmethod
def from_path(cls, path: Union[str, Path]) -> "_Meta":
path = str(path)
with open(path) as meta_file:
data = json.load(meta_file)
# For backwards compat:
if "resource" not in data:
data["resource"] = data.pop("url")
if "creation_time" not in data:
data["creation_time"] = os.path.getmtime(path[:-5])
if "extraction_dir" not in data and path.endswith("-extracted.json"):
data["extraction_dir"] = True
if "cached_path" not in data:
data["cached_path"] = path[:-5]
if "size" not in data:
data["size"] = _get_resource_size(data["cached_path"])
return cls(**data)
# TODO(joelgrus): do we want to do checksums or anything like that?
def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = CACHE_DIRECTORY
# Get eTag to add to filename, if it exists.
try:
if url.startswith("s3://"):
etag = _s3_etag(url)
else:
etag = _http_etag(url)
except (ConnectionError, EndpointConnectionError):
# We might be offline, in which case we don't want to throw an error
# just yet. Instead, we'll try to use the latest cached version of the
# target resource, if it exists. We'll only throw an exception if we
# haven't cached the resource at all yet.
logger.warning(
"Connection error occurred while trying to fetch ETag for %s. "
"Will attempt to use latest cached version of resource",
url,
)
latest_cached = _find_latest_cached(url, cache_dir)
if latest_cached:
logger.info(
"ETag request failed with connection error, using latest cached "
"version of %s: %s",
url,
latest_cached,
)
return latest_cached
else:
logger.error(
"Connection failed while trying to fetch ETag, "
"and no cached version of %s could be found",
url,
)
raise
except OSError:
# OSError may be triggered if we were unable to fetch the eTag.
# If this is the case, try to proceed without eTag check.
etag = None
filename = _resource_to_filename(url, etag)
# Get cache path to put the file.
cache_path = os.path.join(cache_dir, filename)
# Multiple processes may be trying to cache the same file at once, so we need
# to be a little careful to avoid race conditions. We do this using a lock file.
# Only one process can own this lock file at a time, and a process will block
# on the call to `lock.acquire()` until the process currently holding the lock
# releases it.
logger.debug("waiting to acquire lock on %s", cache_path)
with FileLock(cache_path + ".lock"):
if os.path.exists(cache_path):
logger.info("cache of %s is up-to-date", url)
else:
with CacheFile(cache_path) as cache_file:
logger.info("%s not found in cache, downloading to %s", url, cache_path)
# GET file object
if url.startswith("s3://"):
_s3_get(url, cache_file)
else:
_http_get(url, cache_file)
logger.debug("creating metadata file for %s", cache_path)
meta = _Meta(
resource=url,
cached_path=cache_path,
creation_time=time.time(),
etag=etag,
size=_get_resource_size(cache_path),
)
meta.to_file()
return cache_path
def read_set_from_file(filename: str) -> Set[str]:
"""
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
"""
collection = set()
with open(filename, "r") as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path: str, dot=True, lower: bool = True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
def open_compressed(
filename: Union[str, Path], mode: str = "rt", encoding: Optional[str] = "UTF-8", **kwargs
):
if isinstance(filename, Path):
filename = str(filename)
open_fn: Callable = open
if filename.endswith(".gz"):
import gzip
open_fn = gzip.open
elif filename.endswith(".bz2"):
import bz2
open_fn = bz2.open
return open_fn(filename, mode=mode, encoding=encoding, **kwargs)
def text_lines_from_file(filename: Union[str, Path], strip_lines: bool = True) -> Iterator[str]:
with open_compressed(filename, "rt", encoding="UTF-8", errors="replace") as p:
if strip_lines:
for line in p:
yield line.strip()
else:
yield from p
def json_lines_from_file(filename: Union[str, Path]) -> Iterable[Union[list, dict]]:
return (json.loads(line) for line in text_lines_from_file(filename))
def _get_resource_size(path: str) -> int:
"""
Get the size of a file or directory.
"""
if os.path.isfile(path):
return os.path.getsize(path)
inodes: Set[int] = set()
total_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
            # skip if it is a symbolic link or the same as a file we've already accounted
# for (this could happen with hard links).
inode = os.stat(fp).st_ino
if not os.path.islink(fp) and inode not in inodes:
inodes.add(inode)
total_size += os.path.getsize(fp)
return total_size
class _CacheEntry(NamedTuple):
regular_files: List[_Meta]
extraction_dirs: List[_Meta]
def _find_entries(
patterns: List[str] = None,
cache_dir: Union[str, Path] = None,
) -> Tuple[int, Dict[str, _CacheEntry]]:
"""
Find all cache entries, filtering ones that don't match any of the glob patterns given.
    Returns the total size of the matching entries and a mapping of resource name to meta data.
    The values in the returned mapping are tuples because we separate meta entries that
    correspond to extraction directories from regular cache entries.
"""
cache_dir = os.path.expanduser(cache_dir or CACHE_DIRECTORY)
total_size: int = 0
cache_entries: Dict[str, _CacheEntry] = defaultdict(lambda: _CacheEntry([], []))
for meta_path in glob.glob(str(cache_dir) + "/*.json"):
meta = _Meta.from_path(meta_path)
if patterns and not any(fnmatch(meta.resource, p) for p in patterns):
continue
if meta.extraction_dir:
cache_entries[meta.resource].extraction_dirs.append(meta)
else:
cache_entries[meta.resource].regular_files.append(meta)
total_size += meta.size
# Sort entries for each resource by creation time, newest first.
for entry in cache_entries.values():
entry.regular_files.sort(key=lambda meta: meta.creation_time, reverse=True)
entry.extraction_dirs.sort(key=lambda meta: meta.creation_time, reverse=True)
return total_size, cache_entries
def remove_cache_entries(patterns: List[str], cache_dir: Union[str, Path] = None) -> int:
"""
Remove cache entries matching the given patterns.
Returns the total reclaimed space in bytes.
"""
total_size, cache_entries = _find_entries(patterns=patterns, cache_dir=cache_dir)
for resource, entry in cache_entries.items():
for meta in entry.regular_files:
logger.info("Removing cached version of %s at %s", resource, meta.cached_path)
os.remove(meta.cached_path)
if os.path.exists(meta.cached_path + ".lock"):
os.remove(meta.cached_path + ".lock")
os.remove(meta.cached_path + ".json")
for meta in entry.extraction_dirs:
logger.info("Removing extracted version of %s at %s", resource, meta.cached_path)
shutil.rmtree(meta.cached_path)
if os.path.exists(meta.cached_path + ".lock"):
os.remove(meta.cached_path + ".lock")
os.remove(meta.cached_path + ".json")
return total_size
def inspect_cache(patterns: List[str] = None, cache_dir: Union[str, Path] = None):
"""
Print out useful information about the cache directory.
"""
from allennlp.common.util import format_timedelta, format_size
cache_dir = os.path.expanduser(cache_dir or CACHE_DIRECTORY)
# Gather cache entries by resource.
total_size, cache_entries = _find_entries(patterns=patterns, cache_dir=cache_dir)
if patterns:
print(f"Cached resources matching {patterns}:")
else:
print("Cached resources:")
for resource, entry in sorted(
cache_entries.items(),
# Sort by creation time, latest first.
key=lambda x: max(
0 if not x[1][0] else x[1][0][0].creation_time,
0 if not x[1][1] else x[1][1][0].creation_time,
),
reverse=True,
):
print("\n-", resource)
if entry.regular_files:
td = timedelta(seconds=time.time() - entry.regular_files[0].creation_time)
n_versions = len(entry.regular_files)
size = entry.regular_files[0].size
print(
f" {n_versions} {'versions' if n_versions > 1 else 'version'} cached, "
f"latest {format_size(size)} from {format_timedelta(td)} ago"
)
if entry.extraction_dirs:
td = timedelta(seconds=time.time() - entry.extraction_dirs[0].creation_time)
n_versions = len(entry.extraction_dirs)
size = entry.extraction_dirs[0].size
print(
f" {n_versions} {'versions' if n_versions > 1 else 'version'} extracted, "
f"latest {format_size(size)} from {format_timedelta(td)} ago"
)
print(f"\nTotal size: {format_size(total_size)}")
| allennlp-master | allennlp/common/file_utils.py |
import inspect
from typing import Callable, Generic, TypeVar, Type, Union
from allennlp.common.params import Params
T = TypeVar("T")
class Lazy(Generic[T]):
"""
This class is for use when constructing objects using `FromParams`, when an argument to a
constructor has a _sequential dependency_ with another argument to the same constructor.
For example, in a `Trainer` class you might want to take a `Model` and an `Optimizer` as arguments,
but the `Optimizer` needs to be constructed using the parameters from the `Model`. You can give
the type annotation `Lazy[Optimizer]` to the optimizer argument, then inside the constructor
call `optimizer.construct(parameters=model.parameters)`.
This is only recommended for use when you have registered a `@classmethod` as the constructor
for your class, instead of using `__init__`. Having a `Lazy[]` type annotation on an argument
to an `__init__` method makes your class completely dependent on being constructed using the
`FromParams` pipeline, which is not a good idea.
The actual implementation here is incredibly simple; the logic that handles the lazy
construction is actually found in `FromParams`, where we have a special case for a `Lazy` type
annotation.
```python
@classmethod
def my_constructor(
cls,
some_object: Lazy[MyObject],
optional_object: Lazy[MyObject] = None,
required_object_with_default: Lazy[MyObject] = Lazy(MyObjectDefault),
) -> MyClass:
obj1 = some_object.construct()
obj2 = None if optional_object is None else optional_object.construct()
obj3 = required_object_with_default.construct()
```
"""
def __init__(self, constructor: Union[Type[T], Callable[..., T]]):
constructor_to_use: Callable[..., T]
if inspect.isclass(constructor):
def constructor_to_use(**kwargs):
return constructor.from_params(Params({}), **kwargs) # type: ignore[union-attr]
else:
constructor_to_use = constructor
self._constructor = constructor_to_use
def construct(self, **kwargs) -> T:
return self._constructor(**kwargs)
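# A short, illustrative sketch (the `Dog` class is hypothetical): wrapping a `FromParams`
# class in `Lazy` defers construction until `.construct()` is called, and any keyword
# arguments given there are forwarded on to the underlying constructor.
if __name__ == "__main__":
    from allennlp.common.from_params import FromParams
    class Dog(FromParams):
        def __init__(self, name: str = "Rex") -> None:
            self.name = name
    lazy_dog: Lazy[Dog] = Lazy(Dog)
    print(lazy_dog.construct().name)             # -> Rex (constructed only now, with defaults)
    print(lazy_dog.construct(name="Fido").name)  # -> Fido (kwargs override the defaults)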
| allennlp-master | allennlp/common/lazy.py |
import copy
import json
from os import PathLike
import random
from typing import Any, Dict, Iterable, Set, Union
import torch
import numpy
from numpy.testing import assert_allclose
from allennlp.commands.train import train_model_from_file
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.data import DatasetReader, Vocabulary
from allennlp.data import DataLoader
from allennlp.data.batch import Batch
from allennlp.models import load_archive, Model
from allennlp.training import GradientDescentTrainer
class ModelTestCase(AllenNlpTestCase):
"""
A subclass of [`AllenNlpTestCase`](./test_case.md)
with added methods for testing [`Model`](../../models/model.md) subclasses.
"""
def set_up_model(
self,
param_file: PathLike,
dataset_file: PathLike,
serialization_dir: PathLike = None,
seed: int = None,
):
if seed is not None:
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
self.param_file = str(param_file)
params = Params.from_file(self.param_file)
reader = DatasetReader.from_params(
params["dataset_reader"], serialization_dir=serialization_dir
)
# The dataset reader might be lazy, but a lazy list here breaks some of our tests.
instances = reader.read(str(dataset_file))
# Use parameters for vocabulary if they are present in the config file, so that choices like
# "non_padded_namespaces", "min_count" etc. can be set if needed.
if "vocabulary" in params:
vocab_params = params["vocabulary"]
vocab = Vocabulary.from_params(params=vocab_params, instances=instances)
else:
vocab = Vocabulary.from_instances(instances)
self.vocab = vocab
self.instances = instances
self.instances.index_with(vocab)
self.model = Model.from_params(
vocab=self.vocab, params=params["model"], serialization_dir=serialization_dir
)
# TODO(joelgrus) get rid of these
# (a lot of the model tests use them, so they'll have to be changed)
self.dataset = Batch(list(self.instances))
self.dataset.index_instances(self.vocab)
def ensure_model_can_train_save_and_load(
self,
param_file: Union[PathLike, str],
tolerance: float = 1e-4,
cuda_device: int = -1,
gradients_to_ignore: Set[str] = None,
overrides: str = "",
metric_to_check: str = None,
metric_terminal_value: float = None,
metric_tolerance: float = 1e-4,
disable_dropout: bool = True,
):
"""
# Parameters
param_file : `str`
Path to a training configuration file that we will use to train the model for this
test.
tolerance : `float`, optional (default=`1e-4`)
When comparing model predictions between the originally-trained model and the model
after saving and loading, we will use this tolerance value (passed as `rtol` to
`numpy.testing.assert_allclose`).
cuda_device : `int`, optional (default=`-1`)
The device to run the test on.
gradients_to_ignore : `Set[str]`, optional (default=`None`)
This test runs a gradient check to make sure that we're actually computing gradients
for all of the parameters in the model. If you really want to ignore certain
parameters when doing that check, you can pass their names here. This is not
recommended unless you're `really` sure you don't need to have non-zero gradients for
those parameters (e.g., some of the beam search / state machine models have
infrequently-used parameters that are hard to force the model to use in a small test).
overrides : `str`, optional (default = `""`)
A JSON string that we will use to override values in the input parameter file.
metric_to_check: `str`, optional (default = `None`)
            We may want to automatically perform a check that the model reaches a given metric when
            training (on the validation set, if it is specified). It may be useful in CI, for example.
            You can pass any metric that is in your model's returned metrics.
        metric_terminal_value: `float`, optional (default = `None`)
            When you set `metric_to_check`, you need to set the value this metric must converge to.
        metric_tolerance: `float`, optional (default=`1e-4`)
            Tolerance to check your model metric against the metric terminal value. One can expect some
variance in model metrics when the training process is highly stochastic.
disable_dropout : `bool`, optional (default = `True`)
If True we will set all dropout to 0 before checking gradients. (Otherwise, with small
datasets, you may get zero gradients because of unlucky dropout.)
"""
save_dir = self.TEST_DIR / "save_and_load_test"
archive_file = save_dir / "model.tar.gz"
model = train_model_from_file(param_file, save_dir, overrides=overrides)
assert model is not None
metrics_file = save_dir / "metrics.json"
if metric_to_check is not None:
metrics = json.loads(metrics_file.read_text())
metric_value = metrics.get(f"best_validation_{metric_to_check}") or metrics.get(
f"training_{metric_to_check}"
)
assert metric_value is not None, f"Cannot find {metric_to_check} in metrics.json file"
assert metric_terminal_value is not None, "Please specify metric terminal value"
assert abs(metric_value - metric_terminal_value) < metric_tolerance
archive = load_archive(archive_file, cuda_device=cuda_device)
loaded_model = archive.model
state_keys = model.state_dict().keys()
loaded_state_keys = loaded_model.state_dict().keys()
assert state_keys == loaded_state_keys
# First we make sure that the state dict (the parameters) are the same for both models.
for key in state_keys:
assert_allclose(
model.state_dict()[key].cpu().numpy(),
loaded_model.state_dict()[key].cpu().numpy(),
err_msg=key,
)
reader = archive.dataset_reader
params = Params.from_file(param_file, params_overrides=overrides)
print("Reading with original model")
model_dataset = reader.read(params["validation_data_path"])
model_dataset.index_with(model.vocab)
print("Reading with loaded model")
loaded_dataset = reader.read(params["validation_data_path"])
loaded_dataset.index_with(loaded_model.vocab)
# Need to duplicate params because DataLoader.from_params will consume.
data_loader_params = params["data_loader"]
data_loader_params["shuffle"] = False
data_loader_params2 = Params(copy.deepcopy(data_loader_params.as_dict()))
data_loader = DataLoader.from_params(dataset=model_dataset, params=data_loader_params)
data_loader2 = DataLoader.from_params(dataset=loaded_dataset, params=data_loader_params2)
# We'll check that even if we index the dataset with each model separately, we still get
# the same result out.
model_batch = next(iter(data_loader))
loaded_batch = next(iter(data_loader2))
# Check gradients are None for non-trainable parameters and check that
# trainable parameters receive some gradient if they are trainable.
self.check_model_computes_gradients_correctly(
model, model_batch, gradients_to_ignore, disable_dropout
)
# The datasets themselves should be identical.
assert model_batch.keys() == loaded_batch.keys()
for key in model_batch.keys():
self.assert_fields_equal(model_batch[key], loaded_batch[key], key, 1e-6)
# Set eval mode, to turn off things like dropout, then get predictions.
model.eval()
loaded_model.eval()
# Models with stateful RNNs need their states reset to have consistent
# behavior after loading.
for model_ in [model, loaded_model]:
for module in model_.modules():
if hasattr(module, "stateful") and module.stateful:
module.reset_states()
print("Predicting with original model")
model_predictions = model(**model_batch)
print("Predicting with loaded model")
loaded_model_predictions = loaded_model(**loaded_batch)
# Both outputs should have the same keys and the values for these keys should be close.
for key in model_predictions.keys():
self.assert_fields_equal(
model_predictions[key], loaded_model_predictions[key], name=key, tolerance=tolerance
)
# Check loaded model's loss exists and we can compute gradients, for continuing training.
loaded_model.train()
loaded_model_predictions = loaded_model(**loaded_batch)
loaded_model_loss = loaded_model_predictions["loss"]
assert loaded_model_loss is not None
loaded_model_loss.backward()
return model, loaded_model
def ensure_model_can_train(
self,
trainer: GradientDescentTrainer,
gradients_to_ignore: Set[str] = None,
metric_to_check: str = None,
metric_terminal_value: float = None,
metric_tolerance: float = 1e-4,
disable_dropout: bool = True,
):
"""
A simple test for model training behavior when you are not using configuration files. In
this case, we don't have a story around saving and loading models (you need to handle that
yourself), so we don't have tests for that. We just test that the model can train, and that
it computes gradients for all parameters.
Because the `Trainer` already has a reference to a model and to a data loader, we just take
the `Trainer` object itself, and grab the `Model` and other necessary objects from there.
# Parameters
trainer: `GradientDescentTrainer`
The `Trainer` to use for the test, which already has references to a `Model` and a
`DataLoader`, which we will use in the test.
gradients_to_ignore : `Set[str]`, optional (default=`None`)
This test runs a gradient check to make sure that we're actually computing gradients
for all of the parameters in the model. If you really want to ignore certain
parameters when doing that check, you can pass their names here. This is not
recommended unless you're `really` sure you don't need to have non-zero gradients for
those parameters (e.g., some of the beam search / state machine models have
infrequently-used parameters that are hard to force the model to use in a small test).
metric_to_check: `str`, optional (default = `None`)
            We may want to automatically perform a check that the model reaches a given metric when
            training (on the validation set, if it is specified). It may be useful in CI, for example.
            You can pass any metric that is in your model's returned metrics.
        metric_terminal_value: `float`, optional (default = `None`)
            When you set `metric_to_check`, you need to set the value this metric must converge to.
        metric_tolerance: `float`, optional (default=`1e-4`)
            Tolerance to check your model metric against the metric terminal value. One can expect some
variance in model metrics when the training process is highly stochastic.
disable_dropout : `bool`, optional (default = `True`)
If True we will set all dropout to 0 before checking gradients. (Otherwise, with small
datasets, you may get zero gradients because of unlucky dropout.)
"""
metrics = trainer.train()
if metric_to_check is not None:
metric_value = metrics.get(f"best_validation_{metric_to_check}") or metrics.get(
f"training_{metric_to_check}"
)
assert metric_value is not None, f"Cannot find {metric_to_check} in metrics.json file"
assert metric_terminal_value is not None, "Please specify metric terminal value"
assert abs(metric_value - metric_terminal_value) < metric_tolerance
model_batch = next(iter(trainer.data_loader))
# Check gradients are None for non-trainable parameters and check that
# trainable parameters receive some gradient if they are trainable.
self.check_model_computes_gradients_correctly(
trainer.model, model_batch, gradients_to_ignore, disable_dropout
)
def assert_fields_equal(self, field1, field2, name: str, tolerance: float = 1e-6) -> None:
if isinstance(field1, torch.Tensor):
assert_allclose(
field1.detach().cpu().numpy(),
field2.detach().cpu().numpy(),
rtol=tolerance,
err_msg=name,
)
elif isinstance(field1, dict):
assert field1.keys() == field2.keys()
for key in field1:
self.assert_fields_equal(
field1[key], field2[key], tolerance=tolerance, name=name + "." + str(key)
)
elif isinstance(field1, (list, tuple)):
assert len(field1) == len(field2)
for i, (subfield1, subfield2) in enumerate(zip(field1, field2)):
self.assert_fields_equal(
subfield1, subfield2, tolerance=tolerance, name=name + f"[{i}]"
)
elif isinstance(field1, (float, int)):
assert_allclose([field1], [field2], rtol=tolerance, err_msg=name)
else:
if field1 != field2:
for key in field1.__dict__:
print(key, getattr(field1, key) == getattr(field2, key))
assert field1 == field2, f"{name}, {type(field1)}, {type(field2)}"
@staticmethod
def check_model_computes_gradients_correctly(
model: Model,
model_batch: Dict[str, Union[Any, Dict[str, Any]]],
params_to_ignore: Set[str] = None,
disable_dropout: bool = True,
):
print("Checking gradients")
for p in model.parameters():
p.grad = None
model.train()
original_dropouts: Dict[str, float] = {}
if disable_dropout:
# Remember original dropouts so we can restore them.
for name, module in model.named_modules():
if isinstance(module, torch.nn.Dropout):
original_dropouts[name] = getattr(module, "p")
setattr(module, "p", 0)
result = model(**model_batch)
result["loss"].backward()
has_zero_or_none_grads = {}
for name, parameter in model.named_parameters():
zeros = torch.zeros(parameter.size())
if params_to_ignore and name in params_to_ignore:
continue
if parameter.requires_grad:
if parameter.grad is None:
has_zero_or_none_grads[
name
] = "No gradient computed (i.e parameter.grad is None)"
elif parameter.grad.is_sparse or parameter.grad.data.is_sparse:
pass
# Some parameters will only be partially updated,
# like embeddings, so we just check that any gradient is non-zero.
elif (parameter.grad.cpu() == zeros).all():
has_zero_or_none_grads[
name
] = f"zeros with shape ({tuple(parameter.grad.size())})"
else:
assert parameter.grad is None
if has_zero_or_none_grads:
for name, grad in has_zero_or_none_grads.items():
print(f"Parameter: {name} had incorrect gradient: {grad}")
raise Exception("Incorrect gradients found. See stdout for more info.")
# Now restore dropouts if we disabled them.
if disable_dropout:
for name, module in model.named_modules():
if name in original_dropouts:
setattr(module, "p", original_dropouts[name])
def ensure_batch_predictions_are_consistent(self, keys_to_ignore: Iterable[str] = ()):
"""
Ensures that the model performs the same on a batch of instances as on individual instances.
Ignores metrics matching the regexp .*loss.* and those specified explicitly.
# Parameters
keys_to_ignore : `Iterable[str]`, optional (default=`()`)
Names of metrics that should not be taken into account, e.g. "batch_weight".
"""
self.model.eval()
single_predictions = []
for i, instance in enumerate(self.instances):
dataset = Batch([instance])
tensors = dataset.as_tensor_dict(dataset.get_padding_lengths())
result = self.model(**tensors)
single_predictions.append(result)
full_dataset = Batch(self.instances)
batch_tensors = full_dataset.as_tensor_dict(full_dataset.get_padding_lengths())
batch_predictions = self.model(**batch_tensors)
for i, instance_predictions in enumerate(single_predictions):
for key, single_predicted in instance_predictions.items():
tolerance = 1e-6
if "loss" in key:
# Loss is particularly unstable; we'll just be satisfied if everything else is
# close.
continue
if key in keys_to_ignore:
continue
single_predicted = single_predicted[0]
batch_predicted = batch_predictions[key][i]
if isinstance(single_predicted, torch.Tensor):
if single_predicted.size() != batch_predicted.size():
slices = tuple(slice(0, size) for size in single_predicted.size())
batch_predicted = batch_predicted[slices]
assert_allclose(
single_predicted.data.numpy(),
batch_predicted.data.numpy(),
atol=tolerance,
err_msg=key,
)
else:
assert single_predicted == batch_predicted, key
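# Hypothetical sketch (underscore-prefixed so pytest will not collect it) of how a test
# typically drives the helpers above; the config and data paths are illustrative
# assumptions, not real fixture files.
class _ExampleModelTest(ModelTestCase):
    def setup_method(self):
        super().setup_method()
        self.set_up_model("experiment.jsonnet", "instances.jsonl")
    def test_model_can_train_save_and_load(self):
        self.ensure_model_can_train_save_and_load("experiment.jsonnet")
    def test_batch_predictions_are_consistent(self):
        self.ensure_batch_predictions_are_consistent()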
| allennlp-master | allennlp/common/testing/model_test_case.py |
"""
Utilities and helpers for writing tests.
"""
from typing import Dict, Any, Optional, Union, Tuple, List
import torch
from torch.testing import assert_allclose
import pytest
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.common.testing.model_test_case import ModelTestCase
from allennlp.common.testing.distributed_test import run_distributed_test
from allennlp.training.metrics import Metric
_available_devices = ["cpu"] + (["cuda"] if torch.cuda.is_available() else [])
def multi_device(test_method):
"""
Decorator that provides an argument `device` of type `str` for each available PyTorch device.
"""
return pytest.mark.parametrize("device", _available_devices)(pytest.mark.gpu(test_method))
def requires_gpu(test_method):
"""
Decorator to indicate that a test requires a GPU device.
"""
return pytest.mark.gpu(
pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device registered.")(
test_method
)
)
def requires_multi_gpu(test_method):
"""
Decorator to indicate that a test requires multiple GPU devices.
"""
return pytest.mark.gpu(
pytest.mark.skipif(torch.cuda.device_count() < 2, reason="2 or more GPUs required.")(
test_method
)
)
def cpu_or_gpu(test_method):
"""
    Decorator to indicate that a test should run on both CPU and GPU.
"""
return pytest.mark.gpu(test_method)
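# Hypothetical illustration of how the decorators above are used; the test body is an
# assumption made up for this sketch (underscore-prefixed so pytest will not collect it).
@multi_device
def _example_device_parametrized_test(device: str):
    # Runs once per entry in `_available_devices` ("cpu", and "cuda" when present).
    assert torch.ones(2, 2, device=device).sum().item() == 4.0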
# Helpers for testing distributed metrics
def assert_metrics_values(
metrics: Dict[str, Any],
desired_values: Dict[str, Any],
rtol: float = 0.0001,
atol: float = 1e-05,
):
for key in metrics:
assert_allclose(metrics[key], desired_values[key], rtol=rtol, atol=atol)
def global_distributed_metric(
global_rank: int,
world_size: int,
gpu_id: Union[int, torch.device],
metric: Metric,
metric_kwargs: Dict[str, List[Any]],
desired_values: Dict[str, Any],
exact: Union[bool, Tuple[float, float]] = True,
):
kwargs = {}
# Use the arguments meant for the process with rank `global_rank`.
for argname in metric_kwargs:
kwargs[argname] = metric_kwargs[argname][global_rank]
metric(**kwargs)
metrics = metric.get_metric(False)
if not isinstance(metrics, Dict) and not isinstance(desired_values, Dict):
metrics = {"metric_value": metrics}
desired_values = {"metric_value": desired_values}
    # Call `assert_metrics_values` to check if the metrics have the desired values.
if isinstance(exact, bool):
if exact:
rtol = 0.0
atol = 0.0
else:
rtol = 0.0001
atol = 1e-05
else:
rtol = exact[0]
atol = exact[1]
assert_metrics_values(metrics, desired_values, rtol, atol) # type: ignore
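# Hypothetical sketch of a distributed-metric check built from the helpers above; the
# metric choice, the input tensors, and the expected value of 0.75 are illustrative
# assumptions made up for this example.
def _example_distributed_accuracy_check():
    from allennlp.training.metrics import CategoricalAccuracy
    # Each inner list entry is the argument for one of the two simulated workers.
    metric_kwargs = {
        "predictions": [torch.eye(2), torch.eye(2)],
        "gold_labels": [torch.tensor([0, 1]), torch.tensor([0, 0])],
    }
    # Worker 0 gets both predictions right, worker 1 gets one right: 3 / 4 = 0.75 overall.
    run_distributed_test(
        [-1, -1],
        global_distributed_metric,
        CategoricalAccuracy(),
        metric_kwargs,
        desired_values=0.75,
        exact=False,
    )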
| allennlp-master | allennlp/common/testing/__init__.py |
import logging
import os
import pathlib
import shutil
import tempfile
from allennlp.common.checks import log_pytorch_version_info
TEST_DIR = tempfile.mkdtemp(prefix="allennlp_tests")
class AllenNlpTestCase:
"""
A custom testing class that disables some of the more verbose AllenNLP
logging and that creates and destroys a temp directory as a test fixture.
"""
PROJECT_ROOT = (pathlib.Path(__file__).parent / ".." / ".." / "..").resolve()
MODULE_ROOT = PROJECT_ROOT / "allennlp"
TOOLS_ROOT = MODULE_ROOT / "tools"
TESTS_ROOT = PROJECT_ROOT / "tests"
FIXTURES_ROOT = PROJECT_ROOT / "test_fixtures"
def setup_method(self):
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.DEBUG
)
# Disabling some of the more verbose logging statements that typically aren't very helpful
# in tests.
logging.getLogger("allennlp.common.params").disabled = True
logging.getLogger("allennlp.nn.initializers").disabled = True
logging.getLogger("allennlp.modules.token_embedders.embedding").setLevel(logging.INFO)
logging.getLogger("urllib3.connectionpool").disabled = True
log_pytorch_version_info()
self.TEST_DIR = pathlib.Path(TEST_DIR)
os.makedirs(self.TEST_DIR, exist_ok=True)
def teardown_method(self):
shutil.rmtree(self.TEST_DIR)
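# Hypothetical sketch (underscore-prefixed so pytest will not collect it) of a test class
# that relies on the temp-directory fixture above; the file name is an assumption.
class _ExampleTestCase(AllenNlpTestCase):
    def test_writes_to_temp_dir(self):
        (self.TEST_DIR / "output.txt").write_text("ok")
        assert (self.TEST_DIR / "output.txt").exists()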
| allennlp-master | allennlp/common/testing/test_case.py |
from allennlp.predictors import TextClassifierPredictor
from allennlp.models.model import Model
import torch
class FakeModelForTestingInterpret(Model):
def __init__(self, vocab, max_tokens=7, num_labels=2):
super().__init__(vocab)
self._max_tokens = max_tokens
self.embedder = torch.nn.Embedding(vocab.get_vocab_size(), 16)
self.linear = torch.nn.Linear(max_tokens * 16, num_labels)
self._loss = torch.nn.CrossEntropyLoss()
def forward(self, tokens, label=None):
tokens = tokens["tokens"]["tokens"][:, 0 : self._max_tokens]
embedded = self.embedder(tokens)
logits = self.linear(torch.flatten(embedded).unsqueeze(0))
probs = torch.nn.functional.softmax(logits, dim=-1)
output_dict = {"logits": logits, "probs": probs}
if label is not None:
output_dict["loss"] = self._loss(logits, label.long().view(-1))
return output_dict
def make_output_human_readable(self, output_dict):
preds = output_dict["probs"]
if len(preds.shape) == 1:
output_dict["probs"] = preds.unsqueeze(0)
output_dict["logits"] = output_dict["logits"].unsqueeze(0)
classes = []
for prediction in output_dict["probs"]:
label_idx = prediction.argmax(dim=-1).item()
output_dict["loss"] = self._loss(output_dict["logits"], torch.LongTensor([label_idx]))
label_str = str(label_idx)
classes.append(label_str)
output_dict["label"] = classes
return output_dict
class FakePredictorForTestingInterpret(TextClassifierPredictor):
def get_interpretable_layer(self):
return self._model.embedder
def get_interpretable_text_field_embedder(self):
return self._model.embedder
| allennlp-master | allennlp/common/testing/interpret_test.py |
import datetime
from typing import List, Dict, Any, Tuple, Callable
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from allennlp.common.checks import check_for_gpu
def init_process(
process_rank: int,
world_size: int,
distributed_device_ids: List[int],
func: Callable,
func_args: Tuple = None,
func_kwargs: Dict[str, Any] = None,
master_addr: str = "127.0.0.1",
master_port: int = 29500,
):
assert world_size > 1
global_rank = process_rank
gpu_id = distributed_device_ids[process_rank] # type: ignore
if gpu_id >= 0:
torch.cuda.set_device(int(gpu_id))
dist.init_process_group(
backend="nccl",
init_method=f"tcp://{master_addr}:{master_port}",
world_size=world_size,
rank=global_rank,
)
else:
dist.init_process_group(
backend="gloo",
init_method=f"tcp://{master_addr}:{master_port}",
world_size=world_size,
rank=global_rank,
timeout=datetime.timedelta(seconds=120),
)
func(global_rank, world_size, gpu_id, *(func_args or []), **(func_kwargs or {}))
dist.barrier()
def run_distributed_test(
device_ids: List[int] = None,
func: Callable = None,
*args,
**kwargs,
):
"""
This runs the `func` in a simulated distributed environment.
# Parameters
device_ids: `List[int]`
List of devices. There need to be at least 2 devices. Default is [-1, -1].
func: `Callable`
`func` needs to be global for spawning the processes, so that it can be pickled.
"""
device_ids = device_ids or [-1, -1]
check_for_gpu(device_ids)
# "fork" start method is the default and should be preferred, except when we're
# running the tests on GPU, in which case we need to use "spawn".
start_method = "spawn" if any(x >= 0 for x in device_ids) else "fork"
nprocs = world_size = len(device_ids)
mp.start_processes(
init_process,
args=(world_size, device_ids, func, args, kwargs),
nprocs=nprocs,
start_method=start_method,
)
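# Hypothetical sketch of a worker for `run_distributed_test`; it must live at module scope
# so it can be pickled under the "spawn" start method. The all-reduce check below is an
# illustrative assumption, not an existing AllenNLP test.
def _example_all_reduce_worker(global_rank: int, world_size: int, gpu_id: int):
    total = torch.ones(1)
    dist.all_reduce(total)  # sums the per-process tensors in place
    assert int(total.item()) == world_size
# Typical invocation with two CPU processes: run_distributed_test([-1, -1], _example_all_reduce_worker)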
| allennlp-master | allennlp/common/testing/distributed_test.py |
from allennlp.interpret.attackers.attacker import Attacker
from allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter
| allennlp-master | allennlp/interpret/__init__.py |
import math
from typing import List, Dict, Any
import numpy
import torch
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance
from allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter
from allennlp.nn import util
@SaliencyInterpreter.register("integrated-gradient")
class IntegratedGradient(SaliencyInterpreter):
"""
Interprets the prediction using Integrated Gradients (https://arxiv.org/abs/1703.01365)
Registered as a `SaliencyInterpreter` with name "integrated-gradient".
"""
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
# Convert inputs to labeled instances
labeled_instances = self.predictor.json_to_labeled_instances(inputs)
instances_with_grads = dict()
for idx, instance in enumerate(labeled_instances):
# Run integrated gradients
grads = self._integrate_gradients(instance)
# Normalize results
for key, grad in grads.items():
# The [0] here is undo-ing the batching that happens in get_gradients.
embedding_grad = numpy.sum(grad[0], axis=1)
norm = numpy.linalg.norm(embedding_grad, ord=1)
normalized_grad = [math.fabs(e) / norm for e in embedding_grad]
grads[key] = normalized_grad
instances_with_grads["instance_" + str(idx + 1)] = grads
return sanitize(instances_with_grads)
    def _register_hooks(self, alpha: float, embeddings_list: List, token_offsets: List):
"""
Register a forward hook on the embedding layer which scales the embeddings by alpha. Used
for one term in the Integrated Gradients sum.
We store the embedding output into the embeddings_list when alpha is zero. This is used
later to element-wise multiply the input by the averaged gradients.
"""
def forward_hook(module, inputs, output):
            # Save the unscaled embedding output for later use; only do so on the first call (alpha == 0).
if alpha == 0:
embeddings_list.append(output.squeeze(0).clone().detach())
# Scale the embedding by alpha
output.mul_(alpha)
def get_token_offsets(module, inputs, outputs):
offsets = util.get_token_offsets_from_text_field_inputs(inputs)
if offsets is not None:
token_offsets.append(offsets)
# Register the hooks
handles = []
embedding_layer = self.predictor.get_interpretable_layer()
handles.append(embedding_layer.register_forward_hook(forward_hook))
text_field_embedder = self.predictor.get_interpretable_text_field_embedder()
handles.append(text_field_embedder.register_forward_hook(get_token_offsets))
return handles
def _integrate_gradients(self, instance: Instance) -> Dict[str, numpy.ndarray]:
"""
Returns integrated gradients for the given [`Instance`](../../data/instance.md)
"""
ig_grads: Dict[str, Any] = {}
# List of Embedding inputs
embeddings_list: List[torch.Tensor] = []
token_offsets: List[torch.Tensor] = []
# Use 10 terms in the summation approximation of the integral in integrated grad
steps = 10
# Exclude the endpoint because we do a left point integral approximation
for alpha in numpy.linspace(0, 1.0, num=steps, endpoint=False):
handles = []
# Hook for modifying embedding value
handles = self._register_hooks(alpha, embeddings_list, token_offsets)
try:
grads = self.predictor.get_gradients([instance])[0]
finally:
for handle in handles:
handle.remove()
# Running sum of gradients
if ig_grads == {}:
ig_grads = grads
else:
for key in grads.keys():
ig_grads[key] += grads[key]
# Average of each gradient term
for key in ig_grads.keys():
ig_grads[key] /= steps
# Gradients come back in the reverse order that they were sent into the network
embeddings_list.reverse()
token_offsets.reverse()
embeddings_list = self._aggregate_token_embeddings(embeddings_list, token_offsets)
# Element-wise multiply average gradient by the input
for idx, input_embedding in enumerate(embeddings_list):
key = "grad_input_" + str(idx + 1)
ig_grads[key] *= input_embedding
return ig_grads
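# Hypothetical usage sketch, not part of the AllenNLP API surface: the archive path
# "model.tar.gz", the predictor name "text_classifier", and the input sentence are all
# illustrative assumptions.
def _example_integrated_gradient_usage():
    from allennlp.models.archival import load_archive
    from allennlp.predictors import Predictor
    archive = load_archive("model.tar.gz")
    predictor = Predictor.from_archive(archive, "text_classifier")
    interpreter = IntegratedGradient(predictor)
    # Returns something like {"instance_1": {"grad_input_1": [...normalized saliency per token...]}}.
    return interpreter.saliency_interpret_from_json({"sentence": "a very good movie"})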
| allennlp-master | allennlp/interpret/saliency_interpreters/integrated_gradient.py |
from typing import List
import numpy
import torch
from allennlp.common import Registrable
from allennlp.common.util import JsonDict
from allennlp.nn import util
from allennlp.predictors import Predictor
class SaliencyInterpreter(Registrable):
"""
A `SaliencyInterpreter` interprets an AllenNLP Predictor's outputs by assigning a saliency
score to each input token.
"""
def __init__(self, predictor: Predictor) -> None:
self.predictor = predictor
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
"""
This function finds saliency values for each input token.
# Parameters
inputs : `JsonDict`
The input you want to interpret (the same as the argument to a Predictor, e.g., predict_json()).
# Returns
interpretation : `JsonDict`
Contains the normalized saliency values for each input token. The dict has entries for
each instance in the inputs JsonDict, e.g., `{instance_1: ..., instance_2:, ... }`.
Each one of those entries has entries for the saliency of the inputs, e.g.,
`{grad_input_1: ..., grad_input_2: ... }`.
"""
raise NotImplementedError("Implement this for saliency interpretations")
@staticmethod
def _aggregate_token_embeddings(
embeddings_list: List[torch.Tensor], token_offsets: List[torch.Tensor]
) -> List[numpy.ndarray]:
if len(token_offsets) == 0:
return [embeddings.numpy() for embeddings in embeddings_list]
aggregated_embeddings = []
# NOTE: This is assuming that embeddings and offsets come in the same order, which may not
# be true. But, the intersection of using multiple TextFields with mismatched indexers is
# currently zero, so we'll delay handling this corner case until it actually causes a
# problem. In practice, both of these lists will always be of size one at the moment.
for embeddings, offsets in zip(embeddings_list, token_offsets):
span_embeddings, span_mask = util.batched_span_select(embeddings.contiguous(), offsets)
span_mask = span_mask.unsqueeze(-1)
span_embeddings *= span_mask # zero out paddings
span_embeddings_sum = span_embeddings.sum(2)
span_embeddings_len = span_mask.sum(2)
# Shape: (batch_size, num_orig_tokens, embedding_size)
embeddings = span_embeddings_sum / torch.clamp_min(span_embeddings_len, 1)
# All the places where the span length is zero, write in zeros.
embeddings[(span_embeddings_len == 0).expand(embeddings.shape)] = 0
aggregated_embeddings.append(embeddings.numpy())
return aggregated_embeddings
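# Hypothetical sketch of a minimal subclass, registered under an assumed name "uniform";
# it assigns equal saliency to every token and exists only to show the expected output
# structure ({"instance_N": {"grad_input_1": [...]}}).
@SaliencyInterpreter.register("uniform")
class _UniformSaliencyInterpreter(SaliencyInterpreter):
    def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
        instances = self.predictor.json_to_labeled_instances(inputs)
        interpretation: JsonDict = {}
        for idx, instance in enumerate(instances):
            num_tokens = instance["tokens"].sequence_length()  # type: ignore # assumes a TextField named "tokens"
            interpretation["instance_" + str(idx + 1)] = {
                "grad_input_1": [1.0 / num_tokens] * num_tokens
            }
        return interpretation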
| allennlp-master | allennlp/interpret/saliency_interpreters/saliency_interpreter.py |
import math
from typing import Dict, Any
import numpy
import torch
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance
from allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter
from allennlp.predictors import Predictor
@SaliencyInterpreter.register("smooth-gradient")
class SmoothGradient(SaliencyInterpreter):
"""
Interprets the prediction using SmoothGrad (https://arxiv.org/abs/1706.03825)
Registered as a `SaliencyInterpreter` with name "smooth-gradient".
"""
def __init__(self, predictor: Predictor) -> None:
super().__init__(predictor)
# Hyperparameters
self.stdev = 0.01
self.num_samples = 10
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
# Convert inputs to labeled instances
labeled_instances = self.predictor.json_to_labeled_instances(inputs)
instances_with_grads = dict()
for idx, instance in enumerate(labeled_instances):
# Run smoothgrad
grads = self._smooth_grads(instance)
# Normalize results
for key, grad in grads.items():
                # TODO (@Eric-Wallace): SmoothGrad does not multiply the gradient by the input
                # (the "times input" normalization). Fine for now, but should fix for consistency.
# The [0] here is undo-ing the batching that happens in get_gradients.
embedding_grad = numpy.sum(grad[0], axis=1)
norm = numpy.linalg.norm(embedding_grad, ord=1)
normalized_grad = [math.fabs(e) / norm for e in embedding_grad]
grads[key] = normalized_grad
instances_with_grads["instance_" + str(idx + 1)] = grads
return sanitize(instances_with_grads)
def _register_forward_hook(self, stdev: float):
"""
Register a forward hook on the embedding layer which adds random noise to every embedding.
Used for one term in the SmoothGrad sum.
"""
def forward_hook(module, inputs, output):
# Random noise = N(0, stdev * (max-min))
scale = output.detach().max() - output.detach().min()
noise = torch.randn(output.shape, device=output.device) * stdev * scale
# Add the random noise
output.add_(noise)
# Register the hook
embedding_layer = self.predictor.get_interpretable_layer()
handle = embedding_layer.register_forward_hook(forward_hook)
return handle
def _smooth_grads(self, instance: Instance) -> Dict[str, numpy.ndarray]:
total_gradients: Dict[str, Any] = {}
for _ in range(self.num_samples):
handle = self._register_forward_hook(self.stdev)
try:
grads = self.predictor.get_gradients([instance])[0]
finally:
handle.remove()
# Sum gradients
if total_gradients == {}:
total_gradients = grads
else:
for key in grads.keys():
total_gradients[key] += grads[key]
# Average the gradients
for key in total_gradients.keys():
total_gradients[key] /= self.num_samples
return total_gradients
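# Standalone numeric sketch of the noise term added in `forward_hook` above: noise is
# drawn from N(0, stdev * (max - min)) of the embedding output. The tensor shape below is
# a toy stand-in for a (batch, num_tokens, embedding_dim) embedding output.
def _example_noise_term(stdev: float = 0.01) -> torch.Tensor:
    output = torch.randn(1, 5, 16)
    scale = output.max() - output.min()  # dynamic range of the embeddings
    return torch.randn_like(output) * stdev * scale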
| allennlp-master | allennlp/interpret/saliency_interpreters/smooth_gradient.py |
import math
from typing import List
import numpy
import torch
from allennlp.common.util import JsonDict, sanitize
from allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter
from allennlp.nn import util
@SaliencyInterpreter.register("simple-gradient")
class SimpleGradient(SaliencyInterpreter):
"""
Registered as a `SaliencyInterpreter` with name "simple-gradient".
"""
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
"""
Interprets the model's prediction for inputs. Gets the gradients of the logits with respect
to the input and returns those gradients normalized and sanitized.
"""
labeled_instances = self.predictor.json_to_labeled_instances(inputs)
instances_with_grads = dict()
for idx, instance in enumerate(labeled_instances):
# List of embedding inputs, used for multiplying gradient by the input for normalization
embeddings_list: List[torch.Tensor] = []
token_offsets: List[torch.Tensor] = []
# Hook used for saving embeddings
handles = self._register_hooks(embeddings_list, token_offsets)
try:
grads = self.predictor.get_gradients([instance])[0]
finally:
for handle in handles:
handle.remove()
# Gradients come back in the reverse order that they were sent into the network
embeddings_list.reverse()
token_offsets.reverse()
embeddings_list = self._aggregate_token_embeddings(embeddings_list, token_offsets)
for key, grad in grads.items():
# Get number at the end of every gradient key (they look like grad_input_[int],
# we're getting this [int] part and subtracting 1 for zero-based indexing).
# This is then used as an index into the reversed input array to match up the
# gradient and its respective embedding.
input_idx = int(key[-1]) - 1
# The [0] here is undo-ing the batching that happens in get_gradients.
emb_grad = numpy.sum(grad[0] * embeddings_list[input_idx][0], axis=1)
norm = numpy.linalg.norm(emb_grad, ord=1)
normalized_grad = [math.fabs(e) / norm for e in emb_grad]
grads[key] = normalized_grad
instances_with_grads["instance_" + str(idx + 1)] = grads
return sanitize(instances_with_grads)
def _register_hooks(self, embeddings_list: List, token_offsets: List):
"""
Finds all of the TextFieldEmbedders, and registers a forward hook onto them. When forward()
is called, embeddings_list is filled with the embedding values. This is necessary because
our normalization scheme multiplies the gradient by the embedding value.
"""
def forward_hook(module, inputs, output):
embeddings_list.append(output.squeeze(0).clone().detach())
def get_token_offsets(module, inputs, outputs):
offsets = util.get_token_offsets_from_text_field_inputs(inputs)
if offsets is not None:
token_offsets.append(offsets)
# Register the hooks
handles = []
embedding_layer = self.predictor.get_interpretable_layer()
handles.append(embedding_layer.register_forward_hook(forward_hook))
text_field_embedder = self.predictor.get_interpretable_text_field_embedder()
handles.append(text_field_embedder.register_forward_hook(get_token_offsets))
return handles
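# Toy numeric sketch of the "gradient times input" saliency computed above, decoupled
# from any model: the gradient and embedding values are fabricated stand-ins with shape
# (num_tokens, embedding_dim).
def _example_grad_times_input_saliency() -> List[float]:
    grad = numpy.array([[0.1, -0.2], [0.3, 0.4], [0.0, -0.1]])
    embedding = numpy.array([[1.0, 2.0], [0.5, 0.5], [2.0, 1.0]])
    scores = numpy.sum(grad * embedding, axis=1)  # dot product per token
    norm = numpy.linalg.norm(scores, ord=1)
    return [math.fabs(s) / norm for s in scores]  # absolute saliency values sum to 1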
| allennlp-master | allennlp/interpret/saliency_interpreters/simple_gradient.py |
from allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter
from allennlp.interpret.saliency_interpreters.simple_gradient import SimpleGradient
from allennlp.interpret.saliency_interpreters.integrated_gradient import IntegratedGradient
from allennlp.interpret.saliency_interpreters.smooth_gradient import SmoothGradient
| allennlp-master | allennlp/interpret/saliency_interpreters/__init__.py |
from allennlp.interpret.attackers.attacker import Attacker
from allennlp.interpret.attackers.input_reduction import InputReduction
from allennlp.interpret.attackers.hotflip import Hotflip
| allennlp-master | allennlp/interpret/attackers/__init__.py |
from allennlp.common.util import JsonDict
from allennlp.data import Instance
def get_fields_to_compare(
inputs: JsonDict, instance: Instance, input_field_to_attack: str
) -> JsonDict:
"""
Gets a list of the fields that should be checked for equality after an attack is performed.
# Parameters
inputs : `JsonDict`
The input you want to attack, similar to the argument to a Predictor, e.g., predict_json().
instance : `Instance`
A labeled instance that is output from json_to_labeled_instances().
input_field_to_attack : `str`
The key in the inputs JsonDict you want to attack, e.g., tokens.
# Returns
fields : `JsonDict`
The fields that must be compared for equality.
"""
# TODO(mattg): this really should live on the Predictor. We have some messy stuff for, e.g.,
# reading comprehension models, and the interpret code can't really know about the internals of
# that (or at least it shouldn't now, and once we split out the reading comprehension repo, it
# really *can't*).
fields_to_compare = {
key: instance[key]
for key in instance.fields
if key not in inputs
and key != input_field_to_attack
and key != "metadata"
and key != "output"
}
return fields_to_compare
def instance_has_changed(instance: Instance, fields_to_compare: JsonDict):
if "clusters" in fields_to_compare:
# Coref needs a special case here, apparently. I (mattg) am not sure why the check below
# doesn't catch this case; TODO: look into this.
original_clusters = set(tuple(x) for x in fields_to_compare["clusters"])
new_clusters = set(tuple(x) for x in instance["clusters"]) # type: ignore
return original_clusters != new_clusters
if any(instance[field] != fields_to_compare[field] for field in fields_to_compare):
return True
return False
| allennlp-master | allennlp/interpret/attackers/utils.py |
from copy import deepcopy
from typing import Dict, List, Tuple
import numpy
import torch
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance, Token
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import (
ELMoTokenCharactersIndexer,
TokenCharactersIndexer,
SingleIdTokenIndexer,
)
from allennlp.interpret.attackers import utils
from allennlp.interpret.attackers.attacker import Attacker
from allennlp.modules.token_embedders import Embedding
from allennlp.nn import util
from allennlp.predictors.predictor import Predictor
DEFAULT_IGNORE_TOKENS = ["@@NULL@@", ".", ",", ";", "!", "?", "[MASK]", "[SEP]", "[CLS]"]
@Attacker.register("hotflip")
class Hotflip(Attacker):
"""
    Runs the HotFlip-style attack at the word level (https://arxiv.org/abs/1712.06751). We use the
first-order taylor approximation described in https://arxiv.org/abs/1903.06620, in the function
`_first_order_taylor()`.
We try to re-use the embedding matrix from the model when deciding what other words to flip a
token to. For a large class of models, this is straightforward. When there is a
character-level encoder, however (e.g., with ELMo, any char-CNN, etc.), or a combination of
encoders (e.g., ELMo + glove), we need to construct a fake embedding matrix that we can use in
`_first_order_taylor()`. We do this by getting a list of words from the model's vocabulary
and embedding them using the encoder. This can be expensive, both in terms of time and memory
usage, so we take a `max_tokens` parameter to limit the size of this fake embedding matrix.
This also requires a model to `have` a token vocabulary in the first place, which can be
problematic for models that only have character vocabularies.
Registered as an `Attacker` with name "hotflip".
# Parameters
predictor : `Predictor`
The model (inside a Predictor) that we're attacking. We use this to get gradients and
predictions.
vocab_namespace : `str`, optional (default=`'tokens'`)
We use this to know three things: (1) which tokens we should ignore when producing flips
(we don't consider non-alphanumeric tokens); (2) what the string value is of the token that
we produced, so we can show something human-readable to the user; and (3) if we need to
construct a fake embedding matrix, we use the tokens in the vocabulary as flip candidates.
max_tokens : `int`, optional (default=`5000`)
This is only used when we need to construct a fake embedding matrix. That matrix can take
a lot of memory when the vocab size is large. This parameter puts a cap on the number of
tokens to use, so the fake embedding matrix doesn't take as much memory.
"""
def __init__(
self, predictor: Predictor, vocab_namespace: str = "tokens", max_tokens: int = 5000
) -> None:
super().__init__(predictor)
self.vocab = self.predictor._model.vocab
self.namespace = vocab_namespace
# Force new tokens to be alphanumeric
self.max_tokens = max_tokens
self.invalid_replacement_indices: List[int] = []
for i in self.vocab._index_to_token[self.namespace]:
if not self.vocab._index_to_token[self.namespace][i].isalnum():
self.invalid_replacement_indices.append(i)
self.embedding_matrix: torch.Tensor = None
self.embedding_layer: torch.nn.Module = None
# get device number
self.cuda_device = predictor.cuda_device
def initialize(self):
"""
Call this function before running attack_from_json(). We put the call to
`_construct_embedding_matrix()` in this function to prevent a large amount of compute
being done when __init__() is called.
"""
if self.embedding_matrix is None:
self.embedding_matrix = self._construct_embedding_matrix()
def _construct_embedding_matrix(self) -> Embedding:
"""
For HotFlip, we need a word embedding matrix to search over. The below is necessary for
models such as ELMo, character-level models, or for models that use a projection layer
after their word embeddings.
We run all of the tokens from the vocabulary through the TextFieldEmbedder, and save the
final output embedding. We then group all of those output embeddings into an "embedding
matrix".
"""
embedding_layer = self.predictor.get_interpretable_layer()
self.embedding_layer = embedding_layer
if isinstance(embedding_layer, (Embedding, torch.nn.modules.sparse.Embedding)):
            # If we're using something that already has only a single embedding matrix, we can
            # just use that and bypass this method.
return embedding_layer.weight
# We take the top `self.max_tokens` as candidates for hotflip. Because we have to
# construct a new vector for each of these, we can't always afford to use the whole vocab,
# for both runtime and memory considerations.
all_tokens = list(self.vocab._token_to_index[self.namespace])[: self.max_tokens]
max_index = self.vocab.get_token_index(all_tokens[-1], self.namespace)
self.invalid_replacement_indices = [
i for i in self.invalid_replacement_indices if i < max_index
]
inputs = self._make_embedder_input(all_tokens)
        # Pass all tokens through the embedding layer and stack the outputs into the fake embedding matrix.
embedding_matrix = embedding_layer(inputs).squeeze()
return embedding_matrix
def _make_embedder_input(self, all_tokens: List[str]) -> Dict[str, torch.Tensor]:
inputs = {}
# A bit of a hack; this will only work with some dataset readers, but it'll do for now.
indexers = self.predictor._dataset_reader._token_indexers # type: ignore
for indexer_name, token_indexer in indexers.items():
if isinstance(token_indexer, SingleIdTokenIndexer):
all_indices = [
self.vocab._token_to_index[self.namespace][token] for token in all_tokens
]
inputs[indexer_name] = {"tokens": torch.LongTensor(all_indices).unsqueeze(0)}
elif isinstance(token_indexer, TokenCharactersIndexer):
tokens = [Token(x) for x in all_tokens]
max_token_length = max(len(x) for x in all_tokens)
                # Sometimes max_token_length is too short for the CNN encoder.
max_token_length = max(max_token_length, token_indexer._min_padding_length)
indexed_tokens = token_indexer.tokens_to_indices(tokens, self.vocab)
padding_lengths = token_indexer.get_padding_lengths(indexed_tokens)
padded_tokens = token_indexer.as_padded_tensor_dict(indexed_tokens, padding_lengths)
inputs[indexer_name] = {
"token_characters": torch.LongTensor(
padded_tokens["token_characters"]
).unsqueeze(0)
}
elif isinstance(token_indexer, ELMoTokenCharactersIndexer):
elmo_tokens = []
for token in all_tokens:
elmo_indexed_token = token_indexer.tokens_to_indices(
[Token(text=token)], self.vocab
)["elmo_tokens"]
elmo_tokens.append(elmo_indexed_token[0])
inputs[indexer_name] = {"elmo_tokens": torch.LongTensor(elmo_tokens).unsqueeze(0)}
else:
raise RuntimeError("Unsupported token indexer:", token_indexer)
return util.move_to_device(inputs, self.cuda_device)
def attack_from_json(
self,
inputs: JsonDict,
input_field_to_attack: str = "tokens",
grad_input_field: str = "grad_input_1",
ignore_tokens: List[str] = None,
target: JsonDict = None,
) -> JsonDict:
"""
Replaces one token at a time from the input until the model's prediction changes.
`input_field_to_attack` is for example `tokens`, it says what the input field is
called. `grad_input_field` is for example `grad_input_1`, which is a key into a grads
dictionary.
The method computes the gradient w.r.t. the tokens, finds the token with the maximum
gradient (by L2 norm), and replaces it with another token based on the first-order Taylor
approximation of the loss. This process is iteratively repeated until the prediction
changes. Once a token is replaced, it is not flipped again.
# Parameters
inputs : `JsonDict`
The model inputs, the same as what is passed to a `Predictor`.
input_field_to_attack : `str`, optional (default=`'tokens'`)
The field that has the tokens that we're going to be flipping. This must be a
`TextField`.
grad_input_field : `str`, optional (default=`'grad_input_1'`)
If there is more than one field that gets embedded in your model (e.g., a question and
a passage, or a premise and a hypothesis), this tells us the key to use to get the
correct gradients. This selects from the output of :func:`Predictor.get_gradients`.
ignore_tokens : `List[str]`, optional (default=`DEFAULT_IGNORE_TOKENS`)
These tokens will not be flipped. The default list includes some simple punctuation,
OOV and padding tokens, and common control tokens for BERT, etc.
target : `JsonDict`, optional (default=`None`)
If given, this will be a `targeted` hotflip attack, where instead of just trying to
            change a model's prediction from what it is currently predicting, we try to change it to
a `specific` target value. This is a `JsonDict` because it needs to specify the
field name and target value. For example, for a masked LM, this would be something
like `{"words": ["she"]}`, because `"words"` is the field name, there is one mask
token (hence the list of length one), and we want to change the prediction from
whatever it was to `"she"`.
"""
instance = self.predictor._json_to_instance(inputs)
if target is None:
output_dict = self.predictor._model.forward_on_instance(instance)
else:
output_dict = target
# This now holds the predictions that we want to change (either away from or towards,
# depending on whether `target` was passed). We'll use this in the loop below to check for
# when we've met our stopping criterion.
original_instances = self.predictor.predictions_to_labeled_instances(instance, output_dict)
# This is just for ease of access in the UI, so we know the original tokens. It's not used
# in the logic below.
original_text_field: TextField = original_instances[0][ # type: ignore
input_field_to_attack
]
original_tokens = deepcopy(original_text_field.tokens)
final_tokens = []
final_outputs = []
# `original_instances` is a list because there might be several different predictions that
# we're trying to attack (e.g., all of the NER tags for an input sentence). We attack them
# one at a time.
for instance in original_instances:
tokens, outputs = self.attack_instance(
instance=instance,
inputs=inputs,
input_field_to_attack=input_field_to_attack,
grad_input_field=grad_input_field,
ignore_tokens=ignore_tokens,
target=target,
)
final_tokens.append(tokens)
final_outputs.append(outputs)
return sanitize(
{"final": final_tokens, "original": original_tokens, "outputs": final_outputs}
)
def attack_instance(
self,
instance: Instance,
inputs: JsonDict,
input_field_to_attack: str = "tokens",
grad_input_field: str = "grad_input_1",
ignore_tokens: List[str] = None,
target: JsonDict = None,
) -> Tuple[List[Token], JsonDict]:
if self.embedding_matrix is None:
self.initialize()
ignore_tokens = DEFAULT_IGNORE_TOKENS if ignore_tokens is None else ignore_tokens
# If `target` is `None`, we move away from the current prediction, otherwise we move
# _towards_ the target.
sign = -1 if target is None else 1
# Gets a list of the fields that we want to check to see if they change.
fields_to_compare = utils.get_fields_to_compare(inputs, instance, input_field_to_attack)
# We'll be modifying the tokens in this text field below, and grabbing the modified
# list after the `while` loop.
text_field: TextField = instance[input_field_to_attack] # type: ignore
# Because we can save computation by getting grads and outputs at the same time, we do
# them together at the end of the loop, even though we use grads at the beginning and
# outputs at the end. This is our initial gradient for the beginning of the loop. The
# output can be ignored here.
grads, outputs = self.predictor.get_gradients([instance])
# Ignore any token that is in the ignore_tokens list by setting the token to already
# flipped.
flipped: List[int] = []
for index, token in enumerate(text_field.tokens):
if token.text in ignore_tokens:
flipped.append(index)
if "clusters" in outputs:
# Coref unfortunately needs a special case here. We don't want to flip words in
# the same predicted coref cluster, but we can't really specify a list of tokens,
# because, e.g., "he" could show up in several different clusters.
# TODO(mattg): perhaps there's a way to get `predictions_to_labeled_instances` to
# return the set of tokens that shouldn't be changed for each instance? E.g., you
# could imagine setting a field on the `Token` object, that we could then read
# here...
for cluster in outputs["clusters"]:
for mention in cluster:
for index in range(mention[0], mention[1] + 1):
flipped.append(index)
while True:
            # Compute the (squared) L2 norm of the gradient for each token.
grad = grads[grad_input_field][0]
grads_magnitude = [g.dot(g) for g in grad]
# only flip a token once
for index in flipped:
grads_magnitude[index] = -1
# We flip the token with highest gradient norm.
index_of_token_to_flip = numpy.argmax(grads_magnitude)
if grads_magnitude[index_of_token_to_flip] == -1:
# If we've already flipped all of the tokens, we give up.
break
flipped.append(index_of_token_to_flip)
text_field_tensors = text_field.as_tensor(text_field.get_padding_lengths())
input_tokens = util.get_token_ids_from_text_field_tensors(text_field_tensors)
original_id_of_token_to_flip = input_tokens[index_of_token_to_flip]
# Get new token using taylor approximation.
new_id = self._first_order_taylor(
grad[index_of_token_to_flip], original_id_of_token_to_flip, sign
)
# Flip token. We need to tell the instance to re-index itself, so the text field
# will actually update.
new_token = Token(self.vocab._index_to_token[self.namespace][new_id]) # type: ignore
text_field.tokens[index_of_token_to_flip] = new_token
instance.indexed = False
# Get model predictions on instance, and then label the instances
grads, outputs = self.predictor.get_gradients([instance]) # predictions
for key, output in outputs.items():
if isinstance(output, torch.Tensor):
outputs[key] = output.detach().cpu().numpy().squeeze()
elif isinstance(output, list):
outputs[key] = output[0]
# TODO(mattg): taking the first result here seems brittle, if we're in a case where
# there are multiple predictions.
labeled_instance = self.predictor.predictions_to_labeled_instances(instance, outputs)[0]
# If we've met our stopping criterion, we stop.
has_changed = utils.instance_has_changed(labeled_instance, fields_to_compare)
if target is None and has_changed:
# With no target, we just want to change the prediction.
break
if target is not None and not has_changed:
# With a given target, we want to *match* the target, which we check by
# `not has_changed`.
break
return text_field.tokens, outputs
def _first_order_taylor(self, grad: numpy.ndarray, token_idx: torch.Tensor, sign: int) -> int:
"""
The below code is based on
https://github.com/pmichel31415/translate/blob/paul/pytorch_translate/
research/adversarial/adversaries/brute_force_adversary.py
        Replaces the current token_idx with another token_idx to increase the loss. In particular, this
        function uses the grad, along with the embedding_matrix, to select the token that maximizes the
        first-order Taylor approximation of the loss.
"""
grad = util.move_to_device(torch.from_numpy(grad), self.cuda_device)
if token_idx.size() != ():
            # We've got an encoder that only has character ids as input. We don't currently handle
# this case, and it's not clear it's worth it to implement it. We'll at least give a
# nicer error than some pytorch dimension mismatch.
raise NotImplementedError(
"You are using a character-level indexer with no other indexers. This case is not "
"currently supported for hotflip. If you would really like to see us support "
"this, please open an issue on github."
)
if token_idx >= self.embedding_matrix.size(0):
# This happens when we've truncated our fake embedding matrix. We need to do a dot
# product with the word vector of the current token; if that token is out of
# vocabulary for our truncated matrix, we need to run it through the embedding layer.
inputs = self._make_embedder_input([self.vocab.get_token_from_index(token_idx.item())])
word_embedding = self.embedding_layer(inputs)[0]
else:
word_embedding = torch.nn.functional.embedding(
util.move_to_device(torch.LongTensor([token_idx]), self.cuda_device),
self.embedding_matrix,
)
word_embedding = word_embedding.detach().unsqueeze(0)
grad = grad.unsqueeze(0).unsqueeze(0)
# solves equation (3) here https://arxiv.org/abs/1903.06620
new_embed_dot_grad = torch.einsum("bij,kj->bik", (grad, self.embedding_matrix))
prev_embed_dot_grad = torch.einsum("bij,bij->bi", (grad, word_embedding)).unsqueeze(-1)
neg_dir_dot_grad = sign * (prev_embed_dot_grad - new_embed_dot_grad)
neg_dir_dot_grad = neg_dir_dot_grad.detach().cpu().numpy()
# Do not replace with non-alphanumeric tokens
neg_dir_dot_grad[:, :, self.invalid_replacement_indices] = -numpy.inf
best_at_each_step = neg_dir_dot_grad.argmax(2)
return best_at_each_step[0].data[0]
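# Toy numeric sketch of the first-order Taylor search implemented above (equation (3) of
# https://arxiv.org/abs/1903.06620), decoupled from any real vocabulary: the embedding
# matrix, gradient, and current token id are fabricated stand-ins.
def _example_first_order_taylor_pick() -> int:
    embedding_matrix = torch.randn(20, 8)  # (vocab_size, embedding_dim)
    grad = torch.randn(8)  # gradient of the loss w.r.t. the current token's embedding
    current_id = 3
    current_embedding = embedding_matrix[current_id]
    # For an untargeted attack (sign = -1) we pick the candidate that most increases the
    # loss under the linear approximation.
    scores = -1 * (current_embedding @ grad - embedding_matrix @ grad)
    return int(scores.argmax().item())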
| allennlp-master | allennlp/interpret/attackers/hotflip.py |
from copy import deepcopy
from typing import List, Tuple
import heapq
import numpy as np
import torch
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance
from allennlp.data.fields import TextField, SequenceLabelField
from allennlp.interpret.attackers import utils
from allennlp.interpret.attackers.attacker import Attacker
from allennlp.predictors import Predictor
@Attacker.register("input-reduction")
class InputReduction(Attacker):
"""
Runs the input reduction method from [Pathologies of Neural Models Make Interpretations
Difficult](https://arxiv.org/abs/1804.07781), which removes as many words as possible from
the input without changing the model's prediction.
    The functions on this class handle a special case for NER by looking for a field called "tags".
This check is brittle, i.e., the code could break if the name of this field has changed, or if
a non-NER model has a field called "tags".
Registered as an `Attacker` with name "input-reduction".
"""
def __init__(self, predictor: Predictor, beam_size: int = 3) -> None:
super().__init__(predictor)
self.beam_size = beam_size
def attack_from_json(
self,
inputs: JsonDict,
input_field_to_attack: str = "tokens",
grad_input_field: str = "grad_input_1",
ignore_tokens: List[str] = None,
target: JsonDict = None,
):
if target is not None:
raise ValueError("Input reduction does not implement targeted attacks")
ignore_tokens = ["@@NULL@@"] if ignore_tokens is None else ignore_tokens
original_instances = self.predictor.json_to_labeled_instances(inputs)
original_text_field: TextField = original_instances[0][ # type: ignore
input_field_to_attack
]
original_tokens = deepcopy(original_text_field.tokens)
final_tokens = []
for instance in original_instances:
final_tokens.append(
self._attack_instance(
inputs, instance, input_field_to_attack, grad_input_field, ignore_tokens
)
)
return sanitize({"final": final_tokens, "original": original_tokens})
def _attack_instance(
self,
inputs: JsonDict,
instance: Instance,
input_field_to_attack: str,
grad_input_field: str,
ignore_tokens: List[str],
):
# Save fields that must be checked for equality
fields_to_compare = utils.get_fields_to_compare(inputs, instance, input_field_to_attack)
# Set num_ignore_tokens, which tells input reduction when to stop
# We keep at least one token for input reduction on classification/entailment/etc.
if "tags" not in instance:
num_ignore_tokens = 1
tag_mask = None
# Set num_ignore_tokens for NER and build token mask
else:
num_ignore_tokens, tag_mask, original_tags = _get_ner_tags_and_mask(
instance, input_field_to_attack, ignore_tokens
)
text_field: TextField = instance[input_field_to_attack] # type: ignore
current_tokens = deepcopy(text_field.tokens)
candidates = [(instance, -1, tag_mask)]
# keep removing tokens until prediction is about to change
while len(current_tokens) > num_ignore_tokens and candidates:
# sort current candidates by smallest length (we want to remove as many tokens as possible)
def get_length(input_instance: Instance):
input_text_field: TextField = input_instance[input_field_to_attack] # type: ignore
return len(input_text_field.tokens)
candidates = heapq.nsmallest(self.beam_size, candidates, key=lambda x: get_length(x[0]))
beam_candidates = deepcopy(candidates)
candidates = []
for beam_instance, smallest_idx, tag_mask in beam_candidates:
# get gradients and predictions
beam_tag_mask = deepcopy(tag_mask)
grads, outputs = self.predictor.get_gradients([beam_instance])
for output in outputs:
if isinstance(outputs[output], torch.Tensor):
outputs[output] = outputs[output].detach().cpu().numpy().squeeze().squeeze()
elif isinstance(outputs[output], list):
outputs[output] = outputs[output][0]
# Check if any fields have changed, if so, next beam
if "tags" not in instance:
# relabel beam_instance since last iteration removed an input token
beam_instance = self.predictor.predictions_to_labeled_instances(
beam_instance, outputs
)[0]
if utils.instance_has_changed(beam_instance, fields_to_compare):
continue
# special case for sentence tagging (we have tested NER)
else:
# remove the mask where you remove the input token from.
if smallest_idx != -1: # Don't delete on the very first iteration
del beam_tag_mask[smallest_idx] # type: ignore
cur_tags = [
outputs["tags"][x] for x in range(len(outputs["tags"])) if beam_tag_mask[x] # type: ignore
]
if cur_tags != original_tags:
continue
# remove a token from the input
text_field: TextField = beam_instance[input_field_to_attack] # type: ignore
current_tokens = deepcopy(text_field.tokens)
reduced_instances_and_smallest = _remove_one_token(
beam_instance,
input_field_to_attack,
grads[grad_input_field][0],
ignore_tokens,
self.beam_size,
beam_tag_mask, # type: ignore
)
candidates.extend(reduced_instances_and_smallest)
return current_tokens
def _remove_one_token(
instance: Instance,
input_field_to_attack: str,
grads: np.ndarray,
ignore_tokens: List[str],
beam_size: int,
tag_mask: List[int],
) -> List[Tuple[Instance, int, List[int]]]:
"""
Finds the token with the smallest gradient and removes it.
"""
# Compute L2 norm of all grads.
grads_mag = [np.sqrt(grad.dot(grad)) for grad in grads]
# Skip all ignore_tokens by setting grad to infinity
text_field: TextField = instance[input_field_to_attack] # type: ignore
for token_idx, token in enumerate(text_field.tokens):
if token in ignore_tokens:
grads_mag[token_idx] = float("inf")
# For NER, skip all tokens that are not in outside
if "tags" in instance:
tag_field: SequenceLabelField = instance["tags"] # type: ignore
labels: List[str] = tag_field.labels # type: ignore
for idx, label in enumerate(labels):
if label != "O":
grads_mag[idx] = float("inf")
reduced_instances_and_smallest: List[Tuple[Instance, int, List[int]]] = []
for _ in range(beam_size):
# copy instance and edit later
copied_instance = deepcopy(instance)
copied_text_field: TextField = copied_instance[input_field_to_attack] # type: ignore
# find smallest
smallest = np.argmin(grads_mag)
if grads_mag[smallest] == float("inf"): # if all are ignored tokens, return.
break
grads_mag[smallest] = float("inf") # so the other beams don't use this token
# remove smallest
inputs_before_smallest = copied_text_field.tokens[0:smallest]
inputs_after_smallest = copied_text_field.tokens[smallest + 1 :]
copied_text_field.tokens = inputs_before_smallest + inputs_after_smallest
if "tags" in instance:
tag_field: SequenceLabelField = copied_instance["tags"] # type: ignore
tag_field_before_smallest = tag_field.labels[0:smallest]
tag_field_after_smallest = tag_field.labels[smallest + 1 :]
tag_field.labels = tag_field_before_smallest + tag_field_after_smallest # type: ignore
tag_field.sequence_field = copied_text_field
copied_instance.indexed = False
reduced_instances_and_smallest.append((copied_instance, smallest, tag_mask))
return reduced_instances_and_smallest
def _get_ner_tags_and_mask(
instance: Instance, input_field_to_attack: str, ignore_tokens: List[str]
):
"""
    Used for the NER task. Sets `num_ignore_tokens`, saves the original predicted tags, and
    builds a 0/1 mask marking the positions of the (non-"O") tags.
"""
# Set num_ignore_tokens
num_ignore_tokens = 0
input_field: TextField = instance[input_field_to_attack] # type: ignore
for token in input_field.tokens:
if str(token) in ignore_tokens:
num_ignore_tokens += 1
# save the original tags and a 0/1 mask where the tags are
tag_mask = []
original_tags = []
tag_field: SequenceLabelField = instance["tags"] # type: ignore
for label in tag_field.labels:
if label != "O":
tag_mask.append(1)
original_tags.append(label)
num_ignore_tokens += 1
else:
tag_mask.append(0)
return num_ignore_tokens, tag_mask, original_tags
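# Toy numeric sketch of the selection rule in `_remove_one_token` above: the token with
# the smallest gradient L2 norm is the next removal candidate. The gradient rows are
# fabricated stand-ins for per-token gradients.
def _example_smallest_gradient_token() -> int:
    grads = np.array([[0.3, 0.4], [0.01, 0.02], [1.0, -1.0]])
    grads_mag = [np.sqrt(g.dot(g)) for g in grads]
    return int(np.argmin(grads_mag))  # index 1 has the smallest norm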
| allennlp-master | allennlp/interpret/attackers/input_reduction.py |