python_code (string, 0-290k chars) | repo_name (string, 30 distinct values) | file_path (string, 6-125 chars)
---|---|---|
from typing import List
from allennlp.common import Registrable
from allennlp.common.util import JsonDict
from allennlp.predictors import Predictor
class Attacker(Registrable):
"""
An `Attacker` will modify an input (e.g., add or delete tokens) to try to change an AllenNLP
Predictor's output in a desired manner (e.g., make it incorrect).
"""
def __init__(self, predictor: Predictor) -> None:
self.predictor = predictor
def initialize(self):
"""
Initializes any components of the Attacker that are expensive to compute, so that they are
not created on __init__(). Default implementation is `pass`.
"""
pass
def attack_from_json(
self,
inputs: JsonDict,
input_field_to_attack: str,
grad_input_field: str,
ignore_tokens: List[str],
target: JsonDict,
) -> JsonDict:
"""
This function finds a modification to the input text that would change the model's
prediction in some desired manner (e.g., an adversarial attack).
# Parameters
inputs : `JsonDict`
The input you want to attack (the same as the argument to a Predictor, e.g.,
predict_json()).
input_field_to_attack : `str`
The key in the inputs JsonDict you want to attack, e.g., `tokens`.
grad_input_field : `str`
The field in the gradients dictionary that contains the input gradients. For example,
`grad_input_1` will be the field for single input tasks. See get_gradients() in
`Predictor` for more information on field names.
ignore_tokens : `List[str]`
Tokens in the input that should not be modified by the attack (e.g., stopwords or punctuation).
target : `JsonDict`
If given, this is a `targeted` attack, trying to change the prediction to a particular
value, instead of just changing it from its original prediction. Subclasses are not
required to accept this argument, as not all attacks make sense as targeted attacks.
Perhaps that means we should make the API more crisp, but adding another class is not
worth it.
# Returns
reduced_input : `JsonDict`
Contains the final, sanitized input after adversarial modification.
"""
raise NotImplementedError()
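# Illustrative sketch, not part of AllenNLP: a trivial Attacker subclass that
# satisfies the interface above without actually modifying the input. A real
# attacker would use the gradients from `self.predictor.get_gradients()` to
# search for token substitutions or deletions. The registered name "identity"
# and the shape of the returned dict are assumptions made for this example.
@Attacker.register("identity")
class IdentityAttacker(Attacker):
    def attack_from_json(
        self,
        inputs: JsonDict,
        input_field_to_attack: str = "tokens",
        grad_input_field: str = "grad_input_1",
        ignore_tokens: List[str] = None,
        target: JsonDict = None,
    ) -> JsonDict:
        # Echo the requested field back unchanged; a real subclass would return
        # the perturbed version of this field instead.
        return {"original": inputs[input_field_to_attack], "final": [inputs[input_field_to_attack]]}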
| allennlp-master | allennlp/interpret/attackers/attacker.py |
"""
Subcommand for building a vocabulary from a training config.
"""
import argparse
import json
import logging
import os
import tarfile
import tempfile
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
from allennlp.common.file_utils import CacheFile
from allennlp.common.params import Params
from allennlp.training.util import make_vocab_from_params
logger = logging.getLogger(__name__)
@Subcommand.register("build-vocab")
class BuildVocab(Subcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Build a vocabulary from an experiment config file."""
subparser = parser.add_parser(self.name, description=description, help=description)
subparser.add_argument("param_path", type=str, help="path to an experiment config file")
subparser.add_argument(
"output_path", type=str, help="path to save the vocab tar.gz file to"
)
subparser.add_argument(
"-f",
"--force",
action="store_true",
help="force write if the output_path already exists",
)
subparser.add_argument(
"-o",
"--overrides",
type=str,
default="",
help=(
"a json(net) structure used to override the experiment configuration, e.g., "
"'{\"vocabulary.min_count.labels\": 10}'. Nested parameters can be specified either"
" with nested dictionaries or with dot syntax."
),
)
subparser.set_defaults(func=build_vocab_from_args)
return subparser
def build_vocab_from_args(args: argparse.Namespace):
if not args.output_path.endswith(".tar.gz"):
raise ValueError("param 'output_path' should end with '.tar.gz'")
if os.path.exists(args.output_path) and not args.force:
raise RuntimeError(f"{args.output_path} already exists. Use --force to overwrite.")
output_directory = os.path.dirname(args.output_path)
os.makedirs(output_directory, exist_ok=True)
params = Params.from_file(args.param_path)
with tempfile.TemporaryDirectory() as temp_dir:
# Serializes the vocab to 'tempdir/vocabulary'.
make_vocab_from_params(params, temp_dir)
# The CacheFile context manager gives us a temporary file to write to.
# On a successful exit from the context, it will rename the temp file to
# the target `output_path`.
with CacheFile(args.output_path, suffix=".tar.gz") as temp_archive:
logger.info("Archiving vocabulary to %s", args.output_path)
with tarfile.open(temp_archive.name, "w:gz") as archive:
vocab_dir = os.path.join(temp_dir, "vocabulary")
for fname in os.listdir(vocab_dir):
if fname.endswith(".lock"):
continue
archive.add(os.path.join(vocab_dir, fname), arcname=fname)
print(f"Success! Vocab saved to {args.output_path}")
print('You can now set the "vocabulary" entry of your training config to:')
print(json.dumps({"type": "from_files", "directory": os.path.abspath(args.output_path)}))
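# Example usage (the config and output paths below are hypothetical):
#
#     allennlp build-vocab my_experiment.jsonnet vocab.tar.gz --force
#
# Afterwards, the training config can reuse the archived vocabulary with:
#
#     "vocabulary": {"type": "from_files", "directory": "/path/to/vocab.tar.gz"}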
| allennlp-master | allennlp/commands/build_vocab.py |
"""
The `predict` subcommand allows you to make bulk JSON-to-JSON
or dataset to JSON predictions using a trained model and its
[`Predictor`](../predictors/predictor.md#predictor) wrapper.
"""
from typing import List, Iterator, Optional
import argparse
import sys
import json
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
from allennlp.common import logging as common_logging
from allennlp.common.checks import check_for_gpu, ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.common.util import lazy_groups_of
from allennlp.models.archival import load_archive
from allennlp.predictors.predictor import Predictor, JsonDict
from allennlp.data import Instance
@Subcommand.register("predict")
class Predict(Subcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Run the specified model against a JSON-lines input file."""
subparser = parser.add_parser(
self.name, description=description, help="Use a trained model to make predictions."
)
subparser.add_argument(
"archive_file", type=str, help="the archived model to make predictions with"
)
subparser.add_argument("input_file", type=str, help="path to or url of the input file")
subparser.add_argument("--output-file", type=str, help="path to output file")
subparser.add_argument(
"--weights-file", type=str, help="a path that overrides which weights file to use"
)
batch_size = subparser.add_mutually_exclusive_group(required=False)
batch_size.add_argument(
"--batch-size", type=int, default=1, help="The batch size to use for processing"
)
subparser.add_argument(
"--silent", action="store_true", help="do not print output to stdout"
)
cuda_device = subparser.add_mutually_exclusive_group(required=False)
cuda_device.add_argument(
"--cuda-device", type=int, default=-1, help="id of GPU to use (if any)"
)
subparser.add_argument(
"--use-dataset-reader",
action="store_true",
help="Whether to use the dataset reader of the original model to load Instances. "
"The validation dataset reader will be used if it exists, otherwise it will "
"fall back to the train dataset reader. This behavior can be overridden "
"with the --dataset-reader-choice flag.",
)
subparser.add_argument(
"--dataset-reader-choice",
type=str,
choices=["train", "validation"],
default="validation",
help="Indicates which model dataset reader to use if the --use-dataset-reader "
"flag is set.",
)
subparser.add_argument(
"-o",
"--overrides",
type=str,
default="",
help=(
"a json(net) structure used to override the experiment configuration, e.g., "
"'{\"iterator.batch_size\": 16}'. Nested parameters can be specified either"
" with nested dictionaries or with dot syntax."
),
)
subparser.add_argument(
"--predictor", type=str, help="optionally specify a specific predictor to use"
)
subparser.add_argument(
"--file-friendly-logging",
action="store_true",
default=False,
help="outputs tqdm status on separate lines and slows tqdm refresh rate",
)
subparser.set_defaults(func=_predict)
return subparser
def _get_predictor(args: argparse.Namespace) -> Predictor:
check_for_gpu(args.cuda_device)
archive = load_archive(
args.archive_file,
weights_file=args.weights_file,
cuda_device=args.cuda_device,
overrides=args.overrides,
)
return Predictor.from_archive(
archive, args.predictor, dataset_reader_to_load=args.dataset_reader_choice
)
class _PredictManager:
def __init__(
self,
predictor: Predictor,
input_file: str,
output_file: Optional[str],
batch_size: int,
print_to_console: bool,
has_dataset_reader: bool,
) -> None:
self._predictor = predictor
self._input_file = input_file
self._output_file = None if output_file is None else open(output_file, "w")
self._batch_size = batch_size
self._print_to_console = print_to_console
self._dataset_reader = None if not has_dataset_reader else predictor._dataset_reader
def _predict_json(self, batch_data: List[JsonDict]) -> Iterator[str]:
if len(batch_data) == 1:
results = [self._predictor.predict_json(batch_data[0])]
else:
results = self._predictor.predict_batch_json(batch_data)
for output in results:
yield self._predictor.dump_line(output)
def _predict_instances(self, batch_data: List[Instance]) -> Iterator[str]:
if len(batch_data) == 1:
results = [self._predictor.predict_instance(batch_data[0])]
else:
results = self._predictor.predict_batch_instance(batch_data)
for output in results:
yield self._predictor.dump_line(output)
def _maybe_print_to_console_and_file(
self, index: int, prediction: str, model_input: Optional[str] = None
) -> None:
if self._print_to_console:
if model_input is not None:
print(f"input {index}: ", model_input)
print("prediction: ", prediction)
if self._output_file is not None:
self._output_file.write(prediction)
def _get_json_data(self) -> Iterator[JsonDict]:
if self._input_file == "-":
for line in sys.stdin:
if not line.isspace():
yield self._predictor.load_line(line)
else:
input_file = cached_path(self._input_file)
with open(input_file, "r") as file_input:
for line in file_input:
if not line.isspace():
yield self._predictor.load_line(line)
def _get_instance_data(self) -> Iterator[Instance]:
if self._input_file == "-":
raise ConfigurationError("stdin is not an option when using a DatasetReader.")
elif self._dataset_reader is None:
raise ConfigurationError("To generate instances directly, pass a DatasetReader.")
else:
yield from self._dataset_reader.read(self._input_file)
def run(self) -> None:
has_reader = self._dataset_reader is not None
index = 0
if has_reader:
for batch in lazy_groups_of(self._get_instance_data(), self._batch_size):
for model_input_instance, result in zip(batch, self._predict_instances(batch)):
self._maybe_print_to_console_and_file(index, result, str(model_input_instance))
index = index + 1
else:
for batch_json in lazy_groups_of(self._get_json_data(), self._batch_size):
for model_input_json, result in zip(batch_json, self._predict_json(batch_json)):
self._maybe_print_to_console_and_file(
index, result, json.dumps(model_input_json)
)
index = index + 1
if self._output_file is not None:
self._output_file.close()
def _predict(args: argparse.Namespace) -> None:
common_logging.FILE_FRIENDLY_LOGGING = args.file_friendly_logging
predictor = _get_predictor(args)
if args.silent and not args.output_file:
print("--silent specified without --output-file.")
print("Exiting early because no output will be created.")
sys.exit(0)
manager = _PredictManager(
predictor,
args.input_file,
args.output_file,
args.batch_size,
not args.silent,
args.use_dataset_reader,
)
manager.run()
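# Example usage (the archive and data paths below are hypothetical):
#
#     allennlp predict model.tar.gz inputs.jsonl \
#         --output-file predictions.jsonl --batch-size 8 --silent
#
# Each non-blank line of the input is parsed with `Predictor.load_line` (JSON
# by default) and every prediction is serialized with `Predictor.dump_line`.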
| allennlp-master | allennlp/commands/predict.py |
import argparse
import logging
import sys
from typing import Any, Optional, Tuple, Set
from overrides import overrides
from allennlp import __version__
from allennlp.commands.build_vocab import BuildVocab
from allennlp.commands.cached_path import CachedPath
from allennlp.commands.evaluate import Evaluate
from allennlp.commands.find_learning_rate import FindLearningRate
from allennlp.commands.predict import Predict
from allennlp.commands.print_results import PrintResults
from allennlp.commands.subcommand import Subcommand
from allennlp.commands.test_install import TestInstall
from allennlp.commands.train import Train
from allennlp.common.plugins import import_plugins
from allennlp.common.util import import_module_and_submodules
logger = logging.getLogger(__name__)
class ArgumentParserWithDefaults(argparse.ArgumentParser):
"""
Custom argument parser that will display the default value for an argument
in the help message.
"""
_action_defaults_to_ignore = {"help", "store_true", "store_false", "store_const"}
@staticmethod
def _is_empty_default(default: Any) -> bool:
if default is None:
return True
if isinstance(default, (str, list, tuple, set)):
return not bool(default)
return False
@overrides
def add_argument(self, *args, **kwargs):
# Add default value to the help message when the default is meaningful.
default = kwargs.get("default")
if kwargs.get(
"action"
) not in self._action_defaults_to_ignore and not self._is_empty_default(default):
description = kwargs.get("help", "")
kwargs["help"] = f"{description} (default = {default})"
super().add_argument(*args, **kwargs)
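# Quick illustration of the behavior above (the argument name and default are
# hypothetical): given
#
#     parser = ArgumentParserWithDefaults()
#     parser.add_argument("--batch-size", type=int, default=32, help="batch size")
#
# the generated help text for --batch-size reads "batch size (default = 32)".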
def parse_args(prog: Optional[str] = None) -> Tuple[argparse.ArgumentParser, argparse.Namespace]:
"""
Creates the argument parser for the main program and uses it to parse the args.
"""
parser = ArgumentParserWithDefaults(description="Run AllenNLP", prog=prog)
parser.add_argument("--version", action="version", version=f"%(prog)s {__version__}")
subparsers = parser.add_subparsers(title="Commands", metavar="")
subcommands: Set[str] = set()
def add_subcommands():
for subcommand_name in sorted(Subcommand.list_available()):
if subcommand_name in subcommands:
continue
subcommands.add(subcommand_name)
subcommand_class = Subcommand.by_name(subcommand_name)
subcommand = subcommand_class()
subparser = subcommand.add_subparser(subparsers)
if subcommand_class.requires_plugins:
subparser.add_argument(
"--include-package",
type=str,
action="append",
default=[],
help="additional packages to include",
)
# Add all default registered subcommands first.
add_subcommands()
# If we need to print the usage/help, or the subcommand is unknown,
# we'll call `import_plugins()` to register any plugin subcommands first.
argv = sys.argv[1:]
plugins_imported: bool = False
if not argv or argv == ["--help"] or argv[0] not in subcommands:
import_plugins()
plugins_imported = True
# Add subcommands again in case one of the plugins has a registered subcommand.
add_subcommands()
# Now we can parse the arguments.
args = parser.parse_args()
if not plugins_imported and Subcommand.by_name(argv[0]).requires_plugins: # type: ignore
import_plugins()
return parser, args
def main(prog: Optional[str] = None) -> None:
"""
The [`run`](./train.md#run) command only knows about the registered classes in the ``allennlp``
codebase. In particular, once you start creating your own `Model` s and so forth, it won't
work for them, unless you use the ``--include-package`` flag or you make your code available
as a plugin (see [`plugins`](./plugins.md)).
"""
parser, args = parse_args(prog)
# If a subparser is triggered, it adds its work as `args.func`.
# So if no such attribute has been added, no subparser was triggered,
# so give the user some help.
if "func" in dir(args):
# Import any additional modules needed (to register custom classes).
for package_name in getattr(args, "include_package", []):
import_module_and_submodules(package_name)
args.func(args)
else:
parser.print_help()
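# Programmatic entry point, e.g. from a console-script wrapper; the call below
# is essentially what the `allennlp` executable does:
#
#     from allennlp.commands import main
#     main(prog="allennlp")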
| allennlp-master | allennlp/commands/__init__.py |
"""
CLI to the caching mechanism in `common.file_utils`.
"""
import argparse
import logging
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
from allennlp.common.file_utils import (
cached_path,
CACHE_DIRECTORY,
inspect_cache,
remove_cache_entries,
)
logger = logging.getLogger(__name__)
@Subcommand.register("cached-path")
class CachedPath(Subcommand):
requires_plugins: bool = False
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Cache remote files to the AllenNLP cache."""
subparser = parser.add_parser(
self.name,
description=description,
help=description,
)
subparser.set_defaults(func=_cached_path)
subparser.add_argument(
"resources",
type=str,
help="""The URLs or paths to the resources.
If using the --inspect or --remove flag, this can also contain glob patterns.""",
nargs="*",
)
subparser.add_argument(
"-d",
"--cache-dir",
type=str,
help="""Use a custom cache directory.""",
default=CACHE_DIRECTORY,
)
subparser.add_argument(
"-x",
"--extract-archive",
action="store_true",
help="""Automatically extract zip or tar.gz archive files.""",
)
subparser.add_argument(
"-f",
"--force-extract",
action="store_true",
help="""Extract archives regardless of whether or not they already exist.""",
)
subparser.add_argument(
"--inspect",
action="store_true",
help="""Print some useful information about the cache.""",
)
subparser.add_argument(
"--remove",
action="store_true",
help="""Remove any cache entries matching the given resource patterns.""",
)
return subparser
def _cached_path(args: argparse.Namespace):
logger.info("Cache directory: %s", args.cache_dir)
if args.inspect:
if args.extract_archive or args.force_extract or args.remove:
raise RuntimeError(
"cached-path cannot accept --extract-archive, --force-extract, or --remove "
"options when --inspect flag is used."
)
inspect_cache(patterns=args.resources, cache_dir=args.cache_dir)
elif args.remove:
from allennlp.common.util import format_size
if args.extract_archive or args.force_extract or args.inspect:
raise RuntimeError(
"cached-path cannot accept --extract-archive, --force-extract, or --inspect "
"options when --remove flag is used."
)
if not args.resources:
raise RuntimeError(
"Missing positional argument(s) 'resources'. 'resources' is required when using "
"the --remove option. If you really want to remove everything, pass '*' for 'resources'."
)
reclaimed_space = remove_cache_entries(args.resources, cache_dir=args.cache_dir)
print(f"Reclaimed {format_size(reclaimed_space)} of space")
else:
for resource in args.resources:
print(
cached_path(
resource,
cache_dir=args.cache_dir,
extract_archive=args.extract_archive,
force_extract=args.force_extract,
)
)
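# Example usage (the URL is hypothetical):
#
#     allennlp cached-path https://example.com/model.tar.gz --extract-archive
#     allennlp cached-path --inspect
#     allennlp cached-path --remove "*model*"
#
# The first form prints the local cache path of the downloaded (and extracted)
# resource; the other two report on and clean up existing cache entries.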
| allennlp-master | allennlp/commands/cached_path.py |
"""
The `print-results` subcommand allows you to print results from multiple
allennlp serialization directories to the console in a helpful csv format.
"""
import argparse
import json
import logging
import os
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
logger = logging.getLogger(__name__)
@Subcommand.register("print-results")
class PrintResults(Subcommand):
requires_plugins: bool = False
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Print results from allennlp training runs in a helpful CSV format."""
subparser = parser.add_parser(
self.name,
description=description,
help="Print results from allennlp serialization directories to the console.",
)
subparser.add_argument(
"path",
type=str,
help="Path to recursively search for allennlp serialization directories.",
)
subparser.add_argument(
"-k",
"--keys",
type=str,
nargs="+",
help="Keys to print from metrics.json."
'Keys not present in all metrics.json will result in "N/A"',
default=None,
required=False,
)
subparser.add_argument(
"-m",
"--metrics-filename",
type=str,
help="Name of the metrics file to inspect.",
default="metrics.json",
required=False,
)
subparser.set_defaults(func=print_results_from_args)
return subparser
def print_results_from_args(args: argparse.Namespace):
"""
Prints results from an `argparse.Namespace` object.
"""
path = args.path
metrics_name = args.metrics_filename
# If no keys were given, default to an empty list so the run names still print.
keys = args.keys or []
results_dict = {}
for root, _, files in os.walk(path):
if metrics_name in files:
full_name = os.path.join(root, metrics_name)
with open(full_name) as file_:
metrics = json.load(file_)
results_dict[full_name] = metrics
sorted_keys = sorted(list(results_dict.keys()))
print(f"model_run, {', '.join(keys)}")
for name in sorted_keys:
results = results_dict[name]
keys_to_print = (str(results.get(key, "N/A")) for key in keys)
print(f"{name}, {', '.join(keys_to_print)}")
| allennlp-master | allennlp/commands/print_results.py |
"""
The `train` subcommand can be used to train a model.
It requires a configuration file and a directory in
which to write the results.
"""
import argparse
import logging
import os
from os import PathLike
from typing import Any, Dict, List, Optional, Union
import warnings
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
from allennlp.common import Params, Registrable, Lazy
from allennlp.common.checks import check_for_gpu, ConfigurationError
from allennlp.common import logging as common_logging
from allennlp.common import util as common_util
from allennlp.common.plugins import import_plugins
from allennlp.data import DatasetReader, Vocabulary
from allennlp.data import DataLoader
from allennlp.models.archival import archive_model, CONFIG_NAME, verify_include_in_archive
from allennlp.models.model import _DEFAULT_WEIGHTS, Model
from allennlp.training.trainer import Trainer
from allennlp.training import util as training_util
logger = logging.getLogger(__name__)
@Subcommand.register("train")
class Train(Subcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Train the specified model on the specified dataset."""
subparser = parser.add_parser(self.name, description=description, help="Train a model.")
subparser.add_argument(
"param_path", type=str, help="path to parameter file describing the model to be trained"
)
subparser.add_argument(
"-s",
"--serialization-dir",
required=True,
type=str,
help="directory in which to save the model and its logs",
)
subparser.add_argument(
"-r",
"--recover",
action="store_true",
default=False,
help="recover training from the state in serialization_dir",
)
subparser.add_argument(
"-f",
"--force",
action="store_true",
required=False,
help="overwrite the output directory if it exists",
)
subparser.add_argument(
"-o",
"--overrides",
type=str,
default="",
help=(
"a json(net) structure used to override the experiment configuration, e.g., "
"'{\"iterator.batch_size\": 16}'. Nested parameters can be specified either"
" with nested dictionaries or with dot syntax."
),
)
subparser.add_argument(
"--node-rank", type=int, default=0, help="rank of this node in the distributed setup"
)
subparser.add_argument(
"--dry-run",
action="store_true",
help=(
"do not train a model, but create a vocabulary, show dataset statistics and "
"other training information"
),
)
subparser.add_argument(
"--file-friendly-logging",
action="store_true",
default=False,
help="outputs tqdm status on separate lines and slows tqdm refresh rate",
)
subparser.set_defaults(func=train_model_from_args)
return subparser
def train_model_from_args(args: argparse.Namespace):
"""
Just converts from an `argparse.Namespace` object to string paths.
"""
train_model_from_file(
parameter_filename=args.param_path,
serialization_dir=args.serialization_dir,
overrides=args.overrides,
recover=args.recover,
force=args.force,
node_rank=args.node_rank,
include_package=args.include_package,
dry_run=args.dry_run,
file_friendly_logging=args.file_friendly_logging,
)
def train_model_from_file(
parameter_filename: Union[str, PathLike],
serialization_dir: Union[str, PathLike],
overrides: Union[str, Dict[str, Any]] = "",
recover: bool = False,
force: bool = False,
node_rank: int = 0,
include_package: List[str] = None,
dry_run: bool = False,
file_friendly_logging: bool = False,
) -> Optional[Model]:
"""
A wrapper around [`train_model`](#train_model) which loads the params from a file.
# Parameters
parameter_filename : `str`
A json parameter file specifying an AllenNLP experiment.
serialization_dir : `str`
The directory in which to save results and logs. We just pass this along to
[`train_model`](#train_model).
overrides : `Union[str, Dict[str, Any]]`, optional (default = `""`)
A JSON string or a dict that we will use to override values in the input parameter file.
recover : `bool`, optional (default=`False`)
If `True`, we will try to recover a training run from an existing serialization
directory. This is only intended for use when something actually crashed during the middle
of a run. For continuing training a model on new data, see `Model.from_archive`.
force : `bool`, optional (default=`False`)
If `True`, we will overwrite the serialization directory if it already exists.
node_rank : `int`, optional
Rank of the current node in distributed training
include_package : `List[str]`, optional
In distributed mode, extra packages mentioned will be imported in trainer workers.
dry_run : `bool`, optional (default=`False`)
Do not train a model, but create a vocabulary, show dataset statistics and other training
information.
file_friendly_logging : `bool`, optional (default=`False`)
If `True`, we add newlines to tqdm output, even on an interactive terminal, and we slow
down tqdm's output to only once every 10 seconds.
# Returns
best_model : `Optional[Model]`
The model with the best epoch weights or `None` if in dry run.
"""
# Load the experiment config from a file and pass it to `train_model`.
params = Params.from_file(parameter_filename, overrides)
return train_model(
params=params,
serialization_dir=serialization_dir,
recover=recover,
force=force,
node_rank=node_rank,
include_package=include_package,
dry_run=dry_run,
file_friendly_logging=file_friendly_logging,
)
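# Illustrative programmatic call (the paths are hypothetical); this is roughly
# what `allennlp train my_experiment.jsonnet -s runs/exp1` does once argument
# parsing is out of the way:
#
#     best_model = train_model_from_file(
#         parameter_filename="my_experiment.jsonnet",
#         serialization_dir="runs/exp1",
#     )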
def train_model(
params: Params,
serialization_dir: Union[str, PathLike],
recover: bool = False,
force: bool = False,
node_rank: int = 0,
include_package: List[str] = None,
dry_run: bool = False,
file_friendly_logging: bool = False,
) -> Optional[Model]:
"""
Trains the model specified in the given [`Params`](../common/params.md#params) object, using the data
and training parameters also specified in that object, and saves the results in `serialization_dir`.
# Parameters
params : `Params`
A parameter object specifying an AllenNLP Experiment.
serialization_dir : `str`
The directory in which to save results and logs.
recover : `bool`, optional (default=`False`)
If `True`, we will try to recover a training run from an existing serialization
directory. This is only intended for use when something actually crashed during the middle
of a run. For continuing training a model on new data, see `Model.from_archive`.
force : `bool`, optional (default=`False`)
If `True`, we will overwrite the serialization directory if it already exists.
node_rank : `int`, optional
Rank of the current node in distributed training
include_package : `List[str]`, optional
In distributed mode, extra packages mentioned will be imported in trainer workers.
dry_run : `bool`, optional (default=`False`)
Do not train a model, but create a vocabulary, show dataset statistics and other training
information.
file_friendly_logging : `bool`, optional (default=`False`)
If `True`, we add newlines to tqdm output, even on an interactive terminal, and we slow
down tqdm's output to only once every 10 seconds.
# Returns
best_model : `Optional[Model]`
The model with the best epoch weights or `None` if in dry run.
"""
common_logging.FILE_FRIENDLY_LOGGING = file_friendly_logging
training_util.create_serialization_dir(params, serialization_dir, recover, force)
params.to_file(os.path.join(serialization_dir, CONFIG_NAME))
include_in_archive = params.pop("include_in_archive", None)
verify_include_in_archive(include_in_archive)
distributed_params = params.params.pop("distributed", None)
# If distributed isn't in the config and the config contains strictly
# one cuda device, we just run a single training process.
if distributed_params is None:
model = _train_worker(
process_rank=0,
params=params,
serialization_dir=serialization_dir,
include_package=include_package,
dry_run=dry_run,
file_friendly_logging=file_friendly_logging,
)
if not dry_run:
archive_model(serialization_dir, include_in_archive=include_in_archive)
return model
# Otherwise, we are running multiple processes for training.
else:
common_logging.prepare_global_logging(
serialization_dir,
rank=0,
world_size=1,
)
# We are careful here so that we can raise a good error if someone
# passed the wrong thing - cuda_devices are required.
device_ids = distributed_params.pop("cuda_devices", None)
multi_device = isinstance(device_ids, list) and len(device_ids) > 1
num_nodes = distributed_params.pop("num_nodes", 1)
if not (multi_device or num_nodes > 1):
raise ConfigurationError(
"Multiple cuda devices/nodes need to be configured to run distributed training."
)
check_for_gpu(device_ids)
master_addr = distributed_params.pop("master_address", "127.0.0.1")
if master_addr in ("127.0.0.1", "0.0.0.0", "localhost"):
# If running locally, we can automatically find an open port if one is not specified.
master_port = (
distributed_params.pop("master_port", None) or common_util.find_open_port()
)
else:
# Otherwise we require that the port be specified.
master_port = distributed_params.pop("master_port")
num_procs = len(device_ids)
world_size = num_nodes * num_procs
# Creating `Vocabulary` objects from workers could be problematic since
# the data loaders in each worker will yield only `rank` specific
# instances. Hence it is safe to construct the vocabulary and write it
# to disk before initializing the distributed context. The workers will
# load the vocabulary from the path specified.
vocab_dir = os.path.join(serialization_dir, "vocabulary")
if recover:
vocab = Vocabulary.from_files(vocab_dir)
else:
vocab = training_util.make_vocab_from_params(
params.duplicate(), serialization_dir, print_statistics=dry_run
)
params["vocabulary"] = {
"type": "from_files",
"directory": vocab_dir,
"padding_token": vocab._padding_token,
"oov_token": vocab._oov_token,
}
logging.info(
"Switching to distributed training mode since multiple GPUs are configured | "
f"Master is at: {master_addr}:{master_port} | Rank of this node: {node_rank} | "
f"Number of workers in this node: {num_procs} | Number of nodes: {num_nodes} | "
f"World size: {world_size}"
)
mp.spawn(
_train_worker,
args=(
params.duplicate(),
serialization_dir,
include_package,
dry_run,
node_rank,
master_addr,
master_port,
world_size,
device_ids,
file_friendly_logging,
include_in_archive,
),
nprocs=num_procs,
)
if dry_run:
return None
else:
archive_model(serialization_dir, include_in_archive=include_in_archive)
model = Model.load(params, serialization_dir)
return model
def _train_worker(
process_rank: int,
params: Params,
serialization_dir: Union[str, PathLike],
include_package: List[str] = None,
dry_run: bool = False,
node_rank: int = 0,
master_addr: str = "127.0.0.1",
master_port: int = 29500,
world_size: int = 1,
distributed_device_ids: List[int] = None,
file_friendly_logging: bool = False,
include_in_archive: List[str] = None,
) -> Optional[Model]:
"""
Helper to train the configured model/experiment. In distributed mode, this is spawned as a
worker process. In a single GPU experiment, this returns the `Model` object and in distributed
training, nothing is returned.
# Parameters
process_rank : `int`
The process index that is initialized using the GPU device id.
params : `Params`
A parameter object specifying an AllenNLP Experiment.
serialization_dir : `str`
The directory in which to save results and logs.
include_package : `List[str]`, optional
In distributed mode, since this function would have been spawned as a separate process,
the extra imports need to be done again. NOTE: This does not have any effect in single
GPU training.
dry_run : `bool`, optional (default=`False`)
Do not train a model, but create a vocabulary, show dataset statistics and other training
information.
node_rank : `int`, optional
Rank of the node.
master_addr : `str`, optional (default=`"127.0.0.1"`)
Address of the master node for distributed training.
master_port : `int`, optional (default=`29500`)
Port of the master node for distributed training.
world_size : `int`, optional
The number of processes involved in distributed training.
distributed_device_ids : `List[int]`, optional
IDs of the devices involved in distributed training.
file_friendly_logging : `bool`, optional (default=`False`)
If `True`, we add newlines to tqdm output, even on an interactive terminal, and we slow
down tqdm's output to only once every 10 seconds.
include_in_archive : `List[str]`, optional
Paths relative to `serialization_dir` that should be archived in addition to the default ones.
# Returns
best_model : `Optional[Model]`
The model with the best epoch weights or `None` if in distributed training or in dry run.
"""
common_logging.FILE_FRIENDLY_LOGGING = file_friendly_logging
common_logging.prepare_global_logging(
serialization_dir,
rank=process_rank,
world_size=world_size,
)
common_util.prepare_environment(params)
distributed = world_size > 1
master = process_rank == 0
include_package = include_package or []
if distributed:
assert distributed_device_ids is not None
# Since the worker is spawned and not forked, the extra imports need to be done again.
# Both the ones from the plugins and the ones from `include_package`.
import_plugins()
for package_name in include_package:
common_util.import_module_and_submodules(package_name)
num_procs_per_node = len(distributed_device_ids)
# The Unique identifier of the worker process among all the processes in the
# distributed training group is computed here. This is used while initializing
# the process group using `init_process_group`
global_rank = node_rank * num_procs_per_node + process_rank
# Number of processes per node is useful to know if a process
# is a master in the local node (the node in which it is running).
os.environ["ALLENNLP_PROCS_PER_NODE"] = str(num_procs_per_node)
# In distributed training, the configured device is always going to be a list.
# The corresponding gpu id for the particular worker is obtained by picking the id
# from the device list with the rank as index
gpu_id = distributed_device_ids[process_rank] # type: ignore
# Up to this point, "cuda_device" might not be set in the trainer params.
# But a worker trainer only needs to know about its specific GPU id.
params["trainer"]["cuda_device"] = gpu_id
params["trainer"]["world_size"] = world_size
params["trainer"]["distributed"] = True
if gpu_id >= 0:
torch.cuda.set_device(int(gpu_id))
dist.init_process_group(
backend="nccl",
init_method=f"tcp://{master_addr}:{master_port}",
world_size=world_size,
rank=global_rank,
)
else:
dist.init_process_group(
backend="gloo",
init_method=f"tcp://{master_addr}:{master_port}",
world_size=world_size,
rank=global_rank,
)
logging.info(
f"Process group of world size {world_size} initialized "
f"for distributed training in worker {global_rank}"
)
train_loop = TrainModel.from_params(
params=params,
serialization_dir=serialization_dir,
local_rank=process_rank,
)
if dry_run:
return None
try:
if distributed: # let the setup get ready for all the workers
dist.barrier()
metrics = train_loop.run()
except KeyboardInterrupt:
# if we have completed an epoch, try to create a model archive.
if master and os.path.exists(os.path.join(serialization_dir, _DEFAULT_WEIGHTS)):
logging.info(
"Training interrupted by the user. Attempting to create "
"a model archive using the current best epoch weights."
)
archive_model(serialization_dir, include_in_archive=include_in_archive)
raise
if master:
train_loop.finish(metrics)
if not distributed:
return train_loop.model
return None
class TrainModel(Registrable):
"""
This class exists so that we can easily read a configuration file with the `allennlp train`
command. The basic logic is that we call `train_loop =
TrainModel.from_params(params_from_config_file)`, then `train_loop.run()`. This class performs
very little logic, pushing most of it to the `Trainer` that has a `train()` method. The
point here is to construct all of the dependencies for the `Trainer` in a way that we can do
it using `from_params()`, while having all of those dependencies transparently documented and
not hidden in calls to `params.pop()`. If you are writing your own training loop, you almost
certainly should not use this class, but you might look at the code for this class to see what
we do, to make writing your training loop easier.
In particular, if you are tempted to call the `__init__` method of this class, you are probably
doing something unnecessary. Literally all we do after `__init__` is call `trainer.train()`. You
can do that yourself, if you've constructed a `Trainer` already. What this class gives you is a
way to construct the `Trainer` by means of a config file. The actual constructor that we use
with `from_params` in this class is `from_partial_objects`. See that method for a description
of all of the allowed top-level keys in a configuration file used with `allennlp train`.
"""
default_implementation = "default"
"""
The default implementation is registered as 'default'.
"""
def __init__(
self,
serialization_dir: str,
model: Model,
trainer: Trainer,
evaluation_data_loader: DataLoader = None,
evaluate_on_test: bool = False,
batch_weight_key: str = "",
) -> None:
self.serialization_dir = serialization_dir
self.model = model
self.trainer = trainer
self.evaluation_data_loader = evaluation_data_loader
self.evaluate_on_test = evaluate_on_test
self.batch_weight_key = batch_weight_key
def run(self) -> Dict[str, Any]:
return self.trainer.train()
def finish(self, metrics: Dict[str, Any]):
if self.evaluation_data_loader is not None and self.evaluate_on_test:
logger.info("The model will be evaluated using the best epoch weights.")
test_metrics = training_util.evaluate(
self.model,
self.evaluation_data_loader,
cuda_device=self.trainer.cuda_device,
batch_weight_key=self.batch_weight_key,
)
for key, value in test_metrics.items():
metrics["test_" + key] = value
elif self.evaluation_data_loader is not None:
logger.info(
"To evaluate on the test set after training, pass the "
"'evaluate_on_test' flag, or use the 'allennlp evaluate' command."
)
common_util.dump_metrics(
os.path.join(self.serialization_dir, "metrics.json"), metrics, log=True
)
@classmethod
def from_partial_objects(
cls,
serialization_dir: str,
local_rank: int,
dataset_reader: DatasetReader,
train_data_path: str,
model: Lazy[Model],
data_loader: Lazy[DataLoader],
trainer: Lazy[Trainer],
vocabulary: Lazy[Vocabulary] = Lazy(Vocabulary),
datasets_for_vocab_creation: List[str] = None,
validation_dataset_reader: DatasetReader = None,
validation_data_path: str = None,
validation_data_loader: Lazy[DataLoader] = None,
test_data_path: str = None,
evaluate_on_test: bool = False,
batch_weight_key: str = "",
) -> "TrainModel":
"""
This method is intended for use with our `FromParams` logic, to construct a `TrainModel`
object from a config file passed to the `allennlp train` command. The arguments to this
method are the allowed top-level keys in a configuration file (except for the first three,
which are obtained separately).
You *could* use this outside of our `FromParams` logic if you really want to, but there
might be easier ways to accomplish your goal than instantiating `Lazy` objects. If you are
writing your own training loop, we recommend that you look at the implementation of this
method for inspiration and possibly some utility functions you can call, but you very likely
should not use this method directly.
The `Lazy` type annotations here are a mechanism for building dependencies to an object
sequentially - the `TrainModel` object needs data, a model, and a trainer, but the model
needs to see the data before it's constructed (to create a vocabulary) and the trainer needs
the data and the model before it's constructed. Objects that have sequential dependencies
like this are labeled as `Lazy` in their type annotations, and we pass the missing
dependencies when we call their `construct()` method, which you can see in the code below.
# Parameters
serialization_dir: `str`
The directory where logs and model archives will be saved.
In a typical AllenNLP configuration file, this parameter does not get an entry as a
top-level key, it gets passed in separately.
local_rank: `int`
The process index that is initialized using the GPU device id.
In a typical AllenNLP configuration file, this parameter does not get an entry as a
top-level key, it gets passed in separately.
dataset_reader: `DatasetReader`
The `DatasetReader` that will be used for training and (by default) for validation.
train_data_path: `str`
The file (or directory) that will be passed to `dataset_reader.read()` to construct the
training data.
model: `Lazy[Model]`
The model that we will train. This is lazy because it depends on the `Vocabulary`;
after constructing the vocabulary we call `model.construct(vocab=vocabulary)`.
data_loader: `Lazy[DataLoader]`
The data_loader we use to batch instances from the dataset reader at training and (by
default) validation time. This is lazy because it takes a dataset in its constructor.
trainer: `Lazy[Trainer]`
The `Trainer` that actually implements the training loop. This is a lazy object because
it depends on the model that's going to be trained.
vocabulary: `Lazy[Vocabulary]`, optional (default=`Lazy(Vocabulary)`)
The `Vocabulary` that we will use to convert strings in the data to integer ids (and
possibly set sizes of embedding matrices in the `Model`). By default we construct the
vocabulary from the instances that we read.
datasets_for_vocab_creation: `List[str]`, optional (default=`None`)
If you pass in more than one dataset but don't want to use all of them to construct a
vocabulary, you can pass in this key to limit it. Valid entries in the list are
"train", "validation" and "test".
validation_dataset_reader: `DatasetReader`, optional (default=`None`)
If given, we will use this dataset reader for the validation data instead of
`dataset_reader`.
validation_data_path: `str`, optional (default=`None`)
If given, we will use this data for computing validation metrics and early stopping.
validation_data_loader: `Lazy[DataLoader]`, optional (default=`None`)
If given, the data_loader we use to batch instances from the dataset reader at
validation and test time. This is lazy because it takes a dataset in its constructor.
test_data_path: `str`, optional (default=`None`)
If given, we will use this as test data. This makes it available for vocab creation by
default, but nothing else.
evaluate_on_test: `bool`, optional (default=`False`)
If given, we will evaluate the final model on this data at the end of training. Note
that we do not recommend using this for actual test data in every-day experimentation;
you should only very rarely evaluate your model on actual test data.
batch_weight_key: `str`, optional (default=`""`)
The name of metric used to weight the loss on a per-batch basis. This is only used
during evaluation on final test data, if you've specified `evaluate_on_test=True`.
"""
datasets = training_util.read_all_datasets(
train_data_path=train_data_path,
dataset_reader=dataset_reader,
validation_dataset_reader=validation_dataset_reader,
validation_data_path=validation_data_path,
test_data_path=test_data_path,
)
if datasets_for_vocab_creation:
for key in datasets_for_vocab_creation:
if key not in datasets:
raise ConfigurationError(f"invalid 'dataset_for_vocab_creation' {key}")
logger.info(
"From dataset instances, %s will be considered for vocabulary creation.",
", ".join(datasets_for_vocab_creation),
)
instance_generator = (
instance
for key, dataset in datasets.items()
if datasets_for_vocab_creation is None or key in datasets_for_vocab_creation
for instance in dataset
)
vocabulary_ = vocabulary.construct(instances=instance_generator)
model_ = model.construct(vocab=vocabulary_, serialization_dir=serialization_dir)
# Initializing the model can have side effect of expanding the vocabulary.
# Save the vocab only in the master. In the degenerate non-distributed
# case, we're trivially the master. In the distributed case this is safe
# to do without worrying about race conditions since saving and loading
# the vocab involves acquiring a file lock.
if local_rank == 0:
vocabulary_path = os.path.join(serialization_dir, "vocabulary")
vocabulary_.save_to_files(vocabulary_path)
for dataset in datasets.values():
dataset.index_with(model_.vocab)
data_loader_ = data_loader.construct(dataset=datasets["train"])
validation_data = datasets.get("validation")
validation_data_loader_: Optional[DataLoader] = None
if validation_data is not None:
if validation_data_loader is None:
validation_data_loader_ = data_loader.construct(dataset=validation_data)
if getattr(validation_data_loader_, "_batches_per_epoch", None) is not None:
warnings.warn(
"Using 'data_loader' params to construct validation data loader since "
"'validation_data_loader' params not specified, but you have "
"'data_loader.batches_per_epoch' set which may result in different "
"validation datasets for each epoch.",
UserWarning,
)
else:
validation_data_loader_ = validation_data_loader.construct(dataset=validation_data)
test_data = datasets.get("test")
test_data_loader: Optional[DataLoader] = None
if test_data is not None:
if validation_data_loader is None:
test_data_loader = data_loader.construct(dataset=test_data)
else:
test_data_loader = validation_data_loader.construct(dataset=test_data)
# We don't need to pass serialization_dir and local_rank here, because they will have been
# passed through the trainer by from_params already, because they were keyword arguments to
# construct this class in the first place.
trainer_ = trainer.construct(
model=model_,
data_loader=data_loader_,
validation_data_loader=validation_data_loader_,
)
assert trainer_ is not None
return cls(
serialization_dir=serialization_dir,
model=model_,
trainer=trainer_,
evaluation_data_loader=test_data_loader,
evaluate_on_test=evaluate_on_test,
batch_weight_key=batch_weight_key,
)
TrainModel.register("default", constructor="from_partial_objects")(TrainModel)
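# For reference, a minimal config consumed by `from_partial_objects` (via
# `allennlp train`) uses these top-level keys; the concrete values below are
# illustrative, not a recommended setup:
#
#     {
#         "dataset_reader": {"type": "text_classification_json"},
#         "train_data_path": "data/train.jsonl",
#         "validation_data_path": "data/dev.jsonl",
#         "model": {"type": "basic_classifier", "...": "..."},
#         "data_loader": {"batch_size": 32, "shuffle": true},
#         "trainer": {"optimizer": "adam", "num_epochs": 5, "cuda_device": -1}
#     }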
| allennlp-master | allennlp/commands/train.py |
"""
The `evaluate` subcommand can be used to
evaluate a trained model against a dataset
and report any metrics calculated by the model.
"""
import argparse
import json
import logging
from typing import Any, Dict
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
from allennlp.common import logging as common_logging
from allennlp.common.util import prepare_environment
from allennlp.data import DataLoader
from allennlp.models.archival import load_archive
from allennlp.training.util import evaluate
logger = logging.getLogger(__name__)
@Subcommand.register("evaluate")
class Evaluate(Subcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Evaluate the specified model + dataset"""
subparser = parser.add_parser(
self.name, description=description, help="Evaluate the specified model + dataset."
)
subparser.add_argument("archive_file", type=str, help="path to an archived trained model")
subparser.add_argument(
"input_file", type=str, help="path to the file containing the evaluation data"
)
subparser.add_argument(
"--output-file", type=str, help="optional path to write the metrics to as JSON"
)
subparser.add_argument(
"--predictions-output-file",
type=str,
help="optional path to write the predictions to as JSON lines",
)
subparser.add_argument(
"--weights-file", type=str, help="a path that overrides which weights file to use"
)
cuda_device = subparser.add_mutually_exclusive_group(required=False)
cuda_device.add_argument(
"--cuda-device", type=int, default=-1, help="id of GPU to use (if any)"
)
subparser.add_argument(
"-o",
"--overrides",
type=str,
default="",
help=(
"a json(net) structure used to override the experiment configuration, e.g., "
"'{\"iterator.batch_size\": 16}'. Nested parameters can be specified either"
" with nested dictionaries or with dot syntax."
),
)
subparser.add_argument(
"--batch-size", type=int, help="If non-empty, the batch size to use during evaluation."
)
subparser.add_argument(
"--batch-weight-key",
type=str,
default="",
help="If non-empty, name of metric used to weight the loss on a per-batch basis.",
)
subparser.add_argument(
"--extend-vocab",
action="store_true",
default=False,
help="if specified, we will use the instances in your new dataset to "
"extend your vocabulary. If pretrained-file was used to initialize "
"embedding layers, you may also need to pass --embedding-sources-mapping.",
)
subparser.add_argument(
"--embedding-sources-mapping",
type=str,
default="",
help="a JSON dict defining mapping from embedding module path to embedding "
"pretrained-file used during training. If not passed, and embedding needs to be "
"extended, we will try to use the original file paths used during training. If "
"they are not available we will use random vectors for embedding extension.",
)
subparser.add_argument(
"--file-friendly-logging",
action="store_true",
default=False,
help="outputs tqdm status on separate lines and slows tqdm refresh rate",
)
subparser.set_defaults(func=evaluate_from_args)
return subparser
def evaluate_from_args(args: argparse.Namespace) -> Dict[str, Any]:
common_logging.FILE_FRIENDLY_LOGGING = args.file_friendly_logging
# Disable some of the more verbose logging statements
logging.getLogger("allennlp.common.params").disabled = True
logging.getLogger("allennlp.nn.initializers").disabled = True
logging.getLogger("allennlp.modules.token_embedders.embedding").setLevel(logging.INFO)
# Load from archive
archive = load_archive(
args.archive_file,
weights_file=args.weights_file,
cuda_device=args.cuda_device,
overrides=args.overrides,
)
config = archive.config
prepare_environment(config)
model = archive.model
model.eval()
# Load the evaluation data
dataset_reader = archive.validation_dataset_reader
evaluation_data_path = args.input_file
logger.info("Reading evaluation data from %s", evaluation_data_path)
instances = dataset_reader.read(evaluation_data_path)
embedding_sources = (
json.loads(args.embedding_sources_mapping) if args.embedding_sources_mapping else {}
)
if args.extend_vocab:
logger.info("Vocabulary is being extended with test instances.")
model.vocab.extend_from_instances(instances=instances)
model.extend_embedder_vocab(embedding_sources)
instances.index_with(model.vocab)
data_loader_params = config.pop("validation_data_loader", None)
if data_loader_params is None:
data_loader_params = config.pop("data_loader")
if args.batch_size:
data_loader_params["batch_size"] = args.batch_size
data_loader = DataLoader.from_params(dataset=instances, params=data_loader_params)
metrics = evaluate(
model,
data_loader,
args.cuda_device,
args.batch_weight_key,
output_file=args.output_file,
predictions_output_file=args.predictions_output_file,
)
logger.info("Finished evaluating.")
return metrics
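# Example usage (the archive and data paths below are hypothetical):
#
#     allennlp evaluate model.tar.gz data/test.jsonl \
#         --output-file test_metrics.json --cuda-device 0 --batch-size 32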
| allennlp-master | allennlp/commands/evaluate.py |
"""
Base class for subcommands under `allennlp.run`.
"""
import argparse
from typing import Callable, Dict, Optional, Type, TypeVar
from overrides import overrides
from allennlp.common import Registrable
T = TypeVar("T", bound="Subcommand")
class Subcommand(Registrable):
"""
An abstract class representing subcommands for allennlp.run.
If you wanted to (for example) create your own custom `special-evaluate` command to use like
`allennlp special-evaluate ...`
you would create a `Subcommand` subclass and then pass it as an override to
[`main`](#main).
"""
requires_plugins: bool = True
"""
If `True`, the sub-command will trigger a call to `import_plugins()` (except for custom
subcommands which come from plugins, since plugins will already have been imported by the
time the subcommand is discovered), and will also have an additional `--include-package` flag.
"""
_reverse_registry: Dict[Type, str] = {}
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
raise NotImplementedError
@classmethod
@overrides
def register(
cls: Type[T], name: str, constructor: Optional[str] = None, exist_ok: bool = False
) -> Callable[[Type[T]], Type[T]]:
super_register_fn = super().register(name, constructor=constructor, exist_ok=exist_ok)
def add_name_to_reverse_registry(subclass: Type[T]) -> Type[T]:
subclass = super_register_fn(subclass)
# Don't need to check `exist_ok`, as it's done by super.
# Also, don't need to delete previous entries if overridden, they can just stay there.
cls._reverse_registry[subclass] = name
return subclass
return add_name_to_reverse_registry
@property
def name(self) -> str:
return self._reverse_registry[self.__class__]
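# Illustrative sketch, not part of AllenNLP: a custom subcommand registered the
# same way the built-in ones are. The name "count-lines" and its behavior are
# hypothetical; a plugin that imports this module would make
# `allennlp count-lines some_file.txt` available.
@Subcommand.register("count-lines")
class CountLines(Subcommand):
    def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
        subparser = parser.add_parser(self.name, description="Count lines in a file.")
        subparser.add_argument("input_file", type=str, help="path to the file to count")
        subparser.set_defaults(func=self._run)
        return subparser

    @staticmethod
    def _run(args: argparse.Namespace) -> None:
        # Stream the file and print the number of lines it contains.
        with open(args.input_file) as f:
            print(sum(1 for _ in f))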
| allennlp-master | allennlp/commands/subcommand.py |
"""
The `test-install` subcommand provides a programmatic way to verify
that AllenNLP has been successfully installed.
"""
import argparse
import logging
import pathlib
from overrides import overrides
import torch
import allennlp
from allennlp.common.util import import_module_and_submodules
from allennlp.commands.subcommand import Subcommand
from allennlp.version import VERSION
logger = logging.getLogger(__name__)
@Subcommand.register("test-install")
class TestInstall(Subcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Test that AllenNLP is installed correctly."""
subparser = parser.add_parser(
self.name, description=description, help="Test AllenNLP installation."
)
subparser.set_defaults(func=_run_test)
return subparser
def _get_module_root():
return pathlib.Path(allennlp.__file__).parent
def _run_test(args: argparse.Namespace):
# Make sure we can actually import the main modules without errors.
import_module_and_submodules("allennlp.common")
import_module_and_submodules("allennlp.data")
import_module_and_submodules("allennlp.interpret")
import_module_and_submodules("allennlp.models")
import_module_and_submodules("allennlp.modules")
import_module_and_submodules("allennlp.nn")
import_module_and_submodules("allennlp.predictors")
import_module_and_submodules("allennlp.training")
logger.info("AllenNLP version %s installed to %s", VERSION, _get_module_root())
logger.info("Cuda devices available: %s", torch.cuda.device_count())
| allennlp-master | allennlp/commands/test_install.py |
"""
The `find-lr` subcommand can be used to find a good learning rate for a model.
It requires a configuration file and a directory in
which to write the results.
"""
import argparse
import logging
import math
import os
import re
from typing import List, Tuple
import itertools
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
from allennlp.common import Params, Tqdm
from allennlp.common import logging as common_logging
from allennlp.common.checks import check_for_gpu, ConfigurationError
from allennlp.common.util import prepare_environment
from allennlp.data import Vocabulary
from allennlp.data import DataLoader
from allennlp.models import Model
from allennlp.training import GradientDescentTrainer, Trainer
from allennlp.training.util import create_serialization_dir, datasets_from_params
logger = logging.getLogger(__name__)
@Subcommand.register("find-lr")
class FindLearningRate(Subcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Find a learning rate range where loss decreases quickly
for the specified model and dataset."""
subparser = parser.add_parser(
self.name, description=description, help="Find a learning rate range."
)
subparser.add_argument(
"param_path", type=str, help="path to parameter file describing the model to be trained"
)
subparser.add_argument(
"-s",
"--serialization-dir",
required=True,
type=str,
help="The directory in which to save results.",
)
subparser.add_argument(
"-o",
"--overrides",
type=str,
default="",
help=(
"a json(net) structure used to override the experiment configuration, e.g., "
"'{\"iterator.batch_size\": 16}'. Nested parameters can be specified either"
" with nested dictionaries or with dot syntax."
),
)
subparser.add_argument(
"--start-lr", type=float, default=1e-5, help="learning rate to start the search"
)
subparser.add_argument(
"--end-lr", type=float, default=10, help="learning rate up to which search is done"
)
subparser.add_argument(
"--num-batches",
type=int,
default=100,
help="number of mini-batches to run learning rate finder",
)
subparser.add_argument(
"--stopping-factor",
type=float,
default=None,
help="stop the search when the current loss exceeds the best loss recorded by "
"multiple of stopping factor",
)
subparser.add_argument(
"--linear",
action="store_true",
help="increase learning rate linearly instead of exponential increase",
)
subparser.add_argument(
"-f",
"--force",
action="store_true",
required=False,
help="overwrite the output directory if it exists",
)
subparser.add_argument(
"--file-friendly-logging",
action="store_true",
default=False,
help="outputs tqdm status on separate lines and slows tqdm refresh rate",
)
subparser.set_defaults(func=find_learning_rate_from_args)
return subparser
def find_learning_rate_from_args(args: argparse.Namespace) -> None:
"""
Start learning rate finder for given args
"""
common_logging.FILE_FRIENDLY_LOGGING = args.file_friendly_logging
params = Params.from_file(args.param_path, args.overrides)
find_learning_rate_model(
params,
args.serialization_dir,
start_lr=args.start_lr,
end_lr=args.end_lr,
num_batches=args.num_batches,
linear_steps=args.linear,
stopping_factor=args.stopping_factor,
force=args.force,
)
def find_learning_rate_model(
params: Params,
serialization_dir: str,
start_lr: float = 1e-5,
end_lr: float = 10,
num_batches: int = 100,
linear_steps: bool = False,
stopping_factor: float = None,
force: bool = False,
) -> None:
"""
    Runs a learning rate search for the given `num_batches` and saves the results in `serialization_dir`.
# Parameters
params : `Params`
A parameter object specifying an AllenNLP Experiment.
serialization_dir : `str`
The directory in which to save results.
start_lr : `float`
Learning rate to start the search.
end_lr : `float`
        Learning rate up to which the search is done.
    num_batches : `int`
        Number of mini-batches to run the learning rate finder for.
    linear_steps : `bool`
        If `True`, increase the learning rate linearly; otherwise increase it exponentially.
    stopping_factor : `float`
        Stop the search when the current loss exceeds the best recorded loss by a
        multiple of the stopping factor. If `None`, the search proceeds until `end_lr`.
force : `bool`
If True and the serialization directory already exists, everything in it will
be removed prior to finding the learning rate.
"""
create_serialization_dir(params, serialization_dir, recover=False, force=force)
prepare_environment(params)
cuda_device = params.params.get("trainer").get("cuda_device", -1)
check_for_gpu(cuda_device)
distributed_params = params.params.get("distributed")
# See https://github.com/allenai/allennlp/issues/3658
assert not distributed_params, "find-lr is not compatible with DistributedDataParallel."
all_datasets = datasets_from_params(params, serialization_dir=serialization_dir)
datasets_for_vocab_creation = set(params.pop("datasets_for_vocab_creation", all_datasets))
for dataset in datasets_for_vocab_creation:
if dataset not in all_datasets:
raise ConfigurationError(f"invalid 'dataset_for_vocab_creation' {dataset}")
logger.info(
"From dataset instances, %s will be considered for vocabulary creation.",
", ".join(datasets_for_vocab_creation),
)
vocab = Vocabulary.from_params(
params.pop("vocabulary", {}),
instances=(
instance
for key, dataset in all_datasets.items()
for instance in dataset
if key in datasets_for_vocab_creation
),
)
train_data = all_datasets["train"]
train_data.index_with(vocab)
model = Model.from_params(
vocab=vocab, params=params.pop("model"), serialization_dir=serialization_dir
)
data_loader = DataLoader.from_params(dataset=train_data, params=params.pop("data_loader"))
trainer_params = params.pop("trainer")
no_grad_regexes = trainer_params.pop("no_grad", ())
for name, parameter in model.named_parameters():
if any(re.search(regex, name) for regex in no_grad_regexes):
parameter.requires_grad_(False)
trainer_choice = trainer_params.pop("type", "gradient_descent")
if trainer_choice != "gradient_descent":
raise ConfigurationError(
"currently find-learning-rate only works with the GradientDescentTrainer"
)
trainer: GradientDescentTrainer = Trainer.from_params( # type: ignore
model=model,
serialization_dir=serialization_dir,
data_loader=data_loader,
params=trainer_params,
)
logger.info(
f"Starting learning rate search from {start_lr} to {end_lr} in {num_batches} iterations."
)
learning_rates, losses = search_learning_rate(
trainer,
start_lr=start_lr,
end_lr=end_lr,
num_batches=num_batches,
linear_steps=linear_steps,
stopping_factor=stopping_factor,
)
logger.info("Finished learning rate search.")
losses = _smooth(losses, 0.98)
_save_plot(learning_rates, losses, os.path.join(serialization_dir, "lr-losses.png"))
def search_learning_rate(
trainer: GradientDescentTrainer,
start_lr: float = 1e-5,
end_lr: float = 10,
num_batches: int = 100,
linear_steps: bool = False,
stopping_factor: float = None,
) -> Tuple[List[float], List[float]]:
"""
    Runs a training loop on the model using the
    [`GradientDescentTrainer`](../training/trainer.md#gradientdescenttrainer),
    increasing the learning rate from `start_lr` to `end_lr` and recording the losses.
# Parameters
    trainer : `GradientDescentTrainer`
        The trainer to run the learning rate search with.
    start_lr : `float`
        The learning rate to start the search.
    end_lr : `float`
        The learning rate up to which the search is done.
    num_batches : `int`
        Number of batches to run the learning rate finder for.
    linear_steps : `bool`
        If `True`, increase the learning rate linearly; otherwise increase it exponentially.
    stopping_factor : `float`
        Stop the search when the current loss exceeds the best recorded loss by a
        multiple of the stopping factor. If `None`, the search proceeds until `end_lr`.
    # Returns
    (learning_rates, losses) : `Tuple[List[float], List[float]]`
        Returns a list of learning rates and the corresponding losses.
        Note: the losses are recorded before applying the corresponding learning rate.
"""
if num_batches <= 10:
raise ConfigurationError(
"The number of iterations for learning rate finder should be greater than 10."
)
trainer.model.train()
infinite_generator = itertools.cycle(trainer.data_loader)
train_generator_tqdm = Tqdm.tqdm(infinite_generator, total=num_batches)
learning_rates = []
losses = []
best = 1e9
if linear_steps:
lr_update_factor = (end_lr - start_lr) / num_batches
else:
lr_update_factor = (end_lr / start_lr) ** (1.0 / num_batches)
for i, batch in enumerate(train_generator_tqdm):
if linear_steps:
current_lr = start_lr + (lr_update_factor * i)
else:
current_lr = start_lr * (lr_update_factor ** i)
for param_group in trainer.optimizer.param_groups:
param_group["lr"] = current_lr
# Zero gradients.
            # NOTE: this is actually more efficient than calling `trainer.optimizer.zero_grad()`
            # because it avoids a read op when the gradients are first updated below.
for p in param_group["params"]:
p.grad = None
loss = trainer.batch_outputs(batch, for_training=True)["loss"]
loss.backward()
loss = loss.detach().cpu().item()
if stopping_factor is not None and (math.isnan(loss) or loss > stopping_factor * best):
logger.info(f"Loss ({loss}) exceeds stopping_factor * lowest recorded loss.")
break
trainer.rescale_gradients()
trainer.optimizer.step()
learning_rates.append(current_lr)
losses.append(loss)
if loss < best and i > 10:
best = loss
if i == num_batches:
break
return learning_rates, losses
def _smooth(values: List[float], beta: float) -> List[float]:
""" Exponential smoothing of values """
avg_value = 0.0
smoothed = []
for i, value in enumerate(values):
avg_value = beta * avg_value + (1 - beta) * value
smoothed.append(avg_value / (1 - beta ** (i + 1)))
return smoothed
def _save_plot(learning_rates: List[float], losses: List[float], save_path: str):
try:
import matplotlib
matplotlib.use("Agg") # noqa
import matplotlib.pyplot as plt
except ModuleNotFoundError as error:
        logger.warning(
            "To use allennlp find-learning-rate, please install matplotlib: pip install 'matplotlib>=2.2.3'."
        )
raise error
plt.ylabel("loss")
plt.xlabel("learning rate (log10 scale)")
plt.xscale("log")
plt.plot(learning_rates, losses)
logger.info(f"Saving learning_rate vs loss plot to {save_path}.")
plt.savefig(save_path)
| allennlp-master | allennlp/commands/find_learning_rate.py |
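A small, self-contained sketch of the learning-rate stepping and smoothing arithmetic used above; it has no AllenNLP dependency, the numbers are illustrative, and it simply mirrors the logic of `search_learning_rate` and `_smooth`.

```python
# Standalone illustration of the search schedule and the bias-corrected smoothing above.
start_lr, end_lr, num_batches = 1e-5, 10.0, 100

linear_factor = (end_lr - start_lr) / num_batches
exp_factor = (end_lr / start_lr) ** (1.0 / num_batches)

linear_lrs = [start_lr + linear_factor * i for i in range(num_batches)]
exp_lrs = [start_lr * exp_factor ** i for i in range(num_batches)]
print(f"linear:      {linear_lrs[0]:.2g} .. {linear_lrs[-1]:.2g}")   # 1e-05 .. 9.9
print(f"exponential: {exp_lrs[0]:.2g} .. {exp_lrs[-1]:.2g}")         # 1e-05 .. 8.7


def smooth(values, beta=0.98):
    # Same bias-corrected exponential moving average as `_smooth` above.
    avg, out = 0.0, []
    for i, value in enumerate(values):
        avg = beta * avg + (1 - beta) * value
        out.append(avg / (1 - beta ** (i + 1)))
    return out


print(smooth([5.0, 4.0, 3.0]))  # each value pulled toward the running average
```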
from typing import List
import torch
from torch.nn import ParameterList, Parameter
from allennlp.common.checks import ConfigurationError
from allennlp.nn import util
class ScalarMix(torch.nn.Module):
"""
Computes a parameterised scalar mixture of N tensors, `mixture = gamma * sum(s_k * tensor_k)`
where `s = softmax(w)`, with `w` and `gamma` scalar parameters.
In addition, if `do_layer_norm=True` then apply layer normalization to each tensor
before weighting.
"""
def __init__(
self,
mixture_size: int,
do_layer_norm: bool = False,
initial_scalar_parameters: List[float] = None,
trainable: bool = True,
) -> None:
super().__init__()
self.mixture_size = mixture_size
self.do_layer_norm = do_layer_norm
if initial_scalar_parameters is None:
initial_scalar_parameters = [0.0] * mixture_size
elif len(initial_scalar_parameters) != mixture_size:
raise ConfigurationError(
"Length of initial_scalar_parameters {} differs "
"from mixture_size {}".format(initial_scalar_parameters, mixture_size)
)
self.scalar_parameters = ParameterList(
[
Parameter(
torch.FloatTensor([initial_scalar_parameters[i]]), requires_grad=trainable
)
for i in range(mixture_size)
]
)
self.gamma = Parameter(torch.FloatTensor([1.0]), requires_grad=trainable)
def forward(self, tensors: List[torch.Tensor], mask: torch.BoolTensor = None) -> torch.Tensor:
"""
        Compute a weighted average of the `tensors`. The input tensors can be any shape
        with at least two dimensions, but must all be the same shape.
        When `do_layer_norm=True`, the `mask` is a required input. If the `tensors` are
dimensioned `(dim_0, ..., dim_{n-1}, dim_n)`, then the `mask` is dimensioned
`(dim_0, ..., dim_{n-1})`, as in the typical case with `tensors` of shape
`(batch_size, timesteps, dim)` and `mask` of shape `(batch_size, timesteps)`.
When `do_layer_norm=False` the `mask` is ignored.
"""
if len(tensors) != self.mixture_size:
raise ConfigurationError(
"{} tensors were passed, but the module was initialized to "
"mix {} tensors.".format(len(tensors), self.mixture_size)
)
def _do_layer_norm(tensor, broadcast_mask, num_elements_not_masked):
tensor_masked = tensor * broadcast_mask
mean = torch.sum(tensor_masked) / num_elements_not_masked
variance = (
torch.sum(((tensor_masked - mean) * broadcast_mask) ** 2) / num_elements_not_masked
)
return (tensor - mean) / torch.sqrt(variance + util.tiny_value_of_dtype(variance.dtype))
normed_weights = torch.nn.functional.softmax(
torch.cat([parameter for parameter in self.scalar_parameters]), dim=0
)
normed_weights = torch.split(normed_weights, split_size_or_sections=1)
if not self.do_layer_norm:
pieces = []
for weight, tensor in zip(normed_weights, tensors):
pieces.append(weight * tensor)
return self.gamma * sum(pieces)
else:
assert mask is not None
broadcast_mask = mask.unsqueeze(-1)
input_dim = tensors[0].size(-1)
num_elements_not_masked = torch.sum(mask) * input_dim
pieces = []
for weight, tensor in zip(normed_weights, tensors):
pieces.append(
weight * _do_layer_norm(tensor, broadcast_mask, num_elements_not_masked)
)
return self.gamma * sum(pieces)
| allennlp-master | allennlp/modules/scalar_mix.py |
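A short usage sketch for the `ScalarMix` module above; the shapes are illustrative.

```python
import torch

from allennlp.modules.scalar_mix import ScalarMix

# Mix three layers of shape (batch_size, timesteps, dim) into a single tensor.
mixer = ScalarMix(mixture_size=3)
layers = [torch.randn(2, 5, 7) for _ in range(3)]
mixed = mixer(layers)
print(mixed.shape)  # torch.Size([2, 5, 7])

# With layer norm enabled, a boolean mask over (batch_size, timesteps) is required.
mixer_ln = ScalarMix(mixture_size=3, do_layer_norm=True)
mask = torch.ones(2, 5, dtype=torch.bool)
mixed_ln = mixer_ln(layers, mask)
```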
"""
A stacked LSTM with LSTM layers which alternate between going forwards over
the sequence and going backwards.
"""
from typing import Optional, Tuple, Union, List
import torch
from torch.nn.utils.rnn import PackedSequence
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.common.checks import ConfigurationError
TensorPair = Tuple[torch.Tensor, torch.Tensor]
class StackedAlternatingLstm(torch.nn.Module):
"""
A stacked LSTM with LSTM layers which alternate between going forwards over
the sequence and going backwards. This implementation is based on the
    description in [Deep Semantic Role Labeling: What Works and What's Next][0].
[0]: https://www.aclweb.org/anthology/P17-1044.pdf
[1]: https://arxiv.org/abs/1512.05287
# Parameters
input_size : `int`, required
The dimension of the inputs to the LSTM.
hidden_size : `int`, required
The dimension of the outputs of the LSTM.
num_layers : `int`, required
The number of stacked LSTMs to use.
recurrent_dropout_probability : `float`, optional (default = `0.0`)
The dropout probability to be used in a dropout scheme as stated in
[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks][1].
use_input_projection_bias : `bool`, optional (default = `True`)
Whether or not to use a bias on the input projection layer. This is mainly here
for backwards compatibility reasons and will be removed (and set to False)
in future releases.
# Returns
output_accumulator : `PackedSequence`
The outputs of the interleaved LSTMs per timestep. A tensor of shape
(batch_size, max_timesteps, hidden_size) where for a given batch
element, all outputs past the sequence length for that batch are
zero tensors.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
) -> None:
super().__init__()
# Required to be wrapped with a `PytorchSeq2SeqWrapper`.
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
layers = []
lstm_input_size = input_size
for layer_index in range(num_layers):
go_forward = layer_index % 2 == 0
layer = AugmentedLstm(
lstm_input_size,
hidden_size,
go_forward,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=use_input_projection_bias,
)
lstm_input_size = hidden_size
self.add_module("layer_{}".format(layer_index), layer)
layers.append(layer)
self.lstm_layers = layers
def forward(
self, inputs: PackedSequence, initial_state: Optional[TensorPair] = None
) -> Tuple[Union[torch.Tensor, PackedSequence], TensorPair]:
"""
# Parameters
inputs : `PackedSequence`, required.
A batch first `PackedSequence` to run the stacked LSTM over.
initial_state : `Tuple[torch.Tensor, torch.Tensor]`, optional, (default = `None`)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. Each tensor has shape (1, batch_size, output_dimension).
# Returns
output_sequence : `PackedSequence`
The encoded sequence of shape (batch_size, sequence_length, hidden_size)
final_states: `Tuple[torch.Tensor, torch.Tensor]`
The per-layer final (state, memory) states of the LSTM, each with shape
(num_layers, batch_size, hidden_size).
"""
if not initial_state:
hidden_states: List[Optional[TensorPair]] = [None] * len(self.lstm_layers)
elif initial_state[0].size()[0] != len(self.lstm_layers):
raise ConfigurationError(
"Initial states were passed to forward() but the number of "
"initial states does not match the number of layers."
)
else:
hidden_states = list(zip(initial_state[0].split(1, 0), initial_state[1].split(1, 0)))
output_sequence = inputs
final_states = []
for i, state in enumerate(hidden_states):
layer = getattr(self, "layer_{}".format(i))
# The state is duplicated to mirror the Pytorch API for LSTMs.
output_sequence, final_state = layer(output_sequence, state)
final_states.append(final_state)
final_hidden_state, final_cell_state = tuple(
torch.cat(state_list, 0) for state_list in zip(*final_states)
)
return output_sequence, (final_hidden_state, final_cell_state)
| allennlp-master | allennlp/modules/stacked_alternating_lstm.py |
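A minimal usage sketch for `StackedAlternatingLstm`; it assumes the usual pattern of packing a length-sorted, padded batch before calling the module, and the sizes are illustrative.

```python
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

from allennlp.modules.stacked_alternating_lstm import StackedAlternatingLstm

lstm = StackedAlternatingLstm(input_size=10, hidden_size=20, num_layers=3)

inputs = torch.randn(4, 6, 10)        # (batch_size, timesteps, input_size)
lengths = torch.tensor([6, 5, 3, 2])  # already sorted by decreasing length
packed = pack_padded_sequence(inputs, lengths, batch_first=True)

packed_output, (hidden, cell) = lstm(packed)
output, _ = pad_packed_sequence(packed_output, batch_first=True)
print(output.shape)  # torch.Size([4, 6, 20])
print(hidden.shape)  # torch.Size([3, 4, 20]) -- one state per layer
```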
from typing import Tuple, Union, Optional, Callable, Any
import torch
from torch.nn.utils.rnn import pack_padded_sequence, PackedSequence
from allennlp.nn.util import get_lengths_from_binary_sequence_mask, sort_batch_by_length
# We have two types here for the state, because storing the state in something
# which is Iterable (like a tuple, below), is helpful for internal manipulation
# - however, the states are consumed as either Tensors or a Tuple of Tensors, so
# returning them in this format is unhelpful.
RnnState = Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]
RnnStateStorage = Tuple[torch.Tensor, ...]
class _EncoderBase(torch.nn.Module):
"""
    This abstract class serves as a base for the `Encoder` abstractions in AllenNLP:
- [`Seq2SeqEncoders`](./seq2seq_encoders/seq2seq_encoder.md)
- [`Seq2VecEncoders`](./seq2vec_encoders/seq2vec_encoder.md)
Additionally, this class provides functionality for sorting sequences by length
so they can be consumed by Pytorch RNN classes, which require their inputs to be
    sorted by length. Finally, it also provides optional statefulness to all of its
subclasses by allowing the caching and retrieving of the hidden states of RNNs.
"""
def __init__(self, stateful: bool = False) -> None:
super().__init__()
self.stateful = stateful
self._states: Optional[RnnStateStorage] = None
def sort_and_run_forward(
self,
module: Callable[
[PackedSequence, Optional[RnnState]],
Tuple[Union[PackedSequence, torch.Tensor], RnnState],
],
inputs: torch.Tensor,
mask: torch.BoolTensor,
hidden_state: Optional[RnnState] = None,
):
"""
This function exists because Pytorch RNNs require that their inputs be sorted
before being passed as input. As all of our Seq2xxxEncoders use this functionality,
it is provided in a base class. This method can be called on any module which
takes as input a `PackedSequence` and some `hidden_state`, which can either be a
tuple of tensors or a tensor.
As all of our Seq2xxxEncoders have different return types, we return `sorted`
outputs from the module, which is called directly. Additionally, we return the
        indices into the batch dimension required to restore the tensor to its correct,
        unsorted order and the number of valid batch elements (i.e. the number of elements
in the batch which are not completely masked). This un-sorting and re-padding
of the module outputs is left to the subclasses because their outputs have different
types and handling them smoothly here is difficult.
# Parameters
module : `Callable[RnnInputs, RnnOutputs]`
A function to run on the inputs, where
`RnnInputs: [PackedSequence, Optional[RnnState]]` and
`RnnOutputs: Tuple[Union[PackedSequence, torch.Tensor], RnnState]`.
In most cases, this is a `torch.nn.Module`.
inputs : `torch.Tensor`, required.
A tensor of shape `(batch_size, sequence_length, embedding_size)` representing
the inputs to the Encoder.
mask : `torch.BoolTensor`, required.
A tensor of shape `(batch_size, sequence_length)`, representing masked and
non-masked elements of the sequence for each element in the batch.
hidden_state : `Optional[RnnState]`, (default = `None`).
A single tensor of shape (num_layers, batch_size, hidden_size) representing the
            state of an RNN, or a tuple of
tensors of shapes (num_layers, batch_size, hidden_size) and
(num_layers, batch_size, memory_size), representing the hidden state and memory
state of an LSTM-like RNN.
# Returns
module_output : `Union[torch.Tensor, PackedSequence]`.
A Tensor or PackedSequence representing the output of the Pytorch Module.
The batch size dimension will be equal to `num_valid`, as sequences of zero
length are clipped off before the module is called, as Pytorch cannot handle
zero length sequences.
final_states : `Optional[RnnState]`
A Tensor representing the hidden state of the Pytorch Module. This can either
be a single tensor of shape (num_layers, num_valid, hidden_size), for instance in
the case of a GRU, or a tuple of tensors, such as those required for an LSTM.
restoration_indices : `torch.LongTensor`
A tensor of shape `(batch_size,)`, describing the re-indexing required to transform
the outputs back to their original batch order.
"""
# In some circumstances you may have sequences of zero length. `pack_padded_sequence`
# requires all sequence lengths to be > 0, so remove sequences of zero length before
# calling self._module, then fill with zeros.
# First count how many sequences are empty.
batch_size = mask.size(0)
num_valid = torch.sum(mask[:, 0]).int().item()
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
(
sorted_inputs,
sorted_sequence_lengths,
restoration_indices,
sorting_indices,
) = sort_batch_by_length(inputs, sequence_lengths)
# Now create a PackedSequence with only the non-empty, sorted sequences.
packed_sequence_input = pack_padded_sequence(
sorted_inputs[:num_valid, :, :],
sorted_sequence_lengths[:num_valid].data.tolist(),
batch_first=True,
)
# Prepare the initial states.
if not self.stateful:
if hidden_state is None:
initial_states: Any = hidden_state
elif isinstance(hidden_state, tuple):
initial_states = [
state.index_select(1, sorting_indices)[:, :num_valid, :].contiguous()
for state in hidden_state
]
else:
initial_states = hidden_state.index_select(1, sorting_indices)[
:, :num_valid, :
].contiguous()
else:
initial_states = self._get_initial_states(batch_size, num_valid, sorting_indices)
# Actually call the module on the sorted PackedSequence.
module_output, final_states = module(packed_sequence_input, initial_states)
return module_output, final_states, restoration_indices
def _get_initial_states(
self, batch_size: int, num_valid: int, sorting_indices: torch.LongTensor
) -> Optional[RnnState]:
"""
Returns an initial state for use in an RNN. Additionally, this method handles
the batch size changing across calls by mutating the state to append initial states
for new elements in the batch. Finally, it also handles sorting the states
with respect to the sequence lengths of elements in the batch and removing rows
which are completely padded. Importantly, this `mutates` the state if the
current batch size is larger than when it was previously called.
# Parameters
batch_size : `int`, required.
The batch size can change size across calls to stateful RNNs, so we need
to know if we need to expand or shrink the states before returning them.
Expanded states will be set to zero.
num_valid : `int`, required.
The batch may contain completely padded sequences which get removed before
the sequence is passed through the encoder. We also need to clip these off
of the state too.
        sorting_indices : `torch.LongTensor`, required.
Pytorch RNNs take sequences sorted by length. When we return the states to be
used for a given call to `module.forward`, we need the states to match up to
the sorted sequences, so before returning them, we sort the states using the
same indices used to sort the sequences.
# Returns
This method has a complex return type because it has to deal with the first time it
is called, when it has no state, and the fact that types of RNN have heterogeneous
states.
If it is the first time the module has been called, it returns `None`, regardless
of the type of the `Module`.
Otherwise, for LSTMs, it returns a tuple of `torch.Tensors` with shape
`(num_layers, num_valid, state_size)` and `(num_layers, num_valid, memory_size)`
respectively, or for GRUs, it returns a single `torch.Tensor` of shape
`(num_layers, num_valid, state_size)`.
"""
# We don't know the state sizes the first time calling forward,
        # so we let the module define what its initial hidden state looks like.
if self._states is None:
return None
# Otherwise, we have some previous states.
if batch_size > self._states[0].size(1):
            # This batch is larger than all previous states.
# If so, resize the states.
num_states_to_concat = batch_size - self._states[0].size(1)
resized_states = []
# state has shape (num_layers, batch_size, hidden_size)
for state in self._states:
# This _must_ be inside the loop because some
# RNNs have states with different last dimension sizes.
zeros = state.new_zeros(state.size(0), num_states_to_concat, state.size(2))
resized_states.append(torch.cat([state, zeros], 1))
self._states = tuple(resized_states)
correctly_shaped_states = self._states
elif batch_size < self._states[0].size(1):
# This batch is smaller than the previous one.
correctly_shaped_states = tuple(state[:, :batch_size, :] for state in self._states)
else:
correctly_shaped_states = self._states
# At this point, our states are of shape (num_layers, batch_size, hidden_size).
# However, the encoder uses sorted sequences and additionally removes elements
# of the batch which are fully padded. We need the states to match up to these
# sorted and filtered sequences, so we do that in the next two blocks before
# returning the state/s.
if len(self._states) == 1:
# GRUs only have a single state. This `unpacks` it from the
# tuple and returns the tensor directly.
correctly_shaped_state = correctly_shaped_states[0]
sorted_state = correctly_shaped_state.index_select(1, sorting_indices)
return sorted_state[:, :num_valid, :].contiguous()
else:
# LSTMs have a state tuple of (state, memory).
sorted_states = [
state.index_select(1, sorting_indices) for state in correctly_shaped_states
]
return tuple(state[:, :num_valid, :].contiguous() for state in sorted_states)
def _update_states(
self, final_states: RnnStateStorage, restoration_indices: torch.LongTensor
) -> None:
"""
After the RNN has run forward, the states need to be updated.
This method just sets the state to the updated new state, performing
several pieces of book-keeping along the way - namely, unsorting the
states and ensuring that the states of completely padded sequences are
not updated. Finally, it also detaches the state variable from the
computational graph, such that the graph can be garbage collected after
each batch iteration.
# Parameters
final_states : `RnnStateStorage`, required.
The hidden states returned as output from the RNN.
restoration_indices : `torch.LongTensor`, required.
The indices that invert the sorting used in `sort_and_run_forward`
to order the states with respect to the lengths of the sequences in
the batch.
"""
# TODO(Mark): seems weird to sort here, but append zeros in the subclasses.
# which way around is best?
new_unsorted_states = [state.index_select(1, restoration_indices) for state in final_states]
if self._states is None:
# We don't already have states, so just set the
# ones we receive to be the current state.
self._states = tuple(state.data for state in new_unsorted_states)
else:
# Now we've sorted the states back so that they correspond to the original
# indices, we need to figure out what states we need to update, because if we
# didn't use a state for a particular row, we want to preserve its state.
# Thankfully, the rows which are all zero in the state correspond exactly
# to those which aren't used, so we create masks of shape (new_batch_size,),
# denoting which states were used in the RNN computation.
current_state_batch_size = self._states[0].size(1)
new_state_batch_size = final_states[0].size(1)
# Masks for the unused states of shape (1, new_batch_size, 1)
used_new_rows_mask = [
(state[0, :, :].sum(-1) != 0.0).float().view(1, new_state_batch_size, 1)
for state in new_unsorted_states
]
new_states = []
if current_state_batch_size > new_state_batch_size:
# The new state is smaller than the old one,
# so just update the indices which we used.
for old_state, new_state, used_mask in zip(
self._states, new_unsorted_states, used_new_rows_mask
):
# zero out all rows in the previous state
# which _were_ used in the current state.
masked_old_state = old_state[:, :new_state_batch_size, :] * (1 - used_mask)
# The old state is larger, so update the relevant parts of it.
old_state[:, :new_state_batch_size, :] = new_state + masked_old_state
new_states.append(old_state.detach())
else:
# The states are the same size, so we just have to
# deal with the possibility that some rows weren't used.
new_states = []
for old_state, new_state, used_mask in zip(
self._states, new_unsorted_states, used_new_rows_mask
):
# zero out all rows which _were_ used in the current state.
masked_old_state = old_state * (1 - used_mask)
# The old state is larger, so update the relevant parts of it.
new_state += masked_old_state
new_states.append(new_state.detach())
# It looks like there should be another case handled here - when
# the current_state_batch_size < new_state_batch_size. However,
            # this never happens, because the states themselves are mutated
            # by appending zeros when calling _get_initial_states, meaning that
# the new states are either of equal size, or smaller, in the case
# that there are some unused elements (zero-length) for the RNN computation.
self._states = tuple(new_states)
def reset_states(self, mask: torch.BoolTensor = None) -> None:
"""
Resets the internal states of a stateful encoder.
# Parameters
mask : `torch.BoolTensor`, optional.
A tensor of shape `(batch_size,)` indicating which states should
be reset. If not provided, all states will be reset.
"""
if mask is None:
self._states = None
else:
# state has shape (num_layers, batch_size, hidden_size). We reshape
# mask to have shape (1, batch_size, 1) so that operations
# broadcast properly.
mask_batch_size = mask.size(0)
mask = mask.view(1, mask_batch_size, 1)
new_states = []
assert self._states is not None
for old_state in self._states:
old_state_batch_size = old_state.size(1)
if old_state_batch_size != mask_batch_size:
raise ValueError(
f"Trying to reset states using mask with incorrect batch size. "
f"Expected batch size: {old_state_batch_size}. "
f"Provided batch size: {mask_batch_size}."
)
new_state = ~mask * old_state
new_states.append(new_state.detach())
self._states = tuple(new_states)
| allennlp-master | allennlp/modules/encoder_base.py |
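A minimal sketch of how a subclass might use `sort_and_run_forward`, roughly mirroring what AllenNLP's PyTorch seq2seq wrappers do. The class name is hypothetical, and the sketch assumes every sequence in the batch has at least one unmasked token (so no rows are clipped off).

```python
import torch
from torch.nn.utils.rnn import pad_packed_sequence

from allennlp.modules.encoder_base import _EncoderBase


class TinySeq2SeqEncoder(_EncoderBase):
    # Hypothetical example class, not part of AllenNLP.
    def __init__(self, input_dim: int, hidden_dim: int) -> None:
        super().__init__(stateful=False)
        self._lstm = torch.nn.LSTM(input_dim, hidden_dim, batch_first=True)

    def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor) -> torch.Tensor:
        packed_output, _, restoration_indices = self.sort_and_run_forward(
            self._lstm, inputs, mask
        )
        unpacked, _ = pad_packed_sequence(packed_output, batch_first=True)
        # Assumes no completely-masked rows, so the batch dimension is intact
        # and we only need to undo the length-based sorting.
        return unpacked.index_select(0, restoration_indices)


encoder = TinySeq2SeqEncoder(input_dim=8, hidden_dim=16)
inputs = torch.randn(3, 5, 8)
mask = torch.tensor(
    [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [1, 1, 0, 0, 0]], dtype=torch.bool
)
print(encoder(inputs, mask).shape)  # torch.Size([3, 5, 16])
```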
"""
A maxout neural network.
"""
from typing import Sequence, Union
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.common.registrable import FromParams
class Maxout(torch.nn.Module, FromParams):
"""
This `Module` is a maxout neural network.
# Parameters
input_dim : `int`, required
The dimensionality of the input. We assume the input has shape `(batch_size, input_dim)`.
num_layers : `int`, required
The number of maxout layers to apply to the input.
output_dims : `Union[int, Sequence[int]]`, required
The output dimension of each of the maxout layers. If this is a single `int`, we use
it for all maxout layers. If it is a `Sequence[int]`, `len(output_dims)` must be
`num_layers`.
pool_sizes : `Union[int, Sequence[int]]`, required
The size of max-pools. If this is a single `int`, we use
it for all maxout layers. If it is a `Sequence[int]`, `len(pool_sizes)` must be
`num_layers`.
dropout : `Union[float, Sequence[float]]`, optional (default = `0.0`)
If given, we will apply this amount of dropout after each layer. Semantics of `float`
versus `Sequence[float]` is the same as with other parameters.
"""
def __init__(
self,
input_dim: int,
num_layers: int,
output_dims: Union[int, Sequence[int]],
pool_sizes: Union[int, Sequence[int]],
dropout: Union[float, Sequence[float]] = 0.0,
) -> None:
super().__init__()
if not isinstance(output_dims, list):
output_dims = [output_dims] * num_layers # type: ignore
if not isinstance(pool_sizes, list):
pool_sizes = [pool_sizes] * num_layers # type: ignore
if not isinstance(dropout, list):
dropout = [dropout] * num_layers # type: ignore
if len(output_dims) != num_layers:
raise ConfigurationError(
"len(output_dims) (%d) != num_layers (%d)" % (len(output_dims), num_layers)
)
if len(pool_sizes) != num_layers:
raise ConfigurationError(
"len(pool_sizes) (%d) != num_layers (%d)" % (len(pool_sizes), num_layers)
)
if len(dropout) != num_layers:
raise ConfigurationError(
"len(dropout) (%d) != num_layers (%d)" % (len(dropout), num_layers)
)
self._pool_sizes = pool_sizes
input_dims = [input_dim] + output_dims[:-1]
linear_layers = []
for layer_input_dim, layer_output_dim, pool_size in zip(
input_dims, output_dims, pool_sizes
):
linear_layers.append(torch.nn.Linear(layer_input_dim, layer_output_dim * pool_size))
self._linear_layers = torch.nn.ModuleList(linear_layers)
dropout_layers = [torch.nn.Dropout(p=value) for value in dropout]
self._dropout = torch.nn.ModuleList(dropout_layers)
self._output_dims = output_dims
self._output_dim = output_dims[-1]
self._input_dim = input_dim
def get_output_dim(self):
return self._output_dim
def get_input_dim(self):
return self._input_dim
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
output = inputs
for layer, layer_output_dim, dropout, pool_size in zip(
self._linear_layers, self._output_dims, self._dropout, self._pool_sizes
):
affine_output = layer(output)
# Compute and apply the proper shape for the max.
shape = list(inputs.size())
shape[-1] = layer_output_dim
shape.append(pool_size)
maxed_output = torch.max(affine_output.view(*shape), dim=-1)[0]
dropped_output = dropout(maxed_output)
output = dropped_output
return output
| allennlp-master | allennlp/modules/maxout.py |
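A short usage sketch for the `Maxout` module above; the layer sizes are illustrative.

```python
import torch

from allennlp.modules.maxout import Maxout

# Two maxout layers, 10 -> 8 -> 4, each taking a max over 3 linear pieces.
maxout = Maxout(input_dim=10, num_layers=2, output_dims=[8, 4], pool_sizes=3, dropout=0.0)

inputs = torch.randn(32, 10)
outputs = maxout(inputs)
print(outputs.shape)            # torch.Size([32, 4])
print(maxout.get_output_dim())  # 4
```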
import json
import logging
import warnings
from typing import Any, Dict, List, Union
import numpy
import torch
from overrides import overrides
from torch.nn.modules import Dropout
from allennlp.common import FromParams
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.common.util import lazy_groups_of
from allennlp.data import Instance, Token, Vocabulary
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField
from allennlp.data.token_indexers.elmo_indexer import (
ELMoCharacterMapper,
ELMoTokenCharactersIndexer,
)
from allennlp.modules.elmo_lstm import ElmoLstm
from allennlp.modules.highway import Highway
from allennlp.modules.scalar_mix import ScalarMix
from allennlp.nn.util import (
add_sentence_boundary_token_ids,
get_device_of,
remove_sentence_boundaries,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import h5py
logger = logging.getLogger(__name__)
class Elmo(torch.nn.Module, FromParams):
"""
Compute ELMo representations using a pre-trained bidirectional language model.
See "Deep contextualized word representations", Peters et al. for details.
This module takes character id input and computes `num_output_representations` different layers
of ELMo representations. Typically `num_output_representations` is 1 or 2. For example, in
the case of the SRL model in the above paper, `num_output_representations=1` where ELMo was included at
the input token representation layer. In the case of the SQuAD model, `num_output_representations=2`
as ELMo was also included at the GRU output layer.
In the implementation below, we learn separate scalar weights for each output layer,
but only run the biLM once on each input sequence for efficiency.
# Parameters
options_file : `str`, required.
ELMo JSON options file
weight_file : `str`, required.
ELMo hdf5 weight file
num_output_representations : `int`, required.
The number of ELMo representation to output with
different linear weighted combination of the 3 layers (i.e.,
character-convnet output, 1st lstm output, 2nd lstm output).
requires_grad : `bool`, optional
If True, compute gradient of ELMo parameters for fine tuning.
do_layer_norm : `bool`, optional, (default = `False`).
Should we apply layer normalization (passed to `ScalarMix`)?
dropout : `float`, optional, (default = `0.5`).
The dropout to be applied to the ELMo representations.
vocab_to_cache : `List[str]`, optional, (default = `None`).
A list of words to pre-compute and cache character convolutions
for. If you use this option, Elmo expects that you pass word
indices of shape (batch_size, timesteps) to forward, instead
of character indices. If you use this option and pass a word which
wasn't pre-cached, this will break.
keep_sentence_boundaries : `bool`, optional, (default = `False`)
If True, the representation of the sentence boundary tokens are
not removed.
scalar_mix_parameters : `List[float]`, optional, (default = `None`)
If not `None`, use these scalar mix parameters to weight the representations
produced by different layers. These mixing weights are not updated during
training. The mixing weights here should be the unnormalized (i.e., pre-softmax)
weights. So, if you wanted to use only the 1st layer of a 2-layer ELMo,
you can set this to [-9e10, 1, -9e10 ].
module : `torch.nn.Module`, optional, (default = `None`).
If provided, then use this module instead of the pre-trained ELMo biLM.
If using this option, then pass `None` for both `options_file`
and `weight_file`. The module must provide a public attribute
`num_layers` with the number of internal layers and its `forward`
method must return a `dict` with `activations` and `mask` keys
(see `_ElmoBilm` for an example). Note that `requires_grad` is also
ignored with this option.
"""
def __init__(
self,
options_file: str,
weight_file: str,
num_output_representations: int,
requires_grad: bool = False,
do_layer_norm: bool = False,
dropout: float = 0.5,
vocab_to_cache: List[str] = None,
keep_sentence_boundaries: bool = False,
scalar_mix_parameters: List[float] = None,
module: torch.nn.Module = None,
) -> None:
super().__init__()
logger.info("Initializing ELMo")
if module is not None:
if options_file is not None or weight_file is not None:
raise ConfigurationError("Don't provide options_file or weight_file with module")
self._elmo_lstm = module
else:
self._elmo_lstm = _ElmoBiLm( # type: ignore
options_file,
weight_file,
requires_grad=requires_grad,
vocab_to_cache=vocab_to_cache,
)
self._has_cached_vocab = vocab_to_cache is not None
self._keep_sentence_boundaries = keep_sentence_boundaries
self._dropout = Dropout(p=dropout)
self._scalar_mixes: Any = []
for k in range(num_output_representations):
scalar_mix = ScalarMix(
self._elmo_lstm.num_layers, # type: ignore
do_layer_norm=do_layer_norm,
initial_scalar_parameters=scalar_mix_parameters,
trainable=scalar_mix_parameters is None,
)
self.add_module("scalar_mix_{}".format(k), scalar_mix)
self._scalar_mixes.append(scalar_mix)
def get_output_dim(self):
return self._elmo_lstm.get_output_dim()
def forward(
self, inputs: torch.Tensor, word_inputs: torch.Tensor = None
) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]:
"""
# Parameters
inputs : `torch.Tensor`, required.
Shape `(batch_size, timesteps, 50)` of character ids representing the current batch.
        word_inputs : `torch.Tensor`, optional, (default = `None`).
If you passed a cached vocab, you can in addition pass a tensor of shape
`(batch_size, timesteps)`, which represent word ids which have been pre-cached.
# Returns
`Dict[str, Union[torch.Tensor, List[torch.Tensor]]]`
A dict with the following keys:
- `'elmo_representations'` (`List[torch.Tensor]`) :
A `num_output_representations` list of ELMo representations for the input sequence.
Each representation is shape `(batch_size, timesteps, embedding_dim)`
- `'mask'` (`torch.BoolTensor`) :
                Shape `(batch_size, timesteps)` boolean tensor with the sequence mask.
"""
# reshape the input if needed
original_shape = inputs.size()
if len(original_shape) > 3:
timesteps, num_characters = original_shape[-2:]
reshaped_inputs = inputs.view(-1, timesteps, num_characters)
else:
reshaped_inputs = inputs
if word_inputs is not None:
original_word_size = word_inputs.size()
if self._has_cached_vocab and len(original_word_size) > 2:
reshaped_word_inputs = word_inputs.view(-1, original_word_size[-1])
elif not self._has_cached_vocab:
logger.warning(
"Word inputs were passed to ELMo but it does not have a cached vocab."
)
reshaped_word_inputs = None
else:
reshaped_word_inputs = word_inputs
else:
reshaped_word_inputs = word_inputs
# run the biLM
bilm_output = self._elmo_lstm(reshaped_inputs, reshaped_word_inputs) # type: ignore
layer_activations = bilm_output["activations"]
mask_with_bos_eos = bilm_output["mask"]
# compute the elmo representations
representations = []
for i in range(len(self._scalar_mixes)):
scalar_mix = getattr(self, "scalar_mix_{}".format(i))
representation_with_bos_eos = scalar_mix(layer_activations, mask_with_bos_eos)
if self._keep_sentence_boundaries:
processed_representation = representation_with_bos_eos
processed_mask = mask_with_bos_eos
else:
representation_without_bos_eos, mask_without_bos_eos = remove_sentence_boundaries(
representation_with_bos_eos, mask_with_bos_eos
)
processed_representation = representation_without_bos_eos
processed_mask = mask_without_bos_eos
representations.append(self._dropout(processed_representation))
# reshape if necessary
if word_inputs is not None and len(original_word_size) > 2:
mask = processed_mask.view(original_word_size)
elmo_representations = [
representation.view(original_word_size + (-1,))
for representation in representations
]
elif len(original_shape) > 3:
mask = processed_mask.view(original_shape[:-1])
elmo_representations = [
representation.view(original_shape[:-1] + (-1,))
for representation in representations
]
else:
mask = processed_mask
elmo_representations = representations
return {"elmo_representations": elmo_representations, "mask": mask}
def batch_to_ids(batch: List[List[str]]) -> torch.Tensor:
"""
Converts a batch of tokenized sentences to a tensor representing the sentences with encoded characters
(len(batch), max sentence length, max word length).
# Parameters
batch : `List[List[str]]`, required
A list of tokenized sentences.
# Returns
A tensor of padded character ids.
"""
instances = []
indexer = ELMoTokenCharactersIndexer()
for sentence in batch:
tokens = [Token(token) for token in sentence]
field = TextField(tokens, {"character_ids": indexer})
instance = Instance({"elmo": field})
instances.append(instance)
dataset = Batch(instances)
vocab = Vocabulary()
dataset.index_instances(vocab)
return dataset.as_tensor_dict()["elmo"]["character_ids"]["elmo_tokens"]
class _ElmoCharacterEncoder(torch.nn.Module):
"""
Compute context insensitive token representation using pretrained biLM.
This embedder has input character ids of size (batch_size, sequence_length, 50)
and returns (batch_size, sequence_length + 2, embedding_dim), where embedding_dim
is specified in the options file (typically 512).
We add special entries at the beginning and end of each sequence corresponding
to <S> and </S>, the beginning and end of sentence tokens.
Note: this is a lower level class useful for advanced usage. Most users should
use `ElmoTokenEmbedder` or `allennlp.modules.Elmo` instead.
# Parameters
options_file : `str`
ELMo JSON options file
weight_file : `str`
ELMo hdf5 weight file
requires_grad : `bool`, optional, (default = `False`).
If True, compute gradient of ELMo parameters for fine tuning.
The relevant section of the options file is something like:
```
{'char_cnn': {
'activation': 'relu',
'embedding': {'dim': 4},
'filters': [[1, 4], [2, 8], [3, 16], [4, 32], [5, 64]],
'max_characters_per_token': 50,
'n_characters': 262,
'n_highway': 2
}
}
```
"""
def __init__(self, options_file: str, weight_file: str, requires_grad: bool = False) -> None:
super().__init__()
with open(cached_path(options_file), "r") as fin:
self._options = json.load(fin)
self._weight_file = weight_file
self.output_dim = self._options["lstm"]["projection_dim"]
self.requires_grad = requires_grad
self._load_weights()
# Cache the arrays for use in forward -- +1 due to masking.
self._beginning_of_sentence_characters = torch.from_numpy(
numpy.array(ELMoCharacterMapper.beginning_of_sentence_characters) + 1
)
self._end_of_sentence_characters = torch.from_numpy(
numpy.array(ELMoCharacterMapper.end_of_sentence_characters) + 1
)
def get_output_dim(self):
return self.output_dim
@overrides
def forward(self, inputs: torch.Tensor) -> Dict[str, torch.Tensor]:
"""
Compute context insensitive token embeddings for ELMo representations.
# Parameters
inputs : `torch.Tensor`
Shape `(batch_size, sequence_length, 50)` of character ids representing the
current batch.
# Returns
Dict with keys:
`'token_embedding'` : `torch.Tensor`
Shape `(batch_size, sequence_length + 2, embedding_dim)` tensor with context
insensitive token representations.
`'mask'`: `torch.BoolTensor`
            Shape `(batch_size, sequence_length + 2)` boolean tensor with the sequence mask.
"""
# Add BOS/EOS
mask = (inputs > 0).sum(dim=-1) > 0
character_ids_with_bos_eos, mask_with_bos_eos = add_sentence_boundary_token_ids(
inputs, mask, self._beginning_of_sentence_characters, self._end_of_sentence_characters
)
# the character id embedding
max_chars_per_token = self._options["char_cnn"]["max_characters_per_token"]
# (batch_size * sequence_length, max_chars_per_token, embed_dim)
character_embedding = torch.nn.functional.embedding(
character_ids_with_bos_eos.view(-1, max_chars_per_token), self._char_embedding_weights
)
# run convolutions
cnn_options = self._options["char_cnn"]
if cnn_options["activation"] == "tanh":
activation = torch.tanh
elif cnn_options["activation"] == "relu":
activation = torch.nn.functional.relu
else:
raise ConfigurationError("Unknown activation")
# (batch_size * sequence_length, embed_dim, max_chars_per_token)
character_embedding = torch.transpose(character_embedding, 1, 2)
convs = []
for i in range(len(self._convolutions)):
conv = getattr(self, "char_conv_{}".format(i))
convolved = conv(character_embedding)
# (batch_size * sequence_length, n_filters for this width)
convolved, _ = torch.max(convolved, dim=-1)
convolved = activation(convolved)
convs.append(convolved)
# (batch_size * sequence_length, n_filters)
token_embedding = torch.cat(convs, dim=-1)
# apply the highway layers (batch_size * sequence_length, n_filters)
token_embedding = self._highways(token_embedding)
# final projection (batch_size * sequence_length, embedding_dim)
token_embedding = self._projection(token_embedding)
# reshape to (batch_size, sequence_length, embedding_dim)
batch_size, sequence_length, _ = character_ids_with_bos_eos.size()
return {
"mask": mask_with_bos_eos,
"token_embedding": token_embedding.view(batch_size, sequence_length, -1),
}
def _load_weights(self):
self._load_char_embedding()
self._load_cnn_weights()
self._load_highway()
self._load_projection()
def _load_char_embedding(self):
with h5py.File(cached_path(self._weight_file), "r") as fin:
char_embed_weights = fin["char_embed"][...]
weights = numpy.zeros(
(char_embed_weights.shape[0] + 1, char_embed_weights.shape[1]), dtype="float32"
)
weights[1:, :] = char_embed_weights
self._char_embedding_weights = torch.nn.Parameter(
torch.FloatTensor(weights), requires_grad=self.requires_grad
)
def _load_cnn_weights(self):
cnn_options = self._options["char_cnn"]
filters = cnn_options["filters"]
char_embed_dim = cnn_options["embedding"]["dim"]
convolutions = []
for i, (width, num) in enumerate(filters):
conv = torch.nn.Conv1d(
in_channels=char_embed_dim, out_channels=num, kernel_size=width, bias=True
)
# load the weights
with h5py.File(cached_path(self._weight_file), "r") as fin:
weight = fin["CNN"]["W_cnn_{}".format(i)][...]
bias = fin["CNN"]["b_cnn_{}".format(i)][...]
w_reshaped = numpy.transpose(weight.squeeze(axis=0), axes=(2, 1, 0))
if w_reshaped.shape != tuple(conv.weight.data.shape):
raise ValueError("Invalid weight file")
conv.weight.data.copy_(torch.FloatTensor(w_reshaped))
conv.bias.data.copy_(torch.FloatTensor(bias))
conv.weight.requires_grad = self.requires_grad
conv.bias.requires_grad = self.requires_grad
convolutions.append(conv)
self.add_module("char_conv_{}".format(i), conv)
self._convolutions = convolutions
def _load_highway(self):
# the highway layers have same dimensionality as the number of cnn filters
cnn_options = self._options["char_cnn"]
filters = cnn_options["filters"]
n_filters = sum(f[1] for f in filters)
n_highway = cnn_options["n_highway"]
# create the layers, and load the weights
self._highways = Highway(n_filters, n_highway, activation=torch.nn.functional.relu)
for k in range(n_highway):
            # The AllenNLP highway is one matrix multiplication with concatenation of
# transform and carry weights.
with h5py.File(cached_path(self._weight_file), "r") as fin:
# The weights are transposed due to multiplication order assumptions in tf
# vs pytorch (tf.matmul(X, W) vs pytorch.matmul(W, X))
w_transform = numpy.transpose(fin["CNN_high_{}".format(k)]["W_transform"][...])
# -1.0 since AllenNLP is g * x + (1 - g) * f(x) but tf is (1 - g) * x + g * f(x)
w_carry = -1.0 * numpy.transpose(fin["CNN_high_{}".format(k)]["W_carry"][...])
weight = numpy.concatenate([w_transform, w_carry], axis=0)
self._highways._layers[k].weight.data.copy_(torch.FloatTensor(weight))
self._highways._layers[k].weight.requires_grad = self.requires_grad
b_transform = fin["CNN_high_{}".format(k)]["b_transform"][...]
b_carry = -1.0 * fin["CNN_high_{}".format(k)]["b_carry"][...]
bias = numpy.concatenate([b_transform, b_carry], axis=0)
self._highways._layers[k].bias.data.copy_(torch.FloatTensor(bias))
self._highways._layers[k].bias.requires_grad = self.requires_grad
def _load_projection(self):
cnn_options = self._options["char_cnn"]
filters = cnn_options["filters"]
n_filters = sum(f[1] for f in filters)
self._projection = torch.nn.Linear(n_filters, self.output_dim, bias=True)
with h5py.File(cached_path(self._weight_file), "r") as fin:
weight = fin["CNN_proj"]["W_proj"][...]
bias = fin["CNN_proj"]["b_proj"][...]
self._projection.weight.data.copy_(torch.FloatTensor(numpy.transpose(weight)))
self._projection.bias.data.copy_(torch.FloatTensor(bias))
self._projection.weight.requires_grad = self.requires_grad
self._projection.bias.requires_grad = self.requires_grad
class _ElmoBiLm(torch.nn.Module):
"""
Run a pre-trained bidirectional language model, outputting the activations at each
layer for weighting together into an ELMo representation (with
`allennlp.modules.seq2seq_encoders.Elmo`). This is a lower level class, useful
for advanced uses, but most users should use `allennlp.modules.Elmo` directly.
# Parameters
options_file : `str`
ELMo JSON options file
weight_file : `str`
ELMo hdf5 weight file
requires_grad : `bool`, optional, (default = `False`).
If True, compute gradient of ELMo parameters for fine tuning.
vocab_to_cache : `List[str]`, optional, (default = `None`).
A list of words to pre-compute and cache character convolutions
for. If you use this option, _ElmoBiLm expects that you pass word
indices of shape (batch_size, timesteps) to forward, instead
of character indices. If you use this option and pass a word which
wasn't pre-cached, this will break.
"""
def __init__(
self,
options_file: str,
weight_file: str,
requires_grad: bool = False,
vocab_to_cache: List[str] = None,
) -> None:
super().__init__()
self._token_embedder = _ElmoCharacterEncoder(
options_file, weight_file, requires_grad=requires_grad
)
self._requires_grad = requires_grad
if requires_grad and vocab_to_cache:
logging.warning(
"You are fine tuning ELMo and caching char CNN word vectors. "
"This behaviour is not guaranteed to be well defined, particularly. "
"if not all of your inputs will occur in the vocabulary cache."
)
# This is an embedding, used to look up cached
# word vectors built from character level cnn embeddings.
self._word_embedding = None
self._bos_embedding: torch.Tensor = None
self._eos_embedding: torch.Tensor = None
if vocab_to_cache:
logging.info("Caching character cnn layers for words in vocabulary.")
# This sets 3 attributes, _word_embedding, _bos_embedding and _eos_embedding.
# They are set in the method so they can be accessed from outside the
# constructor.
self.create_cached_cnn_embeddings(vocab_to_cache)
with open(cached_path(options_file), "r") as fin:
options = json.load(fin)
if not options["lstm"].get("use_skip_connections"):
raise ConfigurationError("We only support pretrained biLMs with residual connections")
self._elmo_lstm = ElmoLstm(
input_size=options["lstm"]["projection_dim"],
hidden_size=options["lstm"]["projection_dim"],
cell_size=options["lstm"]["dim"],
num_layers=options["lstm"]["n_layers"],
memory_cell_clip_value=options["lstm"]["cell_clip"],
state_projection_clip_value=options["lstm"]["proj_clip"],
requires_grad=requires_grad,
)
self._elmo_lstm.load_weights(weight_file)
# Number of representation layers including context independent layer
self.num_layers = options["lstm"]["n_layers"] + 1
def get_output_dim(self):
return 2 * self._token_embedder.get_output_dim()
def forward(
self, inputs: torch.Tensor, word_inputs: torch.Tensor = None
) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]:
"""
# Parameters
inputs : `torch.Tensor`, required.
Shape `(batch_size, timesteps, 50)` of character ids representing the current batch.
        word_inputs : `torch.Tensor`, optional, (default = `None`).
If you passed a cached vocab, you can in addition pass a tensor of shape `(batch_size, timesteps)`,
which represent word ids which have been pre-cached.
# Returns
Dict with keys:
`'activations'` : `List[torch.Tensor]`
A list of activations at each layer of the network, each of shape
`(batch_size, timesteps + 2, embedding_dim)`
`'mask'`: `torch.BoolTensor`
            Shape `(batch_size, timesteps + 2)` boolean tensor with the sequence mask.
Note that the output tensors all include additional special begin and end of sequence
markers.
"""
if self._word_embedding is not None and word_inputs is not None:
try:
mask_without_bos_eos = word_inputs > 0
# The character cnn part is cached - just look it up.
embedded_inputs = self._word_embedding(word_inputs) # type: ignore
# shape (batch_size, timesteps + 2, embedding_dim)
type_representation, mask = add_sentence_boundary_token_ids(
embedded_inputs, mask_without_bos_eos, self._bos_embedding, self._eos_embedding
)
except (RuntimeError, IndexError):
# Back off to running the character convolutions,
# as we might not have the words in the cache.
token_embedding = self._token_embedder(inputs)
mask = token_embedding["mask"]
type_representation = token_embedding["token_embedding"]
else:
token_embedding = self._token_embedder(inputs)
mask = token_embedding["mask"]
type_representation = token_embedding["token_embedding"]
lstm_outputs = self._elmo_lstm(type_representation, mask)
# Prepare the output. The first layer is duplicated.
# Because of minor differences in how masking is applied depending
# on whether the char cnn layers are cached, we'll be defensive and
# multiply by the mask here. It's not strictly necessary, as the
# mask passed on is correct, but the values in the padded areas
# of the char cnn representations can change.
output_tensors = [
torch.cat([type_representation, type_representation], dim=-1) * mask.unsqueeze(-1)
]
for layer_activations in torch.chunk(lstm_outputs, lstm_outputs.size(0), dim=0):
output_tensors.append(layer_activations.squeeze(0))
return {"activations": output_tensors, "mask": mask}
def create_cached_cnn_embeddings(self, tokens: List[str]) -> None:
"""
Given a list of tokens, this method precomputes word representations
by running just the character convolutions and highway layers of elmo,
essentially creating uncontextual word vectors. On subsequent forward passes,
the word ids are looked up from an embedding, rather than being computed on
the fly via the CNN encoder.
This function sets 3 attributes:
_word_embedding : `torch.Tensor`
The word embedding for each word in the tokens passed to this method.
_bos_embedding : `torch.Tensor`
The embedding for the BOS token.
_eos_embedding : `torch.Tensor`
The embedding for the EOS token.
# Parameters
tokens : `List[str]`, required.
A list of tokens to precompute character convolutions for.
"""
tokens = [ELMoCharacterMapper.bos_token, ELMoCharacterMapper.eos_token] + tokens
timesteps = 32
batch_size = 32
chunked_tokens = lazy_groups_of(iter(tokens), timesteps)
all_embeddings = []
device = get_device_of(next(self.parameters()))
for batch in lazy_groups_of(chunked_tokens, batch_size):
# Shape (batch_size, timesteps, 50)
batched_tensor = batch_to_ids(batch)
# NOTE: This device check is for when a user calls this method having
# already placed the model on a device. If this is called in the
# constructor, it will probably happen on the CPU. This isn't too bad,
# because it's only a few convolutions and will likely be very fast.
if device >= 0:
batched_tensor = batched_tensor.cuda(device)
output = self._token_embedder(batched_tensor)
token_embedding = output["token_embedding"]
mask = output["mask"]
token_embedding, _ = remove_sentence_boundaries(token_embedding, mask)
all_embeddings.append(token_embedding.view(-1, token_embedding.size(-1)))
full_embedding = torch.cat(all_embeddings, 0)
# We might have some trailing embeddings from padding in the batch, so
# we clip the embedding and lookup to the right size.
full_embedding = full_embedding[: len(tokens), :]
embedding = full_embedding[2 : len(tokens), :]
vocab_size, embedding_dim = list(embedding.size())
from allennlp.modules.token_embedders import Embedding # type: ignore
self._bos_embedding = full_embedding[0, :]
self._eos_embedding = full_embedding[1, :]
self._word_embedding = Embedding( # type: ignore
num_embeddings=vocab_size,
embedding_dim=embedding_dim,
weight=embedding.data,
trainable=self._requires_grad,
padding_index=0,
)
| allennlp-master | allennlp/modules/elmo.py |
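A typical usage sketch for the `Elmo` module and `batch_to_ids` defined above. The options/weights paths are placeholders (you need a pretrained ELMo options `.json` and the matching `.hdf5` weights), and the 1024-dim output assumes the standard pretrained model with a 512-dim projection.

```python
from allennlp.modules.elmo import Elmo, batch_to_ids

options_file = "/path/to/elmo_options.json"  # placeholder
weight_file = "/path/to/elmo_weights.hdf5"   # placeholder

elmo = Elmo(options_file, weight_file, num_output_representations=2, dropout=0.0)

sentences = [["First", "sentence", "."], ["Another", "."]]
character_ids = batch_to_ids(sentences)  # shape (2, 3, 50)

outputs = elmo(character_ids)
representations = outputs["elmo_representations"]  # list of 2 tensors
mask = outputs["mask"]                             # shape (2, 3)
# With the standard pretrained weights, each representation has shape (2, 3, 1024).
```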
"""
A wrapper that unrolls the second (time) dimension of a tensor
into the first (batch) dimension, applies some other `Module`,
and then rolls the time dimension back up.
"""
from typing import List
from overrides import overrides
import torch
class TimeDistributed(torch.nn.Module):
"""
Given an input shaped like `(batch_size, time_steps, [rest])` and a `Module` that takes
inputs like `(batch_size, [rest])`, `TimeDistributed` reshapes the input to be
`(batch_size * time_steps, [rest])`, applies the contained `Module`, then reshapes it back.
Note that while the above gives shapes with `batch_size` first, this `Module` also works if
`batch_size` is second - we always just combine the first two dimensions, then split them.
    It also reshapes keyword arguments, skipping any that are not tensors or whose name is
    listed in the optional `pass_through` iterable.
"""
def __init__(self, module):
super().__init__()
self._module = module
@overrides
def forward(self, *inputs, pass_through: List[str] = None, **kwargs):
pass_through = pass_through or []
reshaped_inputs = [self._reshape_tensor(input_tensor) for input_tensor in inputs]
# Need some input to then get the batch_size and time_steps.
some_input = None
if inputs:
some_input = inputs[-1]
reshaped_kwargs = {}
for key, value in kwargs.items():
if isinstance(value, torch.Tensor) and key not in pass_through:
if some_input is None:
some_input = value
value = self._reshape_tensor(value)
reshaped_kwargs[key] = value
reshaped_outputs = self._module(*reshaped_inputs, **reshaped_kwargs)
if some_input is None:
raise RuntimeError("No input tensor to time-distribute")
# Now get the output back into the right shape.
# (batch_size, time_steps, **output_size)
new_size = some_input.size()[:2] + reshaped_outputs.size()[1:]
outputs = reshaped_outputs.contiguous().view(new_size)
return outputs
@staticmethod
def _reshape_tensor(input_tensor):
input_size = input_tensor.size()
if len(input_size) <= 2:
raise RuntimeError(f"No dimension to distribute: {input_size}")
# Squash batch_size and time_steps into a single axis; result has shape
# (batch_size * time_steps, **input_size).
squashed_shape = [-1] + list(input_size[2:])
return input_tensor.contiguous().view(*squashed_shape)
| allennlp-master | allennlp/modules/time_distributed.py |
import torch
import numpy as np
class SoftmaxLoss(torch.nn.Module):
"""
Given some embeddings and some targets, applies a linear layer
to create logits over possible words and then returns the
negative log likelihood.
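    # Examples
    A minimal usage sketch with arbitrary sizes:
    ```python
    loss_fn = SoftmaxLoss(num_words=1000, embedding_dim=64)
    embeddings = torch.randn(8, 64)
    targets = torch.randint(0, 1000, (8,))
    loss = loss_fn(embeddings, targets)  # summed negative log likelihood
    ```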
"""
def __init__(self, num_words: int, embedding_dim: int) -> None:
super().__init__()
# TODO(joelgrus): implement tie_embeddings (maybe)
self.tie_embeddings = False
self.softmax_w = torch.nn.Parameter(
torch.randn(embedding_dim, num_words) / np.sqrt(embedding_dim)
)
self.softmax_b = torch.nn.Parameter(torch.zeros(num_words))
def forward(self, embeddings: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
# embeddings is size (n, embedding_dim)
# targets is (batch_size, ) with the correct class id
# Does not do any count normalization / divide by batch size
probs = torch.nn.functional.log_softmax(
torch.matmul(embeddings, self.softmax_w) + self.softmax_b, dim=-1
)
return torch.nn.functional.nll_loss(probs, targets.long(), reduction="sum")
| allennlp-master | allennlp/modules/softmax_loss.py |
"""
A feed-forward neural network.
"""
from typing import List, Union
import torch
from allennlp.common import FromParams
from allennlp.common.checks import ConfigurationError
from allennlp.nn import Activation
class FeedForward(torch.nn.Module, FromParams):
"""
This `Module` is a feed-forward neural network, just a sequence of `Linear` layers with
activation functions in between.
# Parameters
input_dim : `int`, required
The dimensionality of the input. We assume the input has shape `(batch_size, input_dim)`.
num_layers : `int`, required
The number of `Linear` layers to apply to the input.
hidden_dims : `Union[int, List[int]]`, required
The output dimension of each of the `Linear` layers. If this is a single `int`, we use
it for all `Linear` layers. If it is a `List[int]`, `len(hidden_dims)` must be
`num_layers`.
activations : `Union[Activation, List[Activation]]`, required
The activation function to use after each `Linear` layer. If this is a single function,
we use it after all `Linear` layers. If it is a `List[Activation]`,
`len(activations)` must be `num_layers`. Activation must have torch.nn.Module type.
dropout : `Union[float, List[float]]`, optional (default = `0.0`)
If given, we will apply this amount of dropout after each layer. Semantics of `float`
versus `List[float]` is the same as with other parameters.
# Examples
```python
FeedForward(124, 2, [64, 32], torch.nn.ReLU(), 0.2)
#> FeedForward(
#> (_activations): ModuleList(
#> (0): ReLU()
#> (1): ReLU()
#> )
#> (_linear_layers): ModuleList(
#> (0): Linear(in_features=124, out_features=64, bias=True)
#> (1): Linear(in_features=64, out_features=32, bias=True)
#> )
#> (_dropout): ModuleList(
#> (0): Dropout(p=0.2, inplace=False)
#> (1): Dropout(p=0.2, inplace=False)
#> )
#> )
```
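    A forward pass then maps inputs of shape `(batch_size, 124)` to outputs of shape
    `(batch_size, 32)`; a minimal sketch:
    ```python
    feedforward = FeedForward(124, 2, [64, 32], torch.nn.ReLU(), 0.2)
    outputs = feedforward(torch.randn(8, 124))  # shape (8, 32)
    ```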
"""
def __init__(
self,
input_dim: int,
num_layers: int,
hidden_dims: Union[int, List[int]],
activations: Union[Activation, List[Activation]],
dropout: Union[float, List[float]] = 0.0,
) -> None:
super().__init__()
if not isinstance(hidden_dims, list):
hidden_dims = [hidden_dims] * num_layers # type: ignore
if not isinstance(activations, list):
activations = [activations] * num_layers # type: ignore
if not isinstance(dropout, list):
dropout = [dropout] * num_layers # type: ignore
if len(hidden_dims) != num_layers:
raise ConfigurationError(
"len(hidden_dims) (%d) != num_layers (%d)" % (len(hidden_dims), num_layers)
)
if len(activations) != num_layers:
raise ConfigurationError(
"len(activations) (%d) != num_layers (%d)" % (len(activations), num_layers)
)
if len(dropout) != num_layers:
raise ConfigurationError(
"len(dropout) (%d) != num_layers (%d)" % (len(dropout), num_layers)
)
self._activations = torch.nn.ModuleList(activations)
input_dims = [input_dim] + hidden_dims[:-1]
linear_layers = []
for layer_input_dim, layer_output_dim in zip(input_dims, hidden_dims):
linear_layers.append(torch.nn.Linear(layer_input_dim, layer_output_dim))
self._linear_layers = torch.nn.ModuleList(linear_layers)
dropout_layers = [torch.nn.Dropout(p=value) for value in dropout]
self._dropout = torch.nn.ModuleList(dropout_layers)
self._output_dim = hidden_dims[-1]
self.input_dim = input_dim
def get_output_dim(self):
return self._output_dim
def get_input_dim(self):
return self.input_dim
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
output = inputs
for layer, activation, dropout in zip(
self._linear_layers, self._activations, self._dropout
):
output = dropout(activation(layer(output)))
return output
| allennlp-master | allennlp/modules/feedforward.py |
"""
Custom PyTorch
`Module <https://pytorch.org/docs/master/nn.html#torch.nn.Module>`_ s
that are used as components in AllenNLP `Model` s.
"""
from allennlp.modules.attention import Attention
from allennlp.modules.bimpm_matching import BiMpmMatching
from allennlp.modules.conditional_random_field import ConditionalRandomField
from allennlp.modules.elmo import Elmo
from allennlp.modules.feedforward import FeedForward
from allennlp.modules.gated_sum import GatedSum
from allennlp.modules.highway import Highway
from allennlp.modules.input_variational_dropout import InputVariationalDropout
from allennlp.modules.layer_norm import LayerNorm
from allennlp.modules.matrix_attention import MatrixAttention
from allennlp.modules.maxout import Maxout
from allennlp.modules.residual_with_layer_dropout import ResidualWithLayerDropout
from allennlp.modules.scalar_mix import ScalarMix
from allennlp.modules.seq2seq_encoders import Seq2SeqEncoder
from allennlp.modules.seq2vec_encoders import Seq2VecEncoder
from allennlp.modules.text_field_embedders import TextFieldEmbedder
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.modules.token_embedders import TokenEmbedder, Embedding
from allennlp.modules.softmax_loss import SoftmaxLoss
| allennlp-master | allennlp/modules/__init__.py |
# https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/python/ops/nn_impl.py#L885
from typing import Set, Tuple
import numpy as np
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.nn import util
def _choice(num_words: int, num_samples: int) -> Tuple[np.ndarray, int]:
"""
Chooses `num_samples` samples without replacement from [0, ..., num_words).
Returns a tuple (samples, num_tries).
"""
num_tries = 0
num_chosen = 0
def get_buffer() -> np.ndarray:
log_samples = np.random.rand(num_samples) * np.log(num_words + 1)
samples = np.exp(log_samples).astype("int64") - 1
return np.clip(samples, a_min=0, a_max=num_words - 1)
sample_buffer = get_buffer()
buffer_index = 0
samples: Set[int] = set()
while num_chosen < num_samples:
num_tries += 1
# choose sample
sample_id = sample_buffer[buffer_index]
if sample_id not in samples:
samples.add(sample_id)
num_chosen += 1
buffer_index += 1
if buffer_index == num_samples:
# Reset the buffer
sample_buffer = get_buffer()
buffer_index = 0
return np.array(list(samples)), num_tries
class SampledSoftmaxLoss(torch.nn.Module):
"""
Based on the default log_uniform_candidate_sampler in tensorflow.
!!! NOTE
num_words DOES NOT include padding id.
!!! NOTE
In all cases except (tie_embeddings=True and use_character_inputs=False)
the weights are dimensioned as num_words and do not include an entry for the padding (0) id.
For the (tie_embeddings=True and use_character_inputs=False) case,
then the embeddings DO include the extra 0 padding, to be consistent with the word embedding layer.
# Parameters
num_words, `int`, required
The number of words in the vocabulary
embedding_dim, `int`, required
The dimension to softmax over
num_samples, `int`, required
During training take this many samples. Must be less than num_words.
sparse, `bool`, optional (default = `False`)
If this is true, we use a sparse embedding matrix.
unk_id, `int`, optional (default = `None`)
If provided, the id that represents unknown characters.
use_character_inputs, `bool`, optional (default = `True`)
Whether to use character inputs
use_fast_sampler, `bool`, optional (default = `False`)
Whether to use the fast cython sampler.
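    # Examples
    A minimal usage sketch with arbitrary sizes (target ids assume padding has already been removed):
    ```python
    loss_fn = SampledSoftmaxLoss(num_words=10000, embedding_dim=64, num_samples=100)
    embeddings = torch.randn(8, 64)
    targets = torch.randint(0, 10000, (8,))
    training_loss = loss_fn(embeddings, targets)     # sampled softmax (training mode)
    eval_loss = loss_fn.eval()(embeddings, targets)  # full softmax (eval mode)
    ```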
"""
def __init__(
self,
num_words: int,
embedding_dim: int,
num_samples: int,
sparse: bool = False,
unk_id: int = None,
use_character_inputs: bool = True,
use_fast_sampler: bool = False,
) -> None:
super().__init__()
# TODO(joelgrus): implement tie_embeddings (maybe)
self.tie_embeddings = False
assert num_samples < num_words
if use_fast_sampler:
raise ConfigurationError("fast sampler is not implemented")
else:
self.choice_func = _choice
        # Glorot init (std = 1.0 / sqrt(fan_in))
if sparse:
# create our own sparse embedding
self.softmax_w = torch.nn.Embedding(
num_embeddings=num_words, embedding_dim=embedding_dim, sparse=True
)
self.softmax_w.weight.data.normal_(mean=0.0, std=1.0 / np.sqrt(embedding_dim))
self.softmax_b = torch.nn.Embedding(
num_embeddings=num_words, embedding_dim=1, sparse=True
)
self.softmax_b.weight.data.fill_(0.0)
else:
# just create tensors to use as the embeddings
            # Glorot init (std = 1.0 / sqrt(fan_in))
self.softmax_w = torch.nn.Parameter(
torch.randn(num_words, embedding_dim) / np.sqrt(embedding_dim)
)
self.softmax_b = torch.nn.Parameter(torch.zeros(num_words))
self.sparse = sparse
self.use_character_inputs = use_character_inputs
if use_character_inputs:
self._unk_id = unk_id
self._num_samples = num_samples
self._embedding_dim = embedding_dim
self._num_words = num_words
self.initialize_num_words()
def initialize_num_words(self):
if self.sparse:
num_words = self.softmax_w.weight.size(0)
else:
num_words = self.softmax_w.size(0)
self._num_words = num_words
self._log_num_words_p1 = np.log(num_words + 1)
# compute the probability of each sampled id
self._probs = (
np.log(np.arange(num_words) + 2) - np.log(np.arange(num_words) + 1)
) / self._log_num_words_p1
def forward(
self,
embeddings: torch.Tensor,
targets: torch.Tensor,
target_token_embedding: torch.Tensor = None,
) -> torch.Tensor:
# embeddings is size (n, embedding_dim)
# targets is (n_words, ) with the index of the actual target
        # when tying weights, target_token_embedding is required.
# it is size (n_words, embedding_dim)
# returns log likelihood loss (batch_size, )
# Does not do any count normalization / divide by batch size
if embeddings.shape[0] == 0:
# empty batch
return torch.tensor(0.0, device=embeddings.device)
if not self.training:
return self._forward_eval(embeddings, targets)
else:
return self._forward_train(embeddings, targets, target_token_embedding)
def _forward_train(
self, embeddings: torch.Tensor, targets: torch.Tensor, target_token_embedding: torch.Tensor
) -> torch.Tensor:
# (target_token_embedding is only used in the tie_embeddings case,
# which is not implemented)
# want to compute (n, n_samples + 1) array with the log
# probabilities where the first index is the true target
        # and the remaining ones are the negative samples.
# then we can just select the first column
# NOTE: targets input has padding removed (so 0 == the first id, NOT the padding id)
(
sampled_ids,
target_expected_count,
sampled_expected_count,
) = self.log_uniform_candidate_sampler(targets, choice_func=self.choice_func)
long_targets = targets.long()
long_targets.requires_grad_(False)
# Get the softmax weights (so we can compute logits)
# shape (batch_size * max_sequence_length + num_samples)
all_ids = torch.cat([long_targets, sampled_ids], dim=0)
if self.sparse:
all_ids_1 = all_ids.unsqueeze(1)
all_w = self.softmax_w(all_ids_1).squeeze(1)
all_b = self.softmax_b(all_ids_1).squeeze(2).squeeze(1)
else:
all_w = torch.nn.functional.embedding(all_ids, self.softmax_w)
# the unsqueeze / squeeze works around an issue with 1 dim
# embeddings
all_b = torch.nn.functional.embedding(all_ids, self.softmax_b.unsqueeze(1)).squeeze(1)
batch_size = long_targets.size(0)
true_w = all_w[:batch_size, :]
sampled_w = all_w[batch_size:, :]
true_b = all_b[:batch_size]
sampled_b = all_b[batch_size:]
# compute the logits and remove log expected counts
# [batch_size, ]
true_logits = (
(true_w * embeddings).sum(dim=1)
+ true_b
- torch.log(
target_expected_count + util.tiny_value_of_dtype(target_expected_count.dtype)
)
)
# [batch_size, n_samples]
sampled_logits = (
torch.matmul(embeddings, sampled_w.t())
+ sampled_b
- torch.log(
sampled_expected_count + util.tiny_value_of_dtype(sampled_expected_count.dtype)
)
)
# remove true labels -- we will take
# softmax, so set the sampled logits of true values to a large
# negative number
# [batch_size, n_samples]
true_in_sample_mask = sampled_ids == long_targets.unsqueeze(1)
masked_sampled_logits = sampled_logits.masked_fill(true_in_sample_mask, -10000.0)
# now concat the true logits as index 0
# [batch_size, n_samples + 1]
logits = torch.cat([true_logits.unsqueeze(1), masked_sampled_logits], dim=1)
# finally take log_softmax
log_softmax = torch.nn.functional.log_softmax(logits, dim=1)
        # true log likelihood is index 0, loss = -1.0 * sum over batch
nll_loss = -1.0 * log_softmax[:, 0].sum()
return nll_loss
def _forward_eval(self, embeddings: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
# evaluation mode, use full softmax
if self.sparse:
w = self.softmax_w.weight
b = self.softmax_b.weight.squeeze(1)
else:
w = self.softmax_w
b = self.softmax_b
log_softmax = torch.nn.functional.log_softmax(torch.matmul(embeddings, w.t()) + b, dim=-1)
if self.tie_embeddings and not self.use_character_inputs:
targets_ = targets + 1
else:
targets_ = targets
return torch.nn.functional.nll_loss(log_softmax, targets_.long(), reduction="sum")
def log_uniform_candidate_sampler(self, targets, choice_func=_choice):
# returns sampled, true_expected_count, sampled_expected_count
# targets = (batch_size, )
#
# samples = (n_samples, )
# true_expected_count = (batch_size, )
# sampled_expected_count = (n_samples, )
# see: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/range_sampler.h
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/range_sampler.cc
# algorithm: keep track of number of tries when doing sampling,
# then expected count is
# -expm1(num_tries * log1p(-p))
# = (1 - (1-p)^num_tries) where p is self._probs[id]
np_sampled_ids, num_tries = choice_func(self._num_words, self._num_samples)
sampled_ids = torch.from_numpy(np_sampled_ids).to(targets.device)
# Compute expected count = (1 - (1-p)^num_tries) = -expm1(num_tries * log1p(-p))
# P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)
target_probs = (
torch.log((targets.float() + 2.0) / (targets.float() + 1.0)) / self._log_num_words_p1
)
target_expected_count = -1.0 * (torch.exp(num_tries * torch.log1p(-target_probs)) - 1.0)
sampled_probs = (
torch.log((sampled_ids.float() + 2.0) / (sampled_ids.float() + 1.0))
/ self._log_num_words_p1
)
sampled_expected_count = -1.0 * (torch.exp(num_tries * torch.log1p(-sampled_probs)) - 1.0)
sampled_ids.requires_grad_(False)
target_expected_count.requires_grad_(False)
sampled_expected_count.requires_grad_(False)
return sampled_ids, target_expected_count, sampled_expected_count
| allennlp-master | allennlp/modules/sampled_softmax_loss.py |
import torch
class ResidualWithLayerDropout(torch.nn.Module):
"""
A residual connection with the layer dropout technique [Deep Networks with Stochastic
Depth](https://arxiv.org/pdf/1603.09382.pdf).
This module accepts the input and output of a layer, decides whether this layer should
be stochastically dropped, returns either the input or output + input. During testing,
it will re-calibrate the outputs of this layer by the expected number of times it
participates in training.
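    # Examples
    A minimal usage sketch; the `Linear` layer here merely stands in for a real sub-layer
    with matching input and output shapes:
    ```python
    residual = ResidualWithLayerDropout(undecayed_dropout_prob=0.5)
    layer_input = torch.randn(4, 10)
    layer_output = torch.nn.Linear(10, 10)(layer_input)
    output = residual(layer_input, layer_output, layer_index=2, total_layers=6)
    ```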
"""
def __init__(self, undecayed_dropout_prob: float = 0.5) -> None:
super().__init__()
if undecayed_dropout_prob < 0 or undecayed_dropout_prob > 1:
raise ValueError(
f"undecayed dropout probability has to be between 0 and 1, "
f"but got {undecayed_dropout_prob}"
)
self.undecayed_dropout_prob = undecayed_dropout_prob
def forward(
self, # type: ignore
layer_input: torch.Tensor,
layer_output: torch.Tensor,
layer_index: int = None,
total_layers: int = None,
) -> torch.Tensor:
"""
Apply dropout to this layer, for this whole mini-batch.
        dropout_prob = layer_index / total_layers * undecayed_dropout_prob if both layer_index
        and total_layers are specified; otherwise undecayed_dropout_prob is used directly.
# Parameters
layer_input `torch.FloatTensor` required
The input tensor of this layer.
layer_output `torch.FloatTensor` required
The output tensor of this layer, with the same shape as the layer_input.
layer_index `int`
            The layer index, starting from 1. This is used to calculate the dropout prob
together with the `total_layers` parameter.
total_layers `int`
The total number of layers.
# Returns
output : `torch.FloatTensor`
A tensor with the same shape as `layer_input` and `layer_output`.
"""
if layer_index is not None and total_layers is not None:
dropout_prob = 1.0 * self.undecayed_dropout_prob * layer_index / total_layers
else:
dropout_prob = 1.0 * self.undecayed_dropout_prob
if self.training:
if torch.rand(1) < dropout_prob:
return layer_input
else:
return layer_output + layer_input
else:
return (1 - dropout_prob) * layer_output + layer_input
| allennlp-master | allennlp/modules/residual_with_layer_dropout.py |
"""
Multi-perspective matching layer
"""
from typing import Tuple, List
import torch
import torch.nn as nn
import torch.nn.functional as F
from allennlp.common.checks import ConfigurationError
from allennlp.common.registrable import FromParams
from allennlp.nn.util import (
get_lengths_from_binary_sequence_mask,
masked_max,
masked_mean,
masked_softmax,
tiny_value_of_dtype,
)
def multi_perspective_match(
vector1: torch.Tensor, vector2: torch.Tensor, weight: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Calculate multi-perspective cosine matching between time-steps of vectors
of the same length.
# Parameters
vector1 : `torch.Tensor`
A tensor of shape `(batch, seq_len, hidden_size)`
vector2 : `torch.Tensor`
A tensor of shape `(batch, seq_len or 1, hidden_size)`
weight : `torch.Tensor`
A tensor of shape `(num_perspectives, hidden_size)`
# Returns
`torch.Tensor` :
Shape `(batch, seq_len, 1)`.
`torch.Tensor` :
Shape `(batch, seq_len, num_perspectives)`.
"""
assert vector1.size(0) == vector2.size(0)
    assert weight.size(1) == vector1.size(2) == vector2.size(2)
# (batch, seq_len, 1)
similarity_single = F.cosine_similarity(vector1, vector2, 2).unsqueeze(2)
# (1, 1, num_perspectives, hidden_size)
weight = weight.unsqueeze(0).unsqueeze(0)
# (batch, seq_len, num_perspectives, hidden_size)
vector1 = weight * vector1.unsqueeze(2)
vector2 = weight * vector2.unsqueeze(2)
similarity_multi = F.cosine_similarity(vector1, vector2, dim=3)
return similarity_single, similarity_multi
def multi_perspective_match_pairwise(
vector1: torch.Tensor, vector2: torch.Tensor, weight: torch.Tensor
) -> torch.Tensor:
"""
Calculate multi-perspective cosine matching between each time step of
one vector and each time step of another vector.
# Parameters
vector1 : `torch.Tensor`
A tensor of shape `(batch, seq_len1, hidden_size)`
vector2 : `torch.Tensor`
A tensor of shape `(batch, seq_len2, hidden_size)`
weight : `torch.Tensor`
A tensor of shape `(num_perspectives, hidden_size)`
# Returns
`torch.Tensor` :
        A tensor of shape `(batch, seq_len1, seq_len2, num_perspectives)` consisting of
        multi-perspective matching results
"""
num_perspectives = weight.size(0)
# (1, num_perspectives, 1, hidden_size)
weight = weight.unsqueeze(0).unsqueeze(2)
# (batch, num_perspectives, seq_len*, hidden_size)
vector1 = weight * vector1.unsqueeze(1).expand(-1, num_perspectives, -1, -1)
vector2 = weight * vector2.unsqueeze(1).expand(-1, num_perspectives, -1, -1)
# (batch, num_perspectives, seq_len*, 1)
vector1_norm = vector1.norm(p=2, dim=3, keepdim=True)
vector2_norm = vector2.norm(p=2, dim=3, keepdim=True)
# (batch, num_perspectives, seq_len1, seq_len2)
mul_result = torch.matmul(vector1, vector2.transpose(2, 3))
norm_value = vector1_norm * vector2_norm.transpose(2, 3)
# (batch, seq_len1, seq_len2, num_perspectives)
return (mul_result / norm_value.clamp(min=tiny_value_of_dtype(norm_value.dtype))).permute(
0, 2, 3, 1
)
class BiMpmMatching(nn.Module, FromParams):
"""
This `Module` implements the matching layer of BiMPM model described in [Bilateral
Multi-Perspective Matching for Natural Language Sentences](https://arxiv.org/abs/1702.03814)
by Zhiguo Wang et al., 2017.
Also please refer to the [TensorFlow implementation](https://github.com/zhiguowang/BiMPM/) and
[PyTorch implementation](https://github.com/galsang/BIMPM-pytorch).
# Parameters
hidden_dim : `int`, optional (default = `100`)
The hidden dimension of the representations
num_perspectives : `int`, optional (default = `20`)
The number of perspectives for matching
share_weights_between_directions : `bool`, optional (default = `True`)
If True, share weight between matching from sentence1 to sentence2 and from sentence2
to sentence1, useful for non-symmetric tasks
is_forward : `bool`, optional (default = `None`)
Whether the matching is for forward sequence or backward sequence, useful in finding last
token in full matching. It can not be None if with_full_match is True.
with_full_match : `bool`, optional (default = `True`)
If True, include full match
with_maxpool_match : `bool`, optional (default = `True`)
If True, include max pool match
with_attentive_match : `bool`, optional (default = `True`)
If True, include attentive match
with_max_attentive_match : `bool`, optional (default = `True`)
If True, include max attentive match
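    # Examples
    A minimal usage sketch with small, arbitrary dimensions:
    ```python
    matcher = BiMpmMatching(hidden_dim=8, num_perspectives=4, is_forward=True)
    context_1, mask_1 = torch.randn(2, 5, 8), torch.ones(2, 5, dtype=torch.bool)
    context_2, mask_2 = torch.randn(2, 7, 8), torch.ones(2, 7, dtype=torch.bool)
    matching_1, matching_2 = matcher(context_1, mask_1, context_2, mask_2)
    # torch.cat(matching_1, dim=2) has shape (2, 5, matcher.get_output_dim())
    ```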
"""
def __init__(
self,
hidden_dim: int = 100,
num_perspectives: int = 20,
share_weights_between_directions: bool = True,
is_forward: bool = None,
with_full_match: bool = True,
with_maxpool_match: bool = True,
with_attentive_match: bool = True,
with_max_attentive_match: bool = True,
) -> None:
super().__init__()
self.hidden_dim = hidden_dim
self.num_perspectives = num_perspectives
self.is_forward = is_forward
self.with_full_match = with_full_match
self.with_maxpool_match = with_maxpool_match
self.with_attentive_match = with_attentive_match
self.with_max_attentive_match = with_max_attentive_match
if not (
with_full_match
or with_maxpool_match
or with_attentive_match
or with_max_attentive_match
):
raise ConfigurationError("At least one of the matching method should be enabled")
def create_parameter(): # utility function to create and initialize a parameter
param = nn.Parameter(torch.zeros(num_perspectives, hidden_dim))
torch.nn.init.kaiming_normal_(param)
return param
def share_or_create(weights_to_share): # utility function to create or share the weights
return weights_to_share if share_weights_between_directions else create_parameter()
output_dim = (
            2  # used to calculate total output dimension, 2 is for cosine max and cosine mean
)
if with_full_match:
if is_forward is None:
raise ConfigurationError("Must specify is_forward to enable full matching")
self.full_match_weights = create_parameter()
self.full_match_weights_reversed = share_or_create(self.full_match_weights)
output_dim += num_perspectives + 1
if with_maxpool_match:
self.maxpool_match_weights = create_parameter()
output_dim += num_perspectives * 2
if with_attentive_match:
self.attentive_match_weights = create_parameter()
self.attentive_match_weights_reversed = share_or_create(self.attentive_match_weights)
output_dim += num_perspectives + 1
if with_max_attentive_match:
self.max_attentive_match_weights = create_parameter()
self.max_attentive_match_weights_reversed = share_or_create(
self.max_attentive_match_weights
)
output_dim += num_perspectives + 1
self.output_dim = output_dim
def get_output_dim(self) -> int:
return self.output_dim
def forward(
self,
context_1: torch.Tensor,
mask_1: torch.BoolTensor,
context_2: torch.Tensor,
mask_2: torch.BoolTensor,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
"""
Given the forward (or backward) representations of sentence1 and sentence2, apply four bilateral
matching functions between them in one direction.
# Parameters
context_1 : `torch.Tensor`
Tensor of shape (batch_size, seq_len1, hidden_dim) representing the encoding of the first sentence.
mask_1 : `torch.BoolTensor`
Boolean Tensor of shape (batch_size, seq_len1), indicating which
positions in the first sentence are padding (0) and which are not (1).
context_2 : `torch.Tensor`
Tensor of shape (batch_size, seq_len2, hidden_dim) representing the encoding of the second sentence.
mask_2 : `torch.BoolTensor`
Boolean Tensor of shape (batch_size, seq_len2), indicating which
positions in the second sentence are padding (0) and which are not (1).
# Returns
`Tuple[List[torch.Tensor], List[torch.Tensor]]` :
A tuple of matching vectors for the two sentences. Each of which is a list of
matching vectors of shape (batch, seq_len, num_perspectives or 1)
"""
assert (not mask_2.requires_grad) and (not mask_1.requires_grad)
assert context_1.size(-1) == context_2.size(-1) == self.hidden_dim
# (batch,)
len_1 = get_lengths_from_binary_sequence_mask(mask_1)
len_2 = get_lengths_from_binary_sequence_mask(mask_2)
# explicitly set masked weights to zero
# (batch_size, seq_len*, hidden_dim)
context_1 = context_1 * mask_1.unsqueeze(-1)
context_2 = context_2 * mask_2.unsqueeze(-1)
# array to keep the matching vectors for the two sentences
matching_vector_1: List[torch.Tensor] = []
matching_vector_2: List[torch.Tensor] = []
# Step 0. unweighted cosine
# First calculate the cosine similarities between each forward
# (or backward) contextual embedding and every forward (or backward)
# contextual embedding of the other sentence.
# (batch, seq_len1, seq_len2)
cosine_sim = F.cosine_similarity(context_1.unsqueeze(-2), context_2.unsqueeze(-3), dim=3)
# (batch, seq_len*, 1)
cosine_max_1 = masked_max(cosine_sim, mask_2.unsqueeze(-2), dim=2, keepdim=True)
cosine_mean_1 = masked_mean(cosine_sim, mask_2.unsqueeze(-2), dim=2, keepdim=True)
cosine_max_2 = masked_max(
cosine_sim.permute(0, 2, 1), mask_1.unsqueeze(-2), dim=2, keepdim=True
)
cosine_mean_2 = masked_mean(
cosine_sim.permute(0, 2, 1), mask_1.unsqueeze(-2), dim=2, keepdim=True
)
matching_vector_1.extend([cosine_max_1, cosine_mean_1])
matching_vector_2.extend([cosine_max_2, cosine_mean_2])
# Step 1. Full-Matching
# Each time step of forward (or backward) contextual embedding of one sentence
# is compared with the last time step of the forward (or backward)
# contextual embedding of the other sentence
if self.with_full_match:
# (batch, 1, hidden_dim)
if self.is_forward:
# (batch, 1, hidden_dim)
last_position_1 = (len_1 - 1).clamp(min=0)
last_position_1 = last_position_1.view(-1, 1, 1).expand(-1, 1, self.hidden_dim)
last_position_2 = (len_2 - 1).clamp(min=0)
last_position_2 = last_position_2.view(-1, 1, 1).expand(-1, 1, self.hidden_dim)
context_1_last = context_1.gather(1, last_position_1)
context_2_last = context_2.gather(1, last_position_2)
else:
context_1_last = context_1[:, 0:1, :]
context_2_last = context_2[:, 0:1, :]
# (batch, seq_len*, num_perspectives)
matching_vector_1_full = multi_perspective_match(
context_1, context_2_last, self.full_match_weights
)
matching_vector_2_full = multi_perspective_match(
context_2, context_1_last, self.full_match_weights_reversed
)
matching_vector_1.extend(matching_vector_1_full)
matching_vector_2.extend(matching_vector_2_full)
# Step 2. Maxpooling-Matching
# Each time step of forward (or backward) contextual embedding of one sentence
# is compared with every time step of the forward (or backward)
# contextual embedding of the other sentence, and only the max value of each
# dimension is retained.
if self.with_maxpool_match:
# (batch, seq_len1, seq_len2, num_perspectives)
matching_vector_max = multi_perspective_match_pairwise(
context_1, context_2, self.maxpool_match_weights
)
# (batch, seq_len*, num_perspectives)
matching_vector_1_max = masked_max(
matching_vector_max, mask_2.unsqueeze(-2).unsqueeze(-1), dim=2
)
matching_vector_1_mean = masked_mean(
matching_vector_max, mask_2.unsqueeze(-2).unsqueeze(-1), dim=2
)
matching_vector_2_max = masked_max(
matching_vector_max.permute(0, 2, 1, 3), mask_1.unsqueeze(-2).unsqueeze(-1), dim=2
)
matching_vector_2_mean = masked_mean(
matching_vector_max.permute(0, 2, 1, 3), mask_1.unsqueeze(-2).unsqueeze(-1), dim=2
)
matching_vector_1.extend([matching_vector_1_max, matching_vector_1_mean])
matching_vector_2.extend([matching_vector_2_max, matching_vector_2_mean])
# Step 3. Attentive-Matching
# Each forward (or backward) similarity is taken as the weight
# of the forward (or backward) contextual embedding, and calculate an
# attentive vector for the sentence by weighted summing all its
# contextual embeddings.
# Finally match each forward (or backward) contextual embedding
# with its corresponding attentive vector.
# (batch, seq_len1, seq_len2, hidden_dim)
att_2 = context_2.unsqueeze(-3) * cosine_sim.unsqueeze(-1)
# (batch, seq_len1, seq_len2, hidden_dim)
att_1 = context_1.unsqueeze(-2) * cosine_sim.unsqueeze(-1)
if self.with_attentive_match:
# (batch, seq_len*, hidden_dim)
att_mean_2 = masked_softmax(att_2.sum(dim=2), mask_1.unsqueeze(-1))
att_mean_1 = masked_softmax(att_1.sum(dim=1), mask_2.unsqueeze(-1))
# (batch, seq_len*, num_perspectives)
matching_vector_1_att_mean = multi_perspective_match(
context_1, att_mean_2, self.attentive_match_weights
)
matching_vector_2_att_mean = multi_perspective_match(
context_2, att_mean_1, self.attentive_match_weights_reversed
)
matching_vector_1.extend(matching_vector_1_att_mean)
matching_vector_2.extend(matching_vector_2_att_mean)
# Step 4. Max-Attentive-Matching
# Pick the contextual embeddings with the highest cosine similarity as the attentive
# vector, and match each forward (or backward) contextual embedding with its
# corresponding attentive vector.
if self.with_max_attentive_match:
# (batch, seq_len*, hidden_dim)
att_max_2 = masked_max(att_2, mask_2.unsqueeze(-2).unsqueeze(-1), dim=2)
att_max_1 = masked_max(
att_1.permute(0, 2, 1, 3), mask_1.unsqueeze(-2).unsqueeze(-1), dim=2
)
# (batch, seq_len*, num_perspectives)
matching_vector_1_att_max = multi_perspective_match(
context_1, att_max_2, self.max_attentive_match_weights
)
matching_vector_2_att_max = multi_perspective_match(
context_2, att_max_1, self.max_attentive_match_weights_reversed
)
matching_vector_1.extend(matching_vector_1_att_max)
matching_vector_2.extend(matching_vector_2_att_max)
return matching_vector_1, matching_vector_2
| allennlp-master | allennlp/modules/bimpm_matching.py |
"""
Conditional random field
"""
from typing import List, Tuple, Dict, Union
import torch
from allennlp.common.checks import ConfigurationError
import allennlp.nn.util as util
VITERBI_DECODING = Tuple[List[int], float] # a list of tags, and a viterbi score
def allowed_transitions(constraint_type: str, labels: Dict[int, str]) -> List[Tuple[int, int]]:
"""
Given labels and a constraint type, returns the allowed transitions. It will
additionally include transitions for the start and end states, which are used
by the conditional random field.
# Parameters
constraint_type : `str`, required
Indicates which constraint to apply. Current choices are
"BIO", "IOB1", "BIOUL", and "BMES".
labels : `Dict[int, str]`, required
A mapping {label_id -> label}. Most commonly this would be the value from
Vocabulary.get_index_to_token_vocabulary()
# Returns
`List[Tuple[int, int]]`
The allowed transitions (from_label_id, to_label_id).
"""
num_labels = len(labels)
start_tag = num_labels
end_tag = num_labels + 1
labels_with_boundaries = list(labels.items()) + [(start_tag, "START"), (end_tag, "END")]
allowed = []
for from_label_index, from_label in labels_with_boundaries:
if from_label in ("START", "END"):
from_tag = from_label
from_entity = ""
else:
from_tag = from_label[0]
from_entity = from_label[1:]
for to_label_index, to_label in labels_with_boundaries:
if to_label in ("START", "END"):
to_tag = to_label
to_entity = ""
else:
to_tag = to_label[0]
to_entity = to_label[1:]
if is_transition_allowed(constraint_type, from_tag, from_entity, to_tag, to_entity):
allowed.append((from_label_index, to_label_index))
return allowed
def is_transition_allowed(
constraint_type: str, from_tag: str, from_entity: str, to_tag: str, to_entity: str
):
"""
Given a constraint type and strings `from_tag` and `to_tag` that
represent the origin and destination of the transition, return whether
the transition is allowed under the given constraint type.
# Parameters
constraint_type : `str`, required
Indicates which constraint to apply. Current choices are
"BIO", "IOB1", "BIOUL", and "BMES".
from_tag : `str`, required
The tag that the transition originates from. For example, if the
label is `I-PER`, the `from_tag` is `I`.
from_entity : `str`, required
The entity corresponding to the `from_tag`. For example, if the
label is `I-PER`, the `from_entity` is `PER`.
to_tag : `str`, required
The tag that the transition leads to. For example, if the
label is `I-PER`, the `to_tag` is `I`.
to_entity : `str`, required
The entity corresponding to the `to_tag`. For example, if the
label is `I-PER`, the `to_entity` is `PER`.
# Returns
`bool`
Whether the transition is allowed under the given `constraint_type`.
"""
if to_tag == "START" or from_tag == "END":
# Cannot transition into START or from END
return False
if constraint_type == "BIOUL":
if from_tag == "START":
return to_tag in ("O", "B", "U")
if to_tag == "END":
return from_tag in ("O", "L", "U")
return any(
[
# O can transition to O, B-* or U-*
# L-x can transition to O, B-*, or U-*
# U-x can transition to O, B-*, or U-*
from_tag in ("O", "L", "U") and to_tag in ("O", "B", "U"),
# B-x can only transition to I-x or L-x
# I-x can only transition to I-x or L-x
from_tag in ("B", "I") and to_tag in ("I", "L") and from_entity == to_entity,
]
)
elif constraint_type == "BIO":
if from_tag == "START":
return to_tag in ("O", "B")
if to_tag == "END":
return from_tag in ("O", "B", "I")
return any(
[
# Can always transition to O or B-x
to_tag in ("O", "B"),
# Can only transition to I-x from B-x or I-x
to_tag == "I" and from_tag in ("B", "I") and from_entity == to_entity,
]
)
elif constraint_type == "IOB1":
if from_tag == "START":
return to_tag in ("O", "I")
if to_tag == "END":
return from_tag in ("O", "B", "I")
return any(
[
# Can always transition to O or I-x
to_tag in ("O", "I"),
# Can only transition to B-x from B-x or I-x, where
# x is the same tag.
to_tag == "B" and from_tag in ("B", "I") and from_entity == to_entity,
]
)
elif constraint_type == "BMES":
if from_tag == "START":
return to_tag in ("B", "S")
if to_tag == "END":
return from_tag in ("E", "S")
return any(
[
# Can only transition to B or S from E or S.
to_tag in ("B", "S") and from_tag in ("E", "S"),
# Can only transition to M-x from B-x, where
# x is the same tag.
to_tag == "M" and from_tag in ("B", "M") and from_entity == to_entity,
# Can only transition to E-x from B-x or M-x, where
# x is the same tag.
to_tag == "E" and from_tag in ("B", "M") and from_entity == to_entity,
]
)
else:
raise ConfigurationError(f"Unknown constraint type: {constraint_type}")
class ConditionalRandomField(torch.nn.Module):
"""
This module uses the "forward-backward" algorithm to compute
the log-likelihood of its inputs assuming a conditional random field model.
See, e.g. http://www.cs.columbia.edu/~mcollins/fb.pdf
# Parameters
num_tags : `int`, required
The number of tags.
constraints : `List[Tuple[int, int]]`, optional (default = `None`)
An optional list of allowed transitions (from_tag_id, to_tag_id).
These are applied to `viterbi_tags()` but do not affect `forward()`.
These should be derived from `allowed_transitions` so that the
start and end transitions are handled correctly for your tag type.
include_start_end_transitions : `bool`, optional (default = `True`)
Whether to include the start and end transition parameters.
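    # Examples
    A minimal sketch using the BIO constraints produced by `allowed_transitions` above:
    ```python
    labels = {0: "B-PER", 1: "I-PER", 2: "O"}
    crf = ConditionalRandomField(num_tags=3, constraints=allowed_transitions("BIO", labels))
    logits = torch.randn(2, 5, 3)
    tags = torch.randint(0, 3, (2, 5))
    mask = torch.ones(2, 5, dtype=torch.bool)
    log_likelihood = crf(logits, tags, mask)
    best_paths = crf.viterbi_tags(logits, mask)  # [(tag_sequence, viterbi_score), ...]
    ```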
"""
def __init__(
self,
num_tags: int,
constraints: List[Tuple[int, int]] = None,
include_start_end_transitions: bool = True,
) -> None:
super().__init__()
self.num_tags = num_tags
# transitions[i, j] is the logit for transitioning from state i to state j.
self.transitions = torch.nn.Parameter(torch.Tensor(num_tags, num_tags))
# _constraint_mask indicates valid transitions (based on supplied constraints).
        # Include special start of sequence (index num_tags) and end of sequence (index num_tags + 1) tags.
if constraints is None:
# All transitions are valid.
constraint_mask = torch.Tensor(num_tags + 2, num_tags + 2).fill_(1.0)
else:
constraint_mask = torch.Tensor(num_tags + 2, num_tags + 2).fill_(0.0)
for i, j in constraints:
constraint_mask[i, j] = 1.0
self._constraint_mask = torch.nn.Parameter(constraint_mask, requires_grad=False)
# Also need logits for transitioning from "start" state and to "end" state.
self.include_start_end_transitions = include_start_end_transitions
if include_start_end_transitions:
self.start_transitions = torch.nn.Parameter(torch.Tensor(num_tags))
self.end_transitions = torch.nn.Parameter(torch.Tensor(num_tags))
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_normal_(self.transitions)
if self.include_start_end_transitions:
torch.nn.init.normal_(self.start_transitions)
torch.nn.init.normal_(self.end_transitions)
def _input_likelihood(self, logits: torch.Tensor, mask: torch.BoolTensor) -> torch.Tensor:
"""
Computes the (batch_size,) denominator term for the log-likelihood, which is the
sum of the likelihoods across all possible state sequences.
"""
batch_size, sequence_length, num_tags = logits.size()
# Transpose batch size and sequence dimensions
mask = mask.transpose(0, 1).contiguous()
logits = logits.transpose(0, 1).contiguous()
# Initial alpha is the (batch_size, num_tags) tensor of likelihoods combining the
# transitions to the initial states and the logits for the first timestep.
if self.include_start_end_transitions:
alpha = self.start_transitions.view(1, num_tags) + logits[0]
else:
alpha = logits[0]
# For each i we compute logits for the transitions from timestep i-1 to timestep i.
# We do so in a (batch_size, num_tags, num_tags) tensor where the axes are
# (instance, current_tag, next_tag)
for i in range(1, sequence_length):
# The emit scores are for time i ("next_tag") so we broadcast along the current_tag axis.
emit_scores = logits[i].view(batch_size, 1, num_tags)
# Transition scores are (current_tag, next_tag) so we broadcast along the instance axis.
transition_scores = self.transitions.view(1, num_tags, num_tags)
# Alpha is for the current_tag, so we broadcast along the next_tag axis.
broadcast_alpha = alpha.view(batch_size, num_tags, 1)
            # Add all the scores together and logsumexp over the current_tag axis.
inner = broadcast_alpha + emit_scores + transition_scores
# In valid positions (mask == True) we want to take the logsumexp over the current_tag dimension
# of `inner`. Otherwise (mask == False) we want to retain the previous alpha.
alpha = util.logsumexp(inner, 1) * mask[i].view(batch_size, 1) + alpha * (
~mask[i]
).view(batch_size, 1)
# Every sequence needs to end with a transition to the stop_tag.
if self.include_start_end_transitions:
stops = alpha + self.end_transitions.view(1, num_tags)
else:
stops = alpha
# Finally we log_sum_exp along the num_tags dim, result is (batch_size,)
return util.logsumexp(stops)
def _joint_likelihood(
self, logits: torch.Tensor, tags: torch.Tensor, mask: torch.BoolTensor
) -> torch.Tensor:
"""
Computes the numerator term for the log-likelihood, which is just score(inputs, tags)
"""
batch_size, sequence_length, _ = logits.data.shape
# Transpose batch size and sequence dimensions:
logits = logits.transpose(0, 1).contiguous()
mask = mask.transpose(0, 1).contiguous()
tags = tags.transpose(0, 1).contiguous()
# Start with the transition scores from start_tag to the first tag in each input
if self.include_start_end_transitions:
score = self.start_transitions.index_select(0, tags[0])
else:
score = 0.0
# Add up the scores for the observed transitions and all the inputs but the last
for i in range(sequence_length - 1):
# Each is shape (batch_size,)
current_tag, next_tag = tags[i], tags[i + 1]
# The scores for transitioning from current_tag to next_tag
transition_score = self.transitions[current_tag.view(-1), next_tag.view(-1)]
# The score for using current_tag
emit_score = logits[i].gather(1, current_tag.view(batch_size, 1)).squeeze(1)
# Include transition score if next element is unmasked,
# input_score if this element is unmasked.
score = score + transition_score * mask[i + 1] + emit_score * mask[i]
# Transition from last state to "stop" state. To start with, we need to find the last tag
# for each instance.
last_tag_index = mask.sum(0).long() - 1
last_tags = tags.gather(0, last_tag_index.view(1, batch_size)).squeeze(0)
# Compute score of transitioning to `stop_tag` from each "last tag".
if self.include_start_end_transitions:
last_transition_score = self.end_transitions.index_select(0, last_tags)
else:
last_transition_score = 0.0
# Add the last input if it's not masked.
last_inputs = logits[-1] # (batch_size, num_tags)
last_input_score = last_inputs.gather(1, last_tags.view(-1, 1)) # (batch_size, 1)
last_input_score = last_input_score.squeeze() # (batch_size,)
score = score + last_transition_score + last_input_score * mask[-1]
return score
def forward(
self, inputs: torch.Tensor, tags: torch.Tensor, mask: torch.BoolTensor = None
) -> torch.Tensor:
"""
Computes the log likelihood.
"""
if mask is None:
mask = torch.ones(*tags.size(), dtype=torch.bool)
else:
# The code below fails in weird ways if this isn't a bool tensor, so we make sure.
mask = mask.to(torch.bool)
log_denominator = self._input_likelihood(inputs, mask)
log_numerator = self._joint_likelihood(inputs, tags, mask)
return torch.sum(log_numerator - log_denominator)
def viterbi_tags(
self, logits: torch.Tensor, mask: torch.BoolTensor = None, top_k: int = None
) -> Union[List[VITERBI_DECODING], List[List[VITERBI_DECODING]]]:
"""
Uses viterbi algorithm to find most likely tags for the given inputs.
If constraints are applied, disallows all other transitions.
Returns a list of results, of the same size as the batch (one result per batch member)
Each result is a List of length top_k, containing the top K viterbi decodings
Each decoding is a tuple (tag_sequence, viterbi_score)
For backwards compatibility, if top_k is None, then instead returns a flat list of
tag sequences (the top tag sequence for each batch item).
"""
if mask is None:
mask = torch.ones(*logits.shape[:2], dtype=torch.bool, device=logits.device)
if top_k is None:
top_k = 1
flatten_output = True
else:
flatten_output = False
_, max_seq_length, num_tags = logits.size()
# Get the tensors out of the variables
logits, mask = logits.data, mask.data
# Augment transitions matrix with start and end transitions
start_tag = num_tags
end_tag = num_tags + 1
transitions = torch.Tensor(num_tags + 2, num_tags + 2).fill_(-10000.0)
# Apply transition constraints
constrained_transitions = self.transitions * self._constraint_mask[
:num_tags, :num_tags
] + -10000.0 * (1 - self._constraint_mask[:num_tags, :num_tags])
transitions[:num_tags, :num_tags] = constrained_transitions.data
if self.include_start_end_transitions:
transitions[
start_tag, :num_tags
] = self.start_transitions.detach() * self._constraint_mask[
start_tag, :num_tags
].data + -10000.0 * (
1 - self._constraint_mask[start_tag, :num_tags].detach()
)
transitions[:num_tags, end_tag] = self.end_transitions.detach() * self._constraint_mask[
:num_tags, end_tag
].data + -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach())
else:
transitions[start_tag, :num_tags] = -10000.0 * (
1 - self._constraint_mask[start_tag, :num_tags].detach()
)
transitions[:num_tags, end_tag] = -10000.0 * (
1 - self._constraint_mask[:num_tags, end_tag].detach()
)
best_paths = []
# Pad the max sequence length by 2 to account for start_tag + end_tag.
tag_sequence = torch.Tensor(max_seq_length + 2, num_tags + 2)
for prediction, prediction_mask in zip(logits, mask):
mask_indices = prediction_mask.nonzero(as_tuple=False).squeeze()
masked_prediction = torch.index_select(prediction, 0, mask_indices)
sequence_length = masked_prediction.shape[0]
# Start with everything totally unlikely
tag_sequence.fill_(-10000.0)
# At timestep 0 we must have the START_TAG
tag_sequence[0, start_tag] = 0.0
# At steps 1, ..., sequence_length we just use the incoming prediction
tag_sequence[1 : (sequence_length + 1), :num_tags] = masked_prediction
# And at the last timestep we must have the END_TAG
tag_sequence[sequence_length + 1, end_tag] = 0.0
# We pass the tags and the transitions to `viterbi_decode`.
viterbi_paths, viterbi_scores = util.viterbi_decode(
tag_sequence=tag_sequence[: (sequence_length + 2)],
transition_matrix=transitions,
top_k=top_k,
)
top_k_paths = []
for viterbi_path, viterbi_score in zip(viterbi_paths, viterbi_scores):
# Get rid of START and END sentinels and append.
viterbi_path = viterbi_path[1:-1]
top_k_paths.append((viterbi_path, viterbi_score.item()))
best_paths.append(top_k_paths)
if flatten_output:
return [top_k_paths[0] for top_k_paths in best_paths]
return best_paths
| allennlp-master | allennlp/modules/conditional_random_field.py |
from typing import Optional, Tuple, List
import torch
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.modules.input_variational_dropout import InputVariationalDropout
from allennlp.common.checks import ConfigurationError
TensorPair = Tuple[torch.Tensor, torch.Tensor]
class StackedBidirectionalLstm(torch.nn.Module):
"""
A standard stacked Bidirectional LSTM where the LSTM layers
are concatenated between each layer. The only difference between
this and a regular bidirectional LSTM is the application of
variational dropout to the hidden states and outputs of each layer apart
from the last layer of the LSTM. Note that this will be slower, as it
doesn't use CUDNN.
[0]: https://arxiv.org/abs/1512.05287
# Parameters
input_size : `int`, required
The dimension of the inputs to the LSTM.
hidden_size : `int`, required
The dimension of the outputs of the LSTM.
num_layers : `int`, required
The number of stacked Bidirectional LSTMs to use.
recurrent_dropout_probability : `float`, optional (default = `0.0`)
The recurrent dropout probability to be used in a dropout scheme as
stated in [A Theoretically Grounded Application of Dropout in Recurrent
Neural Networks][0].
layer_dropout_probability : `float`, optional (default = `0.0`)
The layer wise dropout probability to be used in a dropout scheme as
stated in [A Theoretically Grounded Application of Dropout in Recurrent
Neural Networks][0].
use_highway : `bool`, optional (default = `True`)
Whether or not to use highway connections between layers. This effectively involves
reparameterising the normal output of an LSTM as::
gate = sigmoid(W_x1 * x_t + W_h * h_t)
output = gate * h_t + (1 - gate) * (W_x2 * x_t)
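    # Examples
    A minimal usage sketch; `pack_padded_sequence` with its default arguments expects
    lengths sorted in decreasing order:
    ```python
    from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
    lstm = StackedBidirectionalLstm(input_size=10, hidden_size=20, num_layers=2)
    packed = pack_padded_sequence(torch.randn(3, 7, 10), torch.tensor([7, 5, 2]), batch_first=True)
    packed_output, (final_h, final_c) = lstm(packed)
    output, _ = pad_packed_sequence(packed_output, batch_first=True)  # (3, 7, 40)
    ```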
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
layer_dropout_probability: float = 0.0,
use_highway: bool = True,
) -> None:
super().__init__()
# Required to be wrapped with a `PytorchSeq2SeqWrapper`.
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bidirectional = True
layers = []
lstm_input_size = input_size
for layer_index in range(num_layers):
forward_layer = AugmentedLstm(
lstm_input_size,
hidden_size,
go_forward=True,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=False,
)
backward_layer = AugmentedLstm(
lstm_input_size,
hidden_size,
go_forward=False,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=False,
)
lstm_input_size = hidden_size * 2
self.add_module("forward_layer_{}".format(layer_index), forward_layer)
self.add_module("backward_layer_{}".format(layer_index), backward_layer)
layers.append([forward_layer, backward_layer])
self.lstm_layers = layers
self.layer_dropout = InputVariationalDropout(layer_dropout_probability)
def forward(
self, inputs: PackedSequence, initial_state: Optional[TensorPair] = None
) -> Tuple[PackedSequence, TensorPair]:
"""
# Parameters
inputs : `PackedSequence`, required.
A batch first `PackedSequence` to run the stacked LSTM over.
initial_state : `Tuple[torch.Tensor, torch.Tensor]`, optional, (default = `None`)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. Each tensor has shape (num_layers, batch_size, output_dimension * 2).
# Returns
output_sequence : `PackedSequence`
The encoded sequence of shape (batch_size, sequence_length, hidden_size * 2)
final_states: `torch.Tensor`
The per-layer final (state, memory) states of the LSTM, each with shape
(num_layers * 2, batch_size, hidden_size * 2).
"""
if initial_state is None:
hidden_states: List[Optional[TensorPair]] = [None] * len(self.lstm_layers)
elif initial_state[0].size()[0] != len(self.lstm_layers):
raise ConfigurationError(
"Initial states were passed to forward() but the number of "
"initial states does not match the number of layers."
)
else:
hidden_states = list(zip(initial_state[0].split(1, 0), initial_state[1].split(1, 0)))
output_sequence = inputs
final_h = []
final_c = []
for i, state in enumerate(hidden_states):
forward_layer = getattr(self, "forward_layer_{}".format(i))
backward_layer = getattr(self, "backward_layer_{}".format(i))
# The state is duplicated to mirror the Pytorch API for LSTMs.
forward_output, final_forward_state = forward_layer(output_sequence, state)
backward_output, final_backward_state = backward_layer(output_sequence, state)
forward_output, lengths = pad_packed_sequence(forward_output, batch_first=True)
backward_output, _ = pad_packed_sequence(backward_output, batch_first=True)
output_sequence = torch.cat([forward_output, backward_output], -1)
            # Apply layer-wise dropout to each layer's output sequence except for the
            # last layer's output.
if i < (self.num_layers - 1):
output_sequence = self.layer_dropout(output_sequence)
output_sequence = pack_padded_sequence(output_sequence, lengths, batch_first=True)
final_h.extend([final_forward_state[0], final_backward_state[0]])
final_c.extend([final_forward_state[1], final_backward_state[1]])
final_h = torch.cat(final_h, dim=0)
final_c = torch.cat(final_c, dim=0)
final_state_tuple = (final_h, final_c)
return output_sequence, final_state_tuple
| allennlp-master | allennlp/modules/stacked_bidirectional_lstm.py |
"""
A [Highway layer](https://arxiv.org/abs/1505.00387) that does a gated combination of a linear
transformation and a non-linear transformation of its input.
"""
from typing import Callable
import torch
from overrides import overrides
class Highway(torch.nn.Module):
"""
A [Highway layer](https://arxiv.org/abs/1505.00387) does a gated combination of a linear
transformation and a non-linear transformation of its input. :math:`y = g * x + (1 - g) *
f(A(x))`, where :math:`A` is a linear transformation, :math:`f` is an element-wise
non-linearity, and :math:`g` is an element-wise gate, computed as :math:`sigmoid(B(x))`.
This module will apply a fixed number of highway layers to its input, returning the final
result.
# Parameters
input_dim : `int`, required
The dimensionality of :math:`x`. We assume the input has shape `(batch_size, ...,
input_dim)`.
num_layers : `int`, optional (default=`1`)
The number of highway layers to apply to the input.
activation : `Callable[[torch.Tensor], torch.Tensor]`, optional (default=`torch.nn.functional.relu`)
The non-linearity to use in the highway layers.
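    # Examples
    A minimal usage sketch:
    ```python
    highway = Highway(input_dim=10, num_layers=2)
    outputs = highway(torch.randn(4, 10))  # shape (4, 10)
    ```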
"""
def __init__(
self,
input_dim: int,
num_layers: int = 1,
activation: Callable[[torch.Tensor], torch.Tensor] = torch.nn.functional.relu,
) -> None:
super().__init__()
self._input_dim = input_dim
self._layers = torch.nn.ModuleList(
[torch.nn.Linear(input_dim, input_dim * 2) for _ in range(num_layers)]
)
self._activation = activation
for layer in self._layers:
# We should bias the highway layer to just carry its input forward. We do that by
# setting the bias on `B(x)` to be positive, because that means `g` will be biased to
# be high, so we will carry the input forward. The bias on `B(x)` is the second half
# of the bias vector in each Linear layer.
layer.bias[input_dim:].data.fill_(1)
@overrides
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
current_input = inputs
for layer in self._layers:
projected_input = layer(current_input)
linear_part = current_input
# NOTE: if you modify this, think about whether you should modify the initialization
# above, too.
nonlinear_part, gate = projected_input.chunk(2, dim=-1)
nonlinear_part = self._activation(nonlinear_part)
gate = torch.sigmoid(gate)
current_input = gate * linear_part + (1 - gate) * nonlinear_part
return current_input
| allennlp-master | allennlp/modules/highway.py |
import torch
class InputVariationalDropout(torch.nn.Dropout):
"""
Apply the dropout technique in Gal and Ghahramani, [Dropout as a Bayesian Approximation:
Representing Model Uncertainty in Deep Learning](https://arxiv.org/abs/1506.02142) to a
3D tensor.
This module accepts a 3D tensor of shape `(batch_size, num_timesteps, embedding_dim)`
and samples a single dropout mask of shape `(batch_size, embedding_dim)` and applies
it to every time step.
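    # Examples
    A minimal usage sketch; in training mode a single `(batch_size, embedding_dim)` mask is
    sampled and broadcast across the time dimension:
    ```python
    dropout = InputVariationalDropout(p=0.5)
    outputs = dropout(torch.randn(4, 7, 10))  # shape (4, 7, 10)
    ```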
"""
def forward(self, input_tensor):
"""
Apply dropout to input tensor.
# Parameters
input_tensor : `torch.FloatTensor`
A tensor of shape `(batch_size, num_timesteps, embedding_dim)`
# Returns
output : `torch.FloatTensor`
A tensor of shape `(batch_size, num_timesteps, embedding_dim)` with dropout applied.
"""
ones = input_tensor.data.new_ones(input_tensor.shape[0], input_tensor.shape[-1])
dropout_mask = torch.nn.functional.dropout(ones, self.p, self.training, inplace=False)
if self.inplace:
input_tensor *= dropout_mask.unsqueeze(1)
return None
else:
return dropout_mask.unsqueeze(1) * input_tensor
| allennlp-master | allennlp/modules/input_variational_dropout.py |
import torch
from allennlp.nn import util
class MaskedLayerNorm(torch.nn.Module):
"""
See LayerNorm for details.
Note, however, that unlike LayerNorm this norm includes a batch component.
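    # Examples
    A minimal usage sketch:
    ```python
    norm = MaskedLayerNorm(size=10)
    tensor = torch.randn(2, 5, 10)
    mask = torch.ones(2, 5, dtype=torch.bool)
    normed = norm(tensor, mask)  # same shape as `tensor`
    ```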
"""
def __init__(self, size: int, gamma0: float = 0.1) -> None:
super().__init__()
self.gamma = torch.nn.Parameter(torch.ones(1, 1, size) * gamma0)
self.beta = torch.nn.Parameter(torch.zeros(1, 1, size))
self.size = size
def forward(self, tensor: torch.Tensor, mask: torch.BoolTensor) -> torch.Tensor:
broadcast_mask = mask.unsqueeze(-1)
num_elements = broadcast_mask.sum() * self.size
mean = (tensor * broadcast_mask).sum() / num_elements
masked_centered = (tensor - mean) * broadcast_mask
std = torch.sqrt(
(masked_centered * masked_centered).sum() / num_elements
+ util.tiny_value_of_dtype(tensor.dtype)
)
return (
self.gamma * (tensor - mean) / (std + util.tiny_value_of_dtype(tensor.dtype))
+ self.beta
)
| allennlp-master | allennlp/modules/masked_layer_norm.py |
"""
A stacked bidirectional LSTM with skip connections between layers.
"""
import warnings
from typing import List, Optional, Tuple, Any
import numpy
import torch
from torch.nn.utils.rnn import PackedSequence, pad_packed_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.modules.encoder_base import _EncoderBase
from allennlp.modules.lstm_cell_with_projection import LstmCellWithProjection
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import h5py
class ElmoLstm(_EncoderBase):
"""
A stacked, bidirectional LSTM which uses
[`LstmCellWithProjection`'s](./lstm_cell_with_projection.md)
with highway layers between the inputs to layers.
The inputs to the forward and backward directions are independent - forward and backward
states are not concatenated between layers.
Additionally, this LSTM maintains its `own` state, which is updated every time
`forward` is called. It is dynamically resized for different batch sizes and is
designed for use with non-continuous inputs (i.e. inputs which aren't formatted as a stream,
such as text used for a language modeling task, which is how stateful RNNs are typically used).
This is non-standard, but can be thought of as having an "end of sentence" state, which is
carried across different sentences.
[0]: https://arxiv.org/abs/1512.05287
# Parameters
input_size : `int`, required
The dimension of the inputs to the LSTM.
hidden_size : `int`, required
The dimension of the outputs of the LSTM.
cell_size : `int`, required.
The dimension of the memory cell of the `LstmCellWithProjection`.
num_layers : `int`, required
The number of bidirectional LSTMs to use.
requires_grad : `bool`, optional
If True, compute gradient of ELMo parameters for fine tuning.
recurrent_dropout_probability : `float`, optional (default = `0.0`)
The dropout probability to be used in a dropout scheme as stated in
[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks][0].
state_projection_clip_value : `float`, optional, (default = `None`)
The magnitude with which to clip the hidden_state after projecting it.
memory_cell_clip_value : `float`, optional, (default = `None`)
The magnitude with which to clip the memory cell.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
cell_size: int,
num_layers: int,
requires_grad: bool = False,
recurrent_dropout_probability: float = 0.0,
memory_cell_clip_value: Optional[float] = None,
state_projection_clip_value: Optional[float] = None,
) -> None:
super().__init__(stateful=True)
# Required to be wrapped with a `PytorchSeq2SeqWrapper`.
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.cell_size = cell_size
self.requires_grad = requires_grad
forward_layers = []
backward_layers = []
lstm_input_size = input_size
go_forward = True
for layer_index in range(num_layers):
forward_layer = LstmCellWithProjection(
lstm_input_size,
hidden_size,
cell_size,
go_forward,
recurrent_dropout_probability,
memory_cell_clip_value,
state_projection_clip_value,
)
backward_layer = LstmCellWithProjection(
lstm_input_size,
hidden_size,
cell_size,
not go_forward,
recurrent_dropout_probability,
memory_cell_clip_value,
state_projection_clip_value,
)
lstm_input_size = hidden_size
self.add_module("forward_layer_{}".format(layer_index), forward_layer)
self.add_module("backward_layer_{}".format(layer_index), backward_layer)
forward_layers.append(forward_layer)
backward_layers.append(backward_layer)
self.forward_layers = forward_layers
self.backward_layers = backward_layers
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor) -> torch.Tensor:
"""
# Parameters
inputs : `torch.Tensor`, required.
A Tensor of shape `(batch_size, sequence_length, input_size)`.
mask : `torch.BoolTensor`, required.
A binary mask of shape `(batch_size, sequence_length)` representing the
non-padded elements in each sequence in the batch.
# Returns
`torch.Tensor`
A `torch.Tensor` of shape (num_layers, batch_size, sequence_length, 2 * hidden_size),
where the num_layers dimension represents the LSTM output from that layer and the last
dimension concatenates the forward and backward directions.
"""
batch_size, total_sequence_length = mask.size()
stacked_sequence_output, final_states, restoration_indices = self.sort_and_run_forward(
self._lstm_forward, inputs, mask
)
num_layers, num_valid, returned_timesteps, encoder_dim = stacked_sequence_output.size()
# Add back invalid rows which were removed in the call to sort_and_run_forward.
if num_valid < batch_size:
zeros = stacked_sequence_output.new_zeros(
num_layers, batch_size - num_valid, returned_timesteps, encoder_dim
)
stacked_sequence_output = torch.cat([stacked_sequence_output, zeros], 1)
# The states also need to have invalid rows added back.
new_states = []
for state in final_states:
state_dim = state.size(-1)
zeros = state.new_zeros(num_layers, batch_size - num_valid, state_dim)
new_states.append(torch.cat([state, zeros], 1))
final_states = new_states
# It's possible to need to pass sequences which are padded to longer than the
# max length of the sequence to a Seq2StackEncoder. However, packing and unpacking
# the sequences mean that the returned tensor won't include these dimensions, because
# the RNN did not need to process them. We add them back on in the form of zeros here.
sequence_length_difference = total_sequence_length - returned_timesteps
if sequence_length_difference > 0:
zeros = stacked_sequence_output.new_zeros(
num_layers,
batch_size,
sequence_length_difference,
stacked_sequence_output[0].size(-1),
)
stacked_sequence_output = torch.cat([stacked_sequence_output, zeros], 2)
self._update_states(final_states, restoration_indices)
# Restore the original indices and return the sequence.
# Has shape (num_layers, batch_size, sequence_length, hidden_size)
return stacked_sequence_output.index_select(1, restoration_indices)
def _lstm_forward(
self,
inputs: PackedSequence,
initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
# Parameters
inputs : `PackedSequence`, required.
A batch first `PackedSequence` to run the stacked LSTM over.
initial_state : `Tuple[torch.Tensor, torch.Tensor]`, optional, (default = `None`)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM, with shape (num_layers, batch_size, 2 * hidden_size) and
(num_layers, batch_size, 2 * cell_size) respectively.
# Returns
output_sequence : `torch.FloatTensor`
The encoded sequence of shape (num_layers, batch_size, sequence_length, 2 * hidden_size)
final_states : `Tuple[torch.FloatTensor, torch.FloatTensor]`
The per-layer final (state, memory) states of the LSTM, with shape
(num_layers, batch_size, 2 * hidden_size) and (num_layers, batch_size, 2 * cell_size)
respectively. The last dimension is duplicated because it contains the state/memory
for both the forward and backward layers.
"""
if initial_state is None:
hidden_states: List[Optional[Tuple[torch.Tensor, torch.Tensor]]] = [None] * len(
self.forward_layers
)
elif initial_state[0].size()[0] != len(self.forward_layers):
raise ConfigurationError(
"Initial states were passed to forward() but the number of "
"initial states does not match the number of layers."
)
else:
hidden_states = list(zip(initial_state[0].split(1, 0), initial_state[1].split(1, 0)))
inputs, batch_lengths = pad_packed_sequence(inputs, batch_first=True)
forward_output_sequence = inputs
backward_output_sequence = inputs
final_states = []
sequence_outputs = []
for layer_index, state in enumerate(hidden_states):
forward_layer = getattr(self, "forward_layer_{}".format(layer_index))
backward_layer = getattr(self, "backward_layer_{}".format(layer_index))
forward_cache = forward_output_sequence
backward_cache = backward_output_sequence
forward_state: Optional[Tuple[Any, Any]] = None
backward_state: Optional[Tuple[Any, Any]] = None
if state is not None:
forward_hidden_state, backward_hidden_state = state[0].split(self.hidden_size, 2)
forward_memory_state, backward_memory_state = state[1].split(self.cell_size, 2)
forward_state = (forward_hidden_state, forward_memory_state)
backward_state = (backward_hidden_state, backward_memory_state)
forward_output_sequence, forward_state = forward_layer(
forward_output_sequence, batch_lengths, forward_state
)
backward_output_sequence, backward_state = backward_layer(
backward_output_sequence, batch_lengths, backward_state
)
# Skip connections, just adding the input to the output.
if layer_index != 0:
forward_output_sequence += forward_cache
backward_output_sequence += backward_cache
sequence_outputs.append(
torch.cat([forward_output_sequence, backward_output_sequence], -1)
)
# Append the state tuples in a list, so that we can return
# the final states for all the layers.
final_states.append(
(
torch.cat([forward_state[0], backward_state[0]], -1), # type: ignore
torch.cat([forward_state[1], backward_state[1]], -1), # type: ignore
)
)
stacked_sequence_outputs: torch.FloatTensor = torch.stack(sequence_outputs)
# Stack the hidden state and memory for each layer into 2 tensors of shape
# (num_layers, batch_size, hidden_size) and (num_layers, batch_size, cell_size)
# respectively.
final_hidden_states, final_memory_states = zip(*final_states)
final_state_tuple: Tuple[torch.FloatTensor, torch.FloatTensor] = (
torch.cat(final_hidden_states, 0),
torch.cat(final_memory_states, 0),
)
return stacked_sequence_outputs, final_state_tuple
def load_weights(self, weight_file: str) -> None:
"""
Load the pre-trained weights from the file.
"""
requires_grad = self.requires_grad
with h5py.File(cached_path(weight_file), "r") as fin:
for i_layer, lstms in enumerate(zip(self.forward_layers, self.backward_layers)):
for j_direction, lstm in enumerate(lstms):
# lstm is an instance of LSTMCellWithProjection
cell_size = lstm.cell_size
dataset = fin["RNN_%s" % j_direction]["RNN"]["MultiRNNCell"][
"Cell%s" % i_layer
]["LSTMCell"]
# tensorflow packs together both W and U matrices into one matrix,
# but pytorch maintains individual matrices. In addition, tensorflow
# packs the gates as input, memory, forget, output but pytorch
# uses input, forget, memory, output. So we need to modify the weights.
tf_weights = numpy.transpose(dataset["W_0"][...])
torch_weights = tf_weights.copy()
# split the W from U matrices
input_size = lstm.input_size
input_weights = torch_weights[:, :input_size]
recurrent_weights = torch_weights[:, input_size:]
tf_input_weights = tf_weights[:, :input_size]
tf_recurrent_weights = tf_weights[:, input_size:]
# handle the different gate order convention
for torch_w, tf_w in [
[input_weights, tf_input_weights],
[recurrent_weights, tf_recurrent_weights],
]:
torch_w[(1 * cell_size) : (2 * cell_size), :] = tf_w[
(2 * cell_size) : (3 * cell_size), :
]
torch_w[(2 * cell_size) : (3 * cell_size), :] = tf_w[
(1 * cell_size) : (2 * cell_size), :
]
lstm.input_linearity.weight.data.copy_(torch.FloatTensor(input_weights))
lstm.state_linearity.weight.data.copy_(torch.FloatTensor(recurrent_weights))
lstm.input_linearity.weight.requires_grad = requires_grad
lstm.state_linearity.weight.requires_grad = requires_grad
# the bias weights
tf_bias = dataset["B"][...]
# tensorflow adds 1.0 to forget gate bias instead of modifying the
# parameters...
tf_bias[(2 * cell_size) : (3 * cell_size)] += 1
torch_bias = tf_bias.copy()
torch_bias[(1 * cell_size) : (2 * cell_size)] = tf_bias[
(2 * cell_size) : (3 * cell_size)
]
torch_bias[(2 * cell_size) : (3 * cell_size)] = tf_bias[
(1 * cell_size) : (2 * cell_size)
]
lstm.state_linearity.bias.data.copy_(torch.FloatTensor(torch_bias))
lstm.state_linearity.bias.requires_grad = requires_grad
# the projection weights
proj_weights = numpy.transpose(dataset["W_P_0"][...])
lstm.state_projection.weight.data.copy_(torch.FloatTensor(proj_weights))
lstm.state_projection.weight.requires_grad = requires_grad
| allennlp-master | allennlp/modules/elmo_lstm.py |
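# Usage sketch (not part of the original file): running the stacked bidirectional ELMo LSTM
# directly on a small random batch.
import torch
from allennlp.modules.elmo_lstm import ElmoLstm

lstm = ElmoLstm(input_size=4, hidden_size=4, cell_size=8, num_layers=2)
inputs = torch.randn(2, 5, 4)
mask = torch.ones(2, 5, dtype=torch.bool)
outputs = lstm(inputs, mask)
# (num_layers, batch_size, sequence_length, 2 * hidden_size) -> torch.Size([2, 2, 5, 8])
print(outputs.shape)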
"""
An LSTM with Recurrent Dropout, a hidden_state which is projected and
clipping on both the hidden state and the memory state of the LSTM.
"""
from typing import Optional, Tuple, List
import torch
from allennlp.nn.util import get_dropout_mask
from allennlp.nn.initializers import block_orthogonal
class LstmCellWithProjection(torch.nn.Module):
"""
An LSTM with Recurrent Dropout and a projected and clipped hidden state and
memory. Note: this implementation is slower than the native Pytorch LSTM because
it cannot make use of CUDNN optimizations for stacked RNNs due to the
variational dropout and the custom nature of the cell state.
[0]: https://arxiv.org/abs/1512.05287
# Parameters
input_size : `int`, required.
The dimension of the inputs to the LSTM.
hidden_size : `int`, required.
The dimension of the outputs of the LSTM.
cell_size : `int`, required.
The dimension of the memory cell used for the LSTM.
go_forward : `bool`, optional (default = `True`)
The direction in which the LSTM is applied to the sequence.
Forwards by default, or backwards if False.
recurrent_dropout_probability : `float`, optional (default = `0.0`)
The dropout probability to be used in a dropout scheme as stated in
[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks]
[0]. Implementation wise, this simply
applies a fixed dropout mask per sequence to the recurrent connection of the
LSTM.
state_projection_clip_value : `float`, optional, (default = `None`)
The magnitude with which to clip the hidden_state after projecting it.
memory_cell_clip_value : `float`, optional, (default = `None`)
The magnitude with which to clip the memory cell.
# Returns
output_accumulator : `torch.FloatTensor`
The outputs of the LSTM for each timestep. A tensor of shape
(batch_size, max_timesteps, hidden_size) where for a given batch
element, all outputs past the sequence length for that batch are
zero tensors.
final_state : `Tuple[torch.FloatTensor, torch.FloatTensor]`
The final (state, memory) states of the LSTM, with shape
(1, batch_size, hidden_size) and (1, batch_size, cell_size)
respectively. The first dimension is 1 in order to match the Pytorch
API for returning stacked LSTM states.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
cell_size: int,
go_forward: bool = True,
recurrent_dropout_probability: float = 0.0,
memory_cell_clip_value: Optional[float] = None,
state_projection_clip_value: Optional[float] = None,
) -> None:
super().__init__()
# Required to be wrapped with a `PytorchSeq2SeqWrapper`.
self.input_size = input_size
self.hidden_size = hidden_size
self.cell_size = cell_size
self.go_forward = go_forward
self.state_projection_clip_value = state_projection_clip_value
self.memory_cell_clip_value = memory_cell_clip_value
self.recurrent_dropout_probability = recurrent_dropout_probability
# We do the projections for all the gates all at once.
self.input_linearity = torch.nn.Linear(input_size, 4 * cell_size, bias=False)
self.state_linearity = torch.nn.Linear(hidden_size, 4 * cell_size, bias=True)
# Additional projection matrix for making the hidden state smaller.
self.state_projection = torch.nn.Linear(cell_size, hidden_size, bias=False)
self.reset_parameters()
def reset_parameters(self):
# Use sensible default initializations for parameters.
block_orthogonal(self.input_linearity.weight.data, [self.cell_size, self.input_size])
block_orthogonal(self.state_linearity.weight.data, [self.cell_size, self.hidden_size])
self.state_linearity.bias.data.fill_(0.0)
# Initialize forget gate biases to 1.0 as per An Empirical
# Exploration of Recurrent Network Architectures, (Jozefowicz, 2015).
self.state_linearity.bias.data[self.cell_size : 2 * self.cell_size].fill_(1.0)
def forward(
self,
inputs: torch.FloatTensor,
batch_lengths: List[int],
initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
):
"""
# Parameters
inputs : `torch.FloatTensor`, required.
A tensor of shape (batch_size, num_timesteps, input_size)
to apply the LSTM over.
batch_lengths : `List[int]`, required.
A list of length batch_size containing the lengths of the sequences in batch.
initial_state : `Tuple[torch.Tensor, torch.Tensor]`, optional, (default = `None`)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. The `state` has shape (1, batch_size, hidden_size) and the
`memory` has shape (1, batch_size, cell_size).
# Returns
output_accumulator : `torch.FloatTensor`
The outputs of the LSTM for each timestep. A tensor of shape
(batch_size, max_timesteps, hidden_size) where for a given batch
element, all outputs past the sequence length for that batch are
zero tensors.
final_state : `Tuple[torch.FloatTensor, torch.FloatTensor]`
A tuple (state, memory) representing the final hidden state and memory
of the LSTM. The `state` has shape (1, batch_size, hidden_size) and the
`memory` has shape (1, batch_size, cell_size).
"""
batch_size = inputs.size()[0]
total_timesteps = inputs.size()[1]
output_accumulator = inputs.new_zeros(batch_size, total_timesteps, self.hidden_size)
if initial_state is None:
full_batch_previous_memory = inputs.new_zeros(batch_size, self.cell_size)
full_batch_previous_state = inputs.new_zeros(batch_size, self.hidden_size)
else:
full_batch_previous_state = initial_state[0].squeeze(0)
full_batch_previous_memory = initial_state[1].squeeze(0)
current_length_index = batch_size - 1 if self.go_forward else 0
if self.recurrent_dropout_probability > 0.0 and self.training:
dropout_mask = get_dropout_mask(
self.recurrent_dropout_probability, full_batch_previous_state
)
else:
dropout_mask = None
for timestep in range(total_timesteps):
# The index depends on which end we start.
index = timestep if self.go_forward else total_timesteps - timestep - 1
# What we are doing here is finding the index into the batch dimension
# which we need to use for this timestep, because the sequences have
# variable length, so once the index is greater than the length of this
# particular batch sequence, we no longer need to do the computation for
# this sequence. The key thing to recognise here is that the batch inputs
# must be _ordered_ by length from longest (first in batch) to shortest
# (last) so initially, we are going forwards with every sequence and as we
# pass the index at which the shortest elements of the batch finish,
# we stop picking them up for the computation.
if self.go_forward:
while batch_lengths[current_length_index] <= index:
current_length_index -= 1
# If we're going backwards, we are _picking up_ more indices.
else:
# First conditional: Are we already at the maximum number of elements in the batch?
# Second conditional: Does the next shortest sequence beyond the current batch
# index require computation at this timestep?
while (
current_length_index < (len(batch_lengths) - 1)
and batch_lengths[current_length_index + 1] > index
):
current_length_index += 1
# Actually get the slices of the batch which we
# need for the computation at this timestep.
# shape (batch_size, cell_size)
previous_memory = full_batch_previous_memory[0 : current_length_index + 1].clone()
# Shape (batch_size, hidden_size)
previous_state = full_batch_previous_state[0 : current_length_index + 1].clone()
# Shape (batch_size, input_size)
timestep_input = inputs[0 : current_length_index + 1, index]
# Do the projections for all the gates all at once.
# Both have shape (batch_size, 4 * cell_size)
projected_input = self.input_linearity(timestep_input)
projected_state = self.state_linearity(previous_state)
# Main LSTM equations using relevant chunks of the big linear
# projections of the hidden state and inputs.
input_gate = torch.sigmoid(
projected_input[:, (0 * self.cell_size) : (1 * self.cell_size)]
+ projected_state[:, (0 * self.cell_size) : (1 * self.cell_size)]
)
forget_gate = torch.sigmoid(
projected_input[:, (1 * self.cell_size) : (2 * self.cell_size)]
+ projected_state[:, (1 * self.cell_size) : (2 * self.cell_size)]
)
memory_init = torch.tanh(
projected_input[:, (2 * self.cell_size) : (3 * self.cell_size)]
+ projected_state[:, (2 * self.cell_size) : (3 * self.cell_size)]
)
output_gate = torch.sigmoid(
projected_input[:, (3 * self.cell_size) : (4 * self.cell_size)]
+ projected_state[:, (3 * self.cell_size) : (4 * self.cell_size)]
)
memory = input_gate * memory_init + forget_gate * previous_memory
# Here is the non-standard part of this LSTM cell; first, we clip the
# memory cell, then we project the output of the timestep to a smaller size
# and again clip it.
if self.memory_cell_clip_value:
memory = torch.clamp(
memory, -self.memory_cell_clip_value, self.memory_cell_clip_value
)
# shape (current_length_index, cell_size)
pre_projection_timestep_output = output_gate * torch.tanh(memory)
# shape (current_length_index, hidden_size)
timestep_output = self.state_projection(pre_projection_timestep_output)
if self.state_projection_clip_value:
timestep_output = torch.clamp(
timestep_output,
-self.state_projection_clip_value,
self.state_projection_clip_value,
)
# Only do dropout if the dropout prob is > 0.0 and we are in training mode.
if dropout_mask is not None:
timestep_output = timestep_output * dropout_mask[0 : current_length_index + 1]
# We've been doing computation with less than the full batch, so here we create a new
# variable for the whole batch at this timestep and insert the result for the
# relevant elements of the batch into it.
full_batch_previous_memory = full_batch_previous_memory.clone()
full_batch_previous_state = full_batch_previous_state.clone()
full_batch_previous_memory[0 : current_length_index + 1] = memory
full_batch_previous_state[0 : current_length_index + 1] = timestep_output
output_accumulator[0 : current_length_index + 1, index] = timestep_output
# Mimic the pytorch API by returning state in the following shape:
# (num_layers * num_directions, batch_size, ...). As this
# LSTM cell cannot be stacked, the first dimension here is just 1.
final_state = (
full_batch_previous_state.unsqueeze(0),
full_batch_previous_memory.unsqueeze(0),
)
return output_accumulator, final_state
| allennlp-master | allennlp/modules/lstm_cell_with_projection.py |
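# Usage sketch (not part of the original file): a single projected LSTM cell run over a batch
# that is already sorted by decreasing sequence length.
import torch
from allennlp.modules.lstm_cell_with_projection import LstmCellWithProjection

cell = LstmCellWithProjection(input_size=10, hidden_size=6, cell_size=8)
inputs = torch.randn(2, 3, 10)
output, (state, memory) = cell(inputs, batch_lengths=[3, 2])
# output: (2, 3, 6); state: (1, 2, 6); memory: (1, 2, 8)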
"""
An LSTM with Recurrent Dropout and the option to use highway
connections between layers.
Based on PyText version (that was based on a previous AllenNLP version)
"""
from typing import Optional, Tuple
import torch
from allennlp.common.checks import ConfigurationError
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from allennlp.nn.initializers import block_orthogonal
from allennlp.nn.util import get_dropout_mask
class AugmentedLSTMCell(torch.nn.Module):
"""
`AugmentedLSTMCell` implements an AugmentedLSTM cell.
# Parameters
embed_dim : `int`
The number of expected features in the input.
lstm_dim : `int`
Number of features in the hidden state of the LSTM.
use_highway : `bool`, optional (default = `True`)
If `True` we append a highway network to the outputs of the LSTM.
use_bias : `bool`, optional (default = `True`)
If `True` we use a bias in our LSTM calculations, otherwise we don't.
# Attributes
input_linearity : `nn.Module`
Fused weight matrix which computes a linear function over the input.
state_linearity : `nn.Module`
Fused weight matrix which computes a linear function over the states.
"""
def __init__(
self, embed_dim: int, lstm_dim: int, use_highway: bool = True, use_bias: bool = True
):
super().__init__()
self.embed_dim = embed_dim
self.lstm_dim = lstm_dim
self.use_highway = use_highway
self.use_bias = use_bias
if use_highway:
self._highway_inp_proj_start = 5 * self.lstm_dim
self._highway_inp_proj_end = 6 * self.lstm_dim
# fused linearity of input to input_gate,
# forget_gate, memory_init, output_gate, highway_gate,
# and the actual highway value
self.input_linearity = torch.nn.Linear(
self.embed_dim, self._highway_inp_proj_end, bias=self.use_bias
)
# fused linearity of input to input_gate,
# forget_gate, memory_init, output_gate, highway_gate
self.state_linearity = torch.nn.Linear(
self.lstm_dim, self._highway_inp_proj_start, bias=True
)
else:
# If there's no highway layer then we have a standard
# LSTM. The 4 comes from fusing input, forget, memory, output
# gates/inputs.
self.input_linearity = torch.nn.Linear(
self.embed_dim, 4 * self.lstm_dim, bias=self.use_bias
)
self.state_linearity = torch.nn.Linear(self.lstm_dim, 4 * self.lstm_dim, bias=True)
self.reset_parameters()
def reset_parameters(self):
# Use sensible default initializations for parameters.
block_orthogonal(self.input_linearity.weight.data, [self.lstm_dim, self.embed_dim])
block_orthogonal(self.state_linearity.weight.data, [self.lstm_dim, self.lstm_dim])
self.state_linearity.bias.data.fill_(0.0)
# Initialize forget gate biases to 1.0 as per An Empirical
# Exploration of Recurrent Network Architectures, (Jozefowicz, 2015).
self.state_linearity.bias.data[self.lstm_dim : 2 * self.lstm_dim].fill_(1.0)
def forward(
self,
x: torch.Tensor,
states: Tuple[torch.Tensor, torch.Tensor],
variational_dropout_mask: Optional[torch.BoolTensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
!!! Warning
DO NOT USE THIS LAYER DIRECTLY, instead use the AugmentedLSTM class
# Parameters
x : `torch.Tensor`
Input tensor of shape (bsize x input_dim).
states : `Tuple[torch.Tensor, torch.Tensor]`
Tuple of tensors containing
the hidden state and the cell state of each element in
the batch. Each of these tensors has a dimension of
(bsize x nhid).
# Returns
`Tuple[torch.Tensor, torch.Tensor]`
Returned states. Shape of each state is (bsize x nhid).
"""
hidden_state, memory_state = states
# In PyText this was done as the last step of the cell.
# But in the original AugmentedLSTM from AllenNLP it was done before the processing.
if variational_dropout_mask is not None and self.training:
hidden_state = hidden_state * variational_dropout_mask
projected_input = self.input_linearity(x)
projected_state = self.state_linearity(hidden_state)
input_gate = forget_gate = memory_init = output_gate = highway_gate = None
if self.use_highway:
fused_op = projected_input[:, : 5 * self.lstm_dim] + projected_state
fused_chunked = torch.chunk(fused_op, 5, 1)
(input_gate, forget_gate, memory_init, output_gate, highway_gate) = fused_chunked
highway_gate = torch.sigmoid(highway_gate)
else:
fused_op = projected_input + projected_state
input_gate, forget_gate, memory_init, output_gate = torch.chunk(fused_op, 4, 1)
input_gate = torch.sigmoid(input_gate)
forget_gate = torch.sigmoid(forget_gate)
memory_init = torch.tanh(memory_init)
output_gate = torch.sigmoid(output_gate)
memory = input_gate * memory_init + forget_gate * memory_state
timestep_output: torch.Tensor = output_gate * torch.tanh(memory)
if self.use_highway:
highway_input_projection = projected_input[
:, self._highway_inp_proj_start : self._highway_inp_proj_end
]
timestep_output = (
highway_gate * timestep_output
+ (1 - highway_gate) * highway_input_projection # type: ignore
)
return timestep_output, memory
class AugmentedLstm(torch.nn.Module):
"""
`AugmentedLstm` implements a one-layer single directional
AugmentedLSTM layer. AugmentedLSTM is an LSTM which optionally
appends a highway network to the output layer. The `recurrent_dropout_probability`
parameter controls the amount of variational dropout applied to the recurrent connections.
# Parameters
input_size : `int`
The number of expected features in the input.
hidden_size : `int`
Number of features in the hidden state of the LSTM.
go_forward : `bool`
Whether to compute features left to right (forward)
or right to left (backward).
recurrent_dropout_probability : `float`
Variational dropout probability to use. Defaults to 0.0.
use_highway : `bool`
If `True` we append a highway network to the outputs of the LSTM.
use_input_projection_bias : `bool`
If `True` we use a bias in our LSTM calculations, otherwise we don't.
# Attributes
cell : `AugmentedLSTMCell`
`AugmentedLSTMCell` that is applied at every timestep.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
go_forward: bool = True,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
):
super().__init__()
self.embed_dim = input_size
self.lstm_dim = hidden_size
self.go_forward = go_forward
self.use_highway = use_highway
self.recurrent_dropout_probability = recurrent_dropout_probability
self.cell = AugmentedLSTMCell(
self.embed_dim, self.lstm_dim, self.use_highway, use_input_projection_bias
)
def forward(
self, inputs: PackedSequence, states: Optional[Tuple[torch.Tensor, torch.Tensor]] = None
) -> Tuple[PackedSequence, Tuple[torch.Tensor, torch.Tensor]]:
"""
Warning: in a regular model it is generally better to use the `BiAugmentedLstm` class.
Given an input batch of sequential data such as word embeddings, produces a single layer unidirectional
AugmentedLSTM representation of the sequential input and new state tensors.
# Parameters
inputs : `PackedSequence`
`bsize` sequences of shape `(len, input_dim)` each, in PackedSequence format
states : `Tuple[torch.Tensor, torch.Tensor]`
Tuple of tensors containing the initial hidden state and
the cell state of each element in the batch. Each of these tensors has a dimension of
(1 x bsize x nhid). Defaults to `None`.
# Returns
`Tuple[PackedSequence, Tuple[torch.Tensor, torch.Tensor]]`
AugmentedLSTM representation of input and the state of the LSTM at `t = seq_len`.
Shape of representation is (bsize x seq_len x representation_dim).
Shape of each state is (1 x bsize x nhid).
"""
if not isinstance(inputs, PackedSequence):
raise ConfigurationError("inputs must be PackedSequence but got %s" % (type(inputs)))
sequence_tensor, batch_lengths = pad_packed_sequence(inputs, batch_first=True)
batch_size = sequence_tensor.size()[0]
total_timesteps = sequence_tensor.size()[1]
output_accumulator = sequence_tensor.new_zeros(batch_size, total_timesteps, self.lstm_dim)
if states is None:
full_batch_previous_memory = sequence_tensor.new_zeros(batch_size, self.lstm_dim)
full_batch_previous_state = sequence_tensor.data.new_zeros(batch_size, self.lstm_dim)
else:
full_batch_previous_state = states[0].squeeze(0)
full_batch_previous_memory = states[1].squeeze(0)
current_length_index = batch_size - 1 if self.go_forward else 0
if self.recurrent_dropout_probability > 0.0:
dropout_mask = get_dropout_mask(
self.recurrent_dropout_probability, full_batch_previous_memory
)
else:
dropout_mask = None
for timestep in range(total_timesteps):
index = timestep if self.go_forward else total_timesteps - timestep - 1
if self.go_forward:
while batch_lengths[current_length_index] <= index:
current_length_index -= 1
# If we're going backwards, we are _picking up_ more indices.
else:
# First conditional: Are we already at the maximum
# number of elements in the batch?
# Second conditional: Does the next shortest
# sequence beyond the current batch
# index require computation at this timestep?
while (
current_length_index < (len(batch_lengths) - 1)
and batch_lengths[current_length_index + 1] > index
):
current_length_index += 1
previous_memory = full_batch_previous_memory[0 : current_length_index + 1].clone()
previous_state = full_batch_previous_state[0 : current_length_index + 1].clone()
timestep_input = sequence_tensor[0 : current_length_index + 1, index]
timestep_output, memory = self.cell(
timestep_input,
(previous_state, previous_memory),
dropout_mask[0 : current_length_index + 1] if dropout_mask is not None else None,
)
full_batch_previous_memory = full_batch_previous_memory.data.clone()
full_batch_previous_state = full_batch_previous_state.data.clone()
full_batch_previous_memory[0 : current_length_index + 1] = memory
full_batch_previous_state[0 : current_length_index + 1] = timestep_output
output_accumulator[0 : current_length_index + 1, index, :] = timestep_output
output_accumulator = pack_padded_sequence(
output_accumulator, batch_lengths, batch_first=True
)
# Mimic the pytorch API by returning state in the following shape:
# (num_layers * num_directions, batch_size, lstm_dim). As this
# LSTM cannot be stacked, the first dimension here is just 1.
final_state = (
full_batch_previous_state.unsqueeze(0),
full_batch_previous_memory.unsqueeze(0),
)
return output_accumulator, final_state
class BiAugmentedLstm(torch.nn.Module):
"""
`BiAugmentedLstm` implements a generic AugmentedLSTM representation layer.
BiAugmentedLstm is an LSTM which optionally appends a highway network to the output layer.
The `recurrent_dropout_probability` parameter controls the amount of variational dropout applied.
# Parameters
input_size : `int`, required
The dimension of the inputs to the LSTM.
hidden_size : `int`, required.
The dimension of the outputs of the LSTM.
num_layers : `int`
Number of recurrent layers. E.g. setting `num_layers=2`
would mean stacking two LSTMs together to form a stacked LSTM,
with the second LSTM taking in the outputs of the first LSTM and
computing the final result. Defaults to 1.
bias : `bool`
If `True` we use a bias in our LSTM calculations, otherwise we don't.
recurrent_dropout_probability : `float`, optional (default = `0.0`)
Variational dropout probability to use.
bidirectional : `bool`
If `True`, becomes a bidirectional LSTM. Defaults to `False`.
padding_value : `float`, optional (default = `0.0`)
Value for the padded elements. Defaults to 0.0.
use_highway : `bool`, optional (default = `True`)
Whether or not to use highway connections between layers. This effectively involves
reparameterising the normal output of an LSTM as::
gate = sigmoid(W_x1 * x_t + W_h * h_t)
output = gate * h_t + (1 - gate) * (W_x2 * x_t)
# Returns
output_accumulator : `PackedSequence`
The outputs of the LSTM for each timestep. A tensor of shape (batch_size, max_timesteps, hidden_size) where
for a given batch element, all outputs past the sequence length for that batch are zero tensors.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
recurrent_dropout_probability: float = 0.0,
bidirectional: bool = False,
padding_value: float = 0.0,
use_highway: bool = True,
) -> None:
super().__init__()
self.input_size = input_size
self.padding_value = padding_value
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bidirectional = bidirectional
self.recurrent_dropout_probability = recurrent_dropout_probability
self.use_highway = use_highway
self.use_bias = bias
num_directions = int(self.bidirectional) + 1
self.forward_layers = torch.nn.ModuleList()
if self.bidirectional:
self.backward_layers = torch.nn.ModuleList()
lstm_embed_dim = self.input_size
for _ in range(self.num_layers):
self.forward_layers.append(
AugmentedLstm(
lstm_embed_dim,
self.hidden_size,
go_forward=True,
recurrent_dropout_probability=self.recurrent_dropout_probability,
use_highway=self.use_highway,
use_input_projection_bias=self.use_bias,
)
)
if self.bidirectional:
self.backward_layers.append(
AugmentedLstm(
lstm_embed_dim,
self.hidden_size,
go_forward=False,
recurrent_dropout_probability=self.recurrent_dropout_probability,
use_highway=self.use_highway,
use_input_projection_bias=self.use_bias,
)
)
lstm_embed_dim = self.hidden_size * num_directions
self.representation_dim = lstm_embed_dim
def forward(
self, inputs: torch.Tensor, states: Optional[Tuple[torch.Tensor, torch.Tensor]] = None
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
Given an input batch of sequential data such as word embeddings, produces
a AugmentedLSTM representation of the sequential input and new state
tensors.
# Parameters
inputs : `PackedSequence`, required.
A batch-first `PackedSequence` containing the sequences
to apply the LSTM over.
states : `Tuple[torch.Tensor, torch.Tensor]`
Tuple of tensors containing
the initial hidden state and the cell state of each element in
the batch. Each of these tensors have a dimension of
(bsize x num_layers x num_directions * nhid). Defaults to `None`.
# Returns
`Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]`
AugmentedLSTM representation of input and
the state of the LSTM at `t = seq_len`.
Shape of representation is (bsize x seq_len x representation_dim).
Shape of each state is (bsize x num_layers * num_directions x nhid).
"""
if not isinstance(inputs, PackedSequence):
raise ConfigurationError("inputs must be PackedSequence but got %s" % (type(inputs)))
# if states is not None:
# states = (states[0].transpose(0, 1), states[1].transpose(0, 1))
if self.bidirectional:
return self._forward_bidirectional(inputs, states)
return self._forward_unidirectional(inputs, states)
def _forward_bidirectional(
self, inputs: PackedSequence, states: Optional[Tuple[torch.Tensor, torch.Tensor]]
):
output_sequence = inputs
final_h = []
final_c = []
if not states:
hidden_states = [None] * self.num_layers
elif states[0].size()[0] != self.num_layers:
raise RuntimeError(
"Initial states were passed to forward() but the number of "
"initial states does not match the number of layers."
)
else:
hidden_states = list(
zip( # type: ignore
states[0].chunk(self.num_layers, 0), states[1].chunk(self.num_layers, 0)
)
)
for i, state in enumerate(hidden_states):
if state:
forward_state = state[0].chunk(2, -1)
backward_state = state[1].chunk(2, -1)
else:
forward_state = backward_state = None
forward_layer = self.forward_layers[i]
backward_layer = self.backward_layers[i]
# The state is duplicated to mirror the Pytorch API for LSTMs.
forward_output, final_forward_state = forward_layer(output_sequence, forward_state)
backward_output, final_backward_state = backward_layer(output_sequence, backward_state)
forward_output, lengths = pad_packed_sequence(forward_output, batch_first=True)
backward_output, _ = pad_packed_sequence(backward_output, batch_first=True)
output_sequence = torch.cat([forward_output, backward_output], -1)
output_sequence = pack_padded_sequence(output_sequence, lengths, batch_first=True)
final_h.extend([final_forward_state[0], final_backward_state[0]])
final_c.extend([final_forward_state[1], final_backward_state[1]])
final_h = torch.cat(final_h, dim=0)
final_c = torch.cat(final_c, dim=0)
final_state_tuple = (final_h, final_c)
output_sequence, batch_lengths = pad_packed_sequence(
output_sequence, padding_value=self.padding_value, batch_first=True
)
output_sequence = pack_padded_sequence(output_sequence, batch_lengths, batch_first=True)
return output_sequence, final_state_tuple
def _forward_unidirectional(
self, inputs: PackedSequence, states: Optional[Tuple[torch.Tensor, torch.Tensor]]
):
output_sequence = inputs
final_h = []
final_c = []
if not states:
hidden_states = [None] * self.num_layers
elif states[0].size()[0] != self.num_layers:
raise RuntimeError(
"Initial states were passed to forward() but the number of "
"initial states does not match the number of layers."
)
else:
hidden_states = list(
zip( # type: ignore
states[0].chunk(self.num_layers, 0), states[1].chunk(self.num_layers, 0)
) # type: ignore
)
for i, state in enumerate(hidden_states):
forward_layer = self.forward_layers[i]
# The state is duplicated to mirror the Pytorch API for LSTMs.
forward_output, final_forward_state = forward_layer(output_sequence, state)
output_sequence = forward_output
final_h.append(final_forward_state[0])
final_c.append(final_forward_state[1])
final_h = torch.cat(final_h, dim=0)
final_c = torch.cat(final_c, dim=0)
final_state_tuple = (final_h, final_c)
output_sequence, batch_lengths = pad_packed_sequence(
output_sequence, padding_value=self.padding_value, batch_first=True
)
output_sequence = pack_padded_sequence(output_sequence, batch_lengths, batch_first=True)
return output_sequence, final_state_tuple
| allennlp-master | allennlp/modules/augmented_lstm.py |
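# Usage sketch (not part of the original file): the bidirectional wrapper consumes and returns
# PackedSequences, so the input must be packed (sorted by decreasing length) first.
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from allennlp.modules.augmented_lstm import BiAugmentedLstm

lstm = BiAugmentedLstm(input_size=10, hidden_size=6, num_layers=2, bidirectional=True)
inputs = torch.randn(2, 3, 10)
packed = pack_padded_sequence(inputs, [3, 2], batch_first=True)
packed_output, (hidden, cell) = lstm(packed)
output, lengths = pad_packed_sequence(packed_output, batch_first=True)
# output: (2, 3, 12) -- the forward and backward directions are concatenated.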
import torch
from allennlp.nn import util
class LayerNorm(torch.nn.Module):
"""
An implementation of [Layer Normalization](
https://www.semanticscholar.org/paper/Layer-Normalization-Ba-Kiros/97fb4e3d45bb098e27e0071448b6152217bd35a5).
Layer Normalization stabilises the training of deep neural networks by
normalising the outputs of neurons from a particular layer. It computes:
output = (gamma * (tensor - mean) / (std + eps)) + beta
# Parameters
dimension : `int`, required.
The dimension of the layer output to normalize.
# Returns
The normalized layer output.
""" # noqa
def __init__(self, dimension: int) -> None:
super().__init__()
self.gamma = torch.nn.Parameter(torch.ones(dimension))
self.beta = torch.nn.Parameter(torch.zeros(dimension))
def forward(self, tensor: torch.Tensor):
mean = tensor.mean(-1, keepdim=True)
std = tensor.std(-1, unbiased=False, keepdim=True)
return (
self.gamma * (tensor - mean) / (std + util.tiny_value_of_dtype(std.dtype)) + self.beta
)
| allennlp-master | allennlp/modules/layer_norm.py |
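# Usage sketch (not part of the original file): normalising the last dimension of a batch of vectors.
import torch
from allennlp.modules.layer_norm import LayerNorm

norm = LayerNorm(dimension=10)
x = torch.randn(2, 5, 10)
out = norm(x)
# Each vector along the last dimension now has approximately zero mean and unit standard deviation
# (before the learned `gamma` scale and `beta` shift, which start at 1 and 0).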
import torch
from allennlp.nn import Activation
class GatedSum(torch.nn.Module):
"""
This `Module` represents a gated sum of two tensors `a` and `b`. Specifically:
```
f = activation(W [a; b])
out = f * a + (1 - f) * b
```
# Parameters
input_dim : `int`, required
The dimensionality of the input. We assume the inputs have shape `(..., input_dim)`.
activation : `Activation`, optional (default = `torch.nn.Sigmoid()`)
The activation function to use.
"""
def __init__(self, input_dim: int, activation: Activation = torch.nn.Sigmoid()) -> None:
super().__init__()
self.input_dim = input_dim
self._gate = torch.nn.Linear(input_dim * 2, 1)
self._activation = activation
def get_input_dim(self):
return self.input_dim
def get_output_dim(self):
return self.input_dim
def forward(self, input_a: torch.Tensor, input_b: torch.Tensor) -> torch.Tensor:
if input_a.size() != input_b.size():
raise ValueError("The input must have the same size.")
if input_a.size(-1) != self.input_dim:
raise ValueError("Input size must match `input_dim`.")
gate_value = self._activation(self._gate(torch.cat([input_a, input_b], -1)))
return gate_value * input_a + (1 - gate_value) * input_b
| allennlp-master | allennlp/modules/gated_sum.py |
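# Usage sketch (not part of the original file): gating two tensors of identical shape.
import torch
from allennlp.modules.gated_sum import GatedSum

gated_sum = GatedSum(input_dim=10)
input_a = torch.randn(2, 5, 10)
input_b = torch.randn(2, 5, 10)
combined = gated_sum(input_a, input_b)  # shape (2, 5, 10); f = sigmoid(W [a; b]) weights the two inputs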
from overrides import overrides
import torch
from typing import List
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
@Seq2SeqEncoder.register("compose")
class ComposeEncoder(Seq2SeqEncoder):
"""This class can be used to compose several encoders in sequence.
Among other things, this can be used to add a "pre-contextualizer" before a Seq2SeqEncoder.
Registered as a `Seq2SeqEncoder` with name "compose".
# Parameters
encoders : `List[Seq2SeqEncoder]`, required.
A non-empty list of encoders to compose. The encoders must match in bidirectionality.
"""
def __init__(self, encoders: List[Seq2SeqEncoder]):
super().__init__()
self.encoders = encoders
for idx, encoder in enumerate(encoders):
self.add_module("encoder%d" % idx, encoder)
# Compute bidirectionality.
all_bidirectional = all(encoder.is_bidirectional() for encoder in encoders)
any_bidirectional = any(encoder.is_bidirectional() for encoder in encoders)
self.bidirectional = all_bidirectional
if all_bidirectional != any_bidirectional:
raise ValueError("All encoders need to match in bidirectionality.")
if len(self.encoders) < 1:
raise ValueError("Need at least one encoder.")
last_enc = None
for enc in encoders:
if last_enc is not None and last_enc.get_output_dim() != enc.get_input_dim():
raise ValueError("Encoder input and output dimensions don't match.")
last_enc = enc
@overrides
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor = None) -> torch.Tensor:
"""
# Parameters
inputs : `torch.Tensor`, required.
A tensor of shape (batch_size, timesteps, input_dim)
mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of shape (batch_size, timesteps).
# Returns
A tensor computed by composing the sequence of encoders.
"""
for encoder in self.encoders:
inputs = encoder(inputs, mask)
return inputs
@overrides
def get_input_dim(self) -> int:
return self.encoders[0].get_input_dim()
@overrides
def get_output_dim(self) -> int:
return self.encoders[-1].get_output_dim()
@overrides
def is_bidirectional(self) -> bool:
return self.bidirectional
| allennlp-master | allennlp/modules/seq2seq_encoders/compose_encoder.py |
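# Usage sketch (not part of the original file): chaining two encoders whose dimensions line up.
import torch
from allennlp.modules.seq2seq_encoders import ComposeEncoder, LstmSeq2SeqEncoder, PassThroughEncoder

encoder = ComposeEncoder(
    [PassThroughEncoder(input_dim=8), LstmSeq2SeqEncoder(input_size=8, hidden_size=16)]
)
inputs = torch.randn(2, 5, 8)
mask = torch.ones(2, 5, dtype=torch.bool)
outputs = encoder(inputs, mask)  # shape (2, 5, 16)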
from allennlp.modules.encoder_base import _EncoderBase
from allennlp.common import Registrable
class Seq2SeqEncoder(_EncoderBase, Registrable):
"""
A `Seq2SeqEncoder` is a `Module` that takes as input a sequence of vectors and returns a
modified sequence of vectors. Input shape : `(batch_size, sequence_length, input_dim)`; output
shape : `(batch_size, sequence_length, output_dim)`.
We add two methods to the basic `Module` API: `get_input_dim()` and `get_output_dim()`.
You might need this if you want to construct a `Linear` layer using the output of this encoder,
or to raise sensible errors for mis-matching input dimensions.
"""
def get_input_dim(self) -> int:
"""
Returns the dimension of the vector input for each element in the sequence input
to a `Seq2SeqEncoder`. This is `not` the shape of the input tensor, but the
last element of that shape.
"""
raise NotImplementedError
def get_output_dim(self) -> int:
"""
Returns the dimension of each vector in the sequence output by this `Seq2SeqEncoder`.
This is `not` the shape of the returned tensor, but the last element of that shape.
"""
raise NotImplementedError
def is_bidirectional(self) -> bool:
"""
Returns `True` if this encoder is bidirectional. If so, we assume the forward direction
of the encoder is the first half of the final dimension, and the backward direction is the
second half.
"""
raise NotImplementedError
| allennlp-master | allennlp/modules/seq2seq_encoders/seq2seq_encoder.py |
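# Usage sketch (not part of the original file): `get_output_dim()` is how downstream layers are sized,
# shown here with one of the concrete encoders defined later in this package.
import torch
from allennlp.modules.seq2seq_encoders import LstmSeq2SeqEncoder

encoder = LstmSeq2SeqEncoder(input_size=8, hidden_size=16, bidirectional=True)
# get_output_dim() accounts for bidirectionality (2 * 16 = 32), so the projection is sized correctly.
tag_projection = torch.nn.Linear(encoder.get_output_dim(), 5)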
from overrides import overrides
import torch
from torch.nn.utils.rnn import pad_packed_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
from allennlp.modules.stacked_alternating_lstm import StackedAlternatingLstm
from allennlp.modules.stacked_bidirectional_lstm import StackedBidirectionalLstm
class PytorchSeq2SeqWrapper(Seq2SeqEncoder):
"""
Pytorch's RNNs have two outputs: the hidden state for every time step, and the hidden state at
the last time step for every layer. We just want the first one as a single output. This
wrapper pulls out that output, and adds a `get_output_dim` method, which is useful if you
want to, e.g., define a linear + softmax layer on top of this to get some distribution over a
set of labels. The linear layer needs to know its input dimension before it is called, and you
can get that from `get_output_dim`.
In order to be wrapped with this wrapper, a class must have the following members:
- `self.input_size: int`
- `self.hidden_size: int`
- `def forward(inputs: PackedSequence, hidden_state: torch.Tensor) ->
Tuple[PackedSequence, torch.Tensor]`.
- `self.bidirectional: bool` (optional)
This is what pytorch's RNNs look like - just make sure your class looks like those, and it
should work.
Note that we *require* you to pass a binary mask of shape (batch_size, sequence_length)
when you call this module, to avoid subtle bugs around masking. If you already have a
`PackedSequence` you can pass `None` as the second parameter.
We support stateful RNNs where the final state from each batch is used as the initial
state for the subsequent batch by passing `stateful=True` to the constructor.
"""
def __init__(self, module: torch.nn.Module, stateful: bool = False) -> None:
super().__init__(stateful)
self._module = module
try:
if not self._module.batch_first:
raise ConfigurationError("Our encoder semantics assumes batch is always first!")
except AttributeError:
pass
try:
self._is_bidirectional = self._module.bidirectional
except AttributeError:
self._is_bidirectional = False
if self._is_bidirectional:
self._num_directions = 2
else:
self._num_directions = 1
@overrides
def get_input_dim(self) -> int:
return self._module.input_size
@overrides
def get_output_dim(self) -> int:
return self._module.hidden_size * self._num_directions
@overrides
def is_bidirectional(self) -> bool:
return self._is_bidirectional
@overrides
def forward(
self, inputs: torch.Tensor, mask: torch.BoolTensor, hidden_state: torch.Tensor = None
) -> torch.Tensor:
if self.stateful and mask is None:
raise ValueError("Always pass a mask with stateful RNNs.")
if self.stateful and hidden_state is not None:
raise ValueError("Stateful RNNs provide their own initial hidden_state.")
if mask is None:
return self._module(inputs, hidden_state)[0]
batch_size, total_sequence_length = mask.size()
packed_sequence_output, final_states, restoration_indices = self.sort_and_run_forward(
self._module, inputs, mask, hidden_state
)
unpacked_sequence_tensor, _ = pad_packed_sequence(packed_sequence_output, batch_first=True)
num_valid = unpacked_sequence_tensor.size(0)
# Some RNNs (GRUs) only return one state as a Tensor. Others (LSTMs) return two.
# If one state, use a single element list to handle in a consistent manner below.
if not isinstance(final_states, (list, tuple)) and self.stateful:
final_states = [final_states]
# Add back invalid rows.
if num_valid < batch_size:
_, length, output_dim = unpacked_sequence_tensor.size()
zeros = unpacked_sequence_tensor.new_zeros(batch_size - num_valid, length, output_dim)
unpacked_sequence_tensor = torch.cat([unpacked_sequence_tensor, zeros], 0)
# The states also need to have invalid rows added back.
if self.stateful:
new_states = []
for state in final_states:
num_layers, _, state_dim = state.size()
zeros = state.new_zeros(num_layers, batch_size - num_valid, state_dim)
new_states.append(torch.cat([state, zeros], 1))
final_states = new_states
# It's possible to need to pass sequences which are padded to longer than the
# max length of the sequence to a Seq2SeqEncoder. However, packing and unpacking
# the sequences mean that the returned tensor won't include these dimensions, because
# the RNN did not need to process them. We add them back on in the form of zeros here.
sequence_length_difference = total_sequence_length - unpacked_sequence_tensor.size(1)
if sequence_length_difference > 0:
zeros = unpacked_sequence_tensor.new_zeros(
batch_size, sequence_length_difference, unpacked_sequence_tensor.size(-1)
)
unpacked_sequence_tensor = torch.cat([unpacked_sequence_tensor, zeros], 1)
if self.stateful:
self._update_states(final_states, restoration_indices)
# Restore the original indices and return the sequence.
return unpacked_sequence_tensor.index_select(0, restoration_indices)
@Seq2SeqEncoder.register("gru")
class GruSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "gru".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
stateful: bool = False,
):
module = torch.nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module=module, stateful=stateful)
@Seq2SeqEncoder.register("lstm")
class LstmSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
stateful: bool = False,
):
module = torch.nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module=module, stateful=stateful)
@Seq2SeqEncoder.register("rnn")
class RnnSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "rnn".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
nonlinearity: str = "tanh",
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
stateful: bool = False,
):
module = torch.nn.RNN(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
nonlinearity=nonlinearity,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module=module, stateful=stateful)
@Seq2SeqEncoder.register("augmented_lstm")
class AugmentedLstmSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "augmented_lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
go_forward: bool = True,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
stateful: bool = False,
) -> None:
module = AugmentedLstm(
input_size=input_size,
hidden_size=hidden_size,
go_forward=go_forward,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=use_input_projection_bias,
)
super().__init__(module=module, stateful=stateful)
@Seq2SeqEncoder.register("alternating_lstm")
class StackedAlternatingLstmSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "alternating_lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
stateful: bool = False,
) -> None:
module = StackedAlternatingLstm(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=use_input_projection_bias,
)
super().__init__(module=module, stateful=stateful)
@Seq2SeqEncoder.register("stacked_bidirectional_lstm")
class StackedBidirectionalLstmSeq2SeqEncoder(PytorchSeq2SeqWrapper):
"""
Registered as a `Seq2SeqEncoder` with name "stacked_bidirectional_lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
layer_dropout_probability: float = 0.0,
use_highway: bool = True,
stateful: bool = False,
) -> None:
module = StackedBidirectionalLstm(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
recurrent_dropout_probability=recurrent_dropout_probability,
layer_dropout_probability=layer_dropout_probability,
use_highway=use_highway,
)
super().__init__(module=module, stateful=stateful)
| allennlp-master | allennlp/modules/seq2seq_encoders/pytorch_seq2seq_wrapper.py |
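# Usage sketch (not part of the original file): the wrapper returns per-timestep outputs and zeros
# out positions beyond each sequence's length.
import torch
from allennlp.modules.seq2seq_encoders import GruSeq2SeqEncoder

encoder = GruSeq2SeqEncoder(input_size=8, hidden_size=16, bidirectional=True)
inputs = torch.randn(3, 6, 8)
mask = torch.tensor(
    [[True] * 6, [True] * 4 + [False] * 2, [True] * 2 + [False] * 4]
)
outputs = encoder(inputs, mask)  # shape (3, 6, 32); padded timesteps come back as zero vectors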
import torch
from overrides import overrides
from allennlp.modules.feedforward import FeedForward
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
@Seq2SeqEncoder.register("feedforward")
class FeedForwardEncoder(Seq2SeqEncoder):
"""
This class applies a `FeedForward` network independently to each item in the sequence.
Registered as a `Seq2SeqEncoder` with name "feedforward".
"""
def __init__(self, feedforward: FeedForward) -> None:
super().__init__()
self._feedforward = feedforward
@overrides
def get_input_dim(self) -> int:
return self._feedforward.get_input_dim()
@overrides
def get_output_dim(self) -> int:
return self._feedforward.get_output_dim()
@overrides
def is_bidirectional(self) -> bool:
return False
@overrides
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor = None) -> torch.Tensor:
"""
# Parameters
inputs : `torch.Tensor`, required.
A tensor of shape (batch_size, timesteps, input_dim)
mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of shape (batch_size, timesteps).
# Returns
A tensor of shape (batch_size, timesteps, output_dim).
"""
if mask is None:
return self._feedforward(inputs)
else:
outputs = self._feedforward(inputs)
return outputs * mask.unsqueeze(dim=-1)
| allennlp-master | allennlp/modules/seq2seq_encoders/feedforward_encoder.py |
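# Usage sketch (not part of the original file): wrapping a FeedForward so it is applied at every timestep.
# Assumes the standard AllenNLP FeedForward(input_dim, num_layers, hidden_dims, activations) signature.
import torch
from allennlp.modules import FeedForward
from allennlp.modules.seq2seq_encoders import FeedForwardEncoder
from allennlp.nn import Activation

feedforward = FeedForward(
    input_dim=8, num_layers=2, hidden_dims=16, activations=Activation.by_name("relu")()
)
encoder = FeedForwardEncoder(feedforward)
inputs = torch.randn(2, 5, 8)
mask = torch.ones(2, 5, dtype=torch.bool)
outputs = encoder(inputs, mask)  # shape (2, 5, 16)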
from overrides import overrides
import torch
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
@Seq2SeqEncoder.register("pass_through")
class PassThroughEncoder(Seq2SeqEncoder):
"""
This class allows you to specify skipping a `Seq2SeqEncoder` just
by changing a configuration file. This is useful for ablations and
measuring the impact of different elements of your model.
Registered as a `Seq2SeqEncoder` with name "pass_through".
"""
def __init__(self, input_dim: int) -> None:
super().__init__()
self._input_dim = input_dim
@overrides
def get_input_dim(self) -> int:
return self._input_dim
@overrides
def get_output_dim(self) -> int:
return self._input_dim
@overrides
def is_bidirectional(self):
return False
@overrides
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor = None) -> torch.Tensor:
"""
# Parameters
inputs : `torch.Tensor`, required.
A tensor of shape (batch_size, timesteps, input_dim)
mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of shape (batch_size, timesteps).
# Returns
A tensor of shape (batch_size, timesteps, output_dim),
where output_dim = input_dim.
"""
if mask is None:
return inputs
else:
# We should mask out the output instead of the input.
# But here, output = input, so we directly mask out the input.
return inputs * mask.unsqueeze(dim=-1)
| allennlp-master | allennlp/modules/seq2seq_encoders/pass_through_encoder.py |
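# Usage sketch (not part of the original file): the encoder is an identity map apart from masking.
import torch
from allennlp.modules.seq2seq_encoders import PassThroughEncoder

encoder = PassThroughEncoder(input_dim=8)
inputs = torch.randn(2, 5, 8)
mask = torch.tensor([[True] * 5, [True] * 3 + [False] * 2])
outputs = encoder(inputs, mask)  # identical to `inputs`, except masked positions are zeroed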
"""
Modules that transform a sequence of input vectors
into a sequence of output vectors.
Some are just basic wrappers around existing PyTorch modules,
others are AllenNLP modules.
The available Seq2Seq encoders are
- `"gru"` : allennlp.modules.seq2seq_encoders.GruSeq2SeqEncoder
- `"lstm"` : allennlp.modules.seq2seq_encoders.LstmSeq2SeqEncoder
- `"rnn"` : allennlp.modules.seq2seq_encoders.RnnSeq2SeqEncoder
- `"augmented_lstm"` : allennlp.modules.seq2seq_encoders.AugmentedLstmSeq2SeqEncoder
- `"alternating_lstm"` : allennlp.modules.seq2seq_encoders.StackedAlternatingLstmSeq2SeqEncoder
- `"pass_through"` : allennlp.modules.seq2seq_encoders.PassThroughEncoder
- `"feedforward"` : allennlp.modules.seq2seq_encoders.FeedForwardEncoder
- `"pytorch_transformer"` : allennlp.modules.seq2seq_encoders.PytorchTransformer
- `"compose"` : allennlp.modules.seq2seq_encoders.ComposeEncoder
- `"gated-cnn-encoder"` : allennlp.momdules.seq2seq_encoders.GatedCnnEncoder
- `"stacked_bidirectional_lstm"`: allennlp.modules.seq2seq_encoders.StackedBidirectionalLstmSeq2SeqEncoder
"""
from allennlp.modules.seq2seq_encoders.compose_encoder import ComposeEncoder
from allennlp.modules.seq2seq_encoders.feedforward_encoder import FeedForwardEncoder
from allennlp.modules.seq2seq_encoders.gated_cnn_encoder import GatedCnnEncoder
from allennlp.modules.seq2seq_encoders.pass_through_encoder import PassThroughEncoder
from allennlp.modules.seq2seq_encoders.pytorch_seq2seq_wrapper import (
AugmentedLstmSeq2SeqEncoder,
GruSeq2SeqEncoder,
LstmSeq2SeqEncoder,
PytorchSeq2SeqWrapper,
RnnSeq2SeqEncoder,
StackedAlternatingLstmSeq2SeqEncoder,
StackedBidirectionalLstmSeq2SeqEncoder,
)
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
from allennlp.modules.seq2seq_encoders.pytorch_transformer_wrapper import PytorchTransformer
| allennlp-master | allennlp/modules/seq2seq_encoders/__init__.py |
from typing import Optional
from overrides import overrides
import torch
from torch import nn
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
from allennlp.nn.util import add_positional_features
@Seq2SeqEncoder.register("pytorch_transformer")
class PytorchTransformer(Seq2SeqEncoder):
"""
Implements a stacked self-attention encoder similar to the Transformer
architecture in [Attention is all you Need]
(https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077).
This class adapts the Transformer from torch.nn for use in AllenNLP. Optionally, it adds positional encodings.
Registered as a `Seq2SeqEncoder` with name "pytorch_transformer".
# Parameters
input_dim : `int`, required.
The input dimension of the encoder.
    num_layers : `int`, required.
        The number of stacked self attention -> feedforward -> layer normalisation blocks.
    feedforward_hidden_dim : `int`, optional, (default = `2048`)
        The middle dimension of the FeedForward network. The input and output
        dimensions are fixed to ensure sizes match up for the self attention layers.
    num_attention_heads : `int`, optional, (default = `8`)
        The number of attention heads to use per layer.
    positional_encoding : `Optional[str]`, optional, (default = `None`)
        Specifies the type of positional encodings to add to the inputs. Valid values are
        `None` (no positional encodings), `"sinusoidal"` (fixed sinusoidal frequencies), and
        `"embedding"` (learned position embeddings). Adding positional information is strongly
        recommended, as without it the self attention layers have no idea of absolute or relative
        position (as they are just computing pairwise similarity between vectors of elements),
        which can be important features for many tasks.
    positional_embedding_size : `int`, optional, (default = `512`)
        The number of positions for the learned positional embeddings; only used when
        `positional_encoding == "embedding"`.
    dropout_prob : `float`, optional, (default = `0.1`)
        The dropout probability for the transformer layers.
    activation : `str`, optional, (default = `"relu"`)
        The activation function of the feedforward layers inside each transformer block.
""" # noqa
def __init__(
self,
input_dim: int,
num_layers: int,
feedforward_hidden_dim: int = 2048,
num_attention_heads: int = 8,
positional_encoding: Optional[str] = None,
positional_embedding_size: int = 512,
dropout_prob: float = 0.1,
activation: str = "relu",
) -> None:
super().__init__()
layer = nn.TransformerEncoderLayer(
d_model=input_dim,
nhead=num_attention_heads,
dim_feedforward=feedforward_hidden_dim,
dropout=dropout_prob,
activation=activation,
)
self._transformer = nn.TransformerEncoder(layer, num_layers)
self._input_dim = input_dim
# initialize parameters
# We do this before the embeddings are initialized so we get the default initialization for the embeddings.
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
if positional_encoding is None:
self._sinusoidal_positional_encoding = False
self._positional_embedding = None
elif positional_encoding == "sinusoidal":
self._sinusoidal_positional_encoding = True
self._positional_embedding = None
elif positional_encoding == "embedding":
self._sinusoidal_positional_encoding = False
self._positional_embedding = nn.Embedding(positional_embedding_size, input_dim)
else:
raise ValueError(
"positional_encoding must be one of None, 'sinusoidal', or 'embedding'"
)
@overrides
def get_input_dim(self) -> int:
return self._input_dim
@overrides
def get_output_dim(self) -> int:
return self._input_dim
@overrides
def is_bidirectional(self):
return False
@overrides
def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor):
output = inputs
if self._sinusoidal_positional_encoding:
output = add_positional_features(output)
if self._positional_embedding is not None:
position_ids = torch.arange(inputs.size(1), dtype=torch.long, device=output.device)
position_ids = position_ids.unsqueeze(0).expand(inputs.shape[:-1])
output = output + self._positional_embedding(position_ids)
# For some reason the torch transformer expects the shape (sequence, batch, features), not the more
# familiar (batch, sequence, features), so we have to fix it.
output = output.permute(1, 0, 2)
# For some other reason, the torch transformer takes the mask backwards.
mask = ~mask
output = self._transformer(output, src_key_padding_mask=mask)
output = output.permute(1, 0, 2)
return output
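# A minimal usage sketch: the sizes below are illustrative assumptions. Note that
# `input_dim` must be divisible by `num_attention_heads`, and the mask marks real
# tokens with `True` and padding with `False`.
if __name__ == "__main__":
    encoder = PytorchTransformer(
        input_dim=16, num_layers=2, num_attention_heads=4, positional_encoding="sinusoidal"
    )
    inputs = torch.randn(2, 5, 16)  # (batch_size, timesteps, input_dim)
    mask = torch.ones(2, 5, dtype=torch.bool)
    outputs = encoder(inputs, mask)
    assert outputs.shape == (2, 5, encoder.get_output_dim())  # (2, 5, 16)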
| allennlp-master | allennlp/modules/seq2seq_encoders/pytorch_transformer_wrapper.py |
from typing import Sequence, List
import math
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
class ResidualBlock(torch.nn.Module):
def __init__(
self,
input_dim: int,
layers: Sequence[Sequence[int]],
direction: str,
do_weight_norm: bool = True,
dropout: float = 0.0,
) -> None:
super().__init__()
self.dropout = dropout
self._convolutions = torch.nn.ModuleList()
last_dim = input_dim
for k, layer in enumerate(layers):
# We run two convolutions for each block -- one for the
# output and one for the gates -- do them at once, and
# we'll worry about slicing them in forward
if len(layer) == 2:
# no dilation
conv = torch.nn.Conv1d(
last_dim, layer[1] * 2, layer[0], stride=1, padding=layer[0] - 1, bias=True
)
elif len(layer) == 3:
# a dilation
assert layer[0] == 2, "only support kernel = 2 for now"
conv = torch.nn.Conv1d(
last_dim,
layer[1] * 2,
layer[0],
stride=1,
padding=layer[2],
dilation=layer[2],
bias=True,
)
else:
raise ValueError("each layer must have length 2 or 3")
# from Convolutional Sequence to Sequence Learning
if k == 0:
conv_dropout = dropout
else:
# no dropout
conv_dropout = 0.0
std = math.sqrt((4 * (1.0 - conv_dropout)) / (layer[0] * last_dim))
conv.weight.data.normal_(0, std=std)
conv.bias.data.zero_()
if do_weight_norm:
# conv.weight.shape == (out_channels, in_channels, kernel width)
# in fairseq, conv.weight.shape == ([width, in, out])
# for ConvTBC. In ConvTBC, weight norm is applied as
# nn.utils.weight_norm(m, dim=2) over the output dimension.
# so for regular 1D convs we need to apply over dimension=0
conv = torch.nn.utils.weight_norm(conv, name="weight", dim=0)
self._convolutions.append(conv)
last_dim = layer[1]
assert last_dim == input_dim
if direction not in ("forward", "backward"):
raise ConfigurationError(f"invalid direction: {direction}")
self._direction = direction
def forward(self, x: torch.Tensor) -> torch.Tensor:
# x = (batch_size, dim, timesteps)
# outputs: (batch_size, dim, timesteps) = f(x) + x
out = x
timesteps = x.size(2)
for k, convolution in enumerate(self._convolutions):
if k == 0 and self.dropout > 0:
# apply dropout to the input
out = torch.nn.functional.dropout(out, self.dropout, self.training)
conv_out = convolution(out)
# remove the padding indices
# x is padded by convolution width - 1 in each direction
dims_to_remove = conv_out.size(2) - timesteps
if dims_to_remove > 0:
if self._direction == "forward":
# remove from the end of the sequence
conv_out = conv_out.narrow(2, 0, timesteps)
else:
# remove from the beginning of the sequence
conv_out = conv_out.narrow(2, dims_to_remove, timesteps)
out = torch.nn.functional.glu(conv_out, dim=1)
# see Convolutional Sequence to Sequence Learning
return (out + x) * math.sqrt(0.5)
@Seq2SeqEncoder.register("gated-cnn-encoder")
class GatedCnnEncoder(Seq2SeqEncoder):
"""
**This is work-in-progress and has not been fully tested yet. Use at your own risk!**
A `Seq2SeqEncoder` that uses a Gated CNN.
see
Language Modeling with Gated Convolutional Networks, Yann N. Dauphin et al, ICML 2017
https://arxiv.org/abs/1612.08083
Convolutional Sequence to Sequence Learning, Jonas Gehring et al, ICML 2017
https://arxiv.org/abs/1705.03122
Some possibilities:
Each element of the list is wrapped in a residual block:
input_dim = 512
    layers = [ [[4, 512]], [[4, 512], [4, 512]], [[4, 512], [4, 512]], [[4, 512], [4, 512]] ]
dropout = 0.05
A "bottleneck architecture"
input_dim = 512
layers = [ [[4, 512]], [[1, 128], [5, 128], [1, 512]], ... ]
An architecture with dilated convolutions
input_dim = 512
layers = [
[[2, 512, 1]], [[2, 512, 2]], [[2, 512, 4]], [[2, 512, 8]], # receptive field == 16
[[2, 512, 1]], [[2, 512, 2]], [[2, 512, 4]], [[2, 512, 8]], # receptive field == 31
[[2, 512, 1]], [[2, 512, 2]], [[2, 512, 4]], [[2, 512, 8]], # receptive field == 46
        [[2, 512, 1]], [[2, 512, 2]], [[2, 512, 4]], [[2, 512, 8]], # receptive field == 61
]
Registered as a `Seq2SeqEncoder` with name "gated-cnn-encoder".
# Parameters
input_dim : `int`, required
The dimension of the inputs.
layers : `Sequence[Sequence[Sequence[int]]]`, required
The layer dimensions for each `ResidualBlock`.
dropout : `float`, optional (default = `0.0`)
The dropout for each `ResidualBlock`.
return_all_layers : `bool`, optional (default = `False`)
Whether to return all layers or just the last layer.
"""
def __init__(
self,
input_dim: int,
layers: Sequence[Sequence[Sequence[int]]],
dropout: float = 0.0,
return_all_layers: bool = False,
) -> None:
super().__init__()
self._forward_residual_blocks = torch.nn.ModuleList()
self._backward_residual_blocks = torch.nn.ModuleList()
self._input_dim = input_dim
self._output_dim = input_dim * 2
for layer in layers:
self._forward_residual_blocks.append(
ResidualBlock(input_dim, layer, "forward", dropout=dropout)
)
self._backward_residual_blocks.append(
ResidualBlock(input_dim, layer, "backward", dropout=dropout)
)
self._return_all_layers = return_all_layers
def forward(self, token_embeddings: torch.Tensor, mask: torch.BoolTensor):
# Convolutions need transposed input
transposed_embeddings = torch.transpose(token_embeddings, 1, 2)
# We need to broadcast the mask to feature dimension,
# and to use masked_fill_ we need the inverse of the mask.
mask_for_fill = ~mask.unsqueeze(1)
if self._return_all_layers:
# outputs will be [[all forward layers], [all backward layers]]
layer_outputs: List[List[torch.Tensor]] = [[], []]
else:
# outputs will be [forward final layer, backward final layer]
outputs: List[torch.Tensor] = []
for k, blocks in enumerate([self._forward_residual_blocks, self._backward_residual_blocks]):
out = transposed_embeddings
# Due to zero padding for backward sequences, we need
# to ensure that the input has zeros everywhere where
# there isn't a mask.
for block in blocks:
out = block(out.masked_fill(mask_for_fill, 0.0))
if self._return_all_layers:
layer_outputs[k].append(out)
if not self._return_all_layers:
outputs.append(out)
if self._return_all_layers:
return [
torch.cat([fwd, bwd], dim=1).transpose(1, 2) for fwd, bwd in zip(*layer_outputs)
]
else:
# Concatenate forward and backward, then transpose back
return torch.cat(outputs, dim=1).transpose(1, 2)
def get_input_dim(self) -> int:
return self._input_dim
def get_output_dim(self) -> int:
return self._output_dim
def is_bidirectional(self) -> bool:
return True
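# A minimal usage sketch: the layer specification below is an illustrative assumption.
# Each inner list becomes one `ResidualBlock`, and the output dimension is
# `2 * input_dim` because the forward and backward encodings are concatenated.
if __name__ == "__main__":
    encoder = GatedCnnEncoder(input_dim=32, layers=[[[4, 32]], [[4, 32], [4, 32]]])
    token_embeddings = torch.randn(2, 7, 32)  # (batch_size, timesteps, input_dim)
    mask = torch.ones(2, 7, dtype=torch.bool)
    outputs = encoder(token_embeddings, mask)
    assert outputs.shape == (2, 7, encoder.get_output_dim())  # (2, 7, 64)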
| allennlp-master | allennlp/modules/seq2seq_encoders/gated_cnn_encoder.py |
"""
An *attention* module that computes the similarity between
an input vector and the rows of a matrix.
"""
import torch
from overrides import overrides
from allennlp.common.registrable import Registrable
from allennlp.nn.util import masked_softmax
class Attention(torch.nn.Module, Registrable):
"""
An `Attention` takes two inputs: a (batched) vector and a matrix, plus an optional mask on the
rows of the matrix. We compute the similarity between the vector and each row in the matrix,
and then (optionally) perform a softmax over rows using those computed similarities.
Inputs:
- vector: shape `(batch_size, embedding_dim)`
- matrix: shape `(batch_size, num_rows, embedding_dim)`
- matrix_mask: shape `(batch_size, num_rows)`, specifying which rows are just padding.
Output:
- attention: shape `(batch_size, num_rows)`.
# Parameters
normalize : `bool`, optional (default = `True`)
If true, we normalize the computed similarities with a softmax, to return a probability
distribution for your attention. If false, this is just computing a similarity score.
"""
def __init__(self, normalize: bool = True) -> None:
super().__init__()
self._normalize = normalize
@overrides
def forward(
self, vector: torch.Tensor, matrix: torch.Tensor, matrix_mask: torch.BoolTensor = None
) -> torch.Tensor:
similarities = self._forward_internal(vector, matrix)
if self._normalize:
return masked_softmax(similarities, matrix_mask)
else:
return similarities
def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
| allennlp-master | allennlp/modules/attention/attention.py |
from overrides import overrides
import torch
from torch.nn.parameter import Parameter
from allennlp.modules.attention.attention import Attention
from allennlp.nn import Activation
@Attention.register("bilinear")
class BilinearAttention(Attention):
"""
Computes attention between a vector and a matrix using a bilinear attention function. This
function has a matrix of weights `W` and a bias `b`, and the similarity between the vector
`x` and the matrix `y` is computed as `x^T W y + b`.
Registered as an `Attention` with name "bilinear".
# Parameters
vector_dim : `int`, required
The dimension of the vector, `x`, described above. This is `x.size()[-1]` - the length
of the vector that will go into the similarity computation. We need this so we can build
the weight matrix correctly.
matrix_dim : `int`, required
The dimension of the matrix, `y`, described above. This is `y.size()[-1]` - the length
of the vector that will go into the similarity computation. We need this so we can build
the weight matrix correctly.
activation : `Activation`, optional (default=`linear`)
An activation function applied after the `x^T W y + b` calculation. Default is
linear, i.e. no activation.
normalize : `bool`, optional (default=`True`)
If true, we normalize the computed similarities with a softmax, to return a probability
distribution for your attention. If false, this is just computing a similarity score.
"""
def __init__(
self,
vector_dim: int,
matrix_dim: int,
activation: Activation = None,
normalize: bool = True,
) -> None:
super().__init__(normalize)
self._weight_matrix = Parameter(torch.Tensor(vector_dim, matrix_dim))
self._bias = Parameter(torch.Tensor(1))
self._activation = activation or Activation.by_name("linear")()
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self._weight_matrix)
self._bias.data.fill_(0)
@overrides
def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
intermediate = vector.mm(self._weight_matrix).unsqueeze(1)
return self._activation(intermediate.bmm(matrix.transpose(1, 2)).squeeze(1) + self._bias)
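# A minimal usage sketch: the dimensions and mask below are illustrative assumptions.
# With the default `normalize=True`, each output row is a probability distribution
# over the (unmasked) rows of `matrix`.
if __name__ == "__main__":
    attention = BilinearAttention(vector_dim=3, matrix_dim=5)
    vector = torch.randn(2, 3)  # (batch_size, vector_dim)
    matrix = torch.randn(2, 4, 5)  # (batch_size, num_rows, matrix_dim)
    matrix_mask = torch.tensor([[True, True, True, True], [True, True, False, False]])
    scores = attention(vector, matrix, matrix_mask)
    assert scores.shape == (2, 4)
    assert torch.allclose(scores.sum(dim=-1), torch.ones(2))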
| allennlp-master | allennlp/modules/attention/bilinear_attention.py |
from allennlp.modules.attention.attention import Attention
from allennlp.modules.attention.bilinear_attention import BilinearAttention
from allennlp.modules.attention.additive_attention import AdditiveAttention
from allennlp.modules.attention.cosine_attention import CosineAttention
from allennlp.modules.attention.dot_product_attention import DotProductAttention
from allennlp.modules.attention.linear_attention import LinearAttention
| allennlp-master | allennlp/modules/attention/__init__.py |
import torch
from overrides import overrides
from allennlp.modules.attention.attention import Attention
@Attention.register("dot_product")
class DotProductAttention(Attention):
"""
Computes attention between a vector and a matrix using dot product.
Registered as an `Attention` with name "dot_product".
"""
@overrides
def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
return matrix.bmm(vector.unsqueeze(-1)).squeeze(-1)
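# A minimal usage sketch: the shapes below are illustrative assumptions. The vector and
# each matrix row must share the same embedding dimension; scores are softmax-normalized
# by default (see the `Attention` base class).
if __name__ == "__main__":
    attention = DotProductAttention()
    vector = torch.randn(2, 6)  # (batch_size, embedding_dim)
    matrix = torch.randn(2, 3, 6)  # (batch_size, num_rows, embedding_dim)
    scores = attention(vector, matrix)
    assert scores.shape == (2, 3)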
| allennlp-master | allennlp/modules/attention/dot_product_attention.py |
import torch
from overrides import overrides
from allennlp.modules.attention.attention import Attention
from allennlp.nn import util
@Attention.register("cosine")
class CosineAttention(Attention):
"""
Computes attention between a vector and a matrix using cosine similarity.
Registered as an `Attention` with name "cosine".
"""
@overrides
def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
a_norm = vector / (
vector.norm(p=2, dim=-1, keepdim=True) + util.tiny_value_of_dtype(vector.dtype)
)
b_norm = matrix / (
matrix.norm(p=2, dim=-1, keepdim=True) + util.tiny_value_of_dtype(matrix.dtype)
)
return torch.bmm(a_norm.unsqueeze(dim=1), b_norm.transpose(-1, -2)).squeeze(1)
| allennlp-master | allennlp/modules/attention/cosine_attention.py |
import math
import torch
from torch.nn import Parameter
from overrides import overrides
from allennlp.modules.attention.attention import Attention
from allennlp.nn import util
from allennlp.nn.activations import Activation
@Attention.register("linear")
class LinearAttention(Attention):
"""
This `Attention` module performs a dot product between a vector of weights and some
combination of the two input vectors, followed by an (optional) activation function. The
combination used is configurable.
If the two vectors are `x` and `y`, we allow the following kinds of combinations : `x`,
`y`, `x*y`, `x+y`, `x-y`, `x/y`, where each of those binary operations is performed
elementwise. You can list as many combinations as you want, comma separated. For example, you
might give `x,y,x*y` as the `combination` parameter to this class. The computed similarity
function would then be `w^T [x; y; x*y] + b`, where `w` is a vector of weights, `b` is a
bias parameter, and `[;]` is vector concatenation.
Note that if you want a bilinear similarity function with a diagonal weight matrix W, where the
similarity function is computed as `x * w * y + b` (with `w` the diagonal of `W`), you can
accomplish that with this class by using "x*y" for `combination`.
Registered as an `Attention` with name "linear".
# Parameters
tensor_1_dim : `int`, required
The dimension of the first tensor, `x`, described above. This is `x.size()[-1]` - the
length of the vector that will go into the similarity computation. We need this so we can
build weight vectors correctly.
tensor_2_dim : `int`, required
The dimension of the second tensor, `y`, described above. This is `y.size()[-1]` - the
length of the vector that will go into the similarity computation. We need this so we can
build weight vectors correctly.
combination : `str`, optional (default=`"x,y"`)
Described above.
activation : `Activation`, optional (default=`linear`)
An activation function applied after the `w^T * [x;y] + b` calculation. Default is
linear, i.e. no activation.
normalize : `bool`, optional (default=`True`)
"""
def __init__(
self,
tensor_1_dim: int,
tensor_2_dim: int,
combination: str = "x,y",
activation: Activation = None,
normalize: bool = True,
) -> None:
super().__init__(normalize)
self._combination = combination
combined_dim = util.get_combined_dim(combination, [tensor_1_dim, tensor_2_dim])
self._weight_vector = Parameter(torch.Tensor(combined_dim))
self._bias = Parameter(torch.Tensor(1))
self._activation = activation or Activation.by_name("linear")()
self.reset_parameters()
def reset_parameters(self):
std = math.sqrt(6 / (self._weight_vector.size(0) + 1))
self._weight_vector.data.uniform_(-std, std)
self._bias.data.fill_(0)
@overrides
def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
combined_tensors = util.combine_tensors_and_multiply(
self._combination, [vector.unsqueeze(1), matrix], self._weight_vector
)
return self._activation(combined_tensors.squeeze(1) + self._bias)
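# A minimal usage sketch: the dimensions below are illustrative assumptions. With the
# default combination "x,y", the score for each row is `w^T [x; y] + b`, so the two
# input dimensions do not need to match.
if __name__ == "__main__":
    attention = LinearAttention(tensor_1_dim=3, tensor_2_dim=6)
    vector = torch.randn(2, 3)  # (batch_size, tensor_1_dim)
    matrix = torch.randn(2, 5, 6)  # (batch_size, num_rows, tensor_2_dim)
    scores = attention(vector, matrix)
    assert scores.shape == (2, 5)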
| allennlp-master | allennlp/modules/attention/linear_attention.py |
from overrides import overrides
import torch
from torch.nn.parameter import Parameter
from allennlp.modules.attention.attention import Attention
@Attention.register("additive")
class AdditiveAttention(Attention):
"""
Computes attention between a vector and a matrix using an additive attention function. This
function has two matrices `W`, `U` and a vector `V`. The similarity between the vector
`x` and the matrix `y` is computed as `V tanh(Wx + Uy)`.
    This attention is often referred to as concat or additive attention. It was introduced in
<https://arxiv.org/abs/1409.0473> by Bahdanau et al.
Registered as an `Attention` with name "additive".
# Parameters
vector_dim : `int`, required
The dimension of the vector, `x`, described above. This is `x.size()[-1]` - the length
of the vector that will go into the similarity computation. We need this so we can build
the weight matrix correctly.
matrix_dim : `int`, required
The dimension of the matrix, `y`, described above. This is `y.size()[-1]` - the length
of the vector that will go into the similarity computation. We need this so we can build
the weight matrix correctly.
normalize : `bool`, optional (default = `True`)
If true, we normalize the computed similarities with a softmax, to return a probability
distribution for your attention. If false, this is just computing a similarity score.
"""
def __init__(self, vector_dim: int, matrix_dim: int, normalize: bool = True) -> None:
super().__init__(normalize)
self._w_matrix = Parameter(torch.Tensor(vector_dim, vector_dim))
self._u_matrix = Parameter(torch.Tensor(matrix_dim, vector_dim))
self._v_vector = Parameter(torch.Tensor(vector_dim, 1))
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self._w_matrix)
torch.nn.init.xavier_uniform_(self._u_matrix)
torch.nn.init.xavier_uniform_(self._v_vector)
@overrides
def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
intermediate = vector.matmul(self._w_matrix).unsqueeze(1) + matrix.matmul(self._u_matrix)
intermediate = torch.tanh(intermediate)
return intermediate.matmul(self._v_vector).squeeze(2)
| allennlp-master | allennlp/modules/attention/additive_attention.py |
from typing import Dict
import inspect
import torch
from overrides import overrides
from allennlp.common.checks import ConfigurationError
from allennlp.data import TextFieldTensors
from allennlp.modules.text_field_embedders.text_field_embedder import TextFieldEmbedder
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.modules.token_embedders import EmptyEmbedder
@TextFieldEmbedder.register("basic")
class BasicTextFieldEmbedder(TextFieldEmbedder):
"""
This is a `TextFieldEmbedder` that wraps a collection of
[`TokenEmbedder`](../token_embedders/token_embedder.md) objects. Each
`TokenEmbedder` embeds or encodes the representation output from one
[`allennlp.data.TokenIndexer`](../../data/token_indexers/token_indexer.md). As the data produced by a
[`allennlp.data.fields.TextField`](../../data/fields/text_field.md) is a dictionary mapping names to these
    representations, we take `TokenEmbedders` with corresponding names. Each `TokenEmbedder`
    embeds its input, and the results are concatenated in an arbitrary (but consistent) order.
Registered as a `TextFieldEmbedder` with name "basic", which is also the default.
# Parameters
token_embedders : `Dict[str, TokenEmbedder]`, required.
A dictionary mapping token embedder names to implementations.
These names should match the corresponding indexer used to generate
the tensor passed to the TokenEmbedder.
"""
def __init__(self, token_embedders: Dict[str, TokenEmbedder]) -> None:
super().__init__()
# NOTE(mattg): I'd prefer to just use ModuleDict(token_embedders) here, but that changes
# weight locations in torch state dictionaries and invalidates all prior models, just for a
# cosmetic change in the code.
self._token_embedders = token_embedders
for key, embedder in token_embedders.items():
name = "token_embedder_%s" % key
self.add_module(name, embedder)
self._ordered_embedder_keys = sorted(self._token_embedders.keys())
@overrides
def get_output_dim(self) -> int:
output_dim = 0
for embedder in self._token_embedders.values():
output_dim += embedder.get_output_dim()
return output_dim
def forward(
self, text_field_input: TextFieldTensors, num_wrapping_dims: int = 0, **kwargs
) -> torch.Tensor:
if sorted(self._token_embedders.keys()) != sorted(text_field_input.keys()):
message = "Mismatched token keys: %s and %s" % (
str(self._token_embedders.keys()),
str(text_field_input.keys()),
)
embedder_keys = set(self._token_embedders.keys())
input_keys = set(text_field_input.keys())
if embedder_keys > input_keys and all(
isinstance(embedder, EmptyEmbedder)
for name, embedder in self._token_embedders.items()
if name in embedder_keys - input_keys
):
# Allow extra embedders that are only in the token embedders (but not input) and are empty to pass
# config check
pass
else:
raise ConfigurationError(message)
embedded_representations = []
for key in self._ordered_embedder_keys:
# Note: need to use getattr here so that the pytorch voodoo
# with submodules works with multiple GPUs.
embedder = getattr(self, "token_embedder_{}".format(key))
if isinstance(embedder, EmptyEmbedder):
# Skip empty embedders
continue
forward_params = inspect.signature(embedder.forward).parameters
forward_params_values = {}
missing_tensor_args = set()
for param in forward_params.keys():
if param in kwargs:
forward_params_values[param] = kwargs[param]
else:
missing_tensor_args.add(param)
for _ in range(num_wrapping_dims):
embedder = TimeDistributed(embedder)
tensors: Dict[str, torch.Tensor] = text_field_input[key]
if len(tensors) == 1 and len(missing_tensor_args) == 1:
# If there's only one tensor argument to the embedder, and we just have one tensor to
# embed, we can just pass in that tensor, without requiring a name match.
token_vectors = embedder(list(tensors.values())[0], **forward_params_values)
else:
# If there are multiple tensor arguments, we have to require matching names from the
# TokenIndexer. I don't think there's an easy way around that.
token_vectors = embedder(**tensors, **forward_params_values)
if token_vectors is not None:
# To handle some very rare use cases, we allow the return value of the embedder to
# be None; we just skip it in that case.
embedded_representations.append(token_vectors)
return torch.cat(embedded_representations, dim=-1)
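# A minimal usage sketch: the vocabulary size, embedding dimension, and the "tokens" key
# below are illustrative assumptions. The nested dictionary mirrors what a `TextField`
# with a single single-id token indexer produces.
if __name__ == "__main__":
    from allennlp.modules.token_embedders import Embedding
    embedder = BasicTextFieldEmbedder({"tokens": Embedding(embedding_dim=5, num_embeddings=20)})
    token_ids = torch.tensor([[1, 2, 3], [4, 5, 0]])  # (batch_size, num_tokens)
    text_field_tensors = {"tokens": {"tokens": token_ids}}
    embedded = embedder(text_field_tensors)
    assert embedded.shape == (2, 3, embedder.get_output_dim())  # (2, 3, 5)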
| allennlp-master | allennlp/modules/text_field_embedders/basic_text_field_embedder.py |
"""
A `TextFieldEmbedder` is a `Module` that takes as input the `dict` of NumPy arrays
produced by a `TextField` and returns as output an embedded representation of the tokens in that field.
"""
from allennlp.modules.text_field_embedders.text_field_embedder import TextFieldEmbedder
from allennlp.modules.text_field_embedders.basic_text_field_embedder import BasicTextFieldEmbedder
| allennlp-master | allennlp/modules/text_field_embedders/__init__.py |
import torch
from allennlp.common import Registrable
from allennlp.data import TextFieldTensors
class TextFieldEmbedder(torch.nn.Module, Registrable):
"""
A `TextFieldEmbedder` is a `Module` that takes as input the
[`DataArray`](../../data/fields/text_field.md) produced by a [`TextField`](../../data/fields/text_field.md) and
returns as output an embedded representation of the tokens in that field.
The `DataArrays` produced by `TextFields` are _dictionaries_ with named representations, like
"words" and "characters". When you create a `TextField`, you pass in a dictionary of
[`TokenIndexer`](../../data/token_indexers/token_indexer.md) objects, telling the field how exactly the
tokens in the field should be represented. This class changes the type signature of `Module.forward`,
restricting `TextFieldEmbedders` to take inputs corresponding to a single `TextField`, which is
a dictionary of tensors with the same names as were passed to the `TextField`.
We also add a method to the basic `Module` API: `get_output_dim()`. You might need this
if you want to construct a `Linear` layer using the output of this embedder, for instance.
"""
default_implementation = "basic"
def forward(
self, text_field_input: TextFieldTensors, num_wrapping_dims: int = 0, **kwargs
) -> torch.Tensor:
"""
# Parameters
text_field_input : `TextFieldTensors`
A dictionary that was the output of a call to `TextField.as_tensor`. Each tensor in
here is assumed to have a shape roughly similar to `(batch_size, sequence_length)`
(perhaps with an extra trailing dimension for the characters in each token).
num_wrapping_dims : `int`, optional (default=`0`)
If you have a `ListField[TextField]` that created the `text_field_input`, you'll
end up with tensors of shape `(batch_size, wrapping_dim1, wrapping_dim2, ...,
sequence_length)`. This parameter tells us how many wrapping dimensions there are, so
that we can correctly `TimeDistribute` the embedding of each named representation.
"""
raise NotImplementedError
def get_output_dim(self) -> int:
"""
Returns the dimension of the vector representing each token in the output of this
`TextFieldEmbedder`. This is _not_ the shape of the returned tensor, but the last element
of that shape.
"""
raise NotImplementedError
| allennlp-master | allennlp/modules/text_field_embedders/text_field_embedder.py |
from overrides import overrides
import torch
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
from allennlp.nn.util import get_lengths_from_binary_sequence_mask
@Seq2VecEncoder.register("boe")
@Seq2VecEncoder.register("bag_of_embeddings")
class BagOfEmbeddingsEncoder(Seq2VecEncoder):
"""
A `BagOfEmbeddingsEncoder` is a simple [`Seq2VecEncoder`](./seq2vec_encoder.md) which simply sums
the embeddings of a sequence across the time dimension. The input to this module is of shape
`(batch_size, num_tokens, embedding_dim)`, and the output is of shape `(batch_size, embedding_dim)`.
Registered as a `Seq2VecEncoder` with name "bag_of_embeddings" and "boe".
# Parameters
embedding_dim : `int`, required
This is the input dimension to the encoder.
averaged : `bool`, optional (default=`False`)
If `True`, this module will average the embeddings across time, rather than simply summing
        (i.e., we will divide the summed embeddings by the length of the sentence).
"""
def __init__(self, embedding_dim: int, averaged: bool = False) -> None:
super().__init__()
self._embedding_dim = embedding_dim
self._averaged = averaged
@overrides
def get_input_dim(self) -> int:
return self._embedding_dim
@overrides
def get_output_dim(self) -> int:
return self._embedding_dim
def forward(self, tokens: torch.Tensor, mask: torch.BoolTensor = None):
if mask is not None:
tokens = tokens * mask.unsqueeze(-1)
# Our input has shape `(batch_size, num_tokens, embedding_dim)`, so we sum out the `num_tokens`
# dimension.
summed = tokens.sum(1)
if self._averaged:
if mask is not None:
lengths = get_lengths_from_binary_sequence_mask(mask)
length_mask = lengths > 0
# Set any length 0 to 1, to avoid dividing by zero.
lengths = torch.max(lengths, lengths.new_ones(1))
else:
lengths = tokens.new_full((1,), fill_value=tokens.size(1))
length_mask = None
summed = summed / lengths.unsqueeze(-1).float()
if length_mask is not None:
summed = summed * (length_mask > 0).unsqueeze(-1)
return summed
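# A minimal usage sketch: the shapes below are illustrative assumptions. With
# `averaged=True`, padded positions are excluded from both the sum and the divisor.
if __name__ == "__main__":
    encoder = BagOfEmbeddingsEncoder(embedding_dim=4, averaged=True)
    tokens = torch.randn(2, 3, 4)  # (batch_size, num_tokens, embedding_dim)
    mask = torch.tensor([[True, True, True], [True, False, False]])
    pooled = encoder(tokens, mask)
    assert pooled.shape == (2, encoder.get_output_dim())  # (2, 4)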
| allennlp-master | allennlp/modules/seq2vec_encoders/boe_encoder.py |
from typing import Sequence, List, Callable
import torch
import numpy as np
from allennlp.common.checks import ConfigurationError
from allennlp.modules.layer_norm import LayerNorm
from allennlp.modules.highway import Highway
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
_VALID_PROJECTION_LOCATIONS = {"after_cnn", "after_highway", None}
@Seq2VecEncoder.register("cnn-highway")
class CnnHighwayEncoder(Seq2VecEncoder):
"""
The character CNN + highway encoder from
[Kim et al "Character aware neural language models"](https://arxiv.org/abs/1508.06615)
with an optional projection.
Registered as a `Seq2VecEncoder` with name "cnn-highway".
# Parameters
embedding_dim : `int`, required
The dimension of the initial character embedding.
filters : `Sequence[Sequence[int]]`, required
A sequence of pairs (filter_width, num_filters).
num_highway : `int`, required
The number of highway layers.
projection_dim : `int`, required
The output dimension of the projection layer.
activation : `str`, optional (default = `'relu'`)
The activation function for the convolutional layers.
projection_location : `str`, optional (default = `'after_highway'`)
Where to apply the projection layer. Valid values are
        'after_highway', 'after_cnn', and None.
    do_layer_norm : `bool`, optional (default = `False`)
        If `True`, apply `LayerNorm` to the final encoded representation.
"""
def __init__(
self,
embedding_dim: int,
filters: Sequence[Sequence[int]],
num_highway: int,
projection_dim: int,
activation: str = "relu",
projection_location: str = "after_highway",
do_layer_norm: bool = False,
) -> None:
super().__init__()
if projection_location not in _VALID_PROJECTION_LOCATIONS:
raise ConfigurationError(f"unknown projection location: {projection_location}")
self.input_dim = embedding_dim
self.output_dim = projection_dim
self._projection_location = projection_location
if activation == "tanh":
self._activation = torch.nn.functional.tanh
elif activation == "relu":
self._activation = torch.nn.functional.relu
else:
raise ConfigurationError(f"unknown activation {activation}")
# Create the convolutions
self._convolutions: List[torch.nn.Module] = []
for i, (width, num) in enumerate(filters):
conv = torch.nn.Conv1d(
in_channels=embedding_dim, out_channels=num, kernel_size=width, bias=True
)
conv.weight.data.uniform_(-0.05, 0.05)
conv.bias.data.fill_(0.0)
self.add_module(f"char_conv_{i}", conv) # needs to match the old ELMo name
self._convolutions.append(conv)
# Create the highway layers
num_filters = sum(num for _, num in filters)
if projection_location == "after_cnn":
highway_dim = projection_dim
else:
# highway_dim is the number of cnn filters
highway_dim = num_filters
self._highways = Highway(highway_dim, num_highway, activation=torch.nn.functional.relu)
for highway_layer in self._highways._layers:
# highway is a linear layer for each highway layer
# with fused W and b weights
highway_layer.weight.data.normal_(mean=0.0, std=np.sqrt(1.0 / highway_dim))
highway_layer.bias[:highway_dim].data.fill_(0.0)
highway_layer.bias[highway_dim:].data.fill_(2.0)
# Projection layer: always num_filters -> projection_dim
self._projection = torch.nn.Linear(num_filters, projection_dim, bias=True)
self._projection.weight.data.normal_(mean=0.0, std=np.sqrt(1.0 / num_filters))
self._projection.bias.data.fill_(0.0)
# And add a layer norm
if do_layer_norm:
self._layer_norm: Callable = LayerNorm(self.output_dim)
else:
self._layer_norm = lambda tensor: tensor
    def forward(self, inputs: torch.Tensor, mask: torch.BoolTensor) -> torch.Tensor:
"""
Compute context insensitive token embeddings for ELMo representations.
# Parameters
inputs: `torch.Tensor`
Shape `(batch_size, num_characters, embedding_dim)`
Character embeddings representing the current batch.
mask: `torch.BoolTensor`
Shape `(batch_size, num_characters)`
Currently unused. The mask for characters is implicit. See TokenCharactersEncoder.forward.
# Returns
`encoding`:
Shape `(batch_size, projection_dim)` tensor with context-insensitive token representations.
"""
# convolutions want (batch_size, embedding_dim, num_characters)
inputs = inputs.transpose(1, 2)
convolutions = []
for i in range(len(self._convolutions)):
char_conv_i = getattr(self, f"char_conv_{i}")
convolved = char_conv_i(inputs)
# (batch_size, n_filters for this width)
convolved, _ = torch.max(convolved, dim=-1)
convolved = self._activation(convolved)
convolutions.append(convolved)
# (batch_size, n_filters)
token_embedding = torch.cat(convolutions, dim=-1)
if self._projection_location == "after_cnn":
token_embedding = self._projection(token_embedding)
# apply the highway layers (batch_size, highway_dim)
token_embedding = self._highways(token_embedding)
if self._projection_location == "after_highway":
# final projection (batch_size, projection_dim)
token_embedding = self._projection(token_embedding)
# Apply layer norm if appropriate
token_embedding = self._layer_norm(token_embedding)
return token_embedding
def get_input_dim(self) -> int:
return self.input_dim
def get_output_dim(self) -> int:
return self.output_dim
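# A minimal usage sketch: the filter specification and dimensions below are illustrative
# assumptions, not the ELMo configuration. With two filter groups of 10 and 12 filters,
# the highway layers see 22 features, which are then projected down to `projection_dim`.
if __name__ == "__main__":
    encoder = CnnHighwayEncoder(
        embedding_dim=8, filters=[(2, 10), (3, 12)], num_highway=1, projection_dim=16
    )
    character_embeddings = torch.randn(2, 6, 8)  # (batch_size, num_characters, embedding_dim)
    mask = torch.ones(2, 6, dtype=torch.bool)  # currently unused by this encoder
    token_embedding = encoder(character_embeddings, mask)
    assert token_embedding.shape == (2, encoder.get_output_dim())  # (2, 16)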
| allennlp-master | allennlp/modules/seq2vec_encoders/cnn_highway_encoder.py |
from overrides import overrides
import torch.nn
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
from allennlp.nn.util import get_final_encoder_states
@Seq2VecEncoder.register("cls_pooler")
class ClsPooler(Seq2VecEncoder):
"""
Just takes the first vector from a list of vectors (which in a transformer is typically the
[CLS] token) and returns it. For BERT, it's recommended to use `BertPooler` instead.
Registered as a `Seq2VecEncoder` with name "cls_pooler".
# Parameters
embedding_dim: `int`
This isn't needed for any computation that we do, but we sometimes rely on `get_input_dim`
and `get_output_dim` to check parameter settings, or to instantiate final linear layers. In
order to give the right values there, we need to know the embedding dimension. If you're
using this with a transformer from the `transformers` library, this can often be found with
`model.config.hidden_size`, if you're not sure.
cls_is_last_token: `bool`, optional
The [CLS] token is the first token for most of the pretrained transformer models.
For some models such as XLNet, however, it is the last token, and we therefore need to
select at the end.
"""
def __init__(self, embedding_dim: int, cls_is_last_token: bool = False):
super().__init__()
self._embedding_dim = embedding_dim
self._cls_is_last_token = cls_is_last_token
@overrides
def get_input_dim(self) -> int:
return self._embedding_dim
@overrides
def get_output_dim(self) -> int:
return self._embedding_dim
@overrides
def forward(self, tokens: torch.Tensor, mask: torch.BoolTensor = None):
# tokens is assumed to have shape (batch_size, sequence_length, embedding_dim).
# mask is assumed to have shape (batch_size, sequence_length) with all 1s preceding all 0s.
if not self._cls_is_last_token:
return tokens[:, 0, :]
else: # [CLS] at the end
if mask is None:
raise ValueError("Must provide mask for transformer models with [CLS] at the end.")
return get_final_encoder_states(tokens, mask)
| allennlp-master | allennlp/modules/seq2vec_encoders/cls_pooler.py |
from allennlp.modules.encoder_base import _EncoderBase
from allennlp.common import Registrable
class Seq2VecEncoder(_EncoderBase, Registrable):
"""
A `Seq2VecEncoder` is a `Module` that takes as input a sequence of vectors and returns a
single vector. Input shape : `(batch_size, sequence_length, input_dim)`; output shape:
`(batch_size, output_dim)`.
We add two methods to the basic `Module` API: `get_input_dim()` and `get_output_dim()`.
You might need this if you want to construct a `Linear` layer using the output of this encoder,
or to raise sensible errors for mis-matching input dimensions.
"""
def get_input_dim(self) -> int:
"""
Returns the dimension of the vector input for each element in the sequence input
to a `Seq2VecEncoder`. This is `not` the shape of the input tensor, but the
last element of that shape.
"""
raise NotImplementedError
def get_output_dim(self) -> int:
"""
Returns the dimension of the final vector output by this `Seq2VecEncoder`. This is `not`
the shape of the returned tensor, but the last element of that shape.
"""
raise NotImplementedError
| allennlp-master | allennlp/modules/seq2vec_encoders/seq2vec_encoder.py |
from typing import Optional, Tuple
from overrides import overrides
import torch
from torch.nn import Conv1d, Linear
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
from allennlp.nn import Activation
from allennlp.nn.util import min_value_of_dtype
@Seq2VecEncoder.register("cnn")
class CnnEncoder(Seq2VecEncoder):
"""
A `CnnEncoder` is a combination of multiple convolution layers and max pooling layers. As a
[`Seq2VecEncoder`](./seq2vec_encoder.md), the input to this module is of shape `(batch_size, num_tokens,
input_dim)`, and the output is of shape `(batch_size, output_dim)`.
The CNN has one convolution layer for each ngram filter size. Each convolution operation gives
out a vector of size num_filters. The number of times a convolution layer will be used
is `num_tokens - ngram_size + 1`. The corresponding maxpooling layer aggregates all these
outputs from the convolution layer and outputs the max.
This operation is repeated for every ngram size passed, and consequently the dimensionality of
    the output after maxpooling is `len(ngram_filter_sizes) * num_filters`. This then gets
    (optionally) projected down to a lower dimensional output, specified by `output_dim`, using
    a fully connected layer. For more details, refer to "A Sensitivity Analysis of (and
    Practitioners’ Guide to) Convolutional Neural Networks for Sentence Classification",
    Zhang and Wallace 2016, particularly Figure 1.
Registered as a `Seq2VecEncoder` with name "cnn".
# Parameters
embedding_dim : `int`, required
This is the input dimension to the encoder. We need this because we can't do shape
inference in pytorch, and we need to know what size filters to construct in the CNN.
num_filters : `int`, required
This is the output dim for each convolutional layer, which is the number of "filters"
learned by that layer.
ngram_filter_sizes : `Tuple[int]`, optional (default=`(2, 3, 4, 5)`)
This specifies both the number of convolutional layers we will create and their sizes. The
default of `(2, 3, 4, 5)` will have four convolutional layers, corresponding to encoding
ngrams of size 2 to 5 with some number of filters.
conv_layer_activation : `Activation`, optional (default=`torch.nn.ReLU`)
Activation to use after the convolution layers.
output_dim : `Optional[int]`, optional (default=`None`)
After doing convolutions and pooling, we'll project the collected features into a vector of
this size. If this value is `None`, we will just return the result of the max pooling,
giving an output of shape `len(ngram_filter_sizes) * num_filters`.
"""
def __init__(
self,
embedding_dim: int,
num_filters: int,
ngram_filter_sizes: Tuple[int, ...] = (2, 3, 4, 5),
conv_layer_activation: Activation = None,
output_dim: Optional[int] = None,
) -> None:
super().__init__()
self._embedding_dim = embedding_dim
self._num_filters = num_filters
self._ngram_filter_sizes = ngram_filter_sizes
self._activation = conv_layer_activation or Activation.by_name("relu")()
self._convolution_layers = [
Conv1d(
in_channels=self._embedding_dim,
out_channels=self._num_filters,
kernel_size=ngram_size,
)
for ngram_size in self._ngram_filter_sizes
]
for i, conv_layer in enumerate(self._convolution_layers):
self.add_module("conv_layer_%d" % i, conv_layer)
maxpool_output_dim = self._num_filters * len(self._ngram_filter_sizes)
if output_dim:
self.projection_layer = Linear(maxpool_output_dim, output_dim)
self._output_dim = output_dim
else:
self.projection_layer = None
self._output_dim = maxpool_output_dim
@overrides
def get_input_dim(self) -> int:
return self._embedding_dim
@overrides
def get_output_dim(self) -> int:
return self._output_dim
def forward(self, tokens: torch.Tensor, mask: torch.BoolTensor):
if mask is not None:
tokens = tokens * mask.unsqueeze(-1)
else:
# If mask doesn't exist create one of shape (batch_size, num_tokens)
mask = torch.ones(tokens.shape[0], tokens.shape[1], device=tokens.device).bool()
# Our input is expected to have shape `(batch_size, num_tokens, embedding_dim)`. The
# convolution layers expect input of shape `(batch_size, in_channels, sequence_length)`,
# where the conv layer `in_channels` is our `embedding_dim`. We thus need to transpose the
# tensor first.
tokens = torch.transpose(tokens, 1, 2)
# Each convolution layer returns output of size `(batch_size, num_filters, pool_length)`,
# where `pool_length = num_tokens - ngram_size + 1`. We then do an activation function,
# masking, then do max pooling over each filter for the whole input sequence.
# Because our max pooling is simple, we just use `torch.max`. The resultant tensor has shape
# `(batch_size, num_conv_layers * num_filters)`, which then gets projected using the
# projection layer, if requested.
# To ensure the cnn_encoder respects masking we add a large negative value to
# the activations of all filters that convolved over a masked token. We do this by
# first enumerating all filters for a given convolution size (torch.arange())
# then by comparing it to an index of the last filter that does not involve a masked
# token (.ge()) and finally adjusting dimensions to allow for addition and multiplying
# by a large negative value (.unsqueeze())
filter_outputs = []
batch_size = tokens.shape[0]
# shape: (batch_size, 1)
last_unmasked_tokens = mask.sum(dim=1).unsqueeze(dim=-1)
for i in range(len(self._convolution_layers)):
convolution_layer = getattr(self, "conv_layer_{}".format(i))
pool_length = tokens.shape[2] - convolution_layer.kernel_size[0] + 1
# Forward pass of the convolutions.
# shape: (batch_size, num_filters, pool_length)
activations = self._activation(convolution_layer(tokens))
# Create activation mask.
# shape: (batch_size, pool_length)
indices = (
torch.arange(pool_length, device=activations.device)
.unsqueeze(0)
.expand(batch_size, pool_length)
)
# shape: (batch_size, pool_length)
activations_mask = indices.ge(
last_unmasked_tokens - convolution_layer.kernel_size[0] + 1
)
# shape: (batch_size, num_filters, pool_length)
activations_mask = activations_mask.unsqueeze(1).expand_as(activations)
# Replace masked out values with smallest possible value of the dtype so
# that max pooling will ignore these activations.
# shape: (batch_size, pool_length)
activations = activations + (activations_mask * min_value_of_dtype(activations.dtype))
# Pick out the max filters
filter_outputs.append(activations.max(dim=2)[0])
# Now we have a list of `num_conv_layers` tensors of shape `(batch_size, num_filters)`.
# Concatenating them gives us a tensor of shape `(batch_size, num_filters * num_conv_layers)`.
maxpool_output = (
torch.cat(filter_outputs, dim=1) if len(filter_outputs) > 1 else filter_outputs[0]
)
# Replace the maxpool activations that picked up the masks with 0s
maxpool_output[maxpool_output == min_value_of_dtype(maxpool_output.dtype)] = 0.0
if self.projection_layer:
result = self.projection_layer(maxpool_output)
else:
result = maxpool_output
return result
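# A minimal usage sketch: the dimensions and filter sizes below are illustrative
# assumptions. Without `output_dim`, the encoder returns
# `len(ngram_filter_sizes) * num_filters` features per instance.
if __name__ == "__main__":
    encoder = CnnEncoder(embedding_dim=8, num_filters=6, ngram_filter_sizes=(2, 3))
    tokens = torch.randn(2, 7, 8)  # (batch_size, num_tokens, embedding_dim)
    mask = torch.ones(2, 7, dtype=torch.bool)
    pooled = encoder(tokens, mask)
    assert pooled.shape == (2, encoder.get_output_dim())  # (2, 12)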
| allennlp-master | allennlp/modules/seq2vec_encoders/cnn_encoder.py |
from typing import Optional, Dict, Any
from overrides import overrides
import torch
import torch.nn
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
@Seq2VecEncoder.register("bert_pooler")
class BertPooler(Seq2VecEncoder):
"""
The pooling layer at the end of the BERT model. This returns an embedding for the
[CLS] token, after passing it through a non-linear tanh activation; the non-linear layer
is also part of the BERT model. If you want to use the pretrained BERT model
to build a classifier and you want to use the AllenNLP token-indexer ->
token-embedder -> seq2vec encoder setup, this is the Seq2VecEncoder to use.
(For example, if you want to experiment with other embedding / encoding combinations.)
Registered as a `Seq2VecEncoder` with name "bert_pooler".
# Parameters
    pretrained_model : `str`, required
        The name of the pretrained BERT model to use. Under the hood we call
        `transformers.AutoModel.from_pretrained(pretrained_model)` (through AllenNLP's
        transformer cache) and use that model's pooler.
requires_grad : `bool`, optional, (default = `True`)
If True, the weights of the pooler will be updated during training.
Otherwise they will not.
dropout : `float`, optional, (default = `0.0`)
Amount of dropout to apply after pooling
transformer_kwargs: `Dict[str, Any]`, optional (default = `None`)
Dictionary with
[additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/modeling_utils.py#L253)
for `AutoModel.from_pretrained`.
""" # noqa: E501
def __init__(
self,
pretrained_model: str,
*,
override_weights_file: Optional[str] = None,
override_weights_strip_prefix: Optional[str] = None,
requires_grad: bool = True,
dropout: float = 0.0,
transformer_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__()
from allennlp.common import cached_transformers
model = cached_transformers.get(
pretrained_model,
False,
override_weights_file,
override_weights_strip_prefix,
**(transformer_kwargs or {}),
)
self._dropout = torch.nn.Dropout(p=dropout)
import copy
self.pooler = copy.deepcopy(model.pooler)
for param in self.pooler.parameters():
param.requires_grad = requires_grad
self._embedding_dim = model.config.hidden_size
@overrides
def get_input_dim(self) -> int:
return self._embedding_dim
@overrides
def get_output_dim(self) -> int:
return self._embedding_dim
def forward(
self, tokens: torch.Tensor, mask: torch.BoolTensor = None, num_wrapping_dims: int = 0
):
pooler = self.pooler
for _ in range(num_wrapping_dims):
from allennlp.modules import TimeDistributed
pooler = TimeDistributed(pooler)
pooled = pooler(tokens)
pooled = self._dropout(pooled)
return pooled
| allennlp-master | allennlp/modules/seq2vec_encoders/bert_pooler.py |
"""
Modules that transform a sequence of input vectors
into a single output vector.
Some are just basic wrappers around existing PyTorch modules,
others are AllenNLP modules.
The available Seq2Vec encoders are
* `"gru"` https://pytorch.org/docs/master/nn.html#torch.nn.GRU
* `"lstm"` https://pytorch.org/docs/master/nn.html#torch.nn.LSTM
* `"rnn"` https://pytorch.org/docs/master/nn.html#torch.nn.RNN
* `"cnn"` allennlp.modules.seq2vec_encoders.cnn_encoder.CnnEncoder
* `"augmented_lstm"` allennlp.modules.augmented_lstm.AugmentedLstm
* `"alternating_lstm"` allennlp.modules.stacked_alternating_lstm.StackedAlternatingLstm
* `"stacked_bidirectional_lstm"` allennlp.modules.stacked_bidirectional_lstm.StackedBidirectionalLstm
"""
from allennlp.modules.seq2vec_encoders.bert_pooler import BertPooler
from allennlp.modules.seq2vec_encoders.boe_encoder import BagOfEmbeddingsEncoder
from allennlp.modules.seq2vec_encoders.cls_pooler import ClsPooler
from allennlp.modules.seq2vec_encoders.cnn_encoder import CnnEncoder
from allennlp.modules.seq2vec_encoders.cnn_highway_encoder import CnnHighwayEncoder
from allennlp.modules.seq2vec_encoders.pytorch_seq2vec_wrapper import (
AugmentedLstmSeq2VecEncoder,
GruSeq2VecEncoder,
LstmSeq2VecEncoder,
PytorchSeq2VecWrapper,
RnnSeq2VecEncoder,
StackedAlternatingLstmSeq2VecEncoder,
StackedBidirectionalLstmSeq2VecEncoder,
)
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
| allennlp-master | allennlp/modules/seq2vec_encoders/__init__.py |
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
from allennlp.modules.stacked_alternating_lstm import StackedAlternatingLstm
from allennlp.modules.stacked_bidirectional_lstm import StackedBidirectionalLstm
class PytorchSeq2VecWrapper(Seq2VecEncoder):
"""
Pytorch's RNNs have two outputs: the final hidden state for every time step,
and the hidden state at the last time step for every layer.
We just want the final hidden state of the last time step.
This wrapper pulls out that output, and adds a `get_output_dim` method, which is useful if you
want to, e.g., define a linear + softmax layer on top of this to get some distribution over a
set of labels. The linear layer needs to know its input dimension before it is called, and you
can get that from `get_output_dim`.
Also, there are lots of ways you could imagine going from an RNN hidden state at every
timestep to a single vector - you could take the last vector at all layers in the stack, do
some kind of pooling, take the last vector of the top layer in a stack, or many other options.
We just take the final hidden state vector, or in the case of a bidirectional RNN cell, we
concatenate the forward and backward final states together. TODO(mattg): allow for other ways
of wrapping RNNs.
In order to be wrapped with this wrapper, a class must have the following members:
- `self.input_size: int`
- `self.hidden_size: int`
- `def forward(inputs: PackedSequence, hidden_state: torch.tensor) ->
Tuple[PackedSequence, torch.Tensor]`.
- `self.bidirectional: bool` (optional)
This is what pytorch's RNN's look like - just make sure your class looks like those, and it
should work.
Note that we *require* you to pass sequence lengths when you call this module, to avoid subtle
bugs around masking. If you already have a `PackedSequence` you can pass `None` as the
second parameter.
"""
def __init__(self, module: torch.nn.modules.RNNBase) -> None:
# Seq2VecEncoders cannot be stateful.
super().__init__(stateful=False)
self._module = module
try:
if not self._module.batch_first:
raise ConfigurationError("Our encoder semantics assumes batch is always first!")
except AttributeError:
pass
def get_input_dim(self) -> int:
return self._module.input_size
def get_output_dim(self) -> int:
try:
is_bidirectional = self._module.bidirectional
except AttributeError:
is_bidirectional = False
return self._module.hidden_size * (2 if is_bidirectional else 1)
def forward(
self, inputs: torch.Tensor, mask: torch.BoolTensor, hidden_state: torch.Tensor = None
) -> torch.Tensor:
if mask is None:
# If a mask isn't passed, there is no padding in the batch of instances, so we can just
# return the last sequence output as the state. This doesn't work in the case of
# variable length sequences, as the last state for each element of the batch won't be
# at the end of the max sequence length, so we have to use the state of the RNN below.
return self._module(inputs, hidden_state)[0][:, -1, :]
batch_size = mask.size(0)
(
_,
state,
restoration_indices,
) = self.sort_and_run_forward(self._module, inputs, mask, hidden_state)
# Deal with the fact the LSTM state is a tuple of (state, memory).
if isinstance(state, tuple):
state = state[0]
num_layers_times_directions, num_valid, encoding_dim = state.size()
# Add back invalid rows.
if num_valid < batch_size:
# batch size is the second dimension here, because pytorch
# returns RNN state as a tensor of shape (num_layers * num_directions,
# batch_size, hidden_size)
zeros = state.new_zeros(
num_layers_times_directions, batch_size - num_valid, encoding_dim
)
state = torch.cat([state, zeros], 1)
# Restore the original indices and return the final state of the
# top layer. Pytorch's recurrent layers return state in the form
# (num_layers * num_directions, batch_size, hidden_size) regardless
# of the 'batch_first' flag, so we transpose, extract the relevant
# layer state (both forward and backward if using bidirectional layers)
# and return them as a single (batch_size, self.get_output_dim()) tensor.
# now of shape: (batch_size, num_layers * num_directions, hidden_size).
unsorted_state = state.transpose(0, 1).index_select(0, restoration_indices)
# Extract the last hidden vector, including both forward and backward states
# if the cell is bidirectional. Then reshape by concatenation (in the case
# we have bidirectional states) or just squash the 1st dimension in the non-
# bidirectional case. Return tensor has shape (batch_size, hidden_size * num_directions).
try:
last_state_index = 2 if self._module.bidirectional else 1
except AttributeError:
last_state_index = 1
last_layer_state = unsorted_state[:, -last_state_index:, :]
return last_layer_state.contiguous().view([-1, self.get_output_dim()])
@Seq2VecEncoder.register("gru")
class GruSeq2VecEncoder(PytorchSeq2VecWrapper):
"""
Registered as a `Seq2VecEncoder` with name "gru".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
):
module = torch.nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module=module)
@Seq2VecEncoder.register("lstm")
class LstmSeq2VecEncoder(PytorchSeq2VecWrapper):
"""
Registered as a `Seq2VecEncoder` with name "lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
):
module = torch.nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module=module)
@Seq2VecEncoder.register("rnn")
class RnnSeq2VecEncoder(PytorchSeq2VecWrapper):
"""
Registered as a `Seq2VecEncoder` with name "rnn".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
nonlinearity: str = "tanh",
bias: bool = True,
dropout: float = 0.0,
bidirectional: bool = False,
):
module = torch.nn.RNN(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
nonlinearity=nonlinearity,
bias=bias,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional,
)
super().__init__(module=module)
@Seq2VecEncoder.register("augmented_lstm")
class AugmentedLstmSeq2VecEncoder(PytorchSeq2VecWrapper):
"""
Registered as a `Seq2VecEncoder` with name "augmented_lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
go_forward: bool = True,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
) -> None:
module = AugmentedLstm(
input_size=input_size,
hidden_size=hidden_size,
go_forward=go_forward,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=use_input_projection_bias,
)
super().__init__(module=module)
@Seq2VecEncoder.register("alternating_lstm")
class StackedAlternatingLstmSeq2VecEncoder(PytorchSeq2VecWrapper):
"""
Registered as a `Seq2VecEncoder` with name "alternating_lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
) -> None:
module = StackedAlternatingLstm(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=use_input_projection_bias,
)
super().__init__(module=module)
@Seq2VecEncoder.register("stacked_bidirectional_lstm")
class StackedBidirectionalLstmSeq2VecEncoder(PytorchSeq2VecWrapper):
"""
Registered as a `Seq2VecEncoder` with name "stacked_bidirectional_lstm".
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
layer_dropout_probability: float = 0.0,
use_highway: bool = True,
) -> None:
module = StackedBidirectionalLstm(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
recurrent_dropout_probability=recurrent_dropout_probability,
layer_dropout_probability=layer_dropout_probability,
use_highway=use_highway,
)
super().__init__(module=module)
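# A minimal usage sketch (shapes and values below are illustrative assumptions, not part of
# the original module): encode a padded batch of embedded tokens into one vector per
# sequence with the registered "lstm" encoder defined above.
if __name__ == "__main__":
    encoder = LstmSeq2VecEncoder(input_size=50, hidden_size=100, bidirectional=True)
    token_embeddings = torch.randn(4, 20, 50)   # (batch_size, timesteps, input_size)
    mask = torch.ones(4, 20, dtype=torch.bool)  # True for real tokens, False for padding
    mask[3, 10:] = False                        # pretend the last sequence has only 10 tokens
    vector_per_sequence = encoder(token_embeddings, mask)
    # Bidirectional, so the output is 2 * hidden_size wide.
    assert vector_per_sequence.shape == (4, 200)
    assert encoder.get_output_dim() == 200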
| allennlp-master | allennlp/modules/seq2vec_encoders/pytorch_seq2vec_wrapper.py |
import torch
from overrides import overrides
from allennlp.modules.span_extractors.span_extractor import SpanExtractor
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.nn import util
@SpanExtractor.register("self_attentive")
class SelfAttentiveSpanExtractor(SpanExtractor):
"""
Computes span representations by generating an unnormalized attention score for each
    word in the document. Span representations are computed with respect to these
scores by normalising the attention scores for words inside the span.
Given these attention distributions over every span, this module weights the
corresponding vector representations of the words in the span by this distribution,
returning a weighted representation of each span.
Registered as a `SpanExtractor` with name "self_attentive".
# Parameters
input_dim : `int`, required.
The final dimension of the `sequence_tensor`.
# Returns
attended_text_embeddings : `torch.FloatTensor`.
        A tensor of shape (batch_size, num_spans, input_dim), where each span representation
is formed by locally normalising a global attention over the sequence. The only way
in which the attention distribution differs over different spans is in the set of words
over which they are normalized.
"""
def __init__(self, input_dim: int) -> None:
super().__init__()
self._input_dim = input_dim
self._global_attention = TimeDistributed(torch.nn.Linear(input_dim, 1))
def get_input_dim(self) -> int:
return self._input_dim
def get_output_dim(self) -> int:
return self._input_dim
@overrides
def forward(
self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
span_indices_mask: torch.BoolTensor = None,
) -> torch.FloatTensor:
# shape (batch_size, sequence_length, 1)
global_attention_logits = self._global_attention(sequence_tensor)
# shape (batch_size, sequence_length, embedding_dim + 1)
concat_tensor = torch.cat([sequence_tensor, global_attention_logits], -1)
concat_output, span_mask = util.batched_span_select(concat_tensor, span_indices)
# Shape: (batch_size, num_spans, max_batch_span_width, embedding_dim)
span_embeddings = concat_output[:, :, :, :-1]
# Shape: (batch_size, num_spans, max_batch_span_width)
span_attention_logits = concat_output[:, :, :, -1]
# Shape: (batch_size, num_spans, max_batch_span_width)
span_attention_weights = util.masked_softmax(span_attention_logits, span_mask)
# Do a weighted sum of the embedded spans with
# respect to the normalised attention distributions.
# Shape: (batch_size, num_spans, embedding_dim)
attended_text_embeddings = util.weighted_sum(span_embeddings, span_attention_weights)
if span_indices_mask is not None:
# Above we were masking the widths of spans with respect to the max
# span width in the batch. Here we are masking the spans which were
# originally passed in as padding.
return attended_text_embeddings * span_indices_mask.unsqueeze(-1)
return attended_text_embeddings
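# A minimal usage sketch (all shapes and indices below are illustrative assumptions):
# pool each span of a contextualized sequence into a single vector with the
# self-attentive extractor defined above. Span indices are inclusive.
if __name__ == "__main__":
    extractor = SelfAttentiveSpanExtractor(input_dim=8)
    sequence_tensor = torch.randn(2, 10, 8)  # (batch_size, sequence_length, input_dim)
    span_indices = torch.tensor([[[0, 2], [5, 9]], [[1, 1], [3, 6]]])  # two spans per element
    span_representations = extractor(sequence_tensor, span_indices)
    assert span_representations.shape == (2, 2, 8)
    assert extractor.get_output_dim() == 8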
| allennlp-master | allennlp/modules/span_extractors/self_attentive_span_extractor.py |
from typing import Optional
import torch
from overrides import overrides
from torch.nn.parameter import Parameter
from allennlp.common.checks import ConfigurationError
from allennlp.modules.span_extractors.span_extractor import SpanExtractor
from allennlp.modules.token_embedders.embedding import Embedding
from allennlp.nn import util
@SpanExtractor.register("bidirectional_endpoint")
class BidirectionalEndpointSpanExtractor(SpanExtractor):
"""
Represents spans from a bidirectional encoder as a concatenation of two different
representations of the span endpoints, one for the forward direction of the encoder
and one from the backward direction. This type of representation encodes some subtlety,
because when you consider the forward and backward directions separately, the end index
of the span for the backward direction's representation is actually the start index.
By default, this `SpanExtractor` represents spans as
`sequence_tensor[inclusive_span_end] - sequence_tensor[exclusive_span_start]`
    meaning that the representation is the difference between the last word in the span
and the word `before` the span started. Note that the start and end indices are with
respect to the direction that the RNN is going in, so for the backward direction, the
start/end indices are reversed.
Additionally, the width of the spans can be embedded and concatenated on to the
final combination.
The following other types of representation are supported for both the forward and backward
directions, assuming that `x = span_start_embeddings` and `y = span_end_embeddings`.
`x`, `y`, `x*y`, `x+y`, `x-y`, `x/y`, where each of those binary operations
is performed elementwise. You can list as many combinations as you want, comma separated.
For example, you might give `x,y,x*y` as the `combination` parameter to this class.
The computed similarity function would then be `[x; y; x*y]`, which can then be optionally
concatenated with an embedded representation of the width of the span.
Registered as a `SpanExtractor` with name "bidirectional_endpoint".
# Parameters
input_dim : `int`, required
The final dimension of the `sequence_tensor`.
forward_combination : `str`, optional (default = `"y-x"`).
The method used to combine the `forward_start_embeddings` and `forward_end_embeddings`
for the forward direction of the bidirectional representation.
See above for a full description.
backward_combination : `str`, optional (default = `"x-y"`).
The method used to combine the `backward_start_embeddings` and `backward_end_embeddings`
for the backward direction of the bidirectional representation.
See above for a full description.
num_width_embeddings : `int`, optional (default = `None`).
Specifies the number of buckets to use when representing
span width features.
span_width_embedding_dim : `int`, optional (default = `None`).
The embedding size for the span_width features.
bucket_widths : `bool`, optional (default = `False`).
Whether to bucket the span widths into log-space buckets. If `False`,
the raw span widths are used.
use_sentinels : `bool`, optional (default = `True`).
If `True`, sentinels are used to represent exclusive span indices for the elements
in the first and last positions in the sequence (as the exclusive indices for these
        elements are outside of the sequence boundary). This is not strictly necessary,
as you may know that your exclusive start and end indices are always within your sequence
representation, such as if you have appended/prepended <START> and <END> tokens to your
sequence.
"""
def __init__(
self,
input_dim: int,
forward_combination: str = "y-x",
backward_combination: str = "x-y",
num_width_embeddings: int = None,
span_width_embedding_dim: int = None,
bucket_widths: bool = False,
use_sentinels: bool = True,
) -> None:
super().__init__()
self._input_dim = input_dim
self._forward_combination = forward_combination
self._backward_combination = backward_combination
self._num_width_embeddings = num_width_embeddings
self._bucket_widths = bucket_widths
if self._input_dim % 2 != 0:
raise ConfigurationError(
"The input dimension is not divisible by 2, but the "
"BidirectionalEndpointSpanExtractor assumes the embedded representation "
"is bidirectional (and hence divisible by 2)."
)
self._span_width_embedding: Optional[Embedding] = None
if num_width_embeddings is not None and span_width_embedding_dim is not None:
self._span_width_embedding = Embedding(
num_embeddings=num_width_embeddings, embedding_dim=span_width_embedding_dim
)
elif num_width_embeddings is not None or span_width_embedding_dim is not None:
raise ConfigurationError(
"To use a span width embedding representation, you must"
"specify both num_width_buckets and span_width_embedding_dim."
)
self._use_sentinels = use_sentinels
if use_sentinels:
self._start_sentinel = Parameter(torch.randn([1, 1, int(input_dim / 2)]))
self._end_sentinel = Parameter(torch.randn([1, 1, int(input_dim / 2)]))
def get_input_dim(self) -> int:
return self._input_dim
def get_output_dim(self) -> int:
unidirectional_dim = int(self._input_dim / 2)
forward_combined_dim = util.get_combined_dim(
self._forward_combination, [unidirectional_dim, unidirectional_dim]
)
backward_combined_dim = util.get_combined_dim(
self._backward_combination, [unidirectional_dim, unidirectional_dim]
)
if self._span_width_embedding is not None:
return (
forward_combined_dim
+ backward_combined_dim
+ self._span_width_embedding.get_output_dim()
)
return forward_combined_dim + backward_combined_dim
@overrides
def forward(
self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
sequence_mask: torch.BoolTensor = None,
span_indices_mask: torch.BoolTensor = None,
) -> torch.FloatTensor:
# Both of shape (batch_size, sequence_length, embedding_size / 2)
forward_sequence, backward_sequence = sequence_tensor.split(
int(self._input_dim / 2), dim=-1
)
forward_sequence = forward_sequence.contiguous()
backward_sequence = backward_sequence.contiguous()
# shape (batch_size, num_spans)
span_starts, span_ends = [index.squeeze(-1) for index in span_indices.split(1, dim=-1)]
if span_indices_mask is not None:
span_starts = span_starts * span_indices_mask
span_ends = span_ends * span_indices_mask
# We want `exclusive` span starts, so we remove 1 from the forward span starts
# as the AllenNLP `SpanField` is inclusive.
# shape (batch_size, num_spans)
exclusive_span_starts = span_starts - 1
# shape (batch_size, num_spans, 1)
start_sentinel_mask = (exclusive_span_starts == -1).unsqueeze(-1)
# We want `exclusive` span ends for the backward direction
        # (so that the `start` of the span in that direction is exclusive), so
# we add 1 to the span ends as the AllenNLP `SpanField` is inclusive.
exclusive_span_ends = span_ends + 1
if sequence_mask is not None:
# shape (batch_size)
sequence_lengths = util.get_lengths_from_binary_sequence_mask(sequence_mask)
else:
# shape (batch_size), filled with the sequence length size of the sequence_tensor.
sequence_lengths = torch.ones_like(
sequence_tensor[:, 0, 0], dtype=torch.long
) * sequence_tensor.size(1)
# shape (batch_size, num_spans, 1)
end_sentinel_mask = (exclusive_span_ends >= sequence_lengths.unsqueeze(-1)).unsqueeze(-1)
# As we added 1 to the span_ends to make them exclusive, which might have caused indices
# equal to the sequence_length to become out of bounds, we multiply by the inverse of the
# end_sentinel mask to erase these indices (as we will replace them anyway in the block below).
# The same argument follows for the exclusive span start indices.
exclusive_span_ends = exclusive_span_ends * ~end_sentinel_mask.squeeze(-1)
exclusive_span_starts = exclusive_span_starts * ~start_sentinel_mask.squeeze(-1)
# We'll check the indices here at runtime, because it's difficult to debug
# if this goes wrong and it's tricky to get right.
if (exclusive_span_starts < 0).any() or (
exclusive_span_ends > sequence_lengths.unsqueeze(-1)
).any():
raise ValueError(
f"Adjusted span indices must lie inside the length of the sequence tensor, "
f"but found: exclusive_span_starts: {exclusive_span_starts}, "
f"exclusive_span_ends: {exclusive_span_ends} for a sequence tensor with lengths "
f"{sequence_lengths}."
)
# Forward Direction: start indices are exclusive. Shape (batch_size, num_spans, input_size / 2)
forward_start_embeddings = util.batched_index_select(
forward_sequence, exclusive_span_starts
)
# Forward Direction: end indices are inclusive, so we can just use span_ends.
# Shape (batch_size, num_spans, input_size / 2)
forward_end_embeddings = util.batched_index_select(forward_sequence, span_ends)
# Backward Direction: The backward start embeddings use the `forward` end
# indices, because we are going backwards.
# Shape (batch_size, num_spans, input_size / 2)
backward_start_embeddings = util.batched_index_select(
backward_sequence, exclusive_span_ends
)
# Backward Direction: The backward end embeddings use the `forward` start
# indices, because we are going backwards.
# Shape (batch_size, num_spans, input_size / 2)
backward_end_embeddings = util.batched_index_select(backward_sequence, span_starts)
if self._use_sentinels:
# If we're using sentinels, we need to replace all the elements which were
# outside the dimensions of the sequence_tensor with either the start sentinel,
# or the end sentinel.
forward_start_embeddings = (
forward_start_embeddings * ~start_sentinel_mask
+ start_sentinel_mask * self._start_sentinel
)
backward_start_embeddings = (
backward_start_embeddings * ~end_sentinel_mask
+ end_sentinel_mask * self._end_sentinel
)
# Now we combine the forward and backward spans in the manner specified by the
# respective combinations and concatenate these representations.
# Shape (batch_size, num_spans, forward_combination_dim)
forward_spans = util.combine_tensors(
self._forward_combination, [forward_start_embeddings, forward_end_embeddings]
)
# Shape (batch_size, num_spans, backward_combination_dim)
backward_spans = util.combine_tensors(
self._backward_combination, [backward_start_embeddings, backward_end_embeddings]
)
# Shape (batch_size, num_spans, forward_combination_dim + backward_combination_dim)
span_embeddings = torch.cat([forward_spans, backward_spans], -1)
if self._span_width_embedding is not None:
# Embed the span widths and concatenate to the rest of the representations.
if self._bucket_widths:
span_widths = util.bucket_values(
span_ends - span_starts, num_total_buckets=self._num_width_embeddings # type: ignore
)
else:
span_widths = span_ends - span_starts
span_width_embeddings = self._span_width_embedding(span_widths)
return torch.cat([span_embeddings, span_width_embeddings], -1)
if span_indices_mask is not None:
return span_embeddings * span_indices_mask.unsqueeze(-1)
return span_embeddings
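# A minimal usage sketch (shapes below are illustrative assumptions): input_dim must be
# the full bidirectional encoder size, i.e. twice the per-direction hidden size, and the
# default "y-x" / "x-y" combinations give an output of the same total width.
if __name__ == "__main__":
    extractor = BidirectionalEndpointSpanExtractor(input_dim=16)
    sequence_tensor = torch.randn(3, 12, 16)               # (batch_size, sequence_length, input_dim)
    span_indices = torch.tensor([[[0, 3], [4, 11]]] * 3)   # inclusive (start, end) endpoints
    span_embeddings = extractor(sequence_tensor, span_indices)
    assert span_embeddings.shape == (3, 2, 16)
    assert extractor.get_output_dim() == 16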
| allennlp-master | allennlp/modules/span_extractors/bidirectional_endpoint_span_extractor.py |
from typing import Optional
import torch
from torch.nn.parameter import Parameter
from overrides import overrides
from allennlp.modules.span_extractors.span_extractor import SpanExtractor
from allennlp.modules.token_embedders.embedding import Embedding
from allennlp.nn import util
from allennlp.common.checks import ConfigurationError
@SpanExtractor.register("endpoint")
class EndpointSpanExtractor(SpanExtractor):
"""
Represents spans as a combination of the embeddings of their endpoints. Additionally,
the width of the spans can be embedded and concatenated on to the final combination.
The following types of representation are supported, assuming that
`x = span_start_embeddings` and `y = span_end_embeddings`.
`x`, `y`, `x*y`, `x+y`, `x-y`, `x/y`, where each of those binary operations
is performed elementwise. You can list as many combinations as you want, comma separated.
For example, you might give `x,y,x*y` as the `combination` parameter to this class.
The computed similarity function would then be `[x; y; x*y]`, which can then be optionally
concatenated with an embedded representation of the width of the span.
Registered as a `SpanExtractor` with name "endpoint".
# Parameters
input_dim : `int`, required.
The final dimension of the `sequence_tensor`.
combination : `str`, optional (default = `"x,y"`).
The method used to combine the `start_embedding` and `end_embedding`
representations. See above for a full description.
num_width_embeddings : `int`, optional (default = `None`).
Specifies the number of buckets to use when representing
span width features.
span_width_embedding_dim : `int`, optional (default = `None`).
The embedding size for the span_width features.
bucket_widths : `bool`, optional (default = `False`).
Whether to bucket the span widths into log-space buckets. If `False`,
the raw span widths are used.
use_exclusive_start_indices : `bool`, optional (default = `False`).
If `True`, the start indices extracted are converted to exclusive indices. Sentinels
are used to represent exclusive span indices for the elements in the first
position in the sequence (as the exclusive indices for these elements are outside
        of the sequence boundary) so that start indices can be exclusive.
NOTE: This option can be helpful to avoid the pathological case in which you
want span differences for length 1 spans - if you use inclusive indices, you
will end up with an `x - x` operation for length 1 spans, which is not good.
"""
def __init__(
self,
input_dim: int,
combination: str = "x,y",
num_width_embeddings: int = None,
span_width_embedding_dim: int = None,
bucket_widths: bool = False,
use_exclusive_start_indices: bool = False,
) -> None:
super().__init__()
self._input_dim = input_dim
self._combination = combination
self._num_width_embeddings = num_width_embeddings
self._bucket_widths = bucket_widths
self._use_exclusive_start_indices = use_exclusive_start_indices
if use_exclusive_start_indices:
self._start_sentinel = Parameter(torch.randn([1, 1, int(input_dim)]))
self._span_width_embedding: Optional[Embedding] = None
if num_width_embeddings is not None and span_width_embedding_dim is not None:
self._span_width_embedding = Embedding(
num_embeddings=num_width_embeddings, embedding_dim=span_width_embedding_dim
)
elif num_width_embeddings is not None or span_width_embedding_dim is not None:
raise ConfigurationError(
"To use a span width embedding representation, you must"
"specify both num_width_buckets and span_width_embedding_dim."
)
def get_input_dim(self) -> int:
return self._input_dim
def get_output_dim(self) -> int:
combined_dim = util.get_combined_dim(self._combination, [self._input_dim, self._input_dim])
if self._span_width_embedding is not None:
return combined_dim + self._span_width_embedding.get_output_dim()
return combined_dim
@overrides
def forward(
self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
sequence_mask: torch.BoolTensor = None,
span_indices_mask: torch.BoolTensor = None,
    ) -> torch.FloatTensor:
# shape (batch_size, num_spans)
span_starts, span_ends = [index.squeeze(-1) for index in span_indices.split(1, dim=-1)]
if span_indices_mask is not None:
# It's not strictly necessary to multiply the span indices by the mask here,
# but it's possible that the span representation was padded with something other
# than 0 (such as -1, which would be an invalid index), so we do so anyway to
# be safe.
span_starts = span_starts * span_indices_mask
span_ends = span_ends * span_indices_mask
if not self._use_exclusive_start_indices:
if sequence_tensor.size(-1) != self._input_dim:
raise ValueError(
f"Dimension mismatch expected ({sequence_tensor.size(-1)}) "
f"received ({self._input_dim})."
)
start_embeddings = util.batched_index_select(sequence_tensor, span_starts)
end_embeddings = util.batched_index_select(sequence_tensor, span_ends)
else:
# We want `exclusive` span starts, so we remove 1 from the forward span starts
# as the AllenNLP `SpanField` is inclusive.
# shape (batch_size, num_spans)
exclusive_span_starts = span_starts - 1
# shape (batch_size, num_spans, 1)
start_sentinel_mask = (exclusive_span_starts == -1).unsqueeze(-1)
exclusive_span_starts = exclusive_span_starts * ~start_sentinel_mask.squeeze(-1)
# We'll check the indices here at runtime, because it's difficult to debug
# if this goes wrong and it's tricky to get right.
if (exclusive_span_starts < 0).any():
raise ValueError(
f"Adjusted span indices must lie inside the the sequence tensor, "
f"but found: exclusive_span_starts: {exclusive_span_starts}."
)
start_embeddings = util.batched_index_select(sequence_tensor, exclusive_span_starts)
end_embeddings = util.batched_index_select(sequence_tensor, span_ends)
# We're using sentinels, so we need to replace all the elements which were
# outside the dimensions of the sequence_tensor with the start sentinel.
start_embeddings = (
start_embeddings * ~start_sentinel_mask + start_sentinel_mask * self._start_sentinel
)
combined_tensors = util.combine_tensors(
self._combination, [start_embeddings, end_embeddings]
)
if self._span_width_embedding is not None:
# Embed the span widths and concatenate to the rest of the representations.
if self._bucket_widths:
span_widths = util.bucket_values(
span_ends - span_starts, num_total_buckets=self._num_width_embeddings # type: ignore
)
else:
span_widths = span_ends - span_starts
span_width_embeddings = self._span_width_embedding(span_widths)
combined_tensors = torch.cat([combined_tensors, span_width_embeddings], -1)
if span_indices_mask is not None:
return combined_tensors * span_indices_mask.unsqueeze(-1)
return combined_tensors
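# A minimal usage sketch (shapes below are illustrative assumptions): concatenate the start
# and end vectors of each span ("x,y"), plus a bucketed span-width embedding.
if __name__ == "__main__":
    extractor = EndpointSpanExtractor(
        input_dim=6,
        combination="x,y",
        num_width_embeddings=10,
        span_width_embedding_dim=4,
        bucket_widths=True,
    )
    sequence_tensor = torch.randn(2, 9, 6)
    span_indices = torch.tensor([[[0, 0], [2, 5]], [[1, 4], [6, 8]]])  # inclusive endpoints
    combined = extractor(sequence_tensor, span_indices)
    # "x,y" gives 2 * input_dim, plus the 4-dimensional width embedding.
    assert combined.shape == (2, 2, 6 + 6 + 4)
    assert extractor.get_output_dim() == 16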
| allennlp-master | allennlp/modules/span_extractors/endpoint_span_extractor.py |
from allennlp.modules.span_extractors.span_extractor import SpanExtractor
from allennlp.modules.span_extractors.endpoint_span_extractor import EndpointSpanExtractor
from allennlp.modules.span_extractors.self_attentive_span_extractor import (
SelfAttentiveSpanExtractor,
)
from allennlp.modules.span_extractors.bidirectional_endpoint_span_extractor import (
BidirectionalEndpointSpanExtractor,
)
| allennlp-master | allennlp/modules/span_extractors/__init__.py |
import torch
from overrides import overrides
from allennlp.common.registrable import Registrable
class SpanExtractor(torch.nn.Module, Registrable):
"""
Many NLP models deal with representations of spans inside a sentence.
SpanExtractors define methods for extracting and representing spans
from a sentence.
SpanExtractors take a sequence tensor of shape (batch_size, timesteps, embedding_dim)
and indices of shape (batch_size, num_spans, 2) and return a tensor of
shape (batch_size, num_spans, ...), forming some representation of the
spans.
"""
@overrides
def forward(
self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
sequence_mask: torch.BoolTensor = None,
span_indices_mask: torch.BoolTensor = None,
):
"""
Given a sequence tensor, extract spans and return representations of
them. Span representation can be computed in many different ways,
such as concatenation of the start and end spans, attention over the
vectors contained inside the span, etc.
# Parameters
sequence_tensor : `torch.FloatTensor`, required.
A tensor of shape (batch_size, sequence_length, embedding_size)
representing an embedded sequence of words.
span_indices : `torch.LongTensor`, required.
A tensor of shape `(batch_size, num_spans, 2)`, where the last
dimension represents the inclusive start and end indices of the
span to be extracted from the `sequence_tensor`.
sequence_mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of shape (batch_size, sequence_length) representing padded
elements of the sequence.
span_indices_mask : `torch.BoolTensor`, optional (default = `None`).
A tensor of shape (batch_size, num_spans) representing the valid
spans in the `indices` tensor. This mask is optional because
sometimes it's easier to worry about masking after calling this
function, rather than passing a mask directly.
# Returns
A tensor of shape `(batch_size, num_spans, embedded_span_size)`,
where `embedded_span_size` depends on the way spans are represented.
"""
raise NotImplementedError
def get_input_dim(self) -> int:
"""
Returns the expected final dimension of the `sequence_tensor`.
"""
raise NotImplementedError
def get_output_dim(self) -> int:
"""
Returns the expected final dimension of the returned span representation.
"""
raise NotImplementedError
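# A minimal sketch of the contract described above (values are illustrative assumptions):
# span indices are *inclusive*, so the span (2, 4) covers three tokens. Any registered
# subclass can be used through this interface once its module has been imported.
if __name__ == "__main__":
    from allennlp.modules.span_extractors import EndpointSpanExtractor
    extractor: SpanExtractor = EndpointSpanExtractor(input_dim=5)
    sequence_tensor = torch.randn(1, 6, 5)   # (batch_size, timesteps, embedding_dim)
    span_indices = torch.tensor([[[2, 4]]])  # one span covering tokens 2, 3 and 4
    # Default "x,y" combination, so the output is twice the input dimension.
    assert extractor(sequence_tensor, span_indices).shape == (1, 1, 10)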
| allennlp-master | allennlp/modules/span_extractors/span_extractor.py |
import math
import torch
from torch.nn import Parameter
from overrides import overrides
from allennlp.nn import util
from allennlp.nn.activations import Activation
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
@MatrixAttention.register("linear")
class LinearMatrixAttention(MatrixAttention):
"""
This `MatrixAttention` takes two matrices as input and returns a matrix of attentions
by performing a dot product between a vector of weights and some
combination of the two input matrices, followed by an (optional) activation function. The
combination used is configurable.
If the two vectors are `x` and `y`, we allow the following kinds of combinations : `x`,
`y`, `x*y`, `x+y`, `x-y`, `x/y`, where each of those binary operations is performed
elementwise. You can list as many combinations as you want, comma separated. For example, you
might give `x,y,x*y` as the `combination` parameter to this class. The computed similarity
function would then be `w^T [x; y; x*y] + b`, where `w` is a vector of weights, `b` is a
bias parameter, and `[;]` is vector concatenation.
Note that if you want a bilinear similarity function with a diagonal weight matrix W, where the
similarity function is computed as `x * w * y + b` (with `w` the diagonal of `W`), you can
accomplish that with this class by using "x*y" for `combination`.
Registered as a `MatrixAttention` with name "linear".
# Parameters
tensor_1_dim : `int`, required
The dimension of the first tensor, `x`, described above. This is `x.size()[-1]` - the
length of the vector that will go into the similarity computation. We need this so we can
build weight vectors correctly.
tensor_2_dim : `int`, required
The dimension of the second tensor, `y`, described above. This is `y.size()[-1]` - the
length of the vector that will go into the similarity computation. We need this so we can
build weight vectors correctly.
combination : `str`, optional (default=`"x,y"`)
Described above.
activation : `Activation`, optional (default=`linear`)
An activation function applied after the `w^T * [x;y] + b` calculation. Default is
linear, i.e. no activation.
"""
def __init__(
self,
tensor_1_dim: int,
tensor_2_dim: int,
combination: str = "x,y",
activation: Activation = None,
) -> None:
super().__init__()
self._combination = combination
combined_dim = util.get_combined_dim(combination, [tensor_1_dim, tensor_2_dim])
self._weight_vector = Parameter(torch.Tensor(combined_dim))
self._bias = Parameter(torch.Tensor(1))
self._activation = activation or Activation.by_name("linear")()
self.reset_parameters()
def reset_parameters(self):
std = math.sqrt(6 / (self._weight_vector.size(0) + 1))
self._weight_vector.data.uniform_(-std, std)
self._bias.data.fill_(0)
@overrides
def forward(self, matrix_1: torch.Tensor, matrix_2: torch.Tensor) -> torch.Tensor:
combined_tensors = util.combine_tensors_and_multiply(
self._combination, [matrix_1.unsqueeze(2), matrix_2.unsqueeze(1)], self._weight_vector
)
return self._activation(combined_tensors + self._bias)
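# A minimal usage sketch (dimensions below are illustrative assumptions): score every pair
# of rows from two matrices using the "x,y,x*y" combination described above.
if __name__ == "__main__":
    attention = LinearMatrixAttention(tensor_1_dim=7, tensor_2_dim=7, combination="x,y,x*y")
    matrix_1 = torch.randn(2, 5, 7)  # (batch_size, num_rows_1, tensor_1_dim)
    matrix_2 = torch.randn(2, 8, 7)  # (batch_size, num_rows_2, tensor_2_dim)
    similarities = attention(matrix_1, matrix_2)
    assert similarities.shape == (2, 5, 8)  # unnormalized score for every row pair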
| allennlp-master | allennlp/modules/matrix_attention/linear_matrix_attention.py |
from overrides import overrides
import torch
from torch.nn.parameter import Parameter
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
from allennlp.nn import Activation
@MatrixAttention.register("bilinear")
class BilinearMatrixAttention(MatrixAttention):
"""
Computes attention between two matrices using a bilinear attention function. This function has
a matrix of weights `W` and a bias `b`, and the similarity between the two matrices `X`
and `Y` is computed as `X W Y^T + b`.
Registered as a `MatrixAttention` with name "bilinear".
# Parameters
matrix_1_dim : `int`, required
The dimension of the matrix `X`, described above. This is `X.size()[-1]` - the length
of the vector that will go into the similarity computation. We need this so we can build
the weight matrix correctly.
matrix_2_dim : `int`, required
The dimension of the matrix `Y`, described above. This is `Y.size()[-1]` - the length
of the vector that will go into the similarity computation. We need this so we can build
the weight matrix correctly.
activation : `Activation`, optional (default=`linear`)
An activation function applied after the `X W Y^T + b` calculation. Default is
linear, i.e. no activation.
use_input_biases : `bool`, optional (default = `False`)
If True, we add biases to the inputs such that the final computation
is equivalent to the original bilinear matrix multiplication plus a
projection of both inputs.
label_dim : `int`, optional (default = `1`)
The number of output classes. Typically in an attention setting this will be one,
but this parameter allows this class to function as an equivalent to `torch.nn.Bilinear`
for matrices, rather than vectors.
"""
def __init__(
self,
matrix_1_dim: int,
matrix_2_dim: int,
activation: Activation = None,
use_input_biases: bool = False,
label_dim: int = 1,
) -> None:
super().__init__()
if use_input_biases:
matrix_1_dim += 1
matrix_2_dim += 1
if label_dim == 1:
self._weight_matrix = Parameter(torch.Tensor(matrix_1_dim, matrix_2_dim))
else:
self._weight_matrix = Parameter(torch.Tensor(label_dim, matrix_1_dim, matrix_2_dim))
self._bias = Parameter(torch.Tensor(1))
self._activation = activation or Activation.by_name("linear")()
self._use_input_biases = use_input_biases
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self._weight_matrix)
self._bias.data.fill_(0)
@overrides
def forward(self, matrix_1: torch.Tensor, matrix_2: torch.Tensor) -> torch.Tensor:
if self._use_input_biases:
bias1 = matrix_1.new_ones(matrix_1.size()[:-1] + (1,))
bias2 = matrix_2.new_ones(matrix_2.size()[:-1] + (1,))
matrix_1 = torch.cat([matrix_1, bias1], -1)
matrix_2 = torch.cat([matrix_2, bias2], -1)
weight = self._weight_matrix
if weight.dim() == 2:
weight = weight.unsqueeze(0)
intermediate = torch.matmul(matrix_1.unsqueeze(1), weight)
final = torch.matmul(intermediate, matrix_2.unsqueeze(1).transpose(2, 3))
return self._activation(final.squeeze(1) + self._bias)
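# A minimal usage sketch (dimensions below are illustrative assumptions): the two matrices
# may have different last dimensions, since the weight matrix is (matrix_1_dim, matrix_2_dim).
if __name__ == "__main__":
    attention = BilinearMatrixAttention(matrix_1_dim=4, matrix_2_dim=6)
    matrix_1 = torch.randn(3, 5, 4)
    matrix_2 = torch.randn(3, 7, 6)
    assert attention(matrix_1, matrix_2).shape == (3, 5, 7)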
| allennlp-master | allennlp/modules/matrix_attention/bilinear_matrix_attention.py |
import torch
from overrides import overrides
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
@MatrixAttention.register("dot_product")
class DotProductMatrixAttention(MatrixAttention):
"""
Computes attention between every entry in matrix_1 with every entry in matrix_2 using a dot
product.
Registered as a `MatrixAttention` with name "dot_product".
"""
@overrides
def forward(self, matrix_1: torch.Tensor, matrix_2: torch.Tensor) -> torch.Tensor:
return matrix_1.bmm(matrix_2.transpose(2, 1))
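# A minimal usage sketch (dimensions below are illustrative assumptions): the two matrices
# must share their embedding dimension for the dot product to be defined.
if __name__ == "__main__":
    attention = DotProductMatrixAttention()
    matrix_1 = torch.randn(2, 4, 10)
    matrix_2 = torch.randn(2, 6, 10)
    assert attention(matrix_1, matrix_2).shape == (2, 4, 6)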
| allennlp-master | allennlp/modules/matrix_attention/dot_product_matrix_attention.py |
import torch
from allennlp.common.registrable import Registrable
class MatrixAttention(torch.nn.Module, Registrable):
"""
`MatrixAttention` takes two matrices as input and returns a matrix of attentions.
We compute the similarity between each row in each matrix and return unnormalized similarity
scores. Because these scores are unnormalized, we don't take a mask as input; it's up to the
caller to deal with masking properly when this output is used.
Input:
- matrix_1 : `(batch_size, num_rows_1, embedding_dim_1)`
- matrix_2 : `(batch_size, num_rows_2, embedding_dim_2)`
Output:
- `(batch_size, num_rows_1, num_rows_2)`
"""
def forward(self, matrix_1: torch.Tensor, matrix_2: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
| allennlp-master | allennlp/modules/matrix_attention/matrix_attention.py |
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
from allennlp.modules.matrix_attention.bilinear_matrix_attention import BilinearMatrixAttention
from allennlp.modules.matrix_attention.cosine_matrix_attention import CosineMatrixAttention
from allennlp.modules.matrix_attention.dot_product_matrix_attention import DotProductMatrixAttention
from allennlp.modules.matrix_attention.linear_matrix_attention import LinearMatrixAttention
| allennlp-master | allennlp/modules/matrix_attention/__init__.py |
import torch
from overrides import overrides
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
from allennlp.nn import util
@MatrixAttention.register("cosine")
class CosineMatrixAttention(MatrixAttention):
"""
Computes attention between every entry in matrix_1 with every entry in matrix_2 using cosine
similarity.
Registered as a `MatrixAttention` with name "cosine".
"""
@overrides
def forward(self, matrix_1: torch.Tensor, matrix_2: torch.Tensor) -> torch.Tensor:
a_norm = matrix_1 / (
matrix_1.norm(p=2, dim=-1, keepdim=True) + util.tiny_value_of_dtype(matrix_1.dtype)
)
b_norm = matrix_2 / (
matrix_2.norm(p=2, dim=-1, keepdim=True) + util.tiny_value_of_dtype(matrix_2.dtype)
)
return torch.bmm(a_norm, b_norm.transpose(-1, -2))
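# A minimal usage sketch (dimensions below are illustrative assumptions): cosine similarity
# is bounded in [-1, 1], which the last assertion checks loosely.
if __name__ == "__main__":
    attention = CosineMatrixAttention()
    matrix_1 = torch.randn(2, 3, 16)
    matrix_2 = torch.randn(2, 5, 16)
    similarities = attention(matrix_1, matrix_2)
    assert similarities.shape == (2, 3, 5)
    assert similarities.abs().max() <= 1.0 + 1e-5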
| allennlp-master | allennlp/modules/matrix_attention/cosine_matrix_attention.py |
import math
from typing import Optional, Tuple, Dict, Any
from overrides import overrides
import torch
import torch.nn.functional as F
from transformers import XLNetConfig
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.modules.scalar_mix import ScalarMix
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.nn.util import batched_index_select
@TokenEmbedder.register("pretrained_transformer")
class PretrainedTransformerEmbedder(TokenEmbedder):
"""
Uses a pretrained model from `transformers` as a `TokenEmbedder`.
Registered as a `TokenEmbedder` with name "pretrained_transformer".
# Parameters
model_name : `str`
The name of the `transformers` model to use. Should be the same as the corresponding
`PretrainedTransformerIndexer`.
max_length : `int`, optional (default = `None`)
        If positive, folds input token IDs into multiple segments of this length, passes them
        through the transformer model independently, and concatenates the final representations.
Should be set to the same value as the `max_length` option on the
`PretrainedTransformerIndexer`.
sub_module: `str`, optional (default = `None`)
        The name of a submodule of the transformer to be used as the embedder. Some transformers naturally act
        as embedders, such as BERT. However, other models consist of an encoder and a decoder, in which case we
        just want to use the encoder.
train_parameters: `bool`, optional (default = `True`)
If this is `True`, the transformer weights get updated during training.
last_layer_only: `bool`, optional (default = `True`)
When `True` (the default), only the final layer of the pretrained transformer is taken
for the embeddings. But if set to `False`, a scalar mix of all of the layers
is used.
gradient_checkpointing: `bool`, optional (default = `None`)
Enable or disable gradient checkpointing.
tokenizer_kwargs: `Dict[str, Any]`, optional (default = `None`)
Dictionary with
[additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/tokenization_utils.py#L691)
for `AutoTokenizer.from_pretrained`.
transformer_kwargs: `Dict[str, Any]`, optional (default = `None`)
Dictionary with
[additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/modeling_utils.py#L253)
for `AutoModel.from_pretrained`.
""" # noqa: E501
authorized_missing_keys = [r"position_ids$"]
def __init__(
self,
model_name: str,
*,
max_length: int = None,
sub_module: str = None,
train_parameters: bool = True,
last_layer_only: bool = True,
override_weights_file: Optional[str] = None,
override_weights_strip_prefix: Optional[str] = None,
gradient_checkpointing: Optional[bool] = None,
tokenizer_kwargs: Optional[Dict[str, Any]] = None,
transformer_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__()
from allennlp.common import cached_transformers
self.transformer_model = cached_transformers.get(
model_name,
True,
override_weights_file=override_weights_file,
override_weights_strip_prefix=override_weights_strip_prefix,
**(transformer_kwargs or {}),
)
if gradient_checkpointing is not None:
self.transformer_model.config.update({"gradient_checkpointing": gradient_checkpointing})
self.config = self.transformer_model.config
if sub_module:
assert hasattr(self.transformer_model, sub_module)
self.transformer_model = getattr(self.transformer_model, sub_module)
self._max_length = max_length
# I'm not sure if this works for all models; open an issue on github if you find a case
# where it doesn't work.
self.output_dim = self.config.hidden_size
self._scalar_mix: Optional[ScalarMix] = None
if not last_layer_only:
self._scalar_mix = ScalarMix(self.config.num_hidden_layers)
self.config.output_hidden_states = True
tokenizer = PretrainedTransformerTokenizer(
model_name,
tokenizer_kwargs=tokenizer_kwargs,
)
self._num_added_start_tokens = len(tokenizer.single_sequence_start_tokens)
self._num_added_end_tokens = len(tokenizer.single_sequence_end_tokens)
self._num_added_tokens = self._num_added_start_tokens + self._num_added_end_tokens
if not train_parameters:
for param in self.transformer_model.parameters():
param.requires_grad = False
@overrides
def get_output_dim(self):
return self.output_dim
def _number_of_token_type_embeddings(self):
if isinstance(self.config, XLNetConfig):
return 3 # XLNet has 3 type ids
elif hasattr(self.config, "type_vocab_size"):
return self.config.type_vocab_size
else:
return 0
@overrides
def forward(
self,
token_ids: torch.LongTensor,
mask: torch.BoolTensor,
type_ids: Optional[torch.LongTensor] = None,
segment_concat_mask: Optional[torch.BoolTensor] = None,
) -> torch.Tensor: # type: ignore
"""
# Parameters
token_ids: `torch.LongTensor`
Shape: `[batch_size, num_wordpieces if max_length is None else num_segment_concat_wordpieces]`.
num_segment_concat_wordpieces is num_wordpieces plus special tokens inserted in the
middle, e.g. the length of: "[CLS] A B C [SEP] [CLS] D E F [SEP]" (see indexer logic).
mask: `torch.BoolTensor`
Shape: [batch_size, num_wordpieces].
type_ids: `Optional[torch.LongTensor]`
Shape: `[batch_size, num_wordpieces if max_length is None else num_segment_concat_wordpieces]`.
segment_concat_mask: `Optional[torch.BoolTensor]`
Shape: `[batch_size, num_segment_concat_wordpieces]`.
# Returns
`torch.Tensor`
Shape: `[batch_size, num_wordpieces, embedding_size]`.
"""
# Some of the huggingface transformers don't support type ids at all and crash when you supply
# them. For others, you can supply a tensor of zeros, and if you don't, they act as if you did.
# There is no practical difference to the caller, so here we pretend that one case is the same
# as another case.
if type_ids is not None:
max_type_id = type_ids.max()
if max_type_id == 0:
type_ids = None
else:
if max_type_id >= self._number_of_token_type_embeddings():
raise ValueError("Found type ids too large for the chosen transformer model.")
assert token_ids.shape == type_ids.shape
fold_long_sequences = self._max_length is not None and token_ids.size(1) > self._max_length
if fold_long_sequences:
batch_size, num_segment_concat_wordpieces = token_ids.size()
token_ids, segment_concat_mask, type_ids = self._fold_long_sequences(
token_ids, segment_concat_mask, type_ids
)
transformer_mask = segment_concat_mask if self._max_length is not None else mask
assert transformer_mask is not None
# Shape: [batch_size, num_wordpieces, embedding_size],
# or if self._max_length is not None:
# [batch_size * num_segments, self._max_length, embedding_size]
# We call this with kwargs because some of the huggingface models don't have the
# token_type_ids parameter and fail even when it's given as None.
# Also, as of transformers v2.5.1, they are taking FloatTensor masks.
parameters = {"input_ids": token_ids, "attention_mask": transformer_mask.float()}
if type_ids is not None:
parameters["token_type_ids"] = type_ids
transformer_output = self.transformer_model(**parameters)
if self._scalar_mix is not None:
# As far as I can tell, the hidden states will always be the last element
# in the output tuple as long as the model is not also configured to return
# attention scores.
# See, for example, the return value description for BERT:
# https://huggingface.co/transformers/model_doc/bert.html#transformers.BertModel.forward
# These hidden states will also include the embedding layer, which we don't
# include in the scalar mix. Hence the `[1:]` slicing.
hidden_states = transformer_output[-1][1:]
embeddings = self._scalar_mix(hidden_states)
else:
embeddings = transformer_output[0]
if fold_long_sequences:
embeddings = self._unfold_long_sequences(
embeddings, segment_concat_mask, batch_size, num_segment_concat_wordpieces
)
return embeddings
def _fold_long_sequences(
self,
token_ids: torch.LongTensor,
mask: torch.BoolTensor,
type_ids: Optional[torch.LongTensor] = None,
) -> Tuple[torch.LongTensor, torch.LongTensor, Optional[torch.LongTensor]]:
"""
        We fold the 1D sequences returned by `PretrainedTransformerIndexer` (one per batch element),
        which are in reality multiple segments concatenated together, into 2D tensors, e.g.
[ [CLS] A B C [SEP] [CLS] D E [SEP] ]
-> [ [ [CLS] A B C [SEP] ], [ [CLS] D E [SEP] [PAD] ] ]
The [PAD] positions can be found in the returned `mask`.
# Parameters
token_ids: `torch.LongTensor`
Shape: `[batch_size, num_segment_concat_wordpieces]`.
num_segment_concat_wordpieces is num_wordpieces plus special tokens inserted in the
middle, i.e. the length of: "[CLS] A B C [SEP] [CLS] D E F [SEP]" (see indexer logic).
mask: `torch.BoolTensor`
Shape: `[batch_size, num_segment_concat_wordpieces]`.
The mask for the concatenated segments of wordpieces. The same as `segment_concat_mask`
in `forward()`.
type_ids: `Optional[torch.LongTensor]`
Shape: [batch_size, num_segment_concat_wordpieces].
        # Returns
token_ids: `torch.LongTensor`
Shape: [batch_size * num_segments, self._max_length].
mask: `torch.BoolTensor`
Shape: [batch_size * num_segments, self._max_length].
"""
num_segment_concat_wordpieces = token_ids.size(1)
num_segments = math.ceil(num_segment_concat_wordpieces / self._max_length) # type: ignore
padded_length = num_segments * self._max_length # type: ignore
length_to_pad = padded_length - num_segment_concat_wordpieces
def fold(tensor): # Shape: [batch_size, num_segment_concat_wordpieces]
# Shape: [batch_size, num_segments * self._max_length]
tensor = F.pad(tensor, [0, length_to_pad], value=0)
# Shape: [batch_size * num_segments, self._max_length]
return tensor.reshape(-1, self._max_length)
return fold(token_ids), fold(mask), fold(type_ids) if type_ids is not None else None
def _unfold_long_sequences(
self,
embeddings: torch.FloatTensor,
mask: torch.BoolTensor,
batch_size: int,
num_segment_concat_wordpieces: int,
) -> torch.FloatTensor:
"""
We take 2D segments of a long sequence and flatten them out to get the whole sequence
        representation while removing unnecessary special tokens.
[ [ [CLS]_emb A_emb B_emb C_emb [SEP]_emb ], [ [CLS]_emb D_emb E_emb [SEP]_emb [PAD]_emb ] ]
-> [ [CLS]_emb A_emb B_emb C_emb D_emb E_emb [SEP]_emb ]
We truncate the start and end tokens for all segments, recombine the segments,
and manually add back the start and end tokens.
# Parameters
embeddings: `torch.FloatTensor`
Shape: [batch_size * num_segments, self._max_length, embedding_size].
mask: `torch.BoolTensor`
Shape: [batch_size * num_segments, self._max_length].
The mask for the concatenated segments of wordpieces. The same as `segment_concat_mask`
in `forward()`.
batch_size: `int`
num_segment_concat_wordpieces: `int`
The length of the original "[ [CLS] A B C [SEP] [CLS] D E F [SEP] ]", i.e.
the original `token_ids.size(1)`.
        # Returns
embeddings: `torch.FloatTensor`
            Shape: [batch_size, num_wordpieces, embedding_size].
"""
def lengths_to_mask(lengths, max_len, device):
return torch.arange(max_len, device=device).expand(
lengths.size(0), max_len
) < lengths.unsqueeze(1)
device = embeddings.device
num_segments = int(embeddings.size(0) / batch_size)
embedding_size = embeddings.size(2)
# We want to remove all segment-level special tokens but maintain sequence-level ones
num_wordpieces = num_segment_concat_wordpieces - (num_segments - 1) * self._num_added_tokens
embeddings = embeddings.reshape(
batch_size, num_segments * self._max_length, embedding_size # type: ignore
)
mask = mask.reshape(batch_size, num_segments * self._max_length) # type: ignore
        # We assume that all 1s in the mask precede all 0s, and check for that below.
# Open an issue on GitHub if this breaks for you.
# Shape: (batch_size,)
seq_lengths = mask.sum(-1)
if not (lengths_to_mask(seq_lengths, mask.size(1), device) == mask).all():
raise ValueError(
"Long sequence splitting only supports masks with all 1s preceding all 0s."
)
# Shape: (batch_size, self._num_added_end_tokens); this is a broadcast op
end_token_indices = (
seq_lengths.unsqueeze(-1) - torch.arange(self._num_added_end_tokens, device=device) - 1
)
# Shape: (batch_size, self._num_added_start_tokens, embedding_size)
start_token_embeddings = embeddings[:, : self._num_added_start_tokens, :]
# Shape: (batch_size, self._num_added_end_tokens, embedding_size)
end_token_embeddings = batched_index_select(embeddings, end_token_indices)
embeddings = embeddings.reshape(batch_size, num_segments, self._max_length, embedding_size)
embeddings = embeddings[
:, :, self._num_added_start_tokens : embeddings.size(2) - self._num_added_end_tokens, :
] # truncate segment-level start/end tokens
embeddings = embeddings.reshape(batch_size, -1, embedding_size) # flatten
# Now try to put end token embeddings back which is a little tricky.
# The number of segment each sequence spans, excluding padding. Mimicking ceiling operation.
# Shape: (batch_size,)
num_effective_segments = (seq_lengths + self._max_length - 1) // self._max_length
# The number of indices that end tokens should shift back.
num_removed_non_end_tokens = (
num_effective_segments * self._num_added_tokens - self._num_added_end_tokens
)
# Shape: (batch_size, self._num_added_end_tokens)
end_token_indices -= num_removed_non_end_tokens.unsqueeze(-1)
assert (end_token_indices >= self._num_added_start_tokens).all()
# Add space for end embeddings
embeddings = torch.cat([embeddings, torch.zeros_like(end_token_embeddings)], 1)
# Add end token embeddings back
embeddings.scatter_(
1, end_token_indices.unsqueeze(-1).expand_as(end_token_embeddings), end_token_embeddings
)
# Now put back start tokens. We can do this before putting back end tokens, but then
# we need to change `num_removed_non_end_tokens` a little.
embeddings = torch.cat([start_token_embeddings, embeddings], 1)
# Truncate to original length
embeddings = embeddings[:, :num_wordpieces, :]
return embeddings
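# A minimal usage sketch (the model name, wordpiece ids and shapes below are illustrative
# assumptions, and constructing the embedder downloads pretrained weights): in practice the
# token_ids and mask come from a PretrainedTransformerIndexer rather than being built by hand.
if __name__ == "__main__":
    embedder = PretrainedTransformerEmbedder("bert-base-uncased")
    token_ids = torch.tensor([[101, 7592, 2088, 102]])  # assumed ids for "[CLS] hello world [SEP]"
    mask = torch.ones_like(token_ids, dtype=torch.bool)
    embeddings = embedder(token_ids, mask)
    # One vector per wordpiece, with the transformer's hidden size (768 for this model).
    assert embeddings.shape == (1, 4, embedder.get_output_dim())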
| allennlp-master | allennlp/modules/token_embedders/pretrained_transformer_embedder.py |
from typing import Optional, Dict, Any
from overrides import overrides
import torch
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder, TokenEmbedder
from allennlp.nn import util
@TokenEmbedder.register("pretrained_transformer_mismatched")
class PretrainedTransformerMismatchedEmbedder(TokenEmbedder):
"""
Use this embedder to embed wordpieces given by `PretrainedTransformerMismatchedIndexer`
and to pool the resulting vectors to get word-level representations.
Registered as a `TokenEmbedder` with name "pretrained_transformer_mismatched".
# Parameters
model_name : `str`
The name of the `transformers` model to use. Should be the same as the corresponding
`PretrainedTransformerMismatchedIndexer`.
max_length : `int`, optional (default = `None`)
        If positive, folds input token IDs into multiple segments of this length, passes them
        through the transformer model independently, and concatenates the final representations.
Should be set to the same value as the `max_length` option on the
`PretrainedTransformerMismatchedIndexer`.
train_parameters: `bool`, optional (default = `True`)
If this is `True`, the transformer weights get updated during training.
last_layer_only: `bool`, optional (default = `True`)
When `True` (the default), only the final layer of the pretrained transformer is taken
for the embeddings. But if set to `False`, a scalar mix of all of the layers
is used.
gradient_checkpointing: `bool`, optional (default = `None`)
Enable or disable gradient checkpointing.
tokenizer_kwargs: `Dict[str, Any]`, optional (default = `None`)
Dictionary with
[additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/tokenization_utils.py#L691)
for `AutoTokenizer.from_pretrained`.
transformer_kwargs: `Dict[str, Any]`, optional (default = `None`)
Dictionary with
[additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/modeling_utils.py#L253)
for `AutoModel.from_pretrained`.
""" # noqa: E501
def __init__(
self,
model_name: str,
max_length: int = None,
train_parameters: bool = True,
last_layer_only: bool = True,
gradient_checkpointing: Optional[bool] = None,
tokenizer_kwargs: Optional[Dict[str, Any]] = None,
transformer_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__()
        # The matched version, as opposed to the mismatched one.
self._matched_embedder = PretrainedTransformerEmbedder(
model_name,
max_length=max_length,
train_parameters=train_parameters,
last_layer_only=last_layer_only,
gradient_checkpointing=gradient_checkpointing,
tokenizer_kwargs=tokenizer_kwargs,
transformer_kwargs=transformer_kwargs,
)
@overrides
def get_output_dim(self):
return self._matched_embedder.get_output_dim()
@overrides
def forward(
self,
token_ids: torch.LongTensor,
mask: torch.BoolTensor,
offsets: torch.LongTensor,
wordpiece_mask: torch.BoolTensor,
type_ids: Optional[torch.LongTensor] = None,
segment_concat_mask: Optional[torch.BoolTensor] = None,
) -> torch.Tensor: # type: ignore
"""
# Parameters
token_ids: `torch.LongTensor`
Shape: [batch_size, num_wordpieces] (for exception see `PretrainedTransformerEmbedder`).
mask: `torch.BoolTensor`
Shape: [batch_size, num_orig_tokens].
offsets: `torch.LongTensor`
Shape: [batch_size, num_orig_tokens, 2].
Maps indices for the original tokens, i.e. those given as input to the indexer,
to a span in token_ids. `token_ids[i][offsets[i][j][0]:offsets[i][j][1] + 1]`
corresponds to the original j-th token from the i-th batch.
wordpiece_mask: `torch.BoolTensor`
Shape: [batch_size, num_wordpieces].
type_ids: `Optional[torch.LongTensor]`
Shape: [batch_size, num_wordpieces].
segment_concat_mask: `Optional[torch.BoolTensor]`
See `PretrainedTransformerEmbedder`.
# Returns
`torch.Tensor`
Shape: [batch_size, num_orig_tokens, embedding_size].
"""
# Shape: [batch_size, num_wordpieces, embedding_size].
embeddings = self._matched_embedder(
token_ids, wordpiece_mask, type_ids=type_ids, segment_concat_mask=segment_concat_mask
)
# span_embeddings: (batch_size, num_orig_tokens, max_span_length, embedding_size)
# span_mask: (batch_size, num_orig_tokens, max_span_length)
span_embeddings, span_mask = util.batched_span_select(embeddings.contiguous(), offsets)
span_mask = span_mask.unsqueeze(-1)
span_embeddings *= span_mask # zero out paddings
span_embeddings_sum = span_embeddings.sum(2)
span_embeddings_len = span_mask.sum(2)
# Shape: (batch_size, num_orig_tokens, embedding_size)
orig_embeddings = span_embeddings_sum / torch.clamp_min(span_embeddings_len, 1)
# All the places where the span length is zero, write in zeros.
orig_embeddings[(span_embeddings_len == 0).expand(orig_embeddings.shape)] = 0
return orig_embeddings
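# A minimal usage sketch (model name, wordpiece ids and offsets below are illustrative
# assumptions): `offsets[i][j]` is the inclusive wordpiece span for original token j, so the
# single original token here is pooled (averaged) from wordpieces 1 and 2.
if __name__ == "__main__":
    embedder = PretrainedTransformerMismatchedEmbedder("bert-base-uncased")
    token_ids = torch.tensor([[101, 2572, 28775, 102]])       # assumed wordpiece ids
    wordpiece_mask = torch.ones_like(token_ids, dtype=torch.bool)
    mask = torch.ones(1, 1, dtype=torch.bool)                 # one original (whole-word) token
    offsets = torch.tensor([[[1, 2]]])                        # that token spans wordpieces 1-2
    embeddings = embedder(token_ids, mask, offsets, wordpiece_mask)
    assert embeddings.shape == (1, 1, embedder.get_output_dim())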
| allennlp-master | allennlp/modules/token_embedders/pretrained_transformer_mismatched_embedder.py |
import io
import itertools
import logging
import re
import tarfile
import warnings
import zipfile
from typing import Any, cast, Iterator, NamedTuple, Optional, Sequence, Tuple, BinaryIO
import numpy
import torch
from overrides import overrides
from torch.nn.functional import embedding
from allennlp.common import Tqdm
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path, get_file_extension, is_url_or_existing_file
from allennlp.data import Vocabulary
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.nn import util
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import h5py
logger = logging.getLogger(__name__)
@TokenEmbedder.register("embedding")
class Embedding(TokenEmbedder):
"""
A more featureful embedding module than the default in Pytorch. Adds the ability to:
1. embed higher-order inputs
2. pre-specify the weight matrix
3. use a non-trainable embedding
4. project the resultant embeddings to some other dimension (which only makes sense with
non-trainable embeddings).
Note that if you are using our data API and are trying to embed a
[`TextField`](../../data/fields/text_field.md), you should use a
[`TextFieldEmbedder`](../text_field_embedders/text_field_embedder.md) instead of using this directly.
Registered as a `TokenEmbedder` with name "embedding".
# Parameters
num_embeddings : `int`
Size of the dictionary of embeddings (vocabulary size).
embedding_dim : `int`
The size of each embedding vector.
projection_dim : `int`, optional (default=`None`)
If given, we add a projection layer after the embedding layer. This really only makes
sense if `trainable` is `False`.
weight : `torch.FloatTensor`, optional (default=`None`)
A pre-initialised weight matrix for the embedding lookup, allowing the use of
pretrained vectors.
padding_index : `int`, optional (default=`None`)
If given, pads the output with zeros whenever it encounters the index.
trainable : `bool`, optional (default=`True`)
Whether or not to optimize the embedding parameters.
max_norm : `float`, optional (default=`None`)
        If given, will renormalize the embeddings to always have a norm less than this value.
norm_type : `float`, optional (default=`2`)
The p of the p-norm to compute for the max_norm option
scale_grad_by_freq : `bool`, optional (default=`False`)
If given, this will scale gradients by the frequency of the words in the mini-batch.
sparse : `bool`, optional (default=`False`)
Whether or not the Pytorch backend should use a sparse representation of the embedding weight.
vocab_namespace : `str`, optional (default=`None`)
        In case of fine-tuning/transfer learning, the model's embedding matrix needs to be
        extended according to the size of the extended vocabulary. To be able to know how much to
        extend the embedding matrix, it's necessary to know which vocab_namespace was used to
        construct it in the original training. We store the vocab_namespace used during the original
        training as an attribute, so that it can be retrieved during fine-tuning.
pretrained_file : `str`, optional (default=`None`)
Path to a file of word vectors to initialize the embedding matrix. It can be the
path to a local file or a URL of a (cached) remote file. Two formats are supported:
* hdf5 file - containing an embedding matrix in the form of a torch.Tensor;
* text file - an utf-8 encoded text file with space separated fields.
vocab : `Vocabulary`, optional (default = `None`)
Used to construct an embedding from a pretrained file.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"embedding", it gets specified as a top-level parameter, then is passed in to this module
separately.
# Returns
An Embedding module.
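    A minimal usage sketch (the sizes and token ids below are made up):

    ```
    import torch

    embedder = Embedding(embedding_dim=10, num_embeddings=100)
    token_ids = torch.tensor([[1, 2, 3], [4, 5, 0]])
    embedded = embedder(token_ids)  # shape: (2, 3, 10)
    ```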
"""
def __init__(
self,
embedding_dim: int,
num_embeddings: int = None,
projection_dim: int = None,
weight: torch.FloatTensor = None,
padding_index: int = None,
trainable: bool = True,
max_norm: float = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
vocab_namespace: str = "tokens",
pretrained_file: str = None,
vocab: Vocabulary = None,
) -> None:
super().__init__()
if num_embeddings is None and vocab is None:
raise ConfigurationError(
"Embedding must be constructed with either num_embeddings or a vocabulary."
)
_vocab_namespace: Optional[str] = vocab_namespace
if num_embeddings is None:
num_embeddings = vocab.get_vocab_size(_vocab_namespace) # type: ignore
else:
# If num_embeddings is present, set default namespace to None so that extend_vocab
# call doesn't misinterpret that some namespace was originally used.
_vocab_namespace = None # type: ignore
self.num_embeddings = num_embeddings
self.padding_index = padding_index
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
self._vocab_namespace = _vocab_namespace
self._pretrained_file = pretrained_file
self.output_dim = projection_dim or embedding_dim
if weight is not None and pretrained_file:
raise ConfigurationError(
"Embedding was constructed with both a weight and a pretrained file."
)
elif pretrained_file is not None:
if vocab is None:
raise ConfigurationError(
"To construct an Embedding from a pretrained file, you must also pass a vocabulary."
)
# If we're loading a saved model, we don't want to actually read a pre-trained
# embedding file - the embeddings will just be in our saved weights, and we might not
# have the original embedding file anymore, anyway.
# TODO: having to pass tokens here is SUPER gross, but otherwise this breaks the
# extend_vocab method, which relies on the value of vocab_namespace being None
# to infer at what stage the embedding has been constructed. Phew.
weight = _read_pretrained_embeddings_file(
pretrained_file, embedding_dim, vocab, vocab_namespace
)
self.weight = torch.nn.Parameter(weight, requires_grad=trainable)
elif weight is not None:
self.weight = torch.nn.Parameter(weight, requires_grad=trainable)
else:
weight = torch.FloatTensor(num_embeddings, embedding_dim)
self.weight = torch.nn.Parameter(weight, requires_grad=trainable)
torch.nn.init.xavier_uniform_(self.weight)
# Whatever way we have constructed the embedding, it should be consistent with
# num_embeddings and embedding_dim.
if self.weight.size() != (num_embeddings, embedding_dim):
raise ConfigurationError(
"A weight matrix was passed with contradictory embedding shapes."
)
if self.padding_index is not None:
self.weight.data[self.padding_index].fill_(0)
if projection_dim:
self._projection = torch.nn.Linear(embedding_dim, projection_dim)
else:
self._projection = None
@overrides
def get_output_dim(self) -> int:
return self.output_dim
@overrides
def forward(self, tokens: torch.Tensor) -> torch.Tensor:
# tokens may have extra dimensions (batch_size, d1, ..., dn, sequence_length),
# but embedding expects (batch_size, sequence_length), so pass tokens to
# util.combine_initial_dims (which is a no-op if there are no extra dimensions).
# Remember the original size.
original_size = tokens.size()
tokens = util.combine_initial_dims(tokens)
embedded = embedding(
tokens,
self.weight,
padding_idx=self.padding_index,
max_norm=self.max_norm,
norm_type=self.norm_type,
scale_grad_by_freq=self.scale_grad_by_freq,
sparse=self.sparse,
)
# Now (if necessary) add back in the extra dimensions.
embedded = util.uncombine_initial_dims(embedded, original_size)
if self._projection:
projection = self._projection
for _ in range(embedded.dim() - 2):
projection = TimeDistributed(projection)
embedded = projection(embedded)
return embedded
def extend_vocab(
self,
extended_vocab: Vocabulary,
vocab_namespace: str = None,
extension_pretrained_file: str = None,
model_path: str = None,
):
"""
Extends the embedding matrix according to the extended vocabulary.
If extension_pretrained_file is available, it will be used for initializing the new words
embeddings in the extended vocabulary; otherwise we will check if _pretrained_file attribute
is already available. If none is available, they will be initialized with xavier uniform.
# Parameters
extended_vocab : `Vocabulary`
Vocabulary extended from original vocabulary used to construct
this `Embedding`.
vocab_namespace : `str`, (optional, default=`None`)
In case you know what vocab_namespace should be used for extension, you
can pass it. If not passed, it will check if vocab_namespace used at the
time of `Embedding` construction is available. If so, this namespace
will be used or else extend_vocab will be a no-op.
extension_pretrained_file : `str`, (optional, default=`None`)
A file containing pretrained embeddings can be specified here. It can be
the path to a local file or an URL of a (cached) remote file. Check format
details in `from_params` of `Embedding` class.
model_path : `str`, (optional, default=`None`)
            Path traversing the model attributes up to this embedding module.
            E.g. "_text_field_embedder.token_embedder_tokens". This is only useful
to give a helpful error message when extend_vocab is implicitly called
by train or any other command.
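        A rough sketch of the intended flow (the tokens and sizes below are made up):

        ```
        vocab = Vocabulary()
        vocab.add_tokens_to_namespace(["cat", "dog"], namespace="tokens")
        embedder = Embedding(embedding_dim=4, vocab=vocab, vocab_namespace="tokens")
        vocab.add_tokens_to_namespace(["fish"], namespace="tokens")
        embedder.extend_vocab(vocab, vocab_namespace="tokens")
        # embedder.num_embeddings now equals vocab.get_vocab_size("tokens")
        ```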
"""
# Caveat: For allennlp v0.8.1 and below, we weren't storing vocab_namespace as an attribute,
        # which is needed at the time of embedding vocab extension. So old archive models are
# currently unextendable.
vocab_namespace = vocab_namespace or self._vocab_namespace
if not vocab_namespace:
# It's not safe to default to "tokens" or any other namespace.
logger.info(
"Loading a model trained before embedding extension was implemented; "
"pass an explicit vocab namespace if you want to extend the vocabulary."
)
return
extended_num_embeddings = extended_vocab.get_vocab_size(vocab_namespace)
if extended_num_embeddings == self.num_embeddings:
# It's already been extended. No need to initialize / read pretrained file in first place (no-op)
return
if extended_num_embeddings < self.num_embeddings:
raise ConfigurationError(
f"Size of namespace, {vocab_namespace} for extended_vocab is smaller than "
f"embedding. You likely passed incorrect vocab or namespace for extension."
)
# Case 1: user passed extension_pretrained_file and it's available.
if extension_pretrained_file and is_url_or_existing_file(extension_pretrained_file):
# Don't have to do anything here, this is the happy case.
pass
# Case 2: user passed extension_pretrained_file and it's not available
elif extension_pretrained_file:
raise ConfigurationError(
f"You passed pretrained embedding file {extension_pretrained_file} "
f"for model_path {model_path} but it's not available."
)
# Case 3: user didn't pass extension_pretrained_file, but pretrained_file attribute was
# saved during training and is available.
elif is_url_or_existing_file(self._pretrained_file):
extension_pretrained_file = self._pretrained_file
# Case 4: no file is available, hope that pretrained embeddings weren't used in the first place and warn
elif self._pretrained_file is not None:
# Warn here instead of an exception to allow a fine-tuning even without the original pretrained_file
logger.warning(
f"Embedding at model_path, {model_path} cannot locate the pretrained_file. "
f"Originally pretrained_file was at '{self._pretrained_file}'."
)
else:
# When loading a model from archive there is no way to distinguish between whether a pretrained-file
# was or wasn't used during the original training. So we leave an info here.
logger.info(
"If you are fine-tuning and want to use a pretrained_file for "
"embedding extension, please pass the mapping by --embedding-sources argument."
)
embedding_dim = self.weight.data.shape[-1]
if not extension_pretrained_file:
extra_num_embeddings = extended_num_embeddings - self.num_embeddings
extra_weight = torch.FloatTensor(extra_num_embeddings, embedding_dim)
torch.nn.init.xavier_uniform_(extra_weight)
else:
# It's easiest to just reload the embeddings for the entire vocab,
# then only keep the ones we need.
whole_weight = _read_pretrained_embeddings_file(
extension_pretrained_file, embedding_dim, extended_vocab, vocab_namespace
)
extra_weight = whole_weight[self.num_embeddings :, :]
device = self.weight.data.device
extended_weight = torch.cat([self.weight.data, extra_weight.to(device)], dim=0)
self.weight = torch.nn.Parameter(extended_weight, requires_grad=self.weight.requires_grad)
self.num_embeddings = extended_num_embeddings
def _read_pretrained_embeddings_file(
file_uri: str, embedding_dim: int, vocab: Vocabulary, namespace: str = "tokens"
) -> torch.FloatTensor:
"""
    Returns an embedding matrix for the given vocabulary using the pretrained embeddings
contained in the given file. Embeddings for tokens not found in the pretrained embedding file
are randomly initialized using a normal distribution with mean and standard deviation equal to
those of the pretrained embeddings.
We support two file formats:
* text format - utf-8 encoded text file with space separated fields: [word] [dim 1] [dim 2] ...
          The text file may optionally be compressed, and may even reside in an archive with multiple files.
If the file resides in an archive with other files, then `embeddings_filename` must
be a URI "(archive_uri)#file_path_inside_the_archive"
* hdf5 format - hdf5 file containing an embedding matrix in the form of a torch.Tensor.
If the filename ends with '.hdf5' or '.h5' then we load from hdf5, otherwise we assume
text format.
# Parameters
file_uri : `str`, required.
It can be:
        * a file system path or a URL of a possibly compressed text file or a zip/tar archive
containing a single file.
* URI of the type `(archive_path_or_url)#file_path_inside_archive` if the text file
is contained in a multi-file archive.
vocab : `Vocabulary`, required.
A Vocabulary object.
namespace : `str`, (optional, default=`"tokens"`)
The namespace of the vocabulary to find pretrained embeddings for.
    embedding_dim : `int`, required.
        The dimensionality of the embeddings to be read.
# Returns
A weight matrix with embeddings initialized from the read file. The matrix has shape
`(vocab.get_vocab_size(namespace), embedding_dim)`, where the indices of words appearing in
the pretrained embedding file are initialized to the pretrained embedding value.
"""
file_ext = get_file_extension(file_uri)
if file_ext in [".h5", ".hdf5"]:
return _read_embeddings_from_hdf5(file_uri, embedding_dim, vocab, namespace)
return _read_embeddings_from_text_file(file_uri, embedding_dim, vocab, namespace)
def _read_embeddings_from_text_file(
file_uri: str, embedding_dim: int, vocab: Vocabulary, namespace: str = "tokens"
) -> torch.FloatTensor:
"""
    Read pre-trained word vectors from a possibly compressed text file, optionally contained
inside an archive with multiple files. The text file is assumed to be utf-8 encoded with
space-separated fields: [word] [dim 1] [dim 2] ...
    Lines whose number of numerical fields does not match `embedding_dim` trigger a warning and are skipped.
The remainder of the docstring is identical to `_read_pretrained_embeddings_file`.
"""
tokens_to_keep = set(vocab.get_index_to_token_vocabulary(namespace).values())
vocab_size = vocab.get_vocab_size(namespace)
embeddings = {}
# First we read the embeddings from the file, only keeping vectors for the words we need.
logger.info("Reading pretrained embeddings from file")
with EmbeddingsTextFile(file_uri) as embeddings_file:
for line in Tqdm.tqdm(embeddings_file):
token = line.split(" ", 1)[0]
if token in tokens_to_keep:
fields = line.rstrip().split(" ")
if len(fields) - 1 != embedding_dim:
# Sometimes there are funny unicode parsing problems that lead to different
# fields lengths (e.g., a word with a unicode space character that splits
# into more than one column). We skip those lines. Note that if you have
# some kind of long header, this could result in all of your lines getting
# skipped. It's hard to check for that here; you just have to look in the
# embedding_misses_file and at the model summary to make sure things look
# like they are supposed to.
logger.warning(
"Found line with wrong number of dimensions (expected: %d; actual: %d): %s",
embedding_dim,
len(fields) - 1,
line,
)
continue
vector = numpy.asarray(fields[1:], dtype="float32")
embeddings[token] = vector
if not embeddings:
raise ConfigurationError(
"No embeddings of correct dimension found; you probably "
"misspecified your embedding_dim parameter, or didn't "
"pre-populate your Vocabulary"
)
all_embeddings = numpy.asarray(list(embeddings.values()))
embeddings_mean = float(numpy.mean(all_embeddings))
embeddings_std = float(numpy.std(all_embeddings))
# Now we initialize the weight matrix for an embedding layer, starting with random vectors,
# then filling in the word vectors we just read.
logger.info("Initializing pre-trained embedding layer")
embedding_matrix = torch.FloatTensor(vocab_size, embedding_dim).normal_(
embeddings_mean, embeddings_std
)
num_tokens_found = 0
index_to_token = vocab.get_index_to_token_vocabulary(namespace)
for i in range(vocab_size):
token = index_to_token[i]
# If we don't have a pre-trained vector for this word, we'll just leave this row alone,
# so the word has a random initialization.
if token in embeddings:
embedding_matrix[i] = torch.FloatTensor(embeddings[token])
num_tokens_found += 1
else:
logger.debug(
"Token %s was not found in the embedding file. Initialising randomly.", token
)
logger.info(
"Pretrained embeddings were found for %d out of %d tokens", num_tokens_found, vocab_size
)
return embedding_matrix
def _read_embeddings_from_hdf5(
embeddings_filename: str, embedding_dim: int, vocab: Vocabulary, namespace: str = "tokens"
) -> torch.FloatTensor:
"""
Reads from a hdf5 formatted file. The embedding matrix is assumed to
be keyed by 'embedding' and of size `(num_tokens, embedding_dim)`.
"""
with h5py.File(embeddings_filename, "r") as fin:
embeddings = fin["embedding"][...]
if list(embeddings.shape) != [vocab.get_vocab_size(namespace), embedding_dim]:
raise ConfigurationError(
"Read shape {0} embeddings from the file, but expected {1}".format(
list(embeddings.shape), [vocab.get_vocab_size(namespace), embedding_dim]
)
)
return torch.FloatTensor(embeddings)
def format_embeddings_file_uri(
main_file_path_or_url: str, path_inside_archive: Optional[str] = None
) -> str:
if path_inside_archive:
return "({})#{}".format(main_file_path_or_url, path_inside_archive)
return main_file_path_or_url
class EmbeddingsFileURI(NamedTuple):
main_file_uri: str
path_inside_archive: Optional[str] = None
def parse_embeddings_file_uri(uri: str) -> "EmbeddingsFileURI":
match = re.fullmatch(r"\((.*)\)#(.*)", uri)
if match:
fields = cast(Tuple[str, str], match.groups())
return EmbeddingsFileURI(*fields)
else:
return EmbeddingsFileURI(uri, None)
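# Round-trip illustration of the URI format handled by the two helpers above
# (the paths are made up; any local path or URL works):
#
#   format_embeddings_file_uri("/path/to/archive.zip", "glove.txt")
#       -> "(/path/to/archive.zip)#glove.txt"
#   parse_embeddings_file_uri("(/path/to/archive.zip)#glove.txt")
#       -> EmbeddingsFileURI(main_file_uri="/path/to/archive.zip", path_inside_archive="glove.txt")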
class EmbeddingsTextFile(Iterator[str]):
"""
Utility class for opening embeddings text files. Handles various compression formats,
as well as context management.
# Parameters
file_uri : `str`
It can be:
        * a file system path or a URL of a possibly compressed text file or a zip/tar archive
containing a single file.
* URI of the type `(archive_path_or_url)#file_path_inside_archive` if the text file
is contained in a multi-file archive.
    encoding : `str`, optional (default=`"utf-8"`)
        The text encoding used to read the file.
    cache_dir : `str`, optional (default=`None`)
        A cache directory passed to `cached_path` when the file has to be downloaded.
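    A minimal usage sketch (the path is made up; any local path or URL works):

    ```
    with EmbeddingsTextFile("/path/to/glove.txt.gz") as embeddings_file:
        for line in embeddings_file:
            token = line.split(" ", 1)[0]
    ```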
"""
DEFAULT_ENCODING = "utf-8"
def __init__(
self, file_uri: str, encoding: str = DEFAULT_ENCODING, cache_dir: str = None
) -> None:
self.uri = file_uri
self._encoding = encoding
self._cache_dir = cache_dir
self._archive_handle: Any = None # only if the file is inside an archive
main_file_uri, path_inside_archive = parse_embeddings_file_uri(file_uri)
main_file_local_path = cached_path(main_file_uri, cache_dir=cache_dir)
if zipfile.is_zipfile(main_file_local_path): # ZIP archive
self._open_inside_zip(main_file_uri, path_inside_archive)
elif tarfile.is_tarfile(main_file_local_path): # TAR archive
self._open_inside_tar(main_file_uri, path_inside_archive)
else: # all the other supported formats, including uncompressed files
if path_inside_archive:
raise ValueError("Unsupported archive format: %s" + main_file_uri)
# All the python packages for compressed files share the same interface of io.open
extension = get_file_extension(main_file_uri)
# Some systems don't have support for all of these libraries, so we import them only
# when necessary.
package = None
if extension in [".txt", ".vec"]:
package = io
elif extension == ".gz":
import gzip
package = gzip
elif extension == ".bz2":
import bz2
package = bz2
elif extension == ".lzma":
import lzma
package = lzma
if package is None:
logger.warning(
'The embeddings file has an unknown file extension "%s". '
"We will assume the file is an (uncompressed) text file",
extension,
)
package = io
self._handle = package.open( # type: ignore
main_file_local_path, "rt", encoding=encoding
)
# To use this with tqdm we'd like to know the number of tokens. It's possible that the
# first line of the embeddings file contains this: if it does, we want to start iteration
# from the 2nd line, otherwise we want to start from the 1st.
# Unfortunately, once we read the first line, we cannot move back the file iterator
# because the underlying file may be "not seekable"; we use itertools.chain instead.
first_line = next(self._handle) # this moves the iterator forward
self.num_tokens = EmbeddingsTextFile._get_num_tokens_from_first_line(first_line)
if self.num_tokens:
# the first line is a header line: start iterating from the 2nd line
self._iterator = self._handle
else:
# the first line is not a header line: start iterating from the 1st line
self._iterator = itertools.chain([first_line], self._handle)
def _open_inside_zip(self, archive_path: str, member_path: Optional[str] = None) -> None:
cached_archive_path = cached_path(archive_path, cache_dir=self._cache_dir)
archive = zipfile.ZipFile(cached_archive_path, "r")
if member_path is None:
members_list = archive.namelist()
member_path = self._get_the_only_file_in_the_archive(members_list, archive_path)
member_path = cast(str, member_path)
member_file = cast(BinaryIO, archive.open(member_path, "r"))
self._handle = io.TextIOWrapper(member_file, encoding=self._encoding)
self._archive_handle = archive
def _open_inside_tar(self, archive_path: str, member_path: Optional[str] = None) -> None:
cached_archive_path = cached_path(archive_path, cache_dir=self._cache_dir)
archive = tarfile.open(cached_archive_path, "r")
if member_path is None:
members_list = archive.getnames()
member_path = self._get_the_only_file_in_the_archive(members_list, archive_path)
member_path = cast(str, member_path)
member = archive.getmember(member_path) # raises exception if not present
member_file = cast(BinaryIO, archive.extractfile(member))
self._handle = io.TextIOWrapper(member_file, encoding=self._encoding)
self._archive_handle = archive
def read(self) -> str:
return "".join(self._iterator)
def readline(self) -> str:
return next(self._iterator)
def close(self) -> None:
self._handle.close()
if self._archive_handle:
self._archive_handle.close()
def __enter__(self) -> "EmbeddingsTextFile":
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.close()
def __iter__(self) -> "EmbeddingsTextFile":
return self
def __next__(self) -> str:
return next(self._iterator)
def __len__(self) -> Optional[int]:
if self.num_tokens:
return self.num_tokens
raise AttributeError(
"an object of type EmbeddingsTextFile implements `__len__` only if the underlying "
"text file declares the number of tokens (i.e. the number of lines following)"
"in the first line. That is not the case of this particular instance."
)
@staticmethod
def _get_the_only_file_in_the_archive(members_list: Sequence[str], archive_path: str) -> str:
if len(members_list) > 1:
raise ValueError(
"The archive %s contains multiple files, so you must select "
"one of the files inside providing a uri of the type: %s."
% (
archive_path,
format_embeddings_file_uri("path_or_url_to_archive", "path_inside_archive"),
)
)
return members_list[0]
@staticmethod
def _get_num_tokens_from_first_line(line: str) -> Optional[int]:
"""This function takes in input a string and if it contains 1 or 2 integers, it assumes the
largest one it the number of tokens. Returns None if the line doesn't match that pattern."""
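        # Illustration (not from the original code): a word2vec/fastText-style header such as
        # "2000000 300\n" yields 2000000, while an ordinary embedding line such as
        # "the 0.1 0.2 0.3" yields None.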
fields = line.split(" ")
if 1 <= len(fields) <= 2:
try:
int_fields = [int(x) for x in fields]
except ValueError:
return None
else:
num_tokens = max(int_fields)
logger.info(
"Recognized a header line in the embedding file with number of tokens: %d",
num_tokens,
)
return num_tokens
return None
| allennlp-master | allennlp/modules/token_embedders/embedding.py |
import torch
from allennlp.modules.token_embedders.embedding import Embedding
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
@TokenEmbedder.register("character_encoding")
class TokenCharactersEncoder(TokenEmbedder):
"""
A `TokenCharactersEncoder` takes the output of a
[`TokenCharactersIndexer`](../../data/token_indexers/token_characters_indexer.md), which is a tensor of shape
(batch_size, num_tokens, num_characters), embeds the characters, runs a token-level encoder, and
returns the result, which is a tensor of shape (batch_size, num_tokens, encoding_dim). We also
optionally apply dropout after the token-level encoder.
We take the embedding and encoding modules as input, so this class is itself quite simple.
Registered as a `TokenEmbedder` with name "character_encoding".
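    A minimal usage sketch (the character vocabulary size and the choice of encoder below are
    arbitrary, not requirements of this class):

    ```
    import torch
    from allennlp.modules.seq2vec_encoders import CnnEncoder
    from allennlp.modules.token_embedders import Embedding, TokenCharactersEncoder

    char_embedding = Embedding(embedding_dim=8, num_embeddings=262)
    encoder = CnnEncoder(embedding_dim=8, num_filters=16, ngram_filter_sizes=(3,))
    token_encoder = TokenCharactersEncoder(char_embedding, encoder, dropout=0.2)
    token_characters = torch.randint(1, 262, (2, 5, 10))  # (batch_size, num_tokens, num_characters)
    encoded = token_encoder(token_characters)             # shape: (2, 5, 16)
    ```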
"""
def __init__(self, embedding: Embedding, encoder: Seq2VecEncoder, dropout: float = 0.0) -> None:
super().__init__()
self._embedding = TimeDistributed(embedding)
self._encoder = TimeDistributed(encoder)
if dropout > 0:
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = lambda x: x
def get_output_dim(self) -> int:
return self._encoder._module.get_output_dim()
def forward(self, token_characters: torch.Tensor) -> torch.Tensor:
mask = (token_characters != 0).long()
return self._dropout(self._encoder(self._embedding(token_characters), mask))
| allennlp-master | allennlp/modules/token_embedders/token_characters_encoder.py |
"""
A `TokenEmbedder` is a `Module` that
embeds one-hot-encoded tokens as vectors.
"""
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.modules.token_embedders.embedding import Embedding
from allennlp.modules.token_embedders.token_characters_encoder import TokenCharactersEncoder
from allennlp.modules.token_embedders.elmo_token_embedder import ElmoTokenEmbedder
from allennlp.modules.token_embedders.empty_embedder import EmptyEmbedder
from allennlp.modules.token_embedders.bag_of_word_counts_token_embedder import (
BagOfWordCountsTokenEmbedder,
)
from allennlp.modules.token_embedders.pass_through_token_embedder import PassThroughTokenEmbedder
from allennlp.modules.token_embedders.pretrained_transformer_embedder import (
PretrainedTransformerEmbedder,
)
from allennlp.modules.token_embedders.pretrained_transformer_mismatched_embedder import (
PretrainedTransformerMismatchedEmbedder,
)
| allennlp-master | allennlp/modules/token_embedders/__init__.py |
import torch
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
@TokenEmbedder.register("pass_through")
class PassThroughTokenEmbedder(TokenEmbedder):
"""
Assumes that the input is already vectorized in some way,
and just returns it.
Registered as a `TokenEmbedder` with name "pass_through".
# Parameters
hidden_dim : `int`, required.
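    For example (the shapes are arbitrary):

    ```
    import torch

    embedder = PassThroughTokenEmbedder(hidden_dim=300)
    vectors = torch.randn(2, 7, 300)
    assert embedder(vectors) is vectors  # the input is returned unchanged
    ```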
"""
def __init__(self, hidden_dim: int) -> None:
self.hidden_dim = hidden_dim
super().__init__()
def get_output_dim(self):
return self.hidden_dim
def forward(self, tokens: torch.Tensor) -> torch.Tensor:
return tokens
| allennlp-master | allennlp/modules/token_embedders/pass_through_token_embedder.py |
import torch
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
@TokenEmbedder.register("empty")
class EmptyEmbedder(TokenEmbedder):
"""
Assumes you want to completely ignore the output of a `TokenIndexer` for some reason, and does
not return anything when asked to embed it.
You should almost never need to use this; normally you would just not use a particular
`TokenIndexer`. It's only in very rare cases, like simplicity in data processing for language
modeling (where we use just one `TextField` to handle input embedding and computing target ids),
where you might want to use this.
Registered as a `TokenEmbedder` with name "empty".
"""
def __init__(self) -> None:
super().__init__()
def get_output_dim(self):
return 0
def forward(self, *inputs, **kwargs) -> torch.Tensor:
return None
| allennlp-master | allennlp/modules/token_embedders/empty_embedder.py |
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.nn.util import get_text_field_mask
@TokenEmbedder.register("bag_of_word_counts")
class BagOfWordCountsTokenEmbedder(TokenEmbedder):
"""
Represents a sequence of tokens as a bag of (discrete) word ids, as it was done
in the pre-neural days.
Each sequence gets a vector of length vocabulary size, where the i'th entry in the vector
    corresponds to the number of times the i'th token in the vocabulary appears in the sequence.
By default, we ignore padding tokens.
Registered as a `TokenEmbedder` with name "bag_of_word_counts".
# Parameters
vocab : `Vocabulary`
vocab_namespace : `str`, optional (default = `"tokens"`)
namespace of vocabulary to embed
projection_dim : `int`, optional (default = `None`)
if specified, will project the resulting bag of words representation
to specified dimension.
ignore_oov : `bool`, optional (default = `False`)
If true, we ignore the OOV token.
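    A small usage sketch (the token ids refer to the toy vocabulary built in the snippet):

    ```
    import torch
    from allennlp.data import Vocabulary

    vocab = Vocabulary()
    vocab.add_tokens_to_namespace(["the", "cat"], namespace="tokens")
    embedder = BagOfWordCountsTokenEmbedder(vocab)
    token_ids = torch.tensor([[2, 3, 3, 0]])  # 0 is the padding index
    counts = embedder(token_ids)  # shape: (1, vocab.get_vocab_size("tokens"))
    ```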
"""
def __init__(
self,
vocab: Vocabulary,
vocab_namespace: str = "tokens",
projection_dim: int = None,
ignore_oov: bool = False,
) -> None:
super().__init__()
self.vocab = vocab
self.vocab_size = vocab.get_vocab_size(vocab_namespace)
if projection_dim:
self._projection = torch.nn.Linear(self.vocab_size, projection_dim)
else:
self._projection = None
self._ignore_oov = ignore_oov
oov_token = vocab._oov_token
self._oov_idx = vocab.get_token_to_index_vocabulary(vocab_namespace).get(oov_token)
if self._oov_idx is None:
raise ConfigurationError(
"OOV token does not exist in vocabulary namespace {}".format(vocab_namespace)
)
self.output_dim = projection_dim or self.vocab_size
def get_output_dim(self):
return self.output_dim
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
"""
# Parameters
inputs : `torch.Tensor`
Shape `(batch_size, timesteps, sequence_length)` of word ids
representing the current batch.
# Returns
`torch.Tensor`
The bag-of-words representations for the input sequence, shape
`(batch_size, vocab_size)`
"""
bag_of_words_vectors = []
mask = get_text_field_mask({"tokens": {"tokens": inputs}})
if self._ignore_oov:
# also mask out positions corresponding to oov
mask &= inputs != self._oov_idx
for document, doc_mask in zip(inputs, mask):
document = torch.masked_select(document, doc_mask)
vec = torch.bincount(document, minlength=self.vocab_size).float()
vec = vec.view(1, -1)
bag_of_words_vectors.append(vec)
bag_of_words_output = torch.cat(bag_of_words_vectors, 0)
if self._projection:
projection = self._projection
bag_of_words_output = projection(bag_of_words_output)
return bag_of_words_output
| allennlp-master | allennlp/modules/token_embedders/bag_of_word_counts_token_embedder.py |
from typing import List
import torch
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.modules.elmo import Elmo
from allennlp.modules.time_distributed import TimeDistributed
@TokenEmbedder.register("elmo_token_embedder")
class ElmoTokenEmbedder(TokenEmbedder):
"""
Compute a single layer of ELMo representations.
This class serves as a convenience when you only want to use one layer of
ELMo representations at the input of your network. It's essentially a wrapper
around Elmo(num_output_representations=1, ...)
Registered as a `TokenEmbedder` with name "elmo_token_embedder".
# Parameters
options_file : `str`, required.
An ELMo JSON options file.
weight_file : `str`, required.
An ELMo hdf5 weight file.
do_layer_norm : `bool`, optional.
Should we apply layer normalization (passed to `ScalarMix`)?
dropout : `float`, optional, (default = `0.5`).
The dropout value to be applied to the ELMo representations.
requires_grad : `bool`, optional
If True, compute gradient of ELMo parameters for fine tuning.
projection_dim : `int`, optional
If given, we will project the ELMo embedding down to this dimension. We recommend that you
try using ELMo with a lot of dropout and no projection first, but we have found a few cases
where projection helps (particularly where there is very limited training data).
vocab_to_cache : `List[str]`, optional.
A list of words to pre-compute and cache character convolutions
for. If you use this option, the ElmoTokenEmbedder expects that you pass word
indices of shape (batch_size, timesteps) to forward, instead
of character indices. If you use this option and pass a word which
wasn't pre-cached, this will break.
    scalar_mix_parameters : `List[float]`, optional, (default=`None`)
If not `None`, use these scalar mix parameters to weight the representations
produced by different layers. These mixing weights are not updated during
training. The mixing weights here should be the unnormalized (i.e., pre-softmax)
weights. So, if you wanted to use only the 1st layer of a 2-layer ELMo,
        you can set this to [-9e10, 1, -9e10].
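    A rough usage sketch (this downloads the default options and weights the first time it runs):

    ```
    from allennlp.modules.elmo import batch_to_ids

    embedder = ElmoTokenEmbedder()
    character_ids = batch_to_ids([["This", "is", "a", "sentence"]])
    embeddings = embedder(character_ids)  # shape: (1, 4, 1024) for the default model
    ```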
"""
def __init__(
self,
options_file: str = "https://allennlp.s3.amazonaws.com/models/elmo/2x4096_512_2048cnn_2xhighway/"
+ "elmo_2x4096_512_2048cnn_2xhighway_options.json",
weight_file: str = "https://allennlp.s3.amazonaws.com/models/elmo/2x4096_512_2048cnn_2xhighway/"
+ "elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5",
do_layer_norm: bool = False,
dropout: float = 0.5,
requires_grad: bool = False,
projection_dim: int = None,
vocab_to_cache: List[str] = None,
scalar_mix_parameters: List[float] = None,
) -> None:
super().__init__()
self._elmo = Elmo(
options_file,
weight_file,
1,
do_layer_norm=do_layer_norm,
dropout=dropout,
requires_grad=requires_grad,
vocab_to_cache=vocab_to_cache,
scalar_mix_parameters=scalar_mix_parameters,
)
if projection_dim:
self._projection = torch.nn.Linear(self._elmo.get_output_dim(), projection_dim)
self.output_dim = projection_dim
else:
self._projection = None
self.output_dim = self._elmo.get_output_dim()
def get_output_dim(self) -> int:
return self.output_dim
def forward(self, elmo_tokens: torch.Tensor, word_inputs: torch.Tensor = None) -> torch.Tensor:
"""
# Parameters
elmo_tokens : `torch.Tensor`
Shape `(batch_size, timesteps, 50)` of character ids representing the current batch.
word_inputs : `torch.Tensor`, optional.
If you passed a cached vocab, you can in addition pass a tensor of shape
`(batch_size, timesteps)`, which represent word ids which have been pre-cached.
# Returns
`torch.Tensor`
The ELMo representations for the input sequence, shape
`(batch_size, timesteps, embedding_dim)`
"""
elmo_output = self._elmo(elmo_tokens, word_inputs)
elmo_representations = elmo_output["elmo_representations"][0]
if self._projection:
projection = self._projection
for _ in range(elmo_representations.dim() - 2):
projection = TimeDistributed(projection)
elmo_representations = projection(elmo_representations)
return elmo_representations
| allennlp-master | allennlp/modules/token_embedders/elmo_token_embedder.py |
import torch
from allennlp.common import Registrable
class TokenEmbedder(torch.nn.Module, Registrable):
"""
A `TokenEmbedder` is a `Module` that takes as input a tensor with integer ids that have
been output from a [`TokenIndexer`](/api/data/token_indexers/token_indexer.md) and outputs
a vector per token in the input. The input typically has shape `(batch_size, num_tokens)`
or `(batch_size, num_tokens, num_characters)`, and the output is of shape `(batch_size, num_tokens,
output_dim)`. The simplest `TokenEmbedder` is just an embedding layer, but for
character-level input, it could also be some kind of character encoder.
We add a single method to the basic `Module` API: `get_output_dim()`. This lets us
more easily compute output dimensions for the
[`TextFieldEmbedder`](/api/modules/text_field_embedders/text_field_embedder.md),
which we might need when defining model parameters such as LSTMs or linear layers, which need
to know their input dimension before the layers are called.
"""
default_implementation = "embedding"
def get_output_dim(self) -> int:
"""
Returns the final output dimension that this `TokenEmbedder` uses to represent each
token. This is `not` the shape of the returned tensor, but the last element of that shape.
"""
raise NotImplementedError
| allennlp-master | allennlp/modules/token_embedders/token_embedder.py |
from typing import Dict, MutableMapping, Mapping
from allennlp.data.fields.field import DataArray, Field
from allennlp.data.vocabulary import Vocabulary
class Instance(Mapping[str, Field]):
"""
An `Instance` is a collection of :class:`~allennlp.data.fields.field.Field` objects,
specifying the inputs and outputs to
some model. We don't make a distinction between inputs and outputs here, though - all
operations are done on all fields, and when we return arrays, we return them as dictionaries
    keyed by field name. A model can then decide which fields it wants to use as inputs and which
    as outputs.
The `Fields` in an `Instance` can start out either indexed or un-indexed. During the data
processing pipeline, all fields will be indexed, after which multiple instances can be combined
into a `Batch` and then converted into padded arrays.
# Parameters
fields : `Dict[str, Field]`
The `Field` objects that will be used to produce data arrays for this instance.
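    A small construction and indexing sketch (the field names and tokens below are arbitrary):

    ```
    from allennlp.data import Instance, Vocabulary
    from allennlp.data.fields import TextField, LabelField
    from allennlp.data.token_indexers import SingleIdTokenIndexer
    from allennlp.data.tokenizers import Token

    text = TextField([Token("hello"), Token("world")], {"tokens": SingleIdTokenIndexer()})
    instance = Instance({"text": text, "label": LabelField("greeting")})
    vocab = Vocabulary.from_instances([instance])
    instance.index_fields(vocab)
    tensors = instance.as_tensor_dict()
    ```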
"""
__slots__ = ["fields", "indexed"]
def __init__(self, fields: MutableMapping[str, Field]) -> None:
self.fields = fields
self.indexed = False
# Add methods for `Mapping`. Note, even though the fields are
# mutable, we don't implement `MutableMapping` because we want
# you to use `add_field` and supply a vocabulary.
def __getitem__(self, key: str) -> Field:
return self.fields[key]
def __iter__(self):
return iter(self.fields)
def __len__(self) -> int:
return len(self.fields)
def add_field(self, field_name: str, field: Field, vocab: Vocabulary = None) -> None:
"""
Add the field to the existing fields mapping.
If we have already indexed the Instance, then we also index `field`, so
it is necessary to supply the vocab.
"""
self.fields[field_name] = field
if self.indexed and vocab is not None:
field.index(vocab)
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
"""
Increments counts in the given `counter` for all of the vocabulary items in all of the
`Fields` in this `Instance`.
"""
for field in self.fields.values():
field.count_vocab_items(counter)
def index_fields(self, vocab: Vocabulary) -> None:
"""
Indexes all fields in this `Instance` using the provided `Vocabulary`.
This `mutates` the current object, it does not return a new `Instance`.
A `DataLoader` will call this on each pass through a dataset; we use the `indexed`
flag to make sure that indexing only happens once.
This means that if for some reason you modify your vocabulary after you've
indexed your instances, you might get unexpected behavior.
"""
if not self.indexed:
self.indexed = True
for field in self.fields.values():
field.index(vocab)
def get_padding_lengths(self) -> Dict[str, Dict[str, int]]:
"""
Returns a dictionary of padding lengths, keyed by field name. Each `Field` returns a
mapping from padding keys to actual lengths, and we just key that dictionary by field name.
"""
lengths = {}
for field_name, field in self.fields.items():
lengths[field_name] = field.get_padding_lengths()
return lengths
def as_tensor_dict(
self, padding_lengths: Dict[str, Dict[str, int]] = None
) -> Dict[str, DataArray]:
"""
Pads each `Field` in this instance to the lengths given in `padding_lengths` (which is
keyed by field name, then by padding key, the same as the return value in
:func:`get_padding_lengths`), returning a list of torch tensors for each field.
If `padding_lengths` is omitted, we will call `self.get_padding_lengths()` to get the
sizes of the tensors to create.
"""
padding_lengths = padding_lengths or self.get_padding_lengths()
tensors = {}
for field_name, field in self.fields.items():
tensors[field_name] = field.as_tensor(padding_lengths[field_name])
return tensors
def __str__(self) -> str:
base_string = "Instance with fields:\n"
return " ".join(
[base_string] + [f"\t {name}: {field} \n" for name, field in self.fields.items()]
)
def duplicate(self) -> "Instance":
new = Instance({k: field.duplicate() for k, field in self.fields.items()})
new.indexed = self.indexed
return new
| allennlp-master | allennlp/data/instance.py |
"""
A :class:`Batch` represents a collection of `Instance` s to be fed
through a model.
"""
import logging
from collections import defaultdict
from typing import Dict, Iterable, Iterator, List, Union
import numpy
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import ensure_list
from allennlp.data.instance import Instance
from allennlp.data.vocabulary import Vocabulary
logger = logging.getLogger(__name__)
class Batch(Iterable):
"""
A batch of Instances. In addition to containing the instances themselves,
it contains helper functions for converting the data into tensors.
A Batch just takes an iterable of instances in its constructor and hangs onto them
in a list.
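    A typical usage sketch (assuming `instances` and `vocab` were built elsewhere):

    ```
    batch = Batch(instances)
    batch.index_instances(vocab)
    tensors = batch.as_tensor_dict(batch.get_padding_lengths())
    ```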
"""
__slots__ = ["instances"]
def __init__(self, instances: Iterable[Instance]) -> None:
super().__init__()
self.instances = ensure_list(instances)
self._check_types()
def _check_types(self) -> None:
"""
Check that all the instances have the same types.
"""
all_instance_fields_and_types: List[Dict[str, str]] = [
{k: v.__class__.__name__ for k, v in x.fields.items()} for x in self.instances
]
# Check all the field names and Field types are the same for every instance.
if not all(all_instance_fields_and_types[0] == x for x in all_instance_fields_and_types):
raise ConfigurationError("You cannot construct a Batch with non-homogeneous Instances.")
def get_padding_lengths(self) -> Dict[str, Dict[str, int]]:
"""
Gets the maximum padding lengths from all `Instances` in this batch. Each `Instance`
has multiple `Fields`, and each `Field` could have multiple things that need padding.
We look at all fields in all instances, and find the max values for each (field_name,
padding_key) pair, returning them in a dictionary.
This can then be used to convert this batch into arrays of consistent length, or to set
model parameters, etc.
"""
padding_lengths: Dict[str, Dict[str, int]] = defaultdict(dict)
all_instance_lengths: List[Dict[str, Dict[str, int]]] = [
instance.get_padding_lengths() for instance in self.instances
]
all_field_lengths: Dict[str, List[Dict[str, int]]] = defaultdict(list)
for instance_lengths in all_instance_lengths:
for field_name, instance_field_lengths in instance_lengths.items():
all_field_lengths[field_name].append(instance_field_lengths)
for field_name, field_lengths in all_field_lengths.items():
for padding_key in field_lengths[0].keys():
max_value = max(x.get(padding_key, 0) for x in field_lengths)
padding_lengths[field_name][padding_key] = max_value
return {**padding_lengths}
def as_tensor_dict(
self, padding_lengths: Dict[str, Dict[str, int]] = None, verbose: bool = False
) -> Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]]:
# This complex return type is actually predefined elsewhere as a DataArray,
# but we can't use it because mypy doesn't like it.
"""
This method converts this `Batch` into a set of pytorch Tensors that can be passed
through a model. In order for the tensors to be valid tensors, all `Instances` in this
batch need to be padded to the same lengths wherever padding is necessary, so we do that
first, then we combine all of the tensors for each field in each instance into a set of
batched tensors for each field.
# Parameters
padding_lengths : `Dict[str, Dict[str, int]]`
If a key is present in this dictionary with a non-`None` value, we will pad to that
length instead of the length calculated from the data. This lets you, e.g., set a
maximum value for sentence length if you want to throw out long sequences.
Entries in this dictionary are keyed first by field name (e.g., "question"), then by
padding key (e.g., "num_tokens").
verbose : `bool`, optional (default=`False`)
Should we output logging information when we're doing this padding? If the batch is
large, this is nice to have, because padding a large batch could take a long time.
But if you're doing this inside of a data generator, having all of this output per
batch is a bit obnoxious (and really slow).
# Returns
tensors : `Dict[str, DataArray]`
A dictionary of tensors, keyed by field name, suitable for passing as input to a model.
This is a `batch` of instances, so, e.g., if the instances have a "question" field and
an "answer" field, the "question" fields for all of the instances will be grouped
together into a single tensor, and the "answer" fields for all instances will be
similarly grouped in a parallel set of tensors, for batched computation. Additionally,
for complex `Fields`, the value of the dictionary key is not necessarily a single
tensor. For example, with the `TextField`, the output is a dictionary mapping
`TokenIndexer` keys to tensors. The number of elements in this sub-dictionary
therefore corresponds to the number of `TokenIndexers` used to index the
`TextField`. Each `Field` class is responsible for batching its own output.
"""
padding_lengths = padding_lengths or defaultdict(dict)
# First we need to decide _how much_ to pad. To do that, we find the max length for all
# relevant padding decisions from the instances themselves. Then we check whether we were
# given a max length for a particular field and padding key. If we were, we use that
# instead of the instance-based one.
if verbose:
logger.info(f"Padding batch of size {len(self.instances)} to lengths {padding_lengths}")
logger.info("Getting max lengths from instances")
instance_padding_lengths = self.get_padding_lengths()
if verbose:
logger.info(f"Instance max lengths: {instance_padding_lengths}")
lengths_to_use: Dict[str, Dict[str, int]] = defaultdict(dict)
for field_name, instance_field_lengths in instance_padding_lengths.items():
for padding_key in instance_field_lengths.keys():
if padding_key in padding_lengths[field_name]:
lengths_to_use[field_name][padding_key] = padding_lengths[field_name][
padding_key
]
else:
lengths_to_use[field_name][padding_key] = instance_field_lengths[padding_key]
# Now we actually pad the instances to tensors.
field_tensors: Dict[str, list] = defaultdict(list)
if verbose:
logger.info(f"Now actually padding instances to length: {lengths_to_use}")
for instance in self.instances:
for field, tensors in instance.as_tensor_dict(lengths_to_use).items():
field_tensors[field].append(tensors)
# Finally, we combine the tensors that we got for each instance into one big tensor (or set
# of tensors) per field. The `Field` classes themselves have the logic for batching the
# tensors together, so we grab a dictionary of field_name -> field class from the first
# instance in the batch.
field_classes = self.instances[0].fields
return {
field_name: field_classes[field_name].batch_tensors(field_tensor_list)
for field_name, field_tensor_list in field_tensors.items()
}
def __iter__(self) -> Iterator[Instance]:
return iter(self.instances)
def index_instances(self, vocab: Vocabulary) -> None:
for instance in self.instances:
instance.index_fields(vocab)
def print_statistics(self) -> None:
        # Make sure it has been indexed first
sequence_field_lengths: Dict[str, List] = defaultdict(list)
for instance in self.instances:
if not instance.indexed:
raise ConfigurationError(
"Instances must be indexed with vocabulary "
"before asking to print dataset statistics."
)
for field, field_padding_lengths in instance.get_padding_lengths().items():
for key, value in field_padding_lengths.items():
sequence_field_lengths[f"{field}.{key}"].append(value)
print("\n\n----Dataset Statistics----\n")
for name, lengths in sequence_field_lengths.items():
print(f"Statistics for {name}:")
print(
f"\tLengths: Mean: {numpy.mean(lengths)}, Standard Dev: {numpy.std(lengths)}, "
f"Max: {numpy.max(lengths)}, Min: {numpy.min(lengths)}"
)
print("\n10 Random instances:")
for i in numpy.random.randint(len(self.instances), size=10):
print(f"Instance {i}:")
print(f"\t{self.instances[i]}")
| allennlp-master | allennlp/data/batch.py |
from allennlp.data.dataloader import DataLoader, PyTorchDataLoader, allennlp_collate
from allennlp.data.dataset_readers.dataset_reader import (
DatasetReader,
AllennlpDataset,
AllennlpLazyDataset,
)
from allennlp.data.fields.field import DataArray, Field
from allennlp.data.fields.text_field import TextFieldTensors
from allennlp.data.instance import Instance
from allennlp.data.samplers import BatchSampler, Sampler
from allennlp.data.token_indexers.token_indexer import TokenIndexer, IndexedTokenList
from allennlp.data.tokenizers import Token, Tokenizer
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.batch import Batch
| allennlp-master | allennlp/data/__init__.py |
from typing import List, Dict, Union, Iterator
import torch
from torch.utils import data
from allennlp.common.registrable import Registrable
from allennlp.common.lazy import Lazy
from allennlp.data.instance import Instance
from allennlp.data.batch import Batch
from allennlp.data.samplers import Sampler, BatchSampler
TensorDict = Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]]
def allennlp_collate(instances: List[Instance]) -> TensorDict:
batch = Batch(instances)
return batch.as_tensor_dict(batch.get_padding_lengths())
class DataLoader(Registrable):
"""
A `DataLoader` is responsible for generating batches of instances from a `Dataset`,
or another source of data. This is essentially just an abstraction over `torch.utils.data.DataLoader`.
This class only has one required method, `__iter__()`, that creates an iterable
of `TensorDict`s. Additionally, this class comes with a `__len__()` method
that just raises a `TypeError` by default. When possible, this should be overriden
to return the number of batches that will be generated by the `__iter__()` method.
"""
default_implementation = "pytorch_dataloader"
def __len__(self) -> int:
raise TypeError
def __iter__(self) -> Iterator[TensorDict]:
raise NotImplementedError
@DataLoader.register("pytorch_dataloader", constructor="from_partial_objects")
class PyTorchDataLoader(data.DataLoader, DataLoader):
"""
A registrable version of the pytorch
[DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader).
    Firstly, this class exists so that we can construct a DataLoader
from a configuration file and have a different default `collate_fn`.
    You can use this class directly in python code; it is identical to using the pytorch
    dataloader with allennlp's custom collate function:
```
from torch.utils.data import DataLoader
from allennlp.data import allennlp_collate
# Construct a dataloader directly for a dataset which contains allennlp
# Instances which have _already_ been indexed.
my_loader = DataLoader(dataset, batch_size=32, collate_fn=allennlp_collate)
```
Secondly, this class adds a `batches_per_epoch` parameter which, if given, determines the number
of batches after which an epoch ends. If this is `None`, then an epoch is set to be one full pass
through your data. You might use this if you have a very large dataset and want more frequent
checkpoints and evaluations on validation data, for instance.
In a typical AllenNLP configuration file, the `dataset` parameter does not get an entry under
the "data_loader", it gets constructed separately.
"""
def __init__(
self,
dataset: data.Dataset,
batch_size: int = 1,
shuffle: bool = False,
sampler: Sampler = None,
batch_sampler: BatchSampler = None,
num_workers: int = 0,
# NOTE: The default for collate_fn is different from the normal `None`.
# We assume that if you are using this class you are using an
# allennlp dataset of instances, which would require this.
collate_fn=allennlp_collate,
pin_memory: bool = False,
drop_last: bool = False,
timeout: int = 0,
worker_init_fn=None,
multiprocessing_context: str = None,
batches_per_epoch: int = None,
):
super().__init__(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
sampler=sampler,
batch_sampler=batch_sampler,
num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=pin_memory,
drop_last=drop_last,
timeout=timeout,
worker_init_fn=worker_init_fn,
multiprocessing_context=multiprocessing_context,
)
self._data_generator = super().__iter__()
self._batches_per_epoch = batches_per_epoch
def __len__(self):
if self._batches_per_epoch is not None:
return self._batches_per_epoch
return super().__len__()
def __iter__(self):
if self._batches_per_epoch is None:
# NOTE: since torch's DataLoader is listed as the first super class of this class,
# super().__iter__() will resolve to the __iter__ method from torch's DataLoader,
# which is what we want.
yield from super().__iter__()
else:
for i in range(self._batches_per_epoch):
try:
yield next(self._data_generator)
except StopIteration: # data_generator is exhausted
self._data_generator = super().__iter__() # so refresh it
yield next(self._data_generator) # and yield required instance
@classmethod
def from_partial_objects(
cls,
dataset: data.Dataset,
batch_size: int = 1,
shuffle: bool = False,
sampler: Lazy[Sampler] = None,
batch_sampler: Lazy[BatchSampler] = None,
num_workers: int = 0,
pin_memory: bool = False,
drop_last: bool = False,
timeout: int = 0,
worker_init_fn=None,
multiprocessing_context: str = None,
batches_per_epoch: int = None,
) -> "PyTorchDataLoader":
batch_sampler_ = (
None if batch_sampler is None else batch_sampler.construct(data_source=dataset)
)
sampler_ = None if sampler is None else sampler.construct(data_source=dataset)
return cls(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
sampler=sampler_,
batch_sampler=batch_sampler_,
num_workers=num_workers,
# NOTE: The default for collate_fn is different from the normal `None`.
# We assume that if you are using this class you are using an
# allennlp dataset of instances, which would require this.
collate_fn=allennlp_collate,
pin_memory=pin_memory,
drop_last=drop_last,
timeout=timeout,
worker_init_fn=worker_init_fn,
multiprocessing_context=multiprocessing_context,
batches_per_epoch=batches_per_epoch,
)
| allennlp-master | allennlp/data/dataloader.py |
"""
A Vocabulary maps strings to integers, allowing for strings to be mapped to an
out-of-vocabulary token.
"""
import codecs
import copy
import logging
import os
import re
from collections import defaultdict
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Union, TYPE_CHECKING
from filelock import FileLock
from allennlp.common import Registrable
from allennlp.common.file_utils import cached_path
from allennlp.common.checks import ConfigurationError
from allennlp.common.tqdm import Tqdm
from allennlp.common.util import namespace_match
if TYPE_CHECKING:
from allennlp.data import instance as adi # noqa
logger = logging.getLogger(__name__)
DEFAULT_NON_PADDED_NAMESPACES = ("*tags", "*labels")
DEFAULT_PADDING_TOKEN = "@@PADDING@@"
DEFAULT_OOV_TOKEN = "@@UNKNOWN@@"
NAMESPACE_PADDING_FILE = "non_padded_namespaces.txt"
_NEW_LINE_REGEX = re.compile(r"\n|\r\n")
class _NamespaceDependentDefaultDict(defaultdict):
"""
This is a [defaultdict]
(https://docs.python.org/2/library/collections.html#collections.defaultdict) where the
default value is dependent on the key that is passed.
We use "namespaces" in the :class:`Vocabulary` object to keep track of several different
mappings from strings to integers, so that we have a consistent API for mapping words, tags,
labels, characters, or whatever else you want, into integers. The issue is that some of those
namespaces (words and characters) should have integers reserved for padding and
out-of-vocabulary tokens, while others (labels and tags) shouldn't. This class allows you to
specify filters on the namespace (the key used in the `defaultdict`), and use different
default values depending on whether the namespace passes the filter.
To do filtering, we take a set of `non_padded_namespaces`. This is a set of strings
that are either matched exactly against the keys, or treated as suffixes, if the
string starts with `*`. In other words, if `*tags` is in `non_padded_namespaces` then
`passage_tags`, `question_tags`, etc. (anything that ends with `tags`) will have the
`non_padded` default value.
# Parameters
non_padded_namespaces : `Iterable[str]`
A set / list / tuple of strings describing which namespaces are not padded. If a namespace
(key) is missing from this dictionary, we will use :func:`namespace_match` to see whether
the namespace should be padded. If the given namespace matches any of the strings in this
list, we will use `non_padded_function` to initialize the value for that namespace, and
we will use `padded_function` otherwise.
padded_function : `Callable[[], Any]`
A zero-argument function to call to initialize a value for a namespace that `should` be
padded.
non_padded_function : `Callable[[], Any]`
A zero-argument function to call to initialize a value for a namespace that should `not` be
padded.
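    For example (the padding and OOV tokens below are used purely for illustration):

    ```
    d = _NamespaceDependentDefaultDict(
        non_padded_namespaces=["*labels"],
        padded_function=lambda: {"@@PADDING@@": 0, "@@UNKNOWN@@": 1},
        non_padded_function=lambda: {},
    )
    d["tokens"]  # -> {"@@PADDING@@": 0, "@@UNKNOWN@@": 1}
    d["labels"]  # -> {} because "labels" matches the "*labels" pattern
    ```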
"""
def __init__(
self,
non_padded_namespaces: Iterable[str],
padded_function: Callable[[], Any],
non_padded_function: Callable[[], Any],
) -> None:
self._non_padded_namespaces = set(non_padded_namespaces)
self._padded_function = padded_function
self._non_padded_function = non_padded_function
super().__init__()
def __missing__(self, key: str):
if any(namespace_match(pattern, key) for pattern in self._non_padded_namespaces):
value = self._non_padded_function()
else:
value = self._padded_function()
dict.__setitem__(self, key, value)
return value
def add_non_padded_namespaces(self, non_padded_namespaces: Set[str]):
# add non_padded_namespaces which weren't already present
self._non_padded_namespaces.update(non_padded_namespaces)
class _TokenToIndexDefaultDict(_NamespaceDependentDefaultDict):
def __init__(self, non_padded_namespaces: Set[str], padding_token: str, oov_token: str) -> None:
super().__init__(
non_padded_namespaces, lambda: {padding_token: 0, oov_token: 1}, lambda: {}
)
class _IndexToTokenDefaultDict(_NamespaceDependentDefaultDict):
def __init__(self, non_padded_namespaces: Set[str], padding_token: str, oov_token: str) -> None:
super().__init__(
non_padded_namespaces, lambda: {0: padding_token, 1: oov_token}, lambda: {}
)
def _read_pretrained_tokens(embeddings_file_uri: str) -> List[str]:
# Moving this import to the top breaks everything (cycling import, I guess)
from allennlp.modules.token_embedders.embedding import EmbeddingsTextFile
logger.info("Reading pretrained tokens from: %s", embeddings_file_uri)
tokens: List[str] = []
with EmbeddingsTextFile(embeddings_file_uri) as embeddings_file:
for line_number, line in enumerate(Tqdm.tqdm(embeddings_file), start=1):
token_end = line.find(" ")
if token_end >= 0:
token = line[:token_end]
tokens.append(token)
else:
line_begin = line[:20] + "..." if len(line) > 20 else line
logger.warning("Skipping line number %d: %s", line_number, line_begin)
return tokens
class Vocabulary(Registrable):
"""
A Vocabulary maps strings to integers, allowing for strings to be mapped to an
out-of-vocabulary token.
Vocabularies are fit to a particular dataset, which we use to decide which tokens are
in-vocabulary.
Vocabularies also allow for several different namespaces, so you can have separate indices for
'a' as a word, and 'a' as a character, for instance, and so we can use this object to also map
tag and label strings to indices, for a unified :class:`~.fields.field.Field` API. Most of the
methods on this class allow you to pass in a namespace; by default we use the 'tokens'
namespace, and you can omit the namespace argument everywhere and just use the default.
This class is registered as a `Vocabulary` with four different names, which all point to
different `@classmethod` constructors found in this class. `from_instances` is registered as
"from_instances", `from_files` is registered as "from_files", `from_files_and_instances` is
registered as "extend", and `empty` is registered as "empty". If you are using a configuration
file to construct a vocabulary, you can use any of those strings as the "type" key in the
configuration file to use the corresponding `@classmethod` to construct the object.
"from_instances" is the default. Look at the docstring for the `@classmethod` to see what keys
are allowed in the configuration file (when there is an `instances` argument to the
`@classmethod`, it will be passed in separately and does not need a corresponding key in the
configuration file).
# Parameters
counter : `Dict[str, Dict[str, int]]`, optional (default=`None`)
A collection of counts from which to initialize this vocabulary. We will examine the
counts and, together with the other parameters to this class, use them to decide which
words are in-vocabulary. If this is `None`, we just won't initialize the vocabulary with
anything.
min_count : `Dict[str, int]`, optional (default=`None`)
When initializing the vocab from a counter, you can specify a minimum count, and every
token with a count less than this will not be added to the dictionary. These minimum
counts are `namespace-specific`, so you can specify different minimums for labels versus
        word tokens, for example. If a namespace does not have a key in the given dictionary, we
will add all seen tokens to that namespace.
max_vocab_size : `Union[int, Dict[str, int]]`, optional (default=`None`)
If you want to cap the number of tokens in your vocabulary, you can do so with this
parameter. If you specify a single integer, every namespace will have its vocabulary fixed
to be no larger than this. If you specify a dictionary, then each namespace in the
`counter` can have a separate maximum vocabulary size. Any missing key will have a value
of `None`, which means no cap on the vocabulary size.
non_padded_namespaces : `Iterable[str]`, optional
By default, we assume you are mapping word / character tokens to integers, and so you want
to reserve word indices for padding and out-of-vocabulary tokens. However, if you are
mapping NER or SRL tags, or class labels, to integers, you probably do not want to reserve
indices for padding and out-of-vocabulary tokens. Use this field to specify which
namespaces should `not` have padding and OOV tokens added.
The format of each element of this is either a string, which must match field names
exactly, or `*` followed by a string, which we match as a suffix against field names.
We try to make the default here reasonable, so that you don't have to think about this.
The default is `("*tags", "*labels")`, so as long as your namespace ends in "tags" or
"labels" (which is true by default for all tag and label fields in this code), you don't
have to specify anything here.
pretrained_files : `Dict[str, str]`, optional
If provided, this map specifies the path to optional pretrained embedding files for each
namespace. This can be used to either restrict the vocabulary to only words which appear
in this file, or to ensure that any words in this file are included in the vocabulary
regardless of their count, depending on the value of `only_include_pretrained_words`.
Words which appear in the pretrained embedding file but not in the data are NOT included
in the Vocabulary.
min_pretrained_embeddings : `Dict[str, int]`, optional
If provided, specifies for each namespace a minimum number of lines (typically the
most common words) to keep from pretrained embedding files, even for words not
appearing in the data.
only_include_pretrained_words : `bool`, optional (default=`False`)
This defines the strategy for using any pretrained embedding files which may have been
        specified in `pretrained_files`. If False, an inclusive strategy is used: any words
which are in the `counter` and in the pretrained file are added to the `Vocabulary`,
regardless of whether their count exceeds `min_count` or not. If True, we use an
exclusive strategy: words are only included in the Vocabulary if they are in the pretrained
embedding file (their count must still be at least `min_count`).
tokens_to_add : `Dict[str, List[str]]`, optional (default=`None`)
If given, this is a list of tokens to add to the vocabulary, keyed by the namespace to add
the tokens to. This is a way to be sure that certain items appear in your vocabulary,
regardless of any other vocabulary computation.
padding_token : `str`, optional (default=`DEFAULT_PADDING_TOKEN`)
        If given, this is the string used for padding.
oov_token : `str`, optional (default=`DEFAULT_OOV_TOKEN`)
        If given, this is the string used for out-of-vocabulary (OOV) tokens.
"""
default_implementation = "from_instances"
def __init__(
self,
counter: Dict[str, Dict[str, int]] = None,
min_count: Dict[str, int] = None,
max_vocab_size: Union[int, Dict[str, int]] = None,
non_padded_namespaces: Iterable[str] = DEFAULT_NON_PADDED_NAMESPACES,
pretrained_files: Optional[Dict[str, str]] = None,
only_include_pretrained_words: bool = False,
tokens_to_add: Dict[str, List[str]] = None,
min_pretrained_embeddings: Dict[str, int] = None,
padding_token: Optional[str] = DEFAULT_PADDING_TOKEN,
oov_token: Optional[str] = DEFAULT_OOV_TOKEN,
) -> None:
self._padding_token = padding_token if padding_token is not None else DEFAULT_PADDING_TOKEN
self._oov_token = oov_token if oov_token is not None else DEFAULT_OOV_TOKEN
self._non_padded_namespaces = set(non_padded_namespaces)
self._token_to_index = _TokenToIndexDefaultDict(
self._non_padded_namespaces, self._padding_token, self._oov_token
)
self._index_to_token = _IndexToTokenDefaultDict(
self._non_padded_namespaces, self._padding_token, self._oov_token
)
self._retained_counter: Optional[Dict[str, Dict[str, int]]] = None
# Made an empty vocabulary, now extend it.
self._extend(
counter,
min_count,
max_vocab_size,
non_padded_namespaces,
pretrained_files,
only_include_pretrained_words,
tokens_to_add,
min_pretrained_embeddings,
)
@classmethod
def from_instances(
cls,
instances: Iterable["adi.Instance"],
min_count: Dict[str, int] = None,
max_vocab_size: Union[int, Dict[str, int]] = None,
non_padded_namespaces: Iterable[str] = DEFAULT_NON_PADDED_NAMESPACES,
pretrained_files: Optional[Dict[str, str]] = None,
only_include_pretrained_words: bool = False,
tokens_to_add: Dict[str, List[str]] = None,
min_pretrained_embeddings: Dict[str, int] = None,
padding_token: Optional[str] = DEFAULT_PADDING_TOKEN,
oov_token: Optional[str] = DEFAULT_OOV_TOKEN,
) -> "Vocabulary":
"""
Constructs a vocabulary given a collection of `Instances` and some parameters.
We count all of the vocabulary items in the instances, then pass those counts
and the other parameters, to :func:`__init__`. See that method for a description
of what the other parameters do.
The `instances` parameter does not get an entry in a typical AllenNLP configuration file,
but the other parameters do (if you want non-default parameters).
"""
logger.info("Fitting token dictionary from dataset.")
padding_token = padding_token if padding_token is not None else DEFAULT_PADDING_TOKEN
oov_token = oov_token if oov_token is not None else DEFAULT_OOV_TOKEN
namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
for instance in Tqdm.tqdm(instances, desc="building vocab"):
instance.count_vocab_items(namespace_token_counts)
return cls(
counter=namespace_token_counts,
min_count=min_count,
max_vocab_size=max_vocab_size,
non_padded_namespaces=non_padded_namespaces,
pretrained_files=pretrained_files,
only_include_pretrained_words=only_include_pretrained_words,
tokens_to_add=tokens_to_add,
min_pretrained_embeddings=min_pretrained_embeddings,
padding_token=padding_token,
oov_token=oov_token,
)
@classmethod
def from_files(
cls,
directory: Union[str, os.PathLike],
padding_token: Optional[str] = DEFAULT_PADDING_TOKEN,
oov_token: Optional[str] = DEFAULT_OOV_TOKEN,
) -> "Vocabulary":
"""
Loads a `Vocabulary` that was serialized either using `save_to_files` or inside
a model archive file.
# Parameters
directory : `str`
The directory or archive file containing the serialized vocabulary.
"""
logger.info("Loading token dictionary from %s.", directory)
padding_token = padding_token if padding_token is not None else DEFAULT_PADDING_TOKEN
oov_token = oov_token if oov_token is not None else DEFAULT_OOV_TOKEN
if not os.path.isdir(directory):
base_directory = cached_path(directory, extract_archive=True)
# For convenience we'll check for a 'vocabulary' subdirectory of the archive.
# That way you can use model archives directly.
vocab_subdir = os.path.join(base_directory, "vocabulary")
if os.path.isdir(vocab_subdir):
directory = vocab_subdir
elif os.path.isdir(base_directory):
directory = base_directory
else:
raise ConfigurationError(f"{directory} is neither a directory nor an archive")
# We use a lock file to avoid race conditions where multiple processes
# might be reading/writing from/to the same vocab files at once.
with FileLock(os.path.join(directory, ".lock")):
with codecs.open(
os.path.join(directory, NAMESPACE_PADDING_FILE), "r", "utf-8"
) as namespace_file:
non_padded_namespaces = [namespace_str.strip() for namespace_str in namespace_file]
vocab = cls(
non_padded_namespaces=non_padded_namespaces,
padding_token=padding_token,
oov_token=oov_token,
)
# Check every file in the directory.
for namespace_filename in os.listdir(directory):
if namespace_filename == NAMESPACE_PADDING_FILE:
continue
if namespace_filename.startswith("."):
continue
namespace = namespace_filename.replace(".txt", "")
if any(namespace_match(pattern, namespace) for pattern in non_padded_namespaces):
is_padded = False
else:
is_padded = True
filename = os.path.join(directory, namespace_filename)
vocab.set_from_file(filename, is_padded, namespace=namespace, oov_token=oov_token)
return vocab
@classmethod
def from_files_and_instances(
cls,
instances: Iterable["adi.Instance"],
directory: str,
padding_token: Optional[str] = DEFAULT_PADDING_TOKEN,
oov_token: Optional[str] = DEFAULT_OOV_TOKEN,
min_count: Dict[str, int] = None,
max_vocab_size: Union[int, Dict[str, int]] = None,
non_padded_namespaces: Iterable[str] = DEFAULT_NON_PADDED_NAMESPACES,
pretrained_files: Optional[Dict[str, str]] = None,
only_include_pretrained_words: bool = False,
tokens_to_add: Dict[str, List[str]] = None,
min_pretrained_embeddings: Dict[str, int] = None,
) -> "Vocabulary":
"""
Extends an already generated vocabulary using a collection of instances.
The `instances` parameter does not get an entry in a typical AllenNLP configuration file,
but the other parameters do (if you want non-default parameters). See `__init__` for a
description of what the other parameters mean.
"""
vocab = cls.from_files(directory, padding_token, oov_token)
logger.info("Fitting token dictionary from dataset.")
namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
for instance in Tqdm.tqdm(instances):
instance.count_vocab_items(namespace_token_counts)
vocab._extend(
counter=namespace_token_counts,
min_count=min_count,
max_vocab_size=max_vocab_size,
non_padded_namespaces=non_padded_namespaces,
pretrained_files=pretrained_files,
only_include_pretrained_words=only_include_pretrained_words,
tokens_to_add=tokens_to_add,
min_pretrained_embeddings=min_pretrained_embeddings,
)
return vocab
@classmethod
def empty(cls) -> "Vocabulary":
"""
This method returns a bare vocabulary instantiated with `cls()` (so, `Vocabulary()` if you
haven't made a subclass of this object). The only reason to call `Vocabulary.empty()`
instead of `Vocabulary()` is if you are instantiating this object from a config file. We
register this constructor with the key "empty", so if you know that you don't need to
compute a vocabulary (either because you're loading a pre-trained model from an archive
file, you're using a pre-trained transformer that has its own vocabulary, or something
else), you can use this to avoid having the default vocabulary construction code iterate
through the data.
"""
return cls()
def set_from_file(
self,
filename: str,
is_padded: bool = True,
oov_token: str = DEFAULT_OOV_TOKEN,
namespace: str = "tokens",
):
"""
If you already have a vocabulary file for a trained model somewhere, and you really want to
use that vocabulary file instead of just setting the vocabulary from a dataset, for
whatever reason, you can do that with this method. You must specify the namespace to use,
and we assume that you want to use padding and OOV tokens for this.
# Parameters
filename : `str`
The file containing the vocabulary to load. It should be formatted as one token per
line, with nothing else in the line. The index we assign to the token is the line
number in the file (1-indexed if `is_padded`, 0-indexed otherwise). Note that this
file should contain the OOV token string!
is_padded : `bool`, optional (default=`True`)
Is this vocabulary padded? For token / word / character vocabularies, this should be
`True`; while for tag or label vocabularies, this should typically be `False`. If
`True`, we add a padding token with index 0, and we enforce that the `oov_token` is
present in the file.
oov_token : `str`, optional (default=`DEFAULT_OOV_TOKEN`)
What token does this vocabulary use to represent out-of-vocabulary characters? This
must show up as a line in the vocabulary file. When we find it, we replace
`oov_token` with `self._oov_token`, because we only use one OOV token across
namespaces.
namespace : `str`, optional (default=`"tokens"`)
What namespace should we overwrite with this vocab file?
"""
if is_padded:
self._token_to_index[namespace] = {self._padding_token: 0}
self._index_to_token[namespace] = {0: self._padding_token}
else:
self._token_to_index[namespace] = {}
self._index_to_token[namespace] = {}
with codecs.open(filename, "r", "utf-8") as input_file:
lines = _NEW_LINE_REGEX.split(input_file.read())
# Be flexible about having final newline or not
if lines and lines[-1] == "":
lines = lines[:-1]
for i, line in enumerate(lines):
index = i + 1 if is_padded else i
token = line.replace("@@NEWLINE@@", "\n")
if token == oov_token:
token = self._oov_token
self._token_to_index[namespace][token] = index
self._index_to_token[namespace][index] = token
if is_padded:
assert self._oov_token in self._token_to_index[namespace], "OOV token not found!"
def extend_from_instances(self, instances: Iterable["adi.Instance"]) -> None:
logger.info("Fitting token dictionary from dataset.")
namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
for instance in Tqdm.tqdm(instances):
instance.count_vocab_items(namespace_token_counts)
self._extend(counter=namespace_token_counts)
def extend_from_vocab(self, vocab: "Vocabulary") -> None:
"""
Adds all vocabulary items from all namespaces in the given vocabulary to this vocabulary.
        Useful if you want to load a model and extend its vocabulary using new instances.
We also add all non-padded namespaces from the given vocabulary to this vocabulary.
"""
self._non_padded_namespaces.update(vocab._non_padded_namespaces)
self._token_to_index._non_padded_namespaces.update(vocab._non_padded_namespaces)
self._index_to_token._non_padded_namespaces.update(vocab._non_padded_namespaces)
for namespace in vocab.get_namespaces():
for token in vocab.get_token_to_index_vocabulary(namespace):
self.add_token_to_namespace(token, namespace)
def _extend(
self,
counter: Dict[str, Dict[str, int]] = None,
min_count: Dict[str, int] = None,
max_vocab_size: Union[int, Dict[str, int]] = None,
non_padded_namespaces: Iterable[str] = DEFAULT_NON_PADDED_NAMESPACES,
pretrained_files: Optional[Dict[str, str]] = None,
only_include_pretrained_words: bool = False,
tokens_to_add: Dict[str, List[str]] = None,
min_pretrained_embeddings: Dict[str, int] = None,
) -> None:
"""
        This method can be used to extend an already generated vocabulary. It takes the same
        parameters as the `Vocabulary` constructor. The `_token_to_index` and `_index_to_token`
        mappings of the calling vocabulary are retained. This is an in-place operation, so it
        returns `None`.
"""
if not isinstance(max_vocab_size, dict):
int_max_vocab_size = max_vocab_size
max_vocab_size = defaultdict(lambda: int_max_vocab_size) # type: ignore
min_count = min_count or {}
pretrained_files = pretrained_files or {}
min_pretrained_embeddings = min_pretrained_embeddings or {}
non_padded_namespaces = set(non_padded_namespaces)
counter = counter or {}
tokens_to_add = tokens_to_add or {}
self._retained_counter = counter
# Make sure vocabulary extension is safe.
current_namespaces = {*self._token_to_index}
extension_namespaces = {*counter, *tokens_to_add}
for namespace in current_namespaces & extension_namespaces:
            # If the namespace is present in both the existing vocabulary and the extension,
            # either both must treat it as padded or both must treat it as non-padded.
original_padded = not any(
namespace_match(pattern, namespace) for pattern in self._non_padded_namespaces
)
extension_padded = not any(
namespace_match(pattern, namespace) for pattern in non_padded_namespaces
)
if original_padded != extension_padded:
raise ConfigurationError(
"Common namespace {} has conflicting ".format(namespace)
+ "setting of padded = True/False. "
+ "Hence extension cannot be done."
)
# Add new non-padded namespaces for extension
self._token_to_index.add_non_padded_namespaces(non_padded_namespaces)
self._index_to_token.add_non_padded_namespaces(non_padded_namespaces)
self._non_padded_namespaces.update(non_padded_namespaces)
for namespace in counter:
pretrained_set: Optional[Set] = None
if namespace in pretrained_files:
pretrained_list = _read_pretrained_tokens(pretrained_files[namespace])
min_embeddings = min_pretrained_embeddings.get(namespace, 0)
if min_embeddings > 0:
tokens_old = tokens_to_add.get(namespace, [])
tokens_new = pretrained_list[:min_embeddings]
tokens_to_add[namespace] = tokens_old + tokens_new
pretrained_set = set(pretrained_list)
token_counts = list(counter[namespace].items())
token_counts.sort(key=lambda x: x[1], reverse=True)
max_vocab: Optional[int]
try:
max_vocab = max_vocab_size[namespace]
except KeyError:
max_vocab = None
if max_vocab:
token_counts = token_counts[:max_vocab]
for token, count in token_counts:
if pretrained_set is not None:
if only_include_pretrained_words:
if token in pretrained_set and count >= min_count.get(namespace, 1):
self.add_token_to_namespace(token, namespace)
elif token in pretrained_set or count >= min_count.get(namespace, 1):
self.add_token_to_namespace(token, namespace)
elif count >= min_count.get(namespace, 1):
self.add_token_to_namespace(token, namespace)
for namespace, tokens in tokens_to_add.items():
for token in tokens:
self.add_token_to_namespace(token, namespace)
def __getstate__(self):
"""
Need to sanitize defaultdict and defaultdict-like objects
by converting them to vanilla dicts when we pickle the vocabulary.
"""
state = copy.copy(self.__dict__)
state["_token_to_index"] = dict(state["_token_to_index"])
state["_index_to_token"] = dict(state["_index_to_token"])
if "_retained_counter" in state:
state["_retained_counter"] = {
key: dict(value) for key, value in state["_retained_counter"].items()
}
return state
def __setstate__(self, state):
"""
Conversely, when we unpickle, we need to reload the plain dicts
into our special DefaultDict subclasses.
"""
self.__dict__ = copy.copy(state)
self._token_to_index = _TokenToIndexDefaultDict(
self._non_padded_namespaces, self._padding_token, self._oov_token
)
self._token_to_index.update(state["_token_to_index"])
self._index_to_token = _IndexToTokenDefaultDict(
self._non_padded_namespaces, self._padding_token, self._oov_token
)
self._index_to_token.update(state["_index_to_token"])
def save_to_files(self, directory: str) -> None:
"""
Persist this Vocabulary to files so it can be reloaded later.
Each namespace corresponds to one file.
# Parameters
directory : `str`
The directory where we save the serialized vocabulary.
"""
os.makedirs(directory, exist_ok=True)
if os.listdir(directory):
logger.warning("vocabulary serialization directory %s is not empty", directory)
# We use a lock file to avoid race conditions where multiple processes
# might be reading/writing from/to the same vocab files at once.
with FileLock(os.path.join(directory, ".lock")):
with codecs.open(
os.path.join(directory, NAMESPACE_PADDING_FILE), "w", "utf-8"
) as namespace_file:
for namespace_str in self._non_padded_namespaces:
print(namespace_str, file=namespace_file)
for namespace, mapping in self._index_to_token.items():
# Each namespace gets written to its own file, in index order.
with codecs.open(
os.path.join(directory, namespace + ".txt"), "w", "utf-8"
) as token_file:
num_tokens = len(mapping)
start_index = 1 if mapping[0] == self._padding_token else 0
for i in range(start_index, num_tokens):
print(mapping[i].replace("\n", "@@NEWLINE@@"), file=token_file)
def is_padded(self, namespace: str) -> bool:
"""
Returns whether or not there are padding and OOV tokens added to the given namespace.
"""
return self._index_to_token[namespace][0] == self._padding_token
def add_token_to_namespace(self, token: str, namespace: str = "tokens") -> int:
"""
Adds `token` to the index, if it is not already present. Either way, we return the index of
the token.
"""
if not isinstance(token, str):
raise ValueError(
"Vocabulary tokens must be strings, or saving and loading will break."
" Got %s (with type %s)" % (repr(token), type(token))
)
if token not in self._token_to_index[namespace]:
index = len(self._token_to_index[namespace])
self._token_to_index[namespace][token] = index
self._index_to_token[namespace][index] = token
return index
else:
return self._token_to_index[namespace][token]
def add_tokens_to_namespace(self, tokens: List[str], namespace: str = "tokens") -> List[int]:
"""
Adds `tokens` to the index, if they are not already present. Either way, we return the
indices of the tokens in the order that they were given.
"""
return [self.add_token_to_namespace(token, namespace) for token in tokens]
def get_index_to_token_vocabulary(self, namespace: str = "tokens") -> Dict[int, str]:
return self._index_to_token[namespace]
def get_token_to_index_vocabulary(self, namespace: str = "tokens") -> Dict[str, int]:
return self._token_to_index[namespace]
def get_token_index(self, token: str, namespace: str = "tokens") -> int:
try:
return self._token_to_index[namespace][token]
except KeyError:
try:
return self._token_to_index[namespace][self._oov_token]
except KeyError:
logger.error("Namespace: %s", namespace)
logger.error("Token: %s", token)
raise KeyError(
f"'{token}' not found in vocab namespace '{namespace}', and namespace "
f"does not contain the default OOV token ('{self._oov_token}')"
)
def get_token_from_index(self, index: int, namespace: str = "tokens") -> str:
return self._index_to_token[namespace][index]
def get_vocab_size(self, namespace: str = "tokens") -> int:
return len(self._token_to_index[namespace])
def get_namespaces(self) -> Set[str]:
return set(self._index_to_token.keys())
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.__dict__ == other.__dict__
return False
def __str__(self) -> str:
base_string = "Vocabulary with namespaces:\n"
non_padded_namespaces = f"\tNon Padded Namespaces: {self._non_padded_namespaces}\n"
namespaces = [
f"\tNamespace: {name}, Size: {self.get_vocab_size(name)} \n"
for name in self._index_to_token
]
return " ".join([base_string, non_padded_namespaces] + namespaces)
def __repr__(self) -> str:
# This is essentially the same as __str__, but with no newlines
base_string = "Vocabulary with namespaces: "
namespaces = [
f"{name}, Size: {self.get_vocab_size(name)} ||" for name in self._index_to_token
]
non_padded_namespaces = f"Non Padded Namespaces: {self._non_padded_namespaces}"
return " ".join([base_string] + namespaces + [non_padded_namespaces])
def print_statistics(self) -> None:
if self._retained_counter:
logger.info(
"Printed vocabulary statistics are only for the part of the vocabulary generated "
"from instances. If vocabulary is constructed by extending saved vocabulary with "
"dataset instances, the directly loaded portion won't be considered here."
)
print("\n\n----Vocabulary Statistics----\n")
            # Since we don't save counter info, it is impossible to consider the pre-saved portion.
for namespace in self._retained_counter:
tokens_with_counts = list(self._retained_counter[namespace].items())
tokens_with_counts.sort(key=lambda x: x[1], reverse=True)
print(f"\nTop 10 most frequent tokens in namespace '{namespace}':")
for token, freq in tokens_with_counts[:10]:
print(f"\tToken: {token}\t\tFrequency: {freq}")
# Now sort by token length, not frequency
tokens_with_counts.sort(key=lambda x: len(x[0]), reverse=True)
print(f"\nTop 10 longest tokens in namespace '{namespace}':")
for token, freq in tokens_with_counts[:10]:
print(f"\tToken: {token}\t\tlength: {len(token)}\tFrequency: {freq}")
print(f"\nTop 10 shortest tokens in namespace '{namespace}':")
for token, freq in reversed(tokens_with_counts[-10:]):
print(f"\tToken: {token}\t\tlength: {len(token)}\tFrequency: {freq}")
else:
# _retained_counter would be set only if instances were used for vocabulary construction.
logger.info(
"Vocabulary statistics cannot be printed since "
"dataset instances were not used for its construction."
)
# We can't decorate `Vocabulary` with `Vocabulary.register()`, because `Vocabulary` hasn't been
# defined yet. So we put these down here.
Vocabulary.register("from_instances", constructor="from_instances")(Vocabulary)
Vocabulary.register("from_files", constructor="from_files")(Vocabulary)
Vocabulary.register("extend", constructor="from_files_and_instances")(Vocabulary)
Vocabulary.register("empty", constructor="empty")(Vocabulary)
| allennlp-master | allennlp/data/vocabulary.py |
from typing import List
from overrides import overrides
import spacy
from allennlp.common import Registrable
from allennlp.common.util import get_spacy_model
class SentenceSplitter(Registrable):
"""
A `SentenceSplitter` splits strings into sentences.
"""
default_implementation = "spacy"
def split_sentences(self, text: str) -> List[str]:
"""
Splits a `text` :class:`str` paragraph into a list of :class:`str`, where each is a sentence.
"""
raise NotImplementedError
def batch_split_sentences(self, texts: List[str]) -> List[List[str]]:
"""
        The default implementation just iterates over the texts and calls `split_sentences`.
"""
return [self.split_sentences(text) for text in texts]
@SentenceSplitter.register("spacy")
class SpacySentenceSplitter(SentenceSplitter):
"""
A `SentenceSplitter` that uses spaCy's built-in sentence boundary detection.
Spacy's default sentence splitter uses a dependency parse to detect sentence boundaries, so
it is slow, but accurate.
Another option is to use rule-based sentence boundary detection. It's fast and has a small memory footprint,
since it uses punctuation to detect sentence boundaries. This can be activated with the `rule_based` flag.
By default, `SpacySentenceSplitter` calls the default spacy boundary detector.
Registered as a `SentenceSplitter` with name "spacy".
"""
def __init__(self, language: str = "en_core_web_sm", rule_based: bool = False) -> None:
# we need spacy's dependency parser if we're not using rule-based sentence boundary detection.
self.spacy = get_spacy_model(language, parse=not rule_based, ner=False, pos_tags=False)
if rule_based:
# we use `sentencizer`, a built-in spacy module for rule-based sentence boundary detection.
# depending on the spacy version, it could be called 'sentencizer' or 'sbd'
sbd_name = "sbd" if spacy.__version__ < "2.1" else "sentencizer"
if not self.spacy.has_pipe(sbd_name):
sbd = self.spacy.create_pipe(sbd_name)
self.spacy.add_pipe(sbd)
@overrides
def split_sentences(self, text: str) -> List[str]:
return [sent.string.strip() for sent in self.spacy(text).sents]
@overrides
def batch_split_sentences(self, texts: List[str]) -> List[List[str]]:
"""
This method lets you take advantage of spacy's batch processing.
"""
return [
[sentence.string.strip() for sentence in doc.sents] for doc in self.spacy.pipe(texts)
]
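# Illustrative usage sketch: split a short paragraph into sentences. This assumes the
# spaCy model "en_core_web_sm" is installed; `rule_based=True` skips loading the
# dependency parser, which makes splitting much faster.
if __name__ == "__main__":
    splitter = SpacySentenceSplitter(rule_based=True)
    sentences = splitter.split_sentences("AllenNLP is a library. It is built on PyTorch.")
    print(sentences)  # e.g. ['AllenNLP is a library.', 'It is built on PyTorch.']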
| allennlp-master | allennlp/data/tokenizers/sentence_splitter.py |
import copy
import logging
from typing import Any, Dict, List, Optional, Tuple, Iterable
from overrides import overrides
from transformers import PreTrainedTokenizer
from allennlp.common.util import sanitize_wordpiece
from allennlp.data.tokenizers.token_class import Token
from allennlp.data.tokenizers.tokenizer import Tokenizer
logger = logging.getLogger(__name__)
@Tokenizer.register("pretrained_transformer")
class PretrainedTransformerTokenizer(Tokenizer):
"""
A `PretrainedTransformerTokenizer` uses a model from HuggingFace's
`transformers` library to tokenize some input text. This often means wordpieces
(where `'AllenNLP is awesome'` might get split into `['Allen', '##NL', '##P', 'is',
'awesome']`), but it could also use byte-pair encoding, or some other tokenization, depending
on the pretrained model that you're using.
We take a model name as an input parameter, which we will pass to
`AutoTokenizer.from_pretrained`.
We also add special tokens relative to the pretrained model and truncate the sequences.
This tokenizer also indexes tokens and adds the indexes to the `Token` fields so that
they can be picked up by `PretrainedTransformerIndexer`.
Registered as a `Tokenizer` with name "pretrained_transformer".
# Parameters
model_name : `str`
The name of the pretrained wordpiece tokenizer to use.
add_special_tokens : `bool`, optional, (default=`True`)
If set to `True`, the sequences will be encoded with the special tokens relative
to their model.
max_length : `int`, optional (default=`None`)
If set to a number, will limit the total sequence returned so that it has a maximum length.
        If there are overflowing tokens, those will be added to the returned dictionary.
stride : `int`, optional (default=`0`)
If set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
tokenizer_kwargs: `Dict[str, Any]`, optional (default = `None`)
Dictionary with
[additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/tokenization_utils.py#L691)
for `AutoTokenizer.from_pretrained`.
""" # noqa: E501
def __init__(
self,
model_name: str,
add_special_tokens: bool = True,
max_length: Optional[int] = None,
stride: int = 0,
tokenizer_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
else:
tokenizer_kwargs = tokenizer_kwargs.copy()
tokenizer_kwargs.setdefault("use_fast", True)
# Note: Just because we request a fast tokenizer doesn't mean we get one.
from allennlp.common import cached_transformers
self.tokenizer = cached_transformers.get_tokenizer(
model_name, add_special_tokens=False, **tokenizer_kwargs
)
self._add_special_tokens = add_special_tokens
self._max_length = max_length
self._stride = stride
self._tokenizer_lowercases = self.tokenizer_lowercases(self.tokenizer)
try:
self._reverse_engineer_special_tokens("a", "b", model_name, tokenizer_kwargs)
except AssertionError:
# For most transformer models, "a" and "b" work just fine as dummy tokens. For a few,
# they don't, and so we use "1" and "2" instead.
self._reverse_engineer_special_tokens("1", "2", model_name, tokenizer_kwargs)
def _reverse_engineer_special_tokens(
self,
token_a: str,
token_b: str,
model_name: str,
tokenizer_kwargs: Optional[Dict[str, Any]],
):
# storing the special tokens
self.sequence_pair_start_tokens = []
self.sequence_pair_mid_tokens = []
self.sequence_pair_end_tokens = []
# storing token type ids for the sequences
self.sequence_pair_first_token_type_id = None
self.sequence_pair_second_token_type_id = None
# storing the special tokens
self.single_sequence_start_tokens = []
self.single_sequence_end_tokens = []
# storing token type id for the sequence
self.single_sequence_token_type_id = None
# Reverse-engineer the tokenizer for two sequences
from allennlp.common import cached_transformers
tokenizer_with_special_tokens = cached_transformers.get_tokenizer(
model_name, add_special_tokens=True, **(tokenizer_kwargs or {})
)
dummy_output = tokenizer_with_special_tokens.encode_plus(
token_a,
token_b,
add_special_tokens=True,
return_token_type_ids=True,
return_attention_mask=False,
)
if len(dummy_output["token_type_ids"]) != len(dummy_output["input_ids"]):
logger.warning(
"Tokenizer library did not return valid token type ids. We will assume they are all zero."
)
dummy_output["token_type_ids"] = [0] * len(dummy_output["input_ids"])
dummy_a = self.tokenizer.encode(token_a, add_special_tokens=False)[0]
assert dummy_a in dummy_output["input_ids"]
dummy_b = self.tokenizer.encode(token_b, add_special_tokens=False)[0]
assert dummy_b in dummy_output["input_ids"]
assert dummy_a != dummy_b
seen_dummy_a = False
seen_dummy_b = False
for token_id, token_type_id in zip(
dummy_output["input_ids"], dummy_output["token_type_ids"]
):
if token_id == dummy_a:
if seen_dummy_a or seen_dummy_b: # seeing a twice or b before a
raise ValueError("Cannot auto-determine the number of special tokens added.")
seen_dummy_a = True
assert (
self.sequence_pair_first_token_type_id is None
or self.sequence_pair_first_token_type_id == token_type_id
), "multiple different token type ids found for the first sequence"
self.sequence_pair_first_token_type_id = token_type_id
continue
if token_id == dummy_b:
if seen_dummy_b: # seeing b twice
raise ValueError("Cannot auto-determine the number of special tokens added.")
seen_dummy_b = True
assert (
self.sequence_pair_second_token_type_id is None
or self.sequence_pair_second_token_type_id == token_type_id
), "multiple different token type ids found for the second sequence"
self.sequence_pair_second_token_type_id = token_type_id
continue
token = Token(
tokenizer_with_special_tokens.convert_ids_to_tokens(token_id),
text_id=token_id,
type_id=token_type_id,
)
if not seen_dummy_a:
self.sequence_pair_start_tokens.append(token)
elif not seen_dummy_b:
self.sequence_pair_mid_tokens.append(token)
else:
self.sequence_pair_end_tokens.append(token)
assert (
len(self.sequence_pair_start_tokens)
+ len(self.sequence_pair_mid_tokens)
+ len(self.sequence_pair_end_tokens)
) == self.tokenizer.num_special_tokens_to_add(pair=True)
# Reverse-engineer the tokenizer for one sequence
dummy_output = tokenizer_with_special_tokens.encode_plus(
token_a,
add_special_tokens=True,
return_token_type_ids=True,
return_attention_mask=False,
)
if len(dummy_output["token_type_ids"]) != len(dummy_output["input_ids"]):
logger.warning(
"Tokenizer library did not return valid token type ids. We will assume they are all zero."
)
dummy_output["token_type_ids"] = [0] * len(dummy_output["input_ids"])
seen_dummy_a = False
for token_id, token_type_id in zip(
dummy_output["input_ids"], dummy_output["token_type_ids"]
):
if token_id == dummy_a:
if seen_dummy_a:
raise ValueError("Cannot auto-determine the number of special tokens added.")
seen_dummy_a = True
assert (
self.single_sequence_token_type_id is None
or self.single_sequence_token_type_id == token_type_id
), "multiple different token type ids found for the sequence"
self.single_sequence_token_type_id = token_type_id
continue
token = Token(
tokenizer_with_special_tokens.convert_ids_to_tokens(token_id),
text_id=token_id,
type_id=token_type_id,
)
if not seen_dummy_a:
self.single_sequence_start_tokens.append(token)
else:
self.single_sequence_end_tokens.append(token)
assert (
len(self.single_sequence_start_tokens) + len(self.single_sequence_end_tokens)
) == self.tokenizer.num_special_tokens_to_add(pair=False)
@staticmethod
def tokenizer_lowercases(tokenizer: PreTrainedTokenizer) -> bool:
# Huggingface tokenizers have different ways of remembering whether they lowercase or not. Detecting it
# this way seems like the least brittle way to do it.
tokenized = tokenizer.tokenize(
"A"
) # Use a single character that won't be cut into word pieces.
detokenized = " ".join(tokenized)
return "a" in detokenized
@overrides
def tokenize(self, text: str) -> List[Token]:
"""
This method only handles a single sentence (or sequence) of text.
"""
max_length = self._max_length
if max_length is not None and not self._add_special_tokens:
max_length += self.num_special_tokens_for_sequence()
encoded_tokens = self.tokenizer.encode_plus(
text=text,
add_special_tokens=True,
max_length=max_length,
stride=self._stride,
return_tensors=None,
return_offsets_mapping=self.tokenizer.is_fast,
return_attention_mask=False,
return_token_type_ids=True,
return_special_tokens_mask=True,
)
# token_ids contains a final list with ids for both regular and special tokens
token_ids, token_type_ids, special_tokens_mask, token_offsets = (
encoded_tokens["input_ids"],
encoded_tokens["token_type_ids"],
encoded_tokens["special_tokens_mask"],
encoded_tokens.get("offset_mapping"),
)
# If we don't have token offsets, try to calculate them ourselves.
if token_offsets is None:
token_offsets = self._estimate_character_indices(text, token_ids)
tokens = []
for token_id, token_type_id, special_token_mask, offsets in zip(
token_ids, token_type_ids, special_tokens_mask, token_offsets
):
# In `special_tokens_mask`, 1s indicate special tokens and 0s indicate regular tokens.
# NOTE: in transformers v3.4.0 (and probably older versions) the docstring
# for `encode_plus` was incorrect as it had the 0s and 1s reversed.
# https://github.com/huggingface/transformers/pull/7949 fixed this.
if not self._add_special_tokens and special_token_mask == 1:
continue
if offsets is None or offsets[0] >= offsets[1]:
start = None
end = None
else:
start, end = offsets
tokens.append(
Token(
text=self.tokenizer.convert_ids_to_tokens(token_id, skip_special_tokens=False),
text_id=token_id,
type_id=token_type_id,
idx=start,
idx_end=end,
)
)
return tokens
def _estimate_character_indices(
self, text: str, token_ids: List[int]
) -> List[Optional[Tuple[int, int]]]:
"""
The huggingface tokenizers produce tokens that may or may not be slices from the
original text. Differences arise from lowercasing, Unicode normalization, and other
kinds of normalization, as well as special characters that are included to denote
various situations, such as "##" in BERT for word pieces from the middle of a word, or
"Ġ" in RoBERTa for the beginning of words not at the start of a sentence.
This code attempts to calculate character offsets while being tolerant to these
differences. It scans through the text and the tokens in parallel, trying to match up
positions in both. If it gets out of sync, it backs off to not adding any token
indices, and attempts to catch back up afterwards. This procedure is approximate.
Don't rely on precise results, especially in non-English languages that are far more
affected by Unicode normalization.
"""
token_texts = [
sanitize_wordpiece(t) for t in self.tokenizer.convert_ids_to_tokens(token_ids)
]
token_offsets: List[Optional[Tuple[int, int]]] = [None] * len(token_ids)
if self._tokenizer_lowercases:
text = text.lower()
token_texts = [t.lower() for t in token_texts]
min_allowed_skipped_whitespace = 3
allowed_skipped_whitespace = min_allowed_skipped_whitespace
text_index = 0
token_index = 0
while text_index < len(text) and token_index < len(token_ids):
token_text = token_texts[token_index]
token_start_index = text.find(token_text, text_index)
# Did we not find it at all?
if token_start_index < 0:
token_index += 1
# When we skip a token, we increase our tolerance, so we have a chance of catching back up.
allowed_skipped_whitespace += 1 + min_allowed_skipped_whitespace
continue
# Did we jump too far?
non_whitespace_chars_skipped = sum(
1 for c in text[text_index:token_start_index] if not c.isspace()
)
if non_whitespace_chars_skipped > allowed_skipped_whitespace:
# Too many skipped characters. Something is wrong. Ignore this token.
token_index += 1
# When we skip a token, we increase our tolerance, so we have a chance of catching back up.
allowed_skipped_whitespace += 1 + min_allowed_skipped_whitespace
continue
allowed_skipped_whitespace = min_allowed_skipped_whitespace
token_offsets[token_index] = (
token_start_index,
token_start_index + len(token_text),
)
text_index = token_start_index + len(token_text)
token_index += 1
return token_offsets
def _intra_word_tokenize(
self, string_tokens: List[str]
) -> Tuple[List[Token], List[Optional[Tuple[int, int]]]]:
tokens: List[Token] = []
offsets: List[Optional[Tuple[int, int]]] = []
for token_string in string_tokens:
wordpieces = self.tokenizer.encode_plus(
token_string,
add_special_tokens=False,
return_tensors=None,
return_offsets_mapping=False,
return_attention_mask=False,
)
wp_ids = wordpieces["input_ids"]
if len(wp_ids) > 0:
offsets.append((len(tokens), len(tokens) + len(wp_ids) - 1))
tokens.extend(
Token(text=wp_text, text_id=wp_id)
for wp_id, wp_text in zip(wp_ids, self.tokenizer.convert_ids_to_tokens(wp_ids))
)
else:
offsets.append(None)
return tokens, offsets
@staticmethod
def _increment_offsets(
offsets: Iterable[Optional[Tuple[int, int]]], increment: int
) -> List[Optional[Tuple[int, int]]]:
return [
None if offset is None else (offset[0] + increment, offset[1] + increment)
for offset in offsets
]
def intra_word_tokenize(
self, string_tokens: List[str]
) -> Tuple[List[Token], List[Optional[Tuple[int, int]]]]:
"""
Tokenizes each word into wordpieces separately and returns the wordpiece IDs.
Also calculates offsets such that tokens[offsets[i][0]:offsets[i][1] + 1]
corresponds to the original i-th token.
This function inserts special tokens.
"""
tokens, offsets = self._intra_word_tokenize(string_tokens)
tokens = self.add_special_tokens(tokens)
offsets = self._increment_offsets(offsets, len(self.single_sequence_start_tokens))
return tokens, offsets
def intra_word_tokenize_sentence_pair(
self, string_tokens_a: List[str], string_tokens_b: List[str]
) -> Tuple[List[Token], List[Optional[Tuple[int, int]]], List[Optional[Tuple[int, int]]]]:
"""
Tokenizes each word into wordpieces separately and returns the wordpiece IDs.
Also calculates offsets such that wordpieces[offsets[i][0]:offsets[i][1] + 1]
corresponds to the original i-th token.
This function inserts special tokens.
"""
tokens_a, offsets_a = self._intra_word_tokenize(string_tokens_a)
tokens_b, offsets_b = self._intra_word_tokenize(string_tokens_b)
offsets_b = self._increment_offsets(
offsets_b,
(
len(self.sequence_pair_start_tokens)
+ len(tokens_a)
+ len(self.sequence_pair_mid_tokens)
),
)
tokens_a = self.add_special_tokens(tokens_a, tokens_b)
offsets_a = self._increment_offsets(offsets_a, len(self.sequence_pair_start_tokens))
return tokens_a, offsets_a, offsets_b
def add_special_tokens(
self, tokens1: List[Token], tokens2: Optional[List[Token]] = None
) -> List[Token]:
        # We add special tokens and also set token type ids.
        import dataclasses

        def with_new_type_id(tokens: List[Token], type_id: int) -> List[Token]:
            return [dataclasses.replace(t, type_id=type_id) for t in tokens]

        # Make sure we don't change the input parameters
        tokens2 = copy.deepcopy(tokens2)
if tokens2 is None:
return (
self.single_sequence_start_tokens
+ with_new_type_id(tokens1, self.single_sequence_token_type_id) # type: ignore
+ self.single_sequence_end_tokens
)
else:
return (
self.sequence_pair_start_tokens
+ with_new_type_id(tokens1, self.sequence_pair_first_token_type_id) # type: ignore
+ self.sequence_pair_mid_tokens
+ with_new_type_id(tokens2, self.sequence_pair_second_token_type_id) # type: ignore
+ self.sequence_pair_end_tokens
)
def num_special_tokens_for_sequence(self) -> int:
return len(self.single_sequence_start_tokens) + len(self.single_sequence_end_tokens)
def num_special_tokens_for_pair(self) -> int:
return (
len(self.sequence_pair_start_tokens)
+ len(self.sequence_pair_mid_tokens)
+ len(self.sequence_pair_end_tokens)
)
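# Illustrative usage sketch: wordpiece-tokenize a sentence. This assumes the
# "bert-base-uncased" tokenizer is cached locally or can be downloaded by the
# `transformers` library; any other model name supported by `AutoTokenizer` works too.
if __name__ == "__main__":
    tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
    tokens = tokenizer.tokenize("AllenNLP is awesome")
    # Each `Token` carries the wordpiece text, its id in the transformer's vocabulary
    # (`text_id`), a type id, and (when the fast tokenizer is used) character offsets.
    print([(t.text, t.text_id) for t in tokens])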
| allennlp-master | allennlp/data/tokenizers/pretrained_transformer_tokenizer.py |
"""
This module contains various classes for performing
tokenization.
"""
from allennlp.data.tokenizers.token_class import Token
from allennlp.data.tokenizers.tokenizer import Tokenizer
from allennlp.data.tokenizers.spacy_tokenizer import SpacyTokenizer
from allennlp.data.tokenizers.letters_digits_tokenizer import LettersDigitsTokenizer
from allennlp.data.tokenizers.pretrained_transformer_tokenizer import PretrainedTransformerTokenizer
from allennlp.data.tokenizers.character_tokenizer import CharacterTokenizer
from allennlp.data.tokenizers.sentence_splitter import SentenceSplitter
from allennlp.data.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
| allennlp-master | allennlp/data/tokenizers/__init__.py |
from typing import List, Optional
from overrides import overrides
import spacy
from spacy.tokens import Doc
from allennlp.common.util import get_spacy_model
from allennlp.data.tokenizers.token_class import Token
from allennlp.data.tokenizers.tokenizer import Tokenizer
@Tokenizer.register("spacy")
class SpacyTokenizer(Tokenizer):
"""
A `Tokenizer` that uses spaCy's tokenizer. It's fast and reasonable - this is the
recommended `Tokenizer`. By default it will return allennlp Tokens,
    which are small, efficient dataclasses (and are serializable). If you want
to keep the original spaCy tokens, pass keep_spacy_tokens=True. Note that we leave one particular piece of
post-processing for later: the decision of whether or not to lowercase the token. This is for
two reasons: (1) if you want to make two different casing decisions for whatever reason, you
won't have to run the tokenizer twice, and more importantly (2) if you want to lowercase words
for your word embedding, but retain capitalization in a character-level representation, we need
to retain the capitalization here.
Registered as a `Tokenizer` with name "spacy", which is currently the default.
# Parameters
language : `str`, optional, (default=`"en_core_web_sm"`)
Spacy model name.
pos_tags : `bool`, optional, (default=`False`)
If `True`, performs POS tagging with spacy model on the tokens.
Generally used in conjunction with :class:`~allennlp.data.token_indexers.pos_tag_indexer.PosTagIndexer`.
parse : `bool`, optional, (default=`False`)
If `True`, performs dependency parsing with spacy model on the tokens.
        Generally used in conjunction with :class:`~allennlp.data.token_indexers.dep_label_indexer.DepLabelIndexer`.
ner : `bool`, optional, (default=`False`)
        If `True`, performs named entity recognition with the spacy model on the tokens.
Generally used in conjunction with :class:`~allennlp.data.token_indexers.ner_tag_indexer.NerTagIndexer`.
keep_spacy_tokens : `bool`, optional, (default=`False`)
If `True`, will preserve spacy token objects, We copy spacy tokens into our own class by default instead
because spacy Cython Tokens can't be pickled.
split_on_spaces : `bool`, optional, (default=`False`)
If `True`, will split by spaces without performing tokenization.
Used when your data is already tokenized, but you want to perform pos, ner or parsing on the tokens.
start_tokens : `Optional[List[str]]`, optional, (default=`None`)
If given, these tokens will be added to the beginning of every string we tokenize.
end_tokens : `Optional[List[str]]`, optional, (default=`None`)
If given, these tokens will be added to the end of every string we tokenize.
"""
def __init__(
self,
language: str = "en_core_web_sm",
pos_tags: bool = False,
parse: bool = False,
ner: bool = False,
keep_spacy_tokens: bool = False,
split_on_spaces: bool = False,
start_tokens: Optional[List[str]] = None,
end_tokens: Optional[List[str]] = None,
) -> None:
self.spacy = get_spacy_model(language, pos_tags, parse, ner)
if split_on_spaces:
self.spacy.tokenizer = _WhitespaceSpacyTokenizer(self.spacy.vocab)
self._keep_spacy_tokens = keep_spacy_tokens
self._start_tokens = start_tokens or []
# We reverse the tokens here because we're going to insert them with `insert(0)` later;
# this makes sure they show up in the right order.
self._start_tokens.reverse()
self._end_tokens = end_tokens or []
def _sanitize(self, tokens: List[spacy.tokens.Token]) -> List[Token]:
"""
Converts spaCy tokens to allennlp tokens. Is a no-op if
keep_spacy_tokens is True
"""
if not self._keep_spacy_tokens:
tokens = [
Token(
token.text,
token.idx,
token.idx + len(token.text),
token.lemma_,
token.pos_,
token.tag_,
token.dep_,
token.ent_type_,
)
for token in tokens
]
for start_token in self._start_tokens:
tokens.insert(0, Token(start_token, 0))
for end_token in self._end_tokens:
tokens.append(Token(end_token, -1))
return tokens
@overrides
def batch_tokenize(self, texts: List[str]) -> List[List[Token]]:
return [
self._sanitize(_remove_spaces(tokens))
for tokens in self.spacy.pipe(texts, n_threads=-1)
]
@overrides
def tokenize(self, text: str) -> List[Token]:
# This works because our Token class matches spacy's.
return self._sanitize(_remove_spaces(self.spacy(text)))
class _WhitespaceSpacyTokenizer:
"""
Spacy doesn't assume that text is tokenised. Sometimes this
is annoying, like when you have gold data which is pre-tokenised,
but Spacy's tokenisation doesn't match the gold. This can be used
as follows:
nlp = spacy.load("en_core_web_md")
# hack to replace tokenizer with a whitespace tokenizer
nlp.tokenizer = _WhitespaceSpacyTokenizer(nlp.vocab)
... use nlp("here is some text") as normal.
"""
def __init__(self, vocab):
self.vocab = vocab
def __call__(self, text):
words = text.split(" ")
spaces = [True] * len(words)
return Doc(self.vocab, words=words, spaces=spaces)
def _remove_spaces(tokens: List[spacy.tokens.Token]) -> List[spacy.tokens.Token]:
return [token for token in tokens if not token.is_space]
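# Illustrative usage sketch: tokenize a sentence and inspect character offsets. This
# assumes the default spaCy model "en_core_web_sm" is installed.
if __name__ == "__main__":
    tokenizer = SpacyTokenizer()
    tokens = tokenizer.tokenize("Don't tokenize me, bro.")
    # Each Token records its text plus the start (`idx`) and end (`idx_end`) character
    # offsets into the original string.
    print([(t.text, t.idx, t.idx_end) for t in tokens])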
| allennlp-master | allennlp/data/tokenizers/spacy_tokenizer.py |
from typing import List, Optional
import logging
from allennlp.common import Registrable
from allennlp.data.tokenizers.token_class import Token
logger = logging.getLogger(__name__)
class Tokenizer(Registrable):
"""
A `Tokenizer` splits strings of text into tokens. Typically, this either splits text into
word tokens or character tokens, and those are the two tokenizer subclasses we have implemented
here, though you could imagine wanting to do other kinds of tokenization for structured or
other inputs.
See the parameters to, e.g., :class:`~.SpacyTokenizer`, or whichever tokenizer
you want to use.
If the base input to your model is words, you should use a :class:`~.SpacyTokenizer`, even if
you also want to have a character-level encoder to get an additional vector for each word
    token. Splitting word tokens into character arrays is handled separately, in the
    :class:`~allennlp.data.token_indexers.token_characters_indexer.TokenCharactersIndexer` class.
"""
default_implementation = "spacy"
def batch_tokenize(self, texts: List[str]) -> List[List[Token]]:
"""
Batches together tokenization of several texts, in case that is faster for particular
tokenizers.
By default we just do this without batching. Override this in your tokenizer if you have a
good way of doing batched computation.
"""
return [self.tokenize(text) for text in texts]
def tokenize(self, text: str) -> List[Token]:
"""
Actually implements splitting words into tokens.
# Returns
tokens : `List[Token]`
"""
raise NotImplementedError
def add_special_tokens(
self, tokens1: List[Token], tokens2: Optional[List[Token]] = None
) -> List[Token]:
"""
Adds special tokens to tokenized text. These are tokens like [CLS] or [SEP].
Not all tokenizers do this. The default is to just return the tokens unchanged.
# Parameters
tokens1 : `List[Token]`
The list of tokens to add special tokens to.
tokens2 : `Optional[List[Token]]`
An optional second list of tokens. This will be concatenated with `tokens1`. Special tokens will be
added as appropriate.
# Returns
tokens : `List[Token]`
The combined list of tokens, with special tokens added.
"""
return tokens1 + (tokens2 or [])
def num_special_tokens_for_sequence(self) -> int:
"""
Returns the number of special tokens added for a single sequence.
"""
return 0
def num_special_tokens_for_pair(self) -> int:
"""
Returns the number of special tokens added for a pair of sequences.
"""
return 0
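# Illustrative sketch of implementing and registering a custom Tokenizer. The registry
# name "comma" and the splitting rule are invented for this example; only `tokenize`
# needs to be overridden, since the batching and special-token methods above provide
# sensible defaults.
if __name__ == "__main__":

    @Tokenizer.register("comma")
    class CommaTokenizer(Tokenizer):
        def tokenize(self, text: str) -> List[Token]:
            return [Token(part.strip()) for part in text.split(",") if part.strip()]

    print([t.text for t in CommaTokenizer().tokenize("a, b, c")])  # -> ['a', 'b', 'c']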
| allennlp-master | allennlp/data/tokenizers/tokenizer.py |
from typing import List
from overrides import overrides
from allennlp.data.tokenizers.token_class import Token
from allennlp.data.tokenizers.tokenizer import Tokenizer
@Tokenizer.register("whitespace")
@Tokenizer.register("just_spaces")
class WhitespaceTokenizer(Tokenizer):
"""
A `Tokenizer` that assumes you've already done your own tokenization somehow and have
separated the tokens by spaces. We just split the input string on whitespace and return the
resulting list.
Note that we use `text.split()`, which means that the amount of whitespace between the
tokens does not matter. This will never result in spaces being included as tokens.
Registered as a `Tokenizer` with name "whitespace" and "just_spaces".
"""
@overrides
def tokenize(self, text: str) -> List[Token]:
return [Token(t) for t in text.split()]
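# Illustrative usage sketch: no models or downloads are needed for this tokenizer.
if __name__ == "__main__":
    tokenizer = WhitespaceTokenizer()
    print([t.text for t in tokenizer.tokenize("already  tokenized   text")])
    # -> ['already', 'tokenized', 'text']  (runs of whitespace are collapsed)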
| allennlp-master | allennlp/data/tokenizers/whitespace_tokenizer.py |
import re
from typing import List
from overrides import overrides
from allennlp.data.tokenizers.token_class import Token
from allennlp.data.tokenizers.tokenizer import Tokenizer
@Tokenizer.register("letters_digits")
class LettersDigitsTokenizer(Tokenizer):
"""
A `Tokenizer` which keeps runs of (unicode) letters and runs of digits together, while
every other non-whitespace character becomes a separate word.
Registered as a `Tokenizer` with name "letters_digits".
"""
@overrides
def tokenize(self, text: str) -> List[Token]:
# We use the [^\W\d_] pattern as a trick to match unicode letters
tokens = [Token(m.group(), idx=m.start()) for m in re.finditer(r"[^\W\d_]+|\d+|\S", text)]
return tokens
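# Illustrative usage sketch showing how runs of letters, runs of digits, and individual
# punctuation characters become separate tokens.
if __name__ == "__main__":
    tokenizer = LettersDigitsTokenizer()
    print([t.text for t in tokenizer.tokenize("room4u, only $9.99!")])
    # -> ['room', '4', 'u', ',', 'only', '$', '9', '.', '99', '!']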
| allennlp-master | allennlp/data/tokenizers/letters_digits_tokenizer.py |
from typing import List, Union
from overrides import overrides
from allennlp.data.tokenizers.token_class import Token
from allennlp.data.tokenizers.tokenizer import Tokenizer
@Tokenizer.register("character")
class CharacterTokenizer(Tokenizer):
"""
A `CharacterTokenizer` splits strings into character tokens.
Registered as a `Tokenizer` with name "character".
# Parameters
byte_encoding : `str`, optional (default=`None`)
If not `None`, we will use this encoding to encode the string as bytes, and use the byte
sequence as characters, instead of the unicode characters in the python string. E.g., the
character 'á' would be a single token if this option is `None`, but it would be two
tokens if this option is set to `"utf-8"`.
        If this is not `None`, the tokens will carry integer `text_id`s (instead of `text`
        strings), and we will bypass the vocabulary in the `TokenIndexer`.
lowercase_characters : `bool`, optional (default=`False`)
If `True`, we will lowercase all of the characters in the text before doing any other
operation. You probably do not want to do this, as character vocabularies are generally
not very large to begin with, but it's an option if you really want it.
start_tokens : `List[str]`, optional
If given, these tokens will be added to the beginning of every string we tokenize. If
using byte encoding, this should actually be a `List[int]`, not a `List[str]`.
end_tokens : `List[str]`, optional
If given, these tokens will be added to the end of every string we tokenize. If using byte
encoding, this should actually be a `List[int]`, not a `List[str]`.
"""
def __init__(
self,
byte_encoding: str = None,
lowercase_characters: bool = False,
start_tokens: List[Union[str, int]] = None,
end_tokens: List[Union[str, int]] = None,
) -> None:
# TODO(brendanr): Add length truncation.
self._byte_encoding = byte_encoding
self._lowercase_characters = lowercase_characters
self._start_tokens = start_tokens or []
# We reverse the tokens here because we're going to insert them with `insert(0)` later;
# this makes sure they show up in the right order.
self._start_tokens.reverse()
self._end_tokens = end_tokens or []
@overrides
def tokenize(self, text: str) -> List[Token]:
if self._lowercase_characters:
text = text.lower()
if self._byte_encoding is not None:
# We add 1 here so that we can still use 0 for masking, no matter what bytes we get out
# of this.
tokens = [Token(text_id=c + 1) for c in text.encode(self._byte_encoding)]
else:
tokens = [Token(t) for t in list(text)]
for start_token in self._start_tokens:
if isinstance(start_token, int):
token = Token(text_id=start_token, idx=0)
else:
token = Token(text=start_token, idx=0)
tokens.insert(0, token)
for end_token in self._end_tokens:
if isinstance(end_token, int):
token = Token(text_id=end_token, idx=0)
else:
token = Token(text=end_token, idx=0)
tokens.append(token)
return tokens
def __eq__(self, other) -> bool:
if isinstance(self, other.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
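# Illustrative usage sketch: compare unicode-character tokens with byte-level tokens.
if __name__ == "__main__":
    char_tokenizer = CharacterTokenizer()
    print([t.text for t in char_tokenizer.tokenize("héllo")])  # 5 single-character tokens
    byte_tokenizer = CharacterTokenizer(byte_encoding="utf-8")
    # With byte encoding, tokens carry integer `text_id`s (byte value + 1) and no text.
    print([t.text_id for t in byte_tokenizer.tokenize("héllo")])  # 6 ids: 'é' is two bytes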
| allennlp-master | allennlp/data/tokenizers/character_tokenizer.py |
from dataclasses import dataclass
from typing import Optional
@dataclass(init=False, repr=False)
class Token:
"""
A simple token representation, keeping track of the token's text, offset in the passage it was
taken from, POS tag, dependency relation, and similar information. These fields match spacy's
exactly, so we can just use a spacy token for this.
# Parameters
text : `str`, optional
The original text represented by this token.
idx : `int`, optional
The character offset of this token into the tokenized passage.
idx_end : `int`, optional
The character offset one past the last character in the tokenized passage.
lemma_ : `str`, optional
The lemma of this token.
pos_ : `str`, optional
The coarse-grained part of speech of this token.
tag_ : `str`, optional
The fine-grained part of speech of this token.
dep_ : `str`, optional
The dependency relation for this token.
ent_type_ : `str`, optional
The entity type (i.e., the NER tag) for this token.
text_id : `int`, optional
If your tokenizer returns integers instead of strings (e.g., because you're doing byte
encoding, or some hash-based embedding), set this with the integer. If this is set, we
will bypass the vocabulary when indexing this token, regardless of whether `text` is also
set. You can `also` set `text` with the original text, if you want, so that you can
still use a character-level representation in addition to a hash-based word embedding.
type_id : `int`, optional
        Token type id used by some pretrained language models like the original BERT.
        The other fields on `Token` follow the fields on spacy's `Token` object; `text_id` and
        `type_id` are fields we added, similar to spacy's `lex_id`.
"""
__slots__ = [
"text",
"idx",
"idx_end",
"lemma_",
"pos_",
"tag_",
"dep_",
"ent_type_",
"text_id",
"type_id",
]
# Defining the `__slots__` of this class is an optimization that dramatically reduces
# the size in memory of a `Token` instance. The downside of using `__slots__`
# with a dataclass is that you can't assign default values at the class level,
# which is why we need a custom `__init__` function that provides the default values.
text: Optional[str]
idx: Optional[int]
idx_end: Optional[int]
lemma_: Optional[str]
pos_: Optional[str]
tag_: Optional[str]
dep_: Optional[str]
ent_type_: Optional[str]
text_id: Optional[int]
type_id: Optional[int]
def __init__(
self,
text: str = None,
idx: int = None,
idx_end: int = None,
lemma_: str = None,
pos_: str = None,
tag_: str = None,
dep_: str = None,
ent_type_: str = None,
text_id: int = None,
type_id: int = None,
) -> None:
assert text is None or isinstance(
text, str
) # Some very hard to debug errors happen when this is not true.
self.text = text
self.idx = idx
self.idx_end = idx_end
self.lemma_ = lemma_
self.pos_ = pos_
self.tag_ = tag_
self.dep_ = dep_
self.ent_type_ = ent_type_
self.text_id = text_id
self.type_id = type_id
def __str__(self):
return self.text
def __repr__(self):
return self.__str__()
def ensure_text(self) -> str:
"""
Return the `text` field, raising an exception if it's `None`.
"""
if self.text is None:
raise ValueError("Unexpected null text for token")
else:
return self.text
def show_token(token: Token) -> str:
return (
f"{token.text} "
f"(idx: {token.idx}) "
f"(idx_end: {token.idx_end}) "
f"(lemma: {token.lemma_}) "
f"(pos: {token.pos_}) "
f"(tag: {token.tag_}) "
f"(dep: {token.dep_}) "
f"(ent_type: {token.ent_type_}) "
f"(text_id: {token.text_id}) "
f"(type_id: {token.type_id}) "
)
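# Editor's note: a minimal usage sketch, not part of the original file. It shows the two
# typical ways a `Token` is constructed: from raw text (optionally with character offsets
# and linguistic annotations), or from an integer id alone when a tokenizer bypasses the
# vocabulary. The example values are made up for illustration.
if __name__ == "__main__":
    word = Token(text="cats", idx=10, idx_end=14, pos_="NOUN")
    print(show_token(word))
    print(word.ensure_text())  # "cats"

    byte_token = Token(text_id=99)
    print(byte_token.text is None)  # True; `ensure_text()` would raise ValueError here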
| allennlp-master | allennlp/data/tokenizers/token_class.py |
from typing import Dict, List, Sequence, Iterable
import itertools
import logging
from overrides import overrides
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.dataset_readers.dataset_utils import to_bioul
from allennlp.data.fields import TextField, SequenceLabelField, Field, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
logger = logging.getLogger(__name__)
def _is_divider(line: str) -> bool:
empty_line = line.strip() == ""
if empty_line:
return True
else:
first_token = line.split()[0]
if first_token == "-DOCSTART-":
return True
else:
return False
@DatasetReader.register("conll2003")
class Conll2003DatasetReader(DatasetReader):
"""
Reads instances from a pretokenised file where each line is in the following format:
```
WORD POS-TAG CHUNK-TAG NER-TAG
```
with a blank line indicating the end of each sentence
and `-DOCSTART- -X- -X- O` indicating the end of each article,
and converts it into a `Dataset` suitable for sequence tagging.
Each `Instance` contains the words in the `"tokens"` `TextField`.
The values corresponding to the `tag_label`
values will get loaded into the `"tags"` `SequenceLabelField`.
And if you specify any `feature_labels` (you probably shouldn't),
the corresponding values will get loaded into their own `SequenceLabelField` s.
This dataset reader ignores the "article" divisions and simply treats
each sentence as an independent `Instance`. (Technically the reader splits sentences
on any combination of blank lines and "DOCSTART" tags; in particular, it does the right
    thing on well-formed inputs.)
Registered as a `DatasetReader` with name "conll2003".
# Parameters
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
tag_label : `str`, optional (default=`ner`)
Specify `ner`, `pos`, or `chunk` to have that tag loaded into the instance field `tag`.
feature_labels : `Sequence[str]`, optional (default=`()`)
These labels will be loaded as features into the corresponding instance fields:
`pos` -> `pos_tags`, `chunk` -> `chunk_tags`, `ner` -> `ner_tags`
Each will have its own namespace : `pos_tags`, `chunk_tags`, `ner_tags`.
If you want to use one of the tags as a `feature` in your model, it should be
specified here.
coding_scheme : `str`, optional (default=`IOB1`)
Specifies the coding scheme for `ner_labels` and `chunk_labels`.
Valid options are `IOB1` and `BIOUL`. The `IOB1` default maintains
the original IOB1 scheme in the CoNLL 2003 NER data.
        In the IOB1 scheme, I is a token inside a span, O is a token outside
        a span, and B is the beginning of a span that immediately follows another
        span of the same type.
label_namespace : `str`, optional (default=`labels`)
Specifies the namespace for the chosen `tag_label`.
"""
_VALID_LABELS = {"ner", "pos", "chunk"}
def __init__(
self,
token_indexers: Dict[str, TokenIndexer] = None,
tag_label: str = "ner",
feature_labels: Sequence[str] = (),
coding_scheme: str = "IOB1",
label_namespace: str = "labels",
**kwargs,
) -> None:
super().__init__(**kwargs)
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
if tag_label is not None and tag_label not in self._VALID_LABELS:
raise ConfigurationError("unknown tag label type: {}".format(tag_label))
for label in feature_labels:
if label not in self._VALID_LABELS:
raise ConfigurationError("unknown feature label type: {}".format(label))
if coding_scheme not in ("IOB1", "BIOUL"):
raise ConfigurationError("unknown coding_scheme: {}".format(coding_scheme))
self.tag_label = tag_label
self.feature_labels = set(feature_labels)
self.coding_scheme = coding_scheme
self.label_namespace = label_namespace
self._original_coding_scheme = "IOB1"
@overrides
def _read(self, file_path: str) -> Iterable[Instance]:
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
# Group into alternative divider / sentence chunks.
for is_divider, lines in itertools.groupby(data_file, _is_divider):
# Ignore the divider chunks, so that `lines` corresponds to the words
# of a single sentence.
if not is_divider:
fields = [line.strip().split() for line in lines]
# unzipping trick returns tuples, but our Fields need lists
fields = [list(field) for field in zip(*fields)]
tokens_, pos_tags, chunk_tags, ner_tags = fields
# TextField requires `Token` objects
tokens = [Token(token) for token in tokens_]
yield self.text_to_instance(tokens, pos_tags, chunk_tags, ner_tags)
def text_to_instance( # type: ignore
self,
tokens: List[Token],
pos_tags: List[str] = None,
chunk_tags: List[str] = None,
ner_tags: List[str] = None,
) -> Instance:
"""
We take `pre-tokenized` input here, because we don't have a tokenizer in this class.
"""
sequence = TextField(tokens, self._token_indexers)
instance_fields: Dict[str, Field] = {"tokens": sequence}
instance_fields["metadata"] = MetadataField({"words": [x.text for x in tokens]})
# Recode the labels if necessary.
if self.coding_scheme == "BIOUL":
coded_chunks = (
to_bioul(chunk_tags, encoding=self._original_coding_scheme)
if chunk_tags is not None
else None
)
coded_ner = (
to_bioul(ner_tags, encoding=self._original_coding_scheme)
if ner_tags is not None
else None
)
else:
# the default IOB1
coded_chunks = chunk_tags
coded_ner = ner_tags
# Add "feature labels" to instance
if "pos" in self.feature_labels:
if pos_tags is None:
raise ConfigurationError(
"Dataset reader was specified to use pos_tags as "
"features. Pass them to text_to_instance."
)
instance_fields["pos_tags"] = SequenceLabelField(pos_tags, sequence, "pos_tags")
if "chunk" in self.feature_labels:
if coded_chunks is None:
raise ConfigurationError(
"Dataset reader was specified to use chunk tags as "
"features. Pass them to text_to_instance."
)
instance_fields["chunk_tags"] = SequenceLabelField(coded_chunks, sequence, "chunk_tags")
if "ner" in self.feature_labels:
if coded_ner is None:
raise ConfigurationError(
"Dataset reader was specified to use NER tags as "
" features. Pass them to text_to_instance."
)
instance_fields["ner_tags"] = SequenceLabelField(coded_ner, sequence, "ner_tags")
# Add "tag label" to instance
if self.tag_label == "ner" and coded_ner is not None:
instance_fields["tags"] = SequenceLabelField(coded_ner, sequence, self.label_namespace)
elif self.tag_label == "pos" and pos_tags is not None:
instance_fields["tags"] = SequenceLabelField(pos_tags, sequence, self.label_namespace)
elif self.tag_label == "chunk" and coded_chunks is not None:
instance_fields["tags"] = SequenceLabelField(
coded_chunks, sequence, self.label_namespace
)
return Instance(instance_fields)
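# Editor's note: a minimal usage sketch, not part of the original file. It writes a tiny,
# made-up file in the four-column CoNLL-2003 layout described in the docstring and reads
# it back with the NER tags recoded from IOB1 to BIOUL. The temporary-file handling is
# only there to keep the demo self-contained.
if __name__ == "__main__":
    import tempfile

    conll_lines = (
        "-DOCSTART- -X- -X- O\n"
        "\n"
        "U.N. NNP I-NP I-ORG\n"
        "official NN I-NP O\n"
        "Ekeus NNP I-NP I-PER\n"
        "\n"
    )
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write(conll_lines)
        path = f.name

    reader = Conll2003DatasetReader(coding_scheme="BIOUL")
    for instance in reader.read(path):
        print(instance.fields["metadata"]["words"])  # ['U.N.', 'official', 'Ekeus']
        print(instance.fields["tags"].labels)  # ['U-ORG', 'O', 'U-PER'] after recoding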
| allennlp-master | allennlp/data/dataset_readers/conll2003.py |
from typing import Dict, Mapping, Iterable
import json
from allennlp.common.checks import ConfigurationError
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import MetadataField
from allennlp.data.instance import Instance
_VALID_SCHEMES = {"round_robin", "all_at_once"}
@DatasetReader.register("interleaving")
class InterleavingDatasetReader(DatasetReader):
"""
A `DatasetReader` that wraps multiple other dataset readers,
and interleaves their instances, adding a `MetadataField` to
indicate the provenance of each instance.
Unlike most of our other dataset readers, here the `file_path` passed into
`read()` should be a JSON-serialized dictionary with one file_path
per wrapped dataset reader (and with corresponding keys).
Registered as a `DatasetReader` with name "interleaving".
# Parameters
readers : `Dict[str, DatasetReader]`
The dataset readers to wrap. The keys of this dictionary will be used
as the values in the MetadataField indicating provenance.
dataset_field_name : `str`, optional (default = `"dataset"`)
The name of the MetadataField indicating which dataset an instance came from.
scheme : `str`, optional (default = `"round_robin"`)
Indicates how to interleave instances. Currently the two options are "round_robin",
which repeatedly cycles through the datasets grabbing one instance from each;
and "all_at_once", which yields all the instances from the first dataset,
then all the instances from the second dataset, and so on. You could imagine also
        implementing some sort of over- or under-sampling, although that hasn't been done yet.
"""
def __init__(
self,
readers: Dict[str, DatasetReader],
dataset_field_name: str = "dataset",
scheme: str = "round_robin",
**kwargs,
) -> None:
super().__init__(**kwargs)
self._readers = readers
self._dataset_field_name = dataset_field_name
if scheme not in _VALID_SCHEMES:
raise ConfigurationError(f"invalid scheme: {scheme}")
self._scheme = scheme
def _read_round_robin(self, datasets: Mapping[str, Iterable[Instance]]) -> Iterable[Instance]:
remaining = set(datasets)
dataset_iterators = {key: iter(dataset) for key, dataset in datasets.items()}
while remaining:
for key, dataset in dataset_iterators.items():
if key in remaining:
try:
instance = next(dataset)
instance.fields[self._dataset_field_name] = MetadataField(key)
yield instance
except StopIteration:
remaining.remove(key)
def _read_all_at_once(self, datasets: Mapping[str, Iterable[Instance]]) -> Iterable[Instance]:
for key, dataset in datasets.items():
for instance in dataset:
instance.fields[self._dataset_field_name] = MetadataField(key)
yield instance
def _read(self, file_path: str) -> Iterable[Instance]:
try:
file_paths = json.loads(file_path)
except json.JSONDecodeError:
raise ConfigurationError(
"the file_path for the InterleavingDatasetReader "
"needs to be a JSON-serialized dictionary {reader_name -> file_path}"
)
if file_paths.keys() != self._readers.keys():
raise ConfigurationError("mismatched keys")
# Load datasets
datasets = {key: reader.read(file_paths[key]) for key, reader in self._readers.items()}
if self._scheme == "round_robin":
yield from self._read_round_robin(datasets)
elif self._scheme == "all_at_once":
yield from self._read_all_at_once(datasets)
else:
raise RuntimeError("impossible to get here")
def text_to_instance(self) -> Instance: # type: ignore
raise RuntimeError("text_to_instance doesn't make sense here")
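# Editor's note: a minimal usage sketch, not part of the original file. It wraps two
# `SequenceTaggingDatasetReader`s and round-robins over them; the `file_path` argument
# passed to `read()` is the JSON-serialized {reader_name -> file_path} dict described
# above. The tiny data files are made up and written to temporary files for the demo.
if __name__ == "__main__":
    import tempfile

    from allennlp.data.dataset_readers.sequence_tagging import SequenceTaggingDatasetReader

    paths = {}
    for name, line in [("pos", "dogs###NNS bark###VBP\n"), ("ner", "Paris###U-LOC\n")]:
        with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as f:
            f.write(line)
            paths[name] = f.name

    reader = InterleavingDatasetReader(
        readers={"pos": SequenceTaggingDatasetReader(), "ner": SequenceTaggingDatasetReader()},
        scheme="round_robin",
    )
    for instance in reader.read(json.dumps(paths)):
        # Each instance carries a "dataset" MetadataField naming its source reader.
        print(
            instance.fields["dataset"].metadata,
            [t.text for t in instance.fields["tokens"].tokens],
        )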
| allennlp-master | allennlp/data/dataset_readers/interleaving_dataset_reader.py |
import logging
from typing import Dict, List
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.fields import Field, TextField, ListField, IndexField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
logger = logging.getLogger(__name__)
@DatasetReader.register("babi")
class BabiReader(DatasetReader):
"""
Reads one single task in the bAbI tasks format as formulated in
Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks
    (https://arxiv.org/abs/1502.05698). Since this class handles a single file,
    if you want to load multiple tasks together you have to merge them into a
    single file and then use this reader.
Registered as a `DatasetReader` with name "babi".
# Parameters
keep_sentences : `bool`, optional, (default = `False`)
        Whether to keep each sentence in the context separate or to concatenate them.
        The default is `False`, which corresponds to concatenation.
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
"""
def __init__(
self,
keep_sentences: bool = False,
token_indexers: Dict[str, TokenIndexer] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._keep_sentences = keep_sentences
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset = dataset_file.readlines()
logger.info("Reading the dataset")
context: List[List[str]] = [[]]
for line in dataset:
if "?" in line:
question_str, answer, supports_str = line.replace("?", " ?").split("\t")
question = question_str.split()[1:]
supports = [int(support) - 1 for support in supports_str.split()]
yield self.text_to_instance(context, question, answer, supports)
else:
new_entry = line.replace(".", " .").split()[1:]
                # A new story starts when the line numbering resets to 1. We compare the
                # full first token rather than just the first character so that lines
                # 10, 11, ... do not wrongly reset the context.
                if line.split()[0] == "1":
context = [new_entry]
else:
context.append(new_entry)
@overrides
def text_to_instance(
self, # type: ignore
context: List[List[str]],
question: List[str],
answer: str,
supports: List[int],
) -> Instance:
fields: Dict[str, Field] = {}
if self._keep_sentences:
context_field_ks = ListField(
[
TextField([Token(word) for word in line], self._token_indexers)
for line in context
]
)
fields["supports"] = ListField(
[IndexField(support, context_field_ks) for support in supports]
)
else:
context_field = TextField(
[Token(word) for line in context for word in line], self._token_indexers
)
fields["context"] = context_field_ks if self._keep_sentences else context_field
fields["question"] = TextField([Token(word) for word in question], self._token_indexers)
fields["answer"] = TextField([Token(answer)], self._token_indexers)
return Instance(fields)
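# Editor's note: a minimal usage sketch, not part of the original file. It writes a
# made-up three-line story in the bAbI layout (question lines are tab-separated into
# question / answer / supporting-fact ids) to a temporary file and reads it back with
# `keep_sentences=True` so that the supporting-fact indices are preserved.
if __name__ == "__main__":
    import tempfile

    story = (
        "1 Mary moved to the bathroom.\n"
        "2 John went to the hallway.\n"
        "3 Where is Mary?\tbathroom\t1\n"
    )
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write(story)
        path = f.name

    reader = BabiReader(keep_sentences=True)
    for instance in reader.read(path):
        print([t.text for t in instance.fields["question"].tokens])  # ['Where', 'is', 'Mary', '?']
        print([t.text for t in instance.fields["answer"].tokens])  # ['bathroom']
        print(len(instance.fields["supports"]))  # 1 supporting sentence (the first one)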
| allennlp-master | allennlp/data/dataset_readers/babi.py |
from typing import Dict, List
import logging
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import TextField, SequenceLabelField, MetadataField, Field
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
logger = logging.getLogger(__name__)
DEFAULT_WORD_TAG_DELIMITER = "###"
@DatasetReader.register("sequence_tagging")
class SequenceTaggingDatasetReader(DatasetReader):
"""
Reads instances from a pretokenised file where each line is in the following format:
```
WORD###TAG [TAB] WORD###TAG [TAB] ..... \n
```
and converts it into a `Dataset` suitable for sequence tagging. You can also specify
alternative delimiters in the constructor.
Registered as a `DatasetReader` with name "sequence_tagging".
# Parameters
word_tag_delimiter: `str`, optional (default=`"###"`)
The text that separates each WORD from its TAG.
token_delimiter: `str`, optional (default=`None`)
The text that separates each WORD-TAG pair from the next pair. If `None`
then the line will just be split on whitespace.
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
Note that the `output` tags will always correspond to single token IDs based on how they
are pre-tokenised in the data file.
"""
def __init__(
self,
word_tag_delimiter: str = DEFAULT_WORD_TAG_DELIMITER,
token_delimiter: str = None,
token_indexers: Dict[str, TokenIndexer] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self._word_tag_delimiter = word_tag_delimiter
self._token_delimiter = token_delimiter
@overrides
def _read(self, file_path):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
for line in data_file:
line = line.strip("\n")
# skip blank lines
if not line:
continue
tokens_and_tags = [
pair.rsplit(self._word_tag_delimiter, 1)
for pair in line.split(self._token_delimiter)
]
tokens = [Token(token) for token, tag in tokens_and_tags]
tags = [tag for token, tag in tokens_and_tags]
yield self.text_to_instance(tokens, tags)
def text_to_instance( # type: ignore
self, tokens: List[Token], tags: List[str] = None
) -> Instance:
"""
We take `pre-tokenized` input here, because we don't have a tokenizer in this class.
"""
fields: Dict[str, Field] = {}
sequence = TextField(tokens, self._token_indexers)
fields["tokens"] = sequence
fields["metadata"] = MetadataField({"words": [x.text for x in tokens]})
if tags is not None:
fields["tags"] = SequenceLabelField(tags, sequence)
return Instance(fields)
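# Editor's note: a minimal usage sketch, not part of the original file. It writes one
# made-up sentence in the WORD###TAG layout described in the docstring to a temporary
# file (the default delimiters are used: "###" between word and tag, whitespace between
# pairs) and reads it back.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as f:
        f.write("The###DET dog###NN barks###VBZ\n")
        path = f.name

    reader = SequenceTaggingDatasetReader()
    for instance in reader.read(path):
        print(instance.fields["metadata"]["words"])  # ['The', 'dog', 'barks']
        print(instance.fields["tags"].labels)  # ['DET', 'NN', 'VBZ']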
| allennlp-master | allennlp/data/dataset_readers/sequence_tagging.py |
"""
A :class:`~allennlp.data.dataset_readers.dataset_reader.DatasetReader`
reads a file and converts it to a collection of
:class:`~allennlp.data.instance.Instance` s.
The various subclasses know how to read specific filetypes
and produce datasets in the formats required by specific models.
"""
from allennlp.data.dataset_readers.conll2003 import Conll2003DatasetReader
from allennlp.data.dataset_readers.dataset_reader import (
DatasetReader,
AllennlpDataset,
AllennlpLazyDataset,
)
from allennlp.data.dataset_readers.interleaving_dataset_reader import InterleavingDatasetReader
from allennlp.data.dataset_readers.sequence_tagging import SequenceTaggingDatasetReader
from allennlp.data.dataset_readers.sharded_dataset_reader import ShardedDatasetReader
from allennlp.data.dataset_readers.babi import BabiReader
from allennlp.data.dataset_readers.text_classification_json import TextClassificationJsonReader
| allennlp-master | allennlp/data/dataset_readers/__init__.py |
from typing import Dict, List, Union
import logging
import json
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import LabelField, TextField, Field, ListField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Tokenizer, SpacyTokenizer
from allennlp.data.tokenizers.sentence_splitter import SpacySentenceSplitter
logger = logging.getLogger(__name__)
@DatasetReader.register("text_classification_json")
class TextClassificationJsonReader(DatasetReader):
"""
Reads tokens and their labels from a labeled text classification dataset.
Expects a "text" field and a "label" field in JSON format.
The output of `read` is a list of `Instance` s with the fields:
tokens : `TextField` and
label : `LabelField`
Registered as a `DatasetReader` with name "text_classification_json".
[0]: https://www.cs.cmu.edu/~hovy/papers/16HLT-hierarchical-attention-networks.pdf
# Parameters
    token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
        We use this to define the input representation for the text.
        See :class:`TokenIndexer`.
    tokenizer : `Tokenizer`, optional (default = `SpacyTokenizer()`)
Tokenizer to use to split the input text into words or other kinds of tokens.
segment_sentences : `bool`, optional (default = `False`)
If True, we will first segment the text into sentences using SpaCy and then tokenize words.
Necessary for some models that require pre-segmentation of sentences, like [the Hierarchical
Attention Network][0].
max_sequence_length : `int`, optional (default = `None`)
If specified, will truncate tokens to specified maximum length.
skip_label_indexing : `bool`, optional (default = `False`)
Whether or not to skip label indexing. You might want to skip label indexing if your
labels are numbers, so the dataset reader doesn't re-number them starting from 0.
"""
def __init__(
self,
token_indexers: Dict[str, TokenIndexer] = None,
tokenizer: Tokenizer = None,
segment_sentences: bool = False,
max_sequence_length: int = None,
skip_label_indexing: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._tokenizer = tokenizer or SpacyTokenizer()
self._segment_sentences = segment_sentences
self._max_sequence_length = max_sequence_length
self._skip_label_indexing = skip_label_indexing
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
if self._segment_sentences:
self._sentence_segmenter = SpacySentenceSplitter()
@overrides
def _read(self, file_path):
with open(cached_path(file_path), "r") as data_file:
            for line in data_file.readlines():
                # Strip the trailing newline so that blank lines are actually skipped.
                line = line.strip()
                if not line:
                    continue
items = json.loads(line)
text = items["text"]
label = items.get("label")
if label is not None:
if self._skip_label_indexing:
try:
label = int(label)
except ValueError:
raise ValueError(
"Labels must be integers if skip_label_indexing is True."
)
else:
label = str(label)
instance = self.text_to_instance(text=text, label=label)
if instance is not None:
yield instance
def _truncate(self, tokens):
"""
truncate a set of tokens using the provided sequence length
"""
if len(tokens) > self._max_sequence_length:
tokens = tokens[: self._max_sequence_length]
return tokens
@overrides
def text_to_instance(
self, text: str, label: Union[str, int] = None
) -> Instance: # type: ignore
"""
# Parameters
text : `str`, required.
The text to classify
label : `str`, optional, (default = `None`).
The label for this text.
# Returns
An `Instance` containing the following fields:
- tokens (`TextField`) :
The tokens in the sentence or phrase.
- label (`LabelField`) :
                The label of the sentence or phrase.
"""
fields: Dict[str, Field] = {}
if self._segment_sentences:
sentences: List[Field] = []
sentence_splits = self._sentence_segmenter.split_sentences(text)
for sentence in sentence_splits:
word_tokens = self._tokenizer.tokenize(sentence)
if self._max_sequence_length is not None:
word_tokens = self._truncate(word_tokens)
sentences.append(TextField(word_tokens, self._token_indexers))
fields["tokens"] = ListField(sentences)
else:
tokens = self._tokenizer.tokenize(text)
if self._max_sequence_length is not None:
tokens = self._truncate(tokens)
fields["tokens"] = TextField(tokens, self._token_indexers)
if label is not None:
fields["label"] = LabelField(label, skip_indexing=self._skip_label_indexing)
return Instance(fields)
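# Editor's note: a minimal usage sketch, not part of the original file. It writes two
# made-up JSON-lines records in the expected {"text": ..., "label": ...} layout to a
# temporary file and reads them back. It assumes the spacy model used by the default
# `SpacyTokenizer` is installed, since that is the default tokenizer here.
if __name__ == "__main__":
    import tempfile

    records = (
        '{"text": "A wonderful little film.", "label": "pos"}\n'
        '{"text": "Flat, lifeless, and far too long.", "label": "neg"}\n'
    )
    with tempfile.NamedTemporaryFile("w", suffix=".jsonl", delete=False) as f:
        f.write(records)
        path = f.name

    reader = TextClassificationJsonReader(max_sequence_length=32)
    for instance in reader.read(path):
        print(
            [t.text for t in instance.fields["tokens"].tokens],
            instance.fields["label"].label,
        )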
| allennlp-master | allennlp/data/dataset_readers/text_classification_json.py |