from collections import OrderedDict
from typing import Mapping

from transformers.configuration_utils import PretrainedConfig
from transformers.onnx import OnnxConfig
|
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json",
    "bert-large-cased-whole-word-masking": "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json",
    "bert-large-cased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json",
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-char": "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json",
    "TurkuNLP/bert-base-finnish-cased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json",
    "TurkuNLP/bert-base-finnish-uncased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json",
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
}
|
|
class BertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BertModel`] or a [`TFBertModel`]. It is used to
    instantiate a BERT model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the BERT
    [bert-base-uncased](https://huggingface.co/bert-base-uncased) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`BertModel`] or [`TFBertModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`BertModel`] or [`TFBertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.

    Examples:

    ```python
    >>> from transformers import BertModel, BertConfig

    >>> # Initializing a BERT bert-base-uncased style configuration
    >>> configuration = BertConfig()

    >>> # Initializing a model from the bert-base-uncased style configuration
    >>> model = BertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "bert"
|
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        token_keep_rate=1,
        token_keep_strategy="cls_attn",
        token_drop_loc=[9],
        **kwargs
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
|
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
|
        # The three attributes below are not part of the upstream BertConfig; they are
        # additions in this fork. Judging by their names and defaults, they presumably
        # control token pruning: `token_keep_rate` is the fraction of tokens to keep,
        # `token_keep_strategy` selects how tokens are ranked (e.g. by CLS attention),
        # and `token_drop_loc` lists the encoder layers at which tokens are dropped.
        self.token_keep_rate = token_keep_rate
        self.token_keep_strategy = token_keep_strategy
        self.token_drop_loc = token_drop_loc
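
    # A minimal usage sketch for the pruning-related arguments above (hypothetical values;
    # the exact behaviour depends on the modeling code in this fork):
    #
    #     config = BertConfig(token_keep_rate=0.7, token_keep_strategy="cls_attn", token_drop_loc=[9])
    #     model = BertModel(config)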
|
|
|
|
|
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
                ("token_type_ids", {0: "batch", 1: "sequence"}),
            ]
        )
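

# A minimal sketch of how an `OnnxConfig` subclass such as `BertOnnxConfig` is typically
# consumed. This is an illustration only, assuming the standard `transformers.onnx` export
# utilities are available in the installed version of transformers:
#
#     from pathlib import Path
#     from transformers import BertModel, BertTokenizerFast
#     from transformers.onnx import export
#
#     tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     model = BertModel.from_pretrained("bert-base-uncased")
#     onnx_config = BertOnnxConfig(model.config)
#     onnx_inputs, onnx_outputs = export(tokenizer, model, onnx_config, opset=11, output=Path("model.onnx"))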
|
|
|
|
|
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-in22k": "https://huggingface.co/microsoft/beit-base-patch16-224-in22k/resolve/main/config.json",
}
|
|
class BeitConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BeitModel`]. It is used to instantiate a BEiT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the BEiT
    [microsoft/beit-base-patch16-224-in22k](https://huggingface.co/microsoft/beit-base-patch16-224-in22k)
    architecture.

    Args:
        vocab_size (`int`, *optional*, defaults to 8192):
            Vocabulary size of the BEiT model. Defines the number of different image tokens that can be used during
            pre-training.
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to `224`):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to `16`):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to `3`):
            The number of input channels.
        use_mask_token (`bool`, *optional*, defaults to `False`):
            Whether to use a mask token for masked image modeling.
        use_absolute_position_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to use BERT-style absolute position embeddings.
        use_relative_position_bias (`bool`, *optional*, defaults to `False`):
            Whether to use T5-style relative position embeddings in the self-attention layers.
        use_shared_relative_position_bias (`bool`, *optional*, defaults to `False`):
            Whether to use the same relative position embeddings across all self-attention layers of the Transformer.
        layer_scale_init_value (`float`, *optional*, defaults to 0.1):
            Scale to use in the self-attention layers. 0.1 for base, 1e-5 for large. Set 0 to disable layer scale.
        drop_path_rate (`float`, *optional*, defaults to 0.1):
            Stochastic depth rate per sample (when applied in the main path of residual layers).
        use_mean_pooling (`bool`, *optional*, defaults to `True`):
            Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the
            CLS token, before applying the classification head.
        out_indices (`List[int]`, *optional*, defaults to `[3, 5, 7, 11]`):
            Indices of the feature maps to use for semantic segmentation.
        pool_scales (`Tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`):
            Pooling scales used in Pooling Pyramid Module applied on the last feature map.
        use_auxiliary_head (`bool`, *optional*, defaults to `True`):
            Whether to use an auxiliary head during training.
        auxiliary_loss_weight (`float`, *optional*, defaults to 0.4):
            Weight of the cross-entropy loss of the auxiliary head.
        auxiliary_channels (`int`, *optional*, defaults to 256):
            Number of channels to use in the auxiliary head.
        auxiliary_num_convs (`int`, *optional*, defaults to 1):
            Number of convolutional layers to use in the auxiliary head.
        auxiliary_concat_input (`bool`, *optional*, defaults to `False`):
            Whether to concatenate the output of the auxiliary head with the input before the classification layer.
        semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
            The index that is ignored by the loss function of the semantic segmentation model.

    Example:

    ```python
    >>> from transformers import BeitModel, BeitConfig

    >>> # Initializing a BEiT beit-base-patch16-224-in22k style configuration
    >>> configuration = BeitConfig()

    >>> # Initializing a model from the beit-base-patch16-224-in22k style configuration
    >>> model = BeitModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "beit"
|
    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_encoder_decoder=False,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        token_keep_rate=1,
        token_keep_strategy="cls_attn",
        token_drop_loc=[3, 6, 9],
        sparse_random_attn=None,
        sparse_local_attn=1,
        attn_block_size=1,
        num_cls_tokens=1,
        token_3d_order="none",
        **kwargs
    ):
        super().__init__(**kwargs)
|
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling

        self.out_indices = out_indices
        self.pool_scales = pool_scales

        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
|
        # The attributes below are not part of the upstream BeitConfig; they are additions in
        # this fork. The token-pruning attributes mirror those added to BertConfig above:
        # fraction of tokens to keep, ranking strategy (e.g. CLS attention), and the encoder
        # layers at which tokens are dropped.
        self.token_keep_rate = token_keep_rate
        self.token_keep_strategy = token_keep_strategy
        self.token_drop_loc = token_drop_loc

        # Presumably block-sparse attention settings: `sparse_random_attn` (randomly attended
        # blocks, or None to disable), `sparse_local_attn` (local attention window in blocks),
        # `attn_block_size` (tokens per attention block), and `num_cls_tokens` (number of CLS tokens).
        self.sparse_random_attn = sparse_random_attn
        self.sparse_local_attn = sparse_local_attn
        self.attn_block_size = attn_block_size
        self.num_cls_tokens = num_cls_tokens

        # Presumably controls how 3D (e.g. spatio-temporal) patch tokens are ordered before
        # sparse attention is applied; "none" keeps the default ordering.
        self.token_3d_order = token_3d_order
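
    # A minimal usage sketch for the fork-specific arguments above (hypothetical values; the
    # exact behaviour depends on the modeling code in this fork), e.g. keeping 70% of tokens
    # at layers 3, 6 and 9 with a one-block local attention window:
    #
    #     config = BeitConfig(token_keep_rate=0.7, token_drop_loc=[3, 6, 9], sparse_local_attn=1)
    #     model = BeitModel(config)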
|
|