sayehs committed
Commit 6a8a059
Parent: 791b61d

custom modeling of gpt-j-6b

Files changed (3)
  1. config.json +4 -1
  2. configuration_gptj.py +239 -0
  3. modeling_gptj.py +1247 -0
config.json CHANGED
@@ -3,6 +3,10 @@
   "architectures": [
     "GPTJForCausalLM"
   ],
+  "auto_map": {
+    "AutoConfig": "configuration_gptj.GPTJConfig",
+    "AutoModelForCausalLM": "modeling_gptj.GPTJForCausalLM"
+  },
   "attn_pdrop": 0.0,
   "bos_token_id": 50256,
   "embd_pdrop": 0.0,
@@ -34,7 +38,6 @@
   },
   "tie_word_embeddings": false,
   "tokenizer_class": "GPT2Tokenizer",
-  "transformers_version": "4.18.0.dev0",
   "use_cache": true,
   "vocab_size": 50400
 }
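
The `auto_map` block added above is what makes the rest of this commit work: it tells the `Auto*` classes to build the config and model from the repo's own `configuration_gptj.py` and `modeling_gptj.py` instead of the GPT-J classes bundled with `transformers`. A minimal sketch of loading through that path (the repo id below is a placeholder for wherever this commit lives; `trust_remote_code=True` is required because the Hub will execute the repo's Python files):

```python
from transformers import AutoConfig, AutoModelForCausalLM

# Placeholder repo id -- substitute the actual Hub repo containing this commit.
repo_id = "sayehs/gpt-j-6b"

config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)
print(type(model).__module__)  # resolves to the repo's modeling_gptj module, not transformers
```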
configuration_gptj.py ADDED
@@ -0,0 +1,239 @@
+# coding=utf-8
+# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" GPT-J model configuration"""
+from collections import OrderedDict
+from typing import Any, List, Mapping, Optional
+
+from transformers import PreTrainedTokenizer, TensorType, is_torch_available
+from transformers.configuration_utils import PretrainedConfig
+from transformers.onnx import OnnxConfigWithPast, PatchingSpec
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
+    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
+}
+
+
+class GPTJConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`GPTJModel`]. It is used to instantiate a GPT-J
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a similar configuration to that of the GPT-J
+    [EleutherAI/gpt-j-6B](https://huggingface.co/EleutherAI/gpt-j-6B) architecture. Configuration objects inherit from
+    [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`]
+    for more information.
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 50400):
+            Vocabulary size of the GPT-J model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`GPTJModel`].
+        n_positions (`int`, *optional*, defaults to 2048):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 2048).
+        n_embd (`int`, *optional*, defaults to 4096):
+            Dimensionality of the embeddings and hidden states.
+        n_layer (`int`, *optional*, defaults to 28):
+            Number of hidden layers in the Transformer encoder.
+        n_head (`int`, *optional*, defaults to 16):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        rotary_dim (`int`, *optional*, defaults to 64):
+            Number of dimensions in the embedding that Rotary Position Embedding is applied to.
+        n_inner (`int`, *optional*, defaults to `None`):
+            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
+        activation_function (`str`, *optional*, defaults to `"gelu_new"`):
+            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
+        resid_pdrop (`float`, *optional*, defaults to 0.0):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        embd_pdrop (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the embeddings.
+        attn_pdrop (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention.
+        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
+            The epsilon to use in the layer normalization layers.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models).
+
+    Example:
+
+    ```python
+    >>> from transformers import GPTJModel, GPTJConfig
+
+    >>> # Initializing a GPT-J 6B configuration
+    >>> configuration = GPTJConfig()
+
+    >>> # Initializing a model from the configuration
+    >>> model = GPTJModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "gptj"
+    attribute_map = {
+        "max_position_embeddings": "n_positions",
+        "hidden_size": "n_embd",
+        "num_attention_heads": "n_head",
+        "num_hidden_layers": "n_layer",
+    }
+
+    def __init__(
+        self,
+        vocab_size=50400,
+        n_positions=2048,
+        n_embd=4096,
+        n_layer=28,
+        n_head=16,
+        rotary_dim=64,
+        n_inner=None,
+        activation_function="gelu_new",
+        resid_pdrop=0.0,
+        embd_pdrop=0.0,
+        attn_pdrop=0.0,
+        layer_norm_epsilon=1e-5,
+        initializer_range=0.02,
+        use_cache=True,
+        bos_token_id=50256,
+        eos_token_id=50256,
+        tie_word_embeddings=False,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.n_positions = n_positions
+        self.n_embd = n_embd
+        self.n_layer = n_layer
+        self.n_head = n_head
+        self.n_inner = n_inner
+        self.rotary_dim = rotary_dim
+        self.activation_function = activation_function
+        self.resid_pdrop = resid_pdrop
+        self.embd_pdrop = embd_pdrop
+        self.attn_pdrop = attn_pdrop
+        self.layer_norm_epsilon = layer_norm_epsilon
+        self.initializer_range = initializer_range
+        self.use_cache = use_cache
+
+        self.bos_token_id = bos_token_id
+        self.eos_token_id = eos_token_id
+
+        super().__init__(
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
+
+
+# Copied from transformers.models.gpt2.configuration_gpt2.GPT2OnnxConfig
+class GPTJOnnxConfig(OnnxConfigWithPast):
+    def __init__(
+        self,
+        config: PretrainedConfig,
+        task: str = "default",
+        patching_specs: List[PatchingSpec] = None,
+        use_past: bool = False,
+    ):
+        super().__init__(
+            config, task=task, patching_specs=patching_specs, use_past=use_past
+        )
+        if not getattr(self._config, "pad_token_id", None):
+            # TODO: how to do that better?
+            self._config.pad_token_id = 0
+
+    @property
+    def inputs(self) -> Mapping[str, Mapping[int, str]]:
+        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
+        if self.use_past:
+            self.fill_with_past_key_values_(common_inputs, direction="inputs")
+            common_inputs["attention_mask"] = {
+                0: "batch",
+                1: "past_sequence + sequence",
+            }
+        else:
+            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
+
+        return common_inputs
+
+    @property
+    def num_layers(self) -> int:
+        return self._config.n_layer
+
+    @property
+    def num_attention_heads(self) -> int:
+        return self._config.n_head
+
+    def generate_dummy_inputs(
+        self,
+        tokenizer: PreTrainedTokenizer,
+        batch_size: int = -1,
+        seq_length: int = -1,
+        is_pair: bool = False,
+        framework: Optional[TensorType] = None,
+    ) -> Mapping[str, Any]:
+        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
+            tokenizer,
+            batch_size=batch_size,
+            seq_length=seq_length,
+            is_pair=is_pair,
+            framework=framework,
+        )
+
+        # We need to order the inputs in the way they appear in forward()
+        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
+
+        # Need to add the past_keys
+        if self.use_past:
+            if not is_torch_available():
+                raise ValueError(
+                    "Cannot generate dummy past_keys inputs without PyTorch installed."
+                )
+            else:
+                import torch
+
+                batch, seqlen = common_inputs["input_ids"].shape
+                # Not using the same length for past_key_values
+                past_key_values_length = seqlen + 2
+                past_shape = (
+                    batch,
+                    self.num_attention_heads,
+                    past_key_values_length,
+                    self._config.hidden_size // self.num_attention_heads,
+                )
+                ordered_inputs["past_key_values"] = [
+                    (torch.zeros(past_shape), torch.zeros(past_shape))
+                    for _ in range(self.num_layers)
+                ]
+
+        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
+        if self.use_past:
+            mask_dtype = ordered_inputs["attention_mask"].dtype
+            ordered_inputs["attention_mask"] = torch.cat(
+                [
+                    ordered_inputs["attention_mask"],
+                    torch.ones(batch, past_key_values_length, dtype=mask_dtype),
+                ],
+                dim=1,
+            )
+
+        return ordered_inputs
+
+    @property
+    def default_onnx_opset(self) -> int:
+        return 13
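
Two details of this file are easy to miss. `attribute_map` aliases the generic `transformers` attribute names onto the GPT-2-style ones, so `config.hidden_size` and `config.n_embd` are the same value; and `GPTJOnnxConfig.inputs` declares dynamic batch/sequence axes, plus per-layer `past_key_values` entries when `use_past=True`. A small sketch, assuming the file is importable from the working directory (the tiny dimensions are illustrative only):

```python
from configuration_gptj import GPTJConfig, GPTJOnnxConfig

# Tiny illustrative config; the real GPT-J 6B uses n_embd=4096, n_layer=28.
config = GPTJConfig(n_embd=256, n_layer=2, n_head=4, rotary_dim=16)
assert config.hidden_size == config.n_embd == 256      # attribute_map aliasing
assert config.num_hidden_layers == config.n_layer == 2

onnx_config = GPTJOnnxConfig(config, use_past=True)
# input_ids, past_key_values.{i}.{key,value}, and attention_mask, each with dynamic axes
print(list(onnx_config.inputs))
```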
modeling_gptj.py ADDED
@@ -0,0 +1,1247 @@
+# coding=utf-8
+# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch GPT-J model."""
+
+import warnings
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.fx
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from transformers.activations import ACT2FN
+from transformers.modeling_outputs import (
+    BaseModelOutputWithPast,
+    CausalLMOutputWithPast,
+    QuestionAnsweringModelOutput,
+    SequenceClassifierOutputWithPast,
+)
+from transformers.modeling_utils import PreTrainedModel
+from transformers.utils import (
+    add_code_sample_docstrings,
+    add_start_docstrings,
+    add_start_docstrings_to_model_forward,
+    is_torch_fx_proxy,
+    logging,
+)
+from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
+from .configuration_gptj import GPTJConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "hf-internal-testing/tiny-random-gptj"
+_REAL_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B"
+_CONFIG_FOR_DOC = "GPTJConfig"
+
+
+GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = [
+    "EleutherAI/gpt-j-6B",
+    # See all GPT-J models at https://huggingface.co/models?filter=gptj
+]
+
+
+def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
+    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim))
+    sinusoid_inp = torch.einsum(
+        "i , j -> i j", torch.arange(num_pos, dtype=torch.int64).float(), inv_freq
+    ).float()
+    return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)
+
+
+@torch.fx.wrap
+def get_embed_positions(embed_positions, position_ids):
+    return embed_positions.to(position_ids.device).repeat(position_ids.shape[0], 1, 1)
+
+
+def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
+    x1 = x[:, :, :, ::2]
+    x2 = x[:, :, :, 1::2]
+    x = torch.stack((-x2, x1), dim=-1)
+    return x.flatten(-2)  # in einsum notation: rearrange(x, '... d j -> ... (d j)')
+
+
+def apply_rotary_pos_emb(
+    tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor
+) -> torch.Tensor:
+    sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
+    cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
+    return (tensor * cos) + (rotate_every_two(tensor) * sin)
+
+
+class GPTJAttention(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+
+        max_positions = config.max_position_embeddings
+        self.register_buffer(
+            "bias",
+            torch.tril(
+                torch.ones((max_positions, max_positions), dtype=torch.bool)
+            ).view(1, 1, max_positions, max_positions),
+            persistent=False,
+        )
+        self.register_buffer("masked_bias", torch.tensor(-1e9), persistent=False)
+
+        self.attn_dropout = nn.Dropout(config.attn_pdrop)
+        self.resid_dropout = nn.Dropout(config.resid_pdrop)
+
+        self.embed_dim = config.hidden_size
+        self.num_attention_heads = config.num_attention_heads
+        self.head_dim = self.embed_dim // self.num_attention_heads
+        if self.head_dim * self.num_attention_heads != self.embed_dim:
+            raise ValueError(
+                f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
+                f" `num_attention_heads`: {self.num_attention_heads})."
+            )
+        self.scale_attn = torch.sqrt(
+            torch.tensor(self.head_dim, dtype=torch.float32)
+        ).to(torch.get_default_dtype())
+
+        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
+        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
+        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
+        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
+        self.rotary_dim = config.rotary_dim
+        pos_embd_dim = self.rotary_dim or self.embed_dim
+        self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)
+
+    def _split_heads(self, tensor, num_attention_heads, attn_head_size, rotary):
+        """
+        Splits hidden dim into attn_head_size and num_attention_heads
+        """
+        new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)
+        tensor = tensor.view(new_shape)
+        if rotary:
+            return tensor
+        if len(tensor.shape) == 5:
+            return tensor.permute(
+                0, 1, 3, 2, 4
+            )  # (batch, blocks, head, block_length, head_features)
+        elif len(tensor.shape) == 4:
+            return tensor.permute(
+                0, 2, 1, 3
+            )  # (batch, head, seq_length, head_features)
+        else:
+            raise ValueError(
+                f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}"
+            )
+
+    def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
+        """
+        Merges attn_head_size dim and num_attn_heads dim into hidden dim
+        """
+        if len(tensor.shape) == 5:
+            tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
+        elif len(tensor.shape) == 4:
+            tensor = tensor.permute(0, 2, 1, 3).contiguous()
+        else:
+            raise ValueError(
+                f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}"
+            )
+        new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
+        return tensor.view(new_shape)
+
+    def _attn(
+        self,
+        query,
+        key,
+        value,
+        attention_mask=None,
+        head_mask=None,
+    ):
+        # compute causal mask from causal mask buffer
+        query_length, key_length = query.size(-2), key.size(-2)
+        causal_mask = self.bias[
+            :, :, key_length - query_length : key_length, :key_length
+        ]
+
+        # Keep the attention weights computation in fp32 to avoid overflow issues
+        query = query.to(torch.float32)
+        key = key.to(torch.float32)
+
+        attn_weights = torch.matmul(query, key.transpose(-1, -2))
+
+        mask_value = torch.finfo(attn_weights.dtype).min
+        # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
+        # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
+        mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(
+            attn_weights.device
+        )
+        attn_weights = torch.where(causal_mask, attn_weights, mask_value)
+
+        attn_weights = attn_weights / self.scale_attn
+
+        if attention_mask is not None:
+            # Apply the attention mask
+            attn_weights = attn_weights + attention_mask
+
+        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+        attn_weights = attn_weights.to(value.dtype)
+        attn_weights = self.attn_dropout(attn_weights)
+
+        # Mask heads if we want to
+        if head_mask is not None:
+            attn_weights = attn_weights * head_mask
+
+        attn_output = torch.matmul(attn_weights, value)
+
+        return attn_output, attn_weights
+
+    def _get_embed_positions(self, position_ids):
+        embed_positions = self.embed_positions
+        if embed_positions.device != position_ids.device:
+            embed_positions = embed_positions.to(position_ids.device)
+            self.embed_positions = embed_positions
+        return embed_positions.repeat(position_ids.shape[0], 1, 1)
+
+    def forward(
+        self,
+        hidden_states: torch.FloatTensor,
+        layer_past: Optional[Tuple[torch.Tensor]] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = False,
+        output_attentions: Optional[bool] = False,
+    ) -> Union[
+        Tuple[torch.Tensor, Tuple[torch.Tensor]],
+        Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
+    ]:
+        query = self.q_proj(hidden_states)
+        key = self.k_proj(hidden_states)
+        value = self.v_proj(hidden_states)
+
+        query = self._split_heads(query, self.num_attention_heads, self.head_dim, True)
+        key = self._split_heads(key, self.num_attention_heads, self.head_dim, True)
+        value = self._split_heads(value, self.num_attention_heads, self.head_dim, False)
+
+        if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing():
+            # The logic to conditionally copy to GPU could not be traced, so we do this
+            # every time in the torch.fx case
+            embed_positions = get_embed_positions(self.embed_positions, position_ids)
+        else:
+            embed_positions = self._get_embed_positions(position_ids)
+
+        repeated_position_ids = position_ids.unsqueeze(-1).repeat(
+            1, 1, embed_positions.shape[-1]
+        )
+        sincos = torch.gather(embed_positions, 1, repeated_position_ids)
+        sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
+
+        if self.rotary_dim is not None:
+            k_rot = key[:, :, :, : self.rotary_dim]
+            k_pass = key[:, :, :, self.rotary_dim :]
+
+            q_rot = query[:, :, :, : self.rotary_dim]
+            q_pass = query[:, :, :, self.rotary_dim :]
+
+            k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
+            q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
+
+            key = torch.cat([k_rot, k_pass], dim=-1)
+            query = torch.cat([q_rot, q_pass], dim=-1)
+        else:
+            key = apply_rotary_pos_emb(key, sin, cos)
+            query = apply_rotary_pos_emb(query, sin, cos)
+
+        key = key.permute(0, 2, 1, 3)
+        query = query.permute(0, 2, 1, 3)
+
+        if layer_past is not None:
+            past_key = layer_past[0]
+            past_value = layer_past[1]
+            key = torch.cat((past_key, key), dim=-2)
+            value = torch.cat((past_value, value), dim=-2)
+
+        if use_cache is True:
+            # Note that this cast is quite ugly, but is not implemented before ROPE as the original codebase keeps the key in float32 all along the computation.
+            # Reference: https://github.com/kingoflolz/mesh-transformer-jax/blob/f8315e3003033b23f21d78361b288953064e0e76/mesh_transformer/layers.py#L128
+            present = (key.to(hidden_states.dtype), value)
+        else:
+            present = None
+
+        # compute self-attention: V x Softmax(QK^T)
+        attn_output, attn_weights = self._attn(
+            query, key, value, attention_mask, head_mask
+        )
+
+        attn_output = self._merge_heads(
+            attn_output, self.num_attention_heads, self.head_dim
+        )
+        attn_output = self.out_proj(attn_output)
+        attn_output = self.resid_dropout(attn_output)
+
+        outputs = (attn_output, present)
+        if output_attentions:
+            outputs += (attn_weights,)
+
+        return outputs  # a, present, (attentions)
+
+
+class GPTJMLP(nn.Module):
+    def __init__(
+        self, intermediate_size, config
+    ):  # in MLP: intermediate_size = 4 * embed_dim
+        super().__init__()
+        embed_dim = config.n_embd
+
+        self.fc_in = nn.Linear(embed_dim, intermediate_size)
+        self.fc_out = nn.Linear(intermediate_size, embed_dim)
+
+        self.act = ACT2FN[config.activation_function]
+        self.dropout = nn.Dropout(config.resid_pdrop)
+
+    def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
+        hidden_states = self.fc_in(hidden_states)
+        hidden_states = self.act(hidden_states)
+        hidden_states = self.fc_out(hidden_states)
+        hidden_states = self.dropout(hidden_states)
+        return hidden_states
+
+
+class GPTJBlock(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
+        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+        self.attn = GPTJAttention(config)
+        self.mlp = GPTJMLP(inner_dim, config)
+
+    def forward(
+        self,
+        hidden_states: Optional[torch.FloatTensor],
+        layer_past: Optional[Tuple[torch.Tensor]] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = False,
+        output_attentions: Optional[bool] = False,
+    ) -> Union[
+        Tuple[torch.Tensor],
+        Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]],
+    ]:
+        residual = hidden_states
+        hidden_states = self.ln_1(hidden_states)
+        attn_outputs = self.attn(
+            hidden_states=hidden_states,
+            layer_past=layer_past,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+        )
+        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
+        outputs = attn_outputs[1:]
+
+        feed_forward_hidden_states = self.mlp(hidden_states)
+        hidden_states = attn_output + feed_forward_hidden_states + residual
+
+        if use_cache:
+            outputs = (hidden_states,) + outputs
+        else:
+            outputs = (hidden_states,) + outputs[1:]
+
+        return outputs  # hidden_states, present, (attentions)
+
+
+class GPTJPreTrainedModel(PreTrainedModel):
+    """
+    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+    models.
+    """
+
+    config_class = GPTJConfig
+    base_model_prefix = "transformer"
+    is_parallelizable = True
+    supports_gradient_checkpointing = True
+    _no_split_modules = ["GPTJBlock"]
+    _skip_keys_device_placement = "past_key_values"
+
+    def __init__(self, *inputs, **kwargs):
+        super().__init__(*inputs, **kwargs)
+
+    def _init_weights(self, module):
+        """Initialize the weights."""
+        if isinstance(module, (nn.Linear,)):
+            # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
+            # cf https://github.com/pytorch/pytorch/pull/5617
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+            if module.bias is not None:
+                module.bias.data.zero_()
+        elif isinstance(module, nn.Embedding):
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+            if module.padding_idx is not None:
+                module.weight.data[module.padding_idx].zero_()
+        elif isinstance(module, nn.LayerNorm):
+            module.bias.data.zero_()
+            module.weight.data.fill_(1.0)
+
+
+GPTJ_START_DOCSTRING = r"""
+    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+    behavior.
+
+    Parameters:
+        config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
+            Initializing with a config file does not load the weights associated with the model, only the
+            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+GPTJ_INPUTS_DOCSTRING = r"""
+    Args:
+        input_ids (`torch.LongTensor` of shape `({0})`):
+            Indices of input sequence tokens in the vocabulary.
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            [What are input IDs?](../glossary#input-ids)
+        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+            [What are attention masks?](../glossary#attention-mask)
+        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+            1]`:
+
+            - 0 corresponds to a *sentence A* token,
+            - 1 corresponds to a *sentence B* token.
+
+            [What are token type IDs?](../glossary#token-type-ids)
+        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+            config.n_positions - 1]`.
+
+            [What are position IDs?](../glossary#position-ids)
+        head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
+            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+
+        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
+            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
+            model's internal embedding lookup matrix.
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+            tensors for more detail.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail.
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+PARALLELIZE_DOCSTRING = r"""
+    This is an experimental feature and is subject to change at a moment's notice. Uses a device map to distribute
+    attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks
+    across all devices.
+
+    Args:
+        device_map (`Dict[int, list]`, optional, defaults to None):
+            A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
+            automatically mapped to the first device (for esoteric reasons). That means that the first device should
+            have fewer attention modules mapped to it than other devices. For reference, the GPT-J models have the
+            following number of attention modules:
+
+                - gpt-j-6B: 28
+
+    Example:
+
+    ```python
+    # Here is an example of a device map on a machine with 4 GPUs using gpt-j-6B, which has a total of 28 attention modules:
+    model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
+    device_map = {
+        0: [0, 1, 2, 3, 4, 5, 6],
+        1: [7, 8, 9, 10, 11, 12, 13],
+        2: [14, 15, 16, 17, 18, 19, 20],
+        3: [21, 22, 23, 24, 25, 26, 27],
+    }
+    model.parallelize(device_map)
+    ```
+"""
+
+DEPARALLELIZE_DOCSTRING = r"""
+    Moves the model to CPU from a model parallel state.
+
+    Example:
+
+    ```python
+    # On a 4 GPU machine with gpt-j-6B:
+    model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
+    device_map = {
+        0: [0, 1, 2, 3, 4, 5, 6],
+        1: [7, 8, 9, 10, 11, 12, 13],
+        2: [14, 15, 16, 17, 18, 19, 20],
+        3: [21, 22, 23, 24, 25, 26, 27],
+    }
+    model.parallelize(device_map)  # Splits the model across several devices
+    model.deparallelize()  # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
+    ```
+"""
+
+
+@add_start_docstrings(
+    "The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.",
+    GPTJ_START_DOCSTRING,
+)
+class GPTJModel(GPTJPreTrainedModel):
+    def __init__(self, config):
+        super().__init__(config)
+
+        self.embed_dim = config.n_embd
+        self.vocab_size = config.vocab_size
+        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
+        self.drop = nn.Dropout(config.embd_pdrop)
+        self.h = nn.ModuleList([GPTJBlock(config) for _ in range(config.n_layer)])
+        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
+
+        # Model parallel
+        self.model_parallel = False
+        self.device_map = None
+        self.gradient_checkpointing = False
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @add_start_docstrings(PARALLELIZE_DOCSTRING)
+    def parallelize(self, device_map=None):
+        warnings.warn(
+            "`GPTJModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your"
+            " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
+            " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1,"
+            " ...}",
+            FutureWarning,
+        )
+        # Check validity of device_map
+        self.device_map = (
+            get_device_map(len(self.h), range(torch.cuda.device_count()))
+            if device_map is None
+            else device_map
+        )
+        assert_device_map(self.device_map, len(self.h))
+        self.model_parallel = True
+        self.first_device = (
+            "cpu"
+            if "cpu" in self.device_map.keys()
+            else "cuda:" + str(min(self.device_map.keys()))
+        )
+        self.last_device = "cuda:" + str(max(self.device_map.keys()))
+        self.wte = self.wte.to(self.first_device)
+        # Load onto devices
+        for k, v in self.device_map.items():
+            for block in v:
+                cuda_device = "cuda:" + str(k)
+                self.h[block] = self.h[block].to(cuda_device)
+        # ln_f to last
+        self.ln_f = self.ln_f.to(self.last_device)
+
+    @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
+    def deparallelize(self):
+        warnings.warn(
+            "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
+            FutureWarning,
+        )
+        self.model_parallel = False
+        self.device_map = None
+        self.first_device = "cpu"
+        self.last_device = "cpu"
+        self.wte = self.wte.to("cpu")
+        for index in range(len(self.h)):
+            self.h[index] = self.h[index].to("cpu")
+        self.ln_f = self.ln_f.to("cpu")
+        torch.cuda.empty_cache()
+
+    def get_input_embeddings(self):
+        return self.wte
+
+    def set_input_embeddings(self, new_embeddings):
+        self.wte = new_embeddings
+
+    @add_start_docstrings_to_model_forward(
+        GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")
+    )
+    @add_code_sample_docstrings(
+        checkpoint=_CHECKPOINT_FOR_DOC,
+        output_type=BaseModelOutputWithPast,
+        config_class=_CONFIG_FOR_DOC,
+        real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
+    )
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, BaseModelOutputWithPast]:
+        output_attentions = (
+            output_attentions
+            if output_attentions is not None
+            else self.config.output_attentions
+        )
+        output_hidden_states = (
+            output_hidden_states
+            if output_hidden_states is not None
+            else self.config.output_hidden_states
+        )
+        use_cache = use_cache if use_cache is not None else self.config.use_cache
+        return_dict = (
+            return_dict if return_dict is not None else self.config.use_return_dict
+        )
+
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError(
+                "You cannot specify both input_ids and inputs_embeds at the same time"
+            )
+        elif input_ids is not None:
+            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+            input_shape = input_ids.size()
+            input_ids = input_ids.view(-1, input_shape[-1])
+            batch_size = input_ids.shape[0]
+        elif inputs_embeds is not None:
+            input_shape = inputs_embeds.size()[:-1]
+            batch_size = inputs_embeds.shape[0]
+        else:
+            raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+        device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+        if token_type_ids is not None:
+            token_type_ids = token_type_ids.view(-1, input_shape[-1])
+
+        if past_key_values is None:
+            past_length = 0
+            past_key_values = tuple([None] * len(self.h))
+        else:
+            past_length = past_key_values[0][0].size(-2)
+
+        if position_ids is None:
+            position_ids = torch.arange(
+                past_length,
+                input_shape[-1] + past_length,
+                dtype=torch.long,
+                device=device,
+            )
+            position_ids = position_ids.unsqueeze(0)
+
+        # Attention mask.
+        if attention_mask is not None:
+            if batch_size <= 0:
+                raise ValueError("batch_size has to be defined and > 0")
+            attention_mask = attention_mask.view(batch_size, -1)
+            # We create a 3D attention mask from a 2D tensor mask.
+            # Sizes are [batch_size, 1, 1, to_seq_length]
+            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+            # this attention mask is more simple than the triangular masking of causal attention
+            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
+            attention_mask = attention_mask[:, None, None, :]
+
+            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+            # masked positions, this operation will create a tensor which is 0.0 for
+            # positions we want to attend and the dtype's smallest value for masked positions.
+            # Since we are adding it to the raw scores before the softmax, this is
+            # effectively the same as removing these entirely.
+            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
+            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
+
+        # Prepare head mask if needed
+        # 1.0 in head_mask indicate we keep the head
+        # attention_probs has shape bsz x num_attention_heads x N x N
+        # head_mask has shape n_layer x batch x num_attention_heads x N x N
+        head_mask = self.get_head_mask(head_mask, self.config.n_layer)
+
+        if inputs_embeds is None:
+            inputs_embeds = self.wte(input_ids)
+
+        hidden_states = inputs_embeds
+
+        if token_type_ids is not None:
+            token_type_embeds = self.wte(token_type_ids)
+            hidden_states = hidden_states + token_type_embeds
+
+        hidden_states = self.drop(hidden_states)
+
+        output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
+
+        if self.gradient_checkpointing and self.training:
+            if use_cache:
+                logger.warning_once(
+                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                )
+                use_cache = False
+
+        presents = () if use_cache else None
+        all_self_attentions = () if output_attentions else None
+        all_hidden_states = () if output_hidden_states else None
+        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
+            # Model parallel
+            if self.model_parallel:
+                torch.cuda.set_device(hidden_states.device)
+                # Ensure layer_past is on same device as hidden_states (might not be correct)
+                if layer_past is not None:
+                    layer_past = tuple(
+                        past_state.to(hidden_states.device) for past_state in layer_past
+                    )
+                # Ensure that attention_mask is always on the same device as hidden_states
+                if attention_mask is not None:
+                    attention_mask = attention_mask.to(hidden_states.device)
+                if isinstance(head_mask, torch.Tensor):
+                    head_mask = head_mask.to(hidden_states.device)
+            if output_hidden_states:
+                all_hidden_states = all_hidden_states + (hidden_states,)
+
+            if self.gradient_checkpointing and self.training:
+                outputs = self._gradient_checkpointing_func(
+                    block.__call__,
+                    hidden_states,
+                    None,
+                    attention_mask,
+                    position_ids,
+                    head_mask[i],
+                    use_cache,
+                    output_attentions,
+                )
+            else:
+                outputs = block(
+                    hidden_states=hidden_states,
+                    layer_past=layer_past,
+                    attention_mask=attention_mask,
+                    position_ids=position_ids,
+                    head_mask=head_mask[i],
+                    use_cache=use_cache,
+                    output_attentions=output_attentions,
+                )
+
+            hidden_states = outputs[0]
+            if use_cache is True:
+                presents = presents + (outputs[1],)
+
+            if output_attentions:
+                all_self_attentions = all_self_attentions + (
+                    outputs[2 if use_cache else 1],
+                )
+
+            # Model Parallel: If it's the last layer for that device, put things on the next device
+            if self.model_parallel:
+                for k, v in self.device_map.items():
+                    if i == v[-1] and "cuda:" + str(k) != self.last_device:
+                        hidden_states = hidden_states.to("cuda:" + str(k + 1))
+
+        hidden_states = self.ln_f(hidden_states)
+
+        hidden_states = hidden_states.view(output_shape)
+        # Add last hidden state
+        if output_hidden_states:
+            all_hidden_states = all_hidden_states + (hidden_states,)
+
+        if not return_dict:
+            return tuple(
+                v
+                for v in [
+                    hidden_states,
+                    presents,
+                    all_hidden_states,
+                    all_self_attentions,
+                ]
+                if v is not None
+            )
+
+        return BaseModelOutputWithPast(
+            last_hidden_state=hidden_states,
+            past_key_values=presents,
+            hidden_states=all_hidden_states,
+            attentions=all_self_attentions,
+        )
+
+
+@add_start_docstrings(
+    """
+    The GPT-J Model transformer with a language modeling head on top.
+    """,
+    GPTJ_START_DOCSTRING,
+)
+class GPTJForCausalLM(GPTJPreTrainedModel):
+    _tied_weights_keys = ["lm_head.weight"]
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.transformer = GPTJModel(config)
+        self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
+
+        # Model parallel
+        self.model_parallel = False
+        self.device_map = None
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @add_start_docstrings(PARALLELIZE_DOCSTRING)
+    def parallelize(self, device_map=None):
+        warnings.warn(
+            "`GPTJForCausalLM.parallelize` is deprecated and will be removed in v5 of Transformers, you should load"
+            " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
+            " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':"
+            " 0, 'transformer.h.1': 1, ...}",
+            FutureWarning,
+        )
+        self.device_map = (
+            get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
+            if device_map is None
+            else device_map
+        )
+        assert_device_map(self.device_map, len(self.transformer.h))
+        self.transformer.parallelize(self.device_map)
+        self.lm_head = self.lm_head.to(self.transformer.first_device)
+        self.model_parallel = True
+
+    @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
+    def deparallelize(self):
+        warnings.warn(
+            "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
+            FutureWarning,
+        )
+        self.transformer.deparallelize()
+        self.transformer = self.transformer.to("cpu")
+        self.lm_head = self.lm_head.to("cpu")
+        self.model_parallel = False
+        torch.cuda.empty_cache()
+
+    def get_output_embeddings(self):
+        return self.lm_head
+
+    def set_output_embeddings(self, new_embeddings):
+        self.lm_head = new_embeddings
+
+    def prepare_inputs_for_generation(
+        self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
+    ):
+        token_type_ids = kwargs.get("token_type_ids", None)
+        # Omit tokens covered by past_key_values
+        if past_key_values:
+            past_length = past_key_values[0][0].shape[2]
+
+            # Some generation methods already pass only the last input ID
+            if input_ids.shape[1] > past_length:
+                remove_prefix_length = past_length
+            else:
+                # Default to old behavior: keep only final ID
+                remove_prefix_length = input_ids.shape[1] - 1
+
+            input_ids = input_ids[:, remove_prefix_length:]
+            if token_type_ids is not None:
+                token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
+
+        attention_mask = kwargs.get("attention_mask", None)
+        position_ids = kwargs.get("position_ids", None)
+
+        if attention_mask is not None and position_ids is None:
+            # create position_ids on the fly for batch generation
+            position_ids = attention_mask.long().cumsum(-1) - 1
+            position_ids.masked_fill_(attention_mask == 0, 1)
+            if past_key_values:
+                position_ids = position_ids[:, -input_ids.shape[1] :]
+
+        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+        if inputs_embeds is not None and past_key_values is None:
+            model_inputs = {"inputs_embeds": inputs_embeds}
+        else:
+            model_inputs = {"input_ids": input_ids}
+
+        model_inputs.update(
+            {
+                "past_key_values": past_key_values,
+                "use_cache": kwargs.get("use_cache"),
+                "position_ids": position_ids,
+                "attention_mask": attention_mask,
+                "token_type_ids": token_type_ids,
+            }
+        )
+
+        return model_inputs
+
+    @add_start_docstrings_to_model_forward(
+        GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")
+    )
+    @add_code_sample_docstrings(
+        checkpoint=_CHECKPOINT_FOR_DOC,
+        output_type=CausalLMOutputWithPast,
+        config_class=_CONFIG_FOR_DOC,
+        real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
+    )
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+        """
+        return_dict = (
+            return_dict if return_dict is not None else self.config.use_return_dict
+        )
+
+        transformer_outputs = self.transformer(
+            input_ids,
+            past_key_values=past_key_values,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        hidden_states = transformer_outputs[0]
+
+        # Set device for model parallelism
+        if self.model_parallel:
+            torch.cuda.set_device(self.transformer.first_device)
+            hidden_states = hidden_states.to(self.lm_head.weight.device)
+
+        # make sure sampling in fp16 works correctly and
+        # compute loss in fp32 to match with mesh-tf version
+        # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
+        lm_logits = self.lm_head(hidden_states).to(torch.float32)
+
+        loss = None
+        if labels is not None:
+            # move labels to correct device to enable model parallelism
+            labels = labels.to(lm_logits.device)
+            # Shift so that tokens < n predict n
+            shift_logits = lm_logits[..., :-1, :].contiguous()
+            shift_labels = labels[..., 1:].contiguous()
+            # Flatten the tokens
+            loss_fct = CrossEntropyLoss()
+            loss = loss_fct(
+                shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
+            )
+
+            loss = loss.to(hidden_states.dtype)
+
+        if not return_dict:
+            output = (lm_logits,) + transformer_outputs[1:]
+            return ((loss,) + output) if loss is not None else output
+
+        return CausalLMOutputWithPast(
+            loss=loss,
+            logits=lm_logits,
+            past_key_values=transformer_outputs.past_key_values,
+            hidden_states=transformer_outputs.hidden_states,
+            attentions=transformer_outputs.attentions,
+        )
+
+    @staticmethod
+    def _reorder_cache(
+        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
+    ) -> Tuple[Tuple[torch.Tensor]]:
+        """
+        This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
+        [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
+        beam_idx at every generation step.
+        """
+        return tuple(
+            tuple(
+                past_state.index_select(0, beam_idx.to(past_state.device))
+                for past_state in layer_past
+            )
+            for layer_past in past_key_values
+        )
+
+
+@add_start_docstrings(
+    """
+    The GPT-J Model transformer with a sequence classification head on top (linear layer).
+
+    [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+    (e.g. GPT, GPT-2, GPT-Neo) do.
+
+    Since it does classification on the last token, it requires to know the position of the last token. If a
+    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+    each row of the batch).
+    """,
+    GPTJ_START_DOCSTRING,
+)
+class GPTJForSequenceClassification(GPTJPreTrainedModel):
+    def __init__(self, config):
+        super().__init__(config)
+        self.num_labels = config.num_labels
+        self.transformer = GPTJModel(config)
+        self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
+
+        # Model parallel
+        self.model_parallel = False
+        self.device_map = None
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @add_start_docstrings_to_model_forward(
+        GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")
+    )
+    @add_code_sample_docstrings(
+        checkpoint="ydshieh/tiny-random-gptj-for-sequence-classification",
+        output_type=SequenceClassifierOutputWithPast,
+        config_class=_CONFIG_FOR_DOC,
+        real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
+    )
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+        """
+        return_dict = (
+            return_dict if return_dict is not None else self.config.use_return_dict
+        )
+
+        transformer_outputs = self.transformer(
+            input_ids,
+            past_key_values=past_key_values,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        hidden_states = transformer_outputs[0]
+        logits = self.score(hidden_states)
+
+        if input_ids is not None:
+            batch_size = input_ids.shape[0]
+        else:
+            batch_size = inputs_embeds.shape[0]
+
+        if self.config.pad_token_id is None and batch_size != 1:
+            raise ValueError(
+                "Cannot handle batch sizes > 1 if no padding token is defined."
+            )
+        if self.config.pad_token_id is None:
+            sequence_lengths = -1
+        else:
+            if input_ids is not None:
+                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+                sequence_lengths = (
+                    torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+                )
+                sequence_lengths = sequence_lengths % input_ids.shape[-1]
+                sequence_lengths = sequence_lengths.to(logits.device)
+            else:
+                sequence_lengths = -1
+                logger.warning(
+                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+                    "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+                )
+
+        pooled_logits = logits[
+            torch.arange(batch_size, device=logits.device), sequence_lengths
+        ]
+
+        loss = None
+        if labels is not None:
+            labels = labels.to(pooled_logits.device)
+            if self.config.problem_type is None:
+                if self.num_labels == 1:
+                    self.config.problem_type = "regression"
+                elif self.num_labels > 1 and (
+                    labels.dtype == torch.long or labels.dtype == torch.int
+                ):
+                    self.config.problem_type = "single_label_classification"
+                else:
+                    self.config.problem_type = "multi_label_classification"
+
+            if self.config.problem_type == "regression":
+                loss_fct = MSELoss()
+                if self.num_labels == 1:
+                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+                else:
+                    loss = loss_fct(pooled_logits, labels)
+            elif self.config.problem_type == "single_label_classification":
+                loss_fct = CrossEntropyLoss()
+                loss = loss_fct(
+                    pooled_logits.view(-1, self.num_labels), labels.view(-1)
+                )
+            elif self.config.problem_type == "multi_label_classification":
+                loss_fct = BCEWithLogitsLoss()
+                loss = loss_fct(pooled_logits, labels)
+        if not return_dict:
+            output = (pooled_logits,) + transformer_outputs[1:]
+            return ((loss,) + output) if loss is not None else output
+
+        return SequenceClassifierOutputWithPast(
+            loss=loss,
+            logits=pooled_logits,
+            past_key_values=transformer_outputs.past_key_values,
+            hidden_states=transformer_outputs.hidden_states,
+            attentions=transformer_outputs.attentions,
+        )
+
+
+@add_start_docstrings(
+    """
+    The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like
+    SQuAD (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+    """,
+    GPTJ_START_DOCSTRING,
+)
+class GPTJForQuestionAnswering(GPTJPreTrainedModel):
+    def __init__(self, config):
+        super().__init__(config)
+        self.num_labels = config.num_labels
+        self.transformer = GPTJModel(config)
+        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+        # Model parallel
+        self.model_parallel = False
+        self.device_map = None
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @add_start_docstrings_to_model_forward(
+        GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")
+    )
+    @add_code_sample_docstrings(
+        checkpoint=_CHECKPOINT_FOR_DOC,
+        output_type=QuestionAnsweringModelOutput,
+        config_class=_CONFIG_FOR_DOC,
+        real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
+    )
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        start_positions: Optional[torch.LongTensor] = None,
+        end_positions: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
+        r"""
+        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+        """
+        return_dict = (
+            return_dict if return_dict is not None else self.config.use_return_dict
+        )
+
+        outputs = self.transformer(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        sequence_output = outputs[0]
+
+        logits = self.qa_outputs(sequence_output)
+        start_logits, end_logits = logits.split(1, dim=-1)
+        start_logits = start_logits.squeeze(-1).contiguous()
+        end_logits = end_logits.squeeze(-1).contiguous()
+
+        total_loss = None
+        if start_positions is not None and end_positions is not None:
+            # If we are on multi-GPU, split add a dimension
+            if len(start_positions.size()) > 1:
+                start_positions = start_positions.squeeze(-1).to(start_logits.device)
+            if len(end_positions.size()) > 1:
+                end_positions = end_positions.squeeze(-1).to(end_logits.device)
+            # sometimes the start/end positions are outside our model inputs, we ignore these terms
+            ignored_index = start_logits.size(1)
+            start_positions = start_positions.clamp(0, ignored_index)
+            end_positions = end_positions.clamp(0, ignored_index)
+
+            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+            start_loss = loss_fct(start_logits, start_positions)
+            end_loss = loss_fct(end_logits, end_positions)
+            total_loss = (start_loss + end_loss) / 2
+
+        if not return_dict:
+            output = (start_logits, end_logits) + outputs[2:]
+            return ((total_loss,) + output) if total_loss is not None else output
+
+        return QuestionAnsweringModelOutput(
+            loss=total_loss,
+            start_logits=start_logits,
+            end_logits=end_logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
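
Together, the two Python files form a self-contained GPT-J implementation, so the modeling code can be smoke-tested without downloading the 6B checkpoint. A hedged sketch with a tiny randomly initialized model (dimensions chosen so `rotary_dim` equals the per-head size; the real checkpoint needs roughly 24 GB of RAM in fp32):

```python
import torch

from configuration_gptj import GPTJConfig
from modeling_gptj import GPTJForCausalLM

# Tiny random model just to exercise the forward pass and the KV cache.
config = GPTJConfig(
    vocab_size=128, n_positions=64, n_embd=64, n_layer=2, n_head=4, rotary_dim=16
)
model = GPTJForCausalLM(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 8))
with torch.no_grad():
    out = model(input_ids, use_cache=True)

print(out.logits.shape)          # torch.Size([1, 8, 128])
print(len(out.past_key_values))  # one (key, value) pair per layer -> 2
```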