Hiveurban committed on
Commit c8c8b02 • 1 Parent(s): a6633be

Upload BertForPrefixMarking.py with huggingface_hub

Files changed (1)
  1. BertForPrefixMarking.py +248 -0
BertForPrefixMarking.py ADDED
@@ -0,0 +1,248 @@
from transformers.utils import ModelOutput
import torch
from torch import nn
from typing import Dict, List, Tuple, Optional
from dataclasses import dataclass
from transformers import BertPreTrainedModel, BertModel, BertTokenizerFast

# define the classes, and the possible prefixes for each class
POSSIBLE_PREFIX_CLASSES = [ ['לכש', 'כש', 'מש', 'בש', 'לש'], ['מ'], ['ש'], ['ה'], ['ו'], ['כ'], ['ל'], ['ב'] ]
# map each individual prefix to its class number
PREFIXES_TO_CLASS = {w:i for i,l in enumerate(POSSIBLE_PREFIX_CLASSES) for w in l}
# keep a list of all the prefixes, sorted by length, so that we can decompose
# a given prefix and figure out its classes
ALL_PREFIX_ITEMS = list(sorted(PREFIXES_TO_CLASS.keys(), key=len, reverse=True))
TOTAL_POSSIBLE_PREFIX_CLASSES = len(POSSIBLE_PREFIX_CLASSES)

def get_prefixes_from_str(s, greedy=False):
    # keep trimming prefixes from the string
    while len(s) > 0 and s[0] in PREFIXES_TO_CLASS:
        # find the longest prefix to trim
        next_pre = next((pre for pre in ALL_PREFIX_ITEMS if s.startswith(pre)), None)
        if next_pre is None:
            return
        yield next_pre
        # if the chosen prefix is more than one letter, there is always an option that the
        # prefix is actually just its first letter - so offer that up as a valid prefix as well.
        # We still jump ahead by the length of the longer one, since if the next two/three
        # letters form a prefix, they have to be the longest one
        if not greedy and len(next_pre) > 1:
            yield next_pre[0]
        s = s[len(next_pre):]

def get_prefix_classes_from_str(s, greedy=False):
    for pre in get_prefixes_from_str(s, greedy):
        yield PREFIXES_TO_CLASS[pre]

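# Editorial sketch (not part of the committed file): a quick trace of the candidate-prefix
# enumeration above. Because ה, ל and כ are themselves prefix letters, the generator deliberately
# over-generates into the stem; the classifier head is what decides which candidates are real.
def _demo_prefix_enumeration():
    word = 'וכשהלכתי'  # "and when I went"
    print(list(get_prefixes_from_str(word)))        # expected: ['ו', 'כש', 'כ', 'ה', 'ל', 'כ']
    print(list(get_prefix_classes_from_str(word)))  # the matching class index for each candidate
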
@dataclass
class PrefixesClassifiersOutput(ModelOutput):
    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None

class BertPrefixMarkingHead(nn.Module):
    def __init__(self, config) -> None:
        super().__init__()
        self.config = config

        # an embedding table containing an embedding for each prefix class + 1 for NONE
        # we will concatenate either the class embedding or the NONE embedding for each class,
        # and we want the concatenated size to be the hidden_size
        prefix_class_embed = config.hidden_size // TOTAL_POSSIBLE_PREFIX_CLASSES
        self.prefix_class_embeddings = nn.Embedding(TOTAL_POSSIBLE_PREFIX_CLASSES + 1, prefix_class_embed)

        # one linear layer for the transformation, an activation, and then one binary classifier per prefix class
        self.transform = nn.Linear(config.hidden_size + prefix_class_embed * TOTAL_POSSIBLE_PREFIX_CLASSES, config.hidden_size)
        self.activation = nn.Tanh()
        self.classifiers = nn.ModuleList([nn.Linear(config.hidden_size, 2) for _ in range(TOTAL_POSSIBLE_PREFIX_CLASSES)])

    def forward(
        self,
        hidden_states: torch.Tensor,
        prefix_class_id_options: torch.Tensor,
        labels: Optional[torch.Tensor] = None) -> Tuple[torch.FloatTensor, torch.FloatTensor]:

        # encode the prefix_class_id_options
        # If input_ids is batch x seq_len
        # Then sequence_output is batch x seq_len x hidden_dim
        # So prefix_class_id_options is batch x seq_len x TOTAL_POSSIBLE_PREFIX_CLASSES
        # Looking up the embeddings gives us batch x seq_len x TOTAL_POSSIBLE_PREFIX_CLASSES x prefix_class_embed
        possible_class_embed = self.prefix_class_embeddings(prefix_class_id_options)
        # then flatten the final dimension - now we have batch x seq_len x hidden_dim_2
        possible_class_embed = possible_class_embed.reshape(possible_class_embed.shape[:-2] + (-1,))

        # concatenate the new class embed into the sequence output before the transform
        pre_transform_output = torch.cat((hidden_states, possible_class_embed), dim=-1)  # batch x seq_len x (hidden_dim + hidden_dim_2)
        pre_logits_output = self.activation(self.transform(pre_transform_output))  # batch x seq_len x hidden_dim

        # run each of the classifiers on the transformed output
        logits = torch.cat([cls(pre_logits_output).unsqueeze(-2) for cls in self.classifiers], dim=-2)

        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, 2), labels.view(-1))

        return (loss, logits)


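# Editorial shape sketch (not part of the committed file): run the head on random hidden states
# with a default BertConfig (hidden_size=768) and an all-NONE prefix-option tensor.
def _demo_prefix_head_shapes():
    from transformers import BertConfig
    head = BertPrefixMarkingHead(BertConfig())
    hidden = torch.randn(2, 5, 768)  # batch x seq_len x hidden_dim
    options = torch.full((2, 5, TOTAL_POSSIBLE_PREFIX_CLASSES), TOTAL_POSSIBLE_PREFIX_CLASSES, dtype=torch.long)
    loss, logits = head(hidden, options)  # loss is None when no labels are passed
    print(logits.shape)                   # torch.Size([2, 5, 8, 2]) - a yes/no pair per prefix class
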
class BertForPrefixMarking(BertPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)

        self.bert = BertModel(config, add_pooling_layer=False)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.prefix = BertPrefixMarkingHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        prefix_class_id_options: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length, total_prefix_classes)`, *optional*):
            Labels for computing the prefix marking loss. Indices should be in `[0, 1]` - one binary label
            per prefix class per token.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        bert_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = bert_outputs[0]
        hidden_states = self.dropout(hidden_states)

        loss, logits = self.prefix.forward(hidden_states, prefix_class_id_options, labels)
        if not return_dict:
            return (loss, logits) + bert_outputs[2:]

        return PrefixesClassifiersOutput(
            loss=loss,
            logits=logits,
            hidden_states=bert_outputs.hidden_states,
            attentions=bert_outputs.attentions,
        )

    def predict(self, sentences: List[str], tokenizer: BertTokenizerFast, padding='longest'):
        # step 1: encode the sentences using the tokenizer, and get the input tensors + prefix id tensors
        inputs = encode_sentences_for_bert_for_prefix_marking(tokenizer, sentences, padding)
        inputs.pop('offset_mapping')
        inputs = {k: v.to(self.device) for k, v in inputs.items()}

        # step 2: run through bert and parse the predictions
        logits = self.forward(**inputs, return_dict=True).logits
        return parse_logits(inputs['input_ids'].tolist(), sentences, tokenizer, logits)

def parse_logits(input_ids: List[List[int]], sentences: List[str], tokenizer: BertTokenizerFast, logits: torch.FloatTensor):
    # extract the predictions by argmaxing the final dimension (batch x sequence x prefixes x prediction)
    logit_preds = torch.argmax(logits, axis=3).tolist()

    ret = []

    for sent_idx, sent_ids in enumerate(input_ids):
        tokens = tokenizer.convert_ids_to_tokens(sent_ids)
        ret.append([])
        for tok_idx, token in enumerate(tokens):
            # skip padding tokens, and skip word-piece continuations (they are merged in below)
            if token == tokenizer.pad_token: continue
            if token.startswith('##'): continue

            # merge in the following word-piece tokens (those starting with '##'), so we work on the whole word
            next_tok_idx = tok_idx + 1
            while next_tok_idx < len(tokens) and tokens[next_tok_idx].startswith('##'):
                token += tokens[next_tok_idx][2:]
                next_tok_idx += 1

            prefix_len = get_predicted_prefix_len_from_logits(token, logit_preds[sent_idx][tok_idx])

            if not prefix_len:
                ret[-1].append([token])
            else:
                ret[-1].append([token[:prefix_len], token[prefix_len:]])
    return ret

def encode_sentences_for_bert_for_prefix_marking(tokenizer: BertTokenizerFast, sentences: List[str], padding='longest', truncation=True):
    inputs = tokenizer(sentences, padding=padding, truncation=truncation, return_offsets_mapping=True, return_tensors='pt')
    # create our prefix_id_options tensor, shaped like input_ids but with an additional dimension
    # marking, for each prefix class, whether it is possible for that word. every entry starts out
    # as TOTAL_POSSIBLE_PREFIX_CLASSES, the index of the NONE embedding
    prefix_id_options = torch.full(inputs['input_ids'].shape + (TOTAL_POSSIBLE_PREFIX_CLASSES,), TOTAL_POSSIBLE_PREFIX_CLASSES, dtype=torch.long)

    # go through each token, and fill in the vector accordingly
    for sent_idx, sent_ids in enumerate(inputs['input_ids']):
        tokens = tokenizer.convert_ids_to_tokens(sent_ids)
        for tok_idx, token in enumerate(tokens):
            # if the first letter isn't a valid prefix letter, there is nothing to mark
            if len(token) < 2 or not token[0] in PREFIXES_TO_CLASS: continue

            # merge in the following word-piece tokens (those starting with '##'), so we check the whole word
            next_tok_idx = tok_idx + 1
            while next_tok_idx < len(tokens) and tokens[next_tok_idx].startswith('##'):
                token += tokens[next_tok_idx][2:]
                next_tok_idx += 1

            # find all the possible prefix classes, and mark each one with its own class id
            # (used for the embedding lookup; impossible classes keep the NONE index)
            for pre_class in get_prefix_classes_from_str(token):
                prefix_id_options[sent_idx, tok_idx, pre_class] = pre_class

    inputs['prefix_class_id_options'] = prefix_id_options
    return inputs

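# Editorial sketch (not part of the committed file): what the helper above adds on top of the regular
# tokenizer output. The checkpoint name is a placeholder for any Hebrew word-piece tokenizer.
def _demo_encoding():
    tokenizer = BertTokenizerFast.from_pretrained('some-org/some-hebrew-bert')  # hypothetical name
    enc = encode_sentences_for_bert_for_prefix_marking(tokenizer, ['וכשהלכתי הביתה'])
    # batch x seq_len x TOTAL_POSSIBLE_PREFIX_CLASSES: the class id wherever that prefix class is
    # possible for the word, and the NONE index (TOTAL_POSSIBLE_PREFIX_CLASSES) everywhere else
    print(enc['prefix_class_id_options'].shape)
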
def get_predicted_prefix_len_from_logits(token, token_logits):
    # Note: token_logits here are the argmaxed 0/1 predictions for this token, one per prefix class.
    # Go through each possible prefix, and check if the model predicted yes for it - if so, add its
    # length to the matched prefix length, otherwise break out. That handles cases where the model
    # predicts prefix combinations that don't exist on the word.
    # For example, if we have the word ושכשהלכתי and the model predicts ו & כש, then we will only
    # take the vav (ו), because in order to get the כש we need the ש as well.
    # Two extra rules:
    # 1] Don't allow the same prefix multiple times
    # 2] Always check that the word starts with that prefix - otherwise it's invalid
    #    (except for the case of a multi-letter prefix, where we force the next prefix to be the last)
    cur_len, skip_next, last_check, seen_prefixes = 0, False, False, set()
    for prefix in get_prefixes_from_str(token):
        # Are we skipping this prefix? This happens when we matched כש - then we don't also allow the single ש
        if skip_next:
            skip_next = False
            continue
        # check for duplicate prefixes - we don't allow two of the same prefix;
        # if the model predicted two of the same, then we break out
        if prefix in seen_prefixes: break
        seen_prefixes.add(prefix)

        # check if the model predicted this prefix
        if token_logits[PREFIXES_TO_CLASS[prefix]]:
            cur_len += len(prefix)
            if last_check: break
            skip_next = len(prefix) > 1
        # Otherwise, the model predicted no, so this is the end of the prefix and time to break out.
        # *Except* if it's a multi-letter prefix - then we still allow just its first letter (e.g. if כש
        # doesn't match, we still allow כ), but we know the word continues with a ש, and if it isn't כש
        # then it can't be כ-ש- either - so whatever we accept next must be the last prefix
        elif len(prefix) > 1:
            last_check = True
        else:
            break

    return cur_len
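# Editorial usage sketch (not part of the committed file). The repository id is a placeholder, not
# taken from this commit; any checkpoint whose weights match BertForPrefixMarking will work.
def _demo_predict():
    repo = 'some-org/some-prefix-marking-checkpoint'  # hypothetical
    tokenizer = BertTokenizerFast.from_pretrained(repo)
    model = BertForPrefixMarking.from_pretrained(repo)
    print(model.predict(['וכשהלכתי הביתה'], tokenizer))
    # -> one list per sentence; every merged token (special tokens included) appears either as
    #    [token] or as [prefix, remainder], depending on the trained weights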