jupyterjazz committed
Commit
c0215f6
1 Parent(s): 8b2ad1e

fix: partition adapter mask when batch size is specified (#41)


- fix: partition adapter mask when batch size is specified (e6e3a6f1dcfa01de524099f84cd7a85299175f55)

Files changed (1)
  1. modeling_xlm_roberta.py +6 -3
modeling_xlm_roberta.py CHANGED
@@ -558,15 +558,18 @@ class XLMRobertaModel(XLMRobertaPreTrainedModel):
             )
         else:
             range_iter = range(0, len(sentences), batch_size)
-        lora_arguments = (
-            {"adapter_mask": adapter_mask} if adapter_mask is not None else {}
-        )
+
         for i in range_iter:
             encoded_input = self.tokenizer(
                 sentences[i : i + batch_size],
                 return_tensors="pt",
                 **tokenizer_kwargs,
             ).to(self.device)
+            lora_arguments = (
+                {"adapter_mask": adapter_mask[i : i + batch_size]}
+                if adapter_mask is not None
+                else {}
+            )
             token_embs = self.forward(**encoded_input, **lora_arguments)[0]
 
             # Accumulate in fp32 to avoid overflow
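
For context on the fix: the adapter mask is expected to carry one adapter id per input sentence, so once encoding is done in batches it has to be partitioned with the same i : i + batch_size window as the sentences; passing the full-length mask to every batch leaves its first dimension out of step with the inputs. Below is a minimal, self-contained sketch of that partitioning, where the example sentences, the integer tensor, and the print standing in for the real tokenizer/forward call are illustrative assumptions rather than the upstream encode() code.

import torch

# Names mirror the diff above (sentences, adapter_mask, batch_size);
# the model/tokenizer call is omitted, so this only demonstrates how the
# mask is sliced in lockstep with the sentences.
sentences = ["first text", "second text", "third text"]
adapter_mask = torch.tensor([0, 1, 0], dtype=torch.int32)  # assumed: one adapter id per sentence
batch_size = 2

for i in range(0, len(sentences), batch_size):
    batch = sentences[i : i + batch_size]
    # Before the fix, the mask was built once outside the loop and passed
    # whole to every forward call, so its length stopped matching the batch.
    # Slicing with the same window keeps one mask entry per sentence in the batch.
    lora_arguments = (
        {"adapter_mask": adapter_mask[i : i + batch_size]}
        if adapter_mask is not None
        else {}
    )
    print(batch, lora_arguments)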