philipp-zettl committed on
Commit
4556a29
1 Parent(s): 6089159

Create src/optimization.py

Files changed (1)
  1. src/optimization.py +84 -0
src/optimization.py ADDED
@@ -0,0 +1,84 @@
+ from collections import Counter
+ from itertools import chain
+ import math
+
+ import torch
+ from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
+
+
+ def ngrams(sequence, n):
+     """Return all contiguous n-grams of `sequence` as tuples."""
+     return [tuple(sequence[i:i + n]) for i in range(len(sequence) - n + 1)]
+
+
+ def count_ngrams(sequence, max_n):
+     """Count all n-grams of order 1..max_n in `sequence`."""
+     counts = Counter()
+     for n in range(1, max_n + 1):
+         counts.update(ngrams(sequence, n))
+     return counts
+
+
+ def self_bleu(outputs):
+     """Self-BLEU over tokenized outputs: each output is scored against
+     all the others as references. Lower values indicate more diversity."""
+     smoothing_function = SmoothingFunction().method1
+     scores = []
+     for i in range(len(outputs)):
+         references = outputs[:i] + outputs[i + 1:]
+         # Skip when there are no references to compare against.
+         if references:
+             scores.append(sentence_bleu(references, outputs[i], smoothing_function=smoothing_function))
+     # With fewer than two outputs there is nothing to score.
+     if not scores:
+         return 0
+     return sum(scores) / len(scores)
+
+
+ def dist_n(outputs, n):
+     """Distinct-n: ratio of unique to total n-grams across all outputs."""
+     all_ngrams = list(chain(*[ngrams(output, n) for output in outputs]))
+     unique_ngrams = set(all_ngrams)
+     return len(unique_ngrams) / len(all_ngrams) if all_ngrams else 0
+
+
+ def perplexity(model, tokenizer, texts):
+     """Sliding-window perplexity of `texts` under a causal language model."""
+     encodings = tokenizer(texts, return_tensors='pt', padding=True, truncation=True)
+     max_length = model.config.n_positions  # context size of GPT-2-style configs
+     stride = 512
+     seq_len = encodings.input_ids.size(1)
+     lls = []
+     for i in range(0, seq_len, stride):
+         begin_loc = max(i + stride - max_length, 0)
+         end_loc = min(i + stride, seq_len)  # clamp the final window to the sequence
+         trg_len = end_loc - i
+         input_ids = encodings.input_ids[:, begin_loc:end_loc].to(model.device)
+         target_ids = input_ids.clone()
+         # Only the last trg_len tokens are scored; -100 is ignored by the loss.
+         target_ids[:, :-trg_len] = -100
+
+         with torch.no_grad():
+             outputs = model(input_ids, labels=target_ids)
+             # outputs.loss is the mean NLL per target token; rescale to a sum.
+             log_likelihood = outputs.loss * trg_len
+         lls.append(log_likelihood)
+
+     ppl = torch.exp(torch.stack(lls).sum() / end_loc)
+     return ppl.item()
+
+
+ def js_divergence(p, q):
+     """Jensen-Shannon divergence between two discrete distributions."""
+     def kl_divergence(p, q):
+         # Zero entries contribute nothing; m[i] == 0 implies p[i] == 0,
+         # so skipping them is safe when q is the mixture m.
+         return sum(p[i] * math.log(p[i] / q[i]) for i in range(len(p)) if p[i] != 0 and q[i] != 0)
+
+     # Normalize both inputs so they sum to 1.
+     p_norm = [float(i) / sum(p) for i in p]
+     q_norm = [float(i) / sum(q) for i in q]
+
+     m = [(p_norm[i] + q_norm[i]) / 2 for i in range(len(p_norm))]
+
+     return (kl_divergence(p_norm, m) + kl_divergence(q_norm, m)) / 2
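
For reference, a minimal usage sketch of these metrics. It assumes the file is importable as src.optimization and uses the public gpt2 checkpoint from Hugging Face transformers; the sample texts, the pad-token line, and the count vectors are illustrative assumptions, not part of the commit.

from transformers import AutoModelForCausalLM, AutoTokenizer

from src.optimization import self_bleu, dist_n, perplexity, js_divergence

# Illustrative generations; in practice these would come from the model.
texts = ["the cat sat on the mat", "the cat lay on the rug", "a dog ran in the park"]
tokenized = [t.split() for t in texts]  # self_bleu and dist_n expect token lists

print(self_bleu(tokenized))  # lower = more diverse outputs
print(dist_n(tokenized, 2))  # distinct bigram ratio across outputs

tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token  # GPT-2 has no pad token by default
model = AutoModelForCausalLM.from_pretrained("gpt2")
print(perplexity(model, tokenizer, texts))

# Compare two unigram count distributions over a shared vocabulary;
# inputs are normalized internally, so raw counts are fine.
print(js_divergence([2, 1, 1, 0], [1, 1, 1, 1]))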