unsubscribe committed
Commit cc06cac
1 parent: a4eded4
This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +604 -0
  2. model_repository/postprocessing +1 -0
  3. model_repository/preprocessing +1 -0
  4. model_repository/turbomind +1 -0
  5. service_docker_up.sh +87 -0
  6. triton_models/interactive/1/placeholder +0 -0
  7. triton_models/interactive/1/weights +1 -0
  8. triton_models/interactive/config.pbtxt +281 -0
  9. triton_models/postprocessing/1/model.py +129 -0
  10. triton_models/postprocessing/1/tokenizer +1 -0
  11. triton_models/postprocessing/config.pbtxt +36 -0
  12. triton_models/preprocessing/1/model.py +151 -0 (see the Python-backend sketch after this list)
  13. triton_models/preprocessing/1/tokenizer +1 -0
  14. triton_models/preprocessing/config.pbtxt +74 -0
  15. triton_models/tokenizer/config.json +33 -0
  16. triton_models/tokenizer/configuration_internlm.py +120 -0
  17. triton_models/tokenizer/generation_config.json +6 -0
  18. triton_models/tokenizer/modeling_internlm.py +998 -0
  19. triton_models/tokenizer/placeholder +0 -0
  20. triton_models/tokenizer/pytorch_model.bin.index.json +0 -0
  21. triton_models/tokenizer/special_tokens_map.json +6 -0
  22. triton_models/tokenizer/tokenization_internlm.py +242 -0
  23. triton_models/tokenizer/tokenizer.model +3 -0
  24. triton_models/tokenizer/tokenizer.py +291 -0
  25. triton_models/tokenizer/tokenizer_config.json +15 -0
  26. triton_models/weights/config.ini +3 -0
  27. triton_models/weights/layers.0.attention.w_qkv.0.qweight +3 -0
  28. triton_models/weights/layers.0.attention.w_qkv.0.scales_zeros +3 -0
  29. triton_models/weights/layers.0.attention.wo.0.qweight +3 -0
  30. triton_models/weights/layers.0.attention.wo.0.scales_zeros +3 -0
  31. triton_models/weights/layers.0.attention_norm.weight +3 -0
  32. triton_models/weights/layers.0.feed_forward.w13.0.qweight +3 -0
  33. triton_models/weights/layers.0.feed_forward.w13.0.scales_zeros +3 -0
  34. triton_models/weights/layers.0.feed_forward.w2.0.qweight +3 -0
  35. triton_models/weights/layers.0.feed_forward.w2.0.scales_zeros +3 -0
  36. triton_models/weights/layers.0.ffn_norm.weight +3 -0
  37. triton_models/weights/layers.1.attention.w_qkv.0.qweight +3 -0
  38. triton_models/weights/layers.1.attention.w_qkv.0.scales_zeros +3 -0
  39. triton_models/weights/layers.1.attention.wo.0.qweight +3 -0
  40. triton_models/weights/layers.1.attention.wo.0.scales_zeros +3 -0
  41. triton_models/weights/layers.1.attention_norm.weight +3 -0
  42. triton_models/weights/layers.1.feed_forward.w13.0.qweight +3 -0
  43. triton_models/weights/layers.1.feed_forward.w13.0.scales_zeros +3 -0
  44. triton_models/weights/layers.1.feed_forward.w2.0.qweight +3 -0
  45. triton_models/weights/layers.1.feed_forward.w2.0.scales_zeros +3 -0
  46. triton_models/weights/layers.1.ffn_norm.weight +3 -0
  47. triton_models/weights/layers.10.attention.w_qkv.0.qweight +3 -0
  48. triton_models/weights/layers.10.attention.w_qkv.0.scales_zeros +3 -0
  49. triton_models/weights/layers.10.attention.wo.0.qweight +3 -0
  50. triton_models/weights/layers.10.attention.wo.0.scales_zeros +3 -0
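The two model.py files listed above (triton_models/preprocessing/1/model.py and triton_models/postprocessing/1/model.py) are Triton Python-backend models; in this kind of ensemble they typically wrap the tokenizer for encoding and decoding around the turbomind generation model. Their actual contents are not shown in this view, so the following is only a minimal sketch of the interface such a file implements; the tensor names "TEXT" and "TOKENS" are placeholders, not the names used in the config.pbtxt files of this commit.

    # Minimal sketch of a Triton Python-backend model; not the code from this commit.
    import json

    import numpy as np
    import triton_python_backend_utils as pb_utils  # provided by the Triton runtime


    class TritonPythonModel:
        def initialize(self, args):
            # args["model_config"] carries this model's config.pbtxt as a JSON string.
            self.model_config = json.loads(args["model_config"])

        def execute(self, requests):
            responses = []
            for request in requests:
                # Read an input tensor by name and view it as a NumPy array.
                text = pb_utils.get_input_tensor_by_name(request, "TEXT").as_numpy()
                # ... tokenization or detokenization would happen here ...
                tokens = np.zeros((1, 1), dtype=np.int32)  # placeholder output
                out = pb_utils.Tensor("TOKENS", tokens)
                responses.append(pb_utils.InferenceResponse(output_tensors=[out]))
            return responses

        def finalize(self):
            pass

The turbomind model itself is presumably served by the TurboMind backend rather than this Python interface; only the text-handling ends of the pipeline use model.py files.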
.gitattributes CHANGED
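Every line added in the hunk below has the same form: a weight-file path followed by "filter=lfs diff=lfs merge=lfs -text", the attribute set that routes a path through the Git LFS filter. Such entries are normally written by "git lfs track"; purely as an illustration of the format (not how this commit was produced), a sketch that emits one rule per file under triton_models/weights/ might look like:

    # Illustrative sketch only: append one Git LFS tracking rule per weight file,
    # matching the line format added in the diff below. In practice "git lfs track"
    # maintains .gitattributes for you, usually with a glob instead of per-file rules.
    from pathlib import Path

    with open(".gitattributes", "a", encoding="utf-8") as attrs:
        for weight in sorted(Path("triton_models/weights").iterdir()):
            attrs.write(f"{weight.as_posix()} filter=lfs diff=lfs merge=lfs -text\n")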
@@ -33,3 +33,607 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ triton_models/weights/layers.32.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
37
+ triton_models/weights/layers.39.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
38
+ triton_models/weights/layers.8.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
39
+ triton_models/weights/layers.17.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
40
+ triton_models/weights/layers.30.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
41
+ triton_models/weights/layers.37.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
42
+ triton_models/weights/layers.37.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
43
+ triton_models/weights/layers.40.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
44
+ triton_models/weights/layers.6.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
45
+ triton_models/weights/layers.18.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
46
+ triton_models/weights/layers.25.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
47
+ triton_models/weights/layers.44.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
48
+ triton_models/weights/layers.8.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
49
+ triton_models/weights/layers.23.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
50
+ triton_models/weights/layers.42.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
51
+ triton_models/weights/layers.44.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
52
+ triton_models/weights/layers.47.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
53
+ triton_models/weights/layers.17.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
54
+ triton_models/weights/layers.24.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
55
+ triton_models/weights/layers.25.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
56
+ triton_models/weights/layers.35.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
57
+ triton_models/weights/layers.3.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
58
+ triton_models/weights/layers.5.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
59
+ triton_models/weights/layers.16.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
60
+ triton_models/weights/layers.40.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
61
+ triton_models/weights/layers.45.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
62
+ triton_models/weights/layers.48.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
63
+ triton_models/weights/layers.33.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
64
+ triton_models/weights/layers.41.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
65
+ triton_models/weights/layers.42.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
66
+ triton_models/weights/layers.45.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
67
+ triton_models/weights/layers.53.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
68
+ triton_models/weights/layers.27.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
69
+ triton_models/weights/layers.16.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
70
+ triton_models/weights/layers.21.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
71
+ triton_models/weights/layers.23.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
72
+ triton_models/weights/layers.29.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
73
+ triton_models/weights/layers.44.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
74
+ triton_models/weights/layers.51.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
75
+ triton_models/weights/layers.6.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
76
+ triton_models/weights/layers.14.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
77
+ triton_models/weights/layers.19.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
78
+ triton_models/weights/layers.20.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
79
+ triton_models/weights/layers.23.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
80
+ triton_models/weights/layers.28.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
81
+ triton_models/weights/layers.2.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
82
+ triton_models/weights/layers.54.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
83
+ triton_models/weights/layers.56.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
84
+ triton_models/weights/layers.15.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
85
+ triton_models/weights/layers.25.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
86
+ triton_models/weights/layers.2.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
87
+ triton_models/weights/layers.30.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
88
+ triton_models/weights/layers.46.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
89
+ triton_models/weights/layers.47.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
90
+ triton_models/weights/layers.54.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
91
+ triton_models/weights/layers.10.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
92
+ triton_models/weights/layers.34.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
93
+ triton_models/weights/layers.3.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
94
+ triton_models/weights/layers.0.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
95
+ triton_models/weights/layers.3.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
96
+ triton_models/weights/layers.48.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
97
+ triton_models/weights/layers.48.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
98
+ triton_models/weights/layers.48.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
99
+ triton_models/weights/layers.24.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
100
+ triton_models/weights/layers.27.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
101
+ triton_models/weights/layers.36.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
102
+ triton_models/weights/layers.49.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
103
+ triton_models/weights/layers.54.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
104
+ triton_models/weights/layers.13.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
105
+ triton_models/weights/layers.18.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
106
+ triton_models/weights/layers.41.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
107
+ triton_models/weights/layers.18.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
108
+ triton_models/weights/layers.58.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
109
+ triton_models/weights/layers.13.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
110
+ triton_models/weights/layers.42.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
111
+ triton_models/weights/layers.46.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
112
+ triton_models/weights/layers.50.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
113
+ triton_models/weights/layers.20.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
114
+ triton_models/weights/layers.14.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
115
+ triton_models/weights/layers.40.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
116
+ triton_models/weights/layers.42.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
117
+ triton_models/weights/layers.50.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
118
+ triton_models/weights/layers.51.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
119
+ triton_models/weights/layers.5.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
120
+ triton_models/weights/layers.11.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
121
+ triton_models/weights/layers.8.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
122
+ triton_models/weights/norm.weight filter=lfs diff=lfs merge=lfs -text
123
+ triton_models/weights/layers.52.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
124
+ triton_models/weights/layers.17.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
125
+ triton_models/weights/layers.23.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
126
+ triton_models/weights/layers.26.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
127
+ triton_models/weights/layers.55.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
128
+ triton_models/weights/layers.8.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
129
+ triton_models/weights/layers.11.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
130
+ triton_models/weights/layers.20.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
131
+ triton_models/weights/layers.43.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
132
+ triton_models/weights/layers.7.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
133
+ triton_models/weights/layers.7.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
134
+ triton_models/weights/layers.13.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
135
+ triton_models/weights/layers.21.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
136
+ triton_models/weights/layers.32.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
137
+ triton_models/weights/layers.39.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
138
+ triton_models/weights/layers.44.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
139
+ triton_models/weights/layers.52.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
140
+ triton_models/weights/layers.57.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
141
+ triton_models/weights/layers.13.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
142
+ triton_models/weights/layers.28.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
143
+ triton_models/weights/layers.2.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
144
+ triton_models/weights/layers.7.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
145
+ triton_models/weights/layers.22.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
146
+ triton_models/weights/layers.35.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
147
+ triton_models/weights/layers.3.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
148
+ triton_models/weights/layers.45.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
149
+ triton_models/weights/layers.4.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
150
+ triton_models/weights/tok_embeddings.weight filter=lfs diff=lfs merge=lfs -text
151
+ triton_models/weights/layers.32.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
152
+ triton_models/weights/layers.59.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
153
+ triton_models/weights/layers.7.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
154
+ triton_models/weights/layers.55.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
155
+ triton_models/weights/layers.23.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
156
+ triton_models/weights/layers.56.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
157
+ triton_models/weights/layers.1.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
158
+ triton_models/weights/layers.39.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
159
+ triton_models/weights/layers.48.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
160
+ triton_models/weights/layers.52.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
161
+ triton_models/weights/layers.19.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
162
+ triton_models/weights/layers.46.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
163
+ triton_models/weights/layers.26.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
164
+ triton_models/weights/layers.29.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
165
+ triton_models/weights/layers.3.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
166
+ triton_models/weights/layers.53.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
167
+ triton_models/weights/layers.57.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
168
+ triton_models/weights/layers.20.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
169
+ triton_models/weights/layers.3.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
170
+ triton_models/weights/layers.44.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
171
+ triton_models/weights/layers.36.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
172
+ triton_models/weights/layers.21.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
173
+ triton_models/weights/layers.24.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
174
+ triton_models/weights/layers.31.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
175
+ triton_models/weights/layers.35.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
176
+ triton_models/weights/layers.51.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
177
+ triton_models/weights/layers.53.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
178
+ triton_models/weights/layers.59.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
179
+ triton_models/weights/layers.14.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
180
+ triton_models/weights/layers.9.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
181
+ triton_models/weights/layers.21.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
182
+ triton_models/weights/layers.39.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
183
+ triton_models/weights/layers.43.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
184
+ triton_models/weights/layers.48.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
185
+ triton_models/weights/layers.4.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
186
+ triton_models/weights/layers.57.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
187
+ triton_models/weights/layers.19.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
188
+ triton_models/weights/layers.27.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
189
+ triton_models/weights/layers.30.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
190
+ triton_models/weights/layers.34.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
191
+ triton_models/weights/layers.58.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
192
+ triton_models/weights/layers.24.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
193
+ triton_models/weights/layers.46.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
194
+ triton_models/weights/layers.47.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
195
+ triton_models/weights/layers.53.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
196
+ triton_models/weights/layers.7.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
197
+ triton_models/weights/layers.22.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
198
+ triton_models/weights/layers.16.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
199
+ triton_models/weights/layers.23.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
200
+ triton_models/weights/layers.41.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
201
+ triton_models/weights/layers.6.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
202
+ triton_models/weights/layers.8.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
203
+ triton_models/weights/layers.16.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
204
+ triton_models/weights/layers.28.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
205
+ triton_models/weights/layers.16.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
206
+ triton_models/weights/layers.43.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
207
+ triton_models/weights/layers.1.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
208
+ triton_models/weights/layers.26.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
209
+ triton_models/weights/layers.31.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
210
+ triton_models/weights/layers.55.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
211
+ triton_models/weights/layers.6.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
212
+ triton_models/weights/layers.18.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
213
+ triton_models/weights/layers.47.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
214
+ triton_models/weights/layers.4.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
215
+ triton_models/weights/layers.50.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
216
+ triton_models/weights/layers.15.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
217
+ triton_models/weights/layers.2.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
218
+ triton_models/weights/layers.40.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
219
+ triton_models/weights/layers.45.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
220
+ triton_models/weights/layers.7.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
221
+ triton_models/weights/layers.18.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
222
+ triton_models/weights/layers.31.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
223
+ triton_models/weights/layers.40.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
224
+ triton_models/weights/layers.19.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
225
+ triton_models/weights/layers.27.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
226
+ triton_models/weights/layers.47.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
227
+ triton_models/weights/layers.51.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
228
+ triton_models/weights/layers.59.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
229
+ triton_models/weights/layers.21.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
230
+ triton_models/weights/layers.41.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
231
+ triton_models/weights/layers.34.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
232
+ triton_models/weights/layers.44.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
233
+ triton_models/weights/layers.13.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
234
+ triton_models/weights/layers.38.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
235
+ triton_models/weights/layers.40.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
236
+ triton_models/weights/layers.50.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
237
+ triton_models/weights/layers.56.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
238
+ triton_models/weights/layers.37.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
239
+ triton_models/weights/layers.47.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
240
+ triton_models/weights/layers.49.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
241
+ triton_models/weights/layers.51.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
242
+ triton_models/weights/layers.53.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
243
+ triton_models/weights/layers.55.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
244
+ triton_models/weights/layers.38.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
245
+ triton_models/weights/layers.17.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
246
+ triton_models/weights/layers.53.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
247
+ triton_models/weights/layers.11.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
248
+ triton_models/weights/layers.26.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
249
+ triton_models/weights/layers.33.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
250
+ triton_models/weights/layers.47.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
251
+ triton_models/weights/layers.47.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
252
+ triton_models/weights/layers.26.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
253
+ triton_models/weights/layers.14.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
254
+ triton_models/weights/layers.27.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
255
+ triton_models/weights/layers.36.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
256
+ triton_models/weights/layers.12.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
257
+ triton_models/weights/layers.20.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
258
+ triton_models/weights/layers.21.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
259
+ triton_models/weights/layers.41.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
260
+ triton_models/weights/layers.41.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
261
+ triton_models/weights/layers.43.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
262
+ triton_models/weights/layers.45.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
263
+ triton_models/weights/layers.58.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
264
+ triton_models/weights/layers.14.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
265
+ triton_models/weights/layers.20.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
266
+ triton_models/weights/layers.38.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
267
+ triton_models/weights/layers.41.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
268
+ triton_models/weights/layers.49.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
269
+ triton_models/weights/layers.7.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
270
+ triton_models/weights/layers.9.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
271
+ triton_models/weights/layers.0.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
272
+ triton_models/weights/layers.42.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
273
+ triton_models/weights/layers.0.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
274
+ triton_models/weights/layers.18.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
275
+ triton_models/weights/layers.29.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
276
+ triton_models/weights/layers.34.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
277
+ triton_models/weights/layers.39.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
278
+ triton_models/weights/config.ini filter=lfs diff=lfs merge=lfs -text
279
+ triton_models/weights/layers.46.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
280
+ triton_models/weights/layers.9.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
281
+ triton_models/weights/layers.3.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
282
+ triton_models/weights/layers.18.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
283
+ triton_models/weights/layers.22.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
284
+ triton_models/weights/layers.22.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
285
+ triton_models/weights/layers.34.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
286
+ triton_models/weights/layers.49.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
287
+ triton_models/weights/layers.53.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
288
+ triton_models/weights/layers.5.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
289
+ triton_models/weights/layers.0.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
290
+ triton_models/weights/layers.9.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
291
+ triton_models/weights/layers.22.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
292
+ triton_models/weights/layers.4.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
293
+ triton_models/weights/layers.10.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
294
+ triton_models/weights/layers.35.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
295
+ triton_models/weights/layers.39.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
296
+ triton_models/weights/layers.46.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
297
+ triton_models/weights/layers.57.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
298
+ triton_models/weights/layers.6.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
299
+ triton_models/weights/layers.22.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
300
+ triton_models/weights/layers.10.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
301
+ triton_models/weights/layers.36.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
302
+ triton_models/weights/layers.37.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
303
+ triton_models/weights/layers.38.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
304
+ triton_models/weights/layers.41.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
305
+ triton_models/weights/layers.10.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
306
+ triton_models/weights/layers.13.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
307
+ triton_models/weights/layers.25.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
308
+ triton_models/weights/layers.33.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
309
+ triton_models/weights/layers.36.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
310
+ triton_models/weights/layers.51.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
311
+ triton_models/weights/layers.56.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
312
+ triton_models/weights/layers.0.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
313
+ triton_models/weights/layers.15.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
314
+ triton_models/weights/layers.29.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
315
+ triton_models/weights/layers.48.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
316
+ triton_models/weights/layers.6.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
317
+ triton_models/weights/layers.8.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
318
+ triton_models/weights/layers.11.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
319
+ triton_models/weights/layers.12.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
320
+ triton_models/weights/layers.0.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
321
+ triton_models/weights/layers.38.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
322
+ triton_models/weights/layers.37.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
323
+ triton_models/weights/layers.49.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
324
+ triton_models/weights/layers.55.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
325
+ triton_models/weights/layers.5.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
326
+ triton_models/weights/layers.40.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
327
+ triton_models/weights/layers.43.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
328
+ triton_models/weights/layers.4.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
329
+ triton_models/weights/layers.58.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
330
+ triton_models/weights/layers.8.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
331
+ triton_models/weights/layers.15.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
332
+ triton_models/weights/layers.56.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
333
+ triton_models/weights/layers.42.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
334
+ triton_models/weights/layers.14.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
335
+ triton_models/weights/layers.27.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
336
+ triton_models/weights/layers.28.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
337
+ triton_models/weights/layers.28.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
338
+ triton_models/weights/layers.55.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
339
+ triton_models/weights/layers.7.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
340
+ triton_models/weights/layers.12.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
341
+ triton_models/weights/layers.21.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
342
+ triton_models/weights/layers.31.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
343
+ triton_models/weights/layers.10.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
344
+ triton_models/weights/layers.10.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
345
+ triton_models/weights/layers.11.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
346
+ triton_models/weights/layers.27.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
347
+ triton_models/weights/layers.29.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
348
+ triton_models/weights/layers.0.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
349
+ triton_models/weights/layers.15.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
350
+ triton_models/weights/layers.17.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
351
+ triton_models/weights/layers.32.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
352
+ triton_models/weights/layers.33.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
353
+ triton_models/weights/layers.0.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
354
+ triton_models/weights/layers.1.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
355
+ triton_models/weights/layers.30.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
356
+ triton_models/weights/layers.35.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
357
+ triton_models/weights/layers.54.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
358
+ triton_models/weights/layers.57.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
359
+ triton_models/weights/layers.9.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
360
+ triton_models/weights/layers.15.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
361
+ triton_models/weights/layers.33.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
362
+ triton_models/weights/layers.58.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
363
+ triton_models/weights/layers.32.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
364
+ triton_models/weights/layers.54.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
365
+ triton_models/weights/layers.35.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
366
+ triton_models/weights/layers.24.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
367
+ triton_models/weights/layers.28.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
368
+ triton_models/weights/layers.2.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
369
+ triton_models/weights/layers.56.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
370
+ triton_models/weights/layers.6.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
371
+ triton_models/weights/layers.19.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
372
+ triton_models/weights/layers.23.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
373
+ triton_models/weights/layers.31.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
374
+ triton_models/weights/layers.59.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
375
+ triton_models/weights/layers.9.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
376
+ triton_models/weights/layers.13.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
377
+ triton_models/weights/layers.2.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
378
+ triton_models/weights/layers.30.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
379
+ triton_models/weights/layers.32.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
380
+ triton_models/weights/layers.42.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
381
+ triton_models/weights/layers.58.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
382
+ triton_models/weights/layers.21.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
383
+ triton_models/weights/layers.36.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
384
+ triton_models/weights/layers.44.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
385
+ triton_models/weights/layers.2.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
386
+ triton_models/weights/layers.33.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
387
+ triton_models/weights/layers.3.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
388
+ triton_models/weights/layers.49.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
389
+ triton_models/weights/layers.57.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
390
+ triton_models/weights/layers.9.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
391
+ triton_models/weights/layers.11.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
392
+ triton_models/weights/layers.33.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
393
+ triton_models/weights/layers.36.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
394
+ triton_models/weights/layers.43.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
395
+ triton_models/weights/layers.43.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
396
+ triton_models/weights/layers.45.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
397
+ triton_models/weights/layers.50.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
398
+ triton_models/weights/layers.53.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
399
+ triton_models/weights/layers.14.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
400
+ triton_models/weights/layers.8.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
401
+ triton_models/weights/layers.34.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
402
+ triton_models/weights/layers.42.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
403
+ triton_models/weights/layers.49.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
404
+ triton_models/weights/layers.51.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
405
+ triton_models/weights/layers.10.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
406
+ triton_models/weights/layers.25.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
407
+ triton_models/weights/layers.38.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
408
+ triton_models/weights/layers.39.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
409
+ triton_models/weights/layers.3.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
410
+ triton_models/weights/layers.6.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
411
+ triton_models/weights/layers.9.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
412
+ triton_models/weights/layers.24.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
413
+ triton_models/weights/layers.19.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
414
+ triton_models/weights/layers.1.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
415
+ triton_models/weights/layers.24.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
416
+ triton_models/weights/layers.29.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
417
+ triton_models/weights/layers.32.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
418
+ triton_models/weights/layers.57.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
419
+ triton_models/weights/layers.15.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
420
+ triton_models/weights/layers.29.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
421
+ triton_models/weights/layers.51.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
422
+ triton_models/weights/layers.7.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
423
+ triton_models/weights/layers.26.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
424
+ triton_models/weights/layers.52.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
425
+ triton_models/weights/layers.44.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
426
+ triton_models/weights/layers.48.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
427
+ triton_models/weights/layers.55.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
428
+ triton_models/weights/layers.59.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
429
+ triton_models/weights/layers.22.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
430
+ triton_models/weights/layers.2.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
431
+ triton_models/weights/layers.35.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
432
+ triton_models/weights/layers.36.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
433
+ triton_models/weights/layers.55.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
434
+ triton_models/weights/layers.26.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
435
+ triton_models/weights/layers.30.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
436
+ triton_models/weights/layers.31.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
437
+ triton_models/weights/layers.42.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
438
+ triton_models/weights/layers.46.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
439
+ triton_models/weights/layers.50.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
440
+ triton_models/weights/layers.28.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
441
+ triton_models/weights/layers.37.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
442
+ triton_models/weights/layers.51.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
443
+ triton_models/weights/layers.16.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
444
+ triton_models/weights/layers.14.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
445
+ triton_models/weights/layers.19.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
446
+ triton_models/weights/layers.37.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
447
+ triton_models/weights/layers.4.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
448
+ triton_models/weights/layers.58.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
449
+ triton_models/weights/layers.12.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
450
+ triton_models/weights/layers.34.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
451
+ triton_models/weights/layers.44.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
452
+ triton_models/weights/layers.55.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
453
+ triton_models/weights/layers.57.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
454
+ triton_models/weights/layers.9.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
455
+ triton_models/weights/layers.25.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
456
+ triton_models/weights/layers.1.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
457
+ triton_models/weights/layers.23.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
458
+ triton_models/weights/layers.28.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
459
+ triton_models/weights/layers.30.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
460
+ triton_models/weights/layers.35.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
461
+ triton_models/weights/layers.49.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
462
+ triton_models/weights/layers.59.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
463
+ triton_models/weights/layers.12.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
464
+ triton_models/weights/layers.17.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
465
+ triton_models/weights/layers.41.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
466
+ triton_models/weights/layers.5.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
467
+ triton_models/weights/layers.13.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
468
+ triton_models/weights/layers.15.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
469
+ triton_models/weights/layers.15.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
470
+ triton_models/weights/layers.26.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
471
+ triton_models/weights/layers.40.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
472
+ triton_models/weights/layers.43.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
473
+ triton_models/weights/layers.4.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
474
+ triton_models/weights/layers.54.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
475
+ triton_models/weights/layers.12.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
476
+ triton_models/weights/layers.5.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
477
+ triton_models/weights/layers.56.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
478
+ triton_models/weights/layers.29.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
479
+ triton_models/weights/layers.36.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
480
+ triton_models/weights/layers.8.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
481
+ triton_models/weights/layers.26.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
482
+ triton_models/weights/layers.16.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
483
+ triton_models/weights/layers.31.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
484
+ triton_models/weights/layers.33.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
485
+ triton_models/weights/layers.12.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
486
+ triton_models/weights/layers.17.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
487
+ triton_models/weights/layers.55.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
488
+ triton_models/weights/layers.13.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
489
+ triton_models/weights/layers.25.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
490
+ triton_models/weights/layers.41.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
491
+ triton_models/weights/layers.17.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
492
+ triton_models/weights/layers.34.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
493
+ triton_models/weights/layers.38.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
494
+ triton_models/weights/layers.50.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
495
+ triton_models/weights/layers.5.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
496
+ triton_models/weights/layers.0.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
497
+ triton_models/weights/layers.31.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
498
+ triton_models/weights/layers.38.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
499
+ triton_models/weights/layers.40.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
500
+ triton_models/weights/layers.50.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
501
+ triton_models/weights/layers.56.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
502
+ triton_models/weights/layers.23.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
503
+ triton_models/weights/layers.52.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
504
+ triton_models/weights/layers.40.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
505
+ triton_models/weights/layers.7.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
506
+ triton_models/weights/layers.27.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
507
+ triton_models/weights/layers.21.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
508
+ triton_models/weights/layers.36.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
509
+ triton_models/weights/layers.4.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
510
+ triton_models/weights/layers.12.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
511
+ triton_models/weights/layers.38.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
512
+ triton_models/weights/layers.52.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
513
+ triton_models/weights/layers.21.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
514
+ triton_models/weights/layers.16.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
515
+ triton_models/weights/layers.19.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
516
+ triton_models/weights/layers.31.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
517
+ triton_models/weights/layers.4.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
518
+ triton_models/weights/layers.50.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
519
+ triton_models/weights/layers.10.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
520
+ triton_models/weights/layers.1.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
521
+ triton_models/weights/layers.23.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
522
+ triton_models/weights/layers.52.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
523
+ triton_models/weights/layers.53.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
524
+ triton_models/weights/layers.54.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
525
+ triton_models/weights/layers.0.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
526
+ triton_models/weights/layers.46.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
527
+ triton_models/weights/layers.51.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
528
+ triton_models/weights/layers.2.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
529
+ triton_models/weights/layers.20.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
530
+ triton_models/weights/layers.43.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
531
+ triton_models/weights/layers.45.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
532
+ triton_models/weights/layers.16.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
533
+ triton_models/weights/layers.12.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
534
+ triton_models/weights/layers.39.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
535
+ triton_models/weights/layers.54.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
536
+ triton_models/weights/layers.54.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
537
+ triton_models/weights/layers.11.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
538
+ triton_models/weights/layers.19.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
539
+ triton_models/weights/layers.59.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
540
+ triton_models/weights/layers.18.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
541
+ triton_models/weights/layers.32.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
542
+ triton_models/weights/layers.50.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
543
+ triton_models/weights/layers.18.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
544
+ triton_models/weights/layers.17.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
545
+ triton_models/weights/layers.19.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
546
+ triton_models/weights/layers.29.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
547
+ triton_models/weights/layers.47.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
548
+ triton_models/weights/layers.58.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
549
+ triton_models/weights/layers.13.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
550
+ triton_models/weights/layers.49.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
551
+ triton_models/weights/layers.52.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
552
+ triton_models/weights/layers.56.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
553
+ triton_models/weights/layers.58.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
554
+ triton_models/weights/layers.6.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
555
+ triton_models/weights/layers.8.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
556
+ triton_models/weights/layers.9.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
557
+ triton_models/weights/layers.48.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
558
+ triton_models/weights/layers.33.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
559
+ triton_models/weights/layers.14.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
560
+ triton_models/weights/layers.22.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
561
+ triton_models/weights/layers.30.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
562
+ triton_models/weights/layers.44.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
563
+ triton_models/weights/layers.4.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
564
+ triton_models/weights/layers.52.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
565
+ triton_models/weights/layers.59.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
566
+ triton_models/weights/layers.12.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
567
+ triton_models/weights/layers.24.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
568
+ triton_models/weights/layers.52.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
569
+ triton_models/weights/layers.57.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
570
+ triton_models/weights/layers.58.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
571
+ triton_models/weights/layers.10.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
572
+ triton_models/weights/layers.1.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
573
+ triton_models/weights/layers.1.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
574
+ triton_models/weights/layers.2.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
575
+ triton_models/weights/layers.39.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
576
+ triton_models/weights/layers.11.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
577
+ triton_models/weights/layers.20.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
578
+ triton_models/weights/layers.34.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
579
+ triton_models/weights/layers.42.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
580
+ triton_models/weights/layers.5.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
581
+ triton_models/weights/layers.1.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
582
+ triton_models/weights/layers.20.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
583
+ triton_models/weights/layers.25.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
584
+ triton_models/weights/layers.30.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
585
+ triton_models/weights/layers.35.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
586
+ triton_models/weights/layers.37.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
587
+ triton_models/weights/layers.37.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
588
+ triton_models/weights/output.weight filter=lfs diff=lfs merge=lfs -text
589
+ triton_models/weights/layers.11.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
590
+ triton_models/weights/layers.25.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
591
+ triton_models/weights/layers.27.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
592
+ triton_models/weights/layers.29.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
593
+ triton_models/weights/layers.45.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
594
+ triton_models/weights/layers.45.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
595
+ triton_models/weights/layers.47.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
596
+ triton_models/weights/layers.54.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
597
+ triton_models/weights/layers.22.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
598
+ triton_models/weights/layers.5.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
599
+ triton_models/weights/layers.20.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
600
+ triton_models/weights/layers.24.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
601
+ triton_models/weights/layers.39.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
602
+ triton_models/weights/layers.59.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
603
+ triton_models/weights/layers.18.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
604
+ triton_models/weights/layers.28.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
605
+ triton_models/weights/layers.3.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
606
+ triton_models/weights/layers.46.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
607
+ triton_models/weights/layers.56.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
608
+ triton_models/weights/layers.25.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
609
+ triton_models/weights/layers.28.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
610
+ triton_models/weights/layers.30.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
611
+ triton_models/weights/layers.38.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
612
+ triton_models/weights/layers.10.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
613
+ triton_models/weights/layers.1.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
614
+ triton_models/weights/layers.27.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
615
+ triton_models/weights/layers.37.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
616
+ triton_models/weights/layers.11.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
617
+ triton_models/weights/layers.43.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
618
+ triton_models/weights/layers.49.feed_forward.w2.0.qweight filter=lfs diff=lfs merge=lfs -text
619
+ triton_models/weights/layers.6.feed_forward.w13.0.qweight filter=lfs diff=lfs merge=lfs -text
620
+ triton_models/weights/layers.14.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
621
+ triton_models/weights/layers.24.ffn_norm.weight filter=lfs diff=lfs merge=lfs -text
622
+ triton_models/weights/layers.34.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
623
+ triton_models/weights/layers.45.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
624
+ triton_models/weights/layers.22.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
625
+ triton_models/weights/layers.26.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
626
+ triton_models/weights/layers.33.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
627
+ triton_models/weights/layers.53.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
628
+ triton_models/weights/layers.16.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
629
+ triton_models/weights/layers.31.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
630
+ triton_models/weights/layers.32.feed_forward.w13.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
631
+ triton_models/weights/layers.46.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
632
+ triton_models/weights/layers.48.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
633
+ triton_models/weights/layers.15.attention.wo.0.qweight filter=lfs diff=lfs merge=lfs -text
634
+ triton_models/weights/layers.35.attention_norm.weight filter=lfs diff=lfs merge=lfs -text
635
+ triton_models/weights/layers.57.attention.w_qkv.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
636
+ triton_models/weights/layers.59.feed_forward.w2.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
637
+ triton_models/weights/layers.5.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
638
+ triton_models/weights/layers.17.attention.w_qkv.0.qweight filter=lfs diff=lfs merge=lfs -text
639
+ triton_models/weights/layers.32.attention.wo.0.scales_zeros filter=lfs diff=lfs merge=lfs -text
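Every weight shard and tokenizer blob listed above is routed through Git LFS, so a plain `git clone` without LFS support only yields pointer stubs. A minimal sketch of pulling the complete snapshot with `huggingface_hub` (the repo id below is a placeholder, not part of this commit):

```python
# Sketch: fetch the whole repository, including LFS-tracked weight files.
# "some-org/internlm-chat-20b-w4" is a placeholder repo id.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(repo_id="some-org/internlm-chat-20b-w4")
print("model repository downloaded to", local_dir)
```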
model_repository/postprocessing ADDED
@@ -0,0 +1 @@
1
+ ../triton_models/postprocessing
model_repository/preprocessing ADDED
@@ -0,0 +1 @@
1
+ ../triton_models/preprocessing
model_repository/turbomind ADDED
@@ -0,0 +1 @@
1
+ ../triton_models/interactive
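The three entries under `model_repository/` are relative symlinks into `triton_models/`, which is how Triton sees `preprocessing`, `postprocessing` and the `turbomind` engine as three models in one repository. A hedged sketch for recreating the links if they get lost (for example after an archive tool drops symlinks):

```python
# Sketch: recreate the model_repository symlinks with the same relative
# targets as in this commit.
import os

links = {
    "model_repository/preprocessing": "../triton_models/preprocessing",
    "model_repository/postprocessing": "../triton_models/postprocessing",
    "model_repository/turbomind": "../triton_models/interactive",
}
os.makedirs("model_repository", exist_ok=True)
for link, target in links.items():
    if not os.path.lexists(link):
        os.symlink(target, link)
```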
service_docker_up.sh ADDED
@@ -0,0 +1,87 @@
1
+ #!/bin/bash
2
+
3
+ show_help() {
4
+ echo "Usage: $0 [-h] [--help] [-l] [--lib-dir]"
5
+ echo
6
+ echo "Options:"
7
+ echo " -h, --help Show this help message and exit"
8
+ echo " --lib-dir Specify the directory of turbomind libraries"
9
+ }
10
+
11
+ # check if '-h' or '--help' in the arguments
12
+ for arg in "$@"
13
+ do
14
+ if [ "$arg" == "-h" ] || [ "$arg" == "--help" ]; then
15
+ show_help
16
+ exit 0
17
+ fi
18
+ done
19
+
20
+
21
+ TP=1
22
+ DEVICES="0"
23
+ for ((i = 1; i < ${TP}; ++i)); do
24
+ DEVICES="${DEVICES},$i"
25
+ done
26
+ DEVICES="\"device=${DEVICES}\""
27
+
28
+
29
+ SCRIPT_DIR="$(dirname "$0")"
30
+ SCRIPT_ABS_DIR="$(realpath "$SCRIPT_DIR")"
31
+
32
+
33
+ if [ -z "$1" ]; then
34
+ docker run \
35
+ --gpus $DEVICES \
36
+ --rm \
37
+ -v "${SCRIPT_ABS_DIR}":/workspace/models \
38
+ --shm-size 16g \
39
+ -p 33336:22 \
40
+ -p 33337-33400:33337-33400 \
41
+ --cap-add=SYS_PTRACE \
42
+ --cap-add=SYS_ADMIN \
43
+ --security-opt seccomp=unconfined \
44
+ --name lmdeploy \
45
+ -it --env NCCL_LAUNCH_MODE=GROUP openmmlab/lmdeploy:latest \
46
+ tritonserver \
47
+ --model-repository=/workspace/models/model_repository \
48
+ --allow-http=0 \
49
+ --allow-grpc=1 \
50
+ --grpc-port=33337 \
51
+ --log-verbose=0 \
52
+ --allow-metrics=1
53
+ fi
54
+
55
+ for ((i = 1; i <= $#; i++)); do
56
+ arg=${!i}
57
+ case "$arg" in
58
+ --lib-dir)
59
+ if [ "$i" -eq "$#" ]; then
60
+ show_help
61
+ exit 1
62
+ fi
63
+ LIB_PATH=${@:i+1:1}
64
+ docker run \
65
+ --gpus $DEVICES \
66
+ --rm \
67
+ -v "${LIB_PATH}":/opt/tritonserver/backends/turbomind \
68
+ -v "${SCRIPT_ABS_DIR}":/workspace/models \
69
+ --shm-size 16g \
70
+ -p 33336:22 \
71
+ -p 33337-33400:33337-33400 \
72
+ --cap-add=SYS_PTRACE \
73
+ --cap-add=SYS_ADMIN \
74
+ --security-opt seccomp=unconfined \
75
+ --name lmdeploy \
76
+ -it --env NCCL_LAUNCH_MODE=GROUP openmmlab/lmdeploy:latest \
77
+ tritonserver \
78
+ --model-repository=/workspace/models/model_repository \
79
+ --allow-http=0 \
80
+ --allow-grpc=1 \
81
+ --grpc-port=33337 \
82
+ --log-verbose=0 \
83
+ --allow-metrics=1
84
+ break
85
+ ;;
86
+ esac
87
+ done
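The script disables HTTP and exposes Triton's gRPC endpoint on port 33337, so a quick way to confirm the service came up is a readiness probe from the host. A minimal sketch, assuming the container started by `service_docker_up.sh` is running locally:

```python
# Sketch: probe the gRPC endpoint published by service_docker_up.sh.
import tritonclient.grpc as grpcclient

client = grpcclient.InferenceServerClient(url="localhost:33337")
print("server ready:", client.is_server_ready())
for name in ("preprocessing", "turbomind", "postprocessing"):
    print(name, "ready:", client.is_model_ready(name))
```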
triton_models/interactive/1/placeholder ADDED
File without changes
triton_models/interactive/1/weights ADDED
@@ -0,0 +1 @@
1
+ ../../weights
triton_models/interactive/config.pbtxt ADDED
@@ -0,0 +1,281 @@
1
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # Redistribution and use in source and binary forms, with or without
4
+ # modification, are permitted provided that the following conditions
5
+ # are met:
6
+ # * Redistributions of source code must retain the above copyright
7
+ # notice, this list of conditions and the following disclaimer.
8
+ # * Redistributions in binary form must reproduce the above copyright
9
+ # notice, this list of conditions and the following disclaimer in the
10
+ # documentation and/or other materials provided with the distribution.
11
+ # * Neither the name of NVIDIA CORPORATION nor the names of its
12
+ # contributors may be used to endorse or promote products derived
13
+ # from this software without specific prior written permission.
14
+ #
15
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
16
+ # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
+ # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18
+ # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
19
+ # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20
+ # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21
+ # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22
+ # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23
+ # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
+ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25
+ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
+
27
+ name: "turbomind"
28
+ backend: "turbomind"
29
+ default_model_filename: "weights"
30
+ max_batch_size: 1
31
+
32
+ model_transaction_policy {
33
+ decoupled: True
34
+ }
35
+
36
+ instance_group [
37
+ {
38
+ # max concurrent instances
39
+ count: 48
40
+ kind: KIND_CPU
41
+ }
42
+ ]
43
+
44
+ input [
45
+ {
46
+ name: "input_ids"
47
+ data_type: TYPE_UINT32
48
+ dims: [ -1 ]
49
+ # allow_ragged_batch: true
50
+ },
51
+ {
52
+ name: "input_lengths"
53
+ data_type: TYPE_UINT32
54
+ dims: [ 1 ]
55
+ reshape: { shape: [ ] }
56
+ },
57
+ {
58
+ name: "request_output_len"
59
+ data_type: TYPE_UINT32
60
+ dims: [ -1 ]
61
+ },
62
+ {
63
+ name: "step"
64
+ data_type: TYPE_INT32
65
+ dims: [ 1 ]
66
+ reshape: { shape: [ ] }
67
+ optional: true
68
+ },
69
+ {
70
+ name: "session_len"
71
+ data_type: TYPE_UINT32
72
+ dims: [ 1 ]
73
+ reshape: { shape: [ ] }
74
+ optional: true
75
+ },
76
+ {
77
+ name: "runtime_top_k"
78
+ data_type: TYPE_UINT32
79
+ dims: [ 1 ]
80
+ reshape: { shape: [ ] }
81
+ optional: true
82
+ },
83
+ {
84
+ name: "runtime_top_p"
85
+ data_type: TYPE_FP32
86
+ dims: [ 1 ]
87
+ reshape: { shape: [ ] }
88
+ optional: true
89
+ },
90
+ {
91
+ name: "beam_search_diversity_rate"
92
+ data_type: TYPE_FP32
93
+ dims: [ 1 ]
94
+ reshape: { shape: [ ] }
95
+ optional: true
96
+ },
97
+ {
98
+ name: "temperature"
99
+ data_type: TYPE_FP32
100
+ dims: [ 1 ]
101
+ reshape: { shape: [ ] }
102
+ optional: true
103
+ },
104
+ {
105
+ name: "len_penalty"
106
+ data_type: TYPE_FP32
107
+ dims: [ 1 ]
108
+ reshape: { shape: [ ] }
109
+ optional: true
110
+ },
111
+ {
112
+ name: "repetition_penalty"
113
+ data_type: TYPE_FP32
114
+ dims: [ 1 ]
115
+ reshape: { shape: [ ] }
116
+ optional: true
117
+ },
118
+ {
119
+ name: "random_seed"
120
+ data_type: TYPE_UINT64
121
+ dims: [ 1 ]
122
+ reshape: { shape: [ ] }
123
+ optional: true
124
+ },
125
+ {
126
+ name: "is_return_log_probs"
127
+ data_type: TYPE_BOOL
128
+ dims: [ 1 ]
129
+ reshape: { shape: [ ] }
130
+ optional: true
131
+ },
132
+ {
133
+ name: "beam_width"
134
+ data_type: TYPE_UINT32
135
+ dims: [ 1 ]
136
+ reshape: { shape: [ ] }
137
+ optional: true
138
+ },
139
+ {
140
+ name: "start_id"
141
+ data_type: TYPE_UINT32
142
+ dims: [ 1 ]
143
+ reshape: { shape: [ ] }
144
+ optional: true
145
+ },
146
+ {
147
+ name: "end_id"
148
+ data_type: TYPE_UINT32
149
+ dims: [ 1 ]
150
+ reshape: { shape: [ ] }
151
+ optional: true
152
+ },
153
+ {
154
+ name: "bad_words_list"
155
+ data_type: TYPE_INT32
156
+ dims: [ 2, -1 ]
157
+ optional: true
158
+ },
159
+ {
160
+ name: "stop_words_list"
161
+ data_type: TYPE_INT32
162
+ dims: [ 2, -1 ]
163
+ optional: true
164
+ },
165
+ {
166
+ name: "prompt_learning_task_name_ids"
167
+ data_type: TYPE_UINT32
168
+ dims: [ 1 ]
169
+ reshape: { shape: [ ] }
170
+ optional: true
171
+ },
172
+ {
173
+ name: "top_p_decay"
174
+ data_type: TYPE_FP32
175
+ dims: [ 1 ]
176
+ reshape: { shape: [ ] }
177
+ optional: true
178
+ },
179
+ {
180
+ name: "top_p_min"
181
+ data_type: TYPE_FP32
182
+ dims: [ 1 ]
183
+ reshape: { shape: [ ] }
184
+ optional: true
185
+ },
186
+ {
187
+ name: "top_p_reset_ids"
188
+ data_type: TYPE_UINT32
189
+ dims: [ 1 ]
190
+ reshape: { shape: [ ] }
191
+ optional: true
192
+ },
193
+ {
194
+ name: "START"
195
+ data_type: TYPE_INT32
196
+ dims: [ 1 ]
197
+ reshape: { shape: [ ] }
198
+ optional: true
199
+ },
200
+ {
201
+ name: "END"
202
+ data_type: TYPE_INT32
203
+ dims: [ 1 ]
204
+ reshape: { shape: [ ] }
205
+ optional: true
206
+ },
207
+ {
208
+ name: "STOP"
209
+ data_type: TYPE_INT32
210
+ dims: [ 1 ]
211
+ reshape: { shape: [ ] }
212
+ optional: true
213
+ },
214
+ {
215
+ name: "CORRID"
216
+ data_type: TYPE_UINT64
217
+ dims: [ 1 ]
218
+ reshape: { shape: [ ] }
219
+ optional: true
220
+ }
221
+ ]
222
+ output [
223
+ {
224
+ name: "output_ids"
225
+ data_type: TYPE_UINT32
226
+ dims: [ -1, -1 ]
227
+ },
228
+ {
229
+ name: "sequence_length"
230
+ data_type: TYPE_UINT32
231
+ dims: [ -1 ]
232
+ },
233
+ {
234
+ name: "cum_log_probs"
235
+ data_type: TYPE_FP32
236
+ dims: [ -1 ]
237
+ },
238
+ {
239
+ name: "output_log_probs"
240
+ data_type: TYPE_FP32
241
+ dims: [ -1, -1 ]
242
+ }
243
+ ]
244
+
245
+ parameters {
246
+ key: "pipeline_para_size"
247
+ value: {
248
+ string_value: "1"
249
+ }
250
+ }
251
+ parameters {
252
+ key: "data_type"
253
+ value: {
254
+ string_value: "fp16"
255
+ }
256
+ }
257
+ parameters {
258
+ key: "model_type"
259
+ value: {
260
+ string_value: "Llama"
261
+ }
262
+ }
263
+
264
+ parameters {
265
+ key: "enable_custom_all_reduce"
266
+ value: {
267
+ string_value: "0"
268
+ }
269
+ }
270
+ parameters {
271
+ key: "tensor_para_size"
272
+ value: {
273
+ string_value: "1"
274
+ }
275
+ }
276
+ parameters {
277
+ key: "model_name"
278
+ value: {
279
+ string_value: "internlm-chat-7b"
280
+ }
281
+ }
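Because `model_transaction_policy { decoupled: True }` is set, the `turbomind` model must be driven through Triton's streaming gRPC API rather than a plain synchronous `infer`. A hedged sketch of the required tensors and the streaming call follows; the token ids are placeholders (in the real pipeline they come from the `preprocessing` model), and lmdeploy's own client additionally manages the optional session inputs such as `CORRID`, `START`, `END` and `STOP`:

```python
# Sketch: stream one request to the decoupled "turbomind" model.
import numpy as np
import tritonclient.grpc as grpcclient

def on_result(result, error):
    if error is not None:
        print("error:", error)
        return
    print("output_ids:", result.as_numpy("output_ids"))
    print("sequence_length:", result.as_numpy("sequence_length"))

input_ids = np.array([[1, 333, 352]], dtype=np.uint32)  # [batch, seq], placeholder ids
inputs = [
    grpcclient.InferInput("input_ids", list(input_ids.shape), "UINT32"),
    grpcclient.InferInput("input_lengths", [1, 1], "UINT32"),
    grpcclient.InferInput("request_output_len", [1, 1], "UINT32"),
]
inputs[0].set_data_from_numpy(input_ids)
inputs[1].set_data_from_numpy(np.array([[input_ids.shape[1]]], dtype=np.uint32))
inputs[2].set_data_from_numpy(np.array([[64]], dtype=np.uint32))

client = grpcclient.InferenceServerClient(url="localhost:33337")
client.start_stream(callback=on_result)
client.async_stream_infer(model_name="turbomind", inputs=inputs, request_id="1")
client.stop_stream()  # waits for in-flight responses before closing
```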
triton_models/postprocessing/1/model.py ADDED
@@ -0,0 +1,129 @@
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import json
3
+ import os.path as osp
4
+ from pathlib import Path
5
+
6
+ import numpy as np
7
+ import triton_python_backend_utils as pb_utils
8
+
9
+ # This tokenizer is `lmdeploy/turbomind/tokenizer.py`. When an LLM is served
10
+ # by triton inference server, it has to be converted first by running
11
+ # `python lmdeploy/serve/turbomind/deploy.py`. Then
12
+ # `lmdeploy/turbomind/tokenizer.py` will be copied to `tokenizer/tokenizer.py`
13
+ from .tokenizer.tokenizer import Tokenizer
14
+
15
+
16
+ class TritonPythonModel:
17
+ """Your Python model must use the same class name.
18
+
19
+ Every Python model that is created must have "TritonPythonModel" as the
20
+ class name.
21
+ """
22
+
23
+ def initialize(self, args):
24
+ """`initialize` is called only once when the model is being loaded.
25
+ Implementing `initialize` function is optional. This function allows
26
+ the model to initialize any state associated with this model.
27
+ Parameters
28
+ ----------
29
+ args : dict
30
+ Both keys and values are strings. The dictionary keys and values are:
31
+ * model_config: A JSON string containing the model configuration
32
+ * model_instance_kind: A string containing model instance kind
33
+ * model_instance_device_id: A string containing model instance device
34
+ ID
35
+ * model_repository: Model repository path
36
+ * model_version: Model version
37
+ * model_name: Model name
38
+ """
39
+ # Parse model configs
40
+ self.model_config = model_config = json.loads(args['model_config'])
41
+
42
+ # Parse model output configs
43
+ output_config = pb_utils.get_output_config_by_name(
44
+ model_config, 'OUTPUT')
45
+
46
+ # Convert Triton types to numpy types
47
+ self.output_dtype = pb_utils.triton_string_to_numpy(
48
+ output_config['data_type'])
49
+
50
+ cur_folder = Path(__file__).parent
51
+
52
+ self.tokenizer = Tokenizer(
53
+ osp.join(
54
+ cur_folder, self.model_config['parameters']['tokenizer_path']
55
+ ['string_value']))
56
+
57
+ def execute(self, requests):
58
+ """`execute` must be implemented in every Python model. `execute`
59
+ function receives a list of pb_utils.InferenceRequest as the only
60
+ argument. This function is called when an inference is requested
61
+ for this model. Depending on the batching configuration (e.g. Dynamic
62
+ Batching) used, `requests` may contain multiple requests. Every
63
+ Python model must create one pb_utils.InferenceResponse for every
64
+ pb_utils.InferenceRequest in `requests`. If there is an error, you can
65
+ set the error argument when creating a pb_utils.InferenceResponse.
66
+ Parameters
67
+ ----------
68
+ requests : list
69
+ A list of pb_utils.InferenceRequest
70
+ Returns
71
+ -------
72
+ list
73
+ A list of pb_utils.InferenceResponse. The length of this list must
74
+ be the same as `requests`
75
+ """
76
+
77
+ responses = []
78
+
79
+ # Every Python backend must iterate over every one of the requests
80
+ # and create a pb_utils.InferenceResponse for each of them.
81
+ for idx, request in enumerate(requests):
82
+ # Get input tensors
83
+ tokens_batch = pb_utils.get_input_tensor_by_name(
84
+ request, 'TOKENS_BATCH').as_numpy()
85
+ sequence_length = pb_utils.get_input_tensor_by_name(
86
+ request, 'sequence_length').as_numpy()
87
+
88
+ # Postprocessing output data.
89
+ outputs = self._postprocessing(tokens_batch.tolist(),
90
+ sequence_length)
91
+
92
+ # Create output tensors. You need pb_utils.Tensor
93
+ # objects to create pb_utils.InferenceResponse.
94
+ output_tensor = pb_utils.Tensor(
95
+ 'OUTPUT',
96
+ np.array(outputs).astype(self.output_dtype))
97
+
98
+ # Create InferenceResponse. You can set an error here in case
99
+ # there was a problem with handling this inference request.
100
+ # Below is an example of how you can set errors in inference
101
+ # response:
102
+ #
103
+ # pb_utils.InferenceResponse(
104
+ # output_tensors=..., TritonError("An error occurred"))
105
+ inference_response = pb_utils.InferenceResponse(
106
+ output_tensors=[output_tensor])
107
+ responses.append(inference_response)
108
+
109
+ # You should return a list of pb_utils.InferenceResponse. Length
110
+ # of this list must match the length of `requests` list.
111
+ return responses
112
+
113
+ def finalize(self):
114
+ """`finalize` is called only once when the model is being unloaded.
115
+
116
+ Implementing `finalize` function is optional. This function allows the
117
+ model to perform any necessary clean ups before exit.
118
+ """
119
+ print('Cleaning up...')
120
+
121
+ def _postprocessing(self, tokens_batch, sequence_length):
122
+ """decode token ids into texts."""
123
+ outputs = []
124
+ for beam_tokens, beam_len in zip(tokens_batch, sequence_length):
125
+ for tokens, _len in zip(beam_tokens, beam_len):
126
+ output = self.tokenizer.decode(tokens, _len)
127
+ output = output.encode('utf8')
128
+ outputs.append(output)
129
+ return outputs
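`_postprocessing` receives `TOKENS_BATCH` as a `[batch, beam, seq_len]` tensor plus a per-beam `sequence_length`, and decodes each beam only up to its own length. A self-contained sketch of that shape contract, with a stand-in `decode` in place of the real `Tokenizer.decode`:

```python
# Sketch of the decode loop's shape contract; decode() is a stand-in.
import numpy as np

def decode(tokens, length):
    return " ".join(str(t) for t in tokens[:length])

tokens_batch = np.array([[[5, 6, 7, 0], [8, 9, 0, 0]]])  # 1 request, 2 beams
sequence_length = np.array([[3, 2]])                      # valid length per beam
outputs = []
for beam_tokens, beam_len in zip(tokens_batch.tolist(), sequence_length):
    for tokens, _len in zip(beam_tokens, beam_len):
        outputs.append(decode(tokens, _len).encode("utf8"))
print(outputs)  # [b'5 6 7', b'8 9']
```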
triton_models/postprocessing/1/tokenizer ADDED
@@ -0,0 +1 @@
1
+ ../../tokenizer
triton_models/postprocessing/config.pbtxt ADDED
@@ -0,0 +1,36 @@
1
+ name: "postprocessing"
2
+ backend: "python"
3
+ max_batch_size: 1
4
+ input [
5
+ {
6
+ name: "TOKENS_BATCH"
7
+ data_type: TYPE_UINT32
8
+ dims: [ -1, -1 ]
9
+ },
10
+ {
11
+ name: "sequence_length"
12
+ data_type: TYPE_UINT32
13
+ dims: [ -1 ]
14
+ }
15
+ ]
16
+ output [
17
+ {
18
+ name: "OUTPUT"
19
+ data_type: TYPE_STRING
20
+ dims: [ -1, -1 ]
21
+ }
22
+ ]
23
+
24
+ instance_group [
25
+ {
26
+ count: 16
27
+ kind: KIND_CPU
28
+ }
29
+ ]
30
+
31
+ parameters {
32
+ key: "tokenizer_path"
33
+ value: {
34
+ string_value: "tokenizer/tokenizer.model"
35
+ }
36
+ }
triton_models/preprocessing/1/model.py ADDED
@@ -0,0 +1,151 @@
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import json
3
+ import os.path as osp
4
+ from pathlib import Path
5
+
6
+ import numpy as np
7
+ import torch
8
+ import triton_python_backend_utils as pb_utils
9
+ from torch.nn.utils.rnn import pad_sequence
10
+
11
+ # This tokenizer is `lmdeploy/turbomind/tokenizer.py`. When an LLM is served
12
+ # by triton inference server, it has to be converted first by running
13
+ # `python lmdeploy/serve/turbomind/deploy.py`. Then
14
+ # `lmdeploy/turbomind/tokenizer.py` will be copied to `tokenizer/tokenizer.py`
15
+ from .tokenizer.tokenizer import Tokenizer
16
+
17
+
18
+ class TritonPythonModel:
19
+ """Your Python model must use the same class name.
20
+
21
+ Every Python model that is created must have "TritonPythonModel" as the
22
+ class name.
23
+ """
24
+
25
+ def initialize(self, args):
26
+ """`initialize` is called only once when the model is being loaded.
27
+ Implementing `initialize` function is optional. This function allows
28
+ the model to initialize any state associated with this model.
29
+ Parameters
30
+ ----------
31
+ args : dict
32
+ Both keys and values are strings. The dictionary keys and values are:
33
+ * model_config: A JSON string containing the model configuration
34
+ * model_instance_kind: A string containing model instance kind
35
+ * model_instance_device_id: A string containing model instance device
36
+ ID
37
+ * model_repository: Model repository path
38
+ * model_version: Model version
39
+ * model_name: Model name
40
+ """
41
+ # Parse model configs
42
+ self.model_config = model_config = json.loads(args['model_config'])
43
+
44
+ # Parse model output configs and convert Triton types to numpy types
45
+ input_names = [
46
+ 'INPUT_ID', 'REQUEST_INPUT_LEN', 'BAD_WORDS_IDS', 'STOP_WORDS_IDS'
47
+ ]
48
+ for input_name in input_names:
49
+ setattr(
50
+ self,
51
+ input_name.lower() + '_dtype',
52
+ pb_utils.triton_string_to_numpy(
53
+ pb_utils.get_output_config_by_name(
54
+ model_config, input_name)['data_type']))
55
+
56
+ cur_folder = Path(__file__).parent
57
+ self.tokenizer = Tokenizer(
58
+ osp.join(
59
+ cur_folder, self.model_config['parameters']['tokenizer_path']
60
+ ['string_value']))
61
+ self.start_id = self.tokenizer.bos_token_id
62
+ self.end_id = self.tokenizer.eos_token_id
63
+
64
+ def execute(self, requests):
65
+ """`execute` must be implemented in every Python model. `execute`
66
+ function receives a list of pb_utils.InferenceRequest as the only
67
+ argument. This function is called when an inference is requested
68
+ for this model. Depending on the batching configuration (e.g. Dynamic
69
+ Batching) used, `requests` may contain multiple requests. Every
70
+ Python model, must create one pb_utils.InferenceResponse for every
71
+ pb_utils.InferenceRequest in `requests`. If there is an error, you can
72
+ set the error argument when creating a pb_utils.InferenceResponse.
73
+ Parameters
74
+ ----------
75
+ requests : list
76
+ A list of pb_utils.InferenceRequest
77
+ Returns
78
+ -------
79
+ list
80
+ A list of pb_utils.InferenceResponse. The length of this list must
81
+ be the same as `requests`
82
+ """
83
+
84
+ responses = []
85
+
86
+ # Every Python backend must iterate over everyone of the requests
87
+ # and create a pb_utils.InferenceResponse for each of them.
88
+ for idx, request in enumerate(requests):
89
+ # Get input tensors
90
+ query = pb_utils.get_input_tensor_by_name(request,
91
+ 'QUERY').as_numpy()
92
+ request_output_len = pb_utils.get_input_tensor_by_name(
93
+ request, 'REQUEST_OUTPUT_LEN').as_numpy()
94
+
95
+ # Preprocessing input data.
96
+ input_id, request_input_len = self._create_request(query)
97
+
98
+ # Create output tensors. You need pb_utils.Tensor
99
+ # objects to create pb_utils.InferenceResponse.
100
+ input_id_tensor = pb_utils.Tensor(
101
+ 'INPUT_ID',
102
+ np.array(input_id).astype(self.input_id_dtype))
103
+ request_input_len_tensor = pb_utils.Tensor(
104
+ 'REQUEST_INPUT_LEN',
105
+ np.array(request_input_len).astype(
106
+ self.request_input_len_dtype))
107
+ request_output_len_tensor = pb_utils.Tensor(
108
+ 'REQUEST_OUTPUT_LEN', request_output_len)
109
+
110
+ # Create InferenceResponse. You can set an error here in case
111
+ # there was a problem with handling this inference request.
112
+ # Below is an example of how you can set errors in inference
113
+ # response:
114
+ #
115
+ # pb_utils.InferenceResponse(
116
+ # output_tensors=..., TritonError("An error occurred"))
117
+ inference_response = pb_utils.InferenceResponse(output_tensors=[
118
+ input_id_tensor, request_input_len_tensor,
119
+ request_output_len_tensor
120
+ ])
121
+ responses.append(inference_response)
122
+
123
+ # You should return a list of pb_utils.InferenceResponse. Length
124
+ # of this list must match the length of `requests` list.
125
+ return responses
126
+
127
+ def finalize(self):
128
+ """`finalize` is called only once when the model is being unloaded.
129
+
130
+ Implementing `finalize` function is optional. This function allows the
131
+ model to perform any necessary clean ups before exit.
132
+ """
133
+ print('Cleaning up...')
134
+
135
+ def _create_request(self, query):
136
+ """Tokenize prompts and return the token ids and their length.
137
+
138
+ Args:
139
+ query (List[str]): a list of prompt
140
+ Returns:
141
+ tuple: token ids and their length
142
+ """
143
+ start_ids = [
144
+ torch.IntTensor(self.tokenizer.encode(s[0].decode()))
145
+ for s in query
146
+ ]
147
+ start_lengths = torch.IntTensor([[len(ids)] for ids in start_ids])
148
+ start_ids = pad_sequence(start_ids,
149
+ batch_first=True,
150
+ padding_value=self.end_id)
151
+ return start_ids, start_lengths
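`_create_request` right-pads the tokenized prompts with the eos id so that a ragged batch becomes one rectangular tensor, while the true lengths travel separately in `REQUEST_INPUT_LEN`. A standalone sketch of that padding behaviour with made-up token ids and `eos_token_id = 2`:

```python
# Sketch of the padding done in _create_request (token ids are made up).
import torch
from torch.nn.utils.rnn import pad_sequence

start_ids = [torch.IntTensor([1, 420, 87, 9]), torch.IntTensor([1, 15])]
start_lengths = torch.IntTensor([[len(ids)] for ids in start_ids])  # [[4], [2]]
padded = pad_sequence(start_ids, batch_first=True, padding_value=2)
print(padded)         # tensor([[  1, 420,  87,   9],
                      #         [  1,  15,   2,   2]], dtype=torch.int32)
print(start_lengths)
```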
triton_models/preprocessing/1/tokenizer ADDED
@@ -0,0 +1 @@
1
+ ../../tokenizer
triton_models/preprocessing/config.pbtxt ADDED
@@ -0,0 +1,74 @@
1
+ name: "preprocessing"
2
+ backend: "python"
3
+ max_batch_size: 1
4
+
5
+ input [
6
+ {
7
+ name: "QUERY"
8
+ data_type: TYPE_STRING
9
+ dims: [ -1 ]
10
+ },
11
+ {
12
+ name: "BAD_WORDS_DICT"
13
+ data_type: TYPE_STRING
14
+ dims: [ -1 ]
15
+ optional: true
16
+ },
17
+ {
18
+ name: "STOP_WORDS_DICT"
19
+ data_type: TYPE_STRING
20
+ dims: [ -1 ]
21
+ optional: true
22
+ },
23
+ {
24
+ name: "REQUEST_OUTPUT_LEN"
25
+ data_type: TYPE_UINT32
26
+ dims: [ -1 ]
27
+ }
28
+ ]
29
+ output [
30
+ {
31
+ name: "INPUT_ID"
32
+ data_type: TYPE_UINT32
33
+ dims: [ -1 ]
34
+ },
35
+ {
36
+ name: "REQUEST_INPUT_LEN"
37
+ data_type: TYPE_UINT32
38
+ dims: [ 1 ]
39
+ },
40
+ {
41
+ name: "BAD_WORDS_IDS"
42
+ data_type: TYPE_INT32
43
+ dims: [ 2, -1 ]
44
+ },
45
+ {
46
+ name: "STOP_WORDS_IDS"
47
+ data_type: TYPE_INT32
48
+ dims: [ 2, -1 ]
49
+ },
50
+ {
51
+ name: "REQUEST_OUTPUT_LEN"
52
+ data_type: TYPE_UINT32
53
+ dims: [ -1 ]
54
+ },
55
+ {
56
+ name: "PROMPT_LEARNING_TASK_NAME_IDS"
57
+ data_type: TYPE_UINT32
58
+ dims: [ 1 ]
59
+ }
60
+ ]
61
+
62
+ instance_group [
63
+ {
64
+ count: 4
65
+ kind: KIND_CPU
66
+ }
67
+ ]
68
+
69
+ parameters {
70
+ key: "tokenizer_path"
71
+ value: {
72
+ string_value: "tokenizer/tokenizer.model"
73
+ }
74
+ }
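`QUERY` is a `TYPE_STRING` tensor, so a client has to ship the prompt as a numpy object array and declare the Triton datatype as `BYTES`. A hedged sketch that calls the `preprocessing` model on its own to inspect the produced token ids (the prompt text is arbitrary):

```python
# Sketch: invoke only the "preprocessing" model to see its tokenization output.
import numpy as np
import tritonclient.grpc as grpcclient

prompt = np.array([["Hello, InternLM"]], dtype=np.object_)  # [1, 1] string tensor
output_len = np.array([[64]], dtype=np.uint32)

inputs = [
    grpcclient.InferInput("QUERY", list(prompt.shape), "BYTES"),
    grpcclient.InferInput("REQUEST_OUTPUT_LEN", list(output_len.shape), "UINT32"),
]
inputs[0].set_data_from_numpy(prompt)
inputs[1].set_data_from_numpy(output_len)

client = grpcclient.InferenceServerClient(url="localhost:33337")
result = client.infer("preprocessing", inputs)
print(result.as_numpy("INPUT_ID"))
print(result.as_numpy("REQUEST_INPUT_LEN"))
```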
triton_models/tokenizer/config.json ADDED
@@ -0,0 +1,33 @@
1
+ {
2
+ "_name_or_path": "/nvme/shared_data/InternLM/20B/internlm-20b-chat",
3
+ "architectures": [
4
+ "InternLMForCausalLM"
5
+ ],
6
+ "auto_map": {
7
+ "AutoConfig": "configuration_internlm.InternLMConfig",
8
+ "AutoModel": "modeling_internlm.InternLMForCausalLM",
9
+ "AutoModelForCausalLM": "modeling_internlm.InternLMForCausalLM"
10
+ },
11
+ "bias": false,
12
+ "bos_token_id": 1,
13
+ "eos_token_id": 2,
14
+ "hidden_act": "silu",
15
+ "hidden_size": 5120,
16
+ "initializer_range": 0.02,
17
+ "intermediate_size": 13824,
18
+ "max_position_embeddings": 2048,
19
+ "model_type": "internlm",
20
+ "num_attention_heads": 40,
21
+ "num_hidden_layers": 60,
22
+ "num_key_value_heads": 40,
23
+ "pad_token_id": 0,
24
+ "pretraining_tp": 1,
25
+ "rms_norm_eps": 1e-06,
26
+ "rope_scaling": null,
27
+ "rope_theta": 10000.0,
28
+ "tie_word_embeddings": false,
29
+ "torch_dtype": "float16",
30
+ "transformers_version": "4.33.1",
31
+ "use_cache": false,
32
+ "vocab_size": 103168
33
+ }
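The `triton_models/tokenizer` directory doubles as an ordinary Hugging Face model folder (config plus the custom InternLM code), so it can also be loaded directly with `transformers`. A minimal sketch, assuming the repository is checked out locally; `trust_remote_code` is required for the `configuration_internlm` / `tokenization_internlm` classes:

```python
# Sketch: load the shipped config and tokenizer with transformers.
from transformers import AutoConfig, AutoTokenizer

path = "./triton_models/tokenizer"  # assumed local checkout path
config = AutoConfig.from_pretrained(path, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
print(config.num_hidden_layers, config.hidden_size)  # 60, 5120 per config.json
print(tokenizer.encode("Hello"))
```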
triton_models/tokenizer/configuration_internlm.py ADDED
@@ -0,0 +1,120 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ InternLM model configuration"""
21
+
22
+ from transformers.utils import logging
23
+ from transformers.configuration_utils import PretrainedConfig
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ INTERNLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
29
+
30
+
31
+ class InternLMConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate an InternLM
34
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
35
+ defaults will yield a similar configuration to that of the InternLM-7B.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+
41
+ Args:
42
+ vocab_size (`int`, *optional*, defaults to 32000):
43
+ Vocabulary size of the InternLM model. Defines the number of different tokens that can be represented by the
44
+ `inputs_ids` passed when calling [`InternLMModel`]
45
+ hidden_size (`int`, *optional*, defaults to 4096):
46
+ Dimension of the hidden representations.
47
+ intermediate_size (`int`, *optional*, defaults to 11008):
48
+ Dimension of the MLP representations.
49
+ num_hidden_layers (`int`, *optional*, defaults to 32):
50
+ Number of hidden layers in the Transformer encoder.
51
+ num_attention_heads (`int`, *optional*, defaults to 32):
52
+ Number of attention heads for each attention layer in the Transformer encoder.
53
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
54
+ The non-linear activation function (function or string) in the decoder.
55
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
56
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
57
+ just in case (e.g., 512 or 1024 or 2048).
58
+ initializer_range (`float`, *optional*, defaults to 0.02):
59
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
60
+ rms_norm_eps (`float`, *optional*, defaults to 1e-12):
61
+ The epsilon used by the rms normalization layers.
62
+ use_cache (`bool`, *optional*, defaults to `True`):
63
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
64
+ relevant if `config.is_decoder=True`.
65
+ tie_word_embeddings(`bool`, *optional*, defaults to `False`):
66
+ Whether to tie weight embeddings
67
+ Example:
68
+
69
+ ```python
70
+ >>> from transformers import InternLMModel, InternLMConfig
71
+
72
+ >>> # Initializing a InternLM internlm-7b style configuration
73
+ >>> configuration = InternLMConfig()
74
+
75
+ >>> # Initializing a model from the internlm-7b style configuration
76
+ >>> model = InternLMModel(configuration)
77
+
78
+ >>> # Accessing the model configuration
79
+ >>> configuration = model.config
80
+ ```"""
81
+ model_type = "internlm"
82
+ _auto_class = "AutoConfig"
83
+
84
+ def __init__(
85
+ self,
86
+ vocab_size=103168,
87
+ hidden_size=4096,
88
+ intermediate_size=11008,
89
+ num_hidden_layers=32,
90
+ num_attention_heads=32,
91
+ hidden_act="silu",
92
+ max_position_embeddings=2048,
93
+ initializer_range=0.02,
94
+ rms_norm_eps=1e-6,
95
+ use_cache=True,
96
+ pad_token_id=0,
97
+ bos_token_id=1,
98
+ eos_token_id=2,
99
+ tie_word_embeddings=False,
100
+ bias=True,
101
+ **kwargs,
102
+ ):
103
+ self.vocab_size = vocab_size
104
+ self.max_position_embeddings = max_position_embeddings
105
+ self.hidden_size = hidden_size
106
+ self.intermediate_size = intermediate_size
107
+ self.num_hidden_layers = num_hidden_layers
108
+ self.num_attention_heads = num_attention_heads
109
+ self.hidden_act = hidden_act
110
+ self.initializer_range = initializer_range
111
+ self.rms_norm_eps = rms_norm_eps
112
+ self.use_cache = use_cache
113
+ self.bias = bias
114
+ super().__init__(
115
+ pad_token_id=pad_token_id,
116
+ bos_token_id=bos_token_id,
117
+ eos_token_id=eos_token_id,
118
+ tie_word_embeddings=tie_word_embeddings,
119
+ **kwargs,
120
+ )
triton_models/tokenizer/generation_config.json ADDED
@@ -0,0 +1,6 @@
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "eos_token_id": 2,
5
+ "transformers_version": "4.33.1"
6
+ }
triton_models/tokenizer/modeling_internlm.py ADDED
@@ -0,0 +1,998 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch InternLM model."""
21
+ import math
22
+ from typing import List, Optional, Tuple, Union
23
+ import threading, queue
24
+
25
+ import torch
26
+ import torch.utils.checkpoint
27
+ from torch import nn
28
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
+
30
+ from transformers.activations import ACT2FN
31
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
32
+ from transformers.modeling_utils import PreTrainedModel
33
+ from transformers.generation.streamers import BaseStreamer
34
+ from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
35
+ from .configuration_internlm import InternLMConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ _CONFIG_FOR_DOC = "InternLMConfig"
41
+
42
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
43
+ def _make_causal_mask(
44
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
45
+ ):
46
+ """
47
+ Make causal mask used for bi-directional self-attention.
48
+ """
49
+ bsz, tgt_len = input_ids_shape
50
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
51
+ mask_cond = torch.arange(mask.size(-1), device=device)
52
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
53
+ mask = mask.to(dtype)
54
+
55
+ if past_key_values_length > 0:
56
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
57
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
58
+
59
+
60
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
61
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
62
+ """
63
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
64
+ """
65
+ bsz, src_len = mask.size()
66
+ tgt_len = tgt_len if tgt_len is not None else src_len
67
+
68
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
69
+
70
+ inverted_mask = 1.0 - expanded_mask
71
+
72
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
73
+
74
+
75
+ class InternLMRMSNorm(nn.Module):
76
+ def __init__(self, hidden_size, eps=1e-6):
77
+ """
78
+ InternLMRMSNorm is equivalent to T5LayerNorm
79
+ """
80
+ super().__init__()
81
+ self.weight = nn.Parameter(torch.ones(hidden_size))
82
+ self.variance_epsilon = eps
83
+
84
+ def forward(self, hidden_states):
85
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
86
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
87
+
88
+ # convert into half-precision if necessary
89
+ if self.weight.dtype in [torch.float16, torch.bfloat16]:
90
+ hidden_states = hidden_states.to(self.weight.dtype)
91
+
92
+ return self.weight * hidden_states
93
+
94
+
95
+ class InternLMRotaryEmbedding(torch.nn.Module):
96
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
97
+ super().__init__()
98
+ inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
99
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
100
+
101
+ # Build here to make `torch.jit.trace` work.
102
+ self.max_seq_len_cached = max_position_embeddings
103
+ t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
104
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
105
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
106
+ emb = torch.cat((freqs, freqs), dim=-1)
107
+ self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
108
+ self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
109
+
110
+ def forward(self, x, seq_len=None):
111
+ # x: [bs, num_attention_heads, seq_len, head_size]
112
+ # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
113
+ if seq_len > self.max_seq_len_cached:
114
+ self.max_seq_len_cached = seq_len
115
+ t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
116
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
117
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
118
+ emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
119
+ self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
120
+ self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
121
+ return (
122
+ self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
123
+ self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
124
+ )
125
+
126
+
127
+ def rotate_half(x):
128
+ """Rotates half the hidden dims of the input."""
129
+ x1 = x[..., : x.shape[-1] // 2]
130
+ x2 = x[..., x.shape[-1] // 2 :]
131
+ return torch.cat((-x2, x1), dim=-1)
132
+
133
+
134
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
135
+ # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
136
+ cos = cos.squeeze(1).squeeze(0) # [seq_len, dim]
137
+ sin = sin.squeeze(1).squeeze(0) # [seq_len, dim]
138
+ cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
139
+ sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
140
+ q_embed = (q * cos) + (rotate_half(q) * sin)
141
+ k_embed = (k * cos) + (rotate_half(k) * sin)
142
+ return q_embed, k_embed
143
+
144
+
145
+ class InternLMMLP(nn.Module):
146
+ def __init__(
147
+ self,
148
+ hidden_size: int,
149
+ intermediate_size: int,
150
+ hidden_act: str,
151
+ ):
152
+ super().__init__()
153
+ self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
154
+ self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
155
+ self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
156
+ self.act_fn = ACT2FN[hidden_act]
157
+
158
+ def forward(self, x):
159
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
160
+
161
+
162
+ class InternLMAttention(nn.Module):
163
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
164
+
165
+ def __init__(self, config: InternLMConfig):
166
+ super().__init__()
167
+ self.config = config
168
+ self.hidden_size = config.hidden_size
169
+ self.num_heads = config.num_attention_heads
170
+ self.head_dim = self.hidden_size // self.num_heads
171
+ self.max_position_embeddings = config.max_position_embeddings
172
+
173
+ if (self.head_dim * self.num_heads) != self.hidden_size:
174
+ raise ValueError(
175
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
176
+ f" and `num_heads`: {self.num_heads})."
177
+ )
178
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.bias)
179
+ self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.bias)
180
+ self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.bias)
181
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
182
+ self.rotary_emb = InternLMRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)
183
+
184
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
185
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
186
+
187
+ def forward(
188
+ self,
189
+ hidden_states: torch.Tensor,
190
+ attention_mask: Optional[torch.Tensor] = None,
191
+ position_ids: Optional[torch.LongTensor] = None,
192
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
193
+ output_attentions: bool = False,
194
+ use_cache: bool = False,
195
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
196
+ bsz, q_len, _ = hidden_states.size()
197
+
198
+ query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
199
+ key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
200
+ value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
201
+
202
+ kv_seq_len = key_states.shape[-2]
203
+ if past_key_value is not None:
204
+ kv_seq_len += past_key_value[0].shape[-2]
205
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
206
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
207
+ # [bsz, nh, t, hd]
208
+
209
+ if past_key_value is not None:
210
+ # reuse k, v, self_attention
211
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
212
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
213
+
214
+ past_key_value = (key_states, value_states) if use_cache else None
215
+
216
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
217
+
218
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
219
+ raise ValueError(
220
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
221
+ f" {attn_weights.size()}"
222
+ )
223
+
224
+ if attention_mask is not None:
225
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
226
+ raise ValueError(
227
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
228
+ )
229
+ attn_weights = attn_weights + attention_mask
230
+ attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))
231
+
232
+ # upcast attention to fp32
233
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
234
+ attn_output = torch.matmul(attn_weights, value_states)
235
+
236
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
237
+ raise ValueError(
238
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
239
+ f" {attn_output.size()}"
240
+ )
241
+
242
+ attn_output = attn_output.transpose(1, 2)
243
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
244
+
245
+ attn_output = self.o_proj(attn_output)
246
+
247
+ if not output_attentions:
248
+ attn_weights = None
249
+
250
+ return attn_output, attn_weights, past_key_value
251
+
252
+
253
+ class InternLMDecoderLayer(nn.Module):
254
+ def __init__(self, config: InternLMConfig):
255
+ super().__init__()
256
+ self.hidden_size = config.hidden_size
257
+ self.self_attn = InternLMAttention(config=config)
258
+ self.mlp = InternLMMLP(
259
+ hidden_size=self.hidden_size,
260
+ intermediate_size=config.intermediate_size,
261
+ hidden_act=config.hidden_act,
262
+ )
263
+ self.input_layernorm = InternLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
264
+ self.post_attention_layernorm = InternLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
265
+
266
+ def forward(
267
+ self,
268
+ hidden_states: torch.Tensor,
269
+ attention_mask: Optional[torch.Tensor] = None,
270
+ position_ids: Optional[torch.LongTensor] = None,
271
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
272
+ output_attentions: Optional[bool] = False,
273
+ use_cache: Optional[bool] = False,
274
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
275
+ """
276
+ Args:
277
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
278
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
279
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
280
+ output_attentions (`bool`, *optional*):
281
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
282
+ returned tensors for more detail.
283
+ use_cache (`bool`, *optional*):
284
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
285
+ (see `past_key_values`).
286
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
287
+ """
288
+
289
+ residual = hidden_states
290
+
291
+ hidden_states = self.input_layernorm(hidden_states)
292
+
293
+ # Self Attention
294
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
295
+ hidden_states=hidden_states,
296
+ attention_mask=attention_mask,
297
+ position_ids=position_ids,
298
+ past_key_value=past_key_value,
299
+ output_attentions=output_attentions,
300
+ use_cache=use_cache,
301
+ )
302
+ hidden_states = residual + hidden_states
303
+
304
+ # Fully Connected
305
+ residual = hidden_states
306
+ hidden_states = self.post_attention_layernorm(hidden_states)
307
+ hidden_states = self.mlp(hidden_states)
308
+ hidden_states = residual + hidden_states
309
+
310
+ outputs = (hidden_states,)
311
+
312
+ if output_attentions:
313
+ outputs += (self_attn_weights,)
314
+
315
+ if use_cache:
316
+ outputs += (present_key_value,)
317
+
318
+ return outputs
319
+
320
+
321
+ INTERNLM_START_DOCSTRING = r"""
322
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
323
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
324
+ etc.)
325
+
326
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
327
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
328
+ and behavior.
329
+
330
+ Parameters:
331
+ config ([`InternLMConfig`]):
332
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
333
+ load the weights associated with the model, only the configuration. Check out the
334
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
335
+ """
336
+
337
+
338
+ @add_start_docstrings(
339
+ "The bare InternLM Model outputting raw hidden-states without any specific head on top.",
340
+ INTERNLM_START_DOCSTRING,
341
+ )
342
+ class InternLMPreTrainedModel(PreTrainedModel):
343
+ config_class = InternLMConfig
344
+ base_model_prefix = "model"
345
+ supports_gradient_checkpointing = True
346
+ _no_split_modules = ["InternLMDecoderLayer"]
347
+ _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
348
+
349
+ def _init_weights(self, module):
350
+ std = self.config.initializer_range
351
+ if isinstance(module, nn.Linear):
352
+ module.weight.data.normal_(mean=0.0, std=std)
353
+ if module.bias is not None:
354
+ module.bias.data.zero_()
355
+ elif isinstance(module, nn.Embedding):
356
+ module.weight.data.normal_(mean=0.0, std=std)
357
+ if module.padding_idx is not None:
358
+ module.weight.data[module.padding_idx].zero_()
359
+
360
+ def _set_gradient_checkpointing(self, module, value=False):
361
+ if isinstance(module, InternLMModel):
362
+ module.gradient_checkpointing = value
363
+
364
+
365
+ INTERNLM_INPUTS_DOCSTRING = r"""
366
+ Args:
367
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
368
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
369
+ it.
370
+
371
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
372
+ [`PreTrainedTokenizer.__call__`] for details.
373
+
374
+ [What are input IDs?](../glossary#input-ids)
375
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
376
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
377
+
378
+ - 1 for tokens that are **not masked**,
379
+ - 0 for tokens that are **masked**.
380
+
381
+ [What are attention masks?](../glossary#attention-mask)
382
+
383
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
384
+ [`PreTrainedTokenizer.__call__`] for details.
385
+
386
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
387
+ `past_key_values`).
388
+
389
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
390
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
391
+ information on the default strategy.
392
+
393
+ - 1 indicates the head is **not masked**,
394
+ - 0 indicates the head is **masked**.
395
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
396
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
397
+ config.n_positions - 1]`.
398
+
399
+ [What are position IDs?](../glossary#position-ids)
400
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
401
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
402
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`, i.e. the cached key and value
403
+ projection states of the self-attention blocks.
404
+
405
+ Contains pre-computed hidden-states (key and value states in the self-attention blocks) that can be
406
+ used (see `past_key_values` input) to speed up sequential decoding.
407
+
408
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
409
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
410
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
411
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
412
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
413
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
414
+ model's internal embedding lookup matrix.
415
+ use_cache (`bool`, *optional*):
416
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
417
+ `past_key_values`).
418
+ output_attentions (`bool`, *optional*):
419
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
420
+ tensors for more detail.
421
+ output_hidden_states (`bool`, *optional*):
422
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
423
+ more detail.
424
+ return_dict (`bool`, *optional*):
425
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
426
+ """
427
+
428
+
429
+ @add_start_docstrings(
430
+ "The bare InternLM Model outputting raw hidden-states without any specific head on top.",
431
+ INTERNLM_START_DOCSTRING,
432
+ )
433
+ class InternLMModel(InternLMPreTrainedModel):
434
+ """
435
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLMDecoderLayer`]
436
+
437
+ Args:
438
+ config: InternLMConfig
439
+ """
440
+ _auto_class = "AutoModel"
441
+
442
+ def __init__(self, config: InternLMConfig):
443
+ super().__init__(config)
444
+ self.padding_idx = config.pad_token_id
445
+ self.vocab_size = config.vocab_size
446
+
447
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
448
+ self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)])
449
+ self.norm = InternLMRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
450
+
451
+ self.gradient_checkpointing = False
452
+ # Initialize weights and apply final processing
453
+ self.post_init()
454
+
455
+ def get_input_embeddings(self):
456
+ return self.embed_tokens
457
+
458
+ def set_input_embeddings(self, value):
459
+ self.embed_tokens = value
460
+
461
+ # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
462
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
463
+ # create causal mask
464
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
465
+ combined_attention_mask = None
466
+ if input_shape[-1] > 1:
467
+ combined_attention_mask = _make_causal_mask(
468
+ input_shape,
469
+ inputs_embeds.dtype,
470
+ device=inputs_embeds.device,
471
+ past_key_values_length=past_key_values_length,
472
+ )
473
+
474
+ if attention_mask is not None:
475
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
476
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
477
+ inputs_embeds.device
478
+ )
479
+ combined_attention_mask = (
480
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
481
+ )
482
+
483
+ return combined_attention_mask
484
+
485
+ @add_start_docstrings_to_model_forward(INTERNLM_INPUTS_DOCSTRING)
486
+ def forward(
487
+ self,
488
+ input_ids: torch.LongTensor = None,
489
+ attention_mask: Optional[torch.Tensor] = None,
490
+ position_ids: Optional[torch.LongTensor] = None,
491
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
492
+ inputs_embeds: Optional[torch.FloatTensor] = None,
493
+ use_cache: Optional[bool] = None,
494
+ output_attentions: Optional[bool] = None,
495
+ output_hidden_states: Optional[bool] = None,
496
+ return_dict: Optional[bool] = None,
497
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
498
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
499
+ output_hidden_states = (
500
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
501
+ )
502
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
503
+
504
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
505
+
506
+ # retrieve input_ids and inputs_embeds
507
+ if input_ids is not None and inputs_embeds is not None:
508
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
509
+ elif input_ids is not None:
510
+ batch_size, seq_length = input_ids.shape
511
+ elif inputs_embeds is not None:
512
+ batch_size, seq_length, _ = inputs_embeds.shape
513
+ else:
514
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
515
+
516
+ seq_length_with_past = seq_length
517
+ past_key_values_length = 0
518
+
519
+ if past_key_values is not None:
520
+ past_key_values_length = past_key_values[0][0].shape[2]
521
+ seq_length_with_past = seq_length_with_past + past_key_values_length
522
+
523
+ if position_ids is None:
524
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
525
+ position_ids = torch.arange(
526
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
527
+ )
528
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
529
+ else:
530
+ position_ids = position_ids.view(-1, seq_length).long()
531
+
532
+ if inputs_embeds is None:
533
+ inputs_embeds = self.embed_tokens(input_ids)
534
+ # embed positions
535
+ if attention_mask is None:
536
+ attention_mask = torch.ones(
537
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
538
+ )
539
+ attention_mask = self._prepare_decoder_attention_mask(
540
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
541
+ )
542
+
543
+ hidden_states = inputs_embeds
544
+
545
+ if self.gradient_checkpointing and self.training:
546
+ if use_cache:
547
+ logger.warning_once(
548
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
549
+ )
550
+ use_cache = False
551
+
552
+ # decoder layers
553
+ all_hidden_states = () if output_hidden_states else None
554
+ all_self_attns = () if output_attentions else None
555
+ next_decoder_cache = () if use_cache else None
556
+
557
+ for idx, decoder_layer in enumerate(self.layers):
558
+ if output_hidden_states:
559
+ all_hidden_states += (hidden_states,)
560
+
561
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
562
+
563
+ if self.gradient_checkpointing and self.training:
564
+
565
+ def create_custom_forward(module):
566
+ def custom_forward(*inputs):
567
+ # None for past_key_value
568
+ return module(*inputs, output_attentions, None)
569
+
570
+ return custom_forward
571
+
572
+ layer_outputs = torch.utils.checkpoint.checkpoint(
573
+ create_custom_forward(decoder_layer),
574
+ hidden_states,
575
+ attention_mask,
576
+ position_ids,
577
+ None,
578
+ )
579
+ else:
580
+ layer_outputs = decoder_layer(
581
+ hidden_states,
582
+ attention_mask=attention_mask,
583
+ position_ids=position_ids,
584
+ past_key_value=past_key_value,
585
+ output_attentions=output_attentions,
586
+ use_cache=use_cache,
587
+ )
588
+
589
+ hidden_states = layer_outputs[0]
590
+
591
+ if use_cache:
592
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
593
+
594
+ if output_attentions:
595
+ all_self_attns += (layer_outputs[1],)
596
+
597
+ hidden_states = self.norm(hidden_states)
598
+
599
+ # add hidden states from the last decoder layer
600
+ if output_hidden_states:
601
+ all_hidden_states += (hidden_states,)
602
+
603
+ next_cache = next_decoder_cache if use_cache else None
604
+ if not return_dict:
605
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
606
+ return BaseModelOutputWithPast(
607
+ last_hidden_state=hidden_states,
608
+ past_key_values=next_cache,
609
+ hidden_states=all_hidden_states,
610
+ attentions=all_self_attns,
611
+ )
612
+
613
+
614
+ class InternLMForCausalLM(InternLMPreTrainedModel):
615
+ _auto_class = "AutoModelForCausalLM"
616
+
617
+ def __init__(self, config):
618
+ super().__init__(config)
619
+ self.model = InternLMModel(config)
620
+
621
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
622
+
623
+ # Initialize weights and apply final processing
624
+ self.post_init()
625
+
626
+ def get_input_embeddings(self):
627
+ return self.model.embed_tokens
628
+
629
+ def set_input_embeddings(self, value):
630
+ self.model.embed_tokens = value
631
+
632
+ def get_output_embeddings(self):
633
+ return self.lm_head
634
+
635
+ def set_output_embeddings(self, new_embeddings):
636
+ self.lm_head = new_embeddings
637
+
638
+ def set_decoder(self, decoder):
639
+ self.model = decoder
640
+
641
+ def get_decoder(self):
642
+ return self.model
643
+
644
+ @add_start_docstrings_to_model_forward(INTERNLM_INPUTS_DOCSTRING)
645
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
646
+ def forward(
647
+ self,
648
+ input_ids: torch.LongTensor = None,
649
+ attention_mask: Optional[torch.Tensor] = None,
650
+ position_ids: Optional[torch.LongTensor] = None,
651
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
652
+ inputs_embeds: Optional[torch.FloatTensor] = None,
653
+ labels: Optional[torch.LongTensor] = None,
654
+ use_cache: Optional[bool] = None,
655
+ output_attentions: Optional[bool] = None,
656
+ output_hidden_states: Optional[bool] = None,
657
+ return_dict: Optional[bool] = None,
658
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
659
+ r"""
660
+ Args:
661
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
662
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
663
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
664
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
665
+
666
+ Returns:
667
+
668
+ Example:
669
+
670
+ ```python
671
+ >>> from transformers import AutoTokenizer, InternLMForCausalLM
672
+
673
+ >>> model = InternLMForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
674
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
675
+
676
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
677
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
678
+
679
+ >>> # Generate
680
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
681
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
682
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
683
+ ```"""
684
+
685
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
686
+ output_hidden_states = (
687
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
688
+ )
689
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
690
+
691
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
692
+ outputs = self.model(
693
+ input_ids=input_ids,
694
+ attention_mask=attention_mask,
695
+ position_ids=position_ids,
696
+ past_key_values=past_key_values,
697
+ inputs_embeds=inputs_embeds,
698
+ use_cache=use_cache,
699
+ output_attentions=output_attentions,
700
+ output_hidden_states=output_hidden_states,
701
+ return_dict=return_dict,
702
+ )
703
+
704
+ hidden_states = outputs[0]
705
+ logits = self.lm_head(hidden_states)
706
+
707
+ loss = None
708
+ if labels is not None:
709
+ # Shift so that tokens < n predict n
710
+ shift_logits = logits[..., :-1, :].contiguous()
711
+ shift_labels = labels[..., 1:].contiguous()
712
+ # Flatten the tokens
713
+ loss_fct = CrossEntropyLoss()
714
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
715
+ shift_labels = shift_labels.view(-1)
716
+ # Enable model parallelism
717
+ shift_labels = shift_labels.to(shift_logits.device)
718
+ loss = loss_fct(shift_logits, shift_labels)
719
+
720
+ if not return_dict:
721
+ output = (logits,) + outputs[1:]
722
+ return (loss,) + output if loss is not None else output
723
+
724
+ return CausalLMOutputWithPast(
725
+ loss=loss,
726
+ logits=logits,
727
+ past_key_values=outputs.past_key_values,
728
+ hidden_states=outputs.hidden_states,
729
+ attentions=outputs.attentions,
730
+ )
731
+
732
+ def prepare_inputs_for_generation(
733
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
734
+ ):
735
+ if past_key_values:
736
+ input_ids = input_ids[:, -1:]
737
+
738
+ position_ids = kwargs.get("position_ids", None)
739
+ if attention_mask is not None and position_ids is None:
740
+ # create position_ids on the fly for batch generation
741
+ position_ids = attention_mask.long().cumsum(-1) - 1
742
+ position_ids.masked_fill_(attention_mask == 0, 1)
743
+ if past_key_values:
744
+ position_ids = position_ids[:, -1].unsqueeze(-1)
745
+
746
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
747
+ if inputs_embeds is not None and past_key_values is None:
748
+ model_inputs = {"inputs_embeds": inputs_embeds}
749
+ else:
750
+ model_inputs = {"input_ids": input_ids}
751
+
752
+ model_inputs.update(
753
+ {
754
+ "position_ids": position_ids,
755
+ "past_key_values": past_key_values,
756
+ "use_cache": kwargs.get("use_cache"),
757
+ "attention_mask": attention_mask,
758
+ }
759
+ )
760
+ return model_inputs
761
+
762
+ @staticmethod
763
+ def _reorder_cache(past_key_values, beam_idx):
764
+ reordered_past = ()
765
+ for layer_past in past_key_values:
766
+ reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
767
+ return reordered_past
768
+
769
+ def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = []):
770
+ prompt = ""
771
+ for record in history:
772
+ prompt += f"""<s><|User|>:{record[0]}<eoh>\n<|Bot|>:{record[1]}<eoa>\n"""
773
+ if len(prompt) == 0:
774
+ prompt += "<s>"
775
+ prompt += f"""<|User|>:{query}<eoh>\n<|Bot|>:"""
776
+ return tokenizer([prompt], return_tensors="pt")
777
+
778
+ @torch.no_grad()
779
+ def chat(self,
780
+ tokenizer,
781
+ query: str,
782
+ history: List[Tuple[str, str]] = [],
783
+ streamer: Optional[BaseStreamer] = None,
784
+ max_new_tokens: int = 1024,
785
+ do_sample: bool = True,
786
+ temperature: float = 0.8,
787
+ top_p: float = 0.8,
788
+ **kwargs):
789
+ inputs = self.build_inputs(tokenizer, query, history)
790
+ inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
791
+ outputs = self.generate(**inputs,
792
+ streamer=streamer,
793
+ max_new_tokens=max_new_tokens,
794
+ do_sample=do_sample,
795
+ temperature=temperature,
796
+ top_p=top_p,
797
+ **kwargs)
798
+ outputs = outputs[0].cpu().tolist()[len(inputs["input_ids"][0]):]
799
+ response = tokenizer.decode(outputs, skip_special_tokens=True)
800
+ response = response.split("<eoa>")[0]
801
+ history = history + [(query, response)]
802
+ return response, history
803
+
804
+ @torch.no_grad()
805
+ def stream_chat(self,
806
+ tokenizer,
807
+ query: str,
808
+ history: List[Tuple[str, str]] = [],
809
+ max_new_tokens: int = 1024,
810
+ do_sample: bool = True,
811
+ temperature: float = 0.8,
812
+ top_p: float = 0.8,
813
+ **kwargs):
814
+ """
815
+ Return a generator that yields tuples in the form (response, history).
816
+ E.g.
817
+ ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')])
818
+ ('你好,有什么可以帮助您的吗?', [('你好', '你好,有什么可以帮助您的吗?')])
819
+ """
820
+
821
+ response_queue = queue.Queue(maxsize=20)
822
+
823
+ class ChatStreamer(BaseStreamer):
824
+ def __init__(self, tokenizer) -> None:
825
+ super().__init__()
826
+ self.tokenizer = tokenizer
827
+ self.queue = response_queue
828
+ self.query = query
829
+ self.history = history
830
+ self.response = ""
831
+ self.received_inputs = False
832
+ self.queue.put((self.response, history + [(self.query, self.response)]))
833
+
834
+ def put(self, value):
835
+ if len(value.shape) > 1 and value.shape[0] > 1:
836
+ raise ValueError("ChatStreamer only supports batch size 1")
837
+ elif len(value.shape) > 1:
838
+ value = value[0]
839
+
840
+ if not self.received_inputs:
841
+ # The first received value is input_ids, ignore here
842
+ self.received_inputs = True
843
+ return
844
+
845
+ token = self.tokenizer.decode([value[-1]], skip_special_tokens=True)
846
+ if token.strip() != "<eoa>":
847
+ self.response = self.response + token
848
+ history = self.history + [(self.query, self.response)]
849
+ self.queue.put((self.response, history))
850
+
851
+ def end(self):
852
+ self.queue.put(None)
853
+
854
+ def stream_producer():
855
+ return self.chat(
856
+ tokenizer=tokenizer,
857
+ query=query,
858
+ streamer=ChatStreamer(tokenizer=tokenizer),
859
+ history=history,
860
+ max_new_tokens=max_new_tokens,
861
+ do_sample=do_sample,
862
+ temperature=temperature,
863
+ top_p=top_p,
864
+ **kwargs
865
+ )
866
+
867
+ def consumer():
868
+ producer = threading.Thread(target=stream_producer)
869
+ producer.start()
870
+ while True:
871
+ res = response_queue.get()
872
+ if res is None:
873
+ return
874
+ yield res
875
+
876
+ return consumer()
877
+
878
+
879
+ @add_start_docstrings(
880
+ """
881
+ The InternLM Model transformer with a sequence classification head on top (linear layer).
882
+
883
+ [`InternLMForSequenceClassification`] uses the last token in order to do the classification, as other causal models
884
+ (e.g. GPT-2) do.
885
+
886
+ Since it does classification on the last token, it needs to know the position of the last token. If a
887
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
888
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
889
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
890
+ each row of the batch).
891
+ """,
892
+ INTERNLM_START_DOCSTRING,
893
+ )
894
+ class InternLMForSequenceClassification(InternLMPreTrainedModel):
895
+ _keys_to_ignore_on_load_missing = [r"lm_head.weight"]
896
+
897
+ def __init__(self, config):
898
+ super().__init__(config)
899
+ self.num_labels = config.num_labels
900
+ self.model = InternLMModel(config)
901
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
902
+
903
+ # Initialize weights and apply final processing
904
+ self.post_init()
905
+
906
+ def get_input_embeddings(self):
907
+ return self.model.embed_tokens
908
+
909
+ def set_input_embeddings(self, value):
910
+ self.model.embed_tokens = value
911
+
912
+ @add_start_docstrings_to_model_forward(INTERNLM_INPUTS_DOCSTRING)
913
+ def forward(
914
+ self,
915
+ input_ids: torch.LongTensor = None,
916
+ attention_mask: Optional[torch.Tensor] = None,
917
+ position_ids: Optional[torch.LongTensor] = None,
918
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
919
+ inputs_embeds: Optional[torch.FloatTensor] = None,
920
+ labels: Optional[torch.LongTensor] = None,
921
+ use_cache: Optional[bool] = None,
922
+ output_attentions: Optional[bool] = None,
923
+ output_hidden_states: Optional[bool] = None,
924
+ return_dict: Optional[bool] = None,
925
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
926
+ r"""
927
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
928
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
929
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
930
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
931
+ """
932
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
933
+
934
+ transformer_outputs = self.model(
935
+ input_ids,
936
+ attention_mask=attention_mask,
937
+ position_ids=position_ids,
938
+ past_key_values=past_key_values,
939
+ inputs_embeds=inputs_embeds,
940
+ use_cache=use_cache,
941
+ output_attentions=output_attentions,
942
+ output_hidden_states=output_hidden_states,
943
+ return_dict=return_dict,
944
+ )
945
+ hidden_states = transformer_outputs[0]
946
+ logits = self.score(hidden_states)
947
+
948
+ if input_ids is not None:
949
+ batch_size = input_ids.shape[0]
950
+ else:
951
+ batch_size = inputs_embeds.shape[0]
952
+
953
+ if self.config.pad_token_id is None and batch_size != 1:
954
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
955
+ if self.config.pad_token_id is None:
956
+ sequence_lengths = -1
957
+ else:
958
+ if input_ids is not None:
959
+ sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
960
+ else:
961
+ sequence_lengths = -1
962
+
963
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
964
+
965
+ loss = None
966
+ if labels is not None:
967
+ labels = labels.to(logits.device)
968
+ if self.config.problem_type is None:
969
+ if self.num_labels == 1:
970
+ self.config.problem_type = "regression"
971
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
972
+ self.config.problem_type = "single_label_classification"
973
+ else:
974
+ self.config.problem_type = "multi_label_classification"
975
+
976
+ if self.config.problem_type == "regression":
977
+ loss_fct = MSELoss()
978
+ if self.num_labels == 1:
979
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
980
+ else:
981
+ loss = loss_fct(pooled_logits, labels)
982
+ elif self.config.problem_type == "single_label_classification":
983
+ loss_fct = CrossEntropyLoss()
984
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
985
+ elif self.config.problem_type == "multi_label_classification":
986
+ loss_fct = BCEWithLogitsLoss()
987
+ loss = loss_fct(pooled_logits, labels)
988
+ if not return_dict:
989
+ output = (pooled_logits,) + transformer_outputs[1:]
990
+ return ((loss,) + output) if loss is not None else output
991
+
992
+ return SequenceClassifierOutputWithPast(
993
+ loss=loss,
994
+ logits=pooled_logits,
995
+ past_key_values=transformer_outputs.past_key_values,
996
+ hidden_states=transformer_outputs.hidden_states,
997
+ attentions=transformer_outputs.attentions,
998
+ )
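The `chat` and `stream_chat` helpers above wrap `generate()` with InternLM's dialogue template (`<s><|User|>:...<eoh>\n<|Bot|>:...<eoa>`). A minimal usage sketch follows; the checkpoint path, the fp16 dtype and `device_map="auto"` (which needs `accelerate`) are illustrative assumptions, not part of this repo:

```python
# Hedged sketch: exercising the chat()/stream_chat() helpers defined in
# modeling_internlm.py above. The checkpoint path below is an assumption.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "internlm-chat-7b"  # any directory that ships this modeling code
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    path, trust_remote_code=True, torch_dtype=torch.float16, device_map="auto"
).eval()

# Single turn: build_inputs() wraps the query as <s><|User|>:...<eoh>\n<|Bot|>:
response, history = model.chat(tokenizer, "Hello!", history=[])
print(response)

# Streaming: stream_chat() returns a generator of (partial_response, history)
for partial_response, _ in model.stream_chat(tokenizer, "Tell me more", history=history):
    print(partial_response, end="\r")
```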
triton_models/tokenizer/placeholder ADDED
File without changes
triton_models/tokenizer/pytorch_model.bin.index.json ADDED
The diff for this file is too large to render. See raw diff
 
triton_models/tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
1
+ {
2
+ "bos_token": "<s>",
3
+ "eos_token": "</s>",
4
+ "pad_token": "</s>",
5
+ "unk_token": "<unk>"
6
+ }
triton_models/tokenizer/tokenization_internlm.py ADDED
@@ -0,0 +1,242 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+
21
+ """Tokenization classes for InternLM."""
22
+ import os
23
+ from shutil import copyfile
24
+ from typing import Any, Dict, List, Optional, Tuple
25
+
26
+ import sentencepiece as spm
27
+
28
+ from transformers.tokenization_utils import PreTrainedTokenizer
29
+ from transformers.utils import logging
30
+
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+ VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}
35
+
36
+ PRETRAINED_VOCAB_FILES_MAP = {}
37
+
38
+
39
+ class InternLMTokenizer(PreTrainedTokenizer):
40
+ """
41
+ Construct an InternLM tokenizer, based on a SentencePiece byte-pair-encoding model.
42
+
43
+ Args:
44
+ vocab_file (`str`):
45
+ Path to the vocabulary file.
46
+ """
47
+
48
+ vocab_files_names = VOCAB_FILES_NAMES
49
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
50
+ model_input_names = ["input_ids", "attention_mask"]
51
+ _auto_class = "AutoTokenizer"
52
+
53
+ def __init__(
54
+ self,
55
+ vocab_file,
56
+ unk_token="<unk>",
57
+ bos_token="<s>",
58
+ eos_token="</s>",
59
+ pad_token="</s>",
60
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
61
+ add_bos_token=True,
62
+ add_eos_token=False,
63
+ decode_with_prefix_space=False,
64
+ clean_up_tokenization_spaces=False,
65
+ **kwargs,
66
+ ):
67
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
68
+ super().__init__(
69
+ bos_token=bos_token,
70
+ eos_token=eos_token,
71
+ unk_token=unk_token,
72
+ pad_token=pad_token,
73
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
74
+ **kwargs,
75
+ )
76
+ self.vocab_file = vocab_file
77
+ self.add_bos_token = add_bos_token
78
+ self.add_eos_token = add_eos_token
79
+ self.decode_with_prefix_space = decode_with_prefix_space
80
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
81
+ self.sp_model.Load(vocab_file)
82
+ self._no_prefix_space_tokens = None
83
+
84
+ """ Initialisation"""
85
+
86
+ @property
87
+ def no_prefix_space_tokens(self):
88
+ if self._no_prefix_space_tokens is None:
89
+ vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
90
+ self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith("▁")}
91
+ return self._no_prefix_space_tokens
92
+
93
+ @property
94
+ def vocab_size(self):
95
+ """Returns vocab size"""
96
+ return self.sp_model.get_piece_size()
97
+
98
+ @property
99
+ def bos_token_id(self) -> Optional[int]:
100
+ return self.sp_model.bos_id()
101
+
102
+ @property
103
+ def eos_token_id(self) -> Optional[int]:
104
+ return self.sp_model.eos_id()
105
+
106
+ def get_vocab(self):
107
+ """Returns vocab as a dict"""
108
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
109
+ vocab.update(self.added_tokens_encoder)
110
+ return vocab
111
+
112
+ def _tokenize(self, text):
113
+ """Returns a tokenized string."""
114
+ return self.sp_model.encode(text, out_type=str)
115
+
116
+ def _convert_token_to_id(self, token):
117
+ """Converts a token (str) in an id using the vocab."""
118
+ return self.sp_model.piece_to_id(token)
119
+
120
+ def _convert_id_to_token(self, index):
121
+ """Converts an index (integer) in a token (str) using the vocab."""
122
+ token = self.sp_model.IdToPiece(index)
123
+ return token
124
+
125
+ def _maybe_add_prefix_space(self, tokens, decoded):
126
+ if tokens and tokens[0] not in self.no_prefix_space_tokens:
127
+ return " " + decoded
128
+ else:
129
+ return decoded
130
+
131
+ def convert_tokens_to_string(self, tokens):
132
+ """Converts a sequence of tokens (string) in a single string."""
133
+ current_sub_tokens = []
134
+ out_string = ""
135
+ prev_is_special = False
136
+ for token in tokens:
137
+ # make sure that special tokens are not decoded using sentencepiece model
138
+ if token in self.all_special_tokens:
139
+ if not prev_is_special:
140
+ out_string += " "
141
+ out_string += self.sp_model.decode(current_sub_tokens) + token
142
+ prev_is_special = True
143
+ current_sub_tokens = []
144
+ else:
145
+ current_sub_tokens.append(token)
146
+ prev_is_special = False
147
+ out_string += self.sp_model.decode(current_sub_tokens)
148
+ out_string = self.clean_up_tokenization(out_string)
149
+ out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
150
+ return out_string[1:]
151
+
152
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
153
+ """
154
+ Save the vocabulary and special tokens file to a directory.
155
+
156
+ Args:
157
+ save_directory (`str`):
158
+ The directory in which to save the vocabulary.
159
+
160
+ Returns:
161
+ `Tuple(str)`: Paths to the files saved.
162
+ """
163
+ if not os.path.isdir(save_directory):
164
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
165
+ return
166
+ out_vocab_file = os.path.join(
167
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
168
+ )
169
+
170
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
171
+ copyfile(self.vocab_file, out_vocab_file)
172
+ elif not os.path.isfile(self.vocab_file):
173
+ with open(out_vocab_file, "wb") as fi:
174
+ content_spiece_model = self.sp_model.serialized_model_proto()
175
+ fi.write(content_spiece_model)
176
+
177
+ return (out_vocab_file,)
178
+
179
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
180
+ if self.add_bos_token:
181
+ bos_token_ids = [self.bos_token_id]
182
+ else:
183
+ bos_token_ids = []
184
+
185
+ output = bos_token_ids + token_ids_0
186
+
187
+ if token_ids_1 is not None:
188
+ output = output + token_ids_1
189
+
190
+ if self.add_eos_token:
191
+ output = output + [self.eos_token_id]
192
+
193
+ return output
194
+
195
+ def get_special_tokens_mask(
196
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
197
+ ) -> List[int]:
198
+ """
199
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
200
+ special tokens using the tokenizer `prepare_for_model` method.
201
+
202
+ Args:
203
+ token_ids_0 (`List[int]`):
204
+ List of IDs.
205
+ token_ids_1 (`List[int]`, *optional*):
206
+ Optional second list of IDs for sequence pairs.
207
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
208
+ Whether or not the token list is already formatted with special tokens for the model.
209
+
210
+ Returns:
211
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
212
+ """
213
+ if already_has_special_tokens:
214
+ return super().get_special_tokens_mask(
215
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
216
+ )
217
+
218
+ if token_ids_1 is None:
219
+ return [1] + ([0] * len(token_ids_0)) + [1]
220
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
221
+
222
+ def create_token_type_ids_from_sequences(
223
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
224
+ ) -> List[int]:
225
+ """
226
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. InternLM does not make
227
+ use of token type ids, therefore a list of zeros is returned.
228
+
229
+ Args:
230
+ token_ids_0 (`List[int]`):
231
+ List of IDs.
232
+ token_ids_1 (`List[int]`, *optional*):
233
+ Optional second list of IDs for sequence pairs.
234
+
235
+ Returns:
236
+ `List[int]`: List of zeros.
237
+ """
238
+ eos = [self.eos_token_id]
239
+
240
+ if token_ids_1 is None:
241
+ return len(token_ids_0 + eos) * [0]
242
+ return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
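Since `tokenizer_config.json` (further below) registers this class under `auto_map`, the tokenizer loads through `AutoTokenizer` with `trust_remote_code=True`. A minimal sketch, assuming the files above sit in this repo's `triton_models/tokenizer` directory:

```python
# Hedged sketch: loading InternLMTokenizer via AutoTokenizer. The local path is
# this repo's tokenizer directory; point it at wherever these files live.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("triton_models/tokenizer", trust_remote_code=True)

print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)  # <s> </s> </s> <unk>

ids = tok("Hello, InternLM!").input_ids  # add_bos_token=True prepends the <s> id
print(ids)
print(tok.decode(ids, skip_special_tokens=True))
```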
triton_models/tokenizer/tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aab622d98c98677a1a51f969e25765154487bf3e85c7819db105db2fcacba83f
3
+ size 1658691
triton_models/tokenizer/tokenizer.py ADDED
@@ -0,0 +1,291 @@
1
+ # Copyright (c) OpenMMLab. All rights reserved.
2
+ import json
3
+ import os.path as osp
4
+ from typing import Optional, Sequence, Union
5
+
6
+ import torch
7
+
8
+
9
+ class SentencePieceTokenizer:
10
+ """Tokenizer of sentencepiece.
11
+
12
+ Args:
13
+ model_file (str): the path of the tokenizer model
14
+ """
15
+
16
+ def __init__(self, model_file: str):
17
+ from sentencepiece import SentencePieceProcessor
18
+ self.model = SentencePieceProcessor(model_file=model_file)
19
+ self._no_prefix_space_tokens = None
20
+
21
+ @property
22
+ def vocab_size(self):
23
+ """vocabulary size."""
24
+ return self.model.vocab_size()
25
+
26
+ @property
27
+ def bos_token_id(self):
28
+ """beginning of the sentence token id."""
29
+ return self.model.bos_id()
30
+
31
+ @property
32
+ def eos_token_id(self):
33
+ """end of the sentence token id."""
34
+ return self.model.eos_id()
35
+
36
+ @property
37
+ def no_prefix_space_tokens(self):
38
+ """tokens without prefix space."""
39
+ if self._no_prefix_space_tokens is None:
40
+ vocab = self.model.IdToPiece(list(range(self.vocab_size)))
41
+ self._no_prefix_space_tokens = {
42
+ i
43
+ for i, tok in enumerate(vocab) if not tok.startswith('▁')
44
+ }
45
+ return self._no_prefix_space_tokens
46
+
47
+ def _maybe_add_prefix_space(self, tokens, decoded):
48
+ """maybe add prefix space for incremental decoding."""
49
+ if len(tokens) and tokens[0] not in self.no_prefix_space_tokens:
50
+ return ' ' + decoded
51
+ else:
52
+ return decoded
53
+
54
+ def encode(self, s: str):
55
+ """Tokenize a prompt.
56
+
57
+ Args:
58
+ s (str): a prompt
59
+ Returns:
60
+ list[int]: token ids
61
+ """
62
+ add_bos = False
63
+ add_eos = False
64
+ if s.find('<BOS>') != -1:
65
+ s = s.replace('<BOS>', '')
66
+ add_bos = True
67
+ if s == '<EOS>':
68
+ s = ''
69
+ add_eos = True
70
+ return self.model.Encode(s, add_bos=add_bos, add_eos=add_eos)
71
+
72
+ def decode(self, t: Sequence[int], offset: Optional[int] = None):
73
+ """De-tokenize.
74
+
75
+ Args:
76
+ t (List[int]): a list of token ids
77
+ offset (int): for incremental decoding. Defaults to None, which
78
+ means it is not applied.
79
+ Returns:
80
+ str: text of decoding tokens
81
+ """
82
+ if isinstance(t, torch.Tensor):
83
+ t = t.tolist()
84
+ t = t[offset:]
85
+ out_string = self.model.Decode(t)
86
+ if offset:
87
+ out_string = self._maybe_add_prefix_space(t, out_string)
88
+ return out_string
89
+
90
+ def __call__(self, s: Union[str, Sequence[str]]):
91
+ """Tokenize prompts.
92
+
93
+ Args:
94
+ s (str): prompts
95
+ Returns:
96
+ list[int]: token ids
97
+ """
98
+ import addict
99
+ add_bos = False
100
+ add_eos = False
101
+
102
+ input_ids = self.model.Encode(s, add_bos=add_bos, add_eos=add_eos)
103
+ return addict.Addict(input_ids=input_ids)
104
+
105
+
106
+ class HuggingFaceTokenizer:
107
+ """Tokenizer backed by Hugging Face transformers.
108
+
109
+ Args:
110
+ model_dir (str): the directory of the tokenizer model
111
+ """
112
+
113
+ def __init__(self, model_dir: str):
114
+ from transformers import (AutoTokenizer, CodeLlamaTokenizerFast,
115
+ LlamaTokenizer, LlamaTokenizerFast)
116
+ model_file = osp.join(model_dir, 'tokenizer.model')
117
+ backend_tokenizer_file = osp.join(model_dir, 'tokenizer.json')
118
+ model_file_exists = osp.exists(model_file)
119
+ if not osp.exists(backend_tokenizer_file) and model_file_exists:
120
+ print('WARNING: Cannot find tokenizer.json. '
121
+ 'It may take a long time to initialize the tokenizer.')
122
+ self.model = AutoTokenizer.from_pretrained(model_dir,
123
+ trust_remote_code=True)
124
+ self.need_padding = type(self.model) in [
125
+ LlamaTokenizer, LlamaTokenizerFast, CodeLlamaTokenizerFast
126
+ ]
127
+ self._no_prefix_space_tokens = None
128
+ # save tokenizer.json to reuse
129
+ if not osp.exists(backend_tokenizer_file) and model_file_exists:
130
+ if hasattr(self.model, 'backend_tokenizer'):
131
+ self.model.backend_tokenizer.save(backend_tokenizer_file)
132
+
133
+ if self.model.eos_token_id is None:
134
+ generation_config_file = osp.join(model_dir,
135
+ 'generation_config.json')
136
+ with open(generation_config_file, 'r') as f:
137
+ cfg = json.load(f)
138
+ self.model.eos_token_id = cfg['eos_token_id']
139
+
140
+ @property
141
+ def vocab_size(self):
142
+ """vocabulary size."""
143
+ return self.model.vocab_size
144
+
145
+ @property
146
+ def bos_token_id(self):
147
+ """beginning of the sentence token id."""
148
+ return self.model.bos_token_id
149
+
150
+ @property
151
+ def eos_token_id(self):
152
+ """end of the sentence token id."""
153
+ return self.model.eos_token_id
154
+
155
+ @property
156
+ def no_prefix_space_tokens(self):
157
+ """tokens without prefix space."""
158
+ if self._no_prefix_space_tokens is None:
159
+ vocab = self.model.convert_ids_to_tokens(
160
+ list(range(self.vocab_size)))
161
+ self._no_prefix_space_tokens = {
162
+ i
163
+ for i, tok in enumerate(vocab) if not tok.startswith('▁')
164
+ }
165
+ return self._no_prefix_space_tokens
166
+
167
+ def _maybe_add_prefix_space(self, tokens, decoded):
168
+ """maybe add prefix space for incremental decoding."""
169
+ if self.need_padding and len(
170
+ tokens) and tokens[0] not in self.no_prefix_space_tokens:
171
+ return ' ' + decoded
172
+ else:
173
+ return decoded
174
+
175
+ def encode(self, s: str):
176
+ """Tokenize a prompt.
177
+
178
+ Args:
179
+ s (str): a prompt
180
+ Returns:
181
+ list[int]: token ids
182
+ """
183
+ add_special_tokens = False
184
+ if s.find('<BOS>') != -1:
185
+ s = s.replace('<BOS>', '<s>')
186
+ if s == '<EOS>':
187
+ s = '</s>'
188
+ if len(s) == 0:
189
+ add_special_tokens = True
190
+ return self.model.encode(s, add_special_tokens=add_special_tokens)
191
+
192
+ def decode(self, t: Sequence[int], offset: Optional[int] = None):
193
+ """De-tokenize.
194
+
195
+ Args:
196
+ t (List[int]): a list of token ids
197
+ offset (int): for incremental decoding. Defaults to None, which
198
+ means it is not applied.
199
+ Returns:
200
+ str: text of decoding tokens
201
+ """
202
+ skip_special_tokens = True
203
+ t = t[offset:]
204
+ out_string = self.model.decode(t,
205
+ skip_special_tokens=skip_special_tokens)
206
+ if offset:
207
+ out_string = self._maybe_add_prefix_space(t, out_string)
208
+ return out_string
209
+
210
+ def __call__(self, s: Union[str, Sequence[str]]):
211
+ """Tokenize prompts.
212
+
213
+ Args:
214
+ s (str): prompts
215
+ Returns:
216
+ list[int]: token ids
217
+ """
218
+ add_special_tokens = False
219
+ return self.model(s, add_special_tokens=add_special_tokens)
220
+
221
+
222
+ class Tokenizer:
223
+ """Tokenize prompts or de-tokenize tokens into texts.
224
+
225
+ Args:
226
+ model_file (str): the path of the tokenizer model
227
+ """
228
+
229
+ def __init__(self, model_file: str):
230
+ if model_file.endswith('.model'):
231
+ model_folder = osp.split(model_file)[0]
232
+ else:
233
+ model_folder = model_file
234
+ model_file = osp.join(model_folder, 'tokenizer.model')
235
+ tokenizer_config_file = osp.join(model_folder, 'tokenizer_config.json')
236
+
237
+ model_file_exists = osp.exists(model_file)
238
+ config_exists = osp.exists(tokenizer_config_file)
239
+ use_hf_model = config_exists or not model_file_exists
240
+
241
+ if not use_hf_model:
242
+ self.model = SentencePieceTokenizer(model_file)
243
+ else:
244
+ self.model = HuggingFaceTokenizer(model_folder)
245
+
246
+ @property
247
+ def vocab_size(self):
248
+ """vocabulary size."""
249
+ return self.model.vocab_size
250
+
251
+ @property
252
+ def bos_token_id(self):
253
+ """beginning of the sentence token id."""
254
+ return self.model.bos_token_id
255
+
256
+ @property
257
+ def eos_token_id(self):
258
+ """end of the sentence token id."""
259
+ return self.model.eos_token_id
260
+
261
+ def encode(self, s: str):
262
+ """Tokenize a prompt.
263
+
264
+ Args:
265
+ s (str): a prompt
266
+ Returns:
267
+ list[int]: token ids
268
+ """
269
+ return self.model.encode(s)
270
+
271
+ def decode(self, t: Sequence[int], offset: Optional[int] = None):
272
+ """De-tokenize.
273
+
274
+ Args:
275
+ t (List[int]): a list of token ids
276
+ offset (int): for incremental decoding. Defaults to None, which
277
+ means it is not applied.
278
+ Returns:
279
+ str: text of decoding tokens
280
+ """
281
+ return self.model.decode(t, offset)
282
+
283
+ def __call__(self, s: Union[str, Sequence[str]]):
284
+ """Tokenize prompts.
285
+
286
+ Args:
287
+ s (str): prompts
288
+ Returns:
289
+ list[int]: token ids
290
+ """
291
+ return self.model(s)
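The `Tokenizer` wrapper above picks a SentencePiece or Hugging Face backend depending on which files are present, and its `decode(..., offset=...)` supports incremental de-tokenization. A minimal sketch, assuming `triton_models/tokenizer` is on `PYTHONPATH` so this `tokenizer.py` can be imported directly:

```python
# Hedged sketch of the Tokenizer wrapper defined above. The paths are assumptions
# based on this repo's layout; the wrapper accepts either the directory or the
# tokenizer.model file inside it.
from tokenizer import Tokenizer  # i.e. triton_models/tokenizer/tokenizer.py

tok = Tokenizer("triton_models/tokenizer/tokenizer.model")

ids = tok.encode("<BOS>Hello, world")  # "<BOS>" is stripped and mapped to the real BOS token
print(ids)
print(tok.decode(ids))            # full de-tokenization
print(tok.decode(ids, offset=1))  # incremental decoding: skip the first `offset` tokens
```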
triton_models/tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoTokenizer": [
4
+ "tokenization_internlm.InternLMTokenizer",
5
+ null
6
+ ]
7
+ },
8
+ "bos_token": "<s>",
9
+ "clean_up_tokenization_spaces": false,
10
+ "eos_token": "</s>",
11
+ "model_max_length": 1000000000000000019884624838656,
12
+ "pad_token": "</s>",
13
+ "tokenizer_class": "InternLMTokenizer",
14
+ "unk_token": "<unk>"
15
+ }
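Because this `tokenizer_config.json` sits next to `tokenizer.model`, the `Tokenizer` wrapper in `tokenizer.py` above selects the Hugging Face backend rather than raw SentencePiece. A small sketch mirroring that check; the directory path is this repo's layout:

```python
# Hedged sketch mirroring the backend-selection rule in Tokenizer.__init__ above:
# a tokenizer_config.json beside the model (or a missing tokenizer.model) routes
# loading through HuggingFaceTokenizer instead of SentencePieceTokenizer.
import os.path as osp

model_folder = "triton_models/tokenizer"  # assumption: this repo's tokenizer directory
config_exists = osp.exists(osp.join(model_folder, "tokenizer_config.json"))
model_file_exists = osp.exists(osp.join(model_folder, "tokenizer.model"))
use_hf_model = config_exists or not model_file_exists
print("HuggingFaceTokenizer" if use_hf_model else "SentencePieceTokenizer")
```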
triton_models/weights/config.ini ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:799897e139626e09f107e0c0b1f8894949246c1f2cd943b997d860d2727c6e19
3
+ size 533
triton_models/weights/layers.0.attention.w_qkv.0.qweight ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5bebaea97ddc342b78e8f236b9996d30d473c033a7c3d6021b874ba0cd8b210
3
+ size 39321600
triton_models/weights/layers.0.attention.w_qkv.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b32a37add95e31b362daaef2a5656a920384986da025a0e6b5f073437c5b7693
3
+ size 2457600
triton_models/weights/layers.0.attention.wo.0.qweight ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13858a78f6d336c6014c5fc2f9102ce0f48ca7b5acba5897ea7eb7c0d13984ec
3
+ size 13107200
triton_models/weights/layers.0.attention.wo.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b78089c5d8a92cc75872f2be9b2971f0288e36be233ad2c00a5f12c5d08e2420
3
+ size 819200
triton_models/weights/layers.0.attention_norm.weight ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:629d4397992d92fe8f54b91b4728c7d243b6c799ea4b045c5e6809dd0d9fa942
3
+ size 10240
triton_models/weights/layers.0.feed_forward.w13.0.qweight ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd7d6275d0ab59f3b2dd3b93b053fd487882f2c9cd3ffe809eca56412ec36ca1
3
+ size 70778880
triton_models/weights/layers.0.feed_forward.w13.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d51874fb7b4c32bc95ef39bd6bbb2f001090bf558c5232de51c0c1875911f0a
3
+ size 4423680
triton_models/weights/layers.0.feed_forward.w2.0.qweight ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0590b8fa80842f609df6469552bd78e35b65a023db36b4829894c1d1e5d50f41
3
+ size 35389440
triton_models/weights/layers.0.feed_forward.w2.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c02e35a035e2ce3df607ec97f11b6ae0650a43b7a8fdd1fa802b3c1e23e86bd
3
+ size 2211840
triton_models/weights/layers.0.ffn_norm.weight ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:85f577bde93009bbfb7f1c14c3bc81bb9c5353f390c445e8eda437ecbe4fa101
3
+ size 10240
triton_models/weights/layers.1.attention.w_qkv.0.qweight ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05770c30f6a20b31ca6bd5f77e0895ee523872623792c4fe151e990e4085e240
3
+ size 39321600
triton_models/weights/layers.1.attention.w_qkv.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a7e342cd5208649ca929dd799fe89e114e7a5429abbc66ca86b952ba80bf8b4d
3
+ size 2457600
triton_models/weights/layers.1.attention.wo.0.qweight ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1842b2a0a76be6ab1e8ad290ae9031eb547ac78e35e1bfc453176e13e70c255
3
+ size 13107200
triton_models/weights/layers.1.attention.wo.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdbc7d7720814717801c80f08c54f1059effb10cd4683f94a351de23f6ea890b
3
+ size 819200
triton_models/weights/layers.1.attention_norm.weight ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9bb60ee32fe1687f47b2280c4af54a57990528a4a59fb2bb7782f28fd5f424da
3
+ size 10240
triton_models/weights/layers.1.feed_forward.w13.0.qweight ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aae50ea32653bb6461c0e338082e98db2107b4a3d471934e4f3354410a6bcbf0
3
+ size 70778880
triton_models/weights/layers.1.feed_forward.w13.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0a782e19f6392ff88942dfe687137c14da1726dce104d5b4f6abe646397885f
3
+ size 4423680
triton_models/weights/layers.1.feed_forward.w2.0.qweight ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5d2e652e54d966f5b8b7b405dbdfc66f1f1460489c3ba5fc76c1ee79844abc3
3
+ size 35389440
triton_models/weights/layers.1.feed_forward.w2.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:33179e4b3a6d851ab0429f9da2244c69fdc9c18a395c19d88b561cba54cb9cd2
3
+ size 2211840
triton_models/weights/layers.1.ffn_norm.weight ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c2406535fe735c7c1360214e79ab96071cbee60093f43571fed3247812aa788
3
+ size 10240
triton_models/weights/layers.10.attention.w_qkv.0.qweight ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:923f2874f171dc9d43c42eb966a312ac14153dd2b35ebdb63f583b8b57fa784c
3
+ size 39321600
triton_models/weights/layers.10.attention.w_qkv.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8a1a38ec040e7c9424a762d10cb76e06f64942ba66655dd5dabfc383d0bd52e
3
+ size 2457600
triton_models/weights/layers.10.attention.wo.0.qweight ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc2e6cd2b5022634b2d298a73b0286994a6df46567950917be5b075f29968d9d
3
+ size 13107200
triton_models/weights/layers.10.attention.wo.0.scales_zeros ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9fe0accad6e1e3df44cff0bd6e98604ea5f8036d42faa2ff6d33de628d4a748e
3
+ size 819200