sallywww committed
Commit
bf1df8f
1 Parent(s): 5e1808a

First model version

README.md ADDED
@@ -0,0 +1,24 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - _load_in_8bit: True
+ - _load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ - bnb_4bit_quant_storage: uint8
+ - load_in_4bit: False
+ - load_in_8bit: True
+ ### Framework versions
+
+
+ - PEFT 0.5.0
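The quantization settings listed above map directly onto `transformers`' `BitsAndBytesConfig`. A minimal sketch (not part of this commit; the base-model id is taken from `adapter_config.json` below) of loading the base model with the same 8-bit settings:

```python
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the bitsandbytes config recorded in the README above.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
)

base_model = AutoModelForCausalLM.from_pretrained(
    "sallywww/Llama-7B",            # base_model_name_or_path from adapter_config.json
    quantization_config=bnb_config,
    device_map="auto",
)
```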
adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "sallywww/Llama-7B",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
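With the base model loaded as in the previous sketch, the LoRA adapter described by this config (r=8, lora_alpha=32, targeting `q_proj`/`v_proj`) can be attached with PEFT. The adapter path below is a placeholder for a local clone of this repository, not a confirmed repo id:

```python
from peft import PeftModel

# "path/to/adapter" is a placeholder for a local checkout containing
# adapter_config.json and adapter_model.bin from this commit.
model = PeftModel.from_pretrained(base_model, "path/to/adapter")
model.eval()  # adapter_config.json sets inference_mode: true
```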
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4fa326ce5833dca16fa6fa346b446a151f38350a6b93c5d4f4f93ddd50bfbb28
+ size 16823434
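`adapter_model.bin` is stored as a Git LFS pointer: `oid` is the SHA-256 of the real file and `size` is its byte count. A small sketch (assuming the actual weights have been fetched, e.g. with `git lfs pull`) for checking a local copy against this pointer:

```python
import hashlib
from pathlib import Path

def matches_lfs_pointer(path: str, expected_sha256: str, expected_size: int) -> bool:
    """Return True if the file at `path` matches the LFS pointer's oid and size."""
    data = Path(path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_sha256

# Values copied from the pointer file above.
print(matches_lfs_pointer(
    "adapter_model.bin",
    "4fa326ce5833dca16fa6fa346b446a151f38350a6b93c5d4f4f93ddd50bfbb28",
    16823434,
))
```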
checkpoint-1000/README.md ADDED
@@ -0,0 +1,24 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - _load_in_8bit: True
+ - _load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ - bnb_4bit_quant_storage: uint8
+ - load_in_4bit: False
+ - load_in_8bit: True
+ ### Framework versions
+
+
+ - PEFT 0.5.0
checkpoint-1000/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "sallywww/Llama-7B",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3659f379e9d178076ed3cb7f0de98dc7d28160b63d57279e248d1cf3678f0c6
+ size 16794200
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:212f93d9e51fb6f0f2e8f97814e0cda9eb7e5569467d1c576f323ffc4f16d58f
+ size 33662074
checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a5039698343e4bb12f6f68ea2a34c21c1b00202d00f8ee74f5a2d610019f551
+ size 14244
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24c71b66b11d713aa5d2e6e370550d880a0c43866355fe5bed78940416e377e7
+ size 1064
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,371 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 17.185821697099893,
5
+ "eval_steps": 500,
6
+ "global_step": 1000,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.34,
13
+ "grad_norm": 0.6707318425178528,
14
+ "learning_rate": 1.97816091954023e-05,
15
+ "loss": 2.2888,
16
+ "step": 20
17
+ },
18
+ {
19
+ "epoch": 0.69,
20
+ "grad_norm": NaN,
21
+ "learning_rate": 1.9574712643678162e-05,
22
+ "loss": 2.0848,
23
+ "step": 40
24
+ },
25
+ {
26
+ "epoch": 1.03,
27
+ "grad_norm": 0.7721680402755737,
28
+ "learning_rate": 1.9344827586206897e-05,
29
+ "loss": 2.0044,
30
+ "step": 60
31
+ },
32
+ {
33
+ "epoch": 1.37,
34
+ "grad_norm": 1.1140433549880981,
35
+ "learning_rate": 1.9126436781609195e-05,
36
+ "loss": 1.8016,
37
+ "step": 80
38
+ },
39
+ {
40
+ "epoch": 1.72,
41
+ "grad_norm": 0.7205075621604919,
42
+ "learning_rate": 1.8896551724137934e-05,
43
+ "loss": 1.7217,
44
+ "step": 100
45
+ },
46
+ {
47
+ "epoch": 2.06,
48
+ "grad_norm": 0.8933233618736267,
49
+ "learning_rate": 1.866666666666667e-05,
50
+ "loss": 1.5705,
51
+ "step": 120
52
+ },
53
+ {
54
+ "epoch": 2.41,
55
+ "grad_norm": 0.7114273905754089,
56
+ "learning_rate": 1.8436781609195404e-05,
57
+ "loss": 1.4006,
58
+ "step": 140
59
+ },
60
+ {
61
+ "epoch": 2.75,
62
+ "grad_norm": 0.7229479551315308,
63
+ "learning_rate": 1.820689655172414e-05,
64
+ "loss": 1.3137,
65
+ "step": 160
66
+ },
67
+ {
68
+ "epoch": 3.09,
69
+ "grad_norm": 0.9370490908622742,
70
+ "learning_rate": 1.7977011494252874e-05,
71
+ "loss": 1.1898,
72
+ "step": 180
73
+ },
74
+ {
75
+ "epoch": 3.44,
76
+ "grad_norm": 0.6051978468894958,
77
+ "learning_rate": 1.774712643678161e-05,
78
+ "loss": 1.1229,
79
+ "step": 200
80
+ },
81
+ {
82
+ "epoch": 3.78,
83
+ "grad_norm": 0.6857028007507324,
84
+ "learning_rate": 1.7517241379310347e-05,
85
+ "loss": 1.051,
86
+ "step": 220
87
+ },
88
+ {
89
+ "epoch": 4.12,
90
+ "grad_norm": 0.6715748310089111,
91
+ "learning_rate": 1.7287356321839082e-05,
92
+ "loss": 0.9894,
93
+ "step": 240
94
+ },
95
+ {
96
+ "epoch": 4.47,
97
+ "grad_norm": 0.5918118953704834,
98
+ "learning_rate": 1.7057471264367817e-05,
99
+ "loss": 0.9687,
100
+ "step": 260
101
+ },
102
+ {
103
+ "epoch": 4.81,
104
+ "grad_norm": 0.6621690392494202,
105
+ "learning_rate": 1.6827586206896552e-05,
106
+ "loss": 0.9199,
107
+ "step": 280
108
+ },
109
+ {
110
+ "epoch": 5.16,
111
+ "grad_norm": 0.6697206497192383,
112
+ "learning_rate": 1.659770114942529e-05,
113
+ "loss": 0.9303,
114
+ "step": 300
115
+ },
116
+ {
117
+ "epoch": 5.5,
118
+ "grad_norm": 0.8184316158294678,
119
+ "learning_rate": 1.6367816091954025e-05,
120
+ "loss": 0.8898,
121
+ "step": 320
122
+ },
123
+ {
124
+ "epoch": 5.84,
125
+ "grad_norm": 0.6429987549781799,
126
+ "learning_rate": 1.613793103448276e-05,
127
+ "loss": 0.8623,
128
+ "step": 340
129
+ },
130
+ {
131
+ "epoch": 6.19,
132
+ "grad_norm": 0.7518043518066406,
133
+ "learning_rate": 1.5908045977011495e-05,
134
+ "loss": 0.8239,
135
+ "step": 360
136
+ },
137
+ {
138
+ "epoch": 6.53,
139
+ "grad_norm": 0.6667824983596802,
140
+ "learning_rate": 1.567816091954023e-05,
141
+ "loss": 0.8119,
142
+ "step": 380
143
+ },
144
+ {
145
+ "epoch": 6.87,
146
+ "grad_norm": 0.8569457530975342,
147
+ "learning_rate": 1.5448275862068965e-05,
148
+ "loss": 0.8139,
149
+ "step": 400
150
+ },
151
+ {
152
+ "epoch": 7.22,
153
+ "grad_norm": 0.7754850387573242,
154
+ "learning_rate": 1.5218390804597702e-05,
155
+ "loss": 0.7835,
156
+ "step": 420
157
+ },
158
+ {
159
+ "epoch": 7.56,
160
+ "grad_norm": 1.159196138381958,
161
+ "learning_rate": 1.4988505747126439e-05,
162
+ "loss": 0.7546,
163
+ "step": 440
164
+ },
165
+ {
166
+ "epoch": 7.91,
167
+ "grad_norm": 1.119764804840088,
168
+ "learning_rate": 1.4758620689655174e-05,
169
+ "loss": 0.7571,
170
+ "step": 460
171
+ },
172
+ {
173
+ "epoch": 8.25,
174
+ "grad_norm": 1.3600786924362183,
175
+ "learning_rate": 1.452873563218391e-05,
176
+ "loss": 0.7451,
177
+ "step": 480
178
+ },
179
+ {
180
+ "epoch": 8.59,
181
+ "grad_norm": 0.7608994245529175,
182
+ "learning_rate": 1.4298850574712644e-05,
183
+ "loss": 0.7109,
184
+ "step": 500
185
+ },
186
+ {
187
+ "epoch": 8.94,
188
+ "grad_norm": 1.0172290802001953,
189
+ "learning_rate": 1.406896551724138e-05,
190
+ "loss": 0.7228,
191
+ "step": 520
192
+ },
193
+ {
194
+ "epoch": 9.28,
195
+ "grad_norm": 1.042607069015503,
196
+ "learning_rate": 1.3839080459770115e-05,
197
+ "loss": 0.6939,
198
+ "step": 540
199
+ },
200
+ {
201
+ "epoch": 9.62,
202
+ "grad_norm": 0.8913071751594543,
203
+ "learning_rate": 1.3609195402298852e-05,
204
+ "loss": 0.6721,
205
+ "step": 560
206
+ },
207
+ {
208
+ "epoch": 9.97,
209
+ "grad_norm": 1.4283536672592163,
210
+ "learning_rate": 1.3379310344827587e-05,
211
+ "loss": 0.681,
212
+ "step": 580
213
+ },
214
+ {
215
+ "epoch": 10.31,
216
+ "grad_norm": 1.1445728540420532,
217
+ "learning_rate": 1.3149425287356324e-05,
218
+ "loss": 0.6484,
219
+ "step": 600
220
+ },
221
+ {
222
+ "epoch": 10.66,
223
+ "grad_norm": 1.425697684288025,
224
+ "learning_rate": 1.2919540229885059e-05,
225
+ "loss": 0.6558,
226
+ "step": 620
227
+ },
228
+ {
229
+ "epoch": 11.0,
230
+ "grad_norm": 0.8931305408477783,
231
+ "learning_rate": 1.2689655172413795e-05,
232
+ "loss": 0.6642,
233
+ "step": 640
234
+ },
235
+ {
236
+ "epoch": 11.34,
237
+ "grad_norm": 1.0374151468276978,
238
+ "learning_rate": 1.2459770114942529e-05,
239
+ "loss": 0.6202,
240
+ "step": 660
241
+ },
242
+ {
243
+ "epoch": 11.69,
244
+ "grad_norm": 1.628758430480957,
245
+ "learning_rate": 1.2229885057471265e-05,
246
+ "loss": 0.6163,
247
+ "step": 680
248
+ },
249
+ {
250
+ "epoch": 12.03,
251
+ "grad_norm": 1.3881452083587646,
252
+ "learning_rate": 1.2e-05,
253
+ "loss": 0.6364,
254
+ "step": 700
255
+ },
256
+ {
257
+ "epoch": 12.37,
258
+ "grad_norm": 1.0961302518844604,
259
+ "learning_rate": 1.1770114942528737e-05,
260
+ "loss": 0.5963,
261
+ "step": 720
262
+ },
263
+ {
264
+ "epoch": 12.72,
265
+ "grad_norm": 1.1812736988067627,
266
+ "learning_rate": 1.1540229885057472e-05,
267
+ "loss": 0.6102,
268
+ "step": 740
269
+ },
270
+ {
271
+ "epoch": 13.06,
272
+ "grad_norm": 1.103151559829712,
273
+ "learning_rate": 1.1310344827586209e-05,
274
+ "loss": 0.5965,
275
+ "step": 760
276
+ },
277
+ {
278
+ "epoch": 13.4,
279
+ "grad_norm": 1.108560562133789,
280
+ "learning_rate": 1.1080459770114944e-05,
281
+ "loss": 0.58,
282
+ "step": 780
283
+ },
284
+ {
285
+ "epoch": 13.75,
286
+ "grad_norm": 1.322364091873169,
287
+ "learning_rate": 1.085057471264368e-05,
288
+ "loss": 0.5707,
289
+ "step": 800
290
+ },
291
+ {
292
+ "epoch": 14.09,
293
+ "grad_norm": 1.2036404609680176,
294
+ "learning_rate": 1.0620689655172414e-05,
295
+ "loss": 0.5781,
296
+ "step": 820
297
+ },
298
+ {
299
+ "epoch": 14.44,
300
+ "grad_norm": 1.46902596950531,
301
+ "learning_rate": 1.039080459770115e-05,
302
+ "loss": 0.5413,
303
+ "step": 840
304
+ },
305
+ {
306
+ "epoch": 14.78,
307
+ "grad_norm": 0.9223589301109314,
308
+ "learning_rate": 1.0160919540229885e-05,
309
+ "loss": 0.5686,
310
+ "step": 860
311
+ },
312
+ {
313
+ "epoch": 15.12,
314
+ "grad_norm": 1.7452529668807983,
315
+ "learning_rate": 9.931034482758622e-06,
316
+ "loss": 0.5538,
317
+ "step": 880
318
+ },
319
+ {
320
+ "epoch": 15.47,
321
+ "grad_norm": 1.0680702924728394,
322
+ "learning_rate": 9.701149425287357e-06,
323
+ "loss": 0.5402,
324
+ "step": 900
325
+ },
326
+ {
327
+ "epoch": 15.81,
328
+ "grad_norm": 1.4106242656707764,
329
+ "learning_rate": 9.471264367816094e-06,
330
+ "loss": 0.5629,
331
+ "step": 920
332
+ },
333
+ {
334
+ "epoch": 16.15,
335
+ "grad_norm": 1.7341551780700684,
336
+ "learning_rate": 9.241379310344829e-06,
337
+ "loss": 0.5538,
338
+ "step": 940
339
+ },
340
+ {
341
+ "epoch": 16.5,
342
+ "grad_norm": 2.115643262863159,
343
+ "learning_rate": 9.011494252873564e-06,
344
+ "loss": 0.5481,
345
+ "step": 960
346
+ },
347
+ {
348
+ "epoch": 16.84,
349
+ "grad_norm": 1.1589787006378174,
350
+ "learning_rate": 8.7816091954023e-06,
351
+ "loss": 0.4981,
352
+ "step": 980
353
+ },
354
+ {
355
+ "epoch": 17.19,
356
+ "grad_norm": 1.0696042776107788,
357
+ "learning_rate": 8.551724137931035e-06,
358
+ "loss": 0.5041,
359
+ "step": 1000
360
+ }
361
+ ],
362
+ "logging_steps": 20,
363
+ "max_steps": 1740,
364
+ "num_input_tokens_seen": 0,
365
+ "num_train_epochs": 30,
366
+ "save_steps": 500,
367
+ "total_flos": 2.5991277871104e+18,
368
+ "train_batch_size": 1,
369
+ "trial_name": null,
370
+ "trial_params": null
371
+ }
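`trainer_state.json` records one `log_history` entry every 20 steps (loss, learning rate, gradient norm). A short sketch (assuming this checkpoint directory has been cloned locally) for printing the logged loss curve:

```python
import json

# Path assumes a local clone of this repository.
with open("checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:  # skip any summary entries without a loss field
        print(f"step {entry['step']:>4}  epoch {entry['epoch']:>6}  loss {entry['loss']:.4f}")
```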
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9925cf06613e2d8314bcf7c4157dc698e4db00ef68124ecb6a4078939ad1b743
+ size 4984
checkpoint-1500/README.md ADDED
@@ -0,0 +1,24 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - _load_in_8bit: True
+ - _load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ - bnb_4bit_quant_storage: uint8
+ - load_in_4bit: False
+ - load_in_8bit: True
+ ### Framework versions
+
+
+ - PEFT 0.5.0
checkpoint-1500/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "sallywww/Llama-7B",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-1500/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b0945f544eed1bd74a72c478363d87aa3677224682d469da48d0627dac3e1e9
+ size 16794200
checkpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7fef468176fba47b31157c2c5ce0939d3f9f5eaf5704ef48af5fd83c84842614
+ size 33662074
checkpoint-1500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a6ffbe6b10a9705a7dfabdc5b0b0ff498c4456a7a43f4c08dc0c5529d5ed88e
+ size 14244
checkpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:27c3b3bc990873f053ac72c0d455eda6d5f22024555509d4b2f0fe27ec501f60
+ size 1064
checkpoint-1500/trainer_state.json ADDED
@@ -0,0 +1,546 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 25.778732545649838,
5
+ "eval_steps": 500,
6
+ "global_step": 1500,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.34,
13
+ "grad_norm": 0.6707318425178528,
14
+ "learning_rate": 1.97816091954023e-05,
15
+ "loss": 2.2888,
16
+ "step": 20
17
+ },
18
+ {
19
+ "epoch": 0.69,
20
+ "grad_norm": NaN,
21
+ "learning_rate": 1.9574712643678162e-05,
22
+ "loss": 2.0848,
23
+ "step": 40
24
+ },
25
+ {
26
+ "epoch": 1.03,
27
+ "grad_norm": 0.7721680402755737,
28
+ "learning_rate": 1.9344827586206897e-05,
29
+ "loss": 2.0044,
30
+ "step": 60
31
+ },
32
+ {
33
+ "epoch": 1.37,
34
+ "grad_norm": 1.1140433549880981,
35
+ "learning_rate": 1.9126436781609195e-05,
36
+ "loss": 1.8016,
37
+ "step": 80
38
+ },
39
+ {
40
+ "epoch": 1.72,
41
+ "grad_norm": 0.7205075621604919,
42
+ "learning_rate": 1.8896551724137934e-05,
43
+ "loss": 1.7217,
44
+ "step": 100
45
+ },
46
+ {
47
+ "epoch": 2.06,
48
+ "grad_norm": 0.8933233618736267,
49
+ "learning_rate": 1.866666666666667e-05,
50
+ "loss": 1.5705,
51
+ "step": 120
52
+ },
53
+ {
54
+ "epoch": 2.41,
55
+ "grad_norm": 0.7114273905754089,
56
+ "learning_rate": 1.8436781609195404e-05,
57
+ "loss": 1.4006,
58
+ "step": 140
59
+ },
60
+ {
61
+ "epoch": 2.75,
62
+ "grad_norm": 0.7229479551315308,
63
+ "learning_rate": 1.820689655172414e-05,
64
+ "loss": 1.3137,
65
+ "step": 160
66
+ },
67
+ {
68
+ "epoch": 3.09,
69
+ "grad_norm": 0.9370490908622742,
70
+ "learning_rate": 1.7977011494252874e-05,
71
+ "loss": 1.1898,
72
+ "step": 180
73
+ },
74
+ {
75
+ "epoch": 3.44,
76
+ "grad_norm": 0.6051978468894958,
77
+ "learning_rate": 1.774712643678161e-05,
78
+ "loss": 1.1229,
79
+ "step": 200
80
+ },
81
+ {
82
+ "epoch": 3.78,
83
+ "grad_norm": 0.6857028007507324,
84
+ "learning_rate": 1.7517241379310347e-05,
85
+ "loss": 1.051,
86
+ "step": 220
87
+ },
88
+ {
89
+ "epoch": 4.12,
90
+ "grad_norm": 0.6715748310089111,
91
+ "learning_rate": 1.7287356321839082e-05,
92
+ "loss": 0.9894,
93
+ "step": 240
94
+ },
95
+ {
96
+ "epoch": 4.47,
97
+ "grad_norm": 0.5918118953704834,
98
+ "learning_rate": 1.7057471264367817e-05,
99
+ "loss": 0.9687,
100
+ "step": 260
101
+ },
102
+ {
103
+ "epoch": 4.81,
104
+ "grad_norm": 0.6621690392494202,
105
+ "learning_rate": 1.6827586206896552e-05,
106
+ "loss": 0.9199,
107
+ "step": 280
108
+ },
109
+ {
110
+ "epoch": 5.16,
111
+ "grad_norm": 0.6697206497192383,
112
+ "learning_rate": 1.659770114942529e-05,
113
+ "loss": 0.9303,
114
+ "step": 300
115
+ },
116
+ {
117
+ "epoch": 5.5,
118
+ "grad_norm": 0.8184316158294678,
119
+ "learning_rate": 1.6367816091954025e-05,
120
+ "loss": 0.8898,
121
+ "step": 320
122
+ },
123
+ {
124
+ "epoch": 5.84,
125
+ "grad_norm": 0.6429987549781799,
126
+ "learning_rate": 1.613793103448276e-05,
127
+ "loss": 0.8623,
128
+ "step": 340
129
+ },
130
+ {
131
+ "epoch": 6.19,
132
+ "grad_norm": 0.7518043518066406,
133
+ "learning_rate": 1.5908045977011495e-05,
134
+ "loss": 0.8239,
135
+ "step": 360
136
+ },
137
+ {
138
+ "epoch": 6.53,
139
+ "grad_norm": 0.6667824983596802,
140
+ "learning_rate": 1.567816091954023e-05,
141
+ "loss": 0.8119,
142
+ "step": 380
143
+ },
144
+ {
145
+ "epoch": 6.87,
146
+ "grad_norm": 0.8569457530975342,
147
+ "learning_rate": 1.5448275862068965e-05,
148
+ "loss": 0.8139,
149
+ "step": 400
150
+ },
151
+ {
152
+ "epoch": 7.22,
153
+ "grad_norm": 0.7754850387573242,
154
+ "learning_rate": 1.5218390804597702e-05,
155
+ "loss": 0.7835,
156
+ "step": 420
157
+ },
158
+ {
159
+ "epoch": 7.56,
160
+ "grad_norm": 1.159196138381958,
161
+ "learning_rate": 1.4988505747126439e-05,
162
+ "loss": 0.7546,
163
+ "step": 440
164
+ },
165
+ {
166
+ "epoch": 7.91,
167
+ "grad_norm": 1.119764804840088,
168
+ "learning_rate": 1.4758620689655174e-05,
169
+ "loss": 0.7571,
170
+ "step": 460
171
+ },
172
+ {
173
+ "epoch": 8.25,
174
+ "grad_norm": 1.3600786924362183,
175
+ "learning_rate": 1.452873563218391e-05,
176
+ "loss": 0.7451,
177
+ "step": 480
178
+ },
179
+ {
180
+ "epoch": 8.59,
181
+ "grad_norm": 0.7608994245529175,
182
+ "learning_rate": 1.4298850574712644e-05,
183
+ "loss": 0.7109,
184
+ "step": 500
185
+ },
186
+ {
187
+ "epoch": 8.94,
188
+ "grad_norm": 1.0172290802001953,
189
+ "learning_rate": 1.406896551724138e-05,
190
+ "loss": 0.7228,
191
+ "step": 520
192
+ },
193
+ {
194
+ "epoch": 9.28,
195
+ "grad_norm": 1.042607069015503,
196
+ "learning_rate": 1.3839080459770115e-05,
197
+ "loss": 0.6939,
198
+ "step": 540
199
+ },
200
+ {
201
+ "epoch": 9.62,
202
+ "grad_norm": 0.8913071751594543,
203
+ "learning_rate": 1.3609195402298852e-05,
204
+ "loss": 0.6721,
205
+ "step": 560
206
+ },
207
+ {
208
+ "epoch": 9.97,
209
+ "grad_norm": 1.4283536672592163,
210
+ "learning_rate": 1.3379310344827587e-05,
211
+ "loss": 0.681,
212
+ "step": 580
213
+ },
214
+ {
215
+ "epoch": 10.31,
216
+ "grad_norm": 1.1445728540420532,
217
+ "learning_rate": 1.3149425287356324e-05,
218
+ "loss": 0.6484,
219
+ "step": 600
220
+ },
221
+ {
222
+ "epoch": 10.66,
223
+ "grad_norm": 1.425697684288025,
224
+ "learning_rate": 1.2919540229885059e-05,
225
+ "loss": 0.6558,
226
+ "step": 620
227
+ },
228
+ {
229
+ "epoch": 11.0,
230
+ "grad_norm": 0.8931305408477783,
231
+ "learning_rate": 1.2689655172413795e-05,
232
+ "loss": 0.6642,
233
+ "step": 640
234
+ },
235
+ {
236
+ "epoch": 11.34,
237
+ "grad_norm": 1.0374151468276978,
238
+ "learning_rate": 1.2459770114942529e-05,
239
+ "loss": 0.6202,
240
+ "step": 660
241
+ },
242
+ {
243
+ "epoch": 11.69,
244
+ "grad_norm": 1.628758430480957,
245
+ "learning_rate": 1.2229885057471265e-05,
246
+ "loss": 0.6163,
247
+ "step": 680
248
+ },
249
+ {
250
+ "epoch": 12.03,
251
+ "grad_norm": 1.3881452083587646,
252
+ "learning_rate": 1.2e-05,
253
+ "loss": 0.6364,
254
+ "step": 700
255
+ },
256
+ {
257
+ "epoch": 12.37,
258
+ "grad_norm": 1.0961302518844604,
259
+ "learning_rate": 1.1770114942528737e-05,
260
+ "loss": 0.5963,
261
+ "step": 720
262
+ },
263
+ {
264
+ "epoch": 12.72,
265
+ "grad_norm": 1.1812736988067627,
266
+ "learning_rate": 1.1540229885057472e-05,
267
+ "loss": 0.6102,
268
+ "step": 740
269
+ },
270
+ {
271
+ "epoch": 13.06,
272
+ "grad_norm": 1.103151559829712,
273
+ "learning_rate": 1.1310344827586209e-05,
274
+ "loss": 0.5965,
275
+ "step": 760
276
+ },
277
+ {
278
+ "epoch": 13.4,
279
+ "grad_norm": 1.108560562133789,
280
+ "learning_rate": 1.1080459770114944e-05,
281
+ "loss": 0.58,
282
+ "step": 780
283
+ },
284
+ {
285
+ "epoch": 13.75,
286
+ "grad_norm": 1.322364091873169,
287
+ "learning_rate": 1.085057471264368e-05,
288
+ "loss": 0.5707,
289
+ "step": 800
290
+ },
291
+ {
292
+ "epoch": 14.09,
293
+ "grad_norm": 1.2036404609680176,
294
+ "learning_rate": 1.0620689655172414e-05,
295
+ "loss": 0.5781,
296
+ "step": 820
297
+ },
298
+ {
299
+ "epoch": 14.44,
300
+ "grad_norm": 1.46902596950531,
301
+ "learning_rate": 1.039080459770115e-05,
302
+ "loss": 0.5413,
303
+ "step": 840
304
+ },
305
+ {
306
+ "epoch": 14.78,
307
+ "grad_norm": 0.9223589301109314,
308
+ "learning_rate": 1.0160919540229885e-05,
309
+ "loss": 0.5686,
310
+ "step": 860
311
+ },
312
+ {
313
+ "epoch": 15.12,
314
+ "grad_norm": 1.7452529668807983,
315
+ "learning_rate": 9.931034482758622e-06,
316
+ "loss": 0.5538,
317
+ "step": 880
318
+ },
319
+ {
320
+ "epoch": 15.47,
321
+ "grad_norm": 1.0680702924728394,
322
+ "learning_rate": 9.701149425287357e-06,
323
+ "loss": 0.5402,
324
+ "step": 900
325
+ },
326
+ {
327
+ "epoch": 15.81,
328
+ "grad_norm": 1.4106242656707764,
329
+ "learning_rate": 9.471264367816094e-06,
330
+ "loss": 0.5629,
331
+ "step": 920
332
+ },
333
+ {
334
+ "epoch": 16.15,
335
+ "grad_norm": 1.7341551780700684,
336
+ "learning_rate": 9.241379310344829e-06,
337
+ "loss": 0.5538,
338
+ "step": 940
339
+ },
340
+ {
341
+ "epoch": 16.5,
342
+ "grad_norm": 2.115643262863159,
343
+ "learning_rate": 9.011494252873564e-06,
344
+ "loss": 0.5481,
345
+ "step": 960
346
+ },
347
+ {
348
+ "epoch": 16.84,
349
+ "grad_norm": 1.1589787006378174,
350
+ "learning_rate": 8.7816091954023e-06,
351
+ "loss": 0.4981,
352
+ "step": 980
353
+ },
354
+ {
355
+ "epoch": 17.19,
356
+ "grad_norm": 1.0696042776107788,
357
+ "learning_rate": 8.551724137931035e-06,
358
+ "loss": 0.5041,
359
+ "step": 1000
360
+ },
361
+ {
362
+ "epoch": 17.53,
363
+ "grad_norm": 1.892269253730774,
364
+ "learning_rate": 8.32183908045977e-06,
365
+ "loss": 0.4956,
366
+ "step": 1020
367
+ },
368
+ {
369
+ "epoch": 17.87,
370
+ "grad_norm": 2.214688301086426,
371
+ "learning_rate": 8.091954022988507e-06,
372
+ "loss": 0.5363,
373
+ "step": 1040
374
+ },
375
+ {
376
+ "epoch": 18.22,
377
+ "grad_norm": 1.271893858909607,
378
+ "learning_rate": 7.862068965517242e-06,
379
+ "loss": 0.5195,
380
+ "step": 1060
381
+ },
382
+ {
383
+ "epoch": 18.56,
384
+ "grad_norm": 0.9383485913276672,
385
+ "learning_rate": 7.632183908045979e-06,
386
+ "loss": 0.5002,
387
+ "step": 1080
388
+ },
389
+ {
390
+ "epoch": 18.9,
391
+ "grad_norm": 1.749745488166809,
392
+ "learning_rate": 7.402298850574713e-06,
393
+ "loss": 0.4841,
394
+ "step": 1100
395
+ },
396
+ {
397
+ "epoch": 19.25,
398
+ "grad_norm": 1.2551345825195312,
399
+ "learning_rate": 7.172413793103449e-06,
400
+ "loss": 0.5033,
401
+ "step": 1120
402
+ },
403
+ {
404
+ "epoch": 19.59,
405
+ "grad_norm": 1.0362412929534912,
406
+ "learning_rate": 6.9425287356321845e-06,
407
+ "loss": 0.5138,
408
+ "step": 1140
409
+ },
410
+ {
411
+ "epoch": 19.94,
412
+ "grad_norm": 1.483361840248108,
413
+ "learning_rate": 6.71264367816092e-06,
414
+ "loss": 0.4826,
415
+ "step": 1160
416
+ },
417
+ {
418
+ "epoch": 20.28,
419
+ "grad_norm": 1.792438268661499,
420
+ "learning_rate": 6.482758620689655e-06,
421
+ "loss": 0.4544,
422
+ "step": 1180
423
+ },
424
+ {
425
+ "epoch": 20.62,
426
+ "grad_norm": 1.1103723049163818,
427
+ "learning_rate": 6.252873563218391e-06,
428
+ "loss": 0.5097,
429
+ "step": 1200
430
+ },
431
+ {
432
+ "epoch": 20.97,
433
+ "grad_norm": 1.3724958896636963,
434
+ "learning_rate": 6.022988505747127e-06,
435
+ "loss": 0.4827,
436
+ "step": 1220
437
+ },
438
+ {
439
+ "epoch": 21.31,
440
+ "grad_norm": 1.9351897239685059,
441
+ "learning_rate": 5.793103448275863e-06,
442
+ "loss": 0.4772,
443
+ "step": 1240
444
+ },
445
+ {
446
+ "epoch": 21.65,
447
+ "grad_norm": 1.3644013404846191,
448
+ "learning_rate": 5.563218390804598e-06,
449
+ "loss": 0.485,
450
+ "step": 1260
451
+ },
452
+ {
453
+ "epoch": 22.0,
454
+ "grad_norm": 1.4544987678527832,
455
+ "learning_rate": 5.333333333333334e-06,
456
+ "loss": 0.468,
457
+ "step": 1280
458
+ },
459
+ {
460
+ "epoch": 22.34,
461
+ "grad_norm": 1.7704741954803467,
462
+ "learning_rate": 5.1034482758620695e-06,
463
+ "loss": 0.4653,
464
+ "step": 1300
465
+ },
466
+ {
467
+ "epoch": 22.69,
468
+ "grad_norm": 1.2633339166641235,
469
+ "learning_rate": 4.873563218390805e-06,
470
+ "loss": 0.477,
471
+ "step": 1320
472
+ },
473
+ {
474
+ "epoch": 23.03,
475
+ "grad_norm": 1.43886399269104,
476
+ "learning_rate": 4.643678160919541e-06,
477
+ "loss": 0.4724,
478
+ "step": 1340
479
+ },
480
+ {
481
+ "epoch": 23.37,
482
+ "grad_norm": 1.1346815824508667,
483
+ "learning_rate": 4.413793103448276e-06,
484
+ "loss": 0.4712,
485
+ "step": 1360
486
+ },
487
+ {
488
+ "epoch": 23.72,
489
+ "grad_norm": 1.289389967918396,
490
+ "learning_rate": 4.183908045977012e-06,
491
+ "loss": 0.4661,
492
+ "step": 1380
493
+ },
494
+ {
495
+ "epoch": 24.06,
496
+ "grad_norm": 1.477211833000183,
497
+ "learning_rate": 3.954022988505747e-06,
498
+ "loss": 0.4443,
499
+ "step": 1400
500
+ },
501
+ {
502
+ "epoch": 24.4,
503
+ "grad_norm": 2.224083185195923,
504
+ "learning_rate": 3.7241379310344832e-06,
505
+ "loss": 0.4445,
506
+ "step": 1420
507
+ },
508
+ {
509
+ "epoch": 24.75,
510
+ "grad_norm": 1.6573207378387451,
511
+ "learning_rate": 3.4942528735632187e-06,
512
+ "loss": 0.4756,
513
+ "step": 1440
514
+ },
515
+ {
516
+ "epoch": 25.09,
517
+ "grad_norm": 2.269866466522217,
518
+ "learning_rate": 3.2643678160919545e-06,
519
+ "loss": 0.4535,
520
+ "step": 1460
521
+ },
522
+ {
523
+ "epoch": 25.44,
524
+ "grad_norm": 1.2528423070907593,
525
+ "learning_rate": 3.03448275862069e-06,
526
+ "loss": 0.4881,
527
+ "step": 1480
528
+ },
529
+ {
530
+ "epoch": 25.78,
531
+ "grad_norm": 1.2892448902130127,
532
+ "learning_rate": 2.8045977011494257e-06,
533
+ "loss": 0.4226,
534
+ "step": 1500
535
+ }
536
+ ],
537
+ "logging_steps": 20,
538
+ "max_steps": 1740,
539
+ "num_input_tokens_seen": 0,
540
+ "num_train_epochs": 30,
541
+ "save_steps": 500,
542
+ "total_flos": 3.8986916806656e+18,
543
+ "train_batch_size": 1,
544
+ "trial_name": null,
545
+ "trial_params": null
546
+ }
checkpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9925cf06613e2d8314bcf7c4157dc698e4db00ef68124ecb6a4078939ad1b743
+ size 4984
checkpoint-500/README.md ADDED
@@ -0,0 +1,24 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - _load_in_8bit: True
+ - _load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ - bnb_4bit_quant_storage: uint8
+ - load_in_4bit: False
+ - load_in_8bit: True
+ ### Framework versions
+
+
+ - PEFT 0.5.0
checkpoint-500/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "sallywww/Llama-7B",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
checkpoint-500/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8836c557c53a0af5e9d9d0e899ed218fbacc6bf7bf67f5622257dd7c56306cdb
+ size 16794200
checkpoint-500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03fddfa6dda8a20753f48efe821aed960f8505727c40032f2690964b3cd1deb3
+ size 33662074
checkpoint-500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:790e6877bf6ec40bcdc6fbde63e117ebe8304171f41280355984b34905baf59b
+ size 14244
checkpoint-500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92a4b7696dc26a7eb67205c0c77ad4444cb3fcc7e6ed9e426d1226f52b45d396
+ size 1064
checkpoint-500/trainer_state.json ADDED
@@ -0,0 +1,196 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 8.592910848549947,
5
+ "eval_steps": 500,
6
+ "global_step": 500,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.34,
13
+ "grad_norm": 0.6707318425178528,
14
+ "learning_rate": 1.97816091954023e-05,
15
+ "loss": 2.2888,
16
+ "step": 20
17
+ },
18
+ {
19
+ "epoch": 0.69,
20
+ "grad_norm": NaN,
21
+ "learning_rate": 1.9574712643678162e-05,
22
+ "loss": 2.0848,
23
+ "step": 40
24
+ },
25
+ {
26
+ "epoch": 1.03,
27
+ "grad_norm": 0.7721680402755737,
28
+ "learning_rate": 1.9344827586206897e-05,
29
+ "loss": 2.0044,
30
+ "step": 60
31
+ },
32
+ {
33
+ "epoch": 1.37,
34
+ "grad_norm": 1.1140433549880981,
35
+ "learning_rate": 1.9126436781609195e-05,
36
+ "loss": 1.8016,
37
+ "step": 80
38
+ },
39
+ {
40
+ "epoch": 1.72,
41
+ "grad_norm": 0.7205075621604919,
42
+ "learning_rate": 1.8896551724137934e-05,
43
+ "loss": 1.7217,
44
+ "step": 100
45
+ },
46
+ {
47
+ "epoch": 2.06,
48
+ "grad_norm": 0.8933233618736267,
49
+ "learning_rate": 1.866666666666667e-05,
50
+ "loss": 1.5705,
51
+ "step": 120
52
+ },
53
+ {
54
+ "epoch": 2.41,
55
+ "grad_norm": 0.7114273905754089,
56
+ "learning_rate": 1.8436781609195404e-05,
57
+ "loss": 1.4006,
58
+ "step": 140
59
+ },
60
+ {
61
+ "epoch": 2.75,
62
+ "grad_norm": 0.7229479551315308,
63
+ "learning_rate": 1.820689655172414e-05,
64
+ "loss": 1.3137,
65
+ "step": 160
66
+ },
67
+ {
68
+ "epoch": 3.09,
69
+ "grad_norm": 0.9370490908622742,
70
+ "learning_rate": 1.7977011494252874e-05,
71
+ "loss": 1.1898,
72
+ "step": 180
73
+ },
74
+ {
75
+ "epoch": 3.44,
76
+ "grad_norm": 0.6051978468894958,
77
+ "learning_rate": 1.774712643678161e-05,
78
+ "loss": 1.1229,
79
+ "step": 200
80
+ },
81
+ {
82
+ "epoch": 3.78,
83
+ "grad_norm": 0.6857028007507324,
84
+ "learning_rate": 1.7517241379310347e-05,
85
+ "loss": 1.051,
86
+ "step": 220
87
+ },
88
+ {
89
+ "epoch": 4.12,
90
+ "grad_norm": 0.6715748310089111,
91
+ "learning_rate": 1.7287356321839082e-05,
92
+ "loss": 0.9894,
93
+ "step": 240
94
+ },
95
+ {
96
+ "epoch": 4.47,
97
+ "grad_norm": 0.5918118953704834,
98
+ "learning_rate": 1.7057471264367817e-05,
99
+ "loss": 0.9687,
100
+ "step": 260
101
+ },
102
+ {
103
+ "epoch": 4.81,
104
+ "grad_norm": 0.6621690392494202,
105
+ "learning_rate": 1.6827586206896552e-05,
106
+ "loss": 0.9199,
107
+ "step": 280
108
+ },
109
+ {
110
+ "epoch": 5.16,
111
+ "grad_norm": 0.6697206497192383,
112
+ "learning_rate": 1.659770114942529e-05,
113
+ "loss": 0.9303,
114
+ "step": 300
115
+ },
116
+ {
117
+ "epoch": 5.5,
118
+ "grad_norm": 0.8184316158294678,
119
+ "learning_rate": 1.6367816091954025e-05,
120
+ "loss": 0.8898,
121
+ "step": 320
122
+ },
123
+ {
124
+ "epoch": 5.84,
125
+ "grad_norm": 0.6429987549781799,
126
+ "learning_rate": 1.613793103448276e-05,
127
+ "loss": 0.8623,
128
+ "step": 340
129
+ },
130
+ {
131
+ "epoch": 6.19,
132
+ "grad_norm": 0.7518043518066406,
133
+ "learning_rate": 1.5908045977011495e-05,
134
+ "loss": 0.8239,
135
+ "step": 360
136
+ },
137
+ {
138
+ "epoch": 6.53,
139
+ "grad_norm": 0.6667824983596802,
140
+ "learning_rate": 1.567816091954023e-05,
141
+ "loss": 0.8119,
142
+ "step": 380
143
+ },
144
+ {
145
+ "epoch": 6.87,
146
+ "grad_norm": 0.8569457530975342,
147
+ "learning_rate": 1.5448275862068965e-05,
148
+ "loss": 0.8139,
149
+ "step": 400
150
+ },
151
+ {
152
+ "epoch": 7.22,
153
+ "grad_norm": 0.7754850387573242,
154
+ "learning_rate": 1.5218390804597702e-05,
155
+ "loss": 0.7835,
156
+ "step": 420
157
+ },
158
+ {
159
+ "epoch": 7.56,
160
+ "grad_norm": 1.159196138381958,
161
+ "learning_rate": 1.4988505747126439e-05,
162
+ "loss": 0.7546,
163
+ "step": 440
164
+ },
165
+ {
166
+ "epoch": 7.91,
167
+ "grad_norm": 1.119764804840088,
168
+ "learning_rate": 1.4758620689655174e-05,
169
+ "loss": 0.7571,
170
+ "step": 460
171
+ },
172
+ {
173
+ "epoch": 8.25,
174
+ "grad_norm": 1.3600786924362183,
175
+ "learning_rate": 1.452873563218391e-05,
176
+ "loss": 0.7451,
177
+ "step": 480
178
+ },
179
+ {
180
+ "epoch": 8.59,
181
+ "grad_norm": 0.7608994245529175,
182
+ "learning_rate": 1.4298850574712644e-05,
183
+ "loss": 0.7109,
184
+ "step": 500
185
+ }
186
+ ],
187
+ "logging_steps": 20,
188
+ "max_steps": 1740,
189
+ "num_input_tokens_seen": 0,
190
+ "num_train_epochs": 30,
191
+ "save_steps": 500,
192
+ "total_flos": 1.2995638935552e+18,
193
+ "train_batch_size": 1,
194
+ "trial_name": null,
195
+ "trial_params": null
196
+ }
checkpoint-500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9925cf06613e2d8314bcf7c4157dc698e4db00ef68124ecb6a4078939ad1b743
+ size 4984
runs/Apr06_08-36-05_n69ko6zv8m/events.out.tfevents.1712392568.n69ko6zv8m.2711.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:207daa20507f4bf4081767579a41f04b0a03d8040ec6337f7b3fd6c37ba6ea7c
+ size 23847
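The `runs/` event file is a standard TensorBoard log. A hedged sketch (the scalar tag names are assumptions and may differ in the actual file) for reading it with TensorBoard's Python API after fetching it via Git LFS:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Apr06_08-36-05_n69ko6zv8m")
acc.Reload()
print(acc.Tags()["scalars"])      # list the scalar tags actually present
# "train/loss" is an assumed tag name; substitute one printed above if it differs.
for event in acc.Scalars("train/loss"):
    print(event.step, event.value)
```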