AmberYifan committed on
Commit
44555e4
1 Parent(s): 6a2cb48

Model save

README.md ADDED
@@ -0,0 +1,76 @@
1
+ ---
2
+ license: apache-2.0
3
+ base_model: alignment-handbook/zephyr-7b-sft-full
4
+ tags:
5
+ - generated_from_trainer
6
+ model-index:
7
+ - name: spin-v-high-loss
8
+ results: []
9
+ ---
10
+
11
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
12
+ should probably proofread and complete it, then remove this comment. -->
13
+
14
+ # spin-v-high-loss
15
+
16
+ This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on an unspecified dataset.
17
+ It achieves the following results on the evaluation set:
18
+ - Loss: 0.0069
19
+ - Rewards/real: -10.1415
20
+ - Rewards/generated: -55.1541
21
+ - Rewards/accuracies: 1.0
22
+ - Rewards/margins: 45.0126
23
+ - Logps/generated: -5640.6729
24
+ - Logps/real: -1151.2217
25
+ - Logits/generated: 3.0744
26
+ - Logits/real: 1.9177
27
+
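+ For reference, the reported margin is simply the gap between the two reward columns: rewards/margins = rewards/real - rewards/generated, i.e. 45.0126 = (-10.1415) - (-55.1541), and the same relation holds for every evaluation row in the results table below.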
28
+ ## Model description
29
+
30
+ More information needed
31
+
32
+ ## Intended uses & limitations
33
+
34
+ More information needed
35
+
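+ No usage guidance was provided with this checkpoint. As a rough, untested sketch (assuming the standard `transformers` causal-LM API, a chat template inherited from the Zephyr base model, and the repository id `AmberYifan/spin-v-high-loss`, which is inferred from the model name rather than stated in this card), it can be loaded like any other causal LM checkpoint:
+
+ ```python
+ # Hedged sketch: load the checkpoint with the standard transformers API.
+ # The repo id below is an assumption based on the model name in this card.
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ model_id = "AmberYifan/spin-v-high-loss"  # assumed repository id
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id, torch_dtype=torch.bfloat16, device_map="auto"
+ )
+
+ messages = [{"role": "user", "content": "Summarize what this model was trained for."}]
+ input_ids = tokenizer.apply_chat_template(
+     messages, add_generation_prompt=True, return_tensors="pt"
+ ).to(model.device)
+ output_ids = model.generate(input_ids, max_new_tokens=256)
+ # Decode only the newly generated tokens, skipping the prompt.
+ print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
+ ```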
36
+ ## Training and evaluation data
37
+
38
+ More information needed
39
+
40
+ ## Training procedure
41
+
42
+ ### Training hyperparameters
43
+
44
+ The following hyperparameters were used during training (a `TrainingArguments` sketch restating them follows the list):
45
+ - learning_rate: 5e-07
46
+ - train_batch_size: 8
47
+ - eval_batch_size: 8
48
+ - seed: 42
49
+ - distributed_type: multi-GPU
50
+ - num_devices: 4
51
+ - total_train_batch_size: 32
52
+ - total_eval_batch_size: 32
53
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
54
+ - lr_scheduler_type: linear
55
+ - lr_scheduler_warmup_ratio: 0.1
56
+ - num_epochs: 1
57
+
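+ One way to express these values with the standard `transformers` `TrainingArguments` is sketched below; the output directory and anything not listed above (trainer class, precision, any SPIN/DPO-specific options) are assumptions, since the training script is not part of this commit.
+
+ ```python
+ # Hedged sketch: the hyperparameters listed above as transformers TrainingArguments.
+ # Only the fields stated in this card are set; output_dir is an assumption.
+ from transformers import TrainingArguments
+
+ training_args = TrainingArguments(
+     output_dir="spin-v-high-loss",     # assumed
+     learning_rate=5e-7,
+     per_device_train_batch_size=8,     # "train_batch_size" above
+     per_device_eval_batch_size=8,      # "eval_batch_size" above
+     seed=42,
+     num_train_epochs=1,
+     lr_scheduler_type="linear",
+     warmup_ratio=0.1,
+     adam_beta1=0.9,
+     adam_beta2=0.999,
+     adam_epsilon=1e-8,
+ )
+ # With 4 GPUs (num_devices: 4) the effective sizes match the card:
+ # total_train_batch_size = 8 * 4 = 32, total_eval_batch_size = 8 * 4 = 32.
+ ```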
58
+ ### Training results
59
+
60
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/real | Rewards/generated | Rewards/accuracies | Rewards/margins | Logps/generated | Logps/real | Logits/generated | Logits/real |
61
+ |:-------------:|:-----:|:----:|:---------------:|:------------:|:-----------------:|:------------------:|:---------------:|:---------------:|:----------:|:----------------:|:-----------:|
62
+ | 0.0717 | 0.13 | 50 | 0.0490 | -3.1258 | -37.3431 | 0.9907 | 34.2173 | -3859.5708 | -449.6532 | 3.4831 | 1.9303 |
63
+ | 0.0323 | 0.27 | 100 | 0.0300 | -3.9959 | -38.8380 | 0.9973 | 34.8421 | -4009.0552 | -536.6592 | -0.0155 | -0.1626 |
64
+ | 0.026 | 0.4 | 150 | 0.0158 | -8.2107 | -50.0493 | 0.9947 | 41.8386 | -5130.1880 | -958.1443 | 1.0207 | 1.0071 |
65
+ | 0.0106 | 0.53 | 200 | 0.0087 | -9.2505 | -61.7325 | 0.9960 | 52.4820 | -6298.5093 | -1062.1265 | 2.2349 | 1.2992 |
66
+ | 0.0071 | 0.67 | 250 | 0.0106 | -11.4051 | -49.3118 | 0.9987 | 37.9067 | -5056.4409 | -1277.5874 | 2.8798 | 3.2925 |
67
+ | 0.0121 | 0.8 | 300 | 0.0074 | -9.0224 | -49.1152 | 1.0 | 40.0928 | -5036.7827 | -1039.3110 | 2.8713 | 2.6792 |
68
+ | 0.0013 | 0.93 | 350 | 0.0069 | -10.1415 | -55.1541 | 1.0 | 45.0126 | -5640.6729 | -1151.2217 | 3.0744 | 1.9177 |
69
+
70
+
71
+ ### Framework versions
72
+
73
+ - Transformers 4.37.0
74
+ - Pytorch 2.1.2+cu121
75
+ - Datasets 2.14.6
76
+ - Tokenizers 0.15.2
all_results.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "epoch": 1.0,
3
+ "train_loss": 0.051339938134110225,
4
+ "train_runtime": 3437.1804,
5
+ "train_samples": 12000,
6
+ "train_samples_per_second": 3.491,
7
+ "train_steps_per_second": 0.109
8
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "eos_token_id": 2,
5
+ "transformers_version": "4.37.0"
6
+ }
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f0d46c51b569fb33bb8d79cf22931e800f2ca4b6092bc9ca542c12460c5eed0
3
+ size 4943162336
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:08a27d64574d806074de6f034f20397ca34fb5899d67bed868243a65a674c775
3
+ size 4999819336
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:19e1de3e867e28e89ffcc254ca377be7d9ec33e66329b5c12b05b91677e0b947
3
+ size 4540516344
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 14483464192
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "model-00003-of-00003.safetensors",
7
+ "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
8
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
9
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
10
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
11
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
12
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
13
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
14
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
15
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
16
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
17
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
18
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
19
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
20
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
21
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
22
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
23
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
24
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
25
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
26
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
27
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
28
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
29
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
30
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
31
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
32
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
33
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
34
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
35
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
36
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
37
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
38
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
39
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
40
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
41
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
42
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
43
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
44
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
45
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
46
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
47
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
48
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
49
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
50
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
51
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
52
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
53
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
54
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
55
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
56
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
57
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
58
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
59
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
60
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
61
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
62
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
63
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
64
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
65
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
66
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
67
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
68
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
69
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
70
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
71
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
72
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
73
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
74
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
75
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
76
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
77
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
78
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
79
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
80
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
81
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
82
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
83
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
84
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
85
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
86
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
87
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
88
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
89
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
90
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
91
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
92
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
93
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
94
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
95
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
96
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
97
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
98
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
99
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
100
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
101
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
102
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
103
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
104
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
105
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
106
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
107
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
108
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
109
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
110
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
111
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
112
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
113
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
114
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
115
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
116
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
117
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
118
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
119
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
120
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
121
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
122
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
123
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
124
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
125
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
126
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
127
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
128
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
129
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
130
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
131
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
132
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
133
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
134
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
135
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
136
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
137
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
138
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
139
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
140
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
141
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
142
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
143
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
144
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
145
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
146
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
147
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
148
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
149
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
150
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
151
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
152
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
153
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
154
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
155
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
156
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
157
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
158
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
159
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
160
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
161
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
162
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
163
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
164
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
165
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
166
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
167
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
168
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
169
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
170
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
171
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
172
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
173
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
174
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
175
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
176
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
177
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
178
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
179
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
180
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
181
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
182
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
183
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
184
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
185
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
186
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
187
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
188
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
189
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
190
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
191
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
192
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
193
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
194
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
195
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
196
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
197
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
198
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
199
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
200
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
201
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
202
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
203
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
204
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
205
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
206
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
207
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
208
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
209
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
210
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
211
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
212
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
213
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
214
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
215
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
216
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
217
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
218
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
219
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
220
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
221
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
222
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
223
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
224
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
225
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
226
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
227
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
228
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
229
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
230
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
231
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
232
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
233
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
234
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
235
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
236
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
237
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
238
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
239
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
240
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
241
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
242
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
243
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
244
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
245
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
246
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
247
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
248
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
249
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
250
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
251
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
252
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
253
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
254
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
255
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
256
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
257
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
258
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
259
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
260
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
261
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
262
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
263
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
264
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
265
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
266
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
267
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
268
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
269
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
270
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
271
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
272
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
273
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
274
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
275
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
276
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
277
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
278
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
279
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
280
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
281
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
282
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
283
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
284
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
285
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
286
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
287
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
288
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
289
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
290
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
291
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
292
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
293
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
294
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
295
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
296
+ "model.norm.weight": "model-00003-of-00003.safetensors"
297
+ }
298
+ }
runs/Jun24_22-35-46_gilbreth-j001.rcac.purdue.edu/events.out.tfevents.1719283059.gilbreth-j001.rcac.purdue.edu.222862.0 CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e26d71bd18ce0ce1397baad4ac95a1e8f1a5e22774a9e50a9aa81561412a44bc
3
- size 28391
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1997a8e0bffc909b980a18f0fec1206e0d084c998ab54fbe8eb2b63b570a6482
3
+ size 33899
train_results.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "epoch": 1.0,
3
+ "train_loss": 0.051339938134110225,
4
+ "train_runtime": 3437.1804,
5
+ "train_samples": 12000,
6
+ "train_samples_per_second": 3.491,
7
+ "train_steps_per_second": 0.109
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,674 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.0,
5
+ "eval_steps": 50,
6
+ "global_step": 375,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 1.3157894736842104e-08,
14
+ "logits/generated": -2.9681174755096436,
15
+ "logits/real": -2.6839590072631836,
16
+ "logps/generated": -114.33634185791016,
17
+ "logps/real": -86.82533264160156,
18
+ "loss": 0.6931,
19
+ "rewards/accuracies": 0.0,
20
+ "rewards/generated": 0.0,
21
+ "rewards/margins": 0.0,
22
+ "rewards/real": 0.0,
23
+ "step": 1
24
+ },
25
+ {
26
+ "epoch": 0.03,
27
+ "learning_rate": 1.3157894736842104e-07,
28
+ "logits/generated": -3.07245135307312,
29
+ "logits/real": -2.7284328937530518,
30
+ "logps/generated": -137.1116943359375,
31
+ "logps/real": -131.3474884033203,
32
+ "loss": 0.6592,
33
+ "rewards/accuracies": 0.8333333134651184,
34
+ "rewards/generated": -0.030464060604572296,
35
+ "rewards/margins": 0.07382892817258835,
36
+ "rewards/real": 0.04336486756801605,
37
+ "step": 10
38
+ },
39
+ {
40
+ "epoch": 0.05,
41
+ "learning_rate": 2.631578947368421e-07,
42
+ "logits/generated": -2.764019012451172,
43
+ "logits/real": -2.412205219268799,
44
+ "logps/generated": -272.6034240722656,
45
+ "logps/real": -128.99154663085938,
46
+ "loss": 0.3405,
47
+ "rewards/accuracies": 1.0,
48
+ "rewards/generated": -1.3848954439163208,
49
+ "rewards/margins": 1.49906325340271,
50
+ "rewards/real": 0.1141679435968399,
51
+ "step": 20
52
+ },
53
+ {
54
+ "epoch": 0.08,
55
+ "learning_rate": 3.9473684210526315e-07,
56
+ "logits/generated": -2.10380482673645,
57
+ "logits/real": -1.7264606952667236,
58
+ "logps/generated": -1245.616943359375,
59
+ "logps/real": -437.305908203125,
60
+ "loss": 0.0957,
61
+ "rewards/accuracies": 0.9624999761581421,
62
+ "rewards/generated": -11.244729995727539,
63
+ "rewards/margins": 8.229094505310059,
64
+ "rewards/real": -3.0156354904174805,
65
+ "step": 30
66
+ },
67
+ {
68
+ "epoch": 0.11,
69
+ "learning_rate": 4.970326409495548e-07,
70
+ "logits/generated": -1.417828917503357,
71
+ "logits/real": -1.4325724840164185,
72
+ "logps/generated": -1411.8931884765625,
73
+ "logps/real": -254.04269409179688,
74
+ "loss": 0.0944,
75
+ "rewards/accuracies": 0.9624999761581421,
76
+ "rewards/generated": -12.817289352416992,
77
+ "rewards/margins": 11.621607780456543,
78
+ "rewards/real": -1.1956826448440552,
79
+ "step": 40
80
+ },
81
+ {
82
+ "epoch": 0.13,
83
+ "learning_rate": 4.821958456973294e-07,
84
+ "logits/generated": 2.231259822845459,
85
+ "logits/real": 0.8822120428085327,
86
+ "logps/generated": -3168.895751953125,
87
+ "logps/real": -463.5333557128906,
88
+ "loss": 0.0717,
89
+ "rewards/accuracies": 0.9624999761581421,
90
+ "rewards/generated": -30.413869857788086,
91
+ "rewards/margins": 27.164615631103516,
92
+ "rewards/real": -3.2492573261260986,
93
+ "step": 50
94
+ },
95
+ {
96
+ "epoch": 0.13,
97
+ "eval_logits/generated": 3.4830620288848877,
98
+ "eval_logits/real": 1.9303022623062134,
99
+ "eval_logps/generated": -3859.57080078125,
100
+ "eval_logps/real": -449.6531677246094,
101
+ "eval_loss": 0.049043506383895874,
102
+ "eval_rewards/accuracies": 0.9906914830207825,
103
+ "eval_rewards/generated": -37.34311294555664,
104
+ "eval_rewards/margins": 34.217308044433594,
105
+ "eval_rewards/real": -3.125804901123047,
106
+ "eval_runtime": 215.9205,
107
+ "eval_samples_per_second": 13.894,
108
+ "eval_steps_per_second": 0.435,
109
+ "step": 50
110
+ },
111
+ {
112
+ "epoch": 0.16,
113
+ "learning_rate": 4.673590504451038e-07,
114
+ "logits/generated": 1.6689307689666748,
115
+ "logits/real": 0.8687565922737122,
116
+ "logps/generated": -3099.0478515625,
117
+ "logps/real": -587.6290283203125,
118
+ "loss": 0.0921,
119
+ "rewards/accuracies": 0.9624999761581421,
120
+ "rewards/generated": -29.83612060546875,
121
+ "rewards/margins": 25.382980346679688,
122
+ "rewards/real": -4.453139305114746,
123
+ "step": 60
124
+ },
125
+ {
126
+ "epoch": 0.19,
127
+ "learning_rate": 4.5252225519287835e-07,
128
+ "logits/generated": -0.5513351559638977,
129
+ "logits/real": -0.6214720010757446,
130
+ "logps/generated": -2553.958740234375,
131
+ "logps/real": -673.9515991210938,
132
+ "loss": 0.081,
133
+ "rewards/accuracies": 0.987500011920929,
134
+ "rewards/generated": -24.296937942504883,
135
+ "rewards/margins": 19.031129837036133,
136
+ "rewards/real": -5.265807628631592,
137
+ "step": 70
138
+ },
139
+ {
140
+ "epoch": 0.21,
141
+ "learning_rate": 4.376854599406528e-07,
142
+ "logits/generated": -1.0486136674880981,
143
+ "logits/real": -1.0468746423721313,
144
+ "logps/generated": -3602.500732421875,
145
+ "logps/real": -834.3878784179688,
146
+ "loss": 0.0477,
147
+ "rewards/accuracies": 1.0,
148
+ "rewards/generated": -34.76307678222656,
149
+ "rewards/margins": 27.884380340576172,
150
+ "rewards/real": -6.8786940574646,
151
+ "step": 80
152
+ },
153
+ {
154
+ "epoch": 0.24,
155
+ "learning_rate": 4.228486646884273e-07,
156
+ "logits/generated": -0.8733774423599243,
157
+ "logits/real": -0.8482550382614136,
158
+ "logps/generated": -3528.38330078125,
159
+ "logps/real": -824.9230346679688,
160
+ "loss": 0.0551,
161
+ "rewards/accuracies": 0.9750000238418579,
162
+ "rewards/generated": -34.10634994506836,
163
+ "rewards/margins": 27.168109893798828,
164
+ "rewards/real": -6.938241004943848,
165
+ "step": 90
166
+ },
167
+ {
168
+ "epoch": 0.27,
169
+ "learning_rate": 4.0801186943620176e-07,
170
+ "logits/generated": -0.21600039303302765,
171
+ "logits/real": -0.1288779377937317,
172
+ "logps/generated": -3730.033203125,
173
+ "logps/real": -664.6776733398438,
174
+ "loss": 0.0323,
175
+ "rewards/accuracies": 0.9624999761581421,
176
+ "rewards/generated": -36.04426574707031,
177
+ "rewards/margins": 30.667240142822266,
178
+ "rewards/real": -5.377036094665527,
179
+ "step": 100
180
+ },
181
+ {
182
+ "epoch": 0.27,
183
+ "eval_logits/generated": -0.015528609976172447,
184
+ "eval_logits/real": -0.16258487105369568,
185
+ "eval_logps/generated": -4009.05517578125,
186
+ "eval_logps/real": -536.6591796875,
187
+ "eval_loss": 0.03004666045308113,
188
+ "eval_rewards/accuracies": 0.9973404407501221,
189
+ "eval_rewards/generated": -38.83795928955078,
190
+ "eval_rewards/margins": 34.84209442138672,
191
+ "eval_rewards/real": -3.995864152908325,
192
+ "eval_runtime": 213.8643,
193
+ "eval_samples_per_second": 14.028,
194
+ "eval_steps_per_second": 0.44,
195
+ "step": 100
196
+ },
197
+ {
198
+ "epoch": 0.29,
199
+ "learning_rate": 3.931750741839762e-07,
200
+ "logits/generated": 0.13914000988006592,
201
+ "logits/real": -0.10056102275848389,
202
+ "logps/generated": -3989.37890625,
203
+ "logps/real": -550.2565307617188,
204
+ "loss": 0.0283,
205
+ "rewards/accuracies": 1.0,
206
+ "rewards/generated": -38.63182830810547,
207
+ "rewards/margins": 34.458984375,
208
+ "rewards/real": -4.172848224639893,
209
+ "step": 110
210
+ },
211
+ {
212
+ "epoch": 0.32,
213
+ "learning_rate": 3.7833827893175073e-07,
214
+ "logits/generated": 0.34012800455093384,
215
+ "logits/real": -0.4745260775089264,
216
+ "logps/generated": -4985.052734375,
217
+ "logps/real": -664.0899047851562,
218
+ "loss": 0.0182,
219
+ "rewards/accuracies": 1.0,
220
+ "rewards/generated": -48.502471923828125,
221
+ "rewards/margins": 43.33242416381836,
222
+ "rewards/real": -5.170046806335449,
223
+ "step": 120
224
+ },
225
+ {
226
+ "epoch": 0.35,
227
+ "learning_rate": 3.635014836795252e-07,
228
+ "logits/generated": 0.1598968654870987,
229
+ "logits/real": 0.478419691324234,
230
+ "logps/generated": -4668.8681640625,
231
+ "logps/real": -993.2604370117188,
232
+ "loss": 0.0371,
233
+ "rewards/accuracies": 0.987500011920929,
234
+ "rewards/generated": -45.391544342041016,
235
+ "rewards/margins": 36.9045524597168,
236
+ "rewards/real": -8.48698616027832,
237
+ "step": 130
238
+ },
239
+ {
240
+ "epoch": 0.37,
241
+ "learning_rate": 3.486646884272997e-07,
242
+ "logits/generated": 0.4115122854709625,
243
+ "logits/real": 0.367294043302536,
244
+ "logps/generated": -4831.67919921875,
245
+ "logps/real": -1011.8640747070312,
246
+ "loss": 0.0204,
247
+ "rewards/accuracies": 1.0,
248
+ "rewards/generated": -46.988433837890625,
249
+ "rewards/margins": 38.47046661376953,
250
+ "rewards/real": -8.517963409423828,
251
+ "step": 140
252
+ },
253
+ {
254
+ "epoch": 0.4,
255
+ "learning_rate": 3.3382789317507414e-07,
256
+ "logits/generated": 0.7945320010185242,
257
+ "logits/real": 0.49422749876976013,
258
+ "logps/generated": -4842.3310546875,
259
+ "logps/real": -903.1175537109375,
260
+ "loss": 0.026,
261
+ "rewards/accuracies": 0.9750000238418579,
262
+ "rewards/generated": -47.182762145996094,
263
+ "rewards/margins": 39.56975555419922,
264
+ "rewards/real": -7.613003730773926,
265
+ "step": 150
266
+ },
267
+ {
268
+ "epoch": 0.4,
269
+ "eval_logits/generated": 1.0207222700119019,
270
+ "eval_logits/real": 1.0071420669555664,
271
+ "eval_logps/generated": -5130.18798828125,
272
+ "eval_logps/real": -958.144287109375,
273
+ "eval_loss": 0.01575618050992489,
274
+ "eval_rewards/accuracies": 0.9946808218955994,
275
+ "eval_rewards/generated": -50.04927444458008,
276
+ "eval_rewards/margins": 41.83856201171875,
277
+ "eval_rewards/real": -8.210715293884277,
278
+ "eval_runtime": 213.3884,
279
+ "eval_samples_per_second": 14.059,
280
+ "eval_steps_per_second": 0.441,
281
+ "step": 150
282
+ },
283
+ {
284
+ "epoch": 0.43,
285
+ "learning_rate": 3.189910979228487e-07,
286
+ "logits/generated": 0.9495289921760559,
287
+ "logits/real": 0.9829273223876953,
288
+ "logps/generated": -4704.9228515625,
289
+ "logps/real": -946.2503051757812,
290
+ "loss": 0.0139,
291
+ "rewards/accuracies": 1.0,
292
+ "rewards/generated": -45.76738739013672,
293
+ "rewards/margins": 37.696834564208984,
294
+ "rewards/real": -8.070551872253418,
295
+ "step": 160
296
+ },
297
+ {
298
+ "epoch": 0.45,
299
+ "learning_rate": 3.0415430267062316e-07,
300
+ "logits/generated": 1.2480947971343994,
301
+ "logits/real": 1.0694479942321777,
302
+ "logps/generated": -4743.8857421875,
303
+ "logps/real": -894.25341796875,
304
+ "loss": 0.0234,
305
+ "rewards/accuracies": 0.987500011920929,
306
+ "rewards/generated": -46.242591857910156,
307
+ "rewards/margins": 38.66545104980469,
308
+ "rewards/real": -7.577139854431152,
309
+ "step": 170
310
+ },
311
+ {
312
+ "epoch": 0.48,
313
+ "learning_rate": 2.893175074183976e-07,
314
+ "logits/generated": 1.5751049518585205,
315
+ "logits/real": 0.7654831409454346,
316
+ "logps/generated": -4563.38671875,
317
+ "logps/real": -743.2201538085938,
318
+ "loss": 0.0227,
319
+ "rewards/accuracies": 1.0,
320
+ "rewards/generated": -44.37525177001953,
321
+ "rewards/margins": 38.28398895263672,
322
+ "rewards/real": -6.0912652015686035,
323
+ "step": 180
324
+ },
325
+ {
326
+ "epoch": 0.51,
327
+ "learning_rate": 2.744807121661721e-07,
328
+ "logits/generated": 1.8286101818084717,
329
+ "logits/real": 1.0053759813308716,
330
+ "logps/generated": -5664.7138671875,
331
+ "logps/real": -959.2091064453125,
332
+ "loss": 0.0254,
333
+ "rewards/accuracies": 0.987500011920929,
334
+ "rewards/generated": -55.386558532714844,
335
+ "rewards/margins": 47.158206939697266,
336
+ "rewards/real": -8.228350639343262,
337
+ "step": 190
338
+ },
339
+ {
340
+ "epoch": 0.53,
341
+ "learning_rate": 2.596439169139466e-07,
342
+ "logits/generated": 2.440897226333618,
343
+ "logits/real": 1.7533397674560547,
344
+ "logps/generated": -6381.81005859375,
345
+ "logps/real": -920.97509765625,
346
+ "loss": 0.0106,
347
+ "rewards/accuracies": 1.0,
348
+ "rewards/generated": -62.468101501464844,
349
+ "rewards/margins": 54.67470169067383,
350
+ "rewards/real": -7.793404579162598,
351
+ "step": 200
352
+ },
353
+ {
354
+ "epoch": 0.53,
355
+ "eval_logits/generated": 2.2348735332489014,
356
+ "eval_logits/real": 1.2992360591888428,
357
+ "eval_logps/generated": -6298.50927734375,
358
+ "eval_logps/real": -1062.12646484375,
359
+ "eval_loss": 0.008726131170988083,
360
+ "eval_rewards/accuracies": 0.9960106611251831,
361
+ "eval_rewards/generated": -61.73249435424805,
362
+ "eval_rewards/margins": 52.48196029663086,
363
+ "eval_rewards/real": -9.250536918640137,
364
+ "eval_runtime": 212.9976,
365
+ "eval_samples_per_second": 14.085,
366
+ "eval_steps_per_second": 0.441,
367
+ "step": 200
368
+ },
369
+ {
370
+ "epoch": 0.56,
371
+ "learning_rate": 2.4480712166172106e-07,
372
+ "logits/generated": 2.323308229446411,
373
+ "logits/real": 1.4621614217758179,
374
+ "logps/generated": -6335.68310546875,
375
+ "logps/real": -1120.7493896484375,
376
+ "loss": 0.0035,
377
+ "rewards/accuracies": 1.0,
378
+ "rewards/generated": -62.093727111816406,
379
+ "rewards/margins": 52.304664611816406,
380
+ "rewards/real": -9.789057731628418,
381
+ "step": 210
382
+ },
383
+ {
384
+ "epoch": 0.59,
385
+ "learning_rate": 2.2997032640949554e-07,
386
+ "logits/generated": 2.158539295196533,
387
+ "logits/real": 1.8379024267196655,
388
+ "logps/generated": -4581.6611328125,
389
+ "logps/real": -948.8102416992188,
390
+ "loss": 0.0202,
391
+ "rewards/accuracies": 1.0,
392
+ "rewards/generated": -44.635250091552734,
393
+ "rewards/margins": 36.491943359375,
394
+ "rewards/real": -8.143308639526367,
395
+ "step": 220
396
+ },
397
+ {
398
+ "epoch": 0.61,
399
+ "learning_rate": 2.1513353115727e-07,
400
+ "logits/generated": 2.464277982711792,
401
+ "logits/real": 2.7233450412750244,
402
+ "logps/generated": -4033.82421875,
403
+ "logps/real": -1097.900390625,
404
+ "loss": 0.0053,
405
+ "rewards/accuracies": 1.0,
406
+ "rewards/generated": -39.125518798828125,
407
+ "rewards/margins": 29.4420166015625,
408
+ "rewards/real": -9.683503150939941,
409
+ "step": 230
410
+ },
411
+ {
412
+ "epoch": 0.64,
413
+ "learning_rate": 2.0029673590504451e-07,
414
+ "logits/generated": 2.664623260498047,
415
+ "logits/real": 2.550762176513672,
416
+ "logps/generated": -4957.3818359375,
417
+ "logps/real": -1175.33740234375,
418
+ "loss": 0.0067,
419
+ "rewards/accuracies": 1.0,
420
+ "rewards/generated": -48.34945297241211,
421
+ "rewards/margins": 38.046714782714844,
422
+ "rewards/real": -10.302745819091797,
423
+ "step": 240
424
+ },
425
+ {
426
+ "epoch": 0.67,
427
+ "learning_rate": 1.8545994065281897e-07,
428
+ "logits/generated": 2.886002779006958,
429
+ "logits/real": 2.470453977584839,
430
+ "logps/generated": -5135.99755859375,
431
+ "logps/real": -1098.706787109375,
432
+ "loss": 0.0071,
433
+ "rewards/accuracies": 1.0,
434
+ "rewards/generated": -50.12360382080078,
435
+ "rewards/margins": 40.536460876464844,
436
+ "rewards/real": -9.587137222290039,
437
+ "step": 250
438
+ },
439
+ {
440
+ "epoch": 0.67,
441
+ "eval_logits/generated": 2.8798305988311768,
442
+ "eval_logits/real": 3.2924983501434326,
443
+ "eval_logps/generated": -5056.44091796875,
444
+ "eval_logps/real": -1277.58740234375,
445
+ "eval_loss": 0.010579616762697697,
446
+ "eval_rewards/accuracies": 0.998670220375061,
447
+ "eval_rewards/generated": -49.31182098388672,
448
+ "eval_rewards/margins": 37.90666961669922,
449
+ "eval_rewards/real": -11.405147552490234,
450
+ "eval_runtime": 213.7924,
451
+ "eval_samples_per_second": 14.032,
452
+ "eval_steps_per_second": 0.44,
453
+ "step": 250
454
+ },
455
+ {
456
+ "epoch": 0.69,
457
+ "learning_rate": 1.7062314540059346e-07,
458
+ "logits/generated": 3.001654863357544,
459
+ "logits/real": 3.0144619941711426,
460
+ "logps/generated": -5104.75,
461
+ "logps/real": -1167.21826171875,
462
+ "loss": 0.0035,
463
+ "rewards/accuracies": 1.0,
464
+ "rewards/generated": -49.85370635986328,
465
+ "rewards/margins": 39.53137969970703,
466
+ "rewards/real": -10.322334289550781,
467
+ "step": 260
468
+ },
469
+ {
470
+ "epoch": 0.72,
471
+ "learning_rate": 1.5578635014836795e-07,
472
+ "logits/generated": 2.4701550006866455,
473
+ "logits/real": 2.8096556663513184,
474
+ "logps/generated": -4720.4482421875,
475
+ "logps/real": -1196.321044921875,
476
+ "loss": 0.0055,
477
+ "rewards/accuracies": 1.0,
478
+ "rewards/generated": -45.905601501464844,
479
+ "rewards/margins": 35.247032165527344,
480
+ "rewards/real": -10.6585693359375,
481
+ "step": 270
482
+ },
483
+ {
484
+ "epoch": 0.75,
485
+ "learning_rate": 1.4094955489614243e-07,
486
+ "logits/generated": 2.977241277694702,
487
+ "logits/real": 2.97806715965271,
488
+ "logps/generated": -5751.66748046875,
489
+ "logps/real": -1152.299072265625,
490
+ "loss": 0.0151,
491
+ "rewards/accuracies": 1.0,
492
+ "rewards/generated": -56.2369384765625,
493
+ "rewards/margins": 46.072731018066406,
494
+ "rewards/real": -10.164213180541992,
495
+ "step": 280
496
+ },
497
+ {
498
+ "epoch": 0.77,
499
+ "learning_rate": 1.261127596439169e-07,
500
+ "logits/generated": 2.786933183670044,
501
+ "logits/real": 2.2626030445098877,
502
+ "logps/generated": -5328.9072265625,
503
+ "logps/real": -1131.6319580078125,
504
+ "loss": 0.0074,
505
+ "rewards/accuracies": 1.0,
506
+ "rewards/generated": -51.968414306640625,
507
+ "rewards/margins": 41.99260711669922,
508
+ "rewards/real": -9.975804328918457,
509
+ "step": 290
510
+ },
511
+ {
512
+ "epoch": 0.8,
513
+ "learning_rate": 1.1127596439169139e-07,
514
+ "logits/generated": 2.6825292110443115,
515
+ "logits/real": 2.87423038482666,
516
+ "logps/generated": -4691.046875,
517
+ "logps/real": -1226.396728515625,
518
+ "loss": 0.0121,
519
+ "rewards/accuracies": 1.0,
520
+ "rewards/generated": -45.6788330078125,
521
+ "rewards/margins": 34.744598388671875,
522
+ "rewards/real": -10.934233665466309,
523
+ "step": 300
524
+ },
525
+ {
526
+ "epoch": 0.8,
527
+ "eval_logits/generated": 2.8713479042053223,
528
+ "eval_logits/real": 2.6792426109313965,
529
+ "eval_logps/generated": -5036.78271484375,
530
+ "eval_logps/real": -1039.31103515625,
531
+ "eval_loss": 0.007437660358846188,
532
+ "eval_rewards/accuracies": 1.0,
533
+ "eval_rewards/generated": -49.115230560302734,
534
+ "eval_rewards/margins": 40.09284591674805,
535
+ "eval_rewards/real": -9.022383689880371,
536
+ "eval_runtime": 213.3741,
537
+ "eval_samples_per_second": 14.06,
538
+ "eval_steps_per_second": 0.441,
539
+ "step": 300
540
+ },
541
+ {
542
+ "epoch": 0.83,
543
+ "learning_rate": 9.643916913946587e-08,
544
+ "logits/generated": 2.8469080924987793,
545
+ "logits/real": 2.4686405658721924,
546
+ "logps/generated": -5047.57080078125,
547
+ "logps/real": -1106.5921630859375,
548
+ "loss": 0.0032,
549
+ "rewards/accuracies": 1.0,
550
+ "rewards/generated": -49.19374465942383,
551
+ "rewards/margins": 39.55516815185547,
552
+ "rewards/real": -9.638582229614258,
553
+ "step": 310
554
+ },
555
+ {
556
+ "epoch": 0.85,
557
+ "learning_rate": 8.160237388724035e-08,
558
+ "logits/generated": 3.1090855598449707,
559
+ "logits/real": 2.914607048034668,
560
+ "logps/generated": -6005.77587890625,
561
+ "logps/real": -1140.301025390625,
562
+ "loss": 0.0123,
563
+ "rewards/accuracies": 0.987500011920929,
564
+ "rewards/generated": -58.757904052734375,
565
+ "rewards/margins": 48.729026794433594,
566
+ "rewards/real": -10.028875350952148,
567
+ "step": 320
568
+ },
569
+ {
570
+ "epoch": 0.88,
571
+ "learning_rate": 6.676557863501484e-08,
572
+ "logits/generated": 2.659290313720703,
573
+ "logits/real": 2.3654632568359375,
574
+ "logps/generated": -5298.484375,
575
+ "logps/real": -1117.4388427734375,
576
+ "loss": 0.0108,
577
+ "rewards/accuracies": 0.987500011920929,
578
+ "rewards/generated": -51.67462158203125,
579
+ "rewards/margins": 41.78734588623047,
580
+ "rewards/real": -9.887277603149414,
581
+ "step": 330
582
+ },
583
+ {
584
+ "epoch": 0.91,
585
+ "learning_rate": 5.192878338278932e-08,
586
+ "logits/generated": 2.7824113368988037,
587
+ "logits/real": 1.4837300777435303,
588
+ "logps/generated": -5636.43017578125,
589
+ "logps/real": -1132.7353515625,
590
+ "loss": 0.0045,
591
+ "rewards/accuracies": 1.0,
592
+ "rewards/generated": -55.110389709472656,
593
+ "rewards/margins": 45.19716262817383,
594
+ "rewards/real": -9.913228034973145,
595
+ "step": 340
596
+ },
597
+ {
598
+ "epoch": 0.93,
599
+ "learning_rate": 3.709198813056379e-08,
600
+ "logits/generated": 2.721921443939209,
601
+ "logits/real": 1.7522449493408203,
602
+ "logps/generated": -5212.6552734375,
603
+ "logps/real": -1117.9945068359375,
604
+ "loss": 0.0013,
605
+ "rewards/accuracies": 1.0,
606
+ "rewards/generated": -50.842464447021484,
607
+ "rewards/margins": 41.09899139404297,
608
+ "rewards/real": -9.743471145629883,
609
+ "step": 350
610
+ },
611
+ {
612
+ "epoch": 0.93,
613
+ "eval_logits/generated": 3.074373722076416,
614
+ "eval_logits/real": 1.9176675081253052,
615
+ "eval_logps/generated": -5640.6728515625,
616
+ "eval_logps/real": -1151.2216796875,
617
+ "eval_loss": 0.006871914956718683,
618
+ "eval_rewards/accuracies": 1.0,
619
+ "eval_rewards/generated": -55.15412521362305,
620
+ "eval_rewards/margins": 45.012638092041016,
621
+ "eval_rewards/real": -10.141489028930664,
622
+ "eval_runtime": 213.3069,
623
+ "eval_samples_per_second": 14.064,
624
+ "eval_steps_per_second": 0.441,
625
+ "step": 350
626
+ },
627
+ {
628
+ "epoch": 0.96,
629
+ "learning_rate": 2.225519287833828e-08,
630
+ "logits/generated": 3.0481696128845215,
631
+ "logits/real": 1.9120832681655884,
632
+ "logps/generated": -5828.06787109375,
633
+ "logps/real": -1140.780517578125,
634
+ "loss": 0.0043,
635
+ "rewards/accuracies": 1.0,
636
+ "rewards/generated": -56.97248077392578,
637
+ "rewards/margins": 47.068485260009766,
638
+ "rewards/real": -9.903992652893066,
639
+ "step": 360
640
+ },
641
+ {
642
+ "epoch": 0.99,
643
+ "learning_rate": 7.418397626112759e-09,
644
+ "logits/generated": 3.2567169666290283,
645
+ "logits/real": 2.3705618381500244,
646
+ "logps/generated": -6292.123046875,
647
+ "logps/real": -1168.8006591796875,
648
+ "loss": 0.0035,
649
+ "rewards/accuracies": 1.0,
650
+ "rewards/generated": -61.6358642578125,
651
+ "rewards/margins": 51.312286376953125,
652
+ "rewards/real": -10.323575973510742,
653
+ "step": 370
654
+ },
655
+ {
656
+ "epoch": 1.0,
657
+ "step": 375,
658
+ "total_flos": 0.0,
659
+ "train_loss": 0.051339938134110225,
660
+ "train_runtime": 3437.1804,
661
+ "train_samples_per_second": 3.491,
662
+ "train_steps_per_second": 0.109
663
+ }
664
+ ],
665
+ "logging_steps": 10,
666
+ "max_steps": 375,
667
+ "num_input_tokens_seen": 0,
668
+ "num_train_epochs": 1,
669
+ "save_steps": 100,
670
+ "total_flos": 0.0,
671
+ "train_batch_size": 8,
672
+ "trial_name": null,
673
+ "trial_params": null
674
+ }