AmberYifan committed on
Commit 4e5f5c5
1 Parent(s): 6fb158d

Model save

README.md ADDED
@@ -0,0 +1,74 @@
+ ---
+ license: apache-2.0
+ base_model: alignment-handbook/zephyr-7b-sft-full
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: spin-margin2
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # spin-margin2
+
+ This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0010
+ - Rewards/real: -0.7975
+ - Rewards/generated: -20.4822
+ - Rewards/accuracies: 1.0
+ - Rewards/margins: 19.6846
+ - Logps/generated: -303.8466
+ - Logps/real: -141.0674
+ - Logits/generated: -2.6068
+ - Logits/real: -2.3492
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-07
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - total_train_batch_size: 32
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/real | Rewards/generated | Rewards/accuracies | Rewards/margins | Logps/generated | Logps/real | Logits/generated | Logits/real |
+ |:-------------:|:-----:|:----:|:---------------:|:------------:|:-----------------:|:------------------:|:---------------:|:---------------:|:----------:|:----------------:|:-----------:|
+ | 0.0043 | 0.19 | 100 | 0.0049 | 0.9120 | -9.6012 | 1.0 | 10.5132 | -195.0367 | -123.9721 | -2.7982 | -2.5652 |
+ | 0.0034 | 0.39 | 200 | 0.0024 | -0.0739 | -14.1834 | 1.0 | 14.1095 | -240.8593 | -133.8314 | -2.8109 | -2.5347 |
+ | 0.0007 | 0.58 | 300 | 0.0012 | -0.2381 | -16.9127 | 1.0 | 16.6746 | -268.1524 | -135.4731 | -2.7308 | -2.4046 |
+ | 0.0016 | 0.78 | 400 | 0.0010 | -1.1878 | -19.5719 | 1.0 | 18.3841 | -294.7439 | -144.9703 | -2.6559 | -2.3917 |
+ | 0.0001 | 0.97 | 500 | 0.0010 | -0.7975 | -20.4822 | 1.0 | 19.6846 | -303.8466 | -141.0674 | -2.6068 | -2.3492 |
+
+
+ ### Framework versions
+
+ - Transformers 4.37.0
+ - PyTorch 2.1.2+cu121
+ - Datasets 2.14.6
+ - Tokenizers 0.15.2
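
The card above is auto-generated and stops short of usage instructions. A minimal loading sketch, assuming the checkpoint is published under the committer's namespace as `AmberYifan/spin-margin2` (hypothetical repo id) and that it keeps the Zephyr chat template of the SFT base model:

```python
# Minimal sketch: load the fine-tuned checkpoint and run one chat turn.
# "AmberYifan/spin-margin2" is an assumed repo id; substitute the real one.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "AmberYifan/spin-margin2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [{"role": "user", "content": "Summarize what SPIN training does."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```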
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 1.0,
+     "train_loss": 0.016583090304290803,
+     "train_runtime": 3283.9246,
+     "train_samples": 16467,
+     "train_samples_per_second": 5.014,
+     "train_steps_per_second": 0.157
+ }
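
The throughput numbers above are internally consistent: 16467 samples over 3283.9 s is 5.014 samples/s, and with an effective batch of 32 that is 515 optimizer steps, i.e. 0.157 steps/s. A quick check, assuming the file is in the working directory:

```python
# Recomputes the reported throughput from the raw fields in all_results.json.
import json
import math

with open("all_results.json") as f:
    r = json.load(f)

steps = math.ceil(r["train_samples"] / 32)                # 32 = total_train_batch_size
print(steps)                                              # 515
print(round(r["train_samples"] / r["train_runtime"], 3))  # 5.014 samples/s
print(round(steps / r["train_runtime"], 3))               # 0.157 steps/s
```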
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+     "_from_model_config": true,
+     "bos_token_id": 1,
+     "eos_token_id": 2,
+     "transformers_version": "4.37.0"
+ }
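
The generation config only pins the special-token ids (1 and 2 are `<s>` and `</s>` in the Mistral/Zephyr tokenizer); `generate()` reads it automatically, but it can also be inspected directly. A sketch, assuming a local checkout of this repo:

```python
# Reads ./generation_config.json from a local copy of the repo (path assumed).
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained(".")
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # 1 2
```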
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e98e3d2581137a81621547ef6c0ad50dcd903475eb5c2a356ecf404e6a543c8
+ size 4943162336
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41b565c1c77fd26bce5fee9886091f03e8ae36522e056b2023be74791f895c16
+ size 4999819336
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1523f9bd758ce036e2719e3a589b44dd58e1156c757a71ac3db4cb80cf6c270
+ size 4540516344
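
The three shard entries above are Git LFS pointer files, not the weights themselves; a downloaded shard can be verified against the pointer's `oid`. A sketch, assuming `model-00001-of-00003.safetensors` has been fetched into the working directory:

```python
# Streams the shard through SHA-256 and compares it with the LFS pointer's oid.
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

expected = "0e98e3d2581137a81621547ef6c0ad50dcd903475eb5c2a356ecf404e6a543c8"
assert sha256_of("model-00001-of-00003.safetensors") == expected
```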
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
+ {
+     "metadata": {
+         "total_size": 14483464192
+     },
+     "weight_map": {
+         "lm_head.weight": "model-00003-of-00003.safetensors",
+         "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+         "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+         "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+         "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+         "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+         "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+         "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+         "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+         "model.norm.weight": "model-00003-of-00003.safetensors"
+     }
+ }
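
`total_size` of 14,483,464,192 bytes is consistent with roughly 7.2B parameters stored at 2 bytes each. The `weight_map` lets a loader open only the shard that holds a given tensor; a sketch, assuming the index and shards sit in `model_dir`:

```python
# Looks up one tensor's shard in the index, then opens that shard lazily.
import json
from safetensors import safe_open

model_dir = "."  # assumed local checkout
name = "model.layers.11.self_attn.q_proj.weight"

with open(f"{model_dir}/model.safetensors.index.json") as f:
    shard = json.load(f)["weight_map"][name]  # "model-00002-of-00003.safetensors"

with safe_open(f"{model_dir}/{shard}", framework="pt", device="cpu") as f:
    tensor = f.get_tensor(name)
print(tensor.shape)
```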
runs/Jun18_10-29-25_gilbreth-j001.rcac.purdue.edu/events.out.tfevents.1718721100.gilbreth-j001.rcac.purdue.edu.219991.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3b46c6b3d7d75401c87e7a9e0a91ebf38d9a29524f9af9fb7e42ec84173e8c2e
- size 40271
+ oid sha256:96e0c37b6d9118359a0f3f130cbb4fb84579c39cfb97293a80af9be97821eac0
+ size 41256
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 1.0,
+     "train_loss": 0.016583090304290803,
+     "train_runtime": 3283.9246,
+     "train_samples": 16467,
+     "train_samples_per_second": 5.014,
+     "train_steps_per_second": 0.157
+ }
trainer_state.json ADDED
@@ -0,0 +1,838 @@
+ {
+     "best_metric": null,
+     "best_model_checkpoint": null,
+     "epoch": 1.0,
+     "eval_steps": 100,
+     "global_step": 515,
+     "is_hyper_param_search": false,
+     "is_local_process_zero": true,
+     "is_world_process_zero": true,
+     "log_history": [
+         {
+             "epoch": 0.0,
+             "learning_rate": 9.615384615384615e-09,
+             "logits/generated": -3.0575106143951416,
+             "logits/real": -2.816924571990967,
+             "logps/generated": -89.89034271240234,
+             "logps/real": -110.7723617553711,
+             "loss": 0.6931,
+             "rewards/accuracies": 0.0,
+             "rewards/generated": 0.0,
+             "rewards/margins": 0.0,
+             "rewards/real": 0.0,
+             "step": 1
+         },
+         {
+             "epoch": 0.02,
+             "learning_rate": 9.615384615384616e-08,
+             "logits/generated": -3.0530381202697754,
+             "logits/real": -2.897934675216675,
+             "logps/generated": -101.54727935791016,
+             "logps/real": -141.96624755859375,
+             "loss": 0.5193,
+             "rewards/accuracies": 0.7638888955116272,
+             "rewards/generated": -0.3860962390899658,
+             "rewards/margins": 0.4838888943195343,
+             "rewards/real": 0.09779267013072968,
+             "step": 10
+         },
+         {
+             "epoch": 0.04,
+             "learning_rate": 1.9230769230769231e-07,
+             "logits/generated": -3.022646188735962,
+             "logits/real": -2.780233860015869,
+             "logps/generated": -117.37088775634766,
+             "logps/real": -120.82357025146484,
+             "loss": 0.1046,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -2.058361291885376,
+             "rewards/margins": 2.6513800621032715,
+             "rewards/real": 0.5930188894271851,
+             "step": 20
+         },
+         {
+             "epoch": 0.06,
+             "learning_rate": 2.884615384615384e-07,
+             "logits/generated": -2.9492268562316895,
+             "logits/real": -2.746804714202881,
+             "logps/generated": -131.04525756835938,
+             "logps/real": -121.73081970214844,
+             "loss": 0.0414,
+             "rewards/accuracies": 0.9750000238418579,
+             "rewards/generated": -3.5284512042999268,
+             "rewards/margins": 4.389330863952637,
+             "rewards/real": 0.8608797788619995,
+             "step": 30
+         },
+         {
+             "epoch": 0.08,
+             "learning_rate": 3.8461538461538463e-07,
+             "logits/generated": -2.8972926139831543,
+             "logits/real": -2.75730562210083,
+             "logps/generated": -132.66815185546875,
+             "logps/real": -132.48605346679688,
+             "loss": 0.0317,
+             "rewards/accuracies": 0.987500011920929,
+             "rewards/generated": -4.410510063171387,
+             "rewards/margins": 5.345829963684082,
+             "rewards/real": 0.9353200793266296,
+             "step": 40
+         },
+         {
+             "epoch": 0.1,
+             "learning_rate": 4.807692307692307e-07,
+             "logits/generated": -2.9142377376556396,
+             "logits/real": -2.6744868755340576,
+             "logps/generated": -152.9608612060547,
+             "logps/real": -120.39155578613281,
+             "loss": 0.0178,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -5.3825178146362305,
+             "rewards/margins": 6.37641716003418,
+             "rewards/real": 0.9938996434211731,
+             "step": 50
+         },
+         {
+             "epoch": 0.12,
+             "learning_rate": 4.913606911447084e-07,
+             "logits/generated": -2.8876500129699707,
+             "logits/real": -2.6662659645080566,
+             "logps/generated": -169.52001953125,
+             "logps/real": -121.60459899902344,
+             "loss": 0.0134,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -6.808724880218506,
+             "rewards/margins": 7.904421806335449,
+             "rewards/real": 1.0956966876983643,
+             "step": 60
+         },
+         {
+             "epoch": 0.14,
+             "learning_rate": 4.80561555075594e-07,
+             "logits/generated": -2.8779220581054688,
+             "logits/real": -2.714576005935669,
+             "logps/generated": -173.93214416503906,
+             "logps/real": -140.5163116455078,
+             "loss": 0.0088,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -7.277545928955078,
+             "rewards/margins": 8.348401069641113,
+             "rewards/real": 1.0708551406860352,
+             "step": 70
+         },
+         {
+             "epoch": 0.16,
+             "learning_rate": 4.6976241900647946e-07,
+             "logits/generated": -2.854962110519409,
+             "logits/real": -2.5673232078552246,
+             "logps/generated": -187.35971069335938,
+             "logps/real": -115.74288177490234,
+             "loss": 0.008,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -8.015608787536621,
+             "rewards/margins": 8.94044017791748,
+             "rewards/real": 0.9248324632644653,
+             "step": 80
+         },
+         {
+             "epoch": 0.17,
+             "learning_rate": 4.5896328293736496e-07,
+             "logits/generated": -2.8408970832824707,
+             "logits/real": -2.5543339252471924,
+             "logps/generated": -190.44345092773438,
+             "logps/real": -117.4663314819336,
+             "loss": 0.0096,
+             "rewards/accuracies": 0.987500011920929,
+             "rewards/generated": -8.589609146118164,
+             "rewards/margins": 9.519355773925781,
+             "rewards/real": 0.9297460317611694,
+             "step": 90
+         },
+         {
+             "epoch": 0.19,
+             "learning_rate": 4.481641468682505e-07,
+             "logits/generated": -2.8061957359313965,
+             "logits/real": -2.5320754051208496,
+             "logps/generated": -201.36314392089844,
+             "logps/real": -106.45611572265625,
+             "loss": 0.0043,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -9.835786819458008,
+             "rewards/margins": 10.548227310180664,
+             "rewards/real": 0.7124411463737488,
+             "step": 100
+         },
+         {
+             "epoch": 0.19,
+             "eval_logits/generated": -2.7982144355773926,
+             "eval_logits/real": -2.5651581287384033,
+             "eval_logps/generated": -195.03672790527344,
+             "eval_logps/real": -123.97211456298828,
+             "eval_loss": 0.004925635177642107,
+             "eval_rewards/accuracies": 1.0,
+             "eval_rewards/generated": -9.601181983947754,
+             "eval_rewards/margins": 10.513165473937988,
+             "eval_rewards/real": 0.9119827151298523,
+             "eval_runtime": 115.6828,
+             "eval_samples_per_second": 14.522,
+             "eval_steps_per_second": 0.458,
+             "step": 100
+         },
+         {
+             "epoch": 0.21,
+             "learning_rate": 4.3736501079913605e-07,
+             "logits/generated": -2.7716643810272217,
+             "logits/real": -2.528700351715088,
+             "logps/generated": -192.03431701660156,
+             "logps/real": -115.5088119506836,
+             "loss": 0.0027,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -9.888772964477539,
+             "rewards/margins": 10.59119987487793,
+             "rewards/real": 0.7024269104003906,
+             "step": 110
+         },
+         {
+             "epoch": 0.23,
+             "learning_rate": 4.265658747300216e-07,
+             "logits/generated": -2.772347927093506,
+             "logits/real": -2.439427137374878,
+             "logps/generated": -199.63067626953125,
+             "logps/real": -117.44853210449219,
+             "loss": 0.008,
+             "rewards/accuracies": 0.987500011920929,
+             "rewards/generated": -10.372490882873535,
+             "rewards/margins": 10.862470626831055,
+             "rewards/real": 0.48998016119003296,
+             "step": 120
+         },
+         {
+             "epoch": 0.25,
+             "learning_rate": 4.157667386609071e-07,
+             "logits/generated": -2.790419101715088,
+             "logits/real": -2.504594564437866,
+             "logps/generated": -182.69247436523438,
+             "logps/real": -117.50340270996094,
+             "loss": 0.0116,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -9.6342134475708,
+             "rewards/margins": 9.86939811706543,
+             "rewards/real": 0.2351863831281662,
+             "step": 130
+         },
+         {
+             "epoch": 0.27,
+             "learning_rate": 4.0496760259179263e-07,
+             "logits/generated": -2.848449230194092,
+             "logits/real": -2.592134475708008,
+             "logps/generated": -209.822998046875,
+             "logps/real": -132.05592346191406,
+             "loss": 0.0088,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -10.387749671936035,
+             "rewards/margins": 10.231754302978516,
+             "rewards/real": -0.15599501132965088,
+             "step": 140
+         },
+         {
+             "epoch": 0.29,
+             "learning_rate": 3.941684665226782e-07,
+             "logits/generated": -2.863227367401123,
+             "logits/real": -2.6038713455200195,
+             "logps/generated": -205.6251678466797,
+             "logps/real": -132.90298461914062,
+             "loss": 0.0035,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -10.28604507446289,
+             "rewards/margins": 10.251394271850586,
+             "rewards/real": -0.03464997559785843,
+             "step": 150
+         },
+         {
+             "epoch": 0.31,
+             "learning_rate": 3.8336933045356367e-07,
+             "logits/generated": -2.836599111557007,
+             "logits/real": -2.6022212505340576,
+             "logps/generated": -199.63819885253906,
+             "logps/real": -126.2606201171875,
+             "loss": 0.005,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -10.73370361328125,
+             "rewards/margins": 10.730461120605469,
+             "rewards/real": -0.0032417401671409607,
+             "step": 160
+         },
+         {
+             "epoch": 0.33,
+             "learning_rate": 3.7257019438444927e-07,
+             "logits/generated": -2.848566770553589,
+             "logits/real": -2.5174214839935303,
+             "logps/generated": -235.12161254882812,
+             "logps/real": -115.94686126708984,
+             "loss": 0.0017,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -13.108288764953613,
+             "rewards/margins": 12.952964782714844,
+             "rewards/real": -0.155323326587677,
+             "step": 170
+         },
+         {
+             "epoch": 0.35,
+             "learning_rate": 3.6177105831533476e-07,
+             "logits/generated": -2.8279058933258057,
+             "logits/real": -2.5158348083496094,
+             "logps/generated": -231.4835968017578,
+             "logps/real": -140.82273864746094,
+             "loss": 0.0033,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -12.741474151611328,
+             "rewards/margins": 12.89801025390625,
+             "rewards/real": 0.15653587877750397,
+             "step": 180
+         },
+         {
+             "epoch": 0.37,
+             "learning_rate": 3.509719222462203e-07,
+             "logits/generated": -2.8390724658966064,
+             "logits/real": -2.552072048187256,
+             "logps/generated": -226.83334350585938,
+             "logps/real": -127.37821197509766,
+             "loss": 0.0011,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -12.685934066772461,
+             "rewards/margins": 12.80540657043457,
+             "rewards/real": 0.11947281658649445,
+             "step": 190
+         },
+         {
+             "epoch": 0.39,
+             "learning_rate": 3.401727861771058e-07,
+             "logits/generated": -2.832120418548584,
+             "logits/real": -2.5636706352233887,
+             "logps/generated": -224.13436889648438,
+             "logps/real": -126.2430419921875,
+             "loss": 0.0034,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -12.46034049987793,
+             "rewards/margins": 12.773991584777832,
+             "rewards/real": 0.31365132331848145,
+             "step": 200
+         },
+         {
+             "epoch": 0.39,
+             "eval_logits/generated": -2.8109114170074463,
+             "eval_logits/real": -2.53472638130188,
+             "eval_logps/generated": -240.85931396484375,
+             "eval_logps/real": -133.8314208984375,
+             "eval_loss": 0.002396376570686698,
+             "eval_rewards/accuracies": 1.0,
+             "eval_rewards/generated": -14.183443069458008,
+             "eval_rewards/margins": 14.109496116638184,
+             "eval_rewards/real": -0.07394662499427795,
+             "eval_runtime": 113.7278,
+             "eval_samples_per_second": 14.772,
+             "eval_steps_per_second": 0.466,
+             "step": 200
+         },
+         {
+             "epoch": 0.41,
+             "learning_rate": 3.2937365010799135e-07,
+             "logits/generated": -2.8051133155822754,
+             "logits/real": -2.4014732837677,
+             "logps/generated": -237.0082244873047,
+             "logps/real": -113.56056213378906,
+             "loss": 0.001,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -14.111905097961426,
+             "rewards/margins": 13.899911880493164,
+             "rewards/real": -0.21199479699134827,
+             "step": 210
+         },
+         {
+             "epoch": 0.43,
+             "learning_rate": 3.1857451403887684e-07,
+             "logits/generated": -2.820549488067627,
+             "logits/real": -2.5103089809417725,
+             "logps/generated": -243.4583740234375,
+             "logps/real": -127.36563873291016,
+             "loss": 0.0018,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -14.133171081542969,
+             "rewards/margins": 14.2222900390625,
+             "rewards/real": 0.08911846578121185,
+             "step": 220
+         },
+         {
+             "epoch": 0.45,
+             "learning_rate": 3.077753779697624e-07,
+             "logits/generated": -2.8067831993103027,
+             "logits/real": -2.6124329566955566,
+             "logps/generated": -219.4794158935547,
+             "logps/real": -123.17138671875,
+             "loss": 0.0025,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -12.553444862365723,
+             "rewards/margins": 12.771233558654785,
+             "rewards/real": 0.21778886020183563,
+             "step": 230
+         },
+         {
+             "epoch": 0.47,
+             "learning_rate": 2.9697624190064794e-07,
+             "logits/generated": -2.8342039585113525,
+             "logits/real": -2.46539306640625,
+             "logps/generated": -229.17520141601562,
+             "logps/real": -126.71480560302734,
+             "loss": 0.0005,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -12.728944778442383,
+             "rewards/margins": 12.824361801147461,
+             "rewards/real": 0.09541518241167068,
+             "step": 240
+         },
+         {
+             "epoch": 0.49,
+             "learning_rate": 2.861771058315335e-07,
+             "logits/generated": -2.784558057785034,
+             "logits/real": -2.4345905780792236,
+             "logps/generated": -229.5637969970703,
+             "logps/real": -120.04652404785156,
+             "loss": 0.002,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -13.79577350616455,
+             "rewards/margins": 13.668333053588867,
+             "rewards/real": -0.12744084000587463,
+             "step": 250
+         },
+         {
+             "epoch": 0.5,
+             "learning_rate": 2.7537796976241903e-07,
+             "logits/generated": -2.7688822746276855,
+             "logits/real": -2.3783717155456543,
+             "logps/generated": -251.04879760742188,
+             "logps/real": -122.02119445800781,
+             "loss": 0.0023,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -14.788324356079102,
+             "rewards/margins": 14.605295181274414,
+             "rewards/real": -0.1830286979675293,
+             "step": 260
+         },
+         {
+             "epoch": 0.52,
+             "learning_rate": 2.645788336933045e-07,
+             "logits/generated": -2.761305093765259,
+             "logits/real": -2.324169635772705,
+             "logps/generated": -248.24539184570312,
+             "logps/real": -111.51519775390625,
+             "loss": 0.0002,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -14.844027519226074,
+             "rewards/margins": 14.8836088180542,
+             "rewards/real": 0.03958074748516083,
+             "step": 270
+         },
+         {
+             "epoch": 0.54,
+             "learning_rate": 2.5377969762419007e-07,
+             "logits/generated": -2.7696480751037598,
+             "logits/real": -2.422027826309204,
+             "logps/generated": -254.77340698242188,
+             "logps/real": -130.95404052734375,
+             "loss": 0.0009,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -15.440943717956543,
+             "rewards/margins": 15.30030632019043,
+             "rewards/real": -0.14063778519630432,
+             "step": 280
+         },
+         {
+             "epoch": 0.56,
+             "learning_rate": 2.4298056155507556e-07,
+             "logits/generated": -2.746011972427368,
+             "logits/real": -2.287593126296997,
+             "logps/generated": -258.4119873046875,
+             "logps/real": -124.4466781616211,
+             "loss": 0.0005,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -15.832595825195312,
+             "rewards/margins": 15.681103706359863,
+             "rewards/real": -0.15149332582950592,
+             "step": 290
+         },
+         {
+             "epoch": 0.58,
+             "learning_rate": 2.3218142548596113e-07,
+             "logits/generated": -2.7288124561309814,
+             "logits/real": -2.3387868404388428,
+             "logps/generated": -255.36669921875,
+             "logps/real": -116.48316955566406,
+             "loss": 0.0007,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -16.082441329956055,
+             "rewards/margins": 15.728803634643555,
+             "rewards/real": -0.3536352515220642,
+             "step": 300
+         },
+         {
+             "epoch": 0.58,
+             "eval_logits/generated": -2.730811357498169,
+             "eval_logits/real": -2.404576063156128,
+             "eval_logps/generated": -268.1523742675781,
+             "eval_logps/real": -135.4730987548828,
+             "eval_loss": 0.0012407265603542328,
+             "eval_rewards/accuracies": 1.0,
+             "eval_rewards/generated": -16.912744522094727,
+             "eval_rewards/margins": 16.67462921142578,
+             "eval_rewards/real": -0.23811471462249756,
+             "eval_runtime": 114.2693,
+             "eval_samples_per_second": 14.702,
+             "eval_steps_per_second": 0.464,
+             "step": 300
+         },
+         {
+             "epoch": 0.6,
+             "learning_rate": 2.2138228941684665e-07,
+             "logits/generated": -2.705501079559326,
+             "logits/real": -2.2757084369659424,
+             "logps/generated": -258.28076171875,
+             "logps/real": -140.76162719726562,
+             "loss": 0.0011,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -15.845602035522461,
+             "rewards/margins": 14.796327590942383,
+             "rewards/real": -1.0492744445800781,
+             "step": 310
+         },
+         {
+             "epoch": 0.62,
+             "learning_rate": 2.1058315334773217e-07,
+             "logits/generated": -2.7162063121795654,
+             "logits/real": -2.4163174629211426,
+             "logps/generated": -282.51336669921875,
+             "logps/real": -138.6903839111328,
+             "loss": 0.0006,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -18.408849716186523,
+             "rewards/margins": 17.277786254882812,
+             "rewards/real": -1.131063461303711,
+             "step": 320
+         },
+         {
+             "epoch": 0.64,
+             "learning_rate": 1.9978401727861772e-07,
+             "logits/generated": -2.6899831295013428,
+             "logits/real": -2.407248020172119,
+             "logps/generated": -282.01873779296875,
+             "logps/real": -141.7494354248047,
+             "loss": 0.0001,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -18.30569076538086,
+             "rewards/margins": 17.74685287475586,
+             "rewards/real": -0.5588369965553284,
+             "step": 330
+         },
+         {
+             "epoch": 0.66,
+             "learning_rate": 1.8898488120950324e-07,
+             "logits/generated": -2.694230556488037,
+             "logits/real": -2.284055233001709,
+             "logps/generated": -272.35986328125,
+             "logps/real": -123.73674011230469,
+             "loss": 0.0004,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -17.68344497680664,
+             "rewards/margins": 16.862354278564453,
+             "rewards/real": -0.8210934400558472,
+             "step": 340
+         },
+         {
+             "epoch": 0.68,
+             "learning_rate": 1.7818574514038876e-07,
+             "logits/generated": -2.713329315185547,
+             "logits/real": -2.368961811065674,
+             "logps/generated": -274.2613830566406,
+             "logps/real": -129.2357635498047,
+             "loss": 0.0009,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -17.50069808959961,
+             "rewards/margins": 16.7661190032959,
+             "rewards/real": -0.7345799207687378,
+             "step": 350
+         },
+         {
+             "epoch": 0.7,
+             "learning_rate": 1.673866090712743e-07,
+             "logits/generated": -2.701970100402832,
+             "logits/real": -2.498610258102417,
+             "logps/generated": -284.69195556640625,
+             "logps/real": -152.46725463867188,
+             "loss": 0.0016,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -18.89683723449707,
+             "rewards/margins": 18.188879013061523,
+             "rewards/real": -0.7079570293426514,
+             "step": 360
+         },
+         {
+             "epoch": 0.72,
+             "learning_rate": 1.5658747300215982e-07,
+             "logits/generated": -2.6658008098602295,
+             "logits/real": -2.3703696727752686,
+             "logps/generated": -280.6600036621094,
+             "logps/real": -144.48782348632812,
+             "loss": 0.0002,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -18.335285186767578,
+             "rewards/margins": 17.47960090637207,
+             "rewards/real": -0.8556815385818481,
+             "step": 370
+         },
+         {
+             "epoch": 0.74,
+             "learning_rate": 1.4578833693304534e-07,
+             "logits/generated": -2.7102785110473633,
+             "logits/real": -2.3787968158721924,
+             "logps/generated": -288.45074462890625,
+             "logps/real": -136.98178100585938,
+             "loss": 0.0018,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -18.772268295288086,
+             "rewards/margins": 18.083477020263672,
+             "rewards/real": -0.6887896060943604,
+             "step": 380
+         },
+         {
+             "epoch": 0.76,
+             "learning_rate": 1.3498920086393086e-07,
+             "logits/generated": -2.6787545680999756,
+             "logits/real": -2.271860122680664,
+             "logps/generated": -288.81396484375,
+             "logps/real": -126.51753234863281,
+             "loss": 0.0009,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -18.791013717651367,
+             "rewards/margins": 17.96844482421875,
+             "rewards/real": -0.8225716352462769,
+             "step": 390
+         },
+         {
+             "epoch": 0.78,
+             "learning_rate": 1.241900647948164e-07,
+             "logits/generated": -2.6518869400024414,
+             "logits/real": -2.3778929710388184,
+             "logps/generated": -278.9534912109375,
+             "logps/real": -152.96298217773438,
+             "loss": 0.0016,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -18.41701316833496,
+             "rewards/margins": 16.954509735107422,
+             "rewards/real": -1.4625046253204346,
+             "step": 400
+         },
+         {
+             "epoch": 0.78,
+             "eval_logits/generated": -2.655914068222046,
+             "eval_logits/real": -2.3916683197021484,
+             "eval_logps/generated": -294.743896484375,
+             "eval_logps/real": -144.9702606201172,
+             "eval_loss": 0.000959120923653245,
+             "eval_rewards/accuracies": 1.0,
+             "eval_rewards/generated": -19.5718994140625,
+             "eval_rewards/margins": 18.384069442749023,
+             "eval_rewards/real": -1.1878323554992676,
+             "eval_runtime": 113.966,
+             "eval_samples_per_second": 14.741,
+             "eval_steps_per_second": 0.465,
+             "step": 400
+         },
+         {
+             "epoch": 0.8,
+             "learning_rate": 1.1339092872570194e-07,
+             "logits/generated": -2.668544292449951,
+             "logits/real": -2.383988380432129,
+             "logps/generated": -284.5706481933594,
+             "logps/real": -138.00421142578125,
+             "loss": 0.0011,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -18.296131134033203,
+             "rewards/margins": 16.981504440307617,
+             "rewards/real": -1.3146272897720337,
+             "step": 410
+         },
+         {
+             "epoch": 0.82,
+             "learning_rate": 1.0259179265658747e-07,
+             "logits/generated": -2.637368679046631,
+             "logits/real": -2.379403829574585,
+             "logps/generated": -290.41619873046875,
+             "logps/real": -135.14498901367188,
+             "loss": 0.001,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -19.232669830322266,
+             "rewards/margins": 18.452062606811523,
+             "rewards/real": -0.7806074619293213,
+             "step": 420
+         },
+         {
+             "epoch": 0.83,
+             "learning_rate": 9.179265658747299e-08,
+             "logits/generated": -2.6258468627929688,
+             "logits/real": -2.2875823974609375,
+             "logps/generated": -289.2123718261719,
+             "logps/real": -127.7003402709961,
+             "loss": 0.0007,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -19.344642639160156,
+             "rewards/margins": 18.49654769897461,
+             "rewards/real": -0.8480979204177856,
+             "step": 430
+         },
+         {
+             "epoch": 0.85,
+             "learning_rate": 8.099352051835854e-08,
+             "logits/generated": -2.6232876777648926,
+             "logits/real": -2.2331902980804443,
+             "logps/generated": -286.9706726074219,
+             "logps/real": -136.14852905273438,
+             "loss": 0.0004,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -19.171314239501953,
+             "rewards/margins": 17.70499038696289,
+             "rewards/real": -1.4663217067718506,
+             "step": 440
+         },
+         {
+             "epoch": 0.87,
+             "learning_rate": 7.019438444924406e-08,
+             "logits/generated": -2.6160647869110107,
+             "logits/real": -2.321377992630005,
+             "logps/generated": -288.0433044433594,
+             "logps/real": -141.96792602539062,
+             "loss": 0.0004,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -19.229713439941406,
+             "rewards/margins": 18.14931869506836,
+             "rewards/real": -1.0803945064544678,
+             "step": 450
+         },
+         {
+             "epoch": 0.89,
+             "learning_rate": 5.939524838012959e-08,
+             "logits/generated": -2.6172728538513184,
+             "logits/real": -2.3889567852020264,
+             "logps/generated": -304.689697265625,
+             "logps/real": -138.3423614501953,
+             "loss": 0.0024,
+             "rewards/accuracies": 0.987500011920929,
+             "rewards/generated": -20.097713470458984,
+             "rewards/margins": 19.377609252929688,
+             "rewards/real": -0.7201052308082581,
+             "step": 460
+         },
+         {
+             "epoch": 0.91,
+             "learning_rate": 4.8596112311015116e-08,
+             "logits/generated": -2.6321027278900146,
+             "logits/real": -2.3017470836639404,
+             "logps/generated": -284.72479248046875,
+             "logps/real": -128.301025390625,
+             "loss": 0.0002,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -19.106754302978516,
+             "rewards/margins": 18.5189208984375,
+             "rewards/real": -0.5878311991691589,
+             "step": 470
+         },
+         {
+             "epoch": 0.93,
+             "learning_rate": 3.779697624190065e-08,
+             "logits/generated": -2.61067795753479,
+             "logits/real": -2.3235392570495605,
+             "logps/generated": -293.27984619140625,
+             "logps/real": -133.7152862548828,
+             "loss": 0.0001,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -19.834014892578125,
+             "rewards/margins": 18.92249870300293,
+             "rewards/real": -0.9115169644355774,
+             "step": 480
+         },
+         {
+             "epoch": 0.95,
+             "learning_rate": 2.6997840172786175e-08,
+             "logits/generated": -2.620330333709717,
+             "logits/real": -2.346421241760254,
+             "logps/generated": -295.2581787109375,
+             "logps/real": -136.19390869140625,
+             "loss": 0.0005,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -19.7529296875,
+             "rewards/margins": 18.93564224243164,
+             "rewards/real": -0.8172855377197266,
+             "step": 490
+         },
+         {
+             "epoch": 0.97,
+             "learning_rate": 1.6198704103671707e-08,
+             "logits/generated": -2.619122266769409,
+             "logits/real": -2.424999475479126,
+             "logps/generated": -298.48040771484375,
+             "logps/real": -157.33090209960938,
+             "loss": 0.0001,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -20.18784523010254,
+             "rewards/margins": 19.563133239746094,
+             "rewards/real": -0.6247111558914185,
+             "step": 500
+         },
+         {
+             "epoch": 0.97,
+             "eval_logits/generated": -2.606785535812378,
+             "eval_logits/real": -2.3491668701171875,
+             "eval_logps/generated": -303.8465576171875,
+             "eval_logps/real": -141.06735229492188,
+             "eval_loss": 0.0009754680213518441,
+             "eval_rewards/accuracies": 1.0,
+             "eval_rewards/generated": -20.482166290283203,
+             "eval_rewards/margins": 19.684627532958984,
+             "eval_rewards/real": -0.7975420951843262,
+             "eval_runtime": 113.9889,
+             "eval_samples_per_second": 14.738,
+             "eval_steps_per_second": 0.465,
+             "step": 500
+         },
+         {
+             "epoch": 0.99,
+             "learning_rate": 5.3995680345572344e-09,
+             "logits/generated": -2.629978895187378,
+             "logits/real": -2.2564597129821777,
+             "logps/generated": -287.25323486328125,
+             "logps/real": -123.89505767822266,
+             "loss": 0.0002,
+             "rewards/accuracies": 1.0,
+             "rewards/generated": -18.566009521484375,
+             "rewards/margins": 17.70358657836914,
+             "rewards/real": -0.8624227643013,
+             "step": 510
+         },
+         {
+             "epoch": 1.0,
+             "step": 515,
+             "total_flos": 0.0,
+             "train_loss": 0.016583090304290803,
+             "train_runtime": 3283.9246,
+             "train_samples_per_second": 5.014,
+             "train_steps_per_second": 0.157
+         }
+     ],
+     "logging_steps": 10,
+     "max_steps": 515,
+     "num_input_tokens_seen": 0,
+     "num_train_epochs": 1,
+     "save_steps": 100,
+     "total_flos": 0.0,
+     "train_batch_size": 8,
+     "trial_name": null,
+     "trial_params": null
+ }
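
`trainer_state.json` carries the full step-by-step log; the README's "Training results" table is just its `eval_*` records. A sketch that recovers those rows, assuming the file is local:

```python
# Prints the five eval checkpoints (steps 100-500) from log_history.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for rec in state["log_history"]:
    if "eval_loss" in rec:
        print(rec["step"],
              round(rec["eval_loss"], 4),
              round(rec["eval_rewards/margins"], 4))
# 100 0.0049 10.5132
# 200 0.0024 14.1095
# 300 0.0012 16.6746
# 400 0.001 18.3841
# 500 0.001 19.6846
```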