jikaixuan committed
Commit 5b6d4b2
1 Parent(s): f5edf98

Model save

README.md CHANGED
@@ -15,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the None dataset.
 It achieves the following results on the evaluation set:
- - Loss: 0.6618
- - Rewards/chosen: 0.0116
- - Rewards/rejected: -0.0133
- - Rewards/accuracies: 0.6150
- - Rewards/margins: 0.0249
- - Logps/rejected: -262.4203
- - Logps/chosen: -281.8592
- - Logits/rejected: -2.8344
- - Logits/chosen: -2.8413
- - Use Label: 18709.0195
- - Pred Label: 13352.9805
+ - Loss: 0.6741
+ - Rewards/chosen: 0.0220
+ - Rewards/rejected: -0.0242
+ - Rewards/accuracies: 0.6680
+ - Rewards/margins: 0.0462
+ - Logps/rejected: -259.5420
+ - Logps/chosen: -284.0372
+ - Logits/rejected: -2.8454
+ - Logits/chosen: -2.8419
+ - Use Label: 0.0
+ - Pred Label: 0.0
 
 ## Model description
 
@@ -49,10 +49,10 @@ The following hyperparameters were used during training:
 - eval_batch_size: 4
 - seed: 42
 - distributed_type: multi-GPU
- - num_devices: 2
- - gradient_accumulation_steps: 8
+ - num_devices: 4
+ - gradient_accumulation_steps: 4
 - total_train_batch_size: 64
- - total_eval_batch_size: 8
+ - total_eval_batch_size: 16
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
 - lr_scheduler_warmup_ratio: 0.1
@@ -60,9 +60,9 @@ The following hyperparameters were used during training:
 
 ### Training results
 
- | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen | Use Label | Pred Label |
- |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|:----------:|:----------:|
- | 0.6628 | 1.0 | 955 | 0.6618 | 0.0116 | -0.0133 | 0.6150 | 0.0249 | -262.4203 | -281.8592 | -2.8344 | -2.8413 | 18206.0195 | 12855.9805 |
+ | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen | Use Label | Pred Label |
+ |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|:---------:|:----------:|
+ | 0.6744 | 1.0 | 955 | 0.6741 | 0.0220 | -0.0242 | 0.6680 | 0.0462 | -259.5420 | -284.0372 | -2.8454 | -2.8419 | 0.0 | 0.0 |
 
 
 ### Framework versions
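Note on the hyperparameter hunk above: the device count and gradient accumulation were swapped (2 × 8 → 4 × 4), so the effective train batch size stays at 64; the per-device train batch size of 4 is only inferred from those totals and is not stated in the diff. A minimal sketch of the arithmetic:

```python
# Effective batch size check for the updated hyperparameters.
# per_device_train_batch_size = 4 is inferred from the reported totals;
# it does not appear in this hunk.
per_device_train_batch_size = 4

old_total = per_device_train_batch_size * 2 * 8   # num_devices=2, grad_accum=8
new_total = per_device_train_batch_size * 4 * 4   # num_devices=4, grad_accum=4
assert old_total == new_total == 64               # total_train_batch_size stays 64

# Eval side: 4 per device, so 2 GPUs reported a total of 8 and 4 GPUs report 16.
assert 4 * 2 == 8 and 4 * 4 == 16
```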
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:50dec1b84957e69e3addc889d603d42852492e7b6871ac38b15599e17d0a0147
+ oid sha256:3de4b6a0728c8fbd836578308d9379c925334f979968e2be33179009eed76454
 size 218138576
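Both sides of this hunk are Git LFS pointers rather than the adapter weights themselves; only the oid changes while the size stays 218138576 bytes. A hedged sketch of checking a downloaded copy against the new pointer (the local path is assumed):

```python
# Sketch: verify a locally downloaded adapter file against the LFS pointer oid.
# The local path is hypothetical; the oid below is the one from this commit.
import hashlib

expected_oid = "3de4b6a0728c8fbd836578308d9379c925334f979968e2be33179009eed76454"

h = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:   # hypothetical local path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == expected_oid, "file does not match the LFS pointer oid"
```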
all_results.json CHANGED
@@ -1,23 +1,23 @@
 {
  "epoch": 1.0,
- "eval_logits/chosen": -2.841262102127075,
- "eval_logits/rejected": -2.8343887329101562,
- "eval_logps/chosen": -281.85919189453125,
- "eval_logps/rejected": -262.4202880859375,
- "eval_loss": 0.6618225574493408,
- "eval_pred_label": 13352.98046875,
- "eval_rewards/accuracies": 0.6150000095367432,
- "eval_rewards/chosen": 0.011613711714744568,
- "eval_rewards/margins": 0.02489962987601757,
- "eval_rewards/rejected": -0.013285920023918152,
- "eval_runtime": 834.3056,
+ "eval_logits/chosen": -2.8419151306152344,
+ "eval_logits/rejected": -2.845423936843872,
+ "eval_logps/chosen": -284.0372314453125,
+ "eval_logps/rejected": -259.5419921875,
+ "eval_loss": 0.6741092801094055,
+ "eval_pred_label": 0.0,
+ "eval_rewards/accuracies": 0.6679999828338623,
+ "eval_rewards/chosen": 0.02201448194682598,
+ "eval_rewards/margins": 0.04624143987894058,
+ "eval_rewards/rejected": -0.02422695979475975,
+ "eval_runtime": 465.3584,
  "eval_samples": 2000,
- "eval_samples_per_second": 2.397,
- "eval_steps_per_second": 0.3,
- "eval_use_label": 18709.01953125,
- "train_loss": 0.6705795382954063,
- "train_runtime": 45840.6595,
+ "eval_samples_per_second": 4.298,
+ "eval_steps_per_second": 0.269,
+ "eval_use_label": 0.0,
+ "train_loss": 0.6817296707193264,
+ "train_runtime": 25631.2708,
  "train_samples": 61135,
- "train_samples_per_second": 1.334,
- "train_steps_per_second": 0.021
+ "train_samples_per_second": 2.385,
+ "train_steps_per_second": 0.037
 }
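The updated reward fields are internally consistent: eval_rewards/margins is, up to float rounding, eval_rewards/chosen minus eval_rewards/rejected, which is how DPO-style trainers typically report the margin. A quick check with the values above:

```python
# Consistency check on the updated eval reward fields in all_results.json.
chosen = 0.02201448194682598
rejected = -0.02422695979475975
margin = 0.04624143987894058

# margins is reported as chosen minus rejected (up to float rounding)
assert abs((chosen - rejected) - margin) < 1e-6
```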
eval_results.json CHANGED
@@ -1,18 +1,18 @@
 {
  "epoch": 1.0,
- "eval_logits/chosen": -2.841262102127075,
- "eval_logits/rejected": -2.8343887329101562,
- "eval_logps/chosen": -281.85919189453125,
- "eval_logps/rejected": -262.4202880859375,
- "eval_loss": 0.6618225574493408,
- "eval_pred_label": 13352.98046875,
- "eval_rewards/accuracies": 0.6150000095367432,
- "eval_rewards/chosen": 0.011613711714744568,
- "eval_rewards/margins": 0.02489962987601757,
- "eval_rewards/rejected": -0.013285920023918152,
- "eval_runtime": 834.3056,
+ "eval_logits/chosen": -2.8419151306152344,
+ "eval_logits/rejected": -2.845423936843872,
+ "eval_logps/chosen": -284.0372314453125,
+ "eval_logps/rejected": -259.5419921875,
+ "eval_loss": 0.6741092801094055,
+ "eval_pred_label": 0.0,
+ "eval_rewards/accuracies": 0.6679999828338623,
+ "eval_rewards/chosen": 0.02201448194682598,
+ "eval_rewards/margins": 0.04624143987894058,
+ "eval_rewards/rejected": -0.02422695979475975,
+ "eval_runtime": 465.3584,
  "eval_samples": 2000,
- "eval_samples_per_second": 2.397,
- "eval_steps_per_second": 0.3,
- "eval_use_label": 18709.01953125
+ "eval_samples_per_second": 4.298,
+ "eval_steps_per_second": 0.269,
+ "eval_use_label": 0.0
 }
runs/Jan20_17-20-45_uclaml04.cs.ucla.edu/events.out.tfevents.1705800111.uclaml04.cs.ucla.edu.297358.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:c57e845c5e45551fb4ce09d53dbfcebee11ff839380431898c267476990f0c41
- size 11823
+ oid sha256:11b2ed24d7c5f72ee264779528d97138dd4794c77fded61dcf78550b01334188
+ size 13024
runs/Jan20_17-20-45_uclaml04.cs.ucla.edu/events.out.tfevents.1705826207.uclaml04.cs.ucla.edu.297358.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca2b21d40fd1081b2243755d435cc236c37d2506698b65209d5474a3efde6ef7
+ size 935
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
  "epoch": 1.0,
- "train_loss": 0.6705795382954063,
- "train_runtime": 45840.6595,
+ "train_loss": 0.6817296707193264,
+ "train_runtime": 25631.2708,
  "train_samples": 61135,
- "train_samples_per_second": 1.334,
- "train_steps_per_second": 0.021
+ "train_samples_per_second": 2.385,
+ "train_steps_per_second": 0.037
 }
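The new throughput numbers follow directly from the runtime: 61135 samples over 25631.27 s is about 2.385 samples/s, and 955 optimizer steps over the same runtime is about 0.037 steps/s. A worked check (step count taken from max_steps in trainer_state.json below):

```python
# Throughput arithmetic behind the updated train_results.json.
train_samples = 61135
train_runtime = 25631.2708   # seconds
steps = 955                  # optimizer steps for one epoch (max_steps below)

print(round(train_samples / train_runtime, 3))   # 2.385 samples per second
print(round(steps / train_runtime, 3))           # 0.037 steps per second

# 61135 samples at an effective batch size of 64 gives 61135 // 64 = 955 steps,
# consistent with max_steps in trainer_state.json.
print(train_samples // 64)                       # 955
```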
trainer_state.json CHANGED
@@ -11,10 +11,10 @@
 {
  "epoch": 0.0,
  "learning_rate": 5.208333333333333e-09,
- "logits/chosen": -2.899709463119507,
- "logits/rejected": -2.879509687423706,
- "logps/chosen": -314.8815612792969,
- "logps/rejected": -239.785888671875,
+ "logits/chosen": -2.980285167694092,
+ "logits/rejected": -2.87275767326355,
+ "logps/chosen": -313.4390563964844,
+ "logps/rejected": -236.1754150390625,
  "loss": 0.6931,
  "pred_label": 0.0,
  "rewards/accuracies": 0.0,
@@ -22,184 +22,184 @@
  "rewards/margins": 0.0,
  "rewards/rejected": 0.0,
  "step": 1,
- "use_label": 18.0
+ "use_label": 0.0
 },
 {
  "epoch": 0.1,
  "learning_rate": 4.976717112922002e-07,
- "logits/chosen": -2.8282251358032227,
- "logits/rejected": -2.8269264698028564,
- "logps/chosen": -285.5703430175781,
- "logps/rejected": -267.9831237792969,
- "loss": 0.6805,
- "pred_label": 479.7752380371094,
- "rewards/accuracies": 0.5012626051902771,
- "rewards/chosen": 0.0007524320390075445,
- "rewards/margins": 0.0014539315598085523,
- "rewards/rejected": -0.0007014995790086687,
+ "logits/chosen": -2.8195502758026123,
+ "logits/rejected": -2.828876495361328,
+ "logps/chosen": -285.2825012207031,
+ "logps/rejected": -270.9394836425781,
+ "loss": 0.6931,
+ "pred_label": 0.0,
+ "rewards/accuracies": 0.4873737394809723,
+ "rewards/chosen": -0.0001882972428575158,
+ "rewards/margins": -0.0012037099804729223,
+ "rewards/rejected": 0.0010154128540307283,
  "step": 100,
- "use_label": 1138.2247314453125
+ "use_label": 0.0
 },
 {
  "epoch": 0.21,
  "learning_rate": 4.3946449359720607e-07,
- "logits/chosen": -2.829744338989258,
- "logits/rejected": -2.8234996795654297,
- "logps/chosen": -283.5379333496094,
- "logps/rejected": -264.45965576171875,
- "loss": 0.6797,
- "pred_label": 1464.762451171875,
- "rewards/accuracies": 0.5268750190734863,
- "rewards/chosen": 0.0027550552040338516,
- "rewards/margins": 0.004276593215763569,
- "rewards/rejected": -0.0015215380117297173,
+ "logits/chosen": -2.8279786109924316,
+ "logits/rejected": -2.8164660930633545,
+ "logps/chosen": -278.72576904296875,
+ "logps/rejected": -260.5760498046875,
+ "loss": 0.6901,
+ "pred_label": 0.0,
+ "rewards/accuracies": 0.5506250262260437,
+ "rewards/chosen": 0.0056201983243227005,
+ "rewards/margins": 0.009352817200124264,
+ "rewards/rejected": -0.003732620272785425,
  "step": 200,
- "use_label": 3337.237548828125
+ "use_label": 0.0
 },
 {
  "epoch": 0.31,
  "learning_rate": 3.812572759022118e-07,
- "logits/chosen": -2.822391986846924,
- "logits/rejected": -2.821011543273926,
- "logps/chosen": -290.09552001953125,
- "logps/rejected": -260.20050048828125,
- "loss": 0.678,
- "pred_label": 2522.33740234375,
- "rewards/accuracies": 0.546875,
- "rewards/chosen": 0.004526687320321798,
- "rewards/margins": 0.008910334669053555,
- "rewards/rejected": -0.004383646883070469,
+ "logits/chosen": -2.8140082359313965,
+ "logits/rejected": -2.8158328533172607,
+ "logps/chosen": -284.0040283203125,
+ "logps/rejected": -253.93580627441406,
+ "loss": 0.6875,
+ "pred_label": 0.0,
+ "rewards/accuracies": 0.5793750286102295,
+ "rewards/chosen": 0.006361996755003929,
+ "rewards/margins": 0.012689676135778427,
+ "rewards/rejected": -0.006327680312097073,
  "step": 300,
- "use_label": 5479.66259765625
+ "use_label": 0.0
 },
 {
  "epoch": 0.42,
  "learning_rate": 3.230500582072177e-07,
- "logits/chosen": -2.8350696563720703,
- "logits/rejected": -2.8237721920013428,
- "logps/chosen": -284.8573303222656,
- "logps/rejected": -260.8428039550781,
- "loss": 0.6752,
- "pred_label": 3636.47509765625,
- "rewards/accuracies": 0.5778124928474426,
- "rewards/chosen": 0.00710176769644022,
- "rewards/margins": 0.013848603703081608,
- "rewards/rejected": -0.006746836472302675,
+ "logits/chosen": -2.826840877532959,
+ "logits/rejected": -2.8091540336608887,
+ "logps/chosen": -284.3250427246094,
+ "logps/rejected": -262.8480529785156,
+ "loss": 0.6842,
+ "pred_label": 0.0,
+ "rewards/accuracies": 0.6031249761581421,
+ "rewards/chosen": 0.010542460717260838,
+ "rewards/margins": 0.02265419438481331,
+ "rewards/rejected": -0.012111731804907322,
  "step": 400,
- "use_label": 7565.52490234375
+ "use_label": 0.0
 },
 {
  "epoch": 0.52,
  "learning_rate": 2.648428405122235e-07,
- "logits/chosen": -2.8218374252319336,
- "logits/rejected": -2.810873508453369,
- "logps/chosen": -281.2003173828125,
- "logps/rejected": -257.1551818847656,
- "loss": 0.6707,
- "pred_label": 4911.896484375,
- "rewards/accuracies": 0.5731250047683716,
- "rewards/chosen": 0.008172390051186085,
- "rewards/margins": 0.017216255888342857,
- "rewards/rejected": -0.009043867699801922,
+ "logits/chosen": -2.8074240684509277,
+ "logits/rejected": -2.795992136001587,
+ "logps/chosen": -269.97344970703125,
+ "logps/rejected": -252.08152770996094,
+ "loss": 0.6821,
+ "pred_label": 0.0,
+ "rewards/accuracies": 0.5874999761581421,
+ "rewards/chosen": 0.007951202802360058,
+ "rewards/margins": 0.017641538754105568,
+ "rewards/rejected": -0.00969033595174551,
  "step": 500,
- "use_label": 9490.103515625
+ "use_label": 0.0
 },
 {
  "epoch": 0.63,
  "learning_rate": 2.0663562281722933e-07,
- "logits/chosen": -2.8341524600982666,
- "logits/rejected": -2.8230907917022705,
- "logps/chosen": -284.7864685058594,
- "logps/rejected": -262.0230712890625,
- "loss": 0.6665,
- "pred_label": 6349.55859375,
- "rewards/accuracies": 0.6025000214576721,
- "rewards/chosen": 0.012129506096243858,
- "rewards/margins": 0.025582188740372658,
- "rewards/rejected": -0.0134526826441288,
+ "logits/chosen": -2.8337247371673584,
+ "logits/rejected": -2.8203465938568115,
+ "logps/chosen": -285.0716857910156,
+ "logps/rejected": -265.4593811035156,
+ "loss": 0.6782,
+ "pred_label": 0.0,
+ "rewards/accuracies": 0.6356250047683716,
+ "rewards/chosen": 0.015922056511044502,
+ "rewards/margins": 0.035635706037282944,
+ "rewards/rejected": -0.01971365138888359,
  "step": 600,
- "use_label": 11252.44140625
+ "use_label": 0.0
 },
 {
  "epoch": 0.73,
  "learning_rate": 1.4842840512223514e-07,
- "logits/chosen": -2.8274898529052734,
- "logits/rejected": -2.811511516571045,
- "logps/chosen": -282.0050048828125,
- "logps/rejected": -252.4735565185547,
- "loss": 0.6639,
- "pred_label": 7854.15869140625,
- "rewards/accuracies": 0.6112499833106995,
- "rewards/chosen": 0.013030249625444412,
- "rewards/margins": 0.026329634711146355,
- "rewards/rejected": -0.013299385085701942,
+ "logits/chosen": -2.8270840644836426,
+ "logits/rejected": -2.8112306594848633,
+ "logps/chosen": -281.3733215332031,
+ "logps/rejected": -248.8465576171875,
+ "loss": 0.6762,
+ "pred_label": 0.0,
+ "rewards/accuracies": 0.6324999928474426,
+ "rewards/chosen": 0.018638433888554573,
+ "rewards/margins": 0.03718380257487297,
+ "rewards/rejected": -0.018545370548963547,
  "step": 700,
- "use_label": 12947.8408203125
+ "use_label": 0.0
 },
 {
  "epoch": 0.84,
  "learning_rate": 9.022118742724097e-08,
- "logits/chosen": -2.832928419113159,
- "logits/rejected": -2.8318238258361816,
- "logps/chosen": -285.62213134765625,
- "logps/rejected": -259.8959045410156,
- "loss": 0.662,
- "pred_label": 9441.02734375,
- "rewards/accuracies": 0.6253125071525574,
- "rewards/chosen": 0.01489347591996193,
- "rewards/margins": 0.030955424532294273,
- "rewards/rejected": -0.016061950474977493,
+ "logits/chosen": -2.8297488689422607,
+ "logits/rejected": -2.83492374420166,
+ "logps/chosen": -288.54058837890625,
+ "logps/rejected": -261.6487731933594,
+ "loss": 0.6736,
+ "pred_label": 0.0,
+ "rewards/accuracies": 0.6393749713897705,
+ "rewards/chosen": 0.021646475419402122,
+ "rewards/margins": 0.04531754553318024,
+ "rewards/rejected": -0.023671068251132965,
  "step": 800,
- "use_label": 14560.97265625
+ "use_label": 0.0
 },
 {
  "epoch": 0.94,
  "learning_rate": 3.20139697322468e-08,
- "logits/chosen": -2.824517250061035,
- "logits/rejected": -2.830662250518799,
- "logps/chosen": -276.052490234375,
- "logps/rejected": -262.5692138671875,
- "loss": 0.6628,
- "pred_label": 10994.255859375,
- "rewards/accuracies": 0.6143749952316284,
- "rewards/chosen": 0.012237527407705784,
- "rewards/margins": 0.026263901963829994,
- "rewards/rejected": -0.01402637455612421,
+ "logits/chosen": -2.8206725120544434,
+ "logits/rejected": -2.8274974822998047,
+ "logps/chosen": -277.310791015625,
+ "logps/rejected": -256.5542297363281,
+ "loss": 0.6744,
+ "pred_label": 0.0,
+ "rewards/accuracies": 0.6443750262260437,
+ "rewards/chosen": 0.019743308424949646,
+ "rewards/margins": 0.041582074016332626,
+ "rewards/rejected": -0.021838760003447533,
  "step": 900,
- "use_label": 16207.744140625
+ "use_label": 0.0
 },
 {
  "epoch": 1.0,
- "eval_logits/chosen": -2.841262102127075,
- "eval_logits/rejected": -2.8343887329101562,
- "eval_logps/chosen": -281.85919189453125,
- "eval_logps/rejected": -262.4202880859375,
- "eval_loss": 0.6618225574493408,
- "eval_pred_label": 12855.98046875,
- "eval_rewards/accuracies": 0.6150000095367432,
- "eval_rewards/chosen": 0.011613711714744568,
- "eval_rewards/margins": 0.02489962987601757,
- "eval_rewards/rejected": -0.013285920023918152,
- "eval_runtime": 826.7995,
- "eval_samples_per_second": 2.419,
- "eval_steps_per_second": 0.302,
- "eval_use_label": 18206.01953125,
+ "eval_logits/chosen": -2.8419151306152344,
+ "eval_logits/rejected": -2.845423936843872,
+ "eval_logps/chosen": -284.0372314453125,
+ "eval_logps/rejected": -259.5419921875,
+ "eval_loss": 0.6741092801094055,
+ "eval_pred_label": 0.0,
+ "eval_rewards/accuracies": 0.6679999828338623,
+ "eval_rewards/chosen": 0.02201448194682598,
+ "eval_rewards/margins": 0.04624143987894058,
+ "eval_rewards/rejected": -0.02422695979475975,
+ "eval_runtime": 469.0597,
+ "eval_samples_per_second": 4.264,
+ "eval_steps_per_second": 0.266,
+ "eval_use_label": 0.0,
  "step": 955
 },
 {
  "epoch": 1.0,
  "step": 955,
  "total_flos": 0.0,
- "train_loss": 0.6705795382954063,
- "train_runtime": 45840.6595,
- "train_samples_per_second": 1.334,
- "train_steps_per_second": 0.021
+ "train_loss": 0.6817296707193264,
+ "train_runtime": 25631.2708,
+ "train_samples_per_second": 2.385,
+ "train_steps_per_second": 0.037
 }
 ],
 "logging_steps": 100,
 "max_steps": 955,
 "num_train_epochs": 1,
- "save_steps": 10,
+ "save_steps": 100,
 "total_flos": 0.0,
 "trial_name": null,
 "trial_params": null