{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9997382884061764,
  "eval_steps": 100,
  "global_step": 955,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 5.208333333333333e-09,
      "logits/chosen": -2.570180892944336,
      "logits/rejected": -2.5666794776916504,
      "logps/chosen": -302.8643798828125,
      "logps/rejected": -232.7855682373047,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.208333333333333e-08,
      "logits/chosen": -2.547344207763672,
      "logits/rejected": -2.532263994216919,
      "logps/chosen": -294.40087890625,
      "logps/rejected": -276.11126708984375,
      "loss": 0.6879,
      "rewards/accuracies": 0.4340277910232544,
      "rewards/chosen": 0.0004416291194502264,
      "rewards/margins": -0.0003637511981651187,
      "rewards/rejected": 0.000805380754172802,
      "step": 10
    },
    {
      "epoch": 0.02,
      "learning_rate": 1.0416666666666667e-07,
      "logits/chosen": -2.3985350131988525,
      "logits/rejected": -2.426626682281494,
      "logps/chosen": -244.53689575195312,
      "logps/rejected": -248.93624877929688,
      "loss": 0.6882,
      "rewards/accuracies": 0.49687498807907104,
      "rewards/chosen": -0.002964673563838005,
      "rewards/margins": 0.00020738500461447984,
      "rewards/rejected": -0.0031720586121082306,
      "step": 20
    },
    {
      "epoch": 0.03,
      "learning_rate": 1.5624999999999999e-07,
      "logits/chosen": -2.5088179111480713,
      "logits/rejected": -2.4950203895568848,
      "logps/chosen": -265.425537109375,
      "logps/rejected": -244.62191772460938,
      "loss": 0.6868,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.003862012643367052,
      "rewards/margins": -0.0030291248112916946,
      "rewards/rejected": -0.0008328882977366447,
      "step": 30
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.0833333333333333e-07,
      "logits/chosen": -2.472740888595581,
      "logits/rejected": -2.498608112335205,
      "logps/chosen": -264.92767333984375,
      "logps/rejected": -260.81536865234375,
      "loss": 0.6883,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": 0.002251622499898076,
      "rewards/margins": 0.002210150007158518,
      "rewards/rejected": 4.1472725570201874e-05,
      "step": 40
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.604166666666667e-07,
      "logits/chosen": -2.511932611465454,
      "logits/rejected": -2.4823100566864014,
      "logps/chosen": -255.1762237548828,
      "logps/rejected": -249.55859375,
      "loss": 0.6856,
      "rewards/accuracies": 0.5406249761581421,
      "rewards/chosen": 0.0019304720917716622,
      "rewards/margins": 0.0034704413264989853,
      "rewards/rejected": -0.0015399687690660357,
      "step": 50
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.4945309162139893,
      "logits/rejected": -2.5101318359375,
      "logps/chosen": -288.4325256347656,
      "logps/rejected": -261.18310546875,
      "loss": 0.6867,
      "rewards/accuracies": 0.503125011920929,
      "rewards/chosen": -0.002425801707431674,
      "rewards/margins": 0.002398231066763401,
      "rewards/rejected": -0.004824032541364431,
      "step": 60
    },
    {
      "epoch": 0.07,
      "learning_rate": 3.645833333333333e-07,
      "logits/chosen": -2.521496534347534,
      "logits/rejected": -2.506330966949463,
      "logps/chosen": -275.6085510253906,
      "logps/rejected": -259.58392333984375,
      "loss": 0.6867,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.0025966810062527657,
      "rewards/margins": 0.007796707563102245,
      "rewards/rejected": -0.005200026091188192,
      "step": 70
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": -2.456026315689087,
      "logits/rejected": -2.4563703536987305,
      "logps/chosen": -295.45941162109375,
      "logps/rejected": -263.0380859375,
      "loss": 0.6859,
      "rewards/accuracies": 0.4593749940395355,
      "rewards/chosen": -2.993037924170494e-05,
      "rewards/margins": -0.000812772021163255,
      "rewards/rejected": 0.0007828413508832455,
      "step": 80
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.6874999999999996e-07,
      "logits/chosen": -2.4610087871551514,
      "logits/rejected": -2.4633374214172363,
      "logps/chosen": -282.82684326171875,
      "logps/rejected": -245.01785278320312,
      "loss": 0.6867,
      "rewards/accuracies": 0.4906249940395355,
      "rewards/chosen": -0.003978222608566284,
      "rewards/margins": -0.003358404617756605,
      "rewards/rejected": -0.0006198181654326618,
      "step": 90
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.976717112922002e-07,
      "logits/chosen": -2.468822479248047,
      "logits/rejected": -2.4451956748962402,
      "logps/chosen": -264.9376525878906,
      "logps/rejected": -259.81182861328125,
      "loss": 0.6865,
      "rewards/accuracies": 0.528124988079071,
      "rewards/chosen": 0.004453591071069241,
      "rewards/margins": 0.004475563298910856,
      "rewards/rejected": -2.197279900428839e-05,
      "step": 100
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.918509895227007e-07,
      "logits/chosen": -2.487288236618042,
      "logits/rejected": -2.4807934761047363,
      "logps/chosen": -263.60906982421875,
      "logps/rejected": -244.46047973632812,
      "loss": 0.6852,
      "rewards/accuracies": 0.534375011920929,
      "rewards/chosen": 0.00421065092086792,
      "rewards/margins": 0.007397785782814026,
      "rewards/rejected": -0.003187134861946106,
      "step": 110
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.860302677532014e-07,
      "logits/chosen": -2.4685282707214355,
      "logits/rejected": -2.497313976287842,
      "logps/chosen": -277.75543212890625,
      "logps/rejected": -245.1427764892578,
      "loss": 0.6852,
      "rewards/accuracies": 0.5531250238418579,
      "rewards/chosen": 0.0019862186163663864,
      "rewards/margins": 0.0036893251817673445,
      "rewards/rejected": -0.0017031064489856362,
      "step": 120
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.802095459837019e-07,
      "logits/chosen": -2.512012481689453,
      "logits/rejected": -2.498793840408325,
      "logps/chosen": -283.26959228515625,
      "logps/rejected": -265.85369873046875,
      "loss": 0.6838,
      "rewards/accuracies": 0.534375011920929,
      "rewards/chosen": 0.006660536862909794,
      "rewards/margins": 0.007559786085039377,
      "rewards/rejected": -0.0008992499788291752,
      "step": 130
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.743888242142026e-07,
      "logits/chosen": -2.471496105194092,
      "logits/rejected": -2.4844748973846436,
      "logps/chosen": -255.3807373046875,
      "logps/rejected": -242.02658081054688,
      "loss": 0.6857,
      "rewards/accuracies": 0.5062500238418579,
      "rewards/chosen": 0.0008136004325933754,
      "rewards/margins": 4.014703517896123e-05,
      "rewards/rejected": 0.0007734533282928169,
      "step": 140
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.685681024447031e-07,
      "logits/chosen": -2.471660614013672,
      "logits/rejected": -2.463984966278076,
      "logps/chosen": -287.92523193359375,
      "logps/rejected": -260.02313232421875,
      "loss": 0.6837,
      "rewards/accuracies": 0.518750011920929,
      "rewards/chosen": 0.00581919914111495,
      "rewards/margins": 0.008211213164031506,
      "rewards/rejected": -0.0023920147214084864,
      "step": 150
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.627473806752037e-07,
      "logits/chosen": -2.520613193511963,
      "logits/rejected": -2.4651389122009277,
      "logps/chosen": -279.1468505859375,
      "logps/rejected": -264.69915771484375,
      "loss": 0.682,
      "rewards/accuracies": 0.5531250238418579,
      "rewards/chosen": 0.008319775573909283,
      "rewards/margins": 0.011747308075428009,
      "rewards/rejected": -0.003427532035857439,
      "step": 160
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.5692665890570433e-07,
      "logits/chosen": -2.5029516220092773,
      "logits/rejected": -2.496436595916748,
      "logps/chosen": -279.58843994140625,
      "logps/rejected": -243.44253540039062,
      "loss": 0.6817,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.008326916955411434,
      "rewards/margins": 0.014212583191692829,
      "rewards/rejected": -0.00588566530495882,
      "step": 170
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.5110593713620486e-07,
      "logits/chosen": -2.4697937965393066,
      "logits/rejected": -2.4516141414642334,
      "logps/chosen": -236.89675903320312,
      "logps/rejected": -253.85598754882812,
      "loss": 0.6832,
      "rewards/accuracies": 0.5531250238418579,
      "rewards/chosen": 0.00583054032176733,
      "rewards/margins": 0.012070106342434883,
      "rewards/rejected": -0.00623956648632884,
      "step": 180
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.4528521536670544e-07,
      "logits/chosen": -2.5360610485076904,
      "logits/rejected": -2.505577802658081,
      "logps/chosen": -291.94586181640625,
      "logps/rejected": -260.87054443359375,
      "loss": 0.6801,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.011800072155892849,
      "rewards/margins": 0.02227787859737873,
      "rewards/rejected": -0.010477803647518158,
      "step": 190
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.3946449359720607e-07,
      "logits/chosen": -2.4782238006591797,
      "logits/rejected": -2.508026599884033,
      "logps/chosen": -258.73895263671875,
      "logps/rejected": -252.0211639404297,
      "loss": 0.6809,
      "rewards/accuracies": 0.6031249761581421,
      "rewards/chosen": 0.009132781066000462,
      "rewards/margins": 0.0161266028881073,
      "rewards/rejected": -0.006993822753429413,
      "step": 200
    },
    {
      "epoch": 0.22,
      "learning_rate": 4.336437718277066e-07,
      "logits/chosen": -2.4700803756713867,
      "logits/rejected": -2.479269504547119,
      "logps/chosen": -272.45904541015625,
      "logps/rejected": -256.2876281738281,
      "loss": 0.6783,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.016153430566191673,
      "rewards/margins": 0.023166943341493607,
      "rewards/rejected": -0.007013511843979359,
      "step": 210
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.278230500582072e-07,
      "logits/chosen": -2.4889426231384277,
      "logits/rejected": -2.5127673149108887,
      "logps/chosen": -267.21966552734375,
      "logps/rejected": -258.29205322265625,
      "loss": 0.6793,
      "rewards/accuracies": 0.6031249761581421,
      "rewards/chosen": 0.005786339286714792,
      "rewards/margins": 0.01781514100730419,
      "rewards/rejected": -0.012028800323605537,
      "step": 220
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.220023282887078e-07,
      "logits/chosen": -2.4950485229492188,
      "logits/rejected": -2.4980530738830566,
      "logps/chosen": -295.85028076171875,
      "logps/rejected": -259.36529541015625,
      "loss": 0.6777,
      "rewards/accuracies": 0.596875011920929,
      "rewards/chosen": 0.007827522233128548,
      "rewards/margins": 0.014727133326232433,
      "rewards/rejected": -0.006899611093103886,
      "step": 230
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.1618160651920834e-07,
      "logits/chosen": -2.466526508331299,
      "logits/rejected": -2.4315407276153564,
      "logps/chosen": -282.66192626953125,
      "logps/rejected": -248.1639862060547,
      "loss": 0.6778,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": 0.01533576101064682,
      "rewards/margins": 0.017889009788632393,
      "rewards/rejected": -0.0025532490108162165,
      "step": 240
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.103608847497089e-07,
      "logits/chosen": -2.4848718643188477,
      "logits/rejected": -2.443408250808716,
      "logps/chosen": -288.2550964355469,
      "logps/rejected": -271.9901123046875,
      "loss": 0.6761,
      "rewards/accuracies": 0.621874988079071,
      "rewards/chosen": 0.013339035212993622,
      "rewards/margins": 0.023220960050821304,
      "rewards/rejected": -0.009881924837827682,
      "step": 250
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.0454016298020956e-07,
      "logits/chosen": -2.4762511253356934,
      "logits/rejected": -2.446619749069214,
      "logps/chosen": -268.5465393066406,
      "logps/rejected": -251.9985809326172,
      "loss": 0.6763,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.006427633110433817,
      "rewards/margins": 0.015916740521788597,
      "rewards/rejected": -0.009489107877016068,
      "step": 260
    },
    {
      "epoch": 0.28,
      "learning_rate": 3.987194412107101e-07,
      "logits/chosen": -2.4372754096984863,
      "logits/rejected": -2.450491189956665,
      "logps/chosen": -263.63262939453125,
      "logps/rejected": -252.0638427734375,
      "loss": 0.6735,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": 0.013371949084103107,
      "rewards/margins": 0.023976340889930725,
      "rewards/rejected": -0.010604391805827618,
      "step": 270
    },
    {
      "epoch": 0.29,
      "learning_rate": 3.9289871944121066e-07,
      "logits/chosen": -2.5451080799102783,
      "logits/rejected": -2.516885995864868,
      "logps/chosen": -282.615234375,
      "logps/rejected": -242.2822265625,
      "loss": 0.6722,
      "rewards/accuracies": 0.640625,
      "rewards/chosen": 0.0192705187946558,
      "rewards/margins": 0.029350727796554565,
      "rewards/rejected": -0.010080209001898766,
      "step": 280
    },
    {
      "epoch": 0.3,
      "learning_rate": 3.870779976717113e-07,
      "logits/chosen": -2.4943687915802,
      "logits/rejected": -2.5217864513397217,
      "logps/chosen": -277.5215759277344,
      "logps/rejected": -229.96298217773438,
      "loss": 0.6722,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": 0.015035443007946014,
      "rewards/margins": 0.024382654577493668,
      "rewards/rejected": -0.009347210638225079,
      "step": 290
    },
    {
      "epoch": 0.31,
      "learning_rate": 3.812572759022118e-07,
      "logits/chosen": -2.49491810798645,
      "logits/rejected": -2.501307249069214,
      "logps/chosen": -281.02484130859375,
      "logps/rejected": -237.4232940673828,
      "loss": 0.6726,
      "rewards/accuracies": 0.628125011920929,
      "rewards/chosen": 0.018051721155643463,
      "rewards/margins": 0.032371118664741516,
      "rewards/rejected": -0.014319395646452904,
      "step": 300
    },
    {
      "epoch": 0.32,
      "learning_rate": 3.754365541327124e-07,
      "logits/chosen": -2.4540202617645264,
      "logits/rejected": -2.435542583465576,
      "logps/chosen": -258.79193115234375,
      "logps/rejected": -253.71694946289062,
      "loss": 0.673,
      "rewards/accuracies": 0.6468750238418579,
      "rewards/chosen": 0.015593824908137321,
      "rewards/margins": 0.0299003217369318,
      "rewards/rejected": -0.014306495897471905,
      "step": 310
    },
    {
      "epoch": 0.33,
      "learning_rate": 3.6961583236321304e-07,
      "logits/chosen": -2.488478422164917,
      "logits/rejected": -2.471566677093506,
      "logps/chosen": -287.9583435058594,
      "logps/rejected": -256.38427734375,
      "loss": 0.6681,
      "rewards/accuracies": 0.653124988079071,
      "rewards/chosen": 0.01864878088235855,
      "rewards/margins": 0.03612912446260452,
      "rewards/rejected": -0.01748034544289112,
      "step": 320
    },
    {
      "epoch": 0.35,
      "learning_rate": 3.637951105937136e-07,
      "logits/chosen": -2.469587802886963,
      "logits/rejected": -2.4525370597839355,
      "logps/chosen": -249.15280151367188,
      "logps/rejected": -235.3613739013672,
      "loss": 0.6664,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": 0.013197916559875011,
      "rewards/margins": 0.030393391847610474,
      "rewards/rejected": -0.01719547249376774,
      "step": 330
    },
    {
      "epoch": 0.36,
      "learning_rate": 3.579743888242142e-07,
      "logits/chosen": -2.486199378967285,
      "logits/rejected": -2.491894006729126,
      "logps/chosen": -279.8724060058594,
      "logps/rejected": -249.26123046875,
      "loss": 0.6691,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": 0.016388490796089172,
      "rewards/margins": 0.02631198987364769,
      "rewards/rejected": -0.009923500940203667,
      "step": 340
    },
    {
      "epoch": 0.37,
      "learning_rate": 3.521536670547148e-07,
      "logits/chosen": -2.501917839050293,
      "logits/rejected": -2.500124216079712,
      "logps/chosen": -277.4295349121094,
      "logps/rejected": -261.6769104003906,
      "loss": 0.665,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.019429894164204597,
      "rewards/margins": 0.030850976705551147,
      "rewards/rejected": -0.011421086266636848,
      "step": 350
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.4633294528521536e-07,
      "logits/chosen": -2.5457377433776855,
      "logits/rejected": -2.5352020263671875,
      "logps/chosen": -280.8336486816406,
      "logps/rejected": -259.36895751953125,
      "loss": 0.6614,
      "rewards/accuracies": 0.6156250238418579,
      "rewards/chosen": 0.01814255118370056,
      "rewards/margins": 0.03848281502723694,
      "rewards/rejected": -0.02034026011824608,
      "step": 360
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.4051222351571594e-07,
      "logits/chosen": -2.5180299282073975,
      "logits/rejected": -2.4479191303253174,
      "logps/chosen": -274.1128845214844,
      "logps/rejected": -259.3582763671875,
      "loss": 0.6664,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": 0.023864779621362686,
      "rewards/margins": 0.041657593101263046,
      "rewards/rejected": -0.01779281720519066,
      "step": 370
    },
    {
      "epoch": 0.4,
      "learning_rate": 3.346915017462165e-07,
      "logits/chosen": -2.504103899002075,
      "logits/rejected": -2.4852070808410645,
      "logps/chosen": -269.26409912109375,
      "logps/rejected": -253.5343780517578,
      "loss": 0.663,
      "rewards/accuracies": 0.640625,
      "rewards/chosen": 0.021114524453878403,
      "rewards/margins": 0.042601972818374634,
      "rewards/rejected": -0.021487446501851082,
      "step": 380
    },
    {
      "epoch": 0.41,
      "learning_rate": 3.288707799767171e-07,
      "logits/chosen": -2.4515504837036133,
      "logits/rejected": -2.447246551513672,
      "logps/chosen": -259.05401611328125,
      "logps/rejected": -240.0911407470703,
      "loss": 0.663,
      "rewards/accuracies": 0.628125011920929,
      "rewards/chosen": 0.014576256275177002,
      "rewards/margins": 0.04004546254873276,
      "rewards/rejected": -0.025469202548265457,
      "step": 390
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.230500582072177e-07,
      "logits/chosen": -2.509997844696045,
      "logits/rejected": -2.522238254547119,
      "logps/chosen": -282.1490478515625,
      "logps/rejected": -243.8563690185547,
      "loss": 0.6624,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.025715211406350136,
      "rewards/margins": 0.043169617652893066,
      "rewards/rejected": -0.01745440624654293,
      "step": 400
    },
    {
      "epoch": 0.43,
      "learning_rate": 3.1722933643771827e-07,
      "logits/chosen": -2.498192548751831,
      "logits/rejected": -2.4921789169311523,
      "logps/chosen": -271.2725830078125,
      "logps/rejected": -244.8346710205078,
      "loss": 0.6594,
      "rewards/accuracies": 0.6468750238418579,
      "rewards/chosen": 0.02634851261973381,
      "rewards/margins": 0.0405191034078598,
      "rewards/rejected": -0.014170585200190544,
      "step": 410
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.1140861466821885e-07,
      "logits/chosen": -2.4811172485351562,
      "logits/rejected": -2.4510648250579834,
      "logps/chosen": -253.666015625,
      "logps/rejected": -243.52877807617188,
      "loss": 0.6617,
      "rewards/accuracies": 0.628125011920929,
      "rewards/chosen": 0.024721184745430946,
      "rewards/margins": 0.03779374435544014,
      "rewards/rejected": -0.013072559610009193,
      "step": 420
    },
    {
      "epoch": 0.45,
      "learning_rate": 3.0558789289871943e-07,
      "logits/chosen": -2.5075089931488037,
      "logits/rejected": -2.4863505363464355,
      "logps/chosen": -279.38818359375,
      "logps/rejected": -254.1600799560547,
      "loss": 0.6609,
      "rewards/accuracies": 0.596875011920929,
      "rewards/chosen": 0.02350917086005211,
      "rewards/margins": 0.03294364735484123,
      "rewards/rejected": -0.009434476494789124,
      "step": 430
    },
    {
      "epoch": 0.46,
      "learning_rate": 2.9976717112922e-07,
      "logits/chosen": -2.5201539993286133,
      "logits/rejected": -2.511667013168335,
      "logps/chosen": -272.9944152832031,
      "logps/rejected": -233.72854614257812,
      "loss": 0.6589,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": 0.020865267142653465,
      "rewards/margins": 0.04071135073900223,
      "rewards/rejected": -0.019846081733703613,
      "step": 440
    },
    {
      "epoch": 0.47,
      "learning_rate": 2.939464493597206e-07,
      "logits/chosen": -2.4186248779296875,
      "logits/rejected": -2.3882439136505127,
      "logps/chosen": -244.801025390625,
      "logps/rejected": -241.0476837158203,
      "loss": 0.6588,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.019629117101430893,
      "rewards/margins": 0.037906430661678314,
      "rewards/rejected": -0.01827731356024742,
      "step": 450
    },
    {
      "epoch": 0.48,
      "learning_rate": 2.8812572759022117e-07,
      "logits/chosen": -2.5232837200164795,
      "logits/rejected": -2.5000243186950684,
      "logps/chosen": -294.4573669433594,
      "logps/rejected": -256.6082458496094,
      "loss": 0.6559,
      "rewards/accuracies": 0.6156250238418579,
      "rewards/chosen": 0.02450462244451046,
      "rewards/margins": 0.0395827516913414,
      "rewards/rejected": -0.015078130178153515,
      "step": 460
    },
    {
      "epoch": 0.49,
      "learning_rate": 2.8230500582072175e-07,
      "logits/chosen": -2.4724531173706055,
      "logits/rejected": -2.4754977226257324,
      "logps/chosen": -283.3002624511719,
      "logps/rejected": -251.4059295654297,
      "loss": 0.6557,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 0.024192675948143005,
      "rewards/margins": 0.05236636474728584,
      "rewards/rejected": -0.028173688799142838,
      "step": 470
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.7648428405122233e-07,
      "logits/chosen": -2.410810947418213,
      "logits/rejected": -2.4008870124816895,
      "logps/chosen": -242.271484375,
      "logps/rejected": -250.8043212890625,
      "loss": 0.6593,
      "rewards/accuracies": 0.6031249761581421,
      "rewards/chosen": 0.012235969305038452,
      "rewards/margins": 0.02636186219751835,
      "rewards/rejected": -0.01412589568644762,
      "step": 480
    },
    {
      "epoch": 0.51,
      "learning_rate": 2.706635622817229e-07,
      "logits/chosen": -2.4645464420318604,
      "logits/rejected": -2.469316244125366,
      "logps/chosen": -272.09796142578125,
      "logps/rejected": -238.6561737060547,
      "loss": 0.6533,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": 0.024095356464385986,
      "rewards/margins": 0.05057717487215996,
      "rewards/rejected": -0.02648181840777397,
      "step": 490
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.648428405122235e-07,
      "logits/chosen": -2.508164167404175,
      "logits/rejected": -2.4578819274902344,
      "logps/chosen": -277.1492004394531,
      "logps/rejected": -256.0687561035156,
      "loss": 0.6531,
      "rewards/accuracies": 0.640625,
      "rewards/chosen": 0.03192506358027458,
      "rewards/margins": 0.05114533379673958,
      "rewards/rejected": -0.019220268353819847,
      "step": 500
    },
    {
      "epoch": 0.53,
      "learning_rate": 2.590221187427241e-07,
      "logits/chosen": -2.5105607509613037,
      "logits/rejected": -2.4753470420837402,
      "logps/chosen": -263.40966796875,
      "logps/rejected": -244.8257598876953,
      "loss": 0.6522,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.027492288500070572,
      "rewards/margins": 0.04991314187645912,
      "rewards/rejected": -0.022420858964323997,
      "step": 510
    },
    {
      "epoch": 0.54,
      "learning_rate": 2.5320139697322466e-07,
      "logits/chosen": -2.5335867404937744,
      "logits/rejected": -2.468331813812256,
      "logps/chosen": -272.64385986328125,
      "logps/rejected": -244.65286254882812,
      "loss": 0.651,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 0.03846021741628647,
      "rewards/margins": 0.06949218362569809,
      "rewards/rejected": -0.03103196993470192,
      "step": 520
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.4738067520372524e-07,
      "logits/chosen": -2.505664110183716,
      "logits/rejected": -2.512172222137451,
      "logps/chosen": -280.70819091796875,
      "logps/rejected": -255.12228393554688,
      "loss": 0.6487,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 0.035160940140485764,
      "rewards/margins": 0.06470004469156265,
      "rewards/rejected": -0.029539108276367188,
      "step": 530
    },
    {
      "epoch": 0.57,
      "learning_rate": 2.415599534342258e-07,
      "logits/chosen": -2.5341105461120605,
      "logits/rejected": -2.4813685417175293,
      "logps/chosen": -280.51751708984375,
      "logps/rejected": -252.9778289794922,
      "loss": 0.6471,
      "rewards/accuracies": 0.653124988079071,
      "rewards/chosen": 0.03973756358027458,
      "rewards/margins": 0.06431148201227188,
      "rewards/rejected": -0.02457391656935215,
      "step": 540
    },
    {
      "epoch": 0.58,
      "learning_rate": 2.3573923166472642e-07,
      "logits/chosen": -2.506739377975464,
      "logits/rejected": -2.487623691558838,
      "logps/chosen": -272.10675048828125,
      "logps/rejected": -258.69647216796875,
      "loss": 0.6479,
      "rewards/accuracies": 0.6343749761581421,
      "rewards/chosen": 0.028482938185334206,
      "rewards/margins": 0.057554639875888824,
      "rewards/rejected": -0.02907169796526432,
      "step": 550
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.2991850989522698e-07,
      "logits/chosen": -2.442371129989624,
      "logits/rejected": -2.4590439796447754,
      "logps/chosen": -280.783935546875,
      "logps/rejected": -227.5161895751953,
      "loss": 0.6513,
      "rewards/accuracies": 0.628125011920929,
      "rewards/chosen": 0.02295442670583725,
      "rewards/margins": 0.05334927886724472,
      "rewards/rejected": -0.03039485774934292,
      "step": 560
    },
    {
      "epoch": 0.6,
      "learning_rate": 2.2409778812572759e-07,
      "logits/chosen": -2.470194101333618,
      "logits/rejected": -2.441986560821533,
      "logps/chosen": -255.4265594482422,
      "logps/rejected": -241.7792205810547,
      "loss": 0.6487,
      "rewards/accuracies": 0.6343749761581421,
      "rewards/chosen": 0.024136796593666077,
      "rewards/margins": 0.04882895201444626,
      "rewards/rejected": -0.02469216100871563,
      "step": 570
    },
    {
      "epoch": 0.61,
      "learning_rate": 2.1827706635622817e-07,
      "logits/chosen": -2.5127058029174805,
      "logits/rejected": -2.496788263320923,
      "logps/chosen": -262.003662109375,
      "logps/rejected": -251.4923858642578,
      "loss": 0.6508,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 0.025721842423081398,
      "rewards/margins": 0.056790102273225784,
      "rewards/rejected": -0.031068259850144386,
      "step": 580
    },
    {
      "epoch": 0.62,
      "learning_rate": 2.1245634458672875e-07,
      "logits/chosen": -2.4597954750061035,
      "logits/rejected": -2.4486401081085205,
      "logps/chosen": -272.46258544921875,
      "logps/rejected": -250.1220703125,
      "loss": 0.6465,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": 0.03377040475606918,
      "rewards/margins": 0.05980812385678291,
      "rewards/rejected": -0.02603771723806858,
      "step": 590
    },
    {
      "epoch": 0.63,
      "learning_rate": 2.0663562281722933e-07,
      "logits/chosen": -2.468313217163086,
      "logits/rejected": -2.4899404048919678,
      "logps/chosen": -280.9910888671875,
      "logps/rejected": -279.2497863769531,
      "loss": 0.6493,
      "rewards/accuracies": 0.6468750238418579,
      "rewards/chosen": 0.03328691050410271,
      "rewards/margins": 0.05028299614787102,
      "rewards/rejected": -0.016996093094348907,
      "step": 600
    },
    {
      "epoch": 0.64,
      "learning_rate": 2.008149010477299e-07,
      "logits/chosen": -2.515079975128174,
      "logits/rejected": -2.471588611602783,
      "logps/chosen": -284.3990478515625,
      "logps/rejected": -246.21835327148438,
      "loss": 0.6456,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.03540956228971481,
      "rewards/margins": 0.062371380627155304,
      "rewards/rejected": -0.026961814612150192,
      "step": 610
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.949941792782305e-07,
      "logits/chosen": -2.5138742923736572,
      "logits/rejected": -2.451676845550537,
      "logps/chosen": -272.3465881347656,
      "logps/rejected": -240.0901641845703,
      "loss": 0.6492,
      "rewards/accuracies": 0.6343749761581421,
      "rewards/chosen": 0.03495832532644272,
      "rewards/margins": 0.05197330191731453,
      "rewards/rejected": -0.017014967277646065,
      "step": 620
    },
    {
      "epoch": 0.66,
      "learning_rate": 1.8917345750873107e-07,
      "logits/chosen": -2.4583332538604736,
      "logits/rejected": -2.468278169631958,
      "logps/chosen": -264.42913818359375,
      "logps/rejected": -256.8759765625,
      "loss": 0.6451,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.02500128373503685,
      "rewards/margins": 0.04210829734802246,
      "rewards/rejected": -0.01710701361298561,
      "step": 630
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.8335273573923165e-07,
      "logits/chosen": -2.5137476921081543,
      "logits/rejected": -2.473841667175293,
      "logps/chosen": -252.57705688476562,
      "logps/rejected": -230.48471069335938,
      "loss": 0.6434,
      "rewards/accuracies": 0.640625,
      "rewards/chosen": 0.03589923307299614,
      "rewards/margins": 0.06454572081565857,
      "rewards/rejected": -0.028646480292081833,
      "step": 640
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.7753201396973226e-07,
      "logits/chosen": -2.45261549949646,
      "logits/rejected": -2.4653515815734863,
      "logps/chosen": -274.7919006347656,
      "logps/rejected": -261.8819580078125,
      "loss": 0.6407,
      "rewards/accuracies": 0.6468750238418579,
      "rewards/chosen": 0.03197002038359642,
      "rewards/margins": 0.07201725244522095,
      "rewards/rejected": -0.040047239512205124,
      "step": 650
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.7171129220023281e-07,
      "logits/chosen": -2.4911041259765625,
      "logits/rejected": -2.4684813022613525,
      "logps/chosen": -267.3164978027344,
      "logps/rejected": -232.7432403564453,
      "loss": 0.6489,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.022824671119451523,
      "rewards/margins": 0.04568014293909073,
      "rewards/rejected": -0.022855471819639206,
      "step": 660
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.658905704307334e-07,
      "logits/chosen": -2.4661662578582764,
      "logits/rejected": -2.429795026779175,
      "logps/chosen": -267.6933898925781,
      "logps/rejected": -238.0891876220703,
      "loss": 0.6461,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 0.029722299426794052,
      "rewards/margins": 0.061273299157619476,
      "rewards/rejected": -0.031550996005535126,
      "step": 670
    },
    {
      "epoch": 0.71,
      "learning_rate": 1.60069848661234e-07,
      "logits/chosen": -2.494511127471924,
      "logits/rejected": -2.505267381668091,
      "logps/chosen": -287.6944274902344,
      "logps/rejected": -259.6459045410156,
      "loss": 0.6466,
      "rewards/accuracies": 0.640625,
      "rewards/chosen": 0.041329581290483475,
      "rewards/margins": 0.05584716796875,
      "rewards/rejected": -0.014517592266201973,
      "step": 680
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.5424912689173456e-07,
      "logits/chosen": -2.4804582595825195,
      "logits/rejected": -2.462634325027466,
      "logps/chosen": -270.22698974609375,
      "logps/rejected": -227.7113800048828,
      "loss": 0.6407,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": 0.037567246705293655,
      "rewards/margins": 0.0671004056930542,
      "rewards/rejected": -0.02953316643834114,
      "step": 690
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.4842840512223514e-07,
      "logits/chosen": -2.468341112136841,
      "logits/rejected": -2.4682674407958984,
      "logps/chosen": -257.55096435546875,
      "logps/rejected": -232.13906860351562,
      "loss": 0.647,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": 0.03749538213014603,
      "rewards/margins": 0.0758819431066513,
      "rewards/rejected": -0.03838656097650528,
      "step": 700
    },
    {
      "epoch": 0.74,
      "learning_rate": 1.4260768335273574e-07,
      "logits/chosen": -2.5157179832458496,
      "logits/rejected": -2.497849702835083,
      "logps/chosen": -264.9339904785156,
      "logps/rejected": -229.9503936767578,
      "loss": 0.6375,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": 0.03981786221265793,
      "rewards/margins": 0.06821642816066742,
      "rewards/rejected": -0.02839856967329979,
      "step": 710
    },
    {
      "epoch": 0.75,
      "learning_rate": 1.3678696158323632e-07,
      "logits/chosen": -2.4586503505706787,
      "logits/rejected": -2.4603028297424316,
      "logps/chosen": -262.70880126953125,
      "logps/rejected": -237.71212768554688,
      "loss": 0.645,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": 0.025259777903556824,
      "rewards/margins": 0.0490126870572567,
      "rewards/rejected": -0.023752911016345024,
      "step": 720
    },
    {
      "epoch": 0.76,
      "learning_rate": 1.3096623981373688e-07,
      "logits/chosen": -2.502094268798828,
      "logits/rejected": -2.4800729751586914,
      "logps/chosen": -282.3802185058594,
      "logps/rejected": -236.8638153076172,
      "loss": 0.6438,
      "rewards/accuracies": 0.6656249761581421,
      "rewards/chosen": 0.04312821850180626,
      "rewards/margins": 0.07856440544128418,
      "rewards/rejected": -0.03543618693947792,
      "step": 730
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.2514551804423749e-07,
      "logits/chosen": -2.4671387672424316,
      "logits/rejected": -2.514324903488159,
      "logps/chosen": -285.3311462402344,
      "logps/rejected": -263.767822265625,
      "loss": 0.642,
      "rewards/accuracies": 0.684374988079071,
      "rewards/chosen": 0.04080774262547493,
      "rewards/margins": 0.07315204292535782,
      "rewards/rejected": -0.03234430402517319,
      "step": 740
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.1932479627473807e-07,
      "logits/chosen": -2.490058183670044,
      "logits/rejected": -2.4621422290802,
      "logps/chosen": -251.80148315429688,
      "logps/rejected": -249.9593048095703,
      "loss": 0.6395,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.028191978111863136,
      "rewards/margins": 0.06258732825517654,
      "rewards/rejected": -0.03439534455537796,
      "step": 750
    },
    {
      "epoch": 0.8,
      "learning_rate": 1.1350407450523865e-07,
      "logits/chosen": -2.470336437225342,
      "logits/rejected": -2.5136280059814453,
      "logps/chosen": -281.42767333984375,
      "logps/rejected": -254.0972137451172,
      "loss": 0.6419,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.036293573677539825,
      "rewards/margins": 0.06266774237155914,
      "rewards/rejected": -0.02637416496872902,
      "step": 760
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.0768335273573923e-07,
      "logits/chosen": -2.5531864166259766,
      "logits/rejected": -2.545531749725342,
      "logps/chosen": -305.70111083984375,
      "logps/rejected": -276.01019287109375,
      "loss": 0.64,
      "rewards/accuracies": 0.621874988079071,
      "rewards/chosen": 0.046415045857429504,
      "rewards/margins": 0.07657970488071442,
      "rewards/rejected": -0.030164653435349464,
      "step": 770
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.0186263096623981e-07,
      "logits/chosen": -2.484541416168213,
      "logits/rejected": -2.4959442615509033,
      "logps/chosen": -272.62445068359375,
      "logps/rejected": -249.4247589111328,
      "loss": 0.6416,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": 0.03473570942878723,
      "rewards/margins": 0.057318903505802155,
      "rewards/rejected": -0.022583190351724625,
      "step": 780
    },
    {
      "epoch": 0.83,
      "learning_rate": 9.604190919674039e-08,
      "logits/chosen": -2.480217218399048,
      "logits/rejected": -2.4997220039367676,
      "logps/chosen": -253.22061157226562,
      "logps/rejected": -248.5176239013672,
      "loss": 0.6447,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.02552936039865017,
      "rewards/margins": 0.05032141134142876,
      "rewards/rejected": -0.024792049080133438,
      "step": 790
    },
    {
      "epoch": 0.84,
      "learning_rate": 9.022118742724097e-08,
      "logits/chosen": -2.4872233867645264,
      "logits/rejected": -2.461895704269409,
      "logps/chosen": -271.40191650390625,
      "logps/rejected": -261.29241943359375,
      "loss": 0.6352,
      "rewards/accuracies": 0.640625,
      "rewards/chosen": 0.035524625331163406,
      "rewards/margins": 0.0665384978055954,
      "rewards/rejected": -0.031013870611786842,
      "step": 800
    },
    {
      "epoch": 0.85,
      "learning_rate": 8.440046565774157e-08,
      "logits/chosen": -2.462954521179199,
      "logits/rejected": -2.472172498703003,
      "logps/chosen": -272.635986328125,
      "logps/rejected": -229.35366821289062,
      "loss": 0.6432,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 0.03845527023077011,
      "rewards/margins": 0.05895563215017319,
      "rewards/rejected": -0.020500360056757927,
      "step": 810
    },
    {
      "epoch": 0.86,
      "learning_rate": 7.857974388824213e-08,
      "logits/chosen": -2.481274127960205,
      "logits/rejected": -2.4623420238494873,
      "logps/chosen": -282.552734375,
      "logps/rejected": -246.048095703125,
      "loss": 0.6457,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": 0.04651721939444542,
      "rewards/margins": 0.07211866974830627,
      "rewards/rejected": -0.025601446628570557,
      "step": 820
    },
    {
      "epoch": 0.87,
      "learning_rate": 7.275902211874273e-08,
      "logits/chosen": -2.495702028274536,
      "logits/rejected": -2.4765870571136475,
      "logps/chosen": -249.3843536376953,
      "logps/rejected": -278.4346923828125,
      "loss": 0.644,
      "rewards/accuracies": 0.684374988079071,
      "rewards/chosen": 0.021697301417589188,
      "rewards/margins": 0.059917084872722626,
      "rewards/rejected": -0.038219790905714035,
      "step": 830
    },
    {
      "epoch": 0.88,
      "learning_rate": 6.693830034924331e-08,
      "logits/chosen": -2.4697625637054443,
      "logits/rejected": -2.4722721576690674,
      "logps/chosen": -265.5938415527344,
      "logps/rejected": -257.2969665527344,
      "loss": 0.6376,
      "rewards/accuracies": 0.653124988079071,
      "rewards/chosen": 0.028097212314605713,
      "rewards/margins": 0.046820152550935745,
      "rewards/rejected": -0.018722938373684883,
      "step": 840
    },
    {
      "epoch": 0.89,
      "learning_rate": 6.111757857974389e-08,
      "logits/chosen": -2.4835047721862793,
      "logits/rejected": -2.5235934257507324,
      "logps/chosen": -263.8602294921875,
      "logps/rejected": -241.2860870361328,
      "loss": 0.6443,
      "rewards/accuracies": 0.640625,
      "rewards/chosen": 0.03554076701402664,
      "rewards/margins": 0.065843865275383,
      "rewards/rejected": -0.030303100124001503,
      "step": 850
    },
    {
      "epoch": 0.9,
      "learning_rate": 5.529685681024446e-08,
      "logits/chosen": -2.446396589279175,
      "logits/rejected": -2.447554111480713,
      "logps/chosen": -278.2039489746094,
      "logps/rejected": -267.393798828125,
      "loss": 0.6433,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": 0.02485939860343933,
      "rewards/margins": 0.04789874702692032,
      "rewards/rejected": -0.023039352148771286,
      "step": 860
    },
    {
      "epoch": 0.91,
      "learning_rate": 4.947613504074505e-08,
      "logits/chosen": -2.451906442642212,
      "logits/rejected": -2.4401917457580566,
      "logps/chosen": -254.4597625732422,
      "logps/rejected": -240.5189666748047,
      "loss": 0.6406,
      "rewards/accuracies": 0.596875011920929,
      "rewards/chosen": 0.03511255979537964,
      "rewards/margins": 0.05534617230296135,
      "rewards/rejected": -0.020233619958162308,
      "step": 870
    },
    {
      "epoch": 0.92,
      "learning_rate": 4.365541327124563e-08,
      "logits/chosen": -2.507020950317383,
      "logits/rejected": -2.4901695251464844,
      "logps/chosen": -260.8884582519531,
      "logps/rejected": -251.20938110351562,
      "loss": 0.6417,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": 0.04014205187559128,
      "rewards/margins": 0.060112785547971725,
      "rewards/rejected": -0.01997072994709015,
      "step": 880
    },
    {
      "epoch": 0.93,
      "learning_rate": 3.783469150174622e-08,
      "logits/chosen": -2.5302183628082275,
      "logits/rejected": -2.5431013107299805,
      "logps/chosen": -254.4590606689453,
      "logps/rejected": -259.6826477050781,
      "loss": 0.6409,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": 0.02364221215248108,
      "rewards/margins": 0.04719501733779907,
      "rewards/rejected": -0.023552805185317993,
      "step": 890
    },
    {
      "epoch": 0.94,
      "learning_rate": 3.20139697322468e-08,
      "logits/chosen": -2.4807305335998535,
      "logits/rejected": -2.5058140754699707,
      "logps/chosen": -264.16021728515625,
      "logps/rejected": -249.19570922851562,
      "loss": 0.6452,
      "rewards/accuracies": 0.628125011920929,
      "rewards/chosen": 0.029653768986463547,
      "rewards/margins": 0.05728424713015556,
      "rewards/rejected": -0.027630474418401718,
      "step": 900
    },
    {
      "epoch": 0.95,
      "learning_rate": 2.619324796274738e-08,
      "logits/chosen": -2.48866605758667,
      "logits/rejected": -2.5024585723876953,
      "logps/chosen": -289.67401123046875,
      "logps/rejected": -261.8035583496094,
      "loss": 0.6424,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": 0.041562773287296295,
      "rewards/margins": 0.06672655045986176,
      "rewards/rejected": -0.025163773447275162,
      "step": 910
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.037252619324796e-08,
      "logits/chosen": -2.4807181358337402,
      "logits/rejected": -2.4651541709899902,
      "logps/chosen": -270.3717346191406,
      "logps/rejected": -248.5556182861328,
      "loss": 0.6415,
      "rewards/accuracies": 0.653124988079071,
      "rewards/chosen": 0.022484585642814636,
      "rewards/margins": 0.05588115006685257,
      "rewards/rejected": -0.033396560698747635,
      "step": 920
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.4551804423748545e-08,
      "logits/chosen": -2.4763240814208984,
      "logits/rejected": -2.4676055908203125,
      "logps/chosen": -272.7355041503906,
      "logps/rejected": -254.0940704345703,
      "loss": 0.6432,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 0.046184636652469635,
      "rewards/margins": 0.0702408105134964,
      "rewards/rejected": -0.02405618131160736,
      "step": 930
    },
    {
      "epoch": 0.98,
      "learning_rate": 8.731082654249125e-09,
      "logits/chosen": -2.4857518672943115,
      "logits/rejected": -2.5030598640441895,
      "logps/chosen": -275.62445068359375,
      "logps/rejected": -264.8072509765625,
      "loss": 0.6407,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": 0.036477264016866684,
      "rewards/margins": 0.06461935490369797,
      "rewards/rejected": -0.02814210020005703,
      "step": 940
    },
    {
      "epoch": 0.99,
      "learning_rate": 2.910360884749709e-09,
      "logits/chosen": -2.4887900352478027,
      "logits/rejected": -2.494776725769043,
      "logps/chosen": -259.7311706542969,
      "logps/rejected": -253.02346801757812,
      "loss": 0.6442,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 0.02119762822985649,
      "rewards/margins": 0.04852701723575592,
      "rewards/rejected": -0.02732938900589943,
      "step": 950
    },
    {
      "epoch": 1.0,
      "eval_logits/chosen": -2.4944536685943604,
      "eval_logits/rejected": -2.4963433742523193,
      "eval_logps/chosen": -269.25555419921875,
      "eval_logps/rejected": -253.21238708496094,
      "eval_loss": 0.6399702429771423,
      "eval_rewards/accuracies": 0.6370000243186951,
      "eval_rewards/chosen": 0.030110126361250877,
      "eval_rewards/margins": 0.05743245780467987,
      "eval_rewards/rejected": -0.027322327718138695,
      "eval_runtime": 806.7503,
      "eval_samples_per_second": 2.479,
      "eval_steps_per_second": 0.31,
      "step": 955
    },
    {
      "epoch": 1.0,
      "step": 955,
      "total_flos": 0.0,
      "train_loss": 0.6598132096035942,
      "train_runtime": 45126.4521,
      "train_samples_per_second": 1.355,
      "train_steps_per_second": 0.021
    }
  ],
  "logging_steps": 10,
  "max_steps": 955,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 0.0,
  "trial_name": null,
  "trial_params": null
}