MohamedAhmedAE committed on
Commit 7a0ca95
1 Parent(s): 820e03f

Training in progress, step 4200, checkpoint

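This commit stores the usual artifacts that a Hugging Face transformers Trainer writes every save_steps optimizer steps (200 here, per trainer_state.json): adapter weights, optimizer and scheduler state, RNG state, and the trainer state itself. As a rough sketch of how training could be resumed from this directory — the actual training script, base model, and dataset are not part of the commit, so build_model and train_dataset below are hypothetical stand-ins, and the argument values are only those visible in trainer_state.json:

# Illustrative sketch only: the real training script is not in this commit.
# Assumes a transformers Trainer produced last-checkpoint/ (trainer_state.json,
# optimizer.pt, scheduler.pt, rng_state.pth are its standard checkpoint files).
from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir=".",                    # checkpoint lives in ./last-checkpoint
    per_device_train_batch_size=1,     # matches "train_batch_size": 1
    num_train_epochs=5,                # matches "num_train_epochs": 5
    save_steps=200,                    # matches "save_steps": 200
    logging_steps=200,                 # matches "logging_steps": 200
    learning_rate=2e-5,                # assumption, consistent with the logged LR
)

trainer = Trainer(
    model=build_model(),               # hypothetical helper; base model unknown
    args=args,
    train_dataset=train_dataset,       # hypothetical dataset object
)

# Restores optimizer, scheduler, RNG state and global_step (4200) from the checkpoint.
trainer.train(resume_from_checkpoint="last-checkpoint")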
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eb01ed2a8ec6b8d9ca441331f478bc184548ba2cd587732637da94911f94caa3
+oid sha256:bafc3f6442e5ba84431ff078caa6e69b3a936652a78ec4e062738afe5c219732
 size 167832240
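adapter_model.safetensors is the weight file that PEFT writes for an adapter (e.g. LoRA) checkpoint; between saves only its SHA-256 changes while the size stays at 167,832,240 bytes. A minimal, hedged sketch of attaching it to a base model — the base model is not named anywhere in this commit, so BASE_MODEL_ID is a placeholder, and the use of AutoModelForCausalLM is itself an assumption about the task:

# Sketch only: the base model is not identified in this commit.
from transformers import AutoModelForCausalLM
from peft import PeftModel

BASE_MODEL_ID = "<base-model-id>"  # hypothetical placeholder

base = AutoModelForCausalLM.from_pretrained(BASE_MODEL_ID)
# last-checkpoint/ is expected to hold adapter_model.safetensors plus the
# adapter config that PeftModel.from_pretrained reads to attach the weights.
model = PeftModel.from_pretrained(base, "last-checkpoint")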
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:48b28c6356ac383f9524b669dda20f0fa6982eb51ae20cccd0122514477c4f52
+oid sha256:e921e944e51dba181ad318d41576f8ac3157dc39c4bff064cdc6b4b895ad6d05
 size 84581014
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3712761bc2feaa4dd59bbca7eb1e2751ef5fe3d141fa66ff9fb4561780df39c8
+oid sha256:de92704bc3b6ffc2d27e74d056f93cc6b41e535e9e0201f0fa7c3b78e6075e90
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:17c0443aeeda2543522ca2ca694a10a6e2b344c5716e020a89e9019e3511777c
+oid sha256:8ae6e4af65e99b8816fc6ca644da9b1cf355b8cdc1d577590a1d02007539bd66
 size 1064
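Each of the four files above is stored as a git-lfs pointer rather than the binary itself: a spec-version line, the SHA-256 oid of the stored blob, and its size in bytes. A small sketch of verifying a locally downloaded artifact against the oid recorded in this commit (it assumes the real binary, not the pointer, is on disk):

# Minimal sketch: verify a downloaded checkpoint file against its LFS pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# oid from the new optimizer.pt pointer in this commit
expected = "e921e944e51dba181ad318d41576f8ac3157dc39c4bff064cdc6b4b895ad6d05"
assert sha256_of("last-checkpoint/optimizer.pt") == expected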
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.002974652244561034,
+  "epoch": 0.003123384856789086,
   "eval_steps": 2000,
-  "global_step": 4000,
+  "global_step": 4200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -147,6 +147,13 @@
       "learning_rate": 1.999998261213993e-05,
       "loss": 1.6025,
       "step": 4000
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 2.5482230186462402,
+      "learning_rate": 1.9999980825767474e-05,
+      "loss": 1.5963,
+      "step": 4200
     }
   ],
   "logging_steps": 200,
@@ -154,7 +161,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 200,
-  "total_flos": 5.2577140632576e+16,
+  "total_flos": 5.526240672691814e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null