MohamedAhmedAE committed on
Commit
a11fa54
1 Parent(s): be4793a

Training in progress, step 4600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:78b3e09c4e01a30ef881735499660d4412e95b93c75556d23b1ed6810bcf64b8
+oid sha256:59eaacff7d5070762d45bdd45efbb6b228c686c1e112dbafc0c104f49216ec3e
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ede9351d0e560e402f48b63d9ef5f99a54f6f3c33467860dc582674848a7dd81
+oid sha256:90e2fd24d0533b528ed874a6637c5f5d7e738f41677f210a597f6771befa3753
 size 84581014
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c980230c9b331cf755c313cb53fe86e70becec911cf990f83cc255f22cc4cb3d
+oid sha256:21c5c75ca0c47c9e8bfa47c24c3071bf0748801b48fbacbc60bea500a545e48d
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c019041ef3f15ade90caa60ec00bd9729de47047048dfbd8b528e52704018ac7
+oid sha256:20e8eaec8b57dfb6cb3597040aca1e3875096e994226b7c8aa538ca79c87ebaa
 size 1064
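
Each of the files above is stored through Git LFS, so the diff only shows the pointer file (a spec version line, an `oid sha256:` digest, and a `size` in bytes), not the binary payload itself. A minimal sketch of how one could check that a locally downloaded checkpoint file matches its pointer; the path used here is simply the path named in this commit and is assumed to exist on disk:

```python
import hashlib

def sha256_of_file(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file through SHA-256 so large checkpoint files fit in constant memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# New oid from the adapter_model.safetensors pointer in this commit.
expected = "59eaacff7d5070762d45bdd45efbb6b228c686c1e112dbafc0c104f49216ec3e"
actual = sha256_of_file("last-checkpoint/adapter_model.safetensors")  # assumed local path
print("match" if actual == expected else "mismatch")
```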
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.003272117469017138,
+  "epoch": 0.0034208500812451894,
   "eval_steps": 2000,
-  "global_step": 4400,
+  "global_step": 4600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -161,6 +161,13 @@
       "learning_rate": 1.999997896164907e-05,
       "loss": 1.5837,
       "step": 4400
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 3.9968528747558594,
+      "learning_rate": 1.9999977001049872e-05,
+      "loss": 1.5586,
+      "step": 4600
     }
   ],
   "logging_steps": 200,
@@ -168,7 +175,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 200,
-  "total_flos": 5.776057087694438e+16,
+  "total_flos": 6.036713823478579e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null