xshubhamx committed on
Commit 3f94a92
1 Parent(s): 1334b16

Upload folder using huggingface_hub

training_checkpoints/checkpoint-482/README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 library_name: peft
-base_model: facebook/bart-base
+base_model: facebook/bart-large
 ---
 
 # Model Card for Model ID
@@ -81,7 +81,7 @@ Use the code below to get started with the model.
 
 [More Information Needed]
 
-### Training Procedure
+### Training Procedure
 
 <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
 
@@ -197,8 +197,6 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]
 ## Model Card Contact
 
 [More Information Needed]
-
-
 ### Framework versions
 
 - PEFT 0.10.0
training_checkpoints/checkpoint-482/adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "facebook/bart-base",
+  "base_model_name_or_path": "facebook/bart-large",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "dense",
-    "v_proj",
     "fc1",
-    "fc2",
-    "out_proj",
     "q_proj",
-    "k_proj"
+    "dense",
+    "k_proj",
+    "out_proj",
+    "fc2",
+    "v_proj"
   ],
   "task_type": "SEQ_CLS",
   "use_dora": false,
training_checkpoints/checkpoint-482/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c199fbb228a83b13abb5344bab7f80b4a68294f6c426e291525627e882339e17
-size 13151448
+oid sha256:c719daa23e1d81c0b2ff0e360aec48ddf2bcf0ebc592f710cb51c40fdbe4f484
+size 34854640
training_checkpoints/checkpoint-482/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e362dd74f75ff456ef0651b2dd565518b09d655aeaf4f0d3e2700807ff6a9540
-size 26413626
+oid sha256:fd4bb4f409c45c7ad2f65d2862e1bda289f8d754e1de606dea26a3b13e3bd840
+size 69927198
training_checkpoints/checkpoint-482/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:86500ec09f2ca351f4fd67cf7f0aa06f34c0c4aedda09477ff2fce8239cfd608
+oid sha256:cf6d16d5ffd16bb406bd25889a9f70b6ee3da19a14c665fcae6ea26c561db5f5
 size 14244
training_checkpoints/checkpoint-482/trainer_state.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "best_metric": 0.39687975393644837,
+  "best_metric": 0.5270593942960143,
   "best_model_checkpoint": "bart-base-lora/checkpoint-482",
   "epoch": 2.998444790046656,
   "eval_steps": 500,
@@ -10,68 +10,68 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "eval_accuracy": 0.611154144074361,
-      "eval_f1_macro": 0.28705545756004525,
-      "eval_f1_micro": 0.611154144074361,
-      "eval_f1_weighted": 0.5574695078229231,
-      "eval_loss": 1.3204560279846191,
-      "eval_macro_fpr": 0.046390243375546196,
-      "eval_macro_sensitivity": 0.302378554842108,
-      "eval_macro_specificity": 0.9691815961629444,
-      "eval_precision": 0.5321554731456403,
-      "eval_precision_macro": 0.2887439395071009,
-      "eval_recall": 0.611154144074361,
-      "eval_recall_macro": 0.302378554842108,
-      "eval_runtime": 45.1674,
-      "eval_samples_per_second": 28.583,
-      "eval_steps_per_second": 3.587,
-      "eval_weighted_fpr": 0.04347073086248701,
-      "eval_weighted_sensitivity": 0.611154144074361,
-      "eval_weighted_specificity": 0.9265697983698051,
+      "eval_accuracy": 0.715724244771495,
+      "eval_f1_macro": 0.3957532686340496,
+      "eval_f1_micro": 0.715724244771495,
+      "eval_f1_weighted": 0.6834645556416842,
+      "eval_loss": 0.9525313973426819,
+      "eval_macro_fpr": 0.028501626692807605,
+      "eval_macro_sensitivity": 0.4415906678049911,
+      "eval_macro_specificity": 0.9786637894661117,
+      "eval_precision": 0.6787596592263883,
+      "eval_precision_macro": 0.38751339695744513,
+      "eval_recall": 0.715724244771495,
+      "eval_recall_macro": 0.4415906678049911,
+      "eval_runtime": 120.9594,
+      "eval_samples_per_second": 10.673,
+      "eval_steps_per_second": 1.339,
+      "eval_weighted_fpr": 0.027587762158911525,
+      "eval_weighted_sensitivity": 0.715724244771495,
+      "eval_weighted_specificity": 0.9642325972201805,
       "step": 160
     },
     {
       "epoch": 2.0,
-      "eval_accuracy": 0.6994577846630519,
-      "eval_f1_macro": 0.3947617401663212,
-      "eval_f1_micro": 0.6994577846630519,
-      "eval_f1_weighted": 0.6807748226338182,
-      "eval_loss": 0.887535810470581,
-      "eval_macro_fpr": 0.03062685085400977,
-      "eval_macro_sensitivity": 0.4253981018601158,
-      "eval_macro_specificity": 0.977359054380045,
-      "eval_precision": 0.6728416067370143,
-      "eval_precision_macro": 0.3822291983692299,
-      "eval_recall": 0.6994577846630519,
-      "eval_recall_macro": 0.4253981018601158,
-      "eval_runtime": 44.0045,
-      "eval_samples_per_second": 29.338,
-      "eval_steps_per_second": 3.681,
-      "eval_weighted_fpr": 0.02977743668457406,
-      "eval_weighted_sensitivity": 0.6994577846630519,
-      "eval_weighted_specificity": 0.9609280310376243,
+      "eval_accuracy": 0.7412858249419055,
+      "eval_f1_macro": 0.43370046411892155,
+      "eval_f1_micro": 0.7412858249419055,
+      "eval_f1_weighted": 0.7231329193382661,
+      "eval_loss": 0.7733433842658997,
+      "eval_macro_fpr": 0.025167370687133015,
+      "eval_macro_sensitivity": 0.4686708989603177,
+      "eval_macro_specificity": 0.9805374797463638,
+      "eval_precision": 0.729625861926521,
+      "eval_precision_macro": 0.4491323101141271,
+      "eval_recall": 0.7412858249419055,
+      "eval_recall_macro": 0.4686708989603177,
+      "eval_runtime": 119.8649,
+      "eval_samples_per_second": 10.77,
+      "eval_steps_per_second": 1.352,
+      "eval_weighted_fpr": 0.024322749781532187,
+      "eval_weighted_sensitivity": 0.7412858249419055,
+      "eval_weighted_specificity": 0.9667763712535515,
       "step": 321
     },
     {
       "epoch": 3.0,
-      "eval_accuracy": 0.7064291247095275,
-      "eval_f1_macro": 0.39687975393644837,
-      "eval_f1_micro": 0.7064291247095275,
-      "eval_f1_weighted": 0.6751887743731854,
-      "eval_loss": 0.842707633972168,
-      "eval_macro_fpr": 0.029525759570689643,
-      "eval_macro_sensitivity": 0.44418263304043937,
-      "eval_macro_specificity": 0.9780349023882488,
-      "eval_precision": 0.6951917464608133,
-      "eval_precision_macro": 0.4131313886422618,
-      "eval_recall": 0.7064291247095275,
-      "eval_recall_macro": 0.44418263304043937,
-      "eval_runtime": 44.2756,
-      "eval_samples_per_second": 29.158,
-      "eval_steps_per_second": 3.659,
-      "eval_weighted_fpr": 0.02882786947592607,
-      "eval_weighted_sensitivity": 0.7064291247095275,
-      "eval_weighted_specificity": 0.9640944111142056,
+      "eval_accuracy": 0.7738187451587917,
+      "eval_f1_macro": 0.5270593942960143,
+      "eval_f1_micro": 0.7738187451587917,
+      "eval_f1_weighted": 0.7610767338978833,
+      "eval_loss": 0.7104586958885193,
+      "eval_macro_fpr": 0.021211805484311928,
+      "eval_macro_sensitivity": 0.5407847836684627,
+      "eval_macro_specificity": 0.9830897092527341,
+      "eval_precision": 0.7630858215778352,
+      "eval_precision_macro": 0.5565386466479504,
+      "eval_recall": 0.7738187451587917,
+      "eval_recall_macro": 0.5407847836684627,
+      "eval_runtime": 119.5419,
+      "eval_samples_per_second": 10.8,
+      "eval_steps_per_second": 1.355,
+      "eval_weighted_fpr": 0.020451043563524302,
+      "eval_weighted_sensitivity": 0.7738187451587917,
+      "eval_weighted_specificity": 0.9725268936322198,
       "step": 482
     }
   ],
@@ -79,7 +79,7 @@
   "max_steps": 2400,
   "num_train_epochs": 15,
   "save_steps": 500,
-  "total_flos": 4883118407645184.0,
+  "total_flos": 1.7164271208462336e+16,
   "trial_name": null,
   "trial_params": null
 }
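In both the old and new trainer_state.json, "best_metric" equals the epoch-3 "eval_f1_macro" (0.3969 before, 0.5271 after), which suggests macro-averaged F1 is the model-selection metric. A minimal sketch of how such a value is commonly computed follows; the actual compute_metrics function is not part of this commit, so the scikit-learn call is an assumption:

```python
# Sketch: macro-averaged F1, the kind of value logged as "eval_f1_macro".
# Assumes a scikit-learn based compute_metrics; the real one is not in this diff.
from sklearn.metrics import f1_score

y_true = [0, 2, 1, 2, 0]  # hypothetical gold labels
y_pred = [0, 2, 2, 2, 0]  # hypothetical predictions
macro_f1 = f1_score(y_true, y_pred, average="macro")
```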
training_checkpoints/checkpoint-482/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cfb230625284b71a8582d0495ae9a462b745f8cb976f9d7932c4f91357944609
+oid sha256:8cfeeb4079e0edbd60cad2df3b73a192a8e64f61f57cc52cfabb80fd9ff6105d
 size 4600
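Taken together, the files under checkpoint-482 form a standard PEFT checkpoint (adapter config, adapter weights, optimizer and RNG state, trainer state, training args). A minimal sketch of loading it for inference is shown below; the label count is an assumption, since it does not appear anywhere in this diff:

```python
# Sketch: load checkpoint-482 as a LoRA adapter on facebook/bart-large for
# sequence classification. num_labels is an assumption (not visible in the diff).
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from peft import PeftModel

base = AutoModelForSequenceClassification.from_pretrained(
    "facebook/bart-large",
    num_labels=20,  # assumption: actual label count not recorded in this diff
)
model = PeftModel.from_pretrained(base, "training_checkpoints/checkpoint-482")
tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")

inputs = tokenizer("Example text to classify.", return_tensors="pt")
logits = model(**inputs).logits
```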