jdorairaj committed
Commit 705fb3d
1 Parent(s): fe45795

3rd run sst2

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.

Files changed (50)
  1. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/args.json +32 -0
  2. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/logfile.log +23 -0
  3. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/sst2_bert-base-uncased_validation_loss.png +0 -0
  4. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/README.md +202 -0
  5. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/adapter_config.json +32 -0
  6. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/adapter_model.safetensors +3 -0
  7. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/all_results.json +1 -0
  8. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/all_results_val.json +1 -0
  9. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/eval_res.json +0 -0
  10. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/gpu_stats.json +130 -0
  11. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/special_tokens_map.json +7 -0
  12. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/tokenizer.json +0 -0
  13. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/tokenizer_config.json +55 -0
  14. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/val_res.json +0 -0
  15. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/vocab.txt +0 -0
  16. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/README.md +202 -0
  17. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/adapter_config.json +32 -0
  18. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/adapter_model.safetensors +3 -0
  19. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/all_results.json +1 -0
  20. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/all_results_val.json +1 -0
  21. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/eval_res.json +0 -0
  22. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/gpu_stats.json +130 -0
  23. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/special_tokens_map.json +7 -0
  24. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/tokenizer.json +0 -0
  25. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/tokenizer_config.json +55 -0
  26. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/val_res.json +0 -0
  27. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/vocab.txt +0 -0
  28. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/README.md +202 -0
  29. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/adapter_config.json +32 -0
  30. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/adapter_model.safetensors +3 -0
  31. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/all_results.json +1 -0
  32. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/all_results_val.json +1 -0
  33. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/eval_res.json +0 -0
  34. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/gpu_stats.json +130 -0
  35. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/special_tokens_map.json +7 -0
  36. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/tokenizer.json +0 -0
  37. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/tokenizer_config.json +55 -0
  38. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/val_res.json +0 -0
  39. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/vocab.txt +0 -0
  40. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/README.md +202 -0
  41. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/adapter_config.json +32 -0
  42. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/adapter_model.safetensors +3 -0
  43. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/all_results.json +1 -0
  44. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/all_results_val.json +1 -0
  45. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/eval_res.json +0 -0
  46. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/gpu_stats.json +130 -0
  47. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/special_tokens_map.json +7 -0
  48. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/tokenizer.json +0 -0
  49. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/tokenizer_config.json +55 -0
  50. outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/val_res.json +0 -0
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/args.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "task_name": "sst2",
+   "train_file": null,
+   "validation_file": null,
+   "max_length": 300,
+   "pad_to_max_length": false,
+   "model_name_or_path": "bert-base-uncased",
+   "use_slow_tokenizer": false,
+   "per_device_train_batch_size": 8,
+   "per_device_eval_batch_size": 8,
+   "learning_rate": 0.0001,
+   "weight_decay": 0.0,
+   "num_train_epochs": 6,
+   "max_train_steps": null,
+   "gradient_accumulation_steps": 1,
+   "lr_scheduler_type": "linear",
+   "num_warmup_steps": 0,
+   "output_dir": "./outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345",
+   "seed": 12345,
+   "push_to_hub": false,
+   "hub_model_id": null,
+   "hub_token": null,
+   "checkpointing_steps": null,
+   "resume_from_checkpoint": null,
+   "with_tracking": false,
+   "report_to": "all",
+   "ignore_mismatched_sizes": true,
+   "lora_r": 8,
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "testing_set": "train_val"
+ }
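The run-directory name encodes the key sweep settings (LoRA r=8, alpha=16, dropout 0.1, learning rate 1e-4, seed 12345), and the same values appear in args.json. A minimal sketch of reading the recorded arguments back, assuming the repository root as working directory:

```python
import json

# Run directory recorded in this commit (path taken from the diff above).
run_dir = "outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345"

with open(f"{run_dir}/args.json") as f:
    args = json.load(f)

# The directory name encodes lora_r, lora_alpha, lora_dropout, learning_rate, seed.
assert (args["lora_r"], args["lora_alpha"], args["seed"]) == (8, 16, 12345)
print(args["learning_rate"], args["num_train_epochs"])  # 0.0001 6
```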
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/logfile.log ADDED
@@ -0,0 +1,23 @@
+ 05/27/2024 20:16:00 - INFO - __main__ - Sample 27303 of the training set: {'input_ids': [101, 26380, 1010, 2065, 3576, 1010, 27060, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1], 'labels': 1}.
+ 05/27/2024 20:16:00 - INFO - __main__ - Sample 48017 of the training set: {'input_ids': [101, 2049, 3115, 5675, 102], 'token_type_ids': [0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1], 'labels': 0}.
+ 05/27/2024 20:16:00 - INFO - __main__ - Sample 666 of the training set: {'input_ids': [101, 2515, 2025, 2147, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1], 'labels': 0}.
+ 05/27/2024 20:16:02 - INFO - __main__ - ***** Running training *****
+ 05/27/2024 20:16:02 - INFO - __main__ - Num examples = 53879
+ 05/27/2024 20:16:02 - INFO - __main__ - Num Epochs = 6
+ 05/27/2024 20:16:02 - INFO - __main__ - Instantaneous batch size per device = 8
+ 05/27/2024 20:16:02 - INFO - __main__ - Total train batch size (w. parallel, distributed & accumulation) = 8
+ 05/27/2024 20:16:02 - INFO - __main__ - Gradient Accumulation steps = 1
+ 05/27/2024 20:16:02 - INFO - __main__ - Total optimization steps = 40410
+ 05/27/2024 20:16:07 - INFO - __main__ - epoch 0: {'accuracy': 0.5126146788990825}
+ 05/27/2024 20:16:39 - INFO - __main__ - epoch 0: {'accuracy': 0.5507795100222718}
+ 05/27/2024 20:22:06 - INFO - __main__ - epoch 1: {'accuracy': 0.9094036697247706}
+ 05/27/2024 20:22:41 - INFO - __main__ - epoch 1: {'accuracy': 0.914996288047513}
+ 05/27/2024 20:28:04 - INFO - __main__ - epoch 2: {'accuracy': 0.908256880733945}
+ 05/27/2024 20:28:40 - INFO - __main__ - epoch 2: {'accuracy': 0.9270972531551596}
+ 05/27/2024 20:34:04 - INFO - __main__ - epoch 3: {'accuracy': 0.9139908256880734}
+ 05/27/2024 20:34:39 - INFO - __main__ - epoch 3: {'accuracy': 0.9314773570898293}
+ 05/27/2024 20:40:05 - INFO - __main__ - epoch 4: {'accuracy': 0.9139908256880734}
+ 05/27/2024 20:40:40 - INFO - __main__ - epoch 4: {'accuracy': 0.9365256124721604}
+ 05/27/2024 20:46:05 - INFO - __main__ - epoch 5: {'accuracy': 0.9139908256880734}
+ 05/27/2024 20:46:40 - INFO - __main__ - epoch 5: {'accuracy': 0.9357832219747587}
+ 05/27/2024 20:46:40 - INFO - __main__ - ***** Completed training *****
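Each epoch is logged twice; comparing against the checkpoint JSON files, the first accuracy line appears to correspond to all_results.json and the second to all_results_val.json, with the validation split peaking at about 0.9365 in epoch 4. A small parsing sketch, assuming only the log format shown above:

```python
import re
from collections import defaultdict

run_dir = "outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345"
pat = re.compile(r"epoch (\d+): \{'accuracy': ([\d.]+)\}")

acc = defaultdict(list)  # epoch -> [eval accuracy, val accuracy]
with open(f"{run_dir}/logfile.log") as f:
    for line in f:
        m = pat.search(line)
        if m:
            acc[int(m.group(1))].append(float(m.group(2)))

best = max(acc, key=lambda e: acc[e][-1])  # rank by the second (validation) entry
print(best, acc[best])  # 4 [0.9139908256880734, 0.9365256124721604]
```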
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/sst2_bert-base-uncased_validation_loss.png ADDED
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ library_name: peft
+ base_model: bert-base-uncased
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.11.1
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/adapter_config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "bert-base-uncased",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "classifier",
+     "score"
+   ],
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "query",
+     "value"
+   ],
+   "task_type": "SEQ_CLS",
+   "use_dora": false,
+   "use_rslora": false
+ }
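Per this config, LoRA adapters (r=8, alpha=16) are injected into the attention query and value projections, and the sequence-classification head is trained in full via modules_to_save. A sketch of re-attaching a saved checkpoint with the peft library, assuming standard transformers/peft APIs and the step directory shown above:

```python
from peft import PeftModel
from transformers import AutoModelForSequenceClassification, AutoTokenizer

run_dir = "outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345"
ckpt = f"{run_dir}/step_0"

# Base model must match base_model_name_or_path; SST-2 is binary, hence num_labels=2.
base = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
model = PeftModel.from_pretrained(base, ckpt)    # reads adapter_config.json + adapter_model.safetensors
tokenizer = AutoTokenizer.from_pretrained(ckpt)  # tokenizer files are saved alongside the adapter
model.eval()
```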
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2ba3b2b928c50ddc8ec64450030d492dfc6d7483598a76a4087295b6318b41d
+ size 1192672
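The diff stores only the Git LFS pointer (spec version, SHA-256 object id, and byte size, roughly 1.2 MB); the adapter weights themselves live in LFS. Once fetched (e.g. with `git lfs pull`), the tensors can be inspected directly. A sketch assuming the safetensors package and the `ckpt` path from the sketch above:

```python
from safetensors.torch import load_file

# Maps tensor names to torch tensors: the rank-8 lora_A/lora_B pairs plus the classifier head.
state = load_file(f"{ckpt}/adapter_model.safetensors")
for name, tensor in sorted(state.items())[:4]:
    print(name, tuple(tensor.shape))
```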
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/all_results.json ADDED
@@ -0,0 +1 @@
+ {"eval_accuracy": 0.5126146788990825}
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/all_results_val.json ADDED
@@ -0,0 +1 @@
+ {"eval_accuracy": 0.5507795100222718}
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/eval_res.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/gpu_stats.json ADDED
@@ -0,0 +1,130 @@
+ {
+   "memory_allocated": 448793088,
+   "max_memory_allocated": 468217856,
+   "memory_reserved": 524288000,
+   "max_memory_reserved": 524288000,
+   "memory_stats": {
+     "active.all.allocated": 745415,
+     "active.all.current": 266,
+     "active.all.freed": 745149,
+     "active.all.peak": 280,
+     "active.large_pool.allocated": 85008,
+     "active.large_pool.current": 76,
+     "active.large_pool.freed": 84932,
+     "active.large_pool.peak": 83,
+     "active.small_pool.allocated": 660407,
+     "active.small_pool.current": 190,
+     "active.small_pool.freed": 660217,
+     "active.small_pool.peak": 204,
+     "active_bytes.all.allocated": 571169226752,
+     "active_bytes.all.current": 448793088,
+     "active_bytes.all.freed": 570720433664,
+     "active_bytes.all.peak": 468217856,
+     "active_bytes.large_pool.allocated": 219569324032,
+     "active_bytes.large_pool.current": 447086592,
+     "active_bytes.large_pool.freed": 219122237440,
+     "active_bytes.large_pool.peak": 466354176,
+     "active_bytes.small_pool.allocated": 351599902720,
+     "active_bytes.small_pool.current": 1706496,
+     "active_bytes.small_pool.freed": 351598196224,
+     "active_bytes.small_pool.peak": 9006080,
+     "allocated_bytes.all.allocated": 571169226752,
+     "allocated_bytes.all.current": 448793088,
+     "allocated_bytes.all.freed": 570720433664,
+     "allocated_bytes.all.peak": 468217856,
+     "allocated_bytes.large_pool.allocated": 219569324032,
+     "allocated_bytes.large_pool.current": 447086592,
+     "allocated_bytes.large_pool.freed": 219122237440,
+     "allocated_bytes.large_pool.peak": 466354176,
+     "allocated_bytes.small_pool.allocated": 351599902720,
+     "allocated_bytes.small_pool.current": 1706496,
+     "allocated_bytes.small_pool.freed": 351598196224,
+     "allocated_bytes.small_pool.peak": 9006080,
+     "allocation.all.allocated": 745415,
+     "allocation.all.current": 266,
+     "allocation.all.freed": 745149,
+     "allocation.all.peak": 280,
+     "allocation.large_pool.allocated": 85008,
+     "allocation.large_pool.current": 76,
+     "allocation.large_pool.freed": 84932,
+     "allocation.large_pool.peak": 83,
+     "allocation.small_pool.allocated": 660407,
+     "allocation.small_pool.current": 190,
+     "allocation.small_pool.freed": 660217,
+     "allocation.small_pool.peak": 204,
+     "inactive_split.all.allocated": 407787,
+     "inactive_split.all.current": 23,
+     "inactive_split.all.freed": 407764,
+     "inactive_split.all.peak": 33,
+     "inactive_split.large_pool.allocated": 67887,
+     "inactive_split.large_pool.current": 18,
+     "inactive_split.large_pool.freed": 67869,
+     "inactive_split.large_pool.peak": 19,
+     "inactive_split.small_pool.allocated": 339900,
+     "inactive_split.small_pool.current": 5,
+     "inactive_split.small_pool.freed": 339895,
+     "inactive_split.small_pool.peak": 15,
+     "inactive_split_bytes.all.allocated": 599999330304,
+     "inactive_split_bytes.all.current": 46134784,
+     "inactive_split_bytes.all.freed": 599953195520,
+     "inactive_split_bytes.all.peak": 59802624,
+     "inactive_split_bytes.large_pool.allocated": 224162217984,
+     "inactive_split_bytes.large_pool.current": 45744128,
+     "inactive_split_bytes.large_pool.freed": 224116473856,
+     "inactive_split_bytes.large_pool.peak": 57409536,
+     "inactive_split_bytes.small_pool.allocated": 375837112320,
+     "inactive_split_bytes.small_pool.current": 390656,
+     "inactive_split_bytes.small_pool.freed": 375836721664,
+     "inactive_split_bytes.small_pool.peak": 6696960,
+     "max_split_size": -1,
+     "num_alloc_retries": 0,
+     "num_device_alloc": 26,
+     "num_device_free": 0,
+     "num_ooms": 0,
+     "num_sync_all_streams": 0,
+     "oversize_allocations.allocated": 0,
+     "oversize_allocations.current": 0,
+     "oversize_allocations.freed": 0,
+     "oversize_allocations.peak": 0,
+     "oversize_segments.allocated": 0,
+     "oversize_segments.current": 0,
+     "oversize_segments.freed": 0,
+     "oversize_segments.peak": 0,
+     "requested_bytes.all.allocated": 528646731573,
+     "requested_bytes.all.current": 447656440,
+     "requested_bytes.all.freed": 528199075133,
+     "requested_bytes.all.peak": 465652952,
+     "requested_bytes.large_pool.allocated": 177101977600,
+     "requested_bytes.large_pool.current": 445954048,
+     "requested_bytes.large_pool.freed": 176656023552,
+     "requested_bytes.large_pool.peak": 463796224,
+     "requested_bytes.small_pool.allocated": 351544753973,
+     "requested_bytes.small_pool.current": 1702392,
+     "requested_bytes.small_pool.freed": 351543051581,
+     "requested_bytes.small_pool.peak": 8999320,
+     "reserved_bytes.all.allocated": 524288000,
+     "reserved_bytes.all.current": 524288000,
+     "reserved_bytes.all.freed": 0,
+     "reserved_bytes.all.peak": 524288000,
+     "reserved_bytes.large_pool.allocated": 513802240,
+     "reserved_bytes.large_pool.current": 513802240,
+     "reserved_bytes.large_pool.freed": 0,
+     "reserved_bytes.large_pool.peak": 513802240,
+     "reserved_bytes.small_pool.allocated": 10485760,
+     "reserved_bytes.small_pool.current": 10485760,
+     "reserved_bytes.small_pool.freed": 0,
+     "reserved_bytes.small_pool.peak": 10485760,
+     "segment.all.allocated": 26,
+     "segment.all.current": 26,
+     "segment.all.freed": 0,
+     "segment.all.peak": 26,
+     "segment.large_pool.allocated": 21,
+     "segment.large_pool.current": 21,
+     "segment.large_pool.freed": 0,
+     "segment.large_pool.peak": 21,
+     "segment.small_pool.allocated": 5,
+     "segment.small_pool.current": 5,
+     "segment.small_pool.freed": 0,
+     "segment.small_pool.peak": 5
+   }
+ }
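The keys under memory_stats match the flattened counters returned by torch.cuda.memory_stats(), so a snapshot in this shape can be produced from the standard allocator API. A sketch, assuming a single CUDA device (the helper name is illustrative, not from this repo):

```python
import json
import torch

def dump_gpu_stats(path: str) -> None:
    # Top-level summary counters plus the full flattened allocator statistics.
    stats = {
        "memory_allocated": torch.cuda.memory_allocated(),
        "max_memory_allocated": torch.cuda.max_memory_allocated(),
        "memory_reserved": torch.cuda.memory_reserved(),
        "max_memory_reserved": torch.cuda.max_memory_reserved(),
        "memory_stats": torch.cuda.memory_stats(),
    }
    with open(path, "w") as f:
        json.dump(stats, f, indent=2)

dump_gpu_stats("gpu_stats.json")
```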
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
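This is the stock bert-base-uncased tokenizer configuration (lowercasing, 512-token model max). Since args.json sets pad_to_max_length to false, padding happens per batch at training time rather than to the full max_length of 300. A short sketch of tokenizing an SST-2-style sentence with the saved files, reusing the `ckpt` path from the earlier sketch (the example sentence is illustrative):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(ckpt)  # loads tokenizer_config.json, vocab.txt, tokenizer.json
enc = tok("a gorgeous, witty, seductive movie.", truncation=True, max_length=300)
print(enc["input_ids"])  # [CLS] ... [SEP] ids; no padding yet, that is applied per batch
```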
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/val_res.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_0/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/README.md ADDED
@@ -0,0 +1,202 @@
+ (identical to step_0/README.md shown above)
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/adapter_config.json ADDED
@@ -0,0 +1,32 @@
+ (identical to step_0/adapter_config.json shown above)
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8951d0000879006214b7e9ca5db2a4f08ee3e74162dee48170844d669037e7c8
+ size 1192672
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/all_results.json ADDED
@@ -0,0 +1 @@
+ {"eval_accuracy": 0.908256880733945}
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/all_results_val.json ADDED
@@ -0,0 +1 @@
+ {"eval_accuracy": 0.9270972531551596}
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/eval_res.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/gpu_stats.json ADDED
@@ -0,0 +1,130 @@
+ {
+   "memory_allocated": 459686912,
+   "max_memory_allocated": 737890304,
+   "memory_reserved": 868220928,
+   "max_memory_reserved": 868220928,
+   "memory_stats": {
+     "active.all.allocated": 17243096,
+     "active.all.current": 367,
+     "active.all.freed": 17242729,
+     "active.all.peak": 627,
+     "active.large_pool.allocated": 2028077,
+     "active.large_pool.current": 77,
+     "active.large_pool.freed": 2028000,
+     "active.large_pool.peak": 191,
+     "active.small_pool.allocated": 15215019,
+     "active.small_pool.current": 290,
+     "active.small_pool.freed": 15214729,
+     "active.small_pool.peak": 550,
+     "active_bytes.all.allocated": 11627821522944,
+     "active_bytes.all.current": 459686912,
+     "active_bytes.all.freed": 11627361836032,
+     "active_bytes.all.peak": 737890304,
+     "active_bytes.large_pool.allocated": 4524984732672,
+     "active_bytes.large_pool.current": 455606272,
+     "active_bytes.large_pool.freed": 4524529126400,
+     "active_bytes.large_pool.peak": 712212480,
+     "active_bytes.small_pool.allocated": 7102836790272,
+     "active_bytes.small_pool.current": 4080640,
+     "active_bytes.small_pool.freed": 7102832709632,
+     "active_bytes.small_pool.peak": 121664000,
+     "allocated_bytes.all.allocated": 11627821522944,
+     "allocated_bytes.all.current": 459686912,
+     "allocated_bytes.all.freed": 11627361836032,
+     "allocated_bytes.all.peak": 737890304,
+     "allocated_bytes.large_pool.allocated": 4524984732672,
+     "allocated_bytes.large_pool.current": 455606272,
+     "allocated_bytes.large_pool.freed": 4524529126400,
+     "allocated_bytes.large_pool.peak": 712212480,
+     "allocated_bytes.small_pool.allocated": 7102836790272,
+     "allocated_bytes.small_pool.current": 4080640,
+     "allocated_bytes.small_pool.freed": 7102832709632,
+     "allocated_bytes.small_pool.peak": 121664000,
+     "allocation.all.allocated": 17243096,
+     "allocation.all.current": 367,
+     "allocation.all.freed": 17242729,
+     "allocation.all.peak": 627,
+     "allocation.large_pool.allocated": 2028077,
+     "allocation.large_pool.current": 77,
+     "allocation.large_pool.freed": 2028000,
+     "allocation.large_pool.peak": 191,
+     "allocation.small_pool.allocated": 15215019,
+     "allocation.small_pool.current": 290,
+     "allocation.small_pool.freed": 15214729,
+     "allocation.small_pool.peak": 550,
+     "inactive_split.all.allocated": 8572581,
+     "inactive_split.all.current": 33,
+     "inactive_split.all.freed": 8572548,
+     "inactive_split.all.peak": 103,
+     "inactive_split.large_pool.allocated": 1391934,
+     "inactive_split.large_pool.current": 19,
+     "inactive_split.large_pool.freed": 1391915,
+     "inactive_split.large_pool.peak": 22,
+     "inactive_split.small_pool.allocated": 7180647,
+     "inactive_split.small_pool.current": 14,
+     "inactive_split.small_pool.freed": 7180633,
+     "inactive_split.small_pool.peak": 84,
+     "inactive_split_bytes.all.allocated": 13619085684736,
+     "inactive_split_bytes.all.current": 66698240,
+     "inactive_split_bytes.all.freed": 13619018986496,
+     "inactive_split_bytes.all.peak": 126964736,
+     "inactive_split_bytes.large_pool.allocated": 5880575213568,
+     "inactive_split_bytes.large_pool.current": 58195968,
+     "inactive_split_bytes.large_pool.freed": 5880517017600,
+     "inactive_split_bytes.large_pool.peak": 68288512,
+     "inactive_split_bytes.small_pool.allocated": 7738510471168,
+     "inactive_split_bytes.small_pool.current": 8502272,
+     "inactive_split_bytes.small_pool.freed": 7738501968896,
+     "inactive_split_bytes.small_pool.peak": 68768768,
+     "max_split_size": -1,
+     "num_alloc_retries": 0,
+     "num_device_alloc": 91,
+     "num_device_free": 0,
+     "num_ooms": 0,
+     "num_sync_all_streams": 0,
+     "oversize_allocations.allocated": 0,
+     "oversize_allocations.current": 0,
+     "oversize_allocations.freed": 0,
+     "oversize_allocations.peak": 0,
+     "oversize_segments.allocated": 0,
+     "oversize_segments.current": 0,
+     "oversize_segments.freed": 0,
+     "oversize_segments.peak": 0,
+     "requested_bytes.all.allocated": 11156365995532,
+     "requested_bytes.all.current": 458548104,
+     "requested_bytes.all.freed": 11155907447428,
+     "requested_bytes.all.peak": 728291116,
+     "requested_bytes.large_pool.allocated": 4054250696192,
+     "requested_bytes.large_pool.current": 454473728,
+     "requested_bytes.large_pool.freed": 4053796222464,
+     "requested_bytes.large_pool.peak": 702642176,
+     "requested_bytes.small_pool.allocated": 7102115299340,
+     "requested_bytes.small_pool.current": 4074376,
+     "requested_bytes.small_pool.freed": 7102111224964,
+     "requested_bytes.small_pool.peak": 121647404,
+     "reserved_bytes.all.allocated": 868220928,
+     "reserved_bytes.all.current": 868220928,
+     "reserved_bytes.all.freed": 0,
+     "reserved_bytes.all.peak": 868220928,
+     "reserved_bytes.large_pool.allocated": 744488960,
+     "reserved_bytes.large_pool.current": 744488960,
+     "reserved_bytes.large_pool.freed": 0,
+     "reserved_bytes.large_pool.peak": 744488960,
+     "reserved_bytes.small_pool.allocated": 123731968,
+     "reserved_bytes.small_pool.current": 123731968,
+     "reserved_bytes.small_pool.freed": 0,
+     "reserved_bytes.small_pool.peak": 123731968,
+     "segment.all.allocated": 91,
+     "segment.all.current": 91,
+     "segment.all.freed": 0,
+     "segment.all.peak": 91,
+     "segment.large_pool.allocated": 32,
+     "segment.large_pool.current": 32,
+     "segment.large_pool.freed": 0,
+     "segment.large_pool.peak": 32,
+     "segment.small_pool.allocated": 59,
+     "segment.small_pool.current": 59,
+     "segment.small_pool.freed": 0,
+     "segment.small_pool.peak": 59
+   }
+ }
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ (identical to step_0/tokenizer_config.json shown above)
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/val_res.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_16163/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/README.md ADDED
@@ -0,0 +1,202 @@
+ (identical to step_0/README.md shown above)
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/adapter_config.json ADDED
@@ -0,0 +1,32 @@
+ (identical to step_0/adapter_config.json shown above)
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ab7592750a9010936ce23a0081202b82ba1463ecc6f0fb3e5dcb656345d0851
+ size 1192672
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/all_results.json ADDED
@@ -0,0 +1 @@
+ {"eval_accuracy": 0.9139908256880734}
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/all_results_val.json ADDED
@@ -0,0 +1 @@
+ {"eval_accuracy": 0.9314773570898293}
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/eval_res.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/gpu_stats.json ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "memory_allocated": 459685376,
3
+ "max_memory_allocated": 737890304,
4
+ "memory_reserved": 868220928,
5
+ "max_memory_reserved": 868220928,
6
+ "memory_stats": {
7
+ "active.all.allocated": 25492525,
8
+ "active.all.current": 367,
9
+ "active.all.freed": 25492158,
10
+ "active.all.peak": 627,
11
+ "active.large_pool.allocated": 3016981,
12
+ "active.large_pool.current": 77,
13
+ "active.large_pool.freed": 3016904,
14
+ "active.large_pool.peak": 191,
15
+ "active.small_pool.allocated": 22475544,
16
+ "active.small_pool.current": 290,
17
+ "active.small_pool.freed": 22475254,
18
+ "active.small_pool.peak": 550,
19
+ "active_bytes.all.allocated": 17175418445824,
20
+ "active_bytes.all.current": 459685376,
21
+ "active_bytes.all.freed": 17174958760448,
22
+ "active_bytes.all.peak": 737890304,
23
+ "active_bytes.large_pool.allocated": 6706463538176,
24
+ "active_bytes.large_pool.current": 455606272,
25
+ "active_bytes.large_pool.freed": 6706007931904,
26
+ "active_bytes.large_pool.peak": 712212480,
27
+ "active_bytes.small_pool.allocated": 10468954907648,
28
+ "active_bytes.small_pool.current": 4079104,
29
+ "active_bytes.small_pool.freed": 10468950828544,
30
+ "active_bytes.small_pool.peak": 121664000,
31
+ "allocated_bytes.all.allocated": 17175418445824,
32
+ "allocated_bytes.all.current": 459685376,
33
+ "allocated_bytes.all.freed": 17174958760448,
34
+ "allocated_bytes.all.peak": 737890304,
35
+ "allocated_bytes.large_pool.allocated": 6706463538176,
36
+ "allocated_bytes.large_pool.current": 455606272,
37
+ "allocated_bytes.large_pool.freed": 6706007931904,
38
+ "allocated_bytes.large_pool.peak": 712212480,
39
+ "allocated_bytes.small_pool.allocated": 10468954907648,
40
+ "allocated_bytes.small_pool.current": 4079104,
41
+ "allocated_bytes.small_pool.freed": 10468950828544,
42
+ "allocated_bytes.small_pool.peak": 121664000,
43
+ "allocation.all.allocated": 25492525,
44
+ "allocation.all.current": 367,
45
+ "allocation.all.freed": 25492158,
46
+ "allocation.all.peak": 627,
47
+ "allocation.large_pool.allocated": 3016981,
48
+ "allocation.large_pool.current": 77,
49
+ "allocation.large_pool.freed": 3016904,
50
+ "allocation.large_pool.peak": 191,
+ "allocation.small_pool.allocated": 22475544,
+ "allocation.small_pool.current": 290,
+ "allocation.small_pool.freed": 22475254,
+ "allocation.small_pool.peak": 550,
+ "inactive_split.all.allocated": 12656345,
+ "inactive_split.all.current": 35,
+ "inactive_split.all.freed": 12656310,
+ "inactive_split.all.peak": 103,
+ "inactive_split.large_pool.allocated": 2066625,
+ "inactive_split.large_pool.current": 19,
+ "inactive_split.large_pool.freed": 2066606,
+ "inactive_split.large_pool.peak": 22,
+ "inactive_split.small_pool.allocated": 10589720,
+ "inactive_split.small_pool.current": 16,
+ "inactive_split.small_pool.freed": 10589704,
+ "inactive_split.small_pool.peak": 84,
+ "inactive_split_bytes.all.allocated": 20154876839424,
+ "inactive_split_bytes.all.current": 68796928,
+ "inactive_split_bytes.all.freed": 20154808042496,
+ "inactive_split_bytes.all.peak": 126964736,
+ "inactive_split_bytes.large_pool.allocated": 8747198264320,
+ "inactive_split_bytes.large_pool.current": 58195968,
+ "inactive_split_bytes.large_pool.freed": 8747140068352,
+ "inactive_split_bytes.large_pool.peak": 68288512,
+ "inactive_split_bytes.small_pool.allocated": 11407678575104,
+ "inactive_split_bytes.small_pool.current": 10600960,
+ "inactive_split_bytes.small_pool.freed": 11407667974144,
+ "inactive_split_bytes.small_pool.peak": 68768768,
+ "max_split_size": -1,
+ "num_alloc_retries": 0,
+ "num_device_alloc": 91,
+ "num_device_free": 0,
+ "num_ooms": 0,
+ "num_sync_all_streams": 0,
+ "oversize_allocations.allocated": 0,
+ "oversize_allocations.current": 0,
+ "oversize_allocations.freed": 0,
+ "oversize_allocations.peak": 0,
+ "oversize_segments.allocated": 0,
+ "oversize_segments.current": 0,
+ "oversize_segments.freed": 0,
+ "oversize_segments.peak": 0,
+ "requested_bytes.all.allocated": 16485959412975,
+ "requested_bytes.all.current": 458546760,
+ "requested_bytes.all.freed": 16485500866215,
+ "requested_bytes.all.peak": 728291116,
+ "requested_bytes.large_pool.allocated": 6018059392256,
+ "requested_bytes.large_pool.current": 454473728,
+ "requested_bytes.large_pool.freed": 6017604918528,
+ "requested_bytes.large_pool.peak": 702642176,
+ "requested_bytes.small_pool.allocated": 10467900020719,
+ "requested_bytes.small_pool.current": 4073032,
+ "requested_bytes.small_pool.freed": 10467895947687,
+ "requested_bytes.small_pool.peak": 121647404,
+ "reserved_bytes.all.allocated": 868220928,
+ "reserved_bytes.all.current": 868220928,
+ "reserved_bytes.all.freed": 0,
+ "reserved_bytes.all.peak": 868220928,
+ "reserved_bytes.large_pool.allocated": 744488960,
+ "reserved_bytes.large_pool.current": 744488960,
+ "reserved_bytes.large_pool.freed": 0,
+ "reserved_bytes.large_pool.peak": 744488960,
+ "reserved_bytes.small_pool.allocated": 123731968,
+ "reserved_bytes.small_pool.current": 123731968,
+ "reserved_bytes.small_pool.freed": 0,
+ "reserved_bytes.small_pool.peak": 123731968,
+ "segment.all.allocated": 91,
+ "segment.all.current": 91,
+ "segment.all.freed": 0,
+ "segment.all.peak": 91,
+ "segment.large_pool.allocated": 32,
+ "segment.large_pool.current": 32,
+ "segment.large_pool.freed": 0,
+ "segment.large_pool.peak": 32,
+ "segment.small_pool.allocated": 59,
+ "segment.small_pool.current": 59,
+ "segment.small_pool.freed": 0,
+ "segment.small_pool.peak": 59
+ }
+ }
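These counters are PyTorch's CUDA caching-allocator statistics: the dotted keys match what `torch.cuda.memory_stats()` returns, wrapped with four summary fields. A minimal sketch of how such a `gpu_stats.json` could be produced (the `dump_gpu_stats` helper name is an assumption, not taken from this repo):

```python
import json
import torch

def dump_gpu_stats(path, device=0):
    # Hypothetical helper: serialize allocator counters in the same
    # shape as the gpu_stats.json files in this commit.
    stats = {
        "memory_allocated": torch.cuda.memory_allocated(device),
        "max_memory_allocated": torch.cuda.max_memory_allocated(device),
        "memory_reserved": torch.cuda.memory_reserved(device),
        "max_memory_reserved": torch.cuda.max_memory_reserved(device),
        # memory_stats() already yields flattened dotted keys such as
        # "allocation.small_pool.allocated" and "num_ooms".
        "memory_stats": dict(torch.cuda.memory_stats(device)),
    }
    with open(path, "w") as f:
        json.dump(stats, f, indent=2)
```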
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "101": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "102": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "103": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_lower_case": true,
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
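This is the stock `bert-base-uncased` tokenizer configuration, re-saved with every checkpoint so each `step_*` directory is self-contained. A minimal sketch of reloading it (local path taken from the file listing above):

```python
from transformers import AutoTokenizer

# Reads tokenizer.json, tokenizer_config.json, special_tokens_map.json
# and vocab.txt from the checkpoint directory.
tok = AutoTokenizer.from_pretrained(
    "outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245"
)
print(tok("a gorgeous, witty film"))  # input_ids, token_type_ids, attention_mask
```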
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/val_res.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_24245/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ library_name: peft
+ base_model: bert-base-uncased
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.11.1
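The auto-generated card leaves "How to Get Started" unfilled; a minimal sketch of loading this checkpoint as a LoRA adapter on the base model (the two-label SST-2 head and label order are assumptions consistent with the run name, not stated in the card):

```python
import torch
from peft import PeftModel
from transformers import AutoModelForSequenceClassification, AutoTokenizer

ckpt = "outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327"
base = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
model = PeftModel.from_pretrained(base, ckpt)  # attaches adapter_model.safetensors
tok = AutoTokenizer.from_pretrained(ckpt)

inputs = tok("a gorgeous, witty film", return_tensors="pt")
with torch.no_grad():
    print(model(**inputs).logits.argmax(-1))  # predicted class index
```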
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/adapter_config.json ADDED
@@ -0,0 +1,32 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "bert-base-uncased",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.1,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": [
+ "classifier",
+ "score"
+ ],
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "query",
+ "value"
+ ],
+ "task_type": "SEQ_CLS",
+ "use_dora": false,
+ "use_rslora": false
+ }
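The adapter config matches the hyperparameters encoded in the run name (r=8, lora_alpha=16, dropout=0.1). A sketch of the `peft.LoraConfig` that serializes to this file:

```python
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForSequenceClassification

lora_config = LoraConfig(
    task_type="SEQ_CLS",
    r=8,
    lora_alpha=16,
    lora_dropout=0.1,
    target_modules=["query", "value"],        # BERT self-attention projections
    modules_to_save=["classifier", "score"],  # task heads stay fully trainable
)
base = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
model = get_peft_model(base, lora_config)
model.print_trainable_parameters()  # prints trainable vs. total parameter counts
```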
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66194733614f4c4c13fc58fb060fd9757680cdac9faf3f13639042df94f9f025
+ size 1192672
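Only the Git LFS pointer is versioned here: the spec line, the SHA-256 oid of the payload, and its size (~1.2 MB, consistent with roughly 0.3M fp32 LoRA parameters). After `git lfs pull`, the adapter tensors can be listed directly; a sketch using `safetensors`:

```python
from safetensors import safe_open

path = "outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/adapter_model.safetensors"
with safe_open(path, framework="pt") as f:
    for name in f.keys():  # e.g. ...attention.self.query.lora_A.weight
        print(name, f.get_slice(name).get_shape())
```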
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/all_results.json ADDED
@@ -0,0 +1 @@
+ {"eval_accuracy": 0.9139908256880734}
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/all_results_val.json ADDED
@@ -0,0 +1 @@
+ {"eval_accuracy": 0.9365256124721604}
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/eval_res.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/gpu_stats.json ADDED
@@ -0,0 +1,130 @@
+ {
+ "memory_allocated": 459686912,
+ "max_memory_allocated": 737890304,
+ "memory_reserved": 868220928,
+ "max_memory_reserved": 868220928,
+ "memory_stats": {
+ "active.all.allocated": 33741786,
+ "active.all.current": 367,
+ "active.all.freed": 33741419,
+ "active.all.peak": 627,
+ "active.large_pool.allocated": 3984369,
+ "active.large_pool.current": 77,
+ "active.large_pool.freed": 3984292,
+ "active.large_pool.peak": 191,
+ "active.small_pool.allocated": 29757417,
+ "active.small_pool.current": 290,
+ "active.small_pool.freed": 29757127,
+ "active.small_pool.peak": 550,
+ "active_bytes.all.allocated": 22700349888512,
+ "active_bytes.all.current": 459686912,
+ "active_bytes.all.freed": 22699890201600,
+ "active_bytes.all.peak": 737890304,
+ "active_bytes.large_pool.allocated": 8852519870464,
+ "active_bytes.large_pool.current": 455606272,
+ "active_bytes.large_pool.freed": 8852064264192,
+ "active_bytes.large_pool.peak": 712212480,
+ "active_bytes.small_pool.allocated": 13847830018048,
+ "active_bytes.small_pool.current": 4080640,
+ "active_bytes.small_pool.freed": 13847825937408,
+ "active_bytes.small_pool.peak": 121664000,
+ "allocated_bytes.all.allocated": 22700349888512,
+ "allocated_bytes.all.current": 459686912,
+ "allocated_bytes.all.freed": 22699890201600,
+ "allocated_bytes.all.peak": 737890304,
+ "allocated_bytes.large_pool.allocated": 8852519870464,
+ "allocated_bytes.large_pool.current": 455606272,
+ "allocated_bytes.large_pool.freed": 8852064264192,
+ "allocated_bytes.large_pool.peak": 712212480,
+ "allocated_bytes.small_pool.allocated": 13847830018048,
+ "allocated_bytes.small_pool.current": 4080640,
+ "allocated_bytes.small_pool.freed": 13847825937408,
+ "allocated_bytes.small_pool.peak": 121664000,
+ "allocation.all.allocated": 33741786,
+ "allocation.all.current": 367,
+ "allocation.all.freed": 33741419,
+ "allocation.all.peak": 627,
+ "allocation.large_pool.allocated": 3984369,
+ "allocation.large_pool.current": 77,
+ "allocation.large_pool.freed": 3984292,
+ "allocation.large_pool.peak": 191,
+ "allocation.small_pool.allocated": 29757417,
+ "allocation.small_pool.current": 290,
+ "allocation.small_pool.freed": 29757127,
+ "allocation.small_pool.peak": 550,
+ "inactive_split.all.allocated": 16736217,
+ "inactive_split.all.current": 35,
+ "inactive_split.all.freed": 16736182,
+ "inactive_split.all.peak": 103,
+ "inactive_split.large_pool.allocated": 2727199,
+ "inactive_split.large_pool.current": 19,
+ "inactive_split.large_pool.freed": 2727180,
+ "inactive_split.large_pool.peak": 22,
+ "inactive_split.small_pool.allocated": 14009018,
+ "inactive_split.small_pool.current": 16,
+ "inactive_split.small_pool.freed": 14009002,
+ "inactive_split.small_pool.peak": 84,
+ "inactive_split_bytes.all.allocated": 26659522500096,
+ "inactive_split_bytes.all.current": 66698240,
+ "inactive_split_bytes.all.freed": 26659455801856,
+ "inactive_split_bytes.all.peak": 126964736,
+ "inactive_split_bytes.large_pool.allocated": 11566933429248,
+ "inactive_split_bytes.large_pool.current": 58195968,
+ "inactive_split_bytes.large_pool.freed": 11566875233280,
+ "inactive_split_bytes.large_pool.peak": 68288512,
+ "inactive_split_bytes.small_pool.allocated": 15092589070848,
+ "inactive_split_bytes.small_pool.current": 8502272,
+ "inactive_split_bytes.small_pool.freed": 15092580568576,
+ "inactive_split_bytes.small_pool.peak": 68768768,
+ "max_split_size": -1,
+ "num_alloc_retries": 0,
+ "num_device_alloc": 91,
+ "num_device_free": 0,
+ "num_ooms": 0,
+ "num_sync_all_streams": 0,
+ "oversize_allocations.allocated": 0,
+ "oversize_allocations.current": 0,
+ "oversize_allocations.freed": 0,
+ "oversize_allocations.peak": 0,
+ "oversize_segments.allocated": 0,
+ "oversize_segments.current": 0,
+ "oversize_segments.freed": 0,
+ "oversize_segments.peak": 0,
+ "requested_bytes.all.allocated": 21797095729088,
+ "requested_bytes.all.current": 458548680,
+ "requested_bytes.all.freed": 21796637180408,
+ "requested_bytes.all.peak": 728291116,
+ "requested_bytes.large_pool.allocated": 7950655192064,
+ "requested_bytes.large_pool.current": 454473728,
+ "requested_bytes.large_pool.freed": 7950200718336,
+ "requested_bytes.large_pool.peak": 702642176,
+ "requested_bytes.small_pool.allocated": 13846440537024,
+ "requested_bytes.small_pool.current": 4074952,
+ "requested_bytes.small_pool.freed": 13846436462072,
+ "requested_bytes.small_pool.peak": 121647404,
+ "reserved_bytes.all.allocated": 868220928,
+ "reserved_bytes.all.current": 868220928,
+ "reserved_bytes.all.freed": 0,
+ "reserved_bytes.all.peak": 868220928,
+ "reserved_bytes.large_pool.allocated": 744488960,
+ "reserved_bytes.large_pool.current": 744488960,
+ "reserved_bytes.large_pool.freed": 0,
+ "reserved_bytes.large_pool.peak": 744488960,
+ "reserved_bytes.small_pool.allocated": 123731968,
+ "reserved_bytes.small_pool.current": 123731968,
+ "reserved_bytes.small_pool.freed": 0,
+ "reserved_bytes.small_pool.peak": 123731968,
+ "segment.all.allocated": 91,
+ "segment.all.current": 91,
+ "segment.all.freed": 0,
+ "segment.all.peak": 91,
+ "segment.large_pool.allocated": 32,
+ "segment.large_pool.current": 32,
+ "segment.large_pool.freed": 0,
+ "segment.large_pool.peak": 32,
+ "segment.small_pool.allocated": 59,
+ "segment.small_pool.current": 59,
+ "segment.small_pool.freed": 0,
+ "segment.small_pool.peak": 59
+ }
+ }
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "101": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "102": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "103": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_lower_case": true,
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
outputs/sst2/bert-base-uncased_loratrain_val_8_16_0.1_0.0001_12345/step_32327/val_res.json ADDED
The diff for this file is too large to render. See raw diff