vc64 committed
Commit 8505ccb
1 Parent(s): e17a00f

End of training

README.md ADDED
@@ -0,0 +1,57 @@
+ ---
+ license: cc-by-4.0
+ base_model: deepset/tinyroberta-squad2
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: outputs
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/victor-chen/huggingface/runs/b9uesjjf)
+ # outputs
+
+ This model is a fine-tuned version of [deepset/tinyroberta-squad2](https://huggingface.co/deepset/tinyroberta-squad2) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 16
+ - total_train_batch_size: 256
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 100
+ - training_steps: 200
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.41.0.dev0
+ - Pytorch 2.2.1+cu121
+ - Datasets 2.19.0
+ - Tokenizers 0.19.1
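
The hyperparameters listed in the model card map directly onto the standard `transformers` Trainer configuration. As a point of reference, here is a minimal, hypothetical sketch of how they would be expressed as `TrainingArguments`; the actual training script is not part of this commit, and dataset and model setup are omitted.

```python
# Hypothetical sketch: the model-card hyperparameters expressed as
# transformers TrainingArguments. Not the author's script (not in this
# commit); dataset and model loading are omitted.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="outputs",
    learning_rate=2e-5,
    per_device_train_batch_size=16,  # train_batch_size: 16
    per_device_eval_batch_size=8,    # eval_batch_size: 8
    seed=42,
    gradient_accumulation_steps=16,  # 16 * 16 = 256 total train batch size
    lr_scheduler_type="linear",
    warmup_steps=100,
    max_steps=200,                   # training_steps: 200
    fp16=True,                       # mixed precision via native AMP
    # Adam with betas=(0.9, 0.999) and epsilon=1e-08 matches the Trainer's
    # default optimizer settings, so no override is needed here.
)
```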
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "deepset/tinyroberta-squad2",
+   "architectures": [
+     "RobertaForQuestionAnswering"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "language": "english",
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "name": "Roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.0.dev0",
+   "type_vocab_size": 1,
+   "use_cache": false,
+   "vocab_size": 50265
+ }
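
This config describes a 6-layer, 768-hidden, 12-head RoBERTa with an extractive question-answering head (`RobertaForQuestionAnswering`). A minimal usage sketch, assuming the checkpoint is available locally; the model path below is an assumption, not something stated in this commit.

```python
# Minimal extractive-QA sketch for a RobertaForQuestionAnswering checkpoint
# with the config shown above. The model path is an assumption; point it at
# a local clone of this repository or its hub id.
from transformers import pipeline

qa = pipeline("question-answering", model="./outputs")  # assumed local path

result = qa(
    question="How many hidden layers does the model have?",
    context="tinyroberta-squad2 is a distilled RoBERTa with 6 hidden layers.",
)
print(result["answer"], result["score"])
```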
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7ef38264d945abbafb332173949fa907129a76dc7813e9db0a3379e75056798
+ size 326129688
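
The binary artifacts in this commit are stored as Git LFS pointers: a `version` line, a `sha256` object id, and the file size in bytes. As a sketch, a downloaded copy of `model.safetensors` can be checked against the pointer above; the local path is an assumption.

```python
# Sketch: verify a downloaded model.safetensors against its LFS pointer
# (sha256 oid and byte size). The local path is an assumption.
import hashlib
from pathlib import Path

path = Path("model.safetensors")  # assumed download location
expected_oid = "a7ef38264d945abbafb332173949fa907129a76dc7813e9db0a3379e75056798"
expected_size = 326129688

data = path.read_bytes()
assert len(data) == expected_size, "size does not match the LFS pointer"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 mismatch"
print("model.safetensors matches its LFS pointer")
```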
runs/Apr20_02-04-24_406349e91be1/events.out.tfevents.1713578669.406349e91be1.910.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd57a905b0e2284b9e498711892b7796648b2db4a5e8856024c839568a7ef48e
+ size 5286
runs/Apr20_02-05-06_406349e91be1/events.out.tfevents.1713578707.406349e91be1.910.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39598ad93c773bac19443e9fefb095f914f82ae419612b55840d57864e6977fa
+ size 5287
runs/Apr20_02-06-46_406349e91be1/events.out.tfevents.1713578807.406349e91be1.910.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:54a68817259d38ee378bd6f931825b43ff41561b61f3e995ae0e477fa0de2126
+ size 5286
runs/Apr20_02-09-34_406349e91be1/events.out.tfevents.1713578975.406349e91be1.910.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01e46c1f28c020593c4ba099fc146d6aa71f5921459e26496039b39dad5304ee
+ size 5286
runs/Apr20_02-11-30_406349e91be1/events.out.tfevents.1713579091.406349e91be1.910.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b85f3c436649a06782a3b12bf36547b5e1a82d1eb3e1b6c03b4e2a982b15553
+ size 5286
runs/Apr20_02-16-35_406349e91be1/events.out.tfevents.1713579396.406349e91be1.910.5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b65a4c6e0db3104f1891239ce64b699e99f3388cba1fd5528f08558aa465908
+ size 4808
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9b7ad40d565d706ec3d2812a9ec02d2159d8e2b82f888bd1d64bf434ed26024
+ size 4984