AhmadFareedKhan committed
Commit
3ce68f0
Parent: 5c96a86

Model save

README.md CHANGED
@@ -1,58 +1,61 @@
- ---
- license: apache-2.0
- base_model: Twitter/twhin-bert-large
- tags:
- - generated_from_trainer
- model-index:
- - name: model
-   results: []
- ---
-
- <!-- This model card has been generated automatically according to the information the Trainer had access to. You
- should probably proofread and complete it, then remove this comment. -->
-
- # model
-
- This model is a fine-tuned version of [Twitter/twhin-bert-large](https://huggingface.co/Twitter/twhin-bert-large) on the None dataset.
- It achieves the following results on the evaluation set:
- - Loss: 2.1670
-
- ## Model description
-
- More information needed
-
- ## Intended uses & limitations
-
- More information needed
-
- ## Training and evaluation data
-
- More information needed
-
- ## Training procedure
-
- ### Training hyperparameters
-
- The following hyperparameters were used during training:
- - learning_rate: 1e-05
- - train_batch_size: 8
- - eval_batch_size: 8
- - seed: 42
- - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- - lr_scheduler_type: linear
- - num_epochs: 2
-
- ### Training results
-
- | Training Loss | Epoch | Step | Validation Loss |
- |:-------------:|:-----:|:----:|:---------------:|
- | No log | 1.0 | 300 | 2.2079 |
- | 2.4158 | 2.0 | 600 | 2.1670 |
-
-
- ### Framework versions
-
- - Transformers 4.32.1
- - Pytorch 2.1.2
- - Datasets 2.20.0
- - Tokenizers 0.13.2
+ ---
+ license: apache-2.0
+ base_model: Twitter/twhin-bert-large
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: model
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # model
+
+ This model is a fine-tuned version of [Twitter/twhin-bert-large](https://huggingface.co/Twitter/twhin-bert-large) on the None dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.9379
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 5
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | No log | 1.0 | 134 | 2.1741 |
+ | No log | 2.0 | 268 | 2.0266 |
+ | No log | 3.0 | 402 | 2.0429 |
+ | 2.1752 | 4.0 | 536 | 2.0212 |
+ | 2.1752 | 5.0 | 670 | 1.9379 |
+
+
+ ### Framework versions
+
+ - Transformers 4.39.3
+ - Pytorch 2.1.2
+ - Datasets 2.18.0
+ - Tokenizers 0.15.2
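
The updated card extends training from 2 to 5 epochs, lowering the final eval loss from 2.1670 to 1.9379. For anyone reproducing the run, a minimal sketch of the matching `TrainingArguments` follows; only the hyperparameters listed in the card are taken from the commit, while the dataset, collator, and `Trainer` wiring are omitted because the card does not specify them (it reports the dataset as "None").

```python
# Minimal sketch: sets only the hyperparameters listed in the card above.
# Dataset, data collator, and Trainer wiring are assumptions, not in the commit.
from transformers import AutoModelForMaskedLM, AutoTokenizer, TrainingArguments

base = "Twitter/twhin-bert-large"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForMaskedLM.from_pretrained(base)

args = TrainingArguments(
    output_dir="model",                # matches the card's model name
    learning_rate=1e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=5,
    evaluation_strategy="epoch",       # assumed: the card reports eval loss per epoch
    # Adam betas (0.9, 0.999) and epsilon 1e-8 are the TrainingArguments defaults.
)
```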
config.json CHANGED
@@ -1,25 +1,25 @@
- {
-   "_name_or_path": "Twitter/twhin-bert-large",
-   "architectures": [
-     "BertForMaskedLM"
-   ],
-   "attention_probs_dropout_prob": 0.1,
-   "classifier_dropout": null,
-   "hidden_act": "gelu",
-   "hidden_dropout_prob": 0.1,
-   "hidden_size": 1024,
-   "initializer_range": 0.02,
-   "intermediate_size": 4096,
-   "layer_norm_eps": 1e-12,
-   "max_position_embeddings": 512,
-   "model_type": "bert",
-   "num_attention_heads": 16,
-   "num_hidden_layers": 24,
-   "pad_token_id": 0,
-   "position_embedding_type": "relative_key",
-   "torch_dtype": "float32",
-   "transformers_version": "4.32.1",
-   "type_vocab_size": 2,
-   "use_cache": true,
-   "vocab_size": 250002
- }
+ {
+   "_name_or_path": "Twitter/twhin-bert-large",
+   "architectures": [
+     "BertForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "position_embedding_type": "relative_key",
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.3",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 250002
+ }
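
The only substantive change to `config.json` is the `transformers_version` bump; the architecture itself (a 24-layer, 1024-hidden BERT for masked LM with `relative_key` position embeddings over the 250k-token vocabulary) is unchanged. A quick sanity check, assuming the standard `AutoConfig` API and that the hub checkpoint matches these files:

```python
# Sketch: confirm the checkpoint config matches the fields in the diff above.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("Twitter/twhin-bert-large")
assert config.model_type == "bert"
assert config.position_embedding_type == "relative_key"
assert config.num_hidden_layers == 24 and config.hidden_size == 1024
assert config.vocab_size == 250002
```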
generation_config.json CHANGED
@@ -1,5 +1,5 @@
- {
-   "_from_model_config": true,
-   "pad_token_id": 0,
-   "transformers_version": "4.32.1"
- }
+ {
+   "_from_model_config": true,
+   "pad_token_id": 0,
+   "transformers_version": "4.39.3"
+ }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:119130bf91e876d35efaef2dc8e0e673bb8db5dba5c78799b4644890a2d87efa
+ oid sha256:6637e02763af64ff44d717f0b4f0892cf0c9b3ffccc2373c272ad0e49d81e52d
  size 2246902200
special_tokens_map.json CHANGED
@@ -1,15 +1,51 @@
- {
-   "bos_token": "<s>",
-   "cls_token": "<s>",
-   "eos_token": "</s>",
-   "mask_token": {
-     "content": "<mask>",
-     "lstrip": true,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "pad_token": "<pad>",
-   "sep_token": "</s>",
-   "unk_token": "<unk>"
- }
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4c08c80d1df11b82ada2fd707562f86a9ebd5b7de04f51ebd2b49f2cd5906d00
- size 17082925
+ oid sha256:6ada83ceb5cde4af83bf64839fe365b99bd25964ad8829793848e54893092812
+ size 17083021
tokenizer_config.json CHANGED
@@ -1,19 +1,54 @@
- {
-   "bos_token": "<s>",
-   "clean_up_tokenization_spaces": true,
-   "cls_token": "<s>",
-   "eos_token": "</s>",
-   "mask_token": {
-     "__type": "AddedToken",
-     "content": "<mask>",
-     "lstrip": true,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   },
-   "model_max_length": 1000000000000000019884624838656,
-   "pad_token": "<pad>",
-   "sep_token": "</s>",
-   "tokenizer_class": "XLMRobertaTokenizer",
-   "unk_token": "<unk>"
- }
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "250001": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": "<mask>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "unk_token": "<unk>"
+ }
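
The rewrite of `tokenizer_config.json` reflects the newer tokenizer serialization format used by more recent Transformers releases, which records every special token in `added_tokens_decoder` keyed by token id; the token-to-id mapping itself is unchanged. A quick check through the public tokenizer API, assuming the hub checkpoint matches these files:

```python
# Sketch: verify the special-token ids declared in added_tokens_decoder above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Twitter/twhin-bert-large")
assert tok.convert_tokens_to_ids("<s>") == 0
assert tok.convert_tokens_to_ids("<pad>") == 1
assert tok.convert_tokens_to_ids("</s>") == 2
assert tok.convert_tokens_to_ids("<unk>") == 3
assert tok.convert_tokens_to_ids("<mask>") == 250001
```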
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a1a49092928e690814c43bf7c6e574d7d9df2a977583f4395aa8bded4f88bbeb
- size 4536
+ oid sha256:ef41a6e4756a444514894443eb0128ff47e7fa5e42d29d4da902ee072af8735e
+ size 4984