SourCoachSauers committed
Commit 59b04f4
1 Parent(s): edeee06

color_descriptions

README.md CHANGED
@@ -16,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # results
 
-This model is a fine-tuned version of [TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T) on the None dataset.
+This model is a fine-tuned version of [TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T) on an unknown dataset.
 
 ## Model description
 
@@ -54,6 +54,6 @@ The following hyperparameters were used during training:
 
 - PEFT 0.11.1
 - Transformers 4.42.4
-- Pytorch 2.2.1+cu121
+- Pytorch 2.3.1+cu121
 - Datasets 2.20.0
 - Tokenizers 0.19.1
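
As context for the README above, a minimal loading sketch for this kind of PEFT adapter is shown below. The adapter repo id is an assumption inferred from the commit author and the "results" heading; it is not stated anywhere in this diff.

```python
# Hypothetical usage sketch; the adapter repo id is assumed, not taken from the diff.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T"
adapter_id = "SourCoachSauers/results"  # assumption: inferred repo id

tokenizer = AutoTokenizer.from_pretrained(adapter_id)  # picks up add_eos_token=True from this commit
base = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base, adapter_id)    # applies the LoRA adapter weights
model.eval()
```
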
adapter_config.json CHANGED
@@ -20,11 +20,11 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "k_proj",
-    "v_proj",
+    "q_proj",
     "o_proj",
     "gate_proj",
-    "q_proj"
+    "k_proj",
+    "v_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c774b1d1dbab6275f08d4c335a48e408a3a60d6f29feac6c148d9b8634d4c6e2
+oid sha256:6f75b66a4ad11de520f9fa29c2e12fe3b5f3cc284d5aeea05cc10b5ce5a38f60
 size 57701064
tokenizer.json CHANGED
@@ -67,6 +67,12 @@
         "id": "A",
         "type_id": 0
       }
+    },
+    {
+      "SpecialToken": {
+        "id": "</s>",
+        "type_id": 0
+      }
     }
   ],
   "pair": [
@@ -82,6 +88,12 @@
         "type_id": 0
       }
     },
+    {
+      "SpecialToken": {
+        "id": "</s>",
+        "type_id": 0
+      }
+    },
     {
       "SpecialToken": {
         "id": "<s>",
@@ -93,9 +105,24 @@
         "id": "B",
         "type_id": 1
       }
+    },
+    {
+      "SpecialToken": {
+        "id": "</s>",
+        "type_id": 1
+      }
     }
   ],
   "special_tokens": {
+    "</s>": {
+      "id": "</s>",
+      "ids": [
+        2
+      ],
+      "tokens": [
+        "</s>"
+      ]
+    },
     "<s>": {
       "id": "<s>",
       "ids": [
tokenizer_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "add_bos_token": true,
-  "add_eos_token": false,
+  "add_eos_token": true,
   "add_prefix_space": null,
   "added_tokens_decoder": {
     "0": {
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8c51b4f76cd7106c343ff68fe7e955f3a205f65d0661177b6c663fc3211c67ae
+oid sha256:be4fc7fe27d03f5ac19bd26ea384499bf452563447f251672b9bb0243ad7f003
 size 5368