lewtun (HF staff) committed
Commit 80fb188
1 parent: c8d792a

End of training

README.md ADDED
@@ -0,0 +1,85 @@
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: Qwen/Qwen2-0.5B-Instruct
+ tags:
+ - trl
+ - reward-trainer
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: Qwen2-0.5B-Reward
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Qwen2-0.5B-Reward
+
+ This model is a fine-tuned version of [Qwen/Qwen2-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2-0.5B-Instruct) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.5182
+ - Accuracy: 0.728
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 64
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 1.0
+
+ ### Training results
+
+ | Training Loss | Epoch  | Step | Validation Loss | Accuracy |
+ |:-------------:|:------:|:----:|:---------------:|:--------:|
+ | 0.6444        | 0.0516 | 50   | 0.6037          | 0.672    |
+ | 0.5825        | 0.1032 | 100  | 0.5859          | 0.682    |
+ | 0.5732        | 0.1548 | 150  | 0.5751          | 0.704    |
+ | 0.5494        | 0.2064 | 200  | 0.5514          | 0.701    |
+ | 0.5654        | 0.2580 | 250  | 0.5427          | 0.709    |
+ | 0.5514        | 0.3096 | 300  | 0.5309          | 0.723    |
+ | 0.537         | 0.3612 | 350  | 0.5259          | 0.735    |
+ | 0.5236        | 0.4128 | 400  | 0.5368          | 0.714    |
+ | 0.536         | 0.4644 | 450  | 0.5451          | 0.726    |
+ | 0.5236        | 0.5160 | 500  | 0.5371          | 0.727    |
+ | 0.526         | 0.5676 | 550  | 0.5293          | 0.729    |
+ | 0.5197        | 0.6192 | 600  | 0.5239          | 0.727    |
+ | 0.525         | 0.6708 | 650  | 0.5227          | 0.732    |
+ | 0.5123        | 0.7224 | 700  | 0.5206          | 0.723    |
+ | 0.5171        | 0.7740 | 750  | 0.5237          | 0.718    |
+ | 0.5156        | 0.8256 | 800  | 0.5245          | 0.722    |
+ | 0.5115        | 0.8772 | 850  | 0.5234          | 0.723    |
+ | 0.5007        | 0.9288 | 900  | 0.5207          | 0.729    |
+ | 0.5018        | 0.9804 | 950  | 0.5182          | 0.728    |
+
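For context (editorial note, not part of the auto-generated card): in TRL's `RewardTrainer`, the reported loss is the pairwise preference loss over (chosen, rejected) response pairs,

$$\mathcal{L} = -\log \sigma\big(r_\theta(x, y_{\text{chosen}}) - r_\theta(x, y_{\text{rejected}})\big),$$

and accuracy is the fraction of pairs where the chosen response receives the higher reward. The final accuracy of 0.728 therefore means roughly 73% of held-out preference pairs are ranked correctly.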
+
+ ### Framework versions
+
+ - Transformers 4.44.2
+ - Pytorch 2.4.0+cu121
+ - Datasets 2.21.0
+ - Tokenizers 0.19.1
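To make the card concrete, here is a minimal usage sketch (not part of this commit) for scoring a conversation with the trained reward model. The repo id `Qwen2-0.5B-Reward` below is a placeholder assumption; substitute the actual Hub path of this repository or a local checkout.

```python
# Hedged sketch: score one chat with the trained reward model.
# "Qwen2-0.5B-Reward" is a placeholder repo id (assumption), not a confirmed path.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "Qwen2-0.5B-Reward"  # replace with the real Hub id or a local directory
tokenizer = AutoTokenizer.from_pretrained(model_id)
# config.json declares Qwen2ForSequenceClassification with a single label,
# so the model emits one scalar logit per sequence: the reward score.
model = AutoModelForSequenceClassification.from_pretrained(model_id)
model.eval()

messages = [
    {"role": "user", "content": "What is the capital of France?"},
    {"role": "assistant", "content": "The capital of France is Paris."},
]
# tokenizer_config.json ships a ChatML chat template, so the conversation
# can be rendered to a single string before tokenization.
text = tokenizer.apply_chat_template(messages, tokenize=False)
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    reward = model(**inputs).logits[0, 0].item()
print(f"reward: {reward:.4f}")
```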
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "_name_or_path": "Qwen/Qwen2-0.5B-Instruct",
+   "architectures": [
+     "Qwen2ForSequenceClassification"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 896,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4864,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "max_position_embeddings": 32768,
+   "max_window_layers": 24,
+   "model_type": "qwen2",
+   "num_attention_heads": 14,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 2,
+   "pad_token_id": 151643,
+   "rms_norm_eps": 1e-06,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.2",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
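The single-entry `id2label` above is what makes this a reward-model head: `transformers` derives `num_labels` from it, giving a one-dimensional classification head whose logit is the scalar reward. A quick check, as a sketch assuming a local checkout of this repository:

```python
# Sketch: confirm the config implies a scalar-output head.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(".")  # "." = local clone of this repo (assumption)
assert config.architectures == ["Qwen2ForSequenceClassification"]
assert config.num_labels == 1  # derived from the single id2label entry above
```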
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8dc663e240eb3b5a1f4b8ae4e24ff03fa11cd39d397d93b73f62bef3f5645f4e
+ size 1976167144
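As a sanity check on this LFS pointer: 1,976,167,144 bytes at 4 bytes per float32 parameter (config.json sets `torch_dtype: float32`) works out to about 494M parameters, consistent with the nominal 0.5B model size.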
special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
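The `chat_template` above is standard ChatML, with a default system prompt injected when the conversation supplies none. A short sketch (again assuming a local checkout) of what it renders:

```python
# Sketch: render the ChatML template shipped in this tokenizer_config.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # local clone of this repo (assumption)
messages = [{"role": "user", "content": "Hi"}]
print(tokenizer.apply_chat_template(messages, tokenize=False))
# Expected output:
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hi<|im_end|>
```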
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a56f657b0820015fb3d9eeffd7193b370e0029b9f4fb1a9ce77bb75e184f10ec
+ size 5240
vocab.json ADDED
The diff for this file is too large to render. See raw diff