yuansui committed on
Commit
dc2e854
1 Parent(s): e5879d7

End of training

Browse files
Files changed (2) hide show
  1. README.md +7 -3
  2. config.json +1 -1
README.md CHANGED
@@ -1,21 +1,25 @@
1
  ---
2
  library_name: transformers
 
3
  tags:
 
4
  - trl
5
  - dpo
6
  - alignment-handbook
7
  - generated_from_trainer
 
 
8
  model-index:
9
- - name: llama2_7b_instruct_sft_dpo
10
  results: []
11
  ---
12
 
13
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
14
  should probably proofread and complete it, then remove this comment. -->
15
 
16
- # llama2_7b_instruct_sft_dpo
17
 
18
- This model was trained from scratch on an unknown dataset.
19
 
20
  ## Model description
21
 
 
1
  ---
2
  library_name: transformers
3
+ base_model: llama-2-7b-instruct-sft
4
  tags:
5
+ - alignment-handbook
6
  - trl
7
  - dpo
8
  - alignment-handbook
9
  - generated_from_trainer
10
+ datasets:
11
+ - xinlai/Math-Step-DPO-10K
12
  model-index:
13
+ - name: llama-2-7b-instruct-sft
14
  results: []
15
  ---
16
 
17
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
18
  should probably proofread and complete it, then remove this comment. -->
19
 
20
+ # llama-2-7b-instruct-sft
21
 
22
+ This model is a fine-tuned version of [llama-2-7b-instruct-sft](https://huggingface.co/llama-2-7b-instruct-sft) on the xinlai/Math-Step-DPO-10K dataset.
23
 
24
  ## Model description
25
 
config.json CHANGED
@@ -24,6 +24,6 @@
24
  "tie_word_embeddings": false,
25
  "torch_dtype": "bfloat16",
26
  "transformers_version": "4.44.2",
27
- "use_cache": false,
28
  "vocab_size": 32000
29
  }
 
24
  "tie_word_embeddings": false,
25
  "torch_dtype": "bfloat16",
26
  "transformers_version": "4.44.2",
27
+ "use_cache": true,
28
  "vocab_size": 32000
29
  }