pbevan11 committed
Commit b490da1
1 Parent(s): 0736e78

End of training

README.md CHANGED
@@ -3,9 +3,16 @@ library_name: transformers
 license: apache-2.0
 base_model: pbevan11/Mistral-Nemo-Instruct-MCAI-SFT-3Epochs
 tags:
+- alignment-handbook
 - trl
 - dpo
 - generated_from_trainer
+- trl
+- dpo
+- generated_from_trainer
+datasets:
+- pbevan11/multilingual-constitutional-preference-pairs
+- pbevan11/ultrafeedback_binarized_multilingual
 model-index:
 - name: Mistral-Nemo-Instruct-MCAI-SFT-DPO-3Epochs
   results: []
@@ -16,7 +23,17 @@ should probably proofread and complete it, then remove this comment. -->
 
 # Mistral-Nemo-Instruct-MCAI-SFT-DPO-3Epochs
 
-This model is a fine-tuned version of [pbevan11/Mistral-Nemo-Instruct-MCAI-SFT-3Epochs](https://huggingface.co/pbevan11/Mistral-Nemo-Instruct-MCAI-SFT-3Epochs) on the None dataset.
+This model is a fine-tuned version of [pbevan11/Mistral-Nemo-Instruct-MCAI-SFT-3Epochs](https://huggingface.co/pbevan11/Mistral-Nemo-Instruct-MCAI-SFT-3Epochs) on the pbevan11/multilingual-constitutional-preference-pairs and the pbevan11/ultrafeedback_binarized_multilingual datasets.
+It achieves the following results on the evaluation set:
+- Loss: 0.5579
+- Rewards/chosen: -0.4264
+- Rewards/rejected: -0.9042
+- Rewards/accuracies: 0.7466
+- Rewards/margins: 0.4777
+- Logps/rejected: -213.7833
+- Logps/chosen: -192.1762
+- Logits/rejected: 0.5653
+- Logits/chosen: 0.5394
 
 ## Model description
 
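A quick consistency note on the DPO metrics in the card above: `Rewards/margins` is just the gap between the chosen and rejected rewards, so it can be sanity-checked from the other two fields. A minimal sketch using only the values reported above (the last decimals differ slightly from the logged value because the eval margin is averaged per example):

```python
# Values copied from the model card / eval_results.json in this commit.
rewards_chosen = -0.42640554904937744
rewards_rejected = -0.9041503667831421

# DPO reward margin = chosen reward minus rejected reward.
margin = rewards_chosen - rewards_rejected
print(round(margin, 4))  # 0.4777 -> matches Rewards/margins in the card
```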
all_results.json CHANGED
@@ -1,5 +1,18 @@
 {
     "epoch": 1.0,
+    "eval_logits/chosen": 0.539411723613739,
+    "eval_logits/rejected": 0.5653398036956787,
+    "eval_logps/chosen": -192.1762237548828,
+    "eval_logps/rejected": -213.7832794189453,
+    "eval_loss": 0.5578895807266235,
+    "eval_rewards/accuracies": 0.7466216087341309,
+    "eval_rewards/chosen": -0.42640554904937744,
+    "eval_rewards/margins": 0.47774478793144226,
+    "eval_rewards/rejected": -0.9041503667831421,
+    "eval_runtime": 231.2679,
+    "eval_samples": 2305,
+    "eval_samples_per_second": 9.967,
+    "eval_steps_per_second": 0.16,
     "total_flos": 0.0,
     "train_loss": 0.5964088123964976,
     "train_runtime": 1125.1043,
config.json CHANGED
@@ -22,6 +22,6 @@
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.45.1",
-  "use_cache": false,
+  "use_cache": true,
   "vocab_size": 131072
 }
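The `use_cache` flip in config.json mainly affects inference: it is commonly set to `false` during training (e.g. for gradient-checkpointing compatibility) and re-enabled afterwards so generation reuses past key/value states across decoding steps. A minimal usage sketch with `transformers`; the repo id is an assumption inferred from the model-index name above:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed repo id (taken from the model-index name); adjust if the actual id differs.
model_id = "pbevan11/Mistral-Nemo-Instruct-MCAI-SFT-DPO-3Epochs"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)

inputs = tokenizer("Hello, how are you?", return_tensors="pt")
# With use_cache set to true in the config (this commit), generate() reuses the KV cache.
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```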
eval_results.json ADDED
@@ -0,0 +1,16 @@
+{
+    "epoch": 1.0,
+    "eval_logits/chosen": 0.539411723613739,
+    "eval_logits/rejected": 0.5653398036956787,
+    "eval_logps/chosen": -192.1762237548828,
+    "eval_logps/rejected": -213.7832794189453,
+    "eval_loss": 0.5578895807266235,
+    "eval_rewards/accuracies": 0.7466216087341309,
+    "eval_rewards/chosen": -0.42640554904937744,
+    "eval_rewards/margins": 0.47774478793144226,
+    "eval_rewards/rejected": -0.9041503667831421,
+    "eval_runtime": 231.2679,
+    "eval_samples": 2305,
+    "eval_samples_per_second": 9.967,
+    "eval_steps_per_second": 0.16
+}
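The throughput fields in eval_results.json are internally consistent: `eval_samples_per_second` is just `eval_samples / eval_runtime`. A one-line check on the values above:

```python
eval_samples = 2305
eval_runtime = 231.2679  # seconds

print(round(eval_samples / eval_runtime, 3))  # ~9.967, matching eval_samples_per_second
```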
runs/Sep30_15-39-35_280ca1cd997c/events.out.tfevents.1727712969.280ca1cd997c.3414.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c0ca00453dc877585f9bf652a37eabfd529e7f29374420009e81e4c9a8ffcf9
+size 815
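The added run file is a TensorBoard event log stored as a Git LFS pointer, so the `oid`/`size` fields above describe the pointer rather than the log contents. Once the real file has been fetched (e.g. with `git lfs pull` in a local clone), its scalars can be read back with the standard `tensorboard` package; a minimal sketch, where the scalar tag name is an assumption (inspect the printed tag list first):

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Hypothetical local path to the fetched event file from this commit.
path = "runs/Sep30_15-39-35_280ca1cd997c/events.out.tfevents.1727712969.280ca1cd997c.3414.1"

acc = EventAccumulator(path)
acc.Reload()  # parse the event file from disk

print(acc.Tags()["scalars"])            # list the scalar tags that were actually logged
for event in acc.Scalars("eval/loss"):  # assumed tag name; pick one from the list above
    print(event.step, event.value)
```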