Files changed (1)
1. README.md +114 -6
README.md CHANGED
@@ -1,13 +1,108 @@
  ---
- license: other
- license_name: qwen
- license_link: https://huggingface.co/Qwen/Qwen2.5-72B-Instruct/blob/main/LICENSE
  language:
  - en
- pipeline_tag: text-generation
- base_model: Qwen/Qwen2.5-72B
+ license: other
  tags:
  - chat
+ base_model: Qwen/Qwen2.5-72B
+ license_name: qwen
+ license_link: https://huggingface.co/Qwen/Qwen2.5-72B-Instruct/blob/main/LICENSE
+ pipeline_tag: text-generation
+ model-index:
+ - name: Qwen2.5-72B-Instruct
+   results:
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: IFEval (0-Shot)
+       type: HuggingFaceH4/ifeval
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: inst_level_strict_acc and prompt_level_strict_acc
+       value: 86.5
+       name: strict accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Qwen/Qwen2.5-72B-Instruct
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: BBH (3-Shot)
+       type: BBH
+       args:
+         num_few_shot: 3
+     metrics:
+     - type: acc_norm
+       value: 61.78
+       name: normalized accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Qwen/Qwen2.5-72B-Instruct
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MATH Lvl 5 (4-Shot)
+       type: hendrycks/competition_math
+       args:
+         num_few_shot: 4
+     metrics:
+     - type: exact_match
+       value: 1.28
+       name: exact match
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Qwen/Qwen2.5-72B-Instruct
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: GPQA (0-shot)
+       type: Idavidrein/gpqa
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: acc_norm
+       value: 17.45
+       name: acc_norm
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Qwen/Qwen2.5-72B-Instruct
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MuSR (0-shot)
+       type: TAUR-Lab/MuSR
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: acc_norm
+       value: 11.81
+       name: acc_norm
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Qwen/Qwen2.5-72B-Instruct
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MMLU-PRO (5-shot)
+       type: TIGER-Lab/MMLU-Pro
+       config: main
+       split: test
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 51.3
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Qwen/Qwen2.5-72B-Instruct
+       name: Open LLM Leaderboard
  ---

  # Qwen2.5-72B-Instruct
@@ -129,4 +224,17 @@ If you find our work helpful, feel free to give us a cite.
  journal={arXiv preprint arXiv:2407.10671},
  year={2024}
  }
- ```
+ ```
+ # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard)
+ Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Qwen__Qwen2.5-72B-Instruct)
+
+ | Metric             |Value|
+ |-------------------|----:|
+ |Avg.               |38.35|
+ |IFEval (0-Shot)    |86.50|
+ |BBH (3-Shot)       |61.78|
+ |MATH Lvl 5 (4-Shot)| 1.28|
+ |GPQA (0-shot)      |17.45|
+ |MuSR (0-shot)      |11.81|
+ |MMLU-PRO (5-shot)  |51.30|
+
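For reference, the `Avg.` row in the table added above works out to the unweighted mean of the six leaderboard scores. A minimal sketch in plain Python (the `scores` dict is constructed here for illustration, not part of the model card) that reproduces the 38.35 figure from the values in this diff:

```python
# Reproduce the "Avg." row of the added leaderboard table.
# The six scores below are taken verbatim from the diff; rounding to
# two decimals matches the table's formatting.
scores = {
    "IFEval (0-Shot)": 86.50,
    "BBH (3-Shot)": 61.78,
    "MATH Lvl 5 (4-Shot)": 1.28,
    "GPQA (0-shot)": 17.45,
    "MuSR (0-shot)": 11.81,
    "MMLU-PRO (5-shot)": 51.30,
}

average = round(sum(scores.values()) / len(scores), 2)
print(average)  # 38.35, the value reported in the "Avg." row
```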