eduagarcia committed
Commit
649d6d4
1 Parent(s): 73d86a6

add evaluated models to README.md

Files changed (2)
  1. README.md +155 -0
  2. update_models_in_readme.py +27 -0
README.md CHANGED
@@ -19,6 +19,161 @@ space_ci: # See https://huggingface.co/spaces/Wauplin/gradio-space-ci
  - HF_TOKEN
  - IS_PUBLIC
  - HAS_HIGHER_RATE_LIMIT
+ models:
+ - 01-ai/Yi-34B-Chat
+ - 01-ai/Yi-6B-Chat
+ - 01-ai/Yi-6B
+ - 22h/cabrita_7b_pt_850000
+ - 22h/open-cabrita3b
+ - abacusai/Smaug-72B-v0.1
+ - AI-Sweden-Models/gpt-sw3-40b
+ - allenai/OLMo-1B
+ - allenai/OLMo-7B-Twin-2T
+ - allenai/OLMo-7B
+ - allenai/tulu-2-dpo-13b
+ - argilla/CapybaraHermes-2.5-Mistral-7B
+ - argilla/notus-7b-v1
+ - argilla/notux-8x7b-v1
+ - bardsai/jaskier-7b-dpo-v5.6
+ - berkeley-nest/Starling-LM-7B-alpha
+ - bigscience/bloom-1b7
+ - bigscience/bloom-3b
+ - bigscience/bloom-560m
+ - bigscience/bloom-7b1
+ - Bruno/Caramelinho
+ - Bruno/Caramelo_7B
+ - cnmoro/Mistral-7B-Portuguese
+ - CohereForAI/aya-101
+ - croissantllm/CroissantLLMBase
+ - dominguesm/Canarim-7B-Instruct
+ - dominguesm/canarim-7b
+ - dynamofl/dynamo-8B-v0.1
+ - eduagarcia/gemma-7b-it_no_chat_template
+ - eduagarcia/gemma-7b-it_singleturn_chat_template
+ - EleutherAI/pythia-1b
+ - EleutherAI/pythia-6.9b
+ - facebook/opt-30b
+ - facebook/opt-6.7b
+ - facebook/opt-66b
+ - fernandosola/bluearara-7B-instruct
+ - fernandosola/bluearara-7B
+ - fernandosola/bluearara-7B
+ - FuseAI/FuseChat-7B-VaRM
+ - FuseAI/OpenChat-3.5-7B-Solar
+ - google/gemma-2b-it
+ - google/gemma-2b
+ - google/gemma-7b-it
+ - google/gemma-7b-it
+ - google/gemma-7b
+ - HeyLucasLeao/gpt-neo-small-portuguese
+ - HuggingFaceH4/zephyr-7b-beta
+ - HuggingFaceH4/zephyr-7b-gemma-v0.1
+ - HuggingFaceTB/cosmo-1b
+ - huggyllama/llama-13b
+ - huggyllama/llama-30b
+ - huggyllama/llama-65b
+ - huggyllama/llama-7b
+ - Intel/neural-chat-7b-v3-1
+ - Intel/neural-chat-7b-v3-3
+ - internlm/internlm-20b
+ - internlm/internlm-7b
+ - internlm/internlm2-1_8b
+ - internlm/internlm2-20b
+ - internlm/internlm2-7b
+ - internlm/internlm2-base-20b
+ - internlm/internlm2-base-7b
+ - internlm/internlm2-chat-1_8b
+ - internlm/internlm2-chat-20b
+ - internlm/internlm2-chat-7b
+ - JJhooww/MistralReloadBR_v2_ptbr
+ - JJhooww/Mistral_Relora_Step2k
+ - josu/gpt-neo-pt-1.3B
+ - josu/gpt-neo-pt-br
+ - lmsys/vicuna-13b-v1.5
+ - lmsys/vicuna-7b-v1.5
+ - lrds-code/boana-7b-instruct
+ - lrds-code/samba-1.1B
+ - maritaca-ai/sabia-7b
+ - meta-llama/Llama-2-13b-hf
+ - meta-llama/Llama-2-70b-chat-hf
+ - meta-llama/Llama-2-70b-hf
+ - meta-llama/Llama-2-7b-hf
+ - microsoft/phi-1_5
+ - microsoft/phi-1
+ - microsoft/phi-2
+ - mistralai/Mistral-7B-Instruct-v0.2
+ - mistralai/Mistral-7B-v0.1
+ - mistralai/Mixtral-8x7B-Instruct-v0.1
+ - mistralai/Mixtral-8x7B-v0.1
+ - mlabonne/Monarch-7B
+ - monilouise/opt125M_portuguese
+ - nicholasKluge/Aira-2-portuguese-124M
+ - nicholasKluge/Aira-2-portuguese-1B7
+ - nicholasKluge/Aira-2-portuguese-560M
+ - nicholasKluge/TeenyTinyLlama-160m
+ - nicholasKluge/TeenyTinyLlama-460m-Chat
+ - nicholasKluge/TeenyTinyLlama-460m
+ - nicolasdec/cabra13b
+ - nicolasdec/CabraMistral7b-0.4
+ - nicolasdec/Cabra
+ - NousResearch/Nous-Hermes-13b
+ - NousResearch/Nous-Hermes-2-Mistral-7B-DPO
+ - NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO
+ - NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO
+ - NousResearch/Nous-Hermes-2-SOLAR-10.7B
+ - NousResearch/Nous-Hermes-2-Yi-34B
+ - NOVA-vision-language/GlorIA-1.3B
+ - openchat/openchat-3.5-0106
+ - openlm-research/open_llama_13b
+ - openlm-research/open_llama_3b
+ - openlm-research/open_llama_3b_v2
+ - openlm-research/open_llama_7b
+ - openlm-research/open_llama_7b_v2
+ - paulml/OGNO-7B
+ - pierreguillou/gpt2-small-portuguese
+ - PORTULAN/gervasio-7b-portuguese-ptbr-decoder
+ - PORTULAN/gervasio-7b-portuguese-ptpt-decoder
+ - pucpr/gpt2-bio-pt
+ - Qwen/Qwen-14B
+ - Qwen/Qwen-1_8B-Chat
+ - Qwen/Qwen-1_8B
+ - Qwen/Qwen-72B-Chat
+ - Qwen/Qwen-72B
+ - Qwen/Qwen-7B-Chat
+ - Qwen/Qwen-7B
+ - Qwen/Qwen1.5-0.5B-Chat
+ - Qwen/Qwen1.5-0.5B
+ - Qwen/Qwen1.5-1.8B-Chat
+ - Qwen/Qwen1.5-1.8B
+ - Qwen/Qwen1.5-14B-Chat
+ - Qwen/Qwen1.5-14B
+ - Qwen/Qwen1.5-4B-Chat
+ - Qwen/Qwen1.5-4B
+ - Qwen/Qwen1.5-72B-Chat
+ - Qwen/Qwen1.5-72B
+ - Qwen/Qwen1.5-7B-Chat
+ - Qwen/Qwen1.5-7B
+ - recogna-nlp/bode-13b-alpaca-pt-br
+ - recogna-nlp/bode-7b-alpaca-pt-br
+ - rishiraj/CatPPT-base
+ - rishiraj/CatPPT
+ - stabilityai/stablelm-2-1_6b
+ - stabilityai/stablelm-2-zephyr-1_6b
+ - stabilityai/stablelm-3b-4e1t
+ - stabilityai/stablelm-zephyr-3b
+ - teknium/OpenHermes-2-Mistral-7B
+ - teknium/OpenHermes-2.5-Mistral-7B
+ - TencentARC/Mistral_Pro_8B_v0.1
+ - tiiuae/falcon-40b
+ - tiiuae/falcon-7b
+ - TinyLlama/TinyLlama-1.1B-Chat-v1.0
+ - TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+ - Unbabel/TowerBase-7B-v0.1
+ - upstage/SOLAR-10.7B-Instruct-v1.0
+ - upstage/SOLAR-10.7B-v1.0
+ - wandgibaut/periquito-3B
+ - xverse/XVERSE-65B-2
+ - xverse/XVERSE-65B
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
update_models_in_readme.py ADDED
@@ -0,0 +1,27 @@
+ from src.envs import (
+     API,
+     EVAL_REQUESTS_PATH,
+     DYNAMIC_INFO_REPO,
+     DYNAMIC_INFO_FILE_PATH,
+     DYNAMIC_INFO_PATH,
+     EVAL_RESULTS_PATH,
+     H4_TOKEN, IS_PUBLIC,
+     QUEUE_REPO,
+     REPO_ID,
+     RESULTS_REPO,
+     SHOW_INCOMPLETE_EVALS
+ )
+ from huggingface_hub import snapshot_download
+ import os
+ import glob
+ import json
+
+ snapshot_download(repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30)
+
+ with open('model_list.txt', 'w') as fw:
+     for filepath in glob.glob(os.path.join(EVAL_REQUESTS_PATH, '**/*.json'), recursive=True):
+         with open(filepath, 'r') as f:
+             model_data = json.load(f)
+         if model_data['status'] == 'FINISHED':
+             print(model_data['model'])
+             fw.write(' - ' + model_data['model'] + '\n')
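
Note: the committed script only writes `model_list.txt`; the `models:` block above appears to have been pasted into README.md separately. A minimal sketch of how that splice could be automated is shown below. It assumes the README front matter stays delimited by `---` markers and keeps a top-level `models:` key; this helper is hypothetical and not part of the commit.

```python
# Hypothetical follow-up step (not part of this commit): splice the entries
# from model_list.txt into the `models:` key of README.md's YAML front matter.
import re

with open('model_list.txt') as f:
    model_lines = f.read().rstrip('\n')

with open('README.md') as f:
    readme = f.read()

# The front matter is the block between the first two '---' markers.
head, front_matter, body = readme.split('---', 2)

# Drop any existing `models:` block, then append the regenerated one at the
# end of the front matter (assumes the key sits at the top level, as above).
front_matter = re.sub(r'\nmodels:\n(?: - .*\n)*', '\n', front_matter)
front_matter = front_matter.rstrip('\n') + '\nmodels:\n' + model_lines + '\n'

with open('README.md', 'w') as f:
    f.write(head + '---' + front_matter + '---' + body)
```

Running `python update_models_in_readme.py` followed by a step like this would keep the README's model list in sync with finished evaluation requests.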