eduagarcia committed
Commit
559bd6b
•
1 Parent(s): 155aef4

script for uploading initial request queue

initial_queue.jsonl ADDED
@@ -0,0 +1,142 @@
+ // 1- base models <=7B
+ {"model": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "meta-llama/Llama-2-7b-hf", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "mistralai/Mistral-7B-v0.1", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "huggyllama/llama-7b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "openlm-research/open_llama_3b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "openlm-research/open_llama_3b_v2", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "openlm-research/open_llama_7b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "openlm-research/open_llama_7b_v2", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ // 2 - portuguese models
+ {"model": "maritaca-ai/sabia-7b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ {"model": "dominguesm/canarim-7b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ {"model": "22h/open-cabrita3b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ {"model": "recogna-nlp/bode-7b-alpaca-pt-br", "base_model": "meta-llama/Llama-2-7b-chat-hf", "revision": "main", "precision": "float16", "weight_type": "Adapter", "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)"}
+ {"model": "recogna-nlp/bode-13b-alpaca-pt-br", "base_model": "meta-llama/Llama-2-13b-chat-hf", "revision": "main", "precision": "float16", "weight_type": "Adapter", "model_type": "💬 : chat models (RLHF, DPO, IFT, ...)"}
+ {"model": "22h/cabrita_7b_pt_850000", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ {"model": "22h/cabrita-lora-v0-1", "base_model": "huggyllama/llama-7b", "revision": "main", "precision": "float16", "weight_type": "Adapter", "model_type": "🔶 : fine-tuned"}
+ {"model": "pierreguillou/gpt2-small-portuguese", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ {"model": "pucpr/gpt2-bio-pt", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ {"model": "unicamp-dl/ptt5-small-portuguese-vocab", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ {"model": "unicamp-dl/ptt5-base-portuguese-vocab", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ {"model": "unicamp-dl/ptt5-large-portuguese-vocab", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ {"model": "unicamp-dl/ptt5-small-t5-vocab", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ {"model": "unicamp-dl/ptt5-base-t5-vocab", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ {"model": "unicamp-dl/ptt5-large-t5-vocab", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ {"model": "josu/gpt-neo-pt-br", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ {"model": "josu/gpt-neo-pt-1.3B", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ {"model": "monilouise/opt125M_portuguese", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ {"model": "HeyLucasLeao/gpt-neo-small-portuguese", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ // 3 - Larger base models <= 13B
+ {"model": "meta-llama/Llama-2-13b-hf", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "huggyllama/llama-13b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "openlm-research/open_llama_13b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "upstage/SOLAR-10.7B-v1.0", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ // other must-have <=7B
+ {"model": "01-ai/Yi-6B", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "Unbabel/TowerBase-7B-v0.1", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🆎 : language adapted models (FP, FT, ...)"}
+ {"model": "tiiuae/falcon-7b", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "bigscience/bloom-560m", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "bigscience/bloom-1b7", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "bigscience/bloom-3b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "bigscience/bloom-7b1", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "gpt2", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "stabilityai/stablelm-2-1_6b", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "stabilityai/stablelm-3b-4e1t", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "t5-small", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "t5-base", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "t5-large", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "google/mt5-small", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "google/mt5-base", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "google/mt5-large", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ // Larger base models >13B
+ {"model": "mistralai/Mixtral-8x7B-v0.1", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "huggyllama/llama-30b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "01-ai/Yi-34B", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "meta-llama/Llama-2-70b-hf", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "huggyllama/llama-65b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ // others
+ {"model": "internlm/internlm2-7b", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🔶 : fine-tuned"}
+ {"model": "internlm/internlm2-base-7b", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "internlm/internlm-7b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "internlm/internlm2-20b", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🔶 : fine-tuned"}
+ {"model": "internlm/internlm2-base-20b", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "internlm/internlm-20b", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "Qwen/Qwen-1_8B", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "Qwen/Qwen-7B", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "Qwen/Qwen-14B", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "Qwen/Qwen-72B", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "DAMO-NLP-MT/polylm-1.7b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "DAMO-NLP-MT/polylm-13b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "Deci/DeciLM-6b", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🔶 : fine-tuned"}
+ {"model": "Deci/DeciLM-7B", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🔶 : fine-tuned"}
+ {"model": "EleutherAI/pythia-14m", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/pythia-70m-deduped", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/pythia-160m-deduped", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/pythia-410m-deduped", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/pythia-1b-deduped", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/pythia-2.8b-deduped", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/pythia-6.9b-deduped", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/pythia-12b-deduped", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "facebook/opt-125m", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "facebook/opt-350m", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "facebook/opt-1.3b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "facebook/opt-2.7b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "facebook/opt-6.7b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "facebook/opt-13b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "facebook/opt-30b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "facebook/opt-66b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/gpt-neo-125m", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/gpt-neo-1.3B", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/gpt-neo-2.7B", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/gpt-j-6b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/gpt-neox-20b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "stabilityai/stablelm-base-alpha-3b-v2", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "stabilityai/stablelm-base-alpha-7b-v2", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "stabilityai/stablelm-base-alpha-3b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "stabilityai/stablelm-base-alpha-7b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "tiiuae/falcon-40b", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "openai-community/openai-gpt", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "openai-community/gpt2-medium", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "openai-community/gpt2-large", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "openai-community/gpt2-xl", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "microsoft/phi-1", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "microsoft/phi-1_5", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "microsoft/phi-2", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "mosaicml/mpt-7b", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "01-ai/Yi-6B-200K", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "01-ai/Yi-34B-200K", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "google/t5-v1_1-base", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "google/t5-v1_1-small", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "google/t5-v1_1-large", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "google/t5-v1_1-xl", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "google/t5-v1_1-xxl", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "google/mt5-xl", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "google/mt5-xxl", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "google/umt5-small", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "google/umt5-base", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "google/umt5-xl", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "google/umt5-xxl", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "AdaptLLM/law-LLM", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🔶 : fine-tuned"}
+ {"model": "AdaptLLM/medicine-LLM", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🔶 : fine-tuned"}
+ {"model": "AdaptLLM/finance-LLM", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🔶 : fine-tuned"}
+ {"model": "AdaptLLM/law-LLM-13B", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🔶 : fine-tuned"}
+ {"model": "AdaptLLM/medicine-LLM-13B", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🔶 : fine-tuned"}
+ {"model": "AdaptLLM/finance-LLM-13B", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🔶 : fine-tuned"}
+ {"model": "cerebras/Cerebras-GPT-111M", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "cerebras/Cerebras-GPT-256M", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "cerebras/Cerebras-GPT-590M", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "cerebras/Cerebras-GPT-1.3B", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "cerebras/Cerebras-GPT-2.7B", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "cerebras/Cerebras-GPT-6.7B", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "cerebras/Cerebras-GPT-13B", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "cerebras/btlm-3b-8k-base", "base_model": "", "revision": "main", "precision": "bfloat16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "ai-forever/mGPT-13B", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "ai-forever/mGPT", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/pythia-70m", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/pythia-160m", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/pythia-410m", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/pythia-1b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/pythia-2.8b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/pythia-6.9b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
+ {"model": "EleutherAI/pythia-12b", "base_model": "", "revision": "main", "precision": "float16", "weight_type": "Original", "model_type": "🟢 : pretrained"}
src/submission/check_validity.py CHANGED
@@ -3,6 +3,7 @@ import os
  import re
  from collections import defaultdict
  from datetime import datetime, timedelta, timezone
+ import traceback

  import huggingface_hub
  from huggingface_hub import ModelCard
@@ -49,6 +50,7 @@ def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_rem
                      None
                  )
              except Exception as e:
+                 traceback.print_exc()
                  return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
          return True, None, config

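
The only functional change to check_validity.py is that tokenizer-loading failures now print the full stack trace before the generic error message is returned, so rejected submissions can be diagnosed from the Space logs. A minimal, self-contained illustration of that pattern follows (the helper name is hypothetical, not the project's API):

import traceback

def load_or_explain(loader) -> tuple[bool, str | None]:
    """Run a loader callable; on failure, log the traceback and return a friendly message."""
    try:
        loader()
        return True, None
    except Exception:
        traceback.print_exc()  # full stack trace goes to stderr / the Space logs
        return False, "tokenizer cannot be loaded, check the logs for details"

if __name__ == "__main__":
    def broken_loader():
        raise RuntimeError("missing tokenizer_config.json")

    ok, err = load_or_explain(broken_loader)
    print(ok, err)
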
upload_initial_queue.py ADDED
@@ -0,0 +1,30 @@
+ import os
+ import json
+ from src.envs import API, EVAL_REQUESTS_PATH, DYNAMIC_INFO_REPO, DYNAMIC_INFO_FILE_PATH, DYNAMIC_INFO_PATH, EVAL_RESULTS_PATH, H4_TOKEN, IS_PUBLIC, QUEUE_REPO, REPO_ID, RESULTS_REPO
+ from huggingface_hub import snapshot_download
+ from src.submission.submit import add_new_eval
+
+ snapshot_download(
+     repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
+ )
+
+ with open('initial_queue.jsonl', 'r', encoding='utf-8') as outfile:
+     for line in outfile:
+         if not line.startswith('//'):
+             data = json.loads(line)
+             model_id = f'{data["model"]}_eval_request_False_{data["precision"]}_{data["weight_type"]}'
+             model_path = os.path.join(EVAL_REQUESTS_PATH, model_id + '.json')
+             if not os.path.exists(model_path):
+                 print(f"Adding new eval {model_id}")
+                 message = add_new_eval(
+                     model=data["model"],
+                     base_model=data["base_model"],
+                     revision=data["revision"],
+                     precision=data["precision"],
+                     private=False,
+                     weight_type=data["weight_type"],
+                     model_type=data["model_type"]
+                 )
+                 print(message)
+
+
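
The script is idempotent: for each queue entry it derives the request-file name that add_new_eval is expected to create and skips the model if that file already exists in the local snapshot of the queue repo. A small sketch of how that path is derived for one entry (EVAL_REQUESTS_PATH is a placeholder here; the real value comes from src.envs):

import os

EVAL_REQUESTS_PATH = "eval-queue"  # placeholder; src.envs defines the real local path
data = {"model": "mistralai/Mistral-7B-v0.1", "precision": "bfloat16", "weight_type": "Original"}

model_id = f'{data["model"]}_eval_request_False_{data["precision"]}_{data["weight_type"]}'
print(os.path.join(EVAL_REQUESTS_PATH, model_id + ".json"))
# eval-queue/mistralai/Mistral-7B-v0.1_eval_request_False_bfloat16_Original.json

Because the model name contains a slash, the request file lands in a per-organization subfolder, and the "False" segment mirrors the private=False argument passed to add_new_eval.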