Sean Cho committed on
Commit f73765d • 1 Parent(s): 495b288

Initial Korean version

Files changed (2)
  1. app.py +6 -6
  2. src/assets/text_content.py +41 -43
app.py CHANGED
@@ -374,7 +374,7 @@ with demo:
374
  gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
375
 
376
  with gr.Column():
377
- with gr.Accordion(f"โœ… Finished Evaluations ({len(finished_eval_queue_df)})", open=False):
378
  with gr.Row():
379
  finished_eval_table = gr.components.Dataframe(
380
  value=finished_eval_queue_df,
@@ -382,7 +382,7 @@ with demo:
382
  datatype=EVAL_TYPES,
383
  max_rows=5,
384
  )
385
- with gr.Accordion(f"๐Ÿ”„ Running Evaluation Queue ({len(running_eval_queue_df)})", open=False):
386
  with gr.Row():
387
  running_eval_table = gr.components.Dataframe(
388
  value=running_eval_queue_df,
@@ -391,7 +391,7 @@ with demo:
391
  max_rows=5,
392
  )
393
 
394
- with gr.Accordion(f"โณ Pending Evaluation Queue ({len(pending_eval_queue_df)})", open=False):
395
  with gr.Row():
396
  pending_eval_table = gr.components.Dataframe(
397
  value=pending_eval_queue_df,
@@ -400,7 +400,7 @@ with demo:
400
  max_rows=5,
401
  )
402
  with gr.Row():
403
- gr.Markdown("# โœ‰๏ธโœจ Submit your model here!", elem_classes="markdown-text")
404
 
405
  with gr.Row():
406
  with gr.Column():
@@ -443,7 +443,7 @@ with demo:
443
  label="Base model (for delta or adapter weights)"
444
  )
445
 
446
- submit_button = gr.Button("Submit Eval")
447
  submission_result = gr.Markdown()
448
  submit_button.click(
449
  add_new_eval,
@@ -460,7 +460,7 @@ with demo:
460
  )
461
 
462
  with gr.Row():
463
- refresh_button = gr.Button("Refresh")
464
  refresh_button.click(
465
  refresh,
466
  inputs=[],
 
374
  gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
375
 
376
  with gr.Column():
377
+ with gr.Accordion(f"โœ… ํ‰๊ฐ€ ์™„๋ฃŒ ({len(finished_eval_queue_df)})", open=False):
378
  with gr.Row():
379
  finished_eval_table = gr.components.Dataframe(
380
  value=finished_eval_queue_df,
 
382
  datatype=EVAL_TYPES,
383
  max_rows=5,
384
  )
385
+ with gr.Accordion(f"๐Ÿ”„ ํ‰๊ฐ€ ์ง„ํ–‰ ๋Œ€๊ธฐ์—ด ({len(running_eval_queue_df)})", open=False):
386
  with gr.Row():
387
  running_eval_table = gr.components.Dataframe(
388
  value=running_eval_queue_df,
 
391
  max_rows=5,
392
  )
393
 
394
+ with gr.Accordion(f"โณ ํ‰๊ฐ€ ๋Œ€๊ธฐ ๋Œ€๊ธฐ์—ด ({len(pending_eval_queue_df)})", open=False):
395
  with gr.Row():
396
  pending_eval_table = gr.components.Dataframe(
397
  value=pending_eval_queue_df,
 
400
  max_rows=5,
401
  )
402
  with gr.Row():
403
+ gr.Markdown("# โœ‰๏ธโœจ ์—ฌ๊ธฐ์—์„œ ๋ชจ๋ธ์„ ์ œ์ถœํ•ด์ฃผ์„ธ์š”!", elem_classes="markdown-text")
404
 
405
  with gr.Row():
406
  with gr.Column():
 
443
  label="Base model (for delta or adapter weights)"
444
  )
445
 
446
+ submit_button = gr.Button("Submit and get evaluated")
447
  submission_result = gr.Markdown()
448
  submit_button.click(
449
  add_new_eval,
 
460
  )
461
 
462
  with gr.Row():
463
+ refresh_button = gr.Button("Refresh")
464
  refresh_button.click(
465
  refresh,
466
  inputs=[],
src/assets/text_content.py CHANGED
@@ -56,53 +56,54 @@ CHANGELOG_TEXT = f"""
56
  - Release the leaderboard to public
57
  """
58
 
59
- TITLE = """<h1 align="center" id="space-title">🤗 Open LLM Leaderboard</h1>"""
60
 
61
  INTRODUCTION_TEXT = f"""
62
- 📝 The 🤗 Open LLM Leaderboard aims to track, rank and evaluate open LLMs and chatbots.
63
 
64
- 🤗 Submit a model for automated evaluation on the 🤗 GPU cluster on the "Submit" page!
 
 
65
 
66
- The leaderboard's backend runs the great [Eleuther AI Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) to compute numbers. Read more details and find reproducibility instructions on the "About" page!
67
-
68
- Other cool benchmarks for LLMs are developed at HuggingFace: 🙋🤖 [human and GPT4 evals](https://huggingface.co/spaces/HuggingFaceH4/human_eval_llm_leaderboard), 🖥️ [performance benchmarks](https://huggingface.co/spaces/optimum/llm-perf-leaderboard)
69
-
70
- Other labs also publish great resources: check out the [AlpacaEval Leaderboard](https://tatsu-lab.github.io/alpaca_eval/) and [MT Bench](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard), among others.
71
  """
72
 
73
  LLM_BENCHMARKS_TEXT = f"""
74
  # Context
75
- With the plethora of large language models (LLMs) and chatbots being released week upon week, often with grandiose claims about their performance, it can be hard to identify the genuine progress being made by the open-source community and which model is the current state of the art.
76
 
77
  ## Icons
78
  {ModelType.PT.to_str(" : ")} model
79
  {ModelType.FT.to_str(" : ")} model
80
  {ModelType.IFT.to_str(" : ")} model
81
  {ModelType.RL.to_str(" : ")} model
82
- If there is no icon, we have not uploaded the information on the model yet; feel free to open an issue with the model information!
 
83
 
84
- ## How it works
 
85
 
86
- 📈 We evaluate models on 4 key benchmarks using the <a href="https://github.com/EleutherAI/lm-evaluation-harness" target="_blank"> Eleuther AI Language Model Evaluation Harness </a>, a unified framework to test generative language models on a large number of different evaluation tasks.
87
 
88
- - <a href="https://arxiv.org/abs/1803.05457" target="_blank"> AI2 Reasoning Challenge </a> (25-shot) - a set of grade-school science questions.
89
- - <a href="https://arxiv.org/abs/1905.07830" target="_blank"> HellaSwag </a> (10-shot) - a test of commonsense inference, which is easy for humans (~95%) but challenging for SOTA models.
90
- - <a href="https://arxiv.org/abs/2009.03300" target="_blank"> MMLU </a> (5-shot) - a test to measure a text model's multitask accuracy. The test covers 57 tasks including elementary mathematics, US history, computer science, law, and more.
91
- - <a href="https://arxiv.org/abs/2109.07958" target="_blank"> TruthfulQA </a> (0-shot) - a test to measure a modelโ€™s propensity to reproduce falsehoods commonly found online. Note: TruthfulQA in the Harness is actually a minima a 6-shots task, as it is prepended by 6 examples systematically, even when launched using 0 for the number of few-shot examples.
 
 
 
 
92
 
93
- For all these evaluations, a higher score is a better score.
94
- We chose these benchmarks as they test reasoning and general knowledge across a wide variety of fields in 0-shot and few-shot settings.
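For intuition about the shot counts above: "k-shot" means that k solved examples are prepended to each test prompt. Below is a minimal, purely illustrative sketch of that idea; the Harness's real per-task prompt templates differ.

```python
# Minimal, illustrative sketch of k-shot prompt construction.
# The Eleuther AI Harness uses per-task templates; this is not its exact format.
def build_k_shot_prompt(solved_examples, test_question, k):
    parts = [f"Question: {q}\nAnswer: {a}" for q, a in solved_examples[:k]]
    parts.append(f"Question: {test_question}\nAnswer:")
    return "\n\n".join(parts)

# Hypothetical examples for illustration only.
examples = [("What is 2 + 2?", "4"), ("What is the capital of France?", "Paris")]
print(build_k_shot_prompt(examples, "What is 3 + 5?", k=2))
```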
95
 
96
  ## Details and logs
97
  You can find:
98
- - detailed numerical results in the `results` Hugging Face dataset: https://huggingface.co/datasets/open-llm-leaderboard/results
99
- - details on the input/outputs for the models in the `details` Hugging Face dataset: https://huggingface.co/datasets/open-llm-leaderboard/details
100
- - community queries and running status in the `requests` Hugging Face dataset: https://huggingface.co/datasets/open-llm-leaderboard/requests
101
 
102
  ## Reproducibility
103
- To reproduce our results, here are the commands you can run, using [this version](https://github.com/EleutherAI/lm-evaluation-harness/tree/b281b0921b636bc36ad05c0b0b0763bd6dd43463) of the Eleuther AI Harness:
104
- `python main.py --model=hf-causal --model_args="pretrained=<your_model>,use_accelerate=True,revision=<your_model_revision>"`
105
- ` --tasks=<task_list> --num_fewshot=<n_few_shot> --batch_size=2 --output_path=<output_path>`
106
 
107
  The total batch size we get for models which fit on one A100 node is 16 (8 GPUs * 2). If you don't use parallelism, adapt your batch size to fit.
108
  *You can expect results to vary slightly for different batch sizes because of padding.*
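As a concrete usage example, that command can also be driven from Python. This is a hedged sketch, to be run from inside the lm-evaluation-harness checkout: "gpt2" and the output path are hypothetical placeholders, and task names can differ between Harness versions, so verify `--tasks` against your checkout.

```python
import subprocess

# Hedged sketch of one evaluation run with the flags quoted above.
# "gpt2" stands in for your model; "arc_challenge" at 25-shot matches
# the benchmark list, but confirm task names in your Harness version.
subprocess.run(
    [
        "python", "main.py",
        "--model=hf-causal",
        "--model_args=pretrained=gpt2,use_accelerate=True,revision=main",
        "--tasks=arc_challenge",
        "--num_fewshot=25",
        "--batch_size=2",
        "--output_path=results/gpt2_arc.json",
    ],
    check=True,  # raise CalledProcessError if the run fails
)
```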
@@ -121,37 +122,34 @@ To get more information about quantization, see:
121
  """
122
 
123
  EVALUATION_QUEUE_TEXT = f"""
124
- # Evaluation Queue for the 🤗 Open LLM Leaderboard
125
-
126
- Models added here will be automatically evaluated on the 🤗 cluster.
127
 
128
- ## Some good practices before submitting a model
129
 
130
- ### 1) Make sure you can load your model and tokenizer using AutoClasses:
131
- ```python
132
  from transformers import AutoConfig, AutoModel, AutoTokenizer
133
  config = AutoConfig.from_pretrained("your model name", revision=revision)
134
  model = AutoModel.from_pretrained("your model name", revision=revision)
135
  tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
136
  ```
137
- If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
138
 
139
- Note: make sure your model is public!
140
- Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
 
141
 
142
- ### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
143
- It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
144
 
145
- ### 3) Make sure your model has an open license!
146
- This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
147
 
148
- ### 4) Fill out your model card
149
- When we add extra information about models to the leaderboard, it will be automatically taken from the model card
150
 
151
- ## In case of model failure
152
- If your model is displayed in the `FAILED` category, its execution stopped.
153
- Make sure you have followed the above steps first.
154
- If everything is done, check that you can launch the EleutherAI Harness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
155
  """
156
 
157
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
 
56
  - Release the leaderboard to public
57
  """
58
 
59
+ TITLE = """<h1 align="center" id="space-title">🚀 Open Ko-LLM Leaderboard</h1>"""
60
 
61
  INTRODUCTION_TEXT = f"""
62
+ 🚀 The Open Ko-LLM Leaderboard objectively evaluates the performance of Korean large language models.
63
 
64
+ "์ œ์ถœ" ํŽ˜์ด์ง€์—์„œ ๋ชจ๋ธ ์ œ์ถœ ์‹œ ์ž๋™์œผ๋กœ ํ‰๊ฐ€๋ฉ๋‹ˆ๋‹ค. ํ‰๊ฐ€์— ์‚ฌ์šฉ๋˜๋Š” GPU๋Š” KT์˜ ์ง€์›์œผ๋กœ ์šด์˜๋ฉ๋‹ˆ๋‹ค.
65
+ The evaluation data consists of datasets built to assess five aspects: expert knowledge, reasoning, hallucination, ethics, and common sense.
66
+ More detailed information about the benchmark datasets is provided on the "About" page.
67
 
68
+ The leaderboard is co-hosted by Upstage and NIA, and operated by Upstage.
 
 
 
 
69
  """
70
 
71
  LLM_BENCHMARKS_TEXT = f"""
72
  # Context
73
+ Outstanding LLMs are being released one after another, but most of them are English-centric models steeped in English-speaking culture. We run the Korean leaderboard, 🚀 Open Ko-LLM, to evaluate models that reflect the characteristics of the Korean language and Korean culture. Through it, we hope that Korean users can conveniently use and take part in the leaderboard, contributing to raising the level of research in Korea.
74
 
75
  ## Icons
76
  {ModelType.PT.to_str(" : ")} model
77
  {ModelType.FT.to_str(" : ")} model
78
  {ModelType.IFT.to_str(" : ")} model
79
  {ModelType.RL.to_str(" : ")} model
80
+ If there is no icon, it means that information about the model is still missing.
81
+ Please send us model information by opening an issue! 🤩
82
 
83
+ 🏴‍☠️ : this icon means that the model has been flagged by the community and should be used with caution. Clicking the icon takes you to the discussion about that model.
84
+ (Models that, for example, trained on the evaluation sets to climb the leaderboard are flagged.)
85
 
86
+ ## How it works
87
 
88
+ 📈 We composed the benchmark from six datasets in total, including Korean translations of the data for the four tasks (HellaSwag, MMLU, Arc, Truthful QA) run by the HuggingFace Open LLM Leaderboard.
89
+ - Ko-HellaSwag (provided by Upstage)
90
+ - Ko-MMLU (provided by Upstage)
91
+ - Ko-Arc (provided by Upstage)
92
+ - Ko-Truthful QA (provided by Upstage)
93
+ - KoCommongen (provided by NIA, the National Information Society Agency)
94
+ - Text ethics verification data (provided by NIA, the National Information Society Agency)
95
+ LLM ์‹œ๋Œ€์— ๊ฑธ๋งž๋Š” ํ‰๊ฐ€๋ฅผ ์œ„ํ•ด ์ƒ์‹, ์ „๋ฌธ ์ง€์‹, ์ถ”๋ก , ํ™˜๊ฐ, ์œค๋ฆฌ์˜ ๋‹ค์„ฏ๊ฐ€์ง€ ์š”์†Œ๋ฅผ ํ‰๊ฐ€ํ•˜๊ธฐ์— ์ ํ•ฉํ•œ ๋ฐ์ดํ„ฐ์…‹๋“ค์„ ๋ฒค์น˜๋งˆํฌ๋กœ ์„ ์ •ํ–ˆ์Šต๋‹ˆ๋‹ค. ์ตœ์ข… ์ ์ˆ˜๋Š” 6๊ฐœ์˜ ํ‰๊ฐ€ ๋ฐ์ดํ„ฐ์— ๋Œ€ํ•œ ํ‰๊ท  ์ ์ˆ˜๋กœ ํ™˜์‚ฐํ•ฉ๋‹ˆ๋‹ค.
96
 
97
+ The GPUs used for evaluation are provided by KT.
 
98
 
99
  ## Details and logs
100
  You can find:
101
+ - more detailed numerical results: https://huggingface.co/datasets/open-llm-leaderboard/results
102
+ - detailed information on the models' inputs and outputs: https://huggingface.co/datasets/open-llm-leaderboard/details
103
+ - the models' evaluation queue and evaluation status: https://huggingface.co/datasets/open-llm-leaderboard/requests
104
 
105
  ## Reproducibility
106
+ To reproduce the evaluation results, use [this version](https://github.com/EleutherAI/lm-evaluation-harness/tree/b281b0921b636bc36ad05c0b0b0763bd6dd43463) of the Eleuther AI Harness. (The part below is code and the evaluation environment, so it is skipped for now.)
 
 
107
 
108
  The total batch size we get for models which fit on one A100 node is 16 (8 GPUs * 2). If you don't use parallelism, adapt your batch size to fit.
109
  *You can expect results to vary slightly for different batch sizes because of padding.*
 
122
  """
123
 
124
  EVALUATION_QUEUE_TEXT = f"""
125
+ # Evaluation queue for the 🚀 Open-Ko LLM Leaderboard
126
+ Models added here will soon be evaluated automatically on KT's GPUs!
 
127
 
128
+ ## <Things to check before submitting a model>
129
 
130
+ ### 1️⃣ Can your model and tokenizer be loaded with AutoClasses?
131
+ ```python
132
  from transformers import AutoConfig, AutoModel, AutoTokenizer
133
  config = AutoConfig.from_pretrained("your model name", revision=revision)
134
  model = AutoModel.from_pretrained("your model name", revision=revision)
135
  tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
136
  ```
 
137
 
138
+ If this step fails, follow the error messages to debug your model before submitting it.
139
+ ⚠️ Your model must be public!
140
+ ⚠️ If your model requires use_remote_code=True, please wait for a bit. We do not support this option yet, but we are working on making it usable!
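One hedged way to pre-check the first warning before submitting is to query the Hub; a minimal sketch using `huggingface_hub`, with a hypothetical repo id:

```python
from huggingface_hub import model_info

repo_id = "your-org/your-model"  # hypothetical repo id

# For a repo you can see, `private` must be False to submit.
# For a repo you cannot see at all, this call raises an error,
# which is itself a sign the model is not public.
info = model_info(repo_id)
print("public:", not info.private)
```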
141
 
142
+ ### 2️⃣ Did you convert your model's weights to safetensors?
143
+ safetensors is a new format for storing weights that is much safer and faster to use. It also lets us add the model's parameter count to the Extended Viewer.
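A minimal conversion sketch, with a hypothetical model id: reloading the checkpoint and re-saving with `safe_serialization=True` writes `model.safetensors` instead of `pytorch_model.bin`.

```python
from transformers import AutoModel, AutoTokenizer

repo = "your-org/your-model"  # hypothetical model id

# Re-save the checkpoint in the safetensors format.
model = AutoModel.from_pretrained(repo)
tokenizer = AutoTokenizer.from_pretrained(repo)
model.save_pretrained("your-model-safetensors", safe_serialization=True)
tokenizer.save_pretrained("your-model-safetensors")  # keep tokenizer files alongside
```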
144
 
145
+ ### 3️⃣ Does your model use an open license?
146
+ 🚀 Open-Ko LLM is a leaderboard for open LLMs, and we hope that many people will use a variety of models.
147
 
148
+ ### 4️⃣ Did you fill out the model card?
149
+ When additional information about your model is uploaded to the leaderboard, the model card you wrote is uploaded along with it.
150
 
151
+ ## If your model failed:
152
+ If your submitted model's status becomes FAILED, it means that its execution stopped. First, check that you followed all four steps above. If it still fails after following every step, run the code above without modification to check that you can run the EleutherAI Harness locally. (You can add the `--limit` parameter to limit the number of examples per task.)
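For a quick local smoke test in that situation, a hedged sketch mirroring the earlier invocation with a small `--limit`; the model id and task name below are hypothetical placeholders, to be run inside the lm-evaluation-harness checkout.

```python
import subprocess

# Hedged smoke test: --limit caps the number of examples per task,
# which surfaces loading/runtime errors quickly. Model and task
# names are placeholders; verify them against your Harness checkout.
subprocess.run(
    [
        "python", "main.py",
        "--model=hf-causal",
        "--model_args=pretrained=your-org/your-model",
        "--tasks=hellaswag",
        "--num_fewshot=0",
        "--batch_size=2",
        "--limit=10",
        "--output_path=results/smoke.json",
    ],
    check=True,
)
```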
 
 
153
  """
154
 
155
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"