models:
  - model: NousResearch/Meta-Llama-3-8B
    # Base model providing a general foundation without specific parameters
  - model: NousResearch/Meta-Llama-3-8B-Instruct
    parameters:
      density: 0.60
      weight: 0.25
  - model: winglian/llama-3-8b-1m-PoSE
    parameters:
      density: 0.55
      weight: 0.15
  - model: MaziyarPanahi/Llama-3-8B-Instruct-DPO-v0.3
    parameters:
      density: 0.55
      weight: 0.15
  - model: asiansoul/Llama-3-Open-Ko-Linear-8B
    parameters:
      density: 0.55
      weight: 0.2
  - model: nayohan/llama3-8b-it-translation-general-en-ko-1sent
    parameters:
      density: 0.55
      weight: 0.1
  - model: cognitivecomputations/dolphin-2.9-llama3-8b
    parameters:
      density: 0.55
      weight: 0.1
  - model: Danielbrdz/Barcenas-Llama3-8b-ORPO
    parameters:
      density: 0.55
      weight: 0.05
  - model: NousResearch/Hermes-2-Pro-Llama-3-8B
    parameters:
      density: 0.55
      weight: 0.1
merge_method: dare_ties
base_model: NousResearch/Meta-Llama-3-8B
parameters:
  int8_mask: true
dtype: bfloat16
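
For reference, below is a minimal sketch of how a configuration like this could be run programmatically, assuming mergekit's Python API (`MergeConfiguration`, `MergeOptions`, and `run_merge`) as exposed by recent releases; the config filename and output path are placeholders, not part of the original recipe.

```python
# Sketch: load the YAML above and run the DARE-TIES merge with mergekit.
# Paths and option values are illustrative assumptions.
import yaml

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

# Parse the merge recipe (saved here as "config.yaml") into a validated config object.
with open("config.yaml", "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

# Execute the merge and write the resulting model to the output directory.
run_merge(
    merge_config,
    out_path="./merged-llama-3-8b",
    options=MergeOptions(
        cuda=False,            # set True to run the merge on GPU
        copy_tokenizer=True,   # carry the base model's tokenizer into the output
        lazy_unpickle=True,    # lower peak RAM while loading checkpoints
    ),
)
```

The same merge should also be reachable through the `mergekit-yaml` command-line entry point (e.g. `mergekit-yaml config.yaml ./merged-llama-3-8b`), which reads the identical YAML recipe.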