Junxiong Wang committed
Commit 366d1cf
1 Parent(s): a981c20

fix configs

Files changed (1)
  1. configs.yaml +4 -4
configs.yaml CHANGED
@@ -1,13 +1,13 @@
-llama3_mamba_0_5_sft_3dataset_ep1:
+MambaInLlama_0_50:
   prompt_template: "zephyr-7b-alpha/prompt.txt"
   fn_completions: "huggingface_local_completions"
   completions_kwargs:
-    model_name: "/data/junxiong/sft/dpo/llama3_mamba_0_50_sft_3dataset_ep1/"
+    model_name: "JunxiongWang/MambaInLlama_0_50"
     model_kwargs:
       torch_dtype: 'bfloat16'
     max_new_tokens: 2048
     temperature: 0.7
     top_p: 1.0
     do_sample: True
-  pretty_name: "Mamba 0 5 From Zephyr 7B Beta"
-  link: "https://huggingface.co/HuggingFaceH4/zephyr-7b-beta"
+  pretty_name: "Mamba 0 5 From meta-llama/Meta-Llama-3-8B-Instruct"
+  link: "https://huggingface.co/JunxiongWang/MambaInLlama_0_50/"