## costa stuff
model_name: vwxyzjn/EleutherAI_pythia-1b-deduped__sft__tldr
model_revision: sft__55513__1706646024
dataset_name: vwxyzjn/summarize_from_feedback_oai_preprocessing_1706381144
tokenizer_name: EleutherAI/pythia-1b-deduped
prompt_field: query
eval_split: validation
pseudo_dataset_name: mnoukhov/summarize_from_feedback_tldr3_generated_20k_relabel_pythia1b_dpo
max_target_length: 128

## hub stuff
push_to_hub: True
push_to_hub_organization: mnoukhov

## training stuff
gold_eval: ppl
eval_steps: 0.2
save_steps: 0.2
train_split: train[:1]
beta: 0.5
max_steps: -1
num_train_epochs: 5
load_in_8bit: False
bf16: False
fp16: True
learning_rate: 1e-5
use_peft: True
lora_all_linear: True
lora_r: 8
lora_alpha: 32
lora_dropout: 0.05
gradient_accumulation_steps: 4
per_device_train_batch_size: 4
per_device_eval_batch_size: 4
warmup_steps: 150
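The sketch below is one way a config like this could be wired into a LoRA DPO run with transformers/peft/trl; it is not the author's actual script. The config filename, the output/hub model names, and the trainer wiring (here the TRL 0.7-era `DPOTrainer` keyword API) are assumptions, and script-specific fields such as `gold_eval`, `pseudo_dataset_name`, `prompt_field`, `max_target_length`, and `lora_all_linear` are not modeled.

```python
# Minimal sketch, assuming the YAML above is saved to a file and consumed by a
# TRL-style DPO training script. Names marked "hypothetical" are illustrative only.
import yaml
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

with open("configs/pythia1b_dpo.yml") as f:  # hypothetical path
    cfg = yaml.safe_load(f)

tokenizer = AutoTokenizer.from_pretrained(cfg["tokenizer_name"])
model = AutoModelForCausalLM.from_pretrained(
    cfg["model_name"], revision=cfg["model_revision"]
)

train_dataset = load_dataset(cfg["dataset_name"], split=cfg["train_split"])
eval_dataset = load_dataset(cfg["dataset_name"], split=cfg["eval_split"])

# LoRA adapter settings; target-module selection for `lora_all_linear` is
# script-specific and omitted here.
peft_config = LoraConfig(
    r=cfg["lora_r"],
    lora_alpha=cfg["lora_alpha"],
    lora_dropout=cfg["lora_dropout"],
    task_type="CAUSAL_LM",
)

training_args = TrainingArguments(
    output_dir="pythia1b-dpo",                        # hypothetical name
    num_train_epochs=cfg["num_train_epochs"],
    max_steps=cfg["max_steps"],                       # -1 falls back to num_train_epochs
    learning_rate=float(cfg["learning_rate"]),        # PyYAML may parse 1e-5 as a string
    per_device_train_batch_size=cfg["per_device_train_batch_size"],
    per_device_eval_batch_size=cfg["per_device_eval_batch_size"],
    gradient_accumulation_steps=cfg["gradient_accumulation_steps"],
    warmup_steps=cfg["warmup_steps"],
    fp16=cfg["fp16"],
    bf16=cfg["bf16"],
    evaluation_strategy="steps",
    eval_steps=cfg["eval_steps"],                     # <1 is read as a fraction of total steps
    save_steps=cfg["save_steps"],
    push_to_hub=cfg["push_to_hub"],
    hub_model_id=f'{cfg["push_to_hub_organization"]}/pythia1b-dpo',
)

trainer = DPOTrainer(
    model,
    ref_model=None,            # with a PEFT adapter, the frozen base model serves as reference
    beta=cfg["beta"],          # DPO temperature
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    tokenizer=tokenizer,
    peft_config=peft_config,
)
trainer.train()
```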