## dpo 2
# DPO fine-tuning config: pythia-1b SFT model on relabeled TLDR summarization preferences.
pseudo_dataset_name: mnoukhov/summarize_from_feedback_tldr3_generated_20k_relabel_pythia1b_dpo_temp0.7_length128
# quoted: contains "[" / ":" flow indicators, keep it a string slice expression
train_split: "train[:1]"
max_prompt_length: 512
max_target_length: 131
max_length: 640

## costa stuff
model_name: vwxyzjn/EleutherAI_pythia-1b-deduped__sft__tldr
model_revision: sft__55513__1706646024
dataset_name: vwxyzjn/summarize_from_feedback_oai_preprocessing_1706381144
tokenizer_name: EleutherAI/pythia-1b-deduped
prompt_field: query
eval_split: validation

## hub stuff
push_to_hub: true
push_to_hub_organization: mnoukhov

## training stuff
gold_eval: ppl
# fractional steps: 0.2 = evaluate/save every 20% of training
eval_steps: 0.2
save_steps: 0.2
beta: 0.5
# -1: no hard step cap, run the full num_train_epochs
max_steps: -1
num_train_epochs: 1
load_in_8bit: false
bf16: true
fp16: false
# written with a decimal point so YAML 1.1 parsers (e.g. PyYAML) resolve a float,
# not the string "3e-6"; same numeric value
learning_rate: 3.0e-6
use_peft: true
lora_all_linear: true
lora_r: 8
lora_alpha: 32
lora_dropout: 0.05
gradient_accumulation_steps: 4
per_device_train_batch_size: 16
per_device_eval_batch_size: 4
warmup_steps: 150