bf16: 'True'
chosen: chosen
cutoff_len: '2048'
dataset: mlfoundations-dev/gemma2-ultrafeedback-armorm
dataset_dir: ONLINE
ddp_timeout: '180000000'
deepspeed: /opt/ml/code/zero3.json
do_train: 'True'
enable_liger_kernel: 'False'
eval_strategy: epoch
finetuning_type: full
formatting: sharegpt
global_batch_size: '128'
gradient_accumulation_steps: '8'
gradient_checkpointing: 'True'
hub_model_id: mlfoundations-dev/simpo-evol_tt_5s
learning_rate: 8e-07
logging_steps: '1'
lr_scheduler_type: cosine
messages: conversations
model_name_or_path: mlfoundations-dev/evol_tt_5s
num_train_epochs: '1.0'
optim: adamw_torch
output_dir: /opt/ml/model
overwrite_cache: 'True'
overwrite_output_dir: 'True'
per_device_eval_batch_size: '2'
per_device_train_batch_size: '2'
plot_loss: 'True'
pref_beta: '10'
pref_loss: simpo
preprocessing_num_workers: '16'
push_to_db: 'True'
push_to_hub: 'True'
ranking: 'True'
rejected: rejected
report_to: wandb
run_name: simpo-evol_tt_5s
save_strategy: epoch
simpo_gamma: '5'
stage: dpo
template: gemma
val_size: '0.05'
warmup_ratio: '0.1'
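
For reference, the two preference-specific hyperparameters above, pref_beta (10) and simpo_gamma (5), enter the SimPO objective as the reward scale and the target reward margin. The sketch below is a minimal, illustrative implementation of that loss, not the exact code used for this run; the function name and argument names are assumptions, and only the beta/gamma defaults are taken from the config.

import torch
import torch.nn.functional as F

def simpo_loss(chosen_logps, rejected_logps, chosen_lengths, rejected_lengths,
               beta=10.0, gamma=5.0):
    """Length-normalized SimPO preference loss (hypothetical sketch).

    chosen_logps / rejected_logps: summed token log-probabilities of each
    response under the policy, shape [batch].
    chosen_lengths / rejected_lengths: number of response tokens, shape [batch].
    beta corresponds to `pref_beta`, gamma to `simpo_gamma` in the config above.
    """
    # Implicit reward: beta times the average per-token log-probability.
    chosen_rewards = beta * chosen_logps / chosen_lengths
    rejected_rewards = beta * rejected_logps / rejected_lengths
    # Reference-free margin loss: chosen reward should exceed rejected by gamma.
    logits = chosen_rewards - rejected_rewards - gamma
    return -F.logsigmoid(logits).mean()

Unlike DPO (the `stage` this run is launched under), SimPO is reference-free: no frozen reference model log-probabilities appear in the loss, and the length normalization plus the gamma margin take their place.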