base_model: Delta-Vector/Control-8B-V1.1
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

load_in_8bit: false
load_in_4bit: false
strict: false

hub_model_id: jeiku/controlkto
hub_strategy: "all_checkpoints"
push_dataset_to_hub:
hf_use_auth_token: true

chat_template: llama3

rl: kto
rl_beta: 0.2
kto_desirable_weight: 0.2

datasets:
  - path: anthracite-core/full-opus-chosen-hermes-rejected-kto-v1
    type: llama3.argilla

shuffle_merged_datasets: true
val_set_size: 0.0
output_dir: ./outputs/out

adapter: lora
lora_model_dir:

lora_r: 32
lora_alpha: 64
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:

sequence_len: 8192
sample_packing: false
eval_sample_packing: false
pad_to_sequence_len: false

wandb_project: controlkto
wandb_entity:
wandb_watch:
wandb_name: controlkto
wandb_log_model:

gradient_accumulation_steps: 16
micro_batch_size: 2
num_epochs: 2
max_steps: 500

optimizer: adamw_8bit
lr_scheduler: cosine
learning_rate: 0.0001
weight_decay: 0.05

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: true

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: true
remove_unused_columns: false
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 10
evals_per_epoch: 2
eval_table_size:
eval_max_new_tokens:
saves_per_epoch: 1
debug:
deepspeed:
fsdp:
fsdp_config:

special_tokens:
  pad_token: <|finetune_right_pad_id|>
  eos_token: <|eot_id|>
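
# Usage (a minimal sketch, not part of the original config): assuming this file
# is saved as controlkto.yml, Axolotl's standard CLI entry point would launch
# the KTO run described above:
#
#   accelerate launch -m axolotl.cli.train controlkto.yml
#
# Note on the batch settings above: with micro_batch_size: 2 and
# gradient_accumulation_steps: 16, the effective per-device batch size is
# 2 * 16 = 32 samples per optimizer step.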