# SFT training launch for a 7B model on SQL data via NeMo-Aligner's train_gpt_sft.py,
# using Hydra-style `key=value` overrides (``++`` forces addition of a key not in the base config).
# NOTE(review): no interpreter prefix here — presumably run as `python <script> ...`
# or the script is executable; confirm against the launch wrapper.
# NOTE(review): train_ds and validation_ds both read nsql.jsonl — looks like a
# demo/smoke-test setup; verify a held-out validation file is not intended.
NeMo-Aligner/examples/nlp/gpt/train_gpt_sft.py \
    name=gemma-7b-sql-nemo \
    trainer.precision=bf16 \
    trainer.num_nodes=1 \
    trainer.devices=8 \
    trainer.sft.max_steps=-1 \
    trainer.sft.limit_val_batches=40 \
    trainer.sft.val_check_interval=1000 \
    model.tensor_model_parallel_size=4 \
    model.pipeline_model_parallel_size=1 \
    model.megatron_amp_O2=True \
    model.restore_from_path=/workspace/models/pytorch-7b-pt.nemo \
    model.optim.lr=5e-6 \
    model.answer_only_loss=True \
    ++model.bias_activation_fusion=true \
    model.data.num_workers=0 \
    model.data.train_ds.micro_batch_size=1 \
    model.data.train_ds.global_batch_size=128 \
    model.data.train_ds.max_seq_length=8192 \
    model.data.train_ds.file_path=nsql.jsonl \
    model.data.validation_ds.micro_batch_size=1 \
    model.data.validation_ds.global_batch_size=128 \
    model.data.validation_ds.drop_last=True \
    model.data.validation_ds.file_path=nsql.jsonl \
    exp_manager.explicit_log_dir=models/gemma-7b-sql-nemo \
    exp_manager.checkpoint_callback_params.save_nemo_on_train_end=True \
    exp_manager.resume_if_exists=True \
    exp_manager.resume_ignore_no_checkpoint=True \
    exp_manager.create_checkpoint_callback=True \
    exp_manager.checkpoint_callback_params.monitor=validation_loss