{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 189,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015873015873015872,
      "grad_norm": 15.294358962776673,
      "learning_rate": 2.6315789473684208e-08,
      "logits/chosen": -1.6171875,
      "logits/rejected": -1.4296875,
      "logps/chosen": -139.0,
      "logps/rejected": -104.5,
      "loss": 0.6914,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.15873015873015872,
      "grad_norm": 15.094161248794443,
      "learning_rate": 2.631578947368421e-07,
      "logits/chosen": -1.5859375,
      "logits/rejected": -1.53125,
      "logps/chosen": -149.0,
      "logps/rejected": -129.0,
      "loss": 0.6923,
      "rewards/accuracies": 0.2222222238779068,
      "rewards/chosen": -0.0013885498046875,
      "rewards/margins": 0.0045166015625,
      "rewards/rejected": -0.005889892578125,
      "step": 10
    },
    {
      "epoch": 0.31746031746031744,
      "grad_norm": 15.606155891832502,
      "learning_rate": 4.970588235294118e-07,
      "logits/chosen": -1.5703125,
      "logits/rejected": -1.546875,
      "logps/chosen": -152.0,
      "logps/rejected": -133.0,
      "loss": 0.6871,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": -0.031005859375,
      "rewards/margins": 0.00125885009765625,
      "rewards/rejected": -0.0322265625,
      "step": 20
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 14.846625318284318,
      "learning_rate": 4.676470588235294e-07,
      "logits/chosen": -1.6015625,
      "logits/rejected": -1.609375,
      "logps/chosen": -159.0,
      "logps/rejected": -149.0,
      "loss": 0.6824,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.06298828125,
      "rewards/margins": 0.0184326171875,
      "rewards/rejected": -0.08154296875,
      "step": 30
    },
    {
      "epoch": 0.6349206349206349,
      "grad_norm": 12.673127513530932,
      "learning_rate": 4.38235294117647e-07,
      "logits/chosen": -1.59375,
      "logits/rejected": -1.578125,
      "logps/chosen": -170.0,
      "logps/rejected": -155.0,
      "loss": 0.6793,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.0751953125,
      "rewards/margins": 0.042236328125,
      "rewards/rejected": -0.11767578125,
      "step": 40
    },
    {
      "epoch": 0.7936507936507936,
      "grad_norm": 14.318265772228457,
      "learning_rate": 4.0882352941176465e-07,
      "logits/chosen": -1.59375,
      "logits/rejected": -1.546875,
      "logps/chosen": -146.0,
      "logps/rejected": -126.5,
      "loss": 0.6741,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.1357421875,
      "rewards/margins": 0.0634765625,
      "rewards/rejected": -0.19921875,
      "step": 50
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 14.53000414550404,
      "learning_rate": 3.7941176470588235e-07,
      "logits/chosen": -1.5234375,
      "logits/rejected": -1.5,
      "logps/chosen": -138.0,
      "logps/rejected": -138.0,
      "loss": 0.6696,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": -0.1845703125,
      "rewards/margins": 0.024169921875,
      "rewards/rejected": -0.208984375,
      "step": 60
    },
    {
      "epoch": 1.0,
      "eval_logits/chosen": -1.5,
      "eval_logits/rejected": -1.4921875,
      "eval_logps/chosen": -140.0,
      "eval_logps/rejected": -127.0,
      "eval_loss": 0.6568750143051147,
      "eval_rewards/accuracies": 0.5714285969734192,
      "eval_rewards/chosen": -0.1796875,
      "eval_rewards/margins": 0.0966796875,
      "eval_rewards/rejected": -0.27734375,
      "eval_runtime": 14.7253,
      "eval_samples_per_second": 13.582,
      "eval_steps_per_second": 0.475,
      "step": 63
    },
    {
      "epoch": 1.1111111111111112,
      "grad_norm": 13.811509421726951,
      "learning_rate": 3.5e-07,
      "logits/chosen": -1.53125,
      "logits/rejected": -1.546875,
      "logps/chosen": -142.0,
      "logps/rejected": -147.0,
      "loss": 0.5957,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.1337890625,
      "rewards/margins": 0.2177734375,
      "rewards/rejected": -0.3515625,
      "step": 70
    },
    {
      "epoch": 1.2698412698412698,
      "grad_norm": 11.993229269242237,
      "learning_rate": 3.205882352941177e-07,
      "logits/chosen": -1.5546875,
      "logits/rejected": -1.5390625,
      "logps/chosen": -150.0,
      "logps/rejected": -135.0,
      "loss": 0.5614,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.1328125,
      "rewards/margins": 0.28515625,
      "rewards/rejected": -0.41796875,
      "step": 80
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 11.581245327928885,
      "learning_rate": 2.911764705882353e-07,
      "logits/chosen": -1.5703125,
      "logits/rejected": -1.5078125,
      "logps/chosen": -144.0,
      "logps/rejected": -133.0,
      "loss": 0.5406,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.2333984375,
      "rewards/margins": 0.341796875,
      "rewards/rejected": -0.57421875,
      "step": 90
    },
    {
      "epoch": 1.5873015873015874,
      "grad_norm": 12.825913209383442,
      "learning_rate": 2.6176470588235295e-07,
      "logits/chosen": -1.5703125,
      "logits/rejected": -1.53125,
      "logps/chosen": -152.0,
      "logps/rejected": -138.0,
      "loss": 0.5382,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.26953125,
      "rewards/margins": 0.390625,
      "rewards/rejected": -0.66015625,
      "step": 100
    },
    {
      "epoch": 1.746031746031746,
      "grad_norm": 12.686457389394935,
      "learning_rate": 2.323529411764706e-07,
      "logits/chosen": -1.5703125,
      "logits/rejected": -1.5625,
      "logps/chosen": -160.0,
      "logps/rejected": -154.0,
      "loss": 0.5295,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.23046875,
      "rewards/margins": 0.46484375,
      "rewards/rejected": -0.6953125,
      "step": 110
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 11.815278327307368,
      "learning_rate": 2.0294117647058823e-07,
      "logits/chosen": -1.5546875,
      "logits/rejected": -1.5,
      "logps/chosen": -138.0,
      "logps/rejected": -137.0,
      "loss": 0.5284,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.33984375,
      "rewards/margins": 0.365234375,
      "rewards/rejected": -0.70703125,
      "step": 120
    },
    {
      "epoch": 2.0,
      "eval_logits/chosen": -1.484375,
      "eval_logits/rejected": -1.4765625,
      "eval_logps/chosen": -143.0,
      "eval_logps/rejected": -130.0,
      "eval_loss": 0.6310937404632568,
      "eval_rewards/accuracies": 0.625,
      "eval_rewards/chosen": -0.388671875,
      "eval_rewards/margins": 0.185546875,
      "eval_rewards/rejected": -0.57421875,
      "eval_runtime": 17.8485,
      "eval_samples_per_second": 11.205,
      "eval_steps_per_second": 0.392,
      "step": 126
    },
    {
      "epoch": 2.0634920634920633,
      "grad_norm": 10.190155022883303,
      "learning_rate": 1.7352941176470587e-07,
      "logits/chosen": -1.5703125,
      "logits/rejected": -1.515625,
      "logps/chosen": -154.0,
      "logps/rejected": -146.0,
      "loss": 0.4902,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.298828125,
      "rewards/margins": 0.51953125,
      "rewards/rejected": -0.8203125,
      "step": 130
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 11.482317016645608,
      "learning_rate": 1.441176470588235e-07,
      "logits/chosen": -1.5703125,
      "logits/rejected": -1.5390625,
      "logps/chosen": -146.0,
      "logps/rejected": -147.0,
      "loss": 0.4618,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.298828125,
      "rewards/margins": 0.57421875,
      "rewards/rejected": -0.875,
      "step": 140
    },
    {
      "epoch": 2.380952380952381,
      "grad_norm": 10.664901408549657,
      "learning_rate": 1.1470588235294116e-07,
      "logits/chosen": -1.484375,
      "logits/rejected": -1.484375,
      "logps/chosen": -139.0,
      "logps/rejected": -135.0,
      "loss": 0.4529,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -0.392578125,
      "rewards/margins": 0.56640625,
      "rewards/rejected": -0.95703125,
      "step": 150
    },
    {
      "epoch": 2.5396825396825395,
      "grad_norm": 11.135694541597557,
      "learning_rate": 8.529411764705883e-08,
      "logits/chosen": -1.546875,
      "logits/rejected": -1.46875,
      "logps/chosen": -138.0,
      "logps/rejected": -125.5,
      "loss": 0.4585,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -0.34375,
      "rewards/margins": 0.62109375,
      "rewards/rejected": -0.96484375,
      "step": 160
    },
    {
      "epoch": 2.6984126984126986,
      "grad_norm": 9.969592838659052,
      "learning_rate": 5.5882352941176474e-08,
      "logits/chosen": -1.515625,
      "logits/rejected": -1.515625,
      "logps/chosen": -144.0,
      "logps/rejected": -147.0,
      "loss": 0.4698,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -0.359375,
      "rewards/margins": 0.60546875,
      "rewards/rejected": -0.96484375,
      "step": 170
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 10.412087107819227,
      "learning_rate": 2.6470588235294116e-08,
      "logits/chosen": -1.5234375,
      "logits/rejected": -1.4921875,
      "logps/chosen": -147.0,
      "logps/rejected": -137.0,
      "loss": 0.4529,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.267578125,
      "rewards/margins": 0.63671875,
      "rewards/rejected": -0.90234375,
      "step": 180
    },
    {
      "epoch": 3.0,
      "eval_logits/chosen": -1.484375,
      "eval_logits/rejected": -1.46875,
      "eval_logps/chosen": -143.0,
      "eval_logps/rejected": -131.0,
      "eval_loss": 0.6217187643051147,
      "eval_rewards/accuracies": 0.6785714030265808,
      "eval_rewards/chosen": -0.462890625,
      "eval_rewards/margins": 0.220703125,
      "eval_rewards/rejected": -0.68359375,
      "eval_runtime": 17.9909,
      "eval_samples_per_second": 11.117,
      "eval_steps_per_second": 0.389,
      "step": 189
    },
    {
      "epoch": 3.0,
      "step": 189,
      "total_flos": 0.0,
      "train_loss": 0.5609137318121693,
      "train_runtime": 1965.8465,
      "train_samples_per_second": 3.051,
      "train_steps_per_second": 0.096
    }
  ],
  "logging_steps": 10,
  "max_steps": 189,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}