|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 189,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015873015873015872,
      "grad_norm": 16.2200460330357,
      "learning_rate": 2.6315789473684208e-08,
      "logits/chosen": -1.5234375,
      "logits/rejected": -1.4375,
      "logps/chosen": -166.0,
      "logps/rejected": -129.0,
      "loss": 0.6914,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.15873015873015872,
      "grad_norm": 16.23804389503029,
      "learning_rate": 2.631578947368421e-07,
      "logits/chosen": -1.53125,
      "logits/rejected": -1.53125,
      "logps/chosen": -144.0,
      "logps/rejected": -120.0,
      "loss": 0.6946,
      "rewards/accuracies": 0.2361111044883728,
      "rewards/chosen": -0.000339508056640625,
      "rewards/margins": -0.000701904296875,
      "rewards/rejected": 0.0003509521484375,
      "step": 10
    },
    {
      "epoch": 0.31746031746031744,
      "grad_norm": 15.80676425202867,
      "learning_rate": 4.970588235294118e-07,
      "logits/chosen": -1.609375,
      "logits/rejected": -1.5234375,
      "logps/chosen": -162.0,
      "logps/rejected": -133.0,
      "loss": 0.6844,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.026611328125,
      "rewards/margins": 0.0341796875,
      "rewards/rejected": -0.060791015625,
      "step": 20
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 13.971741456622834,
      "learning_rate": 4.676470588235294e-07,
      "logits/chosen": -1.59375,
      "logits/rejected": -1.546875,
      "logps/chosen": -151.0,
      "logps/rejected": -133.0,
      "loss": 0.6601,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.10693359375,
      "rewards/margins": 0.07177734375,
      "rewards/rejected": -0.1787109375,
      "step": 30
    },
    {
      "epoch": 0.6349206349206349,
      "grad_norm": 14.439143279421419,
      "learning_rate": 4.38235294117647e-07,
      "logits/chosen": -1.625,
      "logits/rejected": -1.578125,
      "logps/chosen": -165.0,
      "logps/rejected": -145.0,
      "loss": 0.6307,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.158203125,
      "rewards/margins": 0.14453125,
      "rewards/rejected": -0.302734375,
      "step": 40
    },
    {
      "epoch": 0.7936507936507936,
      "grad_norm": 12.933116251658811,
      "learning_rate": 4.0882352941176465e-07,
      "logits/chosen": -1.5703125,
      "logits/rejected": -1.5078125,
      "logps/chosen": -155.0,
      "logps/rejected": -134.0,
      "loss": 0.6348,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.298828125,
      "rewards/margins": 0.203125,
      "rewards/rejected": -0.50390625,
      "step": 50
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 13.17457815976231,
      "learning_rate": 3.7941176470588235e-07,
      "logits/chosen": -1.5390625,
      "logits/rejected": -1.53125,
      "logps/chosen": -157.0,
      "logps/rejected": -148.0,
      "loss": 0.607,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.390625,
      "rewards/margins": 0.1884765625,
      "rewards/rejected": -0.578125,
      "step": 60
    },
    {
      "epoch": 1.0,
      "eval_logits/chosen": -1.609375,
      "eval_logits/rejected": -1.453125,
      "eval_logps/chosen": -161.0,
      "eval_logps/rejected": -117.5,
      "eval_loss": 0.6323873400688171,
      "eval_rewards/accuracies": 0.7250000238418579,
      "eval_rewards/chosen": -0.5625,
      "eval_rewards/margins": 0.2578125,
      "eval_rewards/rejected": -0.8203125,
      "eval_runtime": 8.7111,
      "eval_samples_per_second": 16.875,
      "eval_steps_per_second": 0.574,
      "step": 63
    },
    {
      "epoch": 1.1111111111111112,
      "grad_norm": 10.940163753846868,
      "learning_rate": 3.5e-07,
      "logits/chosen": -1.5625,
      "logits/rejected": -1.5234375,
      "logps/chosen": -151.0,
      "logps/rejected": -126.5,
      "loss": 0.5062,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -0.4453125,
      "rewards/margins": 0.46875,
      "rewards/rejected": -0.9140625,
      "step": 70
    },
    {
      "epoch": 1.2698412698412698,
      "grad_norm": 10.448217147222541,
      "learning_rate": 3.205882352941177e-07,
      "logits/chosen": -1.515625,
      "logits/rejected": -1.4609375,
      "logps/chosen": -142.0,
      "logps/rejected": -123.5,
      "loss": 0.4673,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.5625,
      "rewards/margins": 0.57421875,
      "rewards/rejected": -1.1328125,
      "step": 80
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 10.288072810717312,
      "learning_rate": 2.911764705882353e-07,
      "logits/chosen": -1.578125,
      "logits/rejected": -1.53125,
      "logps/chosen": -157.0,
      "logps/rejected": -147.0,
      "loss": 0.4288,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -0.53515625,
      "rewards/margins": 0.6953125,
      "rewards/rejected": -1.2265625,
      "step": 90
    },
    {
      "epoch": 1.5873015873015874,
      "grad_norm": 10.019896435666249,
      "learning_rate": 2.6176470588235295e-07,
      "logits/chosen": -1.609375,
      "logits/rejected": -1.6015625,
      "logps/chosen": -162.0,
      "logps/rejected": -155.0,
      "loss": 0.4419,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.640625,
      "rewards/margins": 0.65625,
      "rewards/rejected": -1.296875,
      "step": 100
    },
    {
      "epoch": 1.746031746031746,
      "grad_norm": 12.080281395370564,
      "learning_rate": 2.323529411764706e-07,
      "logits/chosen": -1.5625,
      "logits/rejected": -1.546875,
      "logps/chosen": -156.0,
      "logps/rejected": -148.0,
      "loss": 0.4641,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.53515625,
      "rewards/margins": 0.79296875,
      "rewards/rejected": -1.328125,
      "step": 110
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 11.329269122728626,
      "learning_rate": 2.0294117647058823e-07,
      "logits/chosen": -1.4921875,
      "logits/rejected": -1.5078125,
      "logps/chosen": -140.0,
      "logps/rejected": -145.0,
      "loss": 0.4385,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.5390625,
      "rewards/margins": 0.63671875,
      "rewards/rejected": -1.171875,
      "step": 120
    },
    {
      "epoch": 2.0,
      "eval_logits/chosen": -1.6015625,
      "eval_logits/rejected": -1.453125,
      "eval_logps/chosen": -161.0,
      "eval_logps/rejected": -119.5,
      "eval_loss": 0.6192867755889893,
      "eval_rewards/accuracies": 0.7250000238418579,
      "eval_rewards/chosen": -0.625,
      "eval_rewards/margins": 0.3515625,
      "eval_rewards/rejected": -0.9765625,
      "eval_runtime": 10.8318,
      "eval_samples_per_second": 13.571,
      "eval_steps_per_second": 0.462,
      "step": 126
    },
    {
      "epoch": 2.0634920634920633,
      "grad_norm": 9.801925381761396,
      "learning_rate": 1.7352941176470587e-07,
      "logits/chosen": -1.5625,
      "logits/rejected": -1.4921875,
      "logps/chosen": -159.0,
      "logps/rejected": -144.0,
      "loss": 0.404,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -0.263671875,
      "rewards/margins": 0.90234375,
      "rewards/rejected": -1.1640625,
      "step": 130
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 10.394699460580199,
      "learning_rate": 1.441176470588235e-07,
      "logits/chosen": -1.5859375,
      "logits/rejected": -1.53125,
      "logps/chosen": -152.0,
      "logps/rejected": -146.0,
      "loss": 0.3731,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -0.271484375,
      "rewards/margins": 0.96875,
      "rewards/rejected": -1.2421875,
      "step": 140
    },
    {
      "epoch": 2.380952380952381,
      "grad_norm": 7.8219119833188975,
      "learning_rate": 1.1470588235294116e-07,
      "logits/chosen": -1.5390625,
      "logits/rejected": -1.453125,
      "logps/chosen": -162.0,
      "logps/rejected": -132.0,
      "loss": 0.3477,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.376953125,
      "rewards/margins": 0.90625,
      "rewards/rejected": -1.28125,
      "step": 150
    },
    {
      "epoch": 2.5396825396825395,
      "grad_norm": 8.679287106154373,
      "learning_rate": 8.529411764705883e-08,
      "logits/chosen": -1.5625,
      "logits/rejected": -1.4921875,
      "logps/chosen": -163.0,
      "logps/rejected": -153.0,
      "loss": 0.3479,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -0.3046875,
      "rewards/margins": 1.125,
      "rewards/rejected": -1.4296875,
      "step": 160
    },
    {
      "epoch": 2.6984126984126986,
      "grad_norm": 8.351083925741897,
      "learning_rate": 5.5882352941176474e-08,
      "logits/chosen": -1.5078125,
      "logits/rejected": -1.53125,
      "logps/chosen": -146.0,
      "logps/rejected": -157.0,
      "loss": 0.3607,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.4609375,
      "rewards/margins": 0.9375,
      "rewards/rejected": -1.3984375,
      "step": 170
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 7.683812751753157,
      "learning_rate": 2.6470588235294116e-08,
      "logits/chosen": -1.53125,
      "logits/rejected": -1.546875,
      "logps/chosen": -154.0,
      "logps/rejected": -157.0,
      "loss": 0.3708,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.408203125,
      "rewards/margins": 0.96875,
      "rewards/rejected": -1.375,
      "step": 180
    },
    {
      "epoch": 3.0,
      "eval_logits/chosen": -1.6015625,
      "eval_logits/rejected": -1.453125,
      "eval_logps/chosen": -161.0,
      "eval_logps/rejected": -120.5,
      "eval_loss": 0.6126700639724731,
      "eval_rewards/accuracies": 0.75,
      "eval_rewards/chosen": -0.67578125,
      "eval_rewards/margins": 0.42578125,
      "eval_rewards/rejected": -1.1015625,
      "eval_runtime": 13.2152,
      "eval_samples_per_second": 11.124,
      "eval_steps_per_second": 0.378,
      "step": 189
    },
    {
      "epoch": 3.0,
      "step": 189,
      "total_flos": 0.0,
      "train_loss": 0.48406885540674605,
      "train_runtime": 1618.971,
      "train_samples_per_second": 3.706,
      "train_steps_per_second": 0.117
    }
  ],
  "logging_steps": 10,
  "max_steps": 189,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|