{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 189,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015873015873015872,
      "grad_norm": 32.49287155827833,
      "learning_rate": 2.6315789473684208e-08,
      "logits/chosen": -2.453125,
      "logits/rejected": -1.4453125,
      "logps/chosen": -35.0,
      "logps/rejected": -71.5,
      "loss": 0.6914,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.15873015873015872,
      "grad_norm": 35.56440458755336,
      "learning_rate": 2.631578947368421e-07,
      "logits/chosen": -1.890625,
      "logits/rejected": -0.7421875,
      "logps/chosen": -65.5,
      "logps/rejected": -58.75,
      "loss": 0.6871,
      "rewards/accuracies": 0.4583333432674408,
      "rewards/chosen": 0.00341796875,
      "rewards/margins": 0.0299072265625,
      "rewards/rejected": -0.0264892578125,
      "step": 10
    },
    {
      "epoch": 0.31746031746031744,
      "grad_norm": 27.925399110912338,
      "learning_rate": 4.970588235294118e-07,
      "logits/chosen": -1.640625,
      "logits/rejected": -0.69140625,
      "logps/chosen": -52.25,
      "logps/rejected": -74.5,
      "loss": 0.7455,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.00579833984375,
      "rewards/margins": 0.0537109375,
      "rewards/rejected": -0.0478515625,
      "step": 20
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 21.690722291304077,
      "learning_rate": 4.676470588235294e-07,
      "logits/chosen": -0.92578125,
      "logits/rejected": -1.0859375,
      "logps/chosen": -62.75,
      "logps/rejected": -57.75,
      "loss": 0.6262,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.10693359375,
      "rewards/margins": 0.1513671875,
      "rewards/rejected": -0.044189453125,
      "step": 30
    },
    {
      "epoch": 0.6349206349206349,
      "grad_norm": 27.607029623598933,
      "learning_rate": 4.38235294117647e-07,
      "logits/chosen": -1.5390625,
      "logits/rejected": -0.6015625,
      "logps/chosen": -52.0,
      "logps/rejected": -97.5,
      "loss": 0.6431,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.08740234375,
      "rewards/margins": 1.046875,
      "rewards/rejected": -0.9609375,
      "step": 40
    },
    {
      "epoch": 0.7936507936507936,
      "grad_norm": 89.33304447908654,
      "learning_rate": 4.0882352941176465e-07,
      "logits/chosen": -1.515625,
      "logits/rejected": -1.0625,
      "logps/chosen": -75.0,
      "logps/rejected": -74.0,
      "loss": 0.742,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.2197265625,
      "rewards/margins": 0.0859375,
      "rewards/rejected": -0.306640625,
      "step": 50
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 28.864344086826605,
      "learning_rate": 3.7941176470588235e-07,
      "logits/chosen": -1.5,
      "logits/rejected": -0.96875,
      "logps/chosen": -59.75,
      "logps/rejected": -84.0,
      "loss": 0.6597,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.205078125,
      "rewards/margins": 0.478515625,
      "rewards/rejected": -0.2734375,
      "step": 60
    },
    {
      "epoch": 1.0,
      "eval_logits/chosen": -1.3671875,
      "eval_logits/rejected": -0.71875,
      "eval_logps/chosen": -61.25,
      "eval_logps/rejected": -89.5,
      "eval_loss": 0.684765636920929,
      "eval_rewards/accuracies": 0.625,
      "eval_rewards/chosen": 0.0400390625,
      "eval_rewards/margins": 0.4375,
      "eval_rewards/rejected": -0.3984375,
      "eval_runtime": 18.8729,
      "eval_samples_per_second": 10.597,
      "eval_steps_per_second": 0.371,
      "step": 63
    },
    {
      "epoch": 1.1111111111111112,
      "grad_norm": 20.770261653452945,
      "learning_rate": 3.5e-07,
      "logits/chosen": -1.6171875,
      "logits/rejected": -0.87109375,
      "logps/chosen": -62.25,
      "logps/rejected": -85.0,
      "loss": 0.3381,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 0.578125,
      "rewards/margins": 1.953125,
      "rewards/rejected": -1.375,
      "step": 70
    },
    {
      "epoch": 1.2698412698412698,
      "grad_norm": 18.43753108370381,
      "learning_rate": 3.205882352941177e-07,
      "logits/chosen": -1.34375,
      "logits/rejected": -0.84375,
      "logps/chosen": -49.5,
      "logps/rejected": -99.0,
      "loss": 0.3254,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 0.3671875,
      "rewards/margins": 2.390625,
      "rewards/rejected": -2.015625,
      "step": 80
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 16.839062370433073,
      "learning_rate": 2.911764705882353e-07,
      "logits/chosen": -1.5078125,
      "logits/rejected": -0.78515625,
      "logps/chosen": -57.5,
      "logps/rejected": -72.5,
      "loss": 0.2715,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 0.84765625,
      "rewards/margins": 2.03125,
      "rewards/rejected": -1.1875,
      "step": 90
    },
    {
      "epoch": 1.5873015873015874,
      "grad_norm": 16.580044861649466,
      "learning_rate": 2.6176470588235295e-07,
      "logits/chosen": -2.09375,
      "logits/rejected": -1.3359375,
      "logps/chosen": -51.0,
      "logps/rejected": -99.5,
      "loss": 0.2519,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.345703125,
      "rewards/margins": 1.9375,
      "rewards/rejected": -1.59375,
      "step": 100
    },
    {
      "epoch": 1.746031746031746,
      "grad_norm": 16.88214421094986,
      "learning_rate": 2.323529411764706e-07,
      "logits/chosen": -2.3125,
      "logits/rejected": -1.171875,
      "logps/chosen": -55.75,
      "logps/rejected": -90.5,
      "loss": 0.2389,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 0.197265625,
      "rewards/margins": 1.9296875,
      "rewards/rejected": -1.734375,
      "step": 110
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 12.829485946331816,
      "learning_rate": 2.0294117647058823e-07,
      "logits/chosen": -1.8828125,
      "logits/rejected": -0.9453125,
      "logps/chosen": -50.5,
      "logps/rejected": -102.5,
      "loss": 0.2831,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": 0.10888671875,
      "rewards/margins": 2.578125,
      "rewards/rejected": -2.46875,
      "step": 120
    },
    {
      "epoch": 2.0,
      "eval_logits/chosen": -1.84375,
      "eval_logits/rejected": -1.171875,
      "eval_logps/chosen": -66.0,
      "eval_logps/rejected": -97.5,
      "eval_loss": 0.6284375190734863,
      "eval_rewards/accuracies": 0.6071428656578064,
      "eval_rewards/chosen": -0.453125,
      "eval_rewards/margins": 0.75,
      "eval_rewards/rejected": -1.203125,
      "eval_runtime": 23.6296,
      "eval_samples_per_second": 8.464,
      "eval_steps_per_second": 0.296,
      "step": 126
    },
    {
      "epoch": 2.0634920634920633,
      "grad_norm": 7.418629403738186,
      "learning_rate": 1.7352941176470587e-07,
      "logits/chosen": -2.109375,
      "logits/rejected": -0.73046875,
      "logps/chosen": -61.25,
      "logps/rejected": -106.0,
      "loss": 0.1749,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": 0.4296875,
      "rewards/margins": 2.625,
      "rewards/rejected": -2.203125,
      "step": 130
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 5.917946365968759,
      "learning_rate": 1.441176470588235e-07,
      "logits/chosen": -2.390625,
      "logits/rejected": -1.2890625,
      "logps/chosen": -72.0,
      "logps/rejected": -100.0,
      "loss": 0.1132,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 0.6015625,
      "rewards/margins": 3.65625,
      "rewards/rejected": -3.0625,
      "step": 140
    },
    {
      "epoch": 2.380952380952381,
      "grad_norm": 7.62179839987552,
      "learning_rate": 1.1470588235294116e-07,
      "logits/chosen": -2.125,
      "logits/rejected": -1.296875,
      "logps/chosen": -54.0,
      "logps/rejected": -92.5,
      "loss": 0.125,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": 0.66796875,
      "rewards/margins": 3.171875,
      "rewards/rejected": -2.5,
      "step": 150
    },
    {
      "epoch": 2.5396825396825395,
      "grad_norm": 7.689817277750616,
      "learning_rate": 8.529411764705883e-08,
      "logits/chosen": -1.8671875,
      "logits/rejected": -2.3125,
      "logps/chosen": -50.25,
      "logps/rejected": -92.5,
      "loss": 0.118,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 0.3203125,
      "rewards/margins": 3.109375,
      "rewards/rejected": -2.78125,
      "step": 160
    },
    {
      "epoch": 2.6984126984126986,
      "grad_norm": 5.583079792162674,
      "learning_rate": 5.5882352941176474e-08,
      "logits/chosen": -2.21875,
      "logits/rejected": -1.1328125,
      "logps/chosen": -66.0,
      "logps/rejected": -114.5,
      "loss": 0.145,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": 0.388671875,
      "rewards/margins": 3.671875,
      "rewards/rejected": -3.28125,
      "step": 170
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 10.79399873370834,
      "learning_rate": 2.6470588235294116e-08,
      "logits/chosen": -1.6640625,
      "logits/rejected": -1.5234375,
      "logps/chosen": -59.0,
      "logps/rejected": -87.5,
      "loss": 0.1329,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 0.310546875,
      "rewards/margins": 2.75,
      "rewards/rejected": -2.4375,
      "step": 180
    },
    {
      "epoch": 3.0,
      "eval_logits/chosen": -2.21875,
      "eval_logits/rejected": -1.4921875,
      "eval_logps/chosen": -69.5,
      "eval_logps/rejected": -102.5,
      "eval_loss": 0.6340625286102295,
      "eval_rewards/accuracies": 0.6607142686843872,
      "eval_rewards/chosen": -0.76953125,
      "eval_rewards/margins": 0.921875,
      "eval_rewards/rejected": -1.6953125,
      "eval_runtime": 21.8558,
      "eval_samples_per_second": 9.151,
      "eval_steps_per_second": 0.32,
      "step": 189
    },
    {
      "epoch": 3.0,
      "step": 189,
      "total_flos": 0.0,
      "train_loss": 0.354867279214203,
      "train_runtime": 2422.2159,
      "train_samples_per_second": 2.477,
      "train_steps_per_second": 0.078
    }
  ],
  "logging_steps": 10,
  "max_steps": 189,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}