{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.985781990521327,
  "eval_steps": 100,
  "global_step": 52,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "grad_norm": 132.98895459124606,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": 123.11854553222656,
      "logits/rejected": 97.00198364257812,
      "logps/chosen": -425.18585205078125,
      "logps/rejected": -424.1869201660156,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.19,
      "grad_norm": 170.32108935367978,
      "learning_rate": 4.907293218369498e-07,
      "logits/chosen": 117.15108489990234,
      "logits/rejected": 135.9984588623047,
      "logps/chosen": -441.28948974609375,
      "logps/rejected": -524.968505859375,
      "loss": 0.707,
      "rewards/accuracies": 0.5347222089767456,
      "rewards/chosen": 0.10494841635227203,
      "rewards/margins": 0.14762566983699799,
      "rewards/rejected": -0.04267726466059685,
      "step": 10
    },
    {
      "epoch": 0.38,
      "grad_norm": 101.98831849682922,
      "learning_rate": 3.941700805287168e-07,
      "logits/chosen": 125.30863952636719,
      "logits/rejected": 132.74871826171875,
      "logps/chosen": -423.9988708496094,
      "logps/rejected": -499.4864196777344,
      "loss": 0.5939,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.11328498274087906,
      "rewards/margins": 0.7120789289474487,
      "rewards/rejected": -0.5987939238548279,
      "step": 20
    },
    {
      "epoch": 0.57,
      "grad_norm": 94.48847661999667,
      "learning_rate": 2.3293939665883228e-07,
      "logits/chosen": 123.69091796875,
      "logits/rejected": 127.02400207519531,
      "logps/chosen": -473.69482421875,
      "logps/rejected": -546.0104370117188,
      "loss": 0.5604,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.9287363290786743,
      "rewards/margins": 1.0110455751419067,
      "rewards/rejected": -1.9397817850112915,
      "step": 30
    },
    {
      "epoch": 0.76,
      "grad_norm": 108.45257424394471,
      "learning_rate": 7.936171419533652e-08,
      "logits/chosen": 124.60306549072266,
      "logits/rejected": 119.5762939453125,
      "logps/chosen": -499.6136779785156,
      "logps/rejected": -527.9450073242188,
      "loss": 0.5394,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.3216161727905273,
      "rewards/margins": 0.8374040722846985,
      "rewards/rejected": -2.159019947052002,
      "step": 40
    },
    {
      "epoch": 0.95,
      "grad_norm": 116.27759622068336,
      "learning_rate": 2.328513490917311e-09,
      "logits/chosen": 133.5895233154297,
      "logits/rejected": 134.02474975585938,
      "logps/chosen": -508.62506103515625,
      "logps/rejected": -535.1971435546875,
      "loss": 0.5317,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -1.2901136875152588,
      "rewards/margins": 0.9073255658149719,
      "rewards/rejected": -2.197439432144165,
      "step": 50
    },
    {
      "epoch": 0.99,
      "step": 52,
      "total_flos": 0.0,
      "train_loss": 0.5840168778712933,
      "train_runtime": 572.7421,
      "train_samples_per_second": 11.785,
      "train_steps_per_second": 0.091
    }
  ],
  "logging_steps": 10,
  "max_steps": 52,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}