{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.985781990521327,
  "eval_steps": 100,
  "global_step": 52,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "grad_norm": 1597.1610924726594,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": 123.11854553222656,
      "logits/rejected": 97.00198364257812,
      "logps/chosen": -425.18585205078125,
      "logps/rejected": -424.1869201660156,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.19,
      "grad_norm": 1960.9556077938475,
      "learning_rate": 4.907293218369498e-07,
      "logits/chosen": 117.075439453125,
      "logits/rejected": 135.90403747558594,
      "logps/chosen": -441.22174072265625,
      "logps/rejected": -524.5287475585938,
      "loss": 2.1431,
      "rewards/accuracies": 0.5486111044883728,
      "rewards/chosen": 1.300018310546875,
      "rewards/margins": 1.5483126640319824,
      "rewards/rejected": -0.24829429388046265,
      "step": 10
    },
    {
      "epoch": 0.38,
      "grad_norm": 1386.5508807535327,
      "learning_rate": 3.941700805287168e-07,
      "logits/chosen": 126.82967376708984,
      "logits/rejected": 134.35858154296875,
      "logps/chosen": -432.8621520996094,
      "logps/rejected": -501.55560302734375,
      "loss": 2.1158,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -3.958549976348877,
      "rewards/margins": 4.468470096588135,
      "rewards/rejected": -8.427020072937012,
      "step": 20
    },
    {
      "epoch": 0.57,
      "grad_norm": 1222.8384880370088,
      "learning_rate": 2.3293939665883228e-07,
      "logits/chosen": 127.63995361328125,
      "logits/rejected": 130.95755004882812,
      "logps/chosen": -463.97509765625,
      "logps/rejected": -524.4500122070312,
      "loss": 2.103,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -5.313003063201904,
      "rewards/margins": 5.028109073638916,
      "rewards/rejected": -10.34111213684082,
      "step": 30
    },
    {
      "epoch": 0.76,
      "grad_norm": 1242.2684060861586,
      "learning_rate": 7.936171419533652e-08,
      "logits/chosen": 127.74715423583984,
      "logits/rejected": 122.52357482910156,
      "logps/chosen": -481.22698974609375,
      "logps/rejected": -501.05059814453125,
      "loss": 2.0594,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -4.827347755432129,
      "rewards/margins": 4.944188117980957,
      "rewards/rejected": -9.771535873413086,
      "step": 40
    },
    {
      "epoch": 0.95,
      "grad_norm": 1482.0031541291255,
      "learning_rate": 2.328513490917311e-09,
      "logits/chosen": 136.43093872070312,
      "logits/rejected": 136.77505493164062,
      "logps/chosen": -492.9546813964844,
      "logps/rejected": -509.92645263671875,
      "loss": 2.0029,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -6.079165458679199,
      "rewards/margins": 5.127718448638916,
      "rewards/rejected": -11.206883430480957,
      "step": 50
    },
    {
      "epoch": 0.99,
      "step": 52,
      "total_flos": 0.0,
      "train_loss": 2.0260768074255724,
      "train_runtime": 571.9301,
      "train_samples_per_second": 11.802,
      "train_steps_per_second": 0.091
    }
  ],
  "logging_steps": 10,
  "max_steps": 52,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}