{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 1000,
  "global_step": 169,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005917159763313609,
      "grad_norm": 86.11190361804692,
      "learning_rate": 2.941176470588235e-08,
      "logits/chosen": -0.1997361034154892,
      "logits/rejected": -0.19101263582706451,
      "logps/chosen": -210.513671875,
      "logps/rejected": -157.554931640625,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.05917159763313609,
      "grad_norm": 78.94990602073459,
      "learning_rate": 2.941176470588235e-07,
      "logits/chosen": -0.13639792799949646,
      "logits/rejected": -0.1734662801027298,
      "logps/chosen": -218.1241912841797,
      "logps/rejected": -238.053466796875,
      "loss": 0.6903,
      "rewards/accuracies": 0.37037035822868347,
      "rewards/chosen": -0.008327378891408443,
      "rewards/margins": -0.011080991476774216,
      "rewards/rejected": 0.002753612119704485,
      "step": 10
    },
    {
      "epoch": 0.11834319526627218,
      "grad_norm": 76.16607821763226,
      "learning_rate": 4.901315789473684e-07,
      "logits/chosen": -0.1947246491909027,
      "logits/rejected": -0.24646611511707306,
      "logps/chosen": -177.68190002441406,
      "logps/rejected": -174.81893920898438,
      "loss": 0.6672,
      "rewards/accuracies": 0.5833333730697632,
      "rewards/chosen": -0.04245181754231453,
      "rewards/margins": 0.05021858215332031,
      "rewards/rejected": -0.09267039597034454,
      "step": 20
    },
    {
      "epoch": 0.17751479289940827,
      "grad_norm": 109.70241370606963,
      "learning_rate": 4.5723684210526313e-07,
      "logits/chosen": -0.06654810905456543,
      "logits/rejected": -0.10471577942371368,
      "logps/chosen": -205.10757446289062,
      "logps/rejected": -221.33865356445312,
      "loss": 0.6287,
      "rewards/accuracies": 0.6333333253860474,
      "rewards/chosen": -0.1406942903995514,
      "rewards/margins": 0.19435003399848938,
      "rewards/rejected": -0.33504432439804077,
      "step": 30
    },
    {
      "epoch": 0.23668639053254437,
      "grad_norm": 104.51396601202018,
      "learning_rate": 4.243421052631579e-07,
      "logits/chosen": -0.28040310740470886,
      "logits/rejected": -0.22528812289237976,
      "logps/chosen": -218.5230712890625,
      "logps/rejected": -223.2051239013672,
      "loss": 0.5973,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.35167446732521057,
      "rewards/margins": 0.22981619834899902,
      "rewards/rejected": -0.5814906358718872,
      "step": 40
    },
    {
      "epoch": 0.2958579881656805,
      "grad_norm": 77.4636654508096,
      "learning_rate": 3.914473684210526e-07,
      "logits/chosen": -0.09673222154378891,
      "logits/rejected": -0.046957988291978836,
      "logps/chosen": -151.72000122070312,
      "logps/rejected": -199.17935180664062,
      "loss": 0.5671,
      "rewards/accuracies": 0.7833333611488342,
      "rewards/chosen": -0.3736603260040283,
      "rewards/margins": 0.3569501042366028,
      "rewards/rejected": -0.7306104898452759,
      "step": 50
    },
    {
      "epoch": 0.35502958579881655,
      "grad_norm": 64.26194560632132,
      "learning_rate": 3.5855263157894734e-07,
      "logits/chosen": -0.08753165602684021,
      "logits/rejected": -0.07491908967494965,
      "logps/chosen": -178.92002868652344,
      "logps/rejected": -237.271240234375,
      "loss": 0.5653,
      "rewards/accuracies": 0.6666666865348816,
      "rewards/chosen": -0.44847816228866577,
      "rewards/margins": 0.366828054189682,
      "rewards/rejected": -0.8153061866760254,
      "step": 60
    },
    {
      "epoch": 0.41420118343195267,
      "grad_norm": 75.47610499926685,
      "learning_rate": 3.2565789473684206e-07,
      "logits/chosen": -0.25761300325393677,
      "logits/rejected": -0.25849616527557373,
      "logps/chosen": -191.11752319335938,
      "logps/rejected": -177.6903839111328,
      "loss": 0.5494,
      "rewards/accuracies": 0.6500000357627869,
      "rewards/chosen": -0.5434780120849609,
      "rewards/margins": 0.3972131609916687,
      "rewards/rejected": -0.9406911730766296,
      "step": 70
    },
    {
      "epoch": 0.47337278106508873,
      "grad_norm": 68.31555898880357,
      "learning_rate": 2.9276315789473684e-07,
      "logits/chosen": -0.06761662662029266,
      "logits/rejected": -0.08591805398464203,
      "logps/chosen": -151.31748962402344,
      "logps/rejected": -214.83871459960938,
      "loss": 0.5368,
      "rewards/accuracies": 0.7666666507720947,
      "rewards/chosen": -0.34307175874710083,
      "rewards/margins": 0.5734245181083679,
      "rewards/rejected": -0.9164963960647583,
      "step": 80
    },
    {
      "epoch": 0.5325443786982249,
      "grad_norm": 89.70054927345474,
      "learning_rate": 2.5986842105263156e-07,
      "logits/chosen": 0.28517499566078186,
      "logits/rejected": 0.20042014122009277,
      "logps/chosen": -219.8415985107422,
      "logps/rejected": -200.08901977539062,
      "loss": 0.5263,
      "rewards/accuracies": 0.7166666388511658,
      "rewards/chosen": -0.5583446621894836,
      "rewards/margins": 0.48568302392959595,
      "rewards/rejected": -1.0440276861190796,
      "step": 90
    },
    {
      "epoch": 0.591715976331361,
      "grad_norm": 70.5257802097204,
      "learning_rate": 2.2697368421052633e-07,
      "logits/chosen": -0.08118040859699249,
      "logits/rejected": -0.05347698926925659,
      "logps/chosen": -132.26165771484375,
      "logps/rejected": -201.5222625732422,
      "loss": 0.5441,
      "rewards/accuracies": 0.6833332777023315,
      "rewards/chosen": -0.5124568939208984,
      "rewards/margins": 0.4847620129585266,
      "rewards/rejected": -0.997218906879425,
      "step": 100
    },
    {
      "epoch": 0.650887573964497,
      "grad_norm": 88.85554517295625,
      "learning_rate": 1.9407894736842102e-07,
      "logits/chosen": 0.028139472007751465,
      "logits/rejected": -0.018909335136413574,
      "logps/chosen": -192.33306884765625,
      "logps/rejected": -235.2200469970703,
      "loss": 0.5436,
      "rewards/accuracies": 0.7666667103767395,
      "rewards/chosen": -0.45699724555015564,
      "rewards/margins": 0.7322259545326233,
      "rewards/rejected": -1.1892231702804565,
      "step": 110
    },
    {
      "epoch": 0.7100591715976331,
      "grad_norm": 64.03669024190756,
      "learning_rate": 1.611842105263158e-07,
      "logits/chosen": 0.00658189645037055,
      "logits/rejected": -0.047699280083179474,
      "logps/chosen": -223.26205444335938,
      "logps/rejected": -258.1011657714844,
      "loss": 0.547,
      "rewards/accuracies": 0.7333333492279053,
      "rewards/chosen": -0.7406572103500366,
      "rewards/margins": 0.8165454864501953,
      "rewards/rejected": -1.557202935218811,
      "step": 120
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 120.82289691922682,
      "learning_rate": 1.2828947368421054e-07,
      "logits/chosen": -0.1693594604730606,
      "logits/rejected": -0.184026300907135,
      "logps/chosen": -212.06900024414062,
      "logps/rejected": -218.6104278564453,
      "loss": 0.5153,
      "rewards/accuracies": 0.7500000596046448,
      "rewards/chosen": -0.5004691481590271,
      "rewards/margins": 0.5659549832344055,
      "rewards/rejected": -1.0664241313934326,
      "step": 130
    },
    {
      "epoch": 0.8284023668639053,
      "grad_norm": 75.4403830508224,
      "learning_rate": 9.539473684210526e-08,
      "logits/chosen": -0.0047453404404222965,
      "logits/rejected": 0.06134886294603348,
      "logps/chosen": -215.7347412109375,
      "logps/rejected": -216.7386016845703,
      "loss": 0.5365,
      "rewards/accuracies": 0.7500000596046448,
      "rewards/chosen": -0.47300204634666443,
      "rewards/margins": 0.6861640214920044,
      "rewards/rejected": -1.1591660976409912,
      "step": 140
    },
    {
      "epoch": 0.8875739644970414,
      "grad_norm": 403.81954432259465,
      "learning_rate": 6.25e-08,
      "logits/chosen": 0.07171961665153503,
      "logits/rejected": 0.06573580205440521,
      "logps/chosen": -168.96322631835938,
      "logps/rejected": -237.2980194091797,
      "loss": 0.5128,
      "rewards/accuracies": 0.783333420753479,
      "rewards/chosen": -0.5706448554992676,
      "rewards/margins": 0.8216232061386108,
      "rewards/rejected": -1.3922679424285889,
      "step": 150
    },
    {
      "epoch": 0.9467455621301775,
      "grad_norm": 77.59593509813354,
      "learning_rate": 2.9605263157894734e-08,
      "logits/chosen": 0.030731897801160812,
      "logits/rejected": 0.0011664718622341752,
      "logps/chosen": -192.11680603027344,
      "logps/rejected": -223.5975799560547,
      "loss": 0.5015,
      "rewards/accuracies": 0.7833333611488342,
      "rewards/chosen": -0.5610911250114441,
      "rewards/margins": 0.7299409508705139,
      "rewards/rejected": -1.291032075881958,
      "step": 160
    },
    {
      "epoch": 1.0,
      "step": 169,
      "total_flos": 0.0,
      "train_loss": 0.5670995542988975,
      "train_runtime": 1780.4141,
      "train_samples_per_second": 4.532,
      "train_steps_per_second": 0.095
    }
  ],
  "logging_steps": 10,
  "max_steps": 169,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 6,
  "trial_name": null,
  "trial_params": null
}