CAMP_nat / checkpoint-8000 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.17918738520808136,
"eval_steps": 1000,
"global_step": 8000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002239842315101017,
"grad_norm": 71.10852813720703,
"learning_rate": 0.0001,
"loss": 3.5057,
"step": 100
},
{
"epoch": 0.004479684630202034,
"grad_norm": 57.756534576416016,
"learning_rate": 9.9998756572327e-05,
"loss": 0.8254,
"step": 200
},
{
"epoch": 0.006719526945303051,
"grad_norm": 44.61080551147461,
"learning_rate": 9.999502635115246e-05,
"loss": 0.6935,
"step": 300
},
{
"epoch": 0.008959369260404068,
"grad_norm": 50.82619094848633,
"learning_rate": 9.998880952200681e-05,
"loss": 0.5972,
"step": 400
},
{
"epoch": 0.011199211575505085,
"grad_norm": 44.26677703857422,
"learning_rate": 9.998010639409713e-05,
"loss": 0.5408,
"step": 500
},
{
"epoch": 0.013439053890606102,
"grad_norm": 42.8045654296875,
"learning_rate": 9.996891740029186e-05,
"loss": 0.5509,
"step": 600
},
{
"epoch": 0.01567889620570712,
"grad_norm": 30.989139556884766,
"learning_rate": 9.995524309709913e-05,
"loss": 0.4823,
"step": 700
},
{
"epoch": 0.017918738520808136,
"grad_norm": 34.01952362060547,
"learning_rate": 9.993908416463927e-05,
"loss": 0.5111,
"step": 800
},
{
"epoch": 0.020158580835909153,
"grad_norm": 34.079307556152344,
"learning_rate": 9.992044140661079e-05,
"loss": 0.4635,
"step": 900
},
{
"epoch": 0.02239842315101017,
"grad_norm": 26.16071128845215,
"learning_rate": 9.989931575025056e-05,
"loss": 0.4883,
"step": 1000
},
{
"epoch": 0.02239842315101017,
"eval_avg_non_pair_similarity": 0.0020343252948339737,
"eval_avg_pair_similarity": 0.008852629057131708,
"eval_loss": 0.5431402921676636,
"eval_runtime": 19.6178,
"eval_samples_per_second": 25.487,
"eval_similarity_ratio": 4.35162905343228,
"eval_steps_per_second": 0.816,
"step": 1000
},
{
"epoch": 0.024638265466111187,
"grad_norm": 35.38695526123047,
"learning_rate": 9.987570824628759e-05,
"loss": 0.4655,
"step": 1100
},
{
"epoch": 0.026878107781212204,
"grad_norm": 32.071346282958984,
"learning_rate": 9.984962006889084e-05,
"loss": 0.4342,
"step": 1200
},
{
"epoch": 0.029117950096313218,
"grad_norm": 39.6610221862793,
"learning_rate": 9.982105251561082e-05,
"loss": 0.458,
"step": 1300
},
{
"epoch": 0.03135779241141424,
"grad_norm": 31.493322372436523,
"learning_rate": 9.979000700731491e-05,
"loss": 0.4525,
"step": 1400
},
{
"epoch": 0.03359763472651525,
"grad_norm": 34.453399658203125,
"learning_rate": 9.975648508811693e-05,
"loss": 0.41,
"step": 1500
},
{
"epoch": 0.03583747704161627,
"grad_norm": 33.990074157714844,
"learning_rate": 9.972048842530012e-05,
"loss": 0.4097,
"step": 1600
},
{
"epoch": 0.038077319356717286,
"grad_norm": 30.44228172302246,
"learning_rate": 9.968201880923439e-05,
"loss": 0.4257,
"step": 1700
},
{
"epoch": 0.040317161671818307,
"grad_norm": 31.427162170410156,
"learning_rate": 9.964107815328711e-05,
"loss": 0.3821,
"step": 1800
},
{
"epoch": 0.04255700398691932,
"grad_norm": 25.877887725830078,
"learning_rate": 9.959766849372808e-05,
"loss": 0.3788,
"step": 1900
},
{
"epoch": 0.04479684630202034,
"grad_norm": 25.36798095703125,
"learning_rate": 9.955179198962817e-05,
"loss": 0.3854,
"step": 2000
},
{
"epoch": 0.04479684630202034,
"eval_avg_non_pair_similarity": 0.0016289287904792565,
"eval_avg_pair_similarity": 0.0032495629731565715,
"eval_loss": 0.4394480586051941,
"eval_runtime": 19.5064,
"eval_samples_per_second": 25.633,
"eval_similarity_ratio": 1.9949079371360972,
"eval_steps_per_second": 0.82,
"step": 2000
},
{
"epoch": 0.047036688617121354,
"grad_norm": 24.537384033203125,
"learning_rate": 9.950345092275198e-05,
"loss": 0.3886,
"step": 2100
},
{
"epoch": 0.049276530932222375,
"grad_norm": 26.4981632232666,
"learning_rate": 9.945264769744431e-05,
"loss": 0.3926,
"step": 2200
},
{
"epoch": 0.05151637324732339,
"grad_norm": 40.585941314697266,
"learning_rate": 9.939938484051063e-05,
"loss": 0.3796,
"step": 2300
},
{
"epoch": 0.05375621556242441,
"grad_norm": 26.374149322509766,
"learning_rate": 9.934366500109132e-05,
"loss": 0.3723,
"step": 2400
},
{
"epoch": 0.05599605787752542,
"grad_norm": 31.54728126525879,
"learning_rate": 9.928549095053001e-05,
"loss": 0.3639,
"step": 2500
},
{
"epoch": 0.058235900192626436,
"grad_norm": 26.00455093383789,
"learning_rate": 9.922486558223567e-05,
"loss": 0.3387,
"step": 2600
},
{
"epoch": 0.060475742507727456,
"grad_norm": 23.684057235717773,
"learning_rate": 9.916179191153873e-05,
"loss": 0.3494,
"step": 2700
},
{
"epoch": 0.06271558482282848,
"grad_norm": 24.034656524658203,
"learning_rate": 9.909627307554108e-05,
"loss": 0.362,
"step": 2800
},
{
"epoch": 0.06495542713792948,
"grad_norm": 31.07891082763672,
"learning_rate": 9.902831233296009e-05,
"loss": 0.3481,
"step": 2900
},
{
"epoch": 0.0671952694530305,
"grad_norm": 24.183626174926758,
"learning_rate": 9.895791306396644e-05,
"loss": 0.3535,
"step": 3000
},
{
"epoch": 0.0671952694530305,
"eval_avg_non_pair_similarity": -0.0007878901720614375,
"eval_avg_pair_similarity": -0.008789425778668375,
"eval_loss": 0.4187028110027313,
"eval_runtime": 19.4839,
"eval_samples_per_second": 25.662,
"eval_similarity_ratio": 11.155648452463497,
"eval_steps_per_second": 0.821,
"step": 3000
},
{
"epoch": 0.06943511176813152,
"grad_norm": 20.178213119506836,
"learning_rate": 9.888507877001616e-05,
"loss": 0.3375,
"step": 3100
},
{
"epoch": 0.07167495408323254,
"grad_norm": 31.757244110107422,
"learning_rate": 9.880981307367627e-05,
"loss": 0.3489,
"step": 3200
},
{
"epoch": 0.07391479639833355,
"grad_norm": 27.889982223510742,
"learning_rate": 9.873211971844477e-05,
"loss": 0.3463,
"step": 3300
},
{
"epoch": 0.07615463871343457,
"grad_norm": 23.282093048095703,
"learning_rate": 9.865200256856437e-05,
"loss": 0.3235,
"step": 3400
},
{
"epoch": 0.07839448102853559,
"grad_norm": 24.573863983154297,
"learning_rate": 9.856946560883034e-05,
"loss": 0.3474,
"step": 3500
},
{
"epoch": 0.08063432334363661,
"grad_norm": 20.7421932220459,
"learning_rate": 9.848451294439224e-05,
"loss": 0.3533,
"step": 3600
},
{
"epoch": 0.08287416565873762,
"grad_norm": 22.051513671875,
"learning_rate": 9.839714880054987e-05,
"loss": 0.3277,
"step": 3700
},
{
"epoch": 0.08511400797383864,
"grad_norm": 22.470027923583984,
"learning_rate": 9.830737752254294e-05,
"loss": 0.3261,
"step": 3800
},
{
"epoch": 0.08735385028893966,
"grad_norm": 20.80890464782715,
"learning_rate": 9.821520357533513e-05,
"loss": 0.3474,
"step": 3900
},
{
"epoch": 0.08959369260404068,
"grad_norm": 20.187280654907227,
"learning_rate": 9.812063154339191e-05,
"loss": 0.2984,
"step": 4000
},
{
"epoch": 0.08959369260404068,
"eval_avg_non_pair_similarity": -0.0008396646829899625,
"eval_avg_pair_similarity": -0.011315496074035763,
"eval_loss": 0.32538020610809326,
"eval_runtime": 19.4916,
"eval_samples_per_second": 25.652,
"eval_similarity_ratio": 13.476208185560939,
"eval_steps_per_second": 0.821,
"step": 4000
},
{
"epoch": 0.09183353491914169,
"grad_norm": 26.496036529541016,
"learning_rate": 9.802366613045254e-05,
"loss": 0.3326,
"step": 4100
},
{
"epoch": 0.09407337723424271,
"grad_norm": 23.102359771728516,
"learning_rate": 9.792431215929613e-05,
"loss": 0.3341,
"step": 4200
},
{
"epoch": 0.09631321954934373,
"grad_norm": 21.27369499206543,
"learning_rate": 9.782257457150177e-05,
"loss": 0.3254,
"step": 4300
},
{
"epoch": 0.09855306186444475,
"grad_norm": 19.25406265258789,
"learning_rate": 9.771845842720274e-05,
"loss": 0.3334,
"step": 4400
},
{
"epoch": 0.10079290417954576,
"grad_norm": 15.666335105895996,
"learning_rate": 9.761196890483482e-05,
"loss": 0.3064,
"step": 4500
},
{
"epoch": 0.10303274649464678,
"grad_norm": 24.592592239379883,
"learning_rate": 9.75031113008788e-05,
"loss": 0.2902,
"step": 4600
},
{
"epoch": 0.1052725888097478,
"grad_norm": 20.0572566986084,
"learning_rate": 9.739189102959696e-05,
"loss": 0.3121,
"step": 4700
},
{
"epoch": 0.10751243112484882,
"grad_norm": 19.887725830078125,
"learning_rate": 9.727831362276381e-05,
"loss": 0.3014,
"step": 4800
},
{
"epoch": 0.10975227343994982,
"grad_norm": 15.561097145080566,
"learning_rate": 9.716238472939101e-05,
"loss": 0.316,
"step": 4900
},
{
"epoch": 0.11199211575505084,
"grad_norm": 17.96786880493164,
"learning_rate": 9.704411011544629e-05,
"loss": 0.3215,
"step": 5000
},
{
"epoch": 0.11199211575505084,
"eval_avg_non_pair_similarity": 0.00024146916974524413,
"eval_avg_pair_similarity": -0.009633154251612723,
"eval_loss": 0.2951599061489105,
"eval_runtime": 19.5025,
"eval_samples_per_second": 25.638,
"eval_similarity_ratio": -39.893930400207765,
"eval_steps_per_second": 0.82,
"step": 5000
},
{
"epoch": 0.11423195807015186,
"grad_norm": 14.672723770141602,
"learning_rate": 9.692349566356677e-05,
"loss": 0.2954,
"step": 5100
},
{
"epoch": 0.11647180038525287,
"grad_norm": 17.881025314331055,
"learning_rate": 9.680054737276638e-05,
"loss": 0.2968,
"step": 5200
},
{
"epoch": 0.11871164270035389,
"grad_norm": 16.55130958557129,
"learning_rate": 9.667527135813737e-05,
"loss": 0.2707,
"step": 5300
},
{
"epoch": 0.12095148501545491,
"grad_norm": 21.650575637817383,
"learning_rate": 9.654767385054627e-05,
"loss": 0.3068,
"step": 5400
},
{
"epoch": 0.12319132733055593,
"grad_norm": 20.990921020507812,
"learning_rate": 9.641776119632397e-05,
"loss": 0.3037,
"step": 5500
},
{
"epoch": 0.12543116964565695,
"grad_norm": 21.97833824157715,
"learning_rate": 9.628553985695005e-05,
"loss": 0.3307,
"step": 5600
},
{
"epoch": 0.12767101196075797,
"grad_norm": 20.353246688842773,
"learning_rate": 9.61510164087314e-05,
"loss": 0.2907,
"step": 5700
},
{
"epoch": 0.12991085427585897,
"grad_norm": 16.254976272583008,
"learning_rate": 9.601419754247514e-05,
"loss": 0.3025,
"step": 5800
},
{
"epoch": 0.13215069659096,
"grad_norm": 24.14662742614746,
"learning_rate": 9.587509006315585e-05,
"loss": 0.2676,
"step": 5900
},
{
"epoch": 0.134390538906061,
"grad_norm": 15.861074447631836,
"learning_rate": 9.573370088957712e-05,
"loss": 0.2953,
"step": 6000
},
{
"epoch": 0.134390538906061,
"eval_avg_non_pair_similarity": -0.00021786548842126337,
"eval_avg_pair_similarity": -0.001965825233142823,
"eval_loss": 0.31484127044677734,
"eval_runtime": 19.5134,
"eval_samples_per_second": 25.623,
"eval_similarity_ratio": 9.023114433533939,
"eval_steps_per_second": 0.82,
"step": 6000
},
{
"epoch": 0.13663038122116203,
"grad_norm": 14.910375595092773,
"learning_rate": 9.559003705402737e-05,
"loss": 0.2846,
"step": 6100
},
{
"epoch": 0.13887022353626305,
"grad_norm": 17.768138885498047,
"learning_rate": 9.544410570193014e-05,
"loss": 0.3031,
"step": 6200
},
{
"epoch": 0.14111006585136407,
"grad_norm": 13.620220184326172,
"learning_rate": 9.529591409148874e-05,
"loss": 0.2716,
"step": 6300
},
{
"epoch": 0.1433499081664651,
"grad_norm": 14.697577476501465,
"learning_rate": 9.514546959332509e-05,
"loss": 0.2759,
"step": 6400
},
{
"epoch": 0.1455897504815661,
"grad_norm": 21.234310150146484,
"learning_rate": 9.499277969011334e-05,
"loss": 0.2845,
"step": 6500
},
{
"epoch": 0.1478295927966671,
"grad_norm": 16.50038719177246,
"learning_rate": 9.483785197620747e-05,
"loss": 0.2471,
"step": 6600
},
{
"epoch": 0.15006943511176812,
"grad_norm": 16.261537551879883,
"learning_rate": 9.468069415726377e-05,
"loss": 0.2978,
"step": 6700
},
{
"epoch": 0.15230927742686914,
"grad_norm": 16.553571701049805,
"learning_rate": 9.452131404985752e-05,
"loss": 0.311,
"step": 6800
},
{
"epoch": 0.15454911974197016,
"grad_norm": 18.445993423461914,
"learning_rate": 9.43597195810941e-05,
"loss": 0.2777,
"step": 6900
},
{
"epoch": 0.15678896205707119,
"grad_norm": 13.675585746765137,
"learning_rate": 9.419591878821496e-05,
"loss": 0.2417,
"step": 7000
},
{
"epoch": 0.15678896205707119,
"eval_avg_non_pair_similarity": 0.0003613333898595999,
"eval_avg_pair_similarity": 0.005676110625499859,
"eval_loss": 0.3019685447216034,
"eval_runtime": 19.5219,
"eval_samples_per_second": 25.612,
"eval_similarity_ratio": 15.708790786551377,
"eval_steps_per_second": 0.82,
"step": 7000
},
{
"epoch": 0.1590288043721722,
"grad_norm": 15.780075073242188,
"learning_rate": 9.402991981819758e-05,
"loss": 0.2866,
"step": 7100
},
{
"epoch": 0.16126864668727323,
"grad_norm": 17.34490966796875,
"learning_rate": 9.386173092735051e-05,
"loss": 0.2526,
"step": 7200
},
{
"epoch": 0.16350848900237425,
"grad_norm": 14.431266784667969,
"learning_rate": 9.36913604809026e-05,
"loss": 0.2646,
"step": 7300
},
{
"epoch": 0.16574833131747524,
"grad_norm": 11.20712661743164,
"learning_rate": 9.351881695258693e-05,
"loss": 0.2279,
"step": 7400
},
{
"epoch": 0.16798817363257626,
"grad_norm": 15.549891471862793,
"learning_rate": 9.334410892421945e-05,
"loss": 0.2884,
"step": 7500
},
{
"epoch": 0.17022801594767728,
"grad_norm": 14.635927200317383,
"learning_rate": 9.316724508527205e-05,
"loss": 0.2729,
"step": 7600
},
{
"epoch": 0.1724678582627783,
"grad_norm": 12.841429710388184,
"learning_rate": 9.298823423244038e-05,
"loss": 0.271,
"step": 7700
},
{
"epoch": 0.17470770057787932,
"grad_norm": 13.053755760192871,
"learning_rate": 9.280708526920636e-05,
"loss": 0.3094,
"step": 7800
},
{
"epoch": 0.17694754289298034,
"grad_norm": 14.694463729858398,
"learning_rate": 9.262380720539536e-05,
"loss": 0.2687,
"step": 7900
},
{
"epoch": 0.17918738520808136,
"grad_norm": 14.9842529296875,
"learning_rate": 9.243840915672804e-05,
"loss": 0.2539,
"step": 8000
},
{
"epoch": 0.17918738520808136,
"eval_avg_non_pair_similarity": -0.0009227607365296818,
"eval_avg_pair_similarity": -0.005276160409208387,
"eval_loss": 0.22878731787204742,
"eval_runtime": 19.5192,
"eval_samples_per_second": 25.616,
"eval_similarity_ratio": 5.717798992023619,
"eval_steps_per_second": 0.82,
"step": 8000
}
],
"logging_steps": 100,
"max_steps": 44646,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
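
For reference, a minimal sketch of how this trainer state could be inspected locally, assuming the checkpoint directory has been downloaded alongside the script and keeping the file layout shown above (the path constant below is a hypothetical local path, not part of the checkpoint itself):

```python
import json

# Hypothetical local path; adjust to wherever checkpoint-8000 was downloaded.
STATE_PATH = "checkpoint-8000/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# log_history mixes training-loss entries and evaluation entries;
# split them by which keys each record carries.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step: {state['global_step']}, epoch: {state['epoch']:.4f}")
print(f"latest train loss: {train_logs[-1]['loss']} at step {train_logs[-1]['step']}")
for e in eval_logs:
    print(f"step {e['step']}: eval_loss={e['eval_loss']:.4f}, "
          f"similarity_ratio={e['eval_similarity_ratio']:.2f}")
```

Run against this checkpoint, the loop would print one line per eval step (1000 through 8000), which makes it easy to confirm the downward trend in `eval_loss` from roughly 0.54 to 0.23 without scrolling through the raw JSON.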