{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.1773049645390071,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0017730496453900709,
"grad_norm": 1.0425547361373901,
"learning_rate": 1e-05,
"loss": 1.706,
"step": 1
},
{
"epoch": 0.0017730496453900709,
"eval_loss": 1.5571075677871704,
"eval_runtime": 85.3018,
"eval_samples_per_second": 5.568,
"eval_steps_per_second": 0.703,
"step": 1
},
{
"epoch": 0.0035460992907801418,
"grad_norm": 1.0336596965789795,
"learning_rate": 2e-05,
"loss": 1.7009,
"step": 2
},
{
"epoch": 0.005319148936170213,
"grad_norm": 1.0084134340286255,
"learning_rate": 3e-05,
"loss": 1.7186,
"step": 3
},
{
"epoch": 0.0070921985815602835,
"grad_norm": 0.9495729207992554,
"learning_rate": 4e-05,
"loss": 1.5377,
"step": 4
},
{
"epoch": 0.008865248226950355,
"grad_norm": 1.0871260166168213,
"learning_rate": 5e-05,
"loss": 1.5425,
"step": 5
},
{
"epoch": 0.010638297872340425,
"grad_norm": 0.9690693616867065,
"learning_rate": 6e-05,
"loss": 1.3581,
"step": 6
},
{
"epoch": 0.012411347517730497,
"grad_norm": 1.0647491216659546,
"learning_rate": 7e-05,
"loss": 1.4738,
"step": 7
},
{
"epoch": 0.014184397163120567,
"grad_norm": 1.152217984199524,
"learning_rate": 8e-05,
"loss": 1.3228,
"step": 8
},
{
"epoch": 0.015957446808510637,
"grad_norm": 1.1283761262893677,
"learning_rate": 9e-05,
"loss": 1.2259,
"step": 9
},
{
"epoch": 0.015957446808510637,
"eval_loss": 1.0514769554138184,
"eval_runtime": 85.2545,
"eval_samples_per_second": 5.572,
"eval_steps_per_second": 0.704,
"step": 9
},
{
"epoch": 0.01773049645390071,
"grad_norm": 1.226361632347107,
"learning_rate": 0.0001,
"loss": 1.1531,
"step": 10
},
{
"epoch": 0.01950354609929078,
"grad_norm": 0.9020638465881348,
"learning_rate": 9.99695413509548e-05,
"loss": 0.9912,
"step": 11
},
{
"epoch": 0.02127659574468085,
"grad_norm": 0.7211796045303345,
"learning_rate": 9.987820251299122e-05,
"loss": 0.8484,
"step": 12
},
{
"epoch": 0.02304964539007092,
"grad_norm": 0.6319678425788879,
"learning_rate": 9.972609476841367e-05,
"loss": 0.7357,
"step": 13
},
{
"epoch": 0.024822695035460994,
"grad_norm": 0.6404615044593811,
"learning_rate": 9.951340343707852e-05,
"loss": 0.6613,
"step": 14
},
{
"epoch": 0.026595744680851064,
"grad_norm": 0.7243396639823914,
"learning_rate": 9.924038765061042e-05,
"loss": 0.6481,
"step": 15
},
{
"epoch": 0.028368794326241134,
"grad_norm": 0.5668323636054993,
"learning_rate": 9.890738003669029e-05,
"loss": 0.5135,
"step": 16
},
{
"epoch": 0.030141843971631204,
"grad_norm": 0.6436125040054321,
"learning_rate": 9.851478631379982e-05,
"loss": 0.5688,
"step": 17
},
{
"epoch": 0.031914893617021274,
"grad_norm": 0.8257077932357788,
"learning_rate": 9.806308479691595e-05,
"loss": 0.5156,
"step": 18
},
{
"epoch": 0.031914893617021274,
"eval_loss": 0.5304271578788757,
"eval_runtime": 85.3128,
"eval_samples_per_second": 5.568,
"eval_steps_per_second": 0.703,
"step": 18
},
{
"epoch": 0.03368794326241135,
"grad_norm": 0.827019453048706,
"learning_rate": 9.755282581475769e-05,
"loss": 0.5668,
"step": 19
},
{
"epoch": 0.03546099290780142,
"grad_norm": 0.8353906273841858,
"learning_rate": 9.698463103929542e-05,
"loss": 0.5739,
"step": 20
},
{
"epoch": 0.03723404255319149,
"grad_norm": 0.6884241104125977,
"learning_rate": 9.635919272833938e-05,
"loss": 0.4368,
"step": 21
},
{
"epoch": 0.03900709219858156,
"grad_norm": 0.5974341034889221,
"learning_rate": 9.567727288213005e-05,
"loss": 0.4444,
"step": 22
},
{
"epoch": 0.040780141843971635,
"grad_norm": 0.5769183039665222,
"learning_rate": 9.493970231495835e-05,
"loss": 0.4458,
"step": 23
},
{
"epoch": 0.0425531914893617,
"grad_norm": 0.512887179851532,
"learning_rate": 9.414737964294636e-05,
"loss": 0.3521,
"step": 24
},
{
"epoch": 0.044326241134751775,
"grad_norm": 0.6440951824188232,
"learning_rate": 9.330127018922194e-05,
"loss": 0.5032,
"step": 25
},
{
"epoch": 0.04609929078014184,
"grad_norm": 0.6105683445930481,
"learning_rate": 9.24024048078213e-05,
"loss": 0.4236,
"step": 26
},
{
"epoch": 0.047872340425531915,
"grad_norm": 0.612863302230835,
"learning_rate": 9.145187862775209e-05,
"loss": 0.3652,
"step": 27
},
{
"epoch": 0.047872340425531915,
"eval_loss": 0.3773229420185089,
"eval_runtime": 85.2975,
"eval_samples_per_second": 5.569,
"eval_steps_per_second": 0.703,
"step": 27
},
{
"epoch": 0.04964539007092199,
"grad_norm": 0.47044283151626587,
"learning_rate": 9.045084971874738e-05,
"loss": 0.3428,
"step": 28
},
{
"epoch": 0.051418439716312055,
"grad_norm": 0.5425423383712769,
"learning_rate": 8.940053768033609e-05,
"loss": 0.3305,
"step": 29
},
{
"epoch": 0.05319148936170213,
"grad_norm": 0.5877718925476074,
"learning_rate": 8.83022221559489e-05,
"loss": 0.3485,
"step": 30
},
{
"epoch": 0.0549645390070922,
"grad_norm": 0.5542867183685303,
"learning_rate": 8.715724127386972e-05,
"loss": 0.3746,
"step": 31
},
{
"epoch": 0.05673758865248227,
"grad_norm": 0.5582324266433716,
"learning_rate": 8.596699001693255e-05,
"loss": 0.4032,
"step": 32
},
{
"epoch": 0.05851063829787234,
"grad_norm": 0.4852452278137207,
"learning_rate": 8.473291852294987e-05,
"loss": 0.3047,
"step": 33
},
{
"epoch": 0.06028368794326241,
"grad_norm": 0.5330655574798584,
"learning_rate": 8.345653031794292e-05,
"loss": 0.3654,
"step": 34
},
{
"epoch": 0.06205673758865248,
"grad_norm": 0.5512088537216187,
"learning_rate": 8.213938048432697e-05,
"loss": 0.2915,
"step": 35
},
{
"epoch": 0.06382978723404255,
"grad_norm": 0.5143958330154419,
"learning_rate": 8.07830737662829e-05,
"loss": 0.3363,
"step": 36
},
{
"epoch": 0.06382978723404255,
"eval_loss": 0.32617101073265076,
"eval_runtime": 85.2732,
"eval_samples_per_second": 5.57,
"eval_steps_per_second": 0.704,
"step": 36
},
{
"epoch": 0.06560283687943262,
"grad_norm": 0.5888123512268066,
"learning_rate": 7.938926261462366e-05,
"loss": 0.4433,
"step": 37
},
{
"epoch": 0.0673758865248227,
"grad_norm": 0.5405781865119934,
"learning_rate": 7.795964517353735e-05,
"loss": 0.3826,
"step": 38
},
{
"epoch": 0.06914893617021277,
"grad_norm": 0.5684509873390198,
"learning_rate": 7.649596321166024e-05,
"loss": 0.3074,
"step": 39
},
{
"epoch": 0.07092198581560284,
"grad_norm": 0.5357254147529602,
"learning_rate": 7.500000000000001e-05,
"loss": 0.3527,
"step": 40
},
{
"epoch": 0.0726950354609929,
"grad_norm": 0.5242685675621033,
"learning_rate": 7.347357813929454e-05,
"loss": 0.3603,
"step": 41
},
{
"epoch": 0.07446808510638298,
"grad_norm": 0.487697571516037,
"learning_rate": 7.191855733945387e-05,
"loss": 0.2892,
"step": 42
},
{
"epoch": 0.07624113475177305,
"grad_norm": 0.4527439773082733,
"learning_rate": 7.033683215379002e-05,
"loss": 0.2569,
"step": 43
},
{
"epoch": 0.07801418439716312,
"grad_norm": 0.49759769439697266,
"learning_rate": 6.873032967079561e-05,
"loss": 0.2744,
"step": 44
},
{
"epoch": 0.0797872340425532,
"grad_norm": 0.5503877997398376,
"learning_rate": 6.710100716628344e-05,
"loss": 0.3245,
"step": 45
},
{
"epoch": 0.0797872340425532,
"eval_loss": 0.29267728328704834,
"eval_runtime": 85.2478,
"eval_samples_per_second": 5.572,
"eval_steps_per_second": 0.704,
"step": 45
},
{
"epoch": 0.08156028368794327,
"grad_norm": 0.6225173473358154,
"learning_rate": 6.545084971874738e-05,
"loss": 0.3491,
"step": 46
},
{
"epoch": 0.08333333333333333,
"grad_norm": 0.631844699382782,
"learning_rate": 6.378186779084995e-05,
"loss": 0.2375,
"step": 47
},
{
"epoch": 0.0851063829787234,
"grad_norm": 0.7838373184204102,
"learning_rate": 6.209609477998338e-05,
"loss": 0.3831,
"step": 48
},
{
"epoch": 0.08687943262411348,
"grad_norm": 0.5574771165847778,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.293,
"step": 49
},
{
"epoch": 0.08865248226950355,
"grad_norm": 0.5543034076690674,
"learning_rate": 5.868240888334653e-05,
"loss": 0.256,
"step": 50
},
{
"epoch": 0.09042553191489362,
"grad_norm": 0.5195747017860413,
"learning_rate": 5.695865504800327e-05,
"loss": 0.232,
"step": 51
},
{
"epoch": 0.09219858156028368,
"grad_norm": 0.5101114511489868,
"learning_rate": 5.522642316338268e-05,
"loss": 0.2679,
"step": 52
},
{
"epoch": 0.09397163120567376,
"grad_norm": 0.46232369542121887,
"learning_rate": 5.348782368720626e-05,
"loss": 0.2772,
"step": 53
},
{
"epoch": 0.09574468085106383,
"grad_norm": 0.5173928141593933,
"learning_rate": 5.174497483512506e-05,
"loss": 0.2755,
"step": 54
},
{
"epoch": 0.09574468085106383,
"eval_loss": 0.27359503507614136,
"eval_runtime": 85.266,
"eval_samples_per_second": 5.571,
"eval_steps_per_second": 0.704,
"step": 54
},
{
"epoch": 0.0975177304964539,
"grad_norm": 0.5527864694595337,
"learning_rate": 5e-05,
"loss": 0.3412,
"step": 55
},
{
"epoch": 0.09929078014184398,
"grad_norm": 0.4534193277359009,
"learning_rate": 4.825502516487497e-05,
"loss": 0.2381,
"step": 56
},
{
"epoch": 0.10106382978723404,
"grad_norm": 0.49728792905807495,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.2389,
"step": 57
},
{
"epoch": 0.10283687943262411,
"grad_norm": 0.6069850921630859,
"learning_rate": 4.477357683661734e-05,
"loss": 0.308,
"step": 58
},
{
"epoch": 0.10460992907801418,
"grad_norm": 0.5545309782028198,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.2807,
"step": 59
},
{
"epoch": 0.10638297872340426,
"grad_norm": 0.6561558246612549,
"learning_rate": 4.131759111665349e-05,
"loss": 0.3066,
"step": 60
},
{
"epoch": 0.10815602836879433,
"grad_norm": 0.5162850022315979,
"learning_rate": 3.960441545911204e-05,
"loss": 0.2459,
"step": 61
},
{
"epoch": 0.1099290780141844,
"grad_norm": 0.6705155372619629,
"learning_rate": 3.790390522001662e-05,
"loss": 0.4057,
"step": 62
},
{
"epoch": 0.11170212765957446,
"grad_norm": 0.5775429010391235,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.2761,
"step": 63
},
{
"epoch": 0.11170212765957446,
"eval_loss": 0.25868135690689087,
"eval_runtime": 85.2805,
"eval_samples_per_second": 5.57,
"eval_steps_per_second": 0.704,
"step": 63
},
{
"epoch": 0.11347517730496454,
"grad_norm": 0.5817871689796448,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.2793,
"step": 64
},
{
"epoch": 0.11524822695035461,
"grad_norm": 0.6233260631561279,
"learning_rate": 3.289899283371657e-05,
"loss": 0.2508,
"step": 65
},
{
"epoch": 0.11702127659574468,
"grad_norm": 0.4717574715614319,
"learning_rate": 3.12696703292044e-05,
"loss": 0.2132,
"step": 66
},
{
"epoch": 0.11879432624113476,
"grad_norm": 0.6881227493286133,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.3046,
"step": 67
},
{
"epoch": 0.12056737588652482,
"grad_norm": 0.5428065061569214,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.2845,
"step": 68
},
{
"epoch": 0.12234042553191489,
"grad_norm": 0.6920152306556702,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.3518,
"step": 69
},
{
"epoch": 0.12411347517730496,
"grad_norm": 0.6600857973098755,
"learning_rate": 2.500000000000001e-05,
"loss": 0.3348,
"step": 70
},
{
"epoch": 0.12588652482269502,
"grad_norm": 0.5283850431442261,
"learning_rate": 2.350403678833976e-05,
"loss": 0.2248,
"step": 71
},
{
"epoch": 0.1276595744680851,
"grad_norm": 0.5939657688140869,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.288,
"step": 72
},
{
"epoch": 0.1276595744680851,
"eval_loss": 0.24900592863559723,
"eval_runtime": 85.2474,
"eval_samples_per_second": 5.572,
"eval_steps_per_second": 0.704,
"step": 72
},
{
"epoch": 0.12943262411347517,
"grad_norm": 0.5134656429290771,
"learning_rate": 2.061073738537635e-05,
"loss": 0.2476,
"step": 73
},
{
"epoch": 0.13120567375886524,
"grad_norm": 0.5056303143501282,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.2025,
"step": 74
},
{
"epoch": 0.13297872340425532,
"grad_norm": 0.5294190049171448,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.2336,
"step": 75
},
{
"epoch": 0.1347517730496454,
"grad_norm": 0.4144452214241028,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.2263,
"step": 76
},
{
"epoch": 0.13652482269503546,
"grad_norm": 0.5535529851913452,
"learning_rate": 1.526708147705013e-05,
"loss": 0.2731,
"step": 77
},
{
"epoch": 0.13829787234042554,
"grad_norm": 0.6666750907897949,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.2748,
"step": 78
},
{
"epoch": 0.1400709219858156,
"grad_norm": 0.5558615326881409,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.2561,
"step": 79
},
{
"epoch": 0.14184397163120568,
"grad_norm": 0.675848662853241,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.3053,
"step": 80
},
{
"epoch": 0.14361702127659576,
"grad_norm": 0.6196485161781311,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.2637,
"step": 81
},
{
"epoch": 0.14361702127659576,
"eval_loss": 0.24386928975582123,
"eval_runtime": 85.2732,
"eval_samples_per_second": 5.57,
"eval_steps_per_second": 0.704,
"step": 81
},
{
"epoch": 0.1453900709219858,
"grad_norm": 0.7320723533630371,
"learning_rate": 9.549150281252633e-06,
"loss": 0.3557,
"step": 82
},
{
"epoch": 0.14716312056737588,
"grad_norm": 0.6170211434364319,
"learning_rate": 8.548121372247918e-06,
"loss": 0.288,
"step": 83
},
{
"epoch": 0.14893617021276595,
"grad_norm": 0.5411659479141235,
"learning_rate": 7.597595192178702e-06,
"loss": 0.269,
"step": 84
},
{
"epoch": 0.15070921985815602,
"grad_norm": 0.6507031321525574,
"learning_rate": 6.698729810778065e-06,
"loss": 0.2579,
"step": 85
},
{
"epoch": 0.1524822695035461,
"grad_norm": 0.4847683608531952,
"learning_rate": 5.852620357053651e-06,
"loss": 0.2108,
"step": 86
},
{
"epoch": 0.15425531914893617,
"grad_norm": 0.5241921544075012,
"learning_rate": 5.060297685041659e-06,
"loss": 0.2421,
"step": 87
},
{
"epoch": 0.15602836879432624,
"grad_norm": 0.5839644074440002,
"learning_rate": 4.322727117869951e-06,
"loss": 0.2381,
"step": 88
},
{
"epoch": 0.15780141843971632,
"grad_norm": 0.5057505965232849,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.2041,
"step": 89
},
{
"epoch": 0.1595744680851064,
"grad_norm": 0.5602439045906067,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.2442,
"step": 90
},
{
"epoch": 0.1595744680851064,
"eval_loss": 0.24139147996902466,
"eval_runtime": 85.2729,
"eval_samples_per_second": 5.57,
"eval_steps_per_second": 0.704,
"step": 90
},
{
"epoch": 0.16134751773049646,
"grad_norm": 0.7721632122993469,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.2454,
"step": 91
},
{
"epoch": 0.16312056737588654,
"grad_norm": 0.7036444544792175,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.2381,
"step": 92
},
{
"epoch": 0.16489361702127658,
"grad_norm": 0.6270337104797363,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.2547,
"step": 93
},
{
"epoch": 0.16666666666666666,
"grad_norm": 0.4347127676010132,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.222,
"step": 94
},
{
"epoch": 0.16843971631205673,
"grad_norm": 0.5513346791267395,
"learning_rate": 7.596123493895991e-07,
"loss": 0.2501,
"step": 95
},
{
"epoch": 0.1702127659574468,
"grad_norm": 0.5058043003082275,
"learning_rate": 4.865965629214819e-07,
"loss": 0.2783,
"step": 96
},
{
"epoch": 0.17198581560283688,
"grad_norm": 0.5748170018196106,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.2392,
"step": 97
},
{
"epoch": 0.17375886524822695,
"grad_norm": 0.6403087377548218,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.2768,
"step": 98
},
{
"epoch": 0.17553191489361702,
"grad_norm": 0.5376163125038147,
"learning_rate": 3.04586490452119e-08,
"loss": 0.2586,
"step": 99
},
{
"epoch": 0.17553191489361702,
"eval_loss": 0.24068014323711395,
"eval_runtime": 85.2941,
"eval_samples_per_second": 5.569,
"eval_steps_per_second": 0.703,
"step": 99
},
{
"epoch": 0.1773049645390071,
"grad_norm": 0.5339375138282776,
"learning_rate": 0.0,
"loss": 0.2779,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.642695731478528e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}