Training in progress, step 50, checkpoint
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0045333998231974065,
"eval_steps": 4,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 9.066799646394814e-05,
"grad_norm": 1.305655837059021,
"learning_rate": 2e-05,
"loss": 0.9466,
"step": 1
},
{
"epoch": 9.066799646394814e-05,
"eval_loss": 1.1514371633529663,
"eval_runtime": 281.757,
"eval_samples_per_second": 8.241,
"eval_steps_per_second": 8.241,
"step": 1
},
{
"epoch": 0.0001813359929278963,
"grad_norm": 1.7540494203567505,
"learning_rate": 4e-05,
"loss": 1.2689,
"step": 2
},
{
"epoch": 0.00027200398939184443,
"grad_norm": 1.3806679248809814,
"learning_rate": 6e-05,
"loss": 0.9866,
"step": 3
},
{
"epoch": 0.0003626719858557926,
"grad_norm": 1.6540546417236328,
"learning_rate": 8e-05,
"loss": 0.886,
"step": 4
},
{
"epoch": 0.0003626719858557926,
"eval_loss": 1.063585877418518,
"eval_runtime": 281.615,
"eval_samples_per_second": 8.245,
"eval_steps_per_second": 8.245,
"step": 4
},
{
"epoch": 0.00045333998231974066,
"grad_norm": 1.351820945739746,
"learning_rate": 0.0001,
"loss": 1.0148,
"step": 5
},
{
"epoch": 0.0005440079787836889,
"grad_norm": 1.6633617877960205,
"learning_rate": 0.00012,
"loss": 1.1922,
"step": 6
},
{
"epoch": 0.000634675975247637,
"grad_norm": 1.216033697128296,
"learning_rate": 0.00014,
"loss": 0.7838,
"step": 7
},
{
"epoch": 0.0007253439717115851,
"grad_norm": 1.0886664390563965,
"learning_rate": 0.00016,
"loss": 0.6905,
"step": 8
},
{
"epoch": 0.0007253439717115851,
"eval_loss": 0.6156781911849976,
"eval_runtime": 282.2824,
"eval_samples_per_second": 8.226,
"eval_steps_per_second": 8.226,
"step": 8
},
{
"epoch": 0.0008160119681755333,
"grad_norm": 1.2936314344406128,
"learning_rate": 0.00018,
"loss": 0.6603,
"step": 9
},
{
"epoch": 0.0009066799646394813,
"grad_norm": 1.429738163948059,
"learning_rate": 0.0002,
"loss": 0.518,
"step": 10
},
{
"epoch": 0.0009973479611034295,
"grad_norm": 1.501217007637024,
"learning_rate": 0.0001996917333733128,
"loss": 0.475,
"step": 11
},
{
"epoch": 0.0010880159575673777,
"grad_norm": 1.0113286972045898,
"learning_rate": 0.00019876883405951377,
"loss": 0.3233,
"step": 12
},
{
"epoch": 0.0010880159575673777,
"eval_loss": 0.29599475860595703,
"eval_runtime": 282.4723,
"eval_samples_per_second": 8.22,
"eval_steps_per_second": 8.22,
"step": 12
},
{
"epoch": 0.0011786839540313258,
"grad_norm": 1.045129656791687,
"learning_rate": 0.00019723699203976766,
"loss": 0.305,
"step": 13
},
{
"epoch": 0.001269351950495274,
"grad_norm": 0.7782332897186279,
"learning_rate": 0.00019510565162951537,
"loss": 0.2367,
"step": 14
},
{
"epoch": 0.001360019946959222,
"grad_norm": 1.1300699710845947,
"learning_rate": 0.0001923879532511287,
"loss": 0.1923,
"step": 15
},
{
"epoch": 0.0014506879434231703,
"grad_norm": 0.6594939231872559,
"learning_rate": 0.0001891006524188368,
"loss": 0.1823,
"step": 16
},
{
"epoch": 0.0014506879434231703,
"eval_loss": 0.2610986828804016,
"eval_runtime": 282.2669,
"eval_samples_per_second": 8.226,
"eval_steps_per_second": 8.226,
"step": 16
},
{
"epoch": 0.0015413559398871183,
"grad_norm": 1.0713460445404053,
"learning_rate": 0.00018526401643540922,
"loss": 0.2457,
"step": 17
},
{
"epoch": 0.0016320239363510666,
"grad_norm": 1.2344204187393188,
"learning_rate": 0.00018090169943749476,
"loss": 0.272,
"step": 18
},
{
"epoch": 0.0017226919328150146,
"grad_norm": 0.7098881006240845,
"learning_rate": 0.0001760405965600031,
"loss": 0.225,
"step": 19
},
{
"epoch": 0.0018133599292789627,
"grad_norm": 0.8920848965644836,
"learning_rate": 0.00017071067811865476,
"loss": 0.1479,
"step": 20
},
{
"epoch": 0.0018133599292789627,
"eval_loss": 0.23455704748630524,
"eval_runtime": 282.1688,
"eval_samples_per_second": 8.229,
"eval_steps_per_second": 8.229,
"step": 20
},
{
"epoch": 0.001904027925742911,
"grad_norm": 0.5211899876594543,
"learning_rate": 0.00016494480483301836,
"loss": 0.1664,
"step": 21
},
{
"epoch": 0.001994695922206859,
"grad_norm": 0.6904815435409546,
"learning_rate": 0.00015877852522924732,
"loss": 0.2361,
"step": 22
},
{
"epoch": 0.002085363918670807,
"grad_norm": 0.6779294013977051,
"learning_rate": 0.0001522498564715949,
"loss": 0.1939,
"step": 23
},
{
"epoch": 0.0021760319151347554,
"grad_norm": 0.8565766215324402,
"learning_rate": 0.00014539904997395468,
"loss": 0.2695,
"step": 24
},
{
"epoch": 0.0021760319151347554,
"eval_loss": 0.22548052668571472,
"eval_runtime": 282.2744,
"eval_samples_per_second": 8.226,
"eval_steps_per_second": 8.226,
"step": 24
},
{
"epoch": 0.0022666999115987033,
"grad_norm": 0.7228334546089172,
"learning_rate": 0.000138268343236509,
"loss": 0.2259,
"step": 25
},
{
"epoch": 0.0023573679080626515,
"grad_norm": 0.9707162976264954,
"learning_rate": 0.00013090169943749476,
"loss": 0.2173,
"step": 26
},
{
"epoch": 0.0024480359045265998,
"grad_norm": 0.5939818024635315,
"learning_rate": 0.00012334453638559057,
"loss": 0.2012,
"step": 27
},
{
"epoch": 0.002538703900990548,
"grad_norm": 0.3913775682449341,
"learning_rate": 0.0001156434465040231,
"loss": 0.1645,
"step": 28
},
{
"epoch": 0.002538703900990548,
"eval_loss": 0.21409040689468384,
"eval_runtime": 282.2725,
"eval_samples_per_second": 8.226,
"eval_steps_per_second": 8.226,
"step": 28
},
{
"epoch": 0.002629371897454496,
"grad_norm": 0.652338981628418,
"learning_rate": 0.0001078459095727845,
"loss": 0.1954,
"step": 29
},
{
"epoch": 0.002720039893918444,
"grad_norm": 0.5130963325500488,
"learning_rate": 0.0001,
"loss": 0.1994,
"step": 30
},
{
"epoch": 0.0028107078903823923,
"grad_norm": 0.38596802949905396,
"learning_rate": 9.215409042721552e-05,
"loss": 0.1804,
"step": 31
},
{
"epoch": 0.0029013758868463406,
"grad_norm": 0.481663316488266,
"learning_rate": 8.435655349597689e-05,
"loss": 0.1804,
"step": 32
},
{
"epoch": 0.0029013758868463406,
"eval_loss": 0.20881743729114532,
"eval_runtime": 281.3146,
"eval_samples_per_second": 8.254,
"eval_steps_per_second": 8.254,
"step": 32
},
{
"epoch": 0.0029920438833102884,
"grad_norm": 0.3412702679634094,
"learning_rate": 7.66554636144095e-05,
"loss": 0.2066,
"step": 33
},
{
"epoch": 0.0030827118797742367,
"grad_norm": 0.5542587041854858,
"learning_rate": 6.909830056250527e-05,
"loss": 0.1895,
"step": 34
},
{
"epoch": 0.003173379876238185,
"grad_norm": 0.5084295868873596,
"learning_rate": 6.173165676349103e-05,
"loss": 0.2111,
"step": 35
},
{
"epoch": 0.003264047872702133,
"grad_norm": 0.6083145141601562,
"learning_rate": 5.4600950026045326e-05,
"loss": 0.2318,
"step": 36
},
{
"epoch": 0.003264047872702133,
"eval_loss": 0.20627427101135254,
"eval_runtime": 282.4785,
"eval_samples_per_second": 8.22,
"eval_steps_per_second": 8.22,
"step": 36
},
{
"epoch": 0.003354715869166081,
"grad_norm": 0.4692092835903168,
"learning_rate": 4.7750143528405126e-05,
"loss": 0.1626,
"step": 37
},
{
"epoch": 0.0034453838656300292,
"grad_norm": 0.4230363965034485,
"learning_rate": 4.12214747707527e-05,
"loss": 0.1752,
"step": 38
},
{
"epoch": 0.0035360518620939775,
"grad_norm": 0.5133092403411865,
"learning_rate": 3.5055195166981645e-05,
"loss": 0.209,
"step": 39
},
{
"epoch": 0.0036267198585579253,
"grad_norm": 0.3762070834636688,
"learning_rate": 2.9289321881345254e-05,
"loss": 0.1563,
"step": 40
},
{
"epoch": 0.0036267198585579253,
"eval_loss": 0.20195676386356354,
"eval_runtime": 282.3618,
"eval_samples_per_second": 8.223,
"eval_steps_per_second": 8.223,
"step": 40
},
{
"epoch": 0.0037173878550218736,
"grad_norm": 0.3519875407218933,
"learning_rate": 2.3959403439996907e-05,
"loss": 0.1516,
"step": 41
},
{
"epoch": 0.003808055851485822,
"grad_norm": 0.41183072328567505,
"learning_rate": 1.9098300562505266e-05,
"loss": 0.1585,
"step": 42
},
{
"epoch": 0.00389872384794977,
"grad_norm": 0.613614022731781,
"learning_rate": 1.4735983564590783e-05,
"loss": 0.1563,
"step": 43
},
{
"epoch": 0.003989391844413718,
"grad_norm": 0.4138711988925934,
"learning_rate": 1.0899347581163221e-05,
"loss": 0.1512,
"step": 44
},
{
"epoch": 0.003989391844413718,
"eval_loss": 0.20044909417629242,
"eval_runtime": 282.2051,
"eval_samples_per_second": 8.228,
"eval_steps_per_second": 8.228,
"step": 44
},
{
"epoch": 0.004080059840877667,
"grad_norm": 0.4004136025905609,
"learning_rate": 7.612046748871327e-06,
"loss": 0.1641,
"step": 45
},
{
"epoch": 0.004170727837341614,
"grad_norm": 0.4379856586456299,
"learning_rate": 4.8943483704846475e-06,
"loss": 0.1492,
"step": 46
},
{
"epoch": 0.004261395833805562,
"grad_norm": 0.32673540711402893,
"learning_rate": 2.7630079602323442e-06,
"loss": 0.1826,
"step": 47
},
{
"epoch": 0.004352063830269511,
"grad_norm": 0.2924938499927521,
"learning_rate": 1.231165940486234e-06,
"loss": 0.1529,
"step": 48
},
{
"epoch": 0.004352063830269511,
"eval_loss": 0.20006079971790314,
"eval_runtime": 281.9182,
"eval_samples_per_second": 8.236,
"eval_steps_per_second": 8.236,
"step": 48
},
{
"epoch": 0.004442731826733459,
"grad_norm": 0.44641199707984924,
"learning_rate": 3.0826662668720364e-07,
"loss": 0.1428,
"step": 49
},
{
"epoch": 0.0045333998231974065,
"grad_norm": 0.2606041431427002,
"learning_rate": 0.0,
"loss": 0.1358,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 4,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.8825716774731776e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
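
For reference, a minimal sketch of how the log_history above can be read back. It assumes the state has been saved locally as trainer_state.json (the path, and the choice to print only the evaluation entries, are assumptions for illustration, not part of this checkpoint).

import json

# Minimal sketch: load the trainer state shown above.
# The path "trainer_state.json" is an assumption; point it at your checkpoint folder.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-step entries carry "loss"; evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

for entry in eval_log:
    print(f"step {entry['step']:>2}: eval_loss = {entry['eval_loss']:.4f}")

print("final train loss at step", train_log[-1]["step"], "=", train_log[-1]["loss"])

With eval_steps set to 4, this prints the thirteen eval_loss values logged above, falling from roughly 1.15 at step 1 to roughly 0.20 at step 48.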