Training in progress, step 50, checkpoint (verified commit d997c94)
{
"best_metric": 6.828060150146484,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.09704027171276079,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0019408054342552159,
"grad_norm": 0.4789515733718872,
"learning_rate": 2e-05,
"loss": 6.9316,
"step": 1
},
{
"epoch": 0.0019408054342552159,
"eval_loss": 6.919858932495117,
"eval_runtime": 1.137,
"eval_samples_per_second": 763.418,
"eval_steps_per_second": 95.867,
"step": 1
},
{
"epoch": 0.0038816108685104317,
"grad_norm": 0.45944270491600037,
"learning_rate": 4e-05,
"loss": 6.935,
"step": 2
},
{
"epoch": 0.005822416302765648,
"grad_norm": 0.4508989453315735,
"learning_rate": 6e-05,
"loss": 6.9345,
"step": 3
},
{
"epoch": 0.0077632217370208634,
"grad_norm": 0.47248098254203796,
"learning_rate": 8e-05,
"loss": 6.9324,
"step": 4
},
{
"epoch": 0.009704027171276079,
"grad_norm": 0.4529155492782593,
"learning_rate": 0.0001,
"loss": 6.9321,
"step": 5
},
{
"epoch": 0.011644832605531296,
"grad_norm": 0.4459758400917053,
"learning_rate": 9.987820251299122e-05,
"loss": 6.9296,
"step": 6
},
{
"epoch": 0.013585638039786511,
"grad_norm": 0.4499610364437103,
"learning_rate": 9.951340343707852e-05,
"loss": 6.9246,
"step": 7
},
{
"epoch": 0.015526443474041727,
"grad_norm": 0.46261221170425415,
"learning_rate": 9.890738003669029e-05,
"loss": 6.9182,
"step": 8
},
{
"epoch": 0.017467248908296942,
"grad_norm": 0.4620356559753418,
"learning_rate": 9.806308479691595e-05,
"loss": 6.9153,
"step": 9
},
{
"epoch": 0.019408054342552158,
"grad_norm": 0.4698457419872284,
"learning_rate": 9.698463103929542e-05,
"loss": 6.9106,
"step": 10
},
{
"epoch": 0.021348859776807377,
"grad_norm": 0.4636442959308624,
"learning_rate": 9.567727288213005e-05,
"loss": 6.9074,
"step": 11
},
{
"epoch": 0.023289665211062592,
"grad_norm": 0.4605291187763214,
"learning_rate": 9.414737964294636e-05,
"loss": 6.901,
"step": 12
},
{
"epoch": 0.025230470645317808,
"grad_norm": 0.4645829200744629,
"learning_rate": 9.24024048078213e-05,
"loss": 6.8961,
"step": 13
},
{
"epoch": 0.027171276079573023,
"grad_norm": 0.47509416937828064,
"learning_rate": 9.045084971874738e-05,
"loss": 6.8949,
"step": 14
},
{
"epoch": 0.02911208151382824,
"grad_norm": 0.4713204503059387,
"learning_rate": 8.83022221559489e-05,
"loss": 6.8898,
"step": 15
},
{
"epoch": 0.031052886948083454,
"grad_norm": 0.4826659858226776,
"learning_rate": 8.596699001693255e-05,
"loss": 6.8853,
"step": 16
},
{
"epoch": 0.03299369238233867,
"grad_norm": 0.4891706705093384,
"learning_rate": 8.345653031794292e-05,
"loss": 6.8812,
"step": 17
},
{
"epoch": 0.034934497816593885,
"grad_norm": 0.45633330941200256,
"learning_rate": 8.07830737662829e-05,
"loss": 6.8807,
"step": 18
},
{
"epoch": 0.036875303250849104,
"grad_norm": 0.45540496706962585,
"learning_rate": 7.795964517353735e-05,
"loss": 6.8775,
"step": 19
},
{
"epoch": 0.038816108685104316,
"grad_norm": 0.4875425696372986,
"learning_rate": 7.500000000000001e-05,
"loss": 6.8691,
"step": 20
},
{
"epoch": 0.040756914119359534,
"grad_norm": 0.45287904143333435,
"learning_rate": 7.191855733945387e-05,
"loss": 6.8747,
"step": 21
},
{
"epoch": 0.04269771955361475,
"grad_norm": 0.46057406067848206,
"learning_rate": 6.873032967079561e-05,
"loss": 6.8664,
"step": 22
},
{
"epoch": 0.044638524987869965,
"grad_norm": 0.44480112195014954,
"learning_rate": 6.545084971874738e-05,
"loss": 6.865,
"step": 23
},
{
"epoch": 0.046579330422125184,
"grad_norm": 0.4642225503921509,
"learning_rate": 6.209609477998338e-05,
"loss": 6.8566,
"step": 24
},
{
"epoch": 0.048520135856380396,
"grad_norm": 0.42264512181282043,
"learning_rate": 5.868240888334653e-05,
"loss": 6.8612,
"step": 25
},
{
"epoch": 0.048520135856380396,
"eval_loss": 6.847862243652344,
"eval_runtime": 1.1078,
"eval_samples_per_second": 783.548,
"eval_steps_per_second": 98.395,
"step": 25
},
{
"epoch": 0.050460941290635615,
"grad_norm": 0.3825906813144684,
"learning_rate": 5.522642316338268e-05,
"loss": 6.8667,
"step": 26
},
{
"epoch": 0.05240174672489083,
"grad_norm": 0.39277198910713196,
"learning_rate": 5.174497483512506e-05,
"loss": 6.8628,
"step": 27
},
{
"epoch": 0.054342552159146046,
"grad_norm": 0.39206662774086,
"learning_rate": 4.825502516487497e-05,
"loss": 6.8594,
"step": 28
},
{
"epoch": 0.056283357593401265,
"grad_norm": 0.3919509947299957,
"learning_rate": 4.477357683661734e-05,
"loss": 6.8515,
"step": 29
},
{
"epoch": 0.05822416302765648,
"grad_norm": 0.40495046973228455,
"learning_rate": 4.131759111665349e-05,
"loss": 6.8458,
"step": 30
},
{
"epoch": 0.060164968461911696,
"grad_norm": 0.4313710331916809,
"learning_rate": 3.790390522001662e-05,
"loss": 6.8364,
"step": 31
},
{
"epoch": 0.06210577389616691,
"grad_norm": 0.46062034368515015,
"learning_rate": 3.4549150281252636e-05,
"loss": 6.8315,
"step": 32
},
{
"epoch": 0.06404657933042213,
"grad_norm": 0.47267451882362366,
"learning_rate": 3.12696703292044e-05,
"loss": 6.8277,
"step": 33
},
{
"epoch": 0.06598738476467735,
"grad_norm": 1.8254433870315552,
"learning_rate": 2.8081442660546125e-05,
"loss": 6.8264,
"step": 34
},
{
"epoch": 0.06792819019893255,
"grad_norm": 1.8017712831497192,
"learning_rate": 2.500000000000001e-05,
"loss": 6.8216,
"step": 35
},
{
"epoch": 0.06986899563318777,
"grad_norm": 1.805820107460022,
"learning_rate": 2.2040354826462668e-05,
"loss": 6.8203,
"step": 36
},
{
"epoch": 0.07180980106744299,
"grad_norm": 1.792947769165039,
"learning_rate": 1.9216926233717085e-05,
"loss": 6.8177,
"step": 37
},
{
"epoch": 0.07375060650169821,
"grad_norm": 1.7828471660614014,
"learning_rate": 1.6543469682057106e-05,
"loss": 6.8137,
"step": 38
},
{
"epoch": 0.07569141193595343,
"grad_norm": 1.78638756275177,
"learning_rate": 1.4033009983067452e-05,
"loss": 6.8141,
"step": 39
},
{
"epoch": 0.07763221737020863,
"grad_norm": 1.765810489654541,
"learning_rate": 1.1697777844051105e-05,
"loss": 6.8104,
"step": 40
},
{
"epoch": 0.07957302280446385,
"grad_norm": 1.7625008821487427,
"learning_rate": 9.549150281252633e-06,
"loss": 6.8077,
"step": 41
},
{
"epoch": 0.08151382823871907,
"grad_norm": 1.7558047771453857,
"learning_rate": 7.597595192178702e-06,
"loss": 6.8057,
"step": 42
},
{
"epoch": 0.08345463367297429,
"grad_norm": 1.758226990699768,
"learning_rate": 5.852620357053651e-06,
"loss": 6.8066,
"step": 43
},
{
"epoch": 0.0853954391072295,
"grad_norm": 1.747825264930725,
"learning_rate": 4.322727117869951e-06,
"loss": 6.8011,
"step": 44
},
{
"epoch": 0.08733624454148471,
"grad_norm": 1.7420827150344849,
"learning_rate": 3.0153689607045845e-06,
"loss": 6.8014,
"step": 45
},
{
"epoch": 0.08927704997573993,
"grad_norm": 1.7530667781829834,
"learning_rate": 1.9369152030840556e-06,
"loss": 6.8033,
"step": 46
},
{
"epoch": 0.09121785540999515,
"grad_norm": 1.7421718835830688,
"learning_rate": 1.0926199633097157e-06,
"loss": 6.8006,
"step": 47
},
{
"epoch": 0.09315866084425037,
"grad_norm": 1.7380465269088745,
"learning_rate": 4.865965629214819e-07,
"loss": 6.7997,
"step": 48
},
{
"epoch": 0.09509946627850557,
"grad_norm": 1.743160605430603,
"learning_rate": 1.2179748700879012e-07,
"loss": 6.7994,
"step": 49
},
{
"epoch": 0.09704027171276079,
"grad_norm": 1.7376538515090942,
"learning_rate": 0.0,
"loss": 6.7979,
"step": 50
},
{
"epoch": 0.09704027171276079,
"eval_loss": 6.828060150146484,
"eval_runtime": 1.1077,
"eval_samples_per_second": 783.584,
"eval_steps_per_second": 98.399,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 961128038400.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
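
For reference, below is a minimal sketch of how a state file like this can be inspected with plain Python. It assumes the standard `trainer_state.json` layout written by the Hugging Face `Trainer` (the `log_history`, `best_metric`, and step fields shown above); the `STATE_PATH` value is illustrative, derived from the `best_model_checkpoint` entry plus the conventional filename, and should be adjusted to the actual checkpoint location.

```python
import json

# Illustrative path: best_model_checkpoint ("miner_id_24/checkpoint-50")
# plus the conventional trainer_state.json filename. Adjust as needed.
STATE_PATH = "miner_id_24/checkpoint-50/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# log_history mixes training entries (with "loss") and evaluation
# entries (with "eval_loss"); separate the two.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"training steps logged: {len(train_logs)}, evaluations: {len(eval_logs)}")
print(f"first train loss: {train_logs[0]['loss']:.4f} (step {train_logs[0]['step']})")
print(f"last  train loss: {train_logs[-1]['loss']:.4f} (step {train_logs[-1]['step']})")

# best_metric should equal the lowest eval_loss recorded so far
# (here 6.828060150146484 at step 50).
best_eval = min(eval_logs, key=lambda e: e["eval_loss"])
assert abs(best_eval["eval_loss"] - state["best_metric"]) < 1e-9
print(f"best eval_loss: {best_eval['eval_loss']:.6f} at step {best_eval['step']}")
```

Run against this checkpoint, the sketch would report 50 training entries and 2 evaluations, a training loss falling from about 6.93 to 6.80, and a best eval_loss of 6.828060 at step 50, matching `best_metric`. The same file can also be loaded as a dataclass via `transformers.TrainerState.load_from_json` if the Transformers library is available.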