Qweni / checkpoint-500 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.1111111111111112,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.022222222222222223,
"grad_norm": 6.603886604309082,
"learning_rate": 7.4074074074074075e-06,
"loss": 1.7454,
"step": 10
},
{
"epoch": 0.044444444444444446,
"grad_norm": 4.177036285400391,
"learning_rate": 1.4814814814814815e-05,
"loss": 1.5071,
"step": 20
},
{
"epoch": 0.06666666666666667,
"grad_norm": 6.5375285148620605,
"learning_rate": 2.2222222222222223e-05,
"loss": 1.6012,
"step": 30
},
{
"epoch": 0.08888888888888889,
"grad_norm": 2.4201414585113525,
"learning_rate": 2.8888888888888888e-05,
"loss": 1.543,
"step": 40
},
{
"epoch": 0.1111111111111111,
"grad_norm": 8.420122146606445,
"learning_rate": 3.62962962962963e-05,
"loss": 1.6806,
"step": 50
},
{
"epoch": 0.13333333333333333,
"grad_norm": 2.9238393306732178,
"learning_rate": 4.3703703703703705e-05,
"loss": 1.4376,
"step": 60
},
{
"epoch": 0.15555555555555556,
"grad_norm": 4.334264278411865,
"learning_rate": 5.111111111111111e-05,
"loss": 1.5629,
"step": 70
},
{
"epoch": 0.17777777777777778,
"grad_norm": 4.047328472137451,
"learning_rate": 5.851851851851852e-05,
"loss": 1.5502,
"step": 80
},
{
"epoch": 0.2,
"grad_norm": 6.695739269256592,
"learning_rate": 6.592592592592593e-05,
"loss": 1.5744,
"step": 90
},
{
"epoch": 0.2222222222222222,
"grad_norm": 2.579887628555298,
"learning_rate": 7.333333333333333e-05,
"loss": 1.4887,
"step": 100
},
{
"epoch": 0.24444444444444444,
"grad_norm": 8.655933380126953,
"learning_rate": 8.074074074074075e-05,
"loss": 1.4432,
"step": 110
},
{
"epoch": 0.26666666666666666,
"grad_norm": 2.3287596702575684,
"learning_rate": 8.814814814814815e-05,
"loss": 1.7349,
"step": 120
},
{
"epoch": 0.28888888888888886,
"grad_norm": 3.626464366912842,
"learning_rate": 9.555555555555557e-05,
"loss": 1.8419,
"step": 130
},
{
"epoch": 0.3111111111111111,
"grad_norm": 13.17669677734375,
"learning_rate": 9.999732574196451e-05,
"loss": 1.7434,
"step": 140
},
{
"epoch": 0.3333333333333333,
"grad_norm": 2.938781261444092,
"learning_rate": 9.996724362426075e-05,
"loss": 1.7345,
"step": 150
},
{
"epoch": 0.35555555555555557,
"grad_norm": 3.3643951416015625,
"learning_rate": 9.990375674425109e-05,
"loss": 1.6652,
"step": 160
},
{
"epoch": 0.37777777777777777,
"grad_norm": 4.0528178215026855,
"learning_rate": 9.980690754502393e-05,
"loss": 1.6247,
"step": 170
},
{
"epoch": 0.4,
"grad_norm": 3.2510921955108643,
"learning_rate": 9.96767607734863e-05,
"loss": 1.9226,
"step": 180
},
{
"epoch": 0.4222222222222222,
"grad_norm": 3.1616647243499756,
"learning_rate": 9.951340343707852e-05,
"loss": 1.9507,
"step": 190
},
{
"epoch": 0.4444444444444444,
"grad_norm": 2.816512107849121,
"learning_rate": 9.931694474560686e-05,
"loss": 1.5901,
"step": 200
},
{
"epoch": 0.4666666666666667,
"grad_norm": 3.0883939266204834,
"learning_rate": 9.908751603823301e-05,
"loss": 1.7762,
"step": 210
},
{
"epoch": 0.4888888888888889,
"grad_norm": 3.496539354324341,
"learning_rate": 9.882527069566965e-05,
"loss": 1.7833,
"step": 220
},
{
"epoch": 0.5111111111111111,
"grad_norm": 3.6071531772613525,
"learning_rate": 9.853038403764021e-05,
"loss": 1.5891,
"step": 230
},
{
"epoch": 0.5333333333333333,
"grad_norm": 3.1297965049743652,
"learning_rate": 9.820305320567192e-05,
"loss": 1.7261,
"step": 240
},
{
"epoch": 0.5555555555555556,
"grad_norm": 2.707146167755127,
"learning_rate": 9.784349703130007e-05,
"loss": 1.6448,
"step": 250
},
{
"epoch": 0.5777777777777777,
"grad_norm": 3.1915082931518555,
"learning_rate": 9.745195588977192e-05,
"loss": 1.9097,
"step": 260
},
{
"epoch": 0.6,
"grad_norm": 2.056610584259033,
"learning_rate": 9.702869153934782e-05,
"loss": 1.6173,
"step": 270
},
{
"epoch": 0.6222222222222222,
"grad_norm": 3.044048309326172,
"learning_rate": 9.657398694630712e-05,
"loss": 1.8324,
"step": 280
},
{
"epoch": 0.6444444444444445,
"grad_norm": 3.3219234943389893,
"learning_rate": 9.608814609577585e-05,
"loss": 1.818,
"step": 290
},
{
"epoch": 0.6666666666666666,
"grad_norm": 2.5632572174072266,
"learning_rate": 9.557149378850254e-05,
"loss": 1.618,
"step": 300
},
{
"epoch": 0.6888888888888889,
"grad_norm": 2.3387904167175293,
"learning_rate": 9.502437542371812e-05,
"loss": 1.6228,
"step": 310
},
{
"epoch": 0.7111111111111111,
"grad_norm": 2.6883046627044678,
"learning_rate": 9.444715676822501e-05,
"loss": 1.7307,
"step": 320
},
{
"epoch": 0.7333333333333333,
"grad_norm": 2.9449591636657715,
"learning_rate": 9.384022371187003e-05,
"loss": 1.7338,
"step": 330
},
{
"epoch": 0.7555555555555555,
"grad_norm": 2.4884233474731445,
"learning_rate": 9.320398200956403e-05,
"loss": 1.4941,
"step": 340
},
{
"epoch": 0.7777777777777778,
"grad_norm": 5.446136951446533,
"learning_rate": 9.253885701002134e-05,
"loss": 1.9377,
"step": 350
},
{
"epoch": 0.8,
"grad_norm": 3.122303009033203,
"learning_rate": 9.184529337140002e-05,
"loss": 1.5295,
"step": 360
},
{
"epoch": 0.8222222222222222,
"grad_norm": 3.4902145862579346,
"learning_rate": 9.112375476403312e-05,
"loss": 1.688,
"step": 370
},
{
"epoch": 0.8444444444444444,
"grad_norm": 3.041170597076416,
"learning_rate": 9.037472356044962e-05,
"loss": 1.7526,
"step": 380
},
{
"epoch": 0.8666666666666667,
"grad_norm": 2.029374837875366,
"learning_rate": 8.959870051289241e-05,
"loss": 1.5125,
"step": 390
},
{
"epoch": 0.8888888888888888,
"grad_norm": 4.354530334472656,
"learning_rate": 8.879620441854872e-05,
"loss": 1.6507,
"step": 400
},
{
"epoch": 0.9111111111111111,
"grad_norm": 3.2502028942108154,
"learning_rate": 8.796777177271708e-05,
"loss": 1.6118,
"step": 410
},
{
"epoch": 0.9333333333333333,
"grad_norm": 2.198453903198242,
"learning_rate": 8.711395641014228e-05,
"loss": 1.6589,
"step": 420
},
{
"epoch": 0.9555555555555556,
"grad_norm": 3.6842291355133057,
"learning_rate": 8.623532913475847e-05,
"loss": 1.6891,
"step": 430
},
{
"epoch": 0.9777777777777777,
"grad_norm": 2.3169732093811035,
"learning_rate": 8.533247733808776e-05,
"loss": 1.5275,
"step": 440
},
{
"epoch": 1.0,
"grad_norm": 5.430471897125244,
"learning_rate": 8.440600460654958e-05,
"loss": 1.7984,
"step": 450
},
{
"epoch": 1.0222222222222221,
"grad_norm": 4.782519817352295,
"learning_rate": 8.345653031794292e-05,
"loss": 0.9174,
"step": 460
},
{
"epoch": 1.0444444444444445,
"grad_norm": 3.547177314758301,
"learning_rate": 8.248468922737188e-05,
"loss": 1.3878,
"step": 470
},
{
"epoch": 1.0666666666666667,
"grad_norm": 2.446197748184204,
"learning_rate": 8.149113104289063e-05,
"loss": 1.1277,
"step": 480
},
{
"epoch": 1.0888888888888888,
"grad_norm": 3.3164918422698975,
"learning_rate": 8.047651999115217e-05,
"loss": 1.0124,
"step": 490
},
{
"epoch": 1.1111111111111112,
"grad_norm": 7.3411359786987305,
"learning_rate": 7.944153437335057e-05,
"loss": 1.0102,
"step": 500
},
{
"epoch": 1.1111111111111112,
"eval_loss": 1.7722898721694946,
"eval_runtime": 4.268,
"eval_samples_per_second": 23.43,
"eval_steps_per_second": 23.43,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 1350,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1154148007526400.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
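
The JSON above is the Trainer's logging history for the first 500 optimizer steps of this run (logging_steps = 10, eval_steps = 500, save_steps = 500). As a minimal sketch of how the file might be inspected offline, assuming it has been downloaded to a local checkpoint-500/ directory (the path is an assumption, not part of the checkpoint), the snippet below parses log_history and prints the training loss and learning rate for each logged step, plus the single evaluation entry at step 500:

# Minimal sketch: read trainer_state.json and print its logged metrics.
# The local path below is assumed; adjust it to wherever the checkpoint lives.
import json

with open("checkpoint-500/trainer_state.json") as f:  # assumed local path
    state = json.load(f)

for record in state["log_history"]:
    if "loss" in record:
        # Training log entry: emitted every `logging_steps` optimizer steps.
        print(f"step {record['step']:4d}  epoch {record['epoch']:.2f}  "
              f"loss {record['loss']:.4f}  lr {record['learning_rate']:.2e}")
    elif "eval_loss" in record:
        # Evaluation entry: emitted every `eval_steps` optimizer steps.
        print(f"step {record['step']:4d}  eval_loss {record['eval_loss']:.4f}")

The same loop could feed a plotting library instead of print; only the keys shown in log_history above (step, epoch, loss, learning_rate, eval_loss) are relied upon.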