{
  "best_metric": 0.9729108101201125,
  "best_model_checkpoint": "/scratch/mrahma45/pixel/finetuned_models/mbert/mbert-base-finetuned-pos-ud-Hindi-HDTB/checkpoint-4000",
  "epoch": 15.625,
  "global_step": 6500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.24,
      "learning_rate": 7.92e-05,
      "loss": 0.9473,
      "step": 100
    },
    {
      "epoch": 0.48,
      "learning_rate": 7.946845637583894e-05,
      "loss": 0.1783,
      "step": 200
    },
    {
      "epoch": 0.72,
      "learning_rate": 7.893154362416109e-05,
      "loss": 0.1398,
      "step": 300
    },
    {
      "epoch": 0.96,
      "learning_rate": 7.840000000000001e-05,
      "loss": 0.1279,
      "step": 400
    },
    {
      "epoch": 1.2,
      "learning_rate": 7.786308724832216e-05,
      "loss": 0.0987,
      "step": 500
    },
    {
      "epoch": 1.2,
      "eval_accuracy": 0.964704546099895,
      "eval_loss": 0.1141478568315506,
      "eval_runtime": 8.4275,
      "eval_samples_per_second": 196.854,
      "eval_steps_per_second": 24.681,
      "step": 500
    },
    {
      "epoch": 1.44,
      "learning_rate": 7.73261744966443e-05,
      "loss": 0.0919,
      "step": 600
    },
    {
      "epoch": 1.68,
      "learning_rate": 7.678926174496645e-05,
      "loss": 0.0895,
      "step": 700
    },
    {
      "epoch": 1.92,
      "learning_rate": 7.62523489932886e-05,
      "loss": 0.0863,
      "step": 800
    },
    {
      "epoch": 2.16,
      "learning_rate": 7.571543624161075e-05,
      "loss": 0.0665,
      "step": 900
    },
    {
      "epoch": 2.4,
      "learning_rate": 7.51785234899329e-05,
      "loss": 0.0605,
      "step": 1000
    },
    {
      "epoch": 2.4,
      "eval_accuracy": 0.9700996677740864,
      "eval_loss": 0.10947530716657639,
      "eval_runtime": 8.4196,
      "eval_samples_per_second": 197.04,
      "eval_steps_per_second": 24.704,
      "step": 1000
    },
    {
      "epoch": 2.64,
      "learning_rate": 7.464161073825505e-05,
      "loss": 0.0602,
      "step": 1100
    },
    {
      "epoch": 2.88,
      "learning_rate": 7.410469798657718e-05,
      "loss": 0.0603,
      "step": 1200
    },
    {
      "epoch": 3.12,
      "learning_rate": 7.356778523489933e-05,
      "loss": 0.0499,
      "step": 1300
    },
    {
      "epoch": 3.37,
      "learning_rate": 7.303087248322148e-05,
      "loss": 0.0439,
      "step": 1400
    },
    {
      "epoch": 3.61,
      "learning_rate": 7.249395973154363e-05,
      "loss": 0.045,
      "step": 1500
    },
    {
      "epoch": 3.61,
      "eval_accuracy": 0.9701848539057841,
      "eval_loss": 0.10890347510576248,
      "eval_runtime": 8.429,
      "eval_samples_per_second": 196.82,
      "eval_steps_per_second": 24.677,
      "step": 1500
    },
    {
      "epoch": 3.85,
      "learning_rate": 7.195704697986577e-05,
      "loss": 0.0486,
      "step": 1600
    },
    {
      "epoch": 4.09,
      "learning_rate": 7.142013422818792e-05,
      "loss": 0.0414,
      "step": 1700
    },
    {
      "epoch": 4.33,
      "learning_rate": 7.088322147651007e-05,
      "loss": 0.0348,
      "step": 1800
    },
    {
      "epoch": 4.57,
      "learning_rate": 7.034630872483222e-05,
      "loss": 0.0359,
      "step": 1900
    },
    {
      "epoch": 4.81,
      "learning_rate": 6.980939597315437e-05,
      "loss": 0.038,
      "step": 2000
    },
    {
      "epoch": 4.81,
      "eval_accuracy": 0.9715194366357157,
      "eval_loss": 0.11456557363271713,
      "eval_runtime": 8.4277,
      "eval_samples_per_second": 196.85,
      "eval_steps_per_second": 24.68,
      "step": 2000
    },
    {
      "epoch": 5.05,
      "learning_rate": 6.927248322147651e-05,
      "loss": 0.0326,
      "step": 2100
    },
    {
      "epoch": 5.29,
      "learning_rate": 6.873557046979866e-05,
      "loss": 0.0269,
      "step": 2200
    },
    {
      "epoch": 5.53,
      "learning_rate": 6.819865771812081e-05,
      "loss": 0.0285,
      "step": 2300
    },
    {
      "epoch": 5.77,
      "learning_rate": 6.766174496644296e-05,
      "loss": 0.0294,
      "step": 2400
    },
    {
      "epoch": 6.01,
      "learning_rate": 6.712483221476511e-05,
      "loss": 0.0308,
      "step": 2500
    },
    {
      "epoch": 6.01,
      "eval_accuracy": 0.9711219013544595,
      "eval_loss": 0.11870458722114563,
      "eval_runtime": 8.4327,
      "eval_samples_per_second": 196.733,
      "eval_steps_per_second": 24.666,
      "step": 2500
    },
    {
      "epoch": 6.25,
      "learning_rate": 6.658791946308726e-05,
      "loss": 0.0213,
      "step": 2600
    },
    {
      "epoch": 6.49,
      "learning_rate": 6.60510067114094e-05,
      "loss": 0.0229,
      "step": 2700
    },
    {
      "epoch": 6.73,
      "learning_rate": 6.551409395973155e-05,
      "loss": 0.0261,
      "step": 2800
    },
    {
      "epoch": 6.97,
      "learning_rate": 6.49771812080537e-05,
      "loss": 0.0242,
      "step": 2900
    },
    {
      "epoch": 7.21,
      "learning_rate": 6.444026845637585e-05,
      "loss": 0.02,
      "step": 3000
    },
    {
      "epoch": 7.21,
      "eval_accuracy": 0.9710935059772269,
      "eval_loss": 0.1298820823431015,
      "eval_runtime": 8.4732,
      "eval_samples_per_second": 195.793,
      "eval_steps_per_second": 24.548,
      "step": 3000
    },
    {
      "epoch": 7.45,
      "learning_rate": 6.3903355704698e-05,
      "loss": 0.0186,
      "step": 3100
    },
    {
      "epoch": 7.69,
      "learning_rate": 6.336644295302015e-05,
      "loss": 0.0191,
      "step": 3200
    },
    {
      "epoch": 7.93,
      "learning_rate": 6.28295302013423e-05,
      "loss": 0.0202,
      "step": 3300
    },
    {
      "epoch": 8.17,
      "learning_rate": 6.229261744966444e-05,
      "loss": 0.0163,
      "step": 3400
    },
    {
      "epoch": 8.41,
      "learning_rate": 6.175570469798658e-05,
      "loss": 0.0159,
      "step": 3500
    },
    {
      "epoch": 8.41,
      "eval_accuracy": 0.9705539938098078,
      "eval_loss": 0.15600977838039398,
      "eval_runtime": 8.4103,
      "eval_samples_per_second": 197.259,
      "eval_steps_per_second": 24.732,
      "step": 3500
    },
    {
      "epoch": 8.65,
      "learning_rate": 6.121879194630873e-05,
      "loss": 0.0181,
      "step": 3600
    },
    {
      "epoch": 8.89,
      "learning_rate": 6.068187919463087e-05,
      "loss": 0.0175,
      "step": 3700
    },
    {
      "epoch": 9.13,
      "learning_rate": 6.014496644295302e-05,
      "loss": 0.0147,
      "step": 3800
    },
    {
      "epoch": 9.38,
      "learning_rate": 5.960805369127517e-05,
      "loss": 0.0134,
      "step": 3900
    },
    {
      "epoch": 9.62,
      "learning_rate": 5.907114093959732e-05,
      "loss": 0.0142,
      "step": 4000
    },
    {
      "epoch": 9.62,
      "eval_accuracy": 0.9729108101201125,
      "eval_loss": 0.13532687723636627,
      "eval_runtime": 8.4032,
      "eval_samples_per_second": 197.426,
      "eval_steps_per_second": 24.753,
      "step": 4000
    },
    {
      "epoch": 9.86,
      "learning_rate": 5.8534228187919466e-05,
      "loss": 0.0152,
      "step": 4100
    },
    {
      "epoch": 10.1,
      "learning_rate": 5.7997315436241614e-05,
      "loss": 0.0138,
      "step": 4200
    },
    {
      "epoch": 10.34,
      "learning_rate": 5.746040268456376e-05,
      "loss": 0.0109,
      "step": 4300
    },
    {
      "epoch": 10.58,
      "learning_rate": 5.692348993288591e-05,
      "loss": 0.0099,
      "step": 4400
    },
    {
      "epoch": 10.82,
      "learning_rate": 5.638657718120806e-05,
      "loss": 0.0124,
      "step": 4500
    },
    {
      "epoch": 10.82,
      "eval_accuracy": 0.9713206689950876,
      "eval_loss": 0.14761441946029663,
      "eval_runtime": 8.4464,
      "eval_samples_per_second": 196.415,
      "eval_steps_per_second": 24.626,
      "step": 4500
    },
    {
      "epoch": 11.06,
      "learning_rate": 5.58496644295302e-05,
      "loss": 0.0136,
      "step": 4600
    },
    {
      "epoch": 11.3,
      "learning_rate": 5.531275167785235e-05,
      "loss": 0.0091,
      "step": 4700
    },
    {
      "epoch": 11.54,
      "learning_rate": 5.47758389261745e-05,
      "loss": 0.0104,
      "step": 4800
    },
    {
      "epoch": 11.78,
      "learning_rate": 5.4238926174496645e-05,
      "loss": 0.011,
      "step": 4900
    },
    {
      "epoch": 12.02,
      "learning_rate": 5.3702013422818794e-05,
      "loss": 0.0112,
      "step": 5000
    },
    {
      "epoch": 12.02,
      "eval_accuracy": 0.9716614135218786,
      "eval_loss": 0.1558678150177002,
      "eval_runtime": 8.4483,
      "eval_samples_per_second": 196.371,
      "eval_steps_per_second": 24.62,
      "step": 5000
    },
    {
      "epoch": 12.26,
      "learning_rate": 5.316510067114094e-05,
      "loss": 0.0079,
      "step": 5100
    },
    {
      "epoch": 12.5,
      "learning_rate": 5.262818791946309e-05,
      "loss": 0.0089,
      "step": 5200
    },
    {
      "epoch": 12.74,
      "learning_rate": 5.209127516778524e-05,
      "loss": 0.009,
      "step": 5300
    },
    {
      "epoch": 12.98,
      "learning_rate": 5.155436241610739e-05,
      "loss": 0.0098,
      "step": 5400
    },
    {
      "epoch": 13.22,
      "learning_rate": 5.1017449664429535e-05,
      "loss": 0.0081,
      "step": 5500
    },
    {
      "epoch": 13.22,
      "eval_accuracy": 0.9694465740977369,
      "eval_loss": 0.17652873694896698,
      "eval_runtime": 8.4104,
      "eval_samples_per_second": 197.255,
      "eval_steps_per_second": 24.731,
      "step": 5500
    },
    {
      "epoch": 13.46,
      "learning_rate": 5.048053691275168e-05,
      "loss": 0.0078,
      "step": 5600
    },
    {
      "epoch": 13.7,
      "learning_rate": 4.994362416107383e-05,
      "loss": 0.0074,
      "step": 5700
    },
    {
      "epoch": 13.94,
      "learning_rate": 4.940671140939597e-05,
      "loss": 0.008,
      "step": 5800
    },
    {
      "epoch": 14.18,
      "learning_rate": 4.886979865771812e-05,
      "loss": 0.0068,
      "step": 5900
    },
    {
      "epoch": 14.42,
      "learning_rate": 4.833288590604027e-05,
      "loss": 0.0057,
      "step": 6000
    },
    {
      "epoch": 14.42,
      "eval_accuracy": 0.9713490643723202,
      "eval_loss": 0.1719847023487091,
      "eval_runtime": 8.4464,
      "eval_samples_per_second": 196.415,
      "eval_steps_per_second": 24.626,
      "step": 6000
    },
    {
      "epoch": 14.66,
      "learning_rate": 4.779597315436242e-05,
      "loss": 0.0081,
      "step": 6100
    },
    {
      "epoch": 14.9,
      "learning_rate": 4.7259060402684566e-05,
      "loss": 0.0078,
      "step": 6200
    },
    {
      "epoch": 15.14,
      "learning_rate": 4.6722147651006714e-05,
      "loss": 0.0069,
      "step": 6300
    },
    {
      "epoch": 15.38,
      "learning_rate": 4.619060402684565e-05,
      "loss": 0.0056,
      "step": 6400
    },
    {
      "epoch": 15.62,
      "learning_rate": 4.5653691275167796e-05,
      "loss": 0.0066,
      "step": 6500
    },
    {
      "epoch": 15.62,
      "eval_accuracy": 0.9701564585285516,
      "eval_loss": 0.18945461511611938,
      "eval_runtime": 8.4725,
      "eval_samples_per_second": 195.811,
      "eval_steps_per_second": 24.55,
      "step": 6500
    },
    {
      "epoch": 15.62,
      "step": 6500,
      "total_flos": 2.716286806308864e+16,
      "train_loss": 0.046262450704207786,
      "train_runtime": 1858.7918,
      "train_samples_per_second": 258.232,
      "train_steps_per_second": 8.07
    }
  ],
  "max_steps": 15000,
  "num_train_epochs": 37,
  "total_flos": 2.716286806308864e+16,
  "trial_name": null,
  "trial_params": null
}