|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 2.0, |
|
"global_step": 873, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 3.0303030303030305e-06, |
|
"loss": 1.946, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 6.060606060606061e-06, |
|
"loss": 1.908, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 9.090909090909091e-06, |
|
"loss": 2.1083, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.2121212121212122e-05, |
|
"loss": 2.3218, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.5151515151515153e-05, |
|
"loss": 1.8338, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.8181818181818182e-05, |
|
"loss": 2.0202, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 2.1212121212121215e-05, |
|
"loss": 2.1332, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 2.4242424242424244e-05, |
|
"loss": 1.8593, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 2.7272727272727273e-05, |
|
"loss": 1.5359, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 3.0303030303030306e-05, |
|
"loss": 1.327, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 1.7252, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 3.6363636363636364e-05, |
|
"loss": 1.4351, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 3.939393939393939e-05, |
|
"loss": 1.2774, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 4.242424242424243e-05, |
|
"loss": 1.5145, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 4.545454545454546e-05, |
|
"loss": 1.1529, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 4.848484848484849e-05, |
|
"loss": 1.0047, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 5.151515151515152e-05, |
|
"loss": 1.3872, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 5.4545454545454546e-05, |
|
"loss": 1.1229, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 5.757575757575758e-05, |
|
"loss": 1.3386, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 6.060606060606061e-05, |
|
"loss": 1.2493, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 6.363636363636364e-05, |
|
"loss": 1.1427, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 6.666666666666667e-05, |
|
"loss": 1.0895, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 6.96969696969697e-05, |
|
"loss": 1.1989, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 7.272727272727273e-05, |
|
"loss": 1.0438, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 7.575757575757576e-05, |
|
"loss": 1.176, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 7.878787878787879e-05, |
|
"loss": 1.1372, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 8.181818181818183e-05, |
|
"loss": 1.2983, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 8.484848484848486e-05, |
|
"loss": 0.9371, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 8.787878787878789e-05, |
|
"loss": 1.2299, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.090909090909092e-05, |
|
"loss": 0.9441, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.393939393939395e-05, |
|
"loss": 1.0011, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 9.696969696969698e-05, |
|
"loss": 1.1704, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001, |
|
"loss": 1.1193, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00010303030303030303, |
|
"loss": 1.1559, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00010606060606060606, |
|
"loss": 0.8677, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00010909090909090909, |
|
"loss": 1.0865, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00011212121212121212, |
|
"loss": 1.0922, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00011515151515151516, |
|
"loss": 0.9434, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001181818181818182, |
|
"loss": 0.9144, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012121212121212122, |
|
"loss": 0.9546, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00012424242424242425, |
|
"loss": 1.0654, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00012727272727272728, |
|
"loss": 0.8077, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.0001303030303030303, |
|
"loss": 1.0758, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00013333333333333334, |
|
"loss": 1.1512, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00013636363636363637, |
|
"loss": 0.84, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.0001393939393939394, |
|
"loss": 1.0567, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00014242424242424243, |
|
"loss": 1.0165, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00014545454545454546, |
|
"loss": 0.8678, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00014848484848484849, |
|
"loss": 1.055, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00015151515151515152, |
|
"loss": 1.0669, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00015454545454545454, |
|
"loss": 0.9915, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00015757575757575757, |
|
"loss": 0.993, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.0001606060606060606, |
|
"loss": 1.1085, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00016363636363636366, |
|
"loss": 0.9391, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.0001666666666666667, |
|
"loss": 0.975, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00016969696969696972, |
|
"loss": 1.0697, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00017272727272727275, |
|
"loss": 0.9462, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00017575757575757578, |
|
"loss": 1.1209, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.0001787878787878788, |
|
"loss": 1.0648, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00018181818181818183, |
|
"loss": 0.9964, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00018484848484848484, |
|
"loss": 0.8451, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.0001878787878787879, |
|
"loss": 0.8437, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00019090909090909092, |
|
"loss": 1.1271, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00019393939393939395, |
|
"loss": 1.161, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00019696969696969698, |
|
"loss": 1.0032, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0002, |
|
"loss": 1.1258, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00019999988957695886, |
|
"loss": 0.9543, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00019999955830807923, |
|
"loss": 1.0274, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00019999900619409279, |
|
"loss": 0.9334, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0001999982332362188, |
|
"loss": 1.0398, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00019999723943616433, |
|
"loss": 0.9049, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00019999602479612417, |
|
"loss": 0.7452, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00019999458931878073, |
|
"loss": 0.8762, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00019999293300730427, |
|
"loss": 1.0941, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00019999105586535268, |
|
"loss": 0.7713, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00019998895789707154, |
|
"loss": 0.9233, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00019998663910709416, |
|
"loss": 0.8634, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00019998409950054146, |
|
"loss": 0.9697, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00019998133908302209, |
|
"loss": 1.0816, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0001999783578606323, |
|
"loss": 0.9659, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00019997515583995603, |
|
"loss": 0.9644, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00019997173302806478, |
|
"loss": 0.8561, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00019996808943251773, |
|
"loss": 1.0016, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.0001999642250613616, |
|
"loss": 0.8951, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00019996013992313073, |
|
"loss": 1.0157, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00019995583402684694, |
|
"loss": 0.9414, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00019995130738201966, |
|
"loss": 0.8097, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00019994655999864582, |
|
"loss": 0.8606, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.0001999415918872098, |
|
"loss": 1.0427, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00019993640305868352, |
|
"loss": 0.9578, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00019993099352452623, |
|
"loss": 1.1097, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00019992536329668478, |
|
"loss": 0.8119, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00019991951238759325, |
|
"loss": 0.9915, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.0001999134408101731, |
|
"loss": 0.838, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00019990714857783326, |
|
"loss": 0.8935, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00019990063570446984, |
|
"loss": 0.7914, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00019989390220446622, |
|
"loss": 0.8724, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00019988694809269314, |
|
"loss": 1.0374, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00019987977338450845, |
|
"loss": 0.9028, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00019987237809575723, |
|
"loss": 0.9986, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00019986476224277165, |
|
"loss": 1.113, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00019985692584237108, |
|
"loss": 0.8395, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00019984886891186184, |
|
"loss": 1.0134, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.0001998405914690374, |
|
"loss": 0.8845, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00019983209353217812, |
|
"loss": 0.7507, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00019982337512005138, |
|
"loss": 0.9073, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00019981443625191148, |
|
"loss": 0.9973, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00019980527694749952, |
|
"loss": 1.0733, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00019979589722704346, |
|
"loss": 0.9148, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00019978629711125812, |
|
"loss": 0.8385, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00019977647662134488, |
|
"loss": 0.75, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.00019976643577899195, |
|
"loss": 0.9002, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.00019975617460637416, |
|
"loss": 0.8754, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0001997456931261529, |
|
"loss": 0.8886, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.00019973499136147606, |
|
"loss": 1.0058, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00019972406933597812, |
|
"loss": 0.9276, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00019971292707377991, |
|
"loss": 0.9922, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00019970156459948873, |
|
"loss": 0.9507, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.0001996899819381981, |
|
"loss": 0.9619, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00019967817911548794, |
|
"loss": 0.8163, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00019966615615742424, |
|
"loss": 1.0647, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0001996539130905593, |
|
"loss": 0.9348, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00019964144994193142, |
|
"loss": 1.0523, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.000199628766739065, |
|
"loss": 0.9063, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00019961586350997033, |
|
"loss": 1.0227, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0001996027402831438, |
|
"loss": 1.006, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00019958939708756746, |
|
"loss": 0.9082, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00019957583395270923, |
|
"loss": 0.8756, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0001995620509085228, |
|
"loss": 0.8311, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00019954804798544745, |
|
"loss": 1.0332, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00019953382521440815, |
|
"loss": 0.9427, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00019951938262681527, |
|
"loss": 0.838, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.0001995047202545647, |
|
"loss": 0.8509, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00019948983813003774, |
|
"loss": 0.8944, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00019947473628610099, |
|
"loss": 0.9569, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00019945941475610623, |
|
"loss": 0.7805, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00019944387357389052, |
|
"loss": 0.9337, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0001994281127737759, |
|
"loss": 0.8712, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0001994121323905695, |
|
"loss": 0.9264, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0001993959324595634, |
|
"loss": 0.9323, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00019937951301653444, |
|
"loss": 0.8331, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0001993628740977444, |
|
"loss": 0.902, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0001993460157399396, |
|
"loss": 0.8676, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00019932893798035116, |
|
"loss": 0.8525, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00019931164085669456, |
|
"loss": 0.8571, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00019929412440716985, |
|
"loss": 1.0006, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00019927638867046142, |
|
"loss": 0.9849, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00019925843368573794, |
|
"loss": 0.9064, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0001992402594926523, |
|
"loss": 0.9716, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.0001992218661313415, |
|
"loss": 0.7553, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00019920325364242654, |
|
"loss": 0.7921, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00019918442206701245, |
|
"loss": 0.7994, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.0001991653714466879, |
|
"loss": 0.8296, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00019914610182352548, |
|
"loss": 0.8116, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00019912661324008148, |
|
"loss": 0.9844, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00019910690573939557, |
|
"loss": 0.865, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00019908697936499103, |
|
"loss": 0.959, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00019906683416087448, |
|
"loss": 0.7727, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00019904647017153582, |
|
"loss": 0.707, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00019902588744194813, |
|
"loss": 0.8597, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00019900508601756756, |
|
"loss": 0.9146, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0001989840659443332, |
|
"loss": 0.9571, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.0001989628272686671, |
|
"loss": 0.8537, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00019894137003747403, |
|
"loss": 0.828, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00019891969429814145, |
|
"loss": 0.8055, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0001988978000985394, |
|
"loss": 0.8432, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.0001988756874870203, |
|
"loss": 0.8101, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00019885335651241903, |
|
"loss": 0.9072, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0001988308072240527, |
|
"loss": 0.7862, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00019880803967172047, |
|
"loss": 0.8303, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00019878505390570362, |
|
"loss": 0.9489, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.0001987618499767653, |
|
"loss": 1.0125, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0001987384279361505, |
|
"loss": 0.809, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.00019871478783558587, |
|
"loss": 0.9488, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0001986909297272796, |
|
"loss": 0.9664, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0001986668536639215, |
|
"loss": 0.9657, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.0001986425596986825, |
|
"loss": 0.8123, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00019861804788521493, |
|
"loss": 0.9482, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00019859331827765212, |
|
"loss": 0.879, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00019856837093060848, |
|
"loss": 0.896, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00019854320589917927, |
|
"loss": 1.0729, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.00019851782323894042, |
|
"loss": 0.9844, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0001984922230059486, |
|
"loss": 0.9131, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.00019846640525674082, |
|
"loss": 0.9417, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.00019844037004833473, |
|
"loss": 0.9633, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.0001984141174382279, |
|
"loss": 0.968, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00019838764748439827, |
|
"loss": 0.8447, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00019836096024530373, |
|
"loss": 0.8638, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00019833405577988195, |
|
"loss": 0.9346, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.0001983069341475504, |
|
"loss": 0.8969, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00019827959540820613, |
|
"loss": 0.8499, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00019825203962222572, |
|
"loss": 0.8041, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00019822426685046497, |
|
"loss": 0.9216, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00019819627715425903, |
|
"loss": 0.906, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.000198168070595422, |
|
"loss": 0.8969, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.000198139647236247, |
|
"loss": 0.7949, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.00019811100713950587, |
|
"loss": 0.8996, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.00019808215036844917, |
|
"loss": 0.9118, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0001980530769868059, |
|
"loss": 0.7355, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.00019802378705878354, |
|
"loss": 0.8344, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.00019799428064906774, |
|
"loss": 0.9639, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0001979645578228222, |
|
"loss": 0.852, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0001979346186456887, |
|
"loss": 0.8493, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.00019790446318378665, |
|
"loss": 0.851, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.00019787409150371328, |
|
"loss": 0.7161, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.00019784350367254322, |
|
"loss": 0.9846, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0001978126997578285, |
|
"loss": 0.7883, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00019778167982759833, |
|
"loss": 0.8691, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00019775044395035907, |
|
"loss": 0.928, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0001977189921950939, |
|
"loss": 0.8244, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0001976873246312628, |
|
"loss": 1.0413, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0001976554413288023, |
|
"loss": 0.8261, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.0001976233423581255, |
|
"loss": 0.823, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00019759102779012166, |
|
"loss": 0.9386, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00019755849769615628, |
|
"loss": 0.8156, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00019752575214807076, |
|
"loss": 0.8556, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00019749279121818235, |
|
"loss": 0.7769, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00019745961497928406, |
|
"loss": 1.0772, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00019742622350464418, |
|
"loss": 0.8147, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0001973926168680066, |
|
"loss": 0.9529, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00019735879514359018, |
|
"loss": 0.8688, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00019732475840608888, |
|
"loss": 0.9647, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00019729050673067156, |
|
"loss": 0.837, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00019725604019298163, |
|
"loss": 0.9211, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.00019722135886913715, |
|
"loss": 0.9434, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.0001971864628357304, |
|
"loss": 0.6506, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.00019715135216982798, |
|
"loss": 0.8052, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.00019711602694897037, |
|
"loss": 0.7852, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.00019708048725117192, |
|
"loss": 0.9283, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.0001970447331549207, |
|
"loss": 0.9081, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00019700876473917824, |
|
"loss": 0.9036, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00019697258208337934, |
|
"loss": 0.716, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00019693618526743197, |
|
"loss": 0.8192, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0001968995743717171, |
|
"loss": 0.9773, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.00019686274947708848, |
|
"loss": 0.8698, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0001968257106648724, |
|
"loss": 0.9062, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.00019678845801686764, |
|
"loss": 0.8984, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.00019675099161534521, |
|
"loss": 0.8087, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.00019671331154304822, |
|
"loss": 0.8272, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.00019667541788319162, |
|
"loss": 0.784, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.00019663731071946206, |
|
"loss": 0.8777, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.00019659899013601772, |
|
"loss": 0.8534, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.00019656045621748808, |
|
"loss": 0.9645, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.00019652170904897387, |
|
"loss": 0.9692, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.00019648274871604662, |
|
"loss": 0.838, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.00019644357530474872, |
|
"loss": 0.7445, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0001964041889015931, |
|
"loss": 0.9065, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.00019636458959356316, |
|
"loss": 0.7806, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.00019632477746811232, |
|
"loss": 0.7971, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.00019628475261316417, |
|
"loss": 0.8409, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.00019624451511711198, |
|
"loss": 0.7432, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.00019620406506881875, |
|
"loss": 0.9096, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.00019616340255761676, |
|
"loss": 0.8004, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.00019612252767330763, |
|
"loss": 0.7978, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0001960814405061619, |
|
"loss": 0.9535, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.000196040141146919, |
|
"loss": 0.9945, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0001959986296867869, |
|
"loss": 0.9703, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00019595690621744208, |
|
"loss": 0.9639, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00019591497083102914, |
|
"loss": 0.9312, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.00019587282362016083, |
|
"loss": 0.7709, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.0001958304646779175, |
|
"loss": 0.8547, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.00019578789409784727, |
|
"loss": 0.8081, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.00019574511197396563, |
|
"loss": 0.8476, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.00019570211840075517, |
|
"loss": 0.9658, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.00019565891347316552, |
|
"loss": 0.7778, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0001956154972866131, |
|
"loss": 0.9926, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.0001955718699369808, |
|
"loss": 0.957, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.000195528031520618, |
|
"loss": 0.9396, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.00019548398213434007, |
|
"loss": 0.9049, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.00019543972187542833, |
|
"loss": 0.9683, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.00019539525084162992, |
|
"loss": 0.8555, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.00019535056913115725, |
|
"loss": 0.8489, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.0001953056768426882, |
|
"loss": 0.8728, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00019526057407536564, |
|
"loss": 0.9443, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00019521526092879725, |
|
"loss": 0.8161, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00019516973750305532, |
|
"loss": 0.8936, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00019512400389867657, |
|
"loss": 0.8315, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00019507806021666188, |
|
"loss": 0.9298, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00019503190655847604, |
|
"loss": 0.8235, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00019498554302604766, |
|
"loss": 0.9245, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.0001949389697217687, |
|
"loss": 0.8302, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.00019489218674849455, |
|
"loss": 0.8488, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.00019484519420954354, |
|
"loss": 0.8177, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.00019479799220869682, |
|
"loss": 1.0039, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.00019475058085019825, |
|
"loss": 0.7685, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.00019470296023875387, |
|
"loss": 0.9174, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.000194655130479532, |
|
"loss": 1.0997, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.00019460709167816274, |
|
"loss": 0.9759, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.0001945588439407379, |
|
"loss": 0.9397, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.00019451038737381077, |
|
"loss": 1.0367, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.00019446172208439574, |
|
"loss": 0.8298, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0001944128481799682, |
|
"loss": 0.9094, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.00019436376576846423, |
|
"loss": 1.1234, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.00019431447495828045, |
|
"loss": 0.9103, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.0001942649758582737, |
|
"loss": 0.7841, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.00019421526857776072, |
|
"loss": 0.8817, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.00019416535322651818, |
|
"loss": 1.0682, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.00019411522991478214, |
|
"loss": 0.9201, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.000194064898753248, |
|
"loss": 4.1834, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.00019401435985307012, |
|
"loss": 1.0391, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.00019396361332586166, |
|
"loss": 2.5015, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0001939126592836944, |
|
"loss": 0.7927, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.0001938614978390983, |
|
"loss": 2.2345, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.00019381012910506146, |
|
"loss": 0.9311, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.00019375855319502962, |
|
"loss": 0.9713, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.00019370677022290624, |
|
"loss": 0.8967, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.00019365478030305196, |
|
"loss": 3.095, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0001936025835502845, |
|
"loss": 1.1008, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0001935501800798783, |
|
"loss": 1.5409, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.00019349757000756444, |
|
"loss": 1.02, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.00019344475344953012, |
|
"loss": 1.0101, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0001933917305224187, |
|
"loss": 0.7686, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0001933385013433292, |
|
"loss": 1.1061, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0001932850660298162, |
|
"loss": 0.8083, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0001932314246998895, |
|
"loss": 1.1942, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.00019317757747201384, |
|
"loss": 0.8551, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00019312352446510878, |
|
"loss": 0.9049, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00019306926579854821, |
|
"loss": 0.7072, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00019301480159216028, |
|
"loss": 0.8552, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00019296013196622706, |
|
"loss": 0.8414, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0001929052570414843, |
|
"loss": 0.9198, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00019285017693912107, |
|
"loss": 2.1953, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00019279489178077969, |
|
"loss": 0.851, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00019273940168855518, |
|
"loss": 1.0239, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.00019268370678499533, |
|
"loss": 1.5125, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00019262780719310008, |
|
"loss": 0.9171, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00019257170303632148, |
|
"loss": 0.9794, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00019251539443856344, |
|
"loss": 0.9023, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00019245888152418124, |
|
"loss": 1.058, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.00019240216441798142, |
|
"loss": 0.9411, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0001923452432452215, |
|
"loss": 1.197, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0001922881181316097, |
|
"loss": 0.9253, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.0001922307892033046, |
|
"loss": 1.156, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00019217325658691482, |
|
"loss": 0.9424, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00019211552040949891, |
|
"loss": 1.1147, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00019205758079856498, |
|
"loss": 0.8528, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0001919994378820704, |
|
"loss": 0.8105, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.00019194109178842153, |
|
"loss": 0.9279, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00019188254264647337, |
|
"loss": 0.9231, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00019182379058552948, |
|
"loss": 1.0425, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00019176483573534142, |
|
"loss": 0.8794, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00019170567822610873, |
|
"loss": 0.9873, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0001916463181884784, |
|
"loss": 0.8146, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.00019158675575354478, |
|
"loss": 1.027, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.00019152699105284913, |
|
"loss": 0.8093, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.0001914670242183795, |
|
"loss": 0.951, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.00019140685538257028, |
|
"loss": 0.9268, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.00019134648467830198, |
|
"loss": 1.0205, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.00019128591223890092, |
|
"loss": 0.9043, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.00019122513819813902, |
|
"loss": 0.7387, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.0001911641626902333, |
|
"loss": 0.9422, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00019110298584984578, |
|
"loss": 0.9015, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0001910416078120832, |
|
"loss": 0.7522, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00019098002871249646, |
|
"loss": 0.9722, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.0001909182486870806, |
|
"loss": 0.8358, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00019085626787227443, |
|
"loss": 0.9859, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00019079408640496013, |
|
"loss": 0.7796, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00019073170442246302, |
|
"loss": 0.8617, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0001906691220625513, |
|
"loss": 0.7727, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.0001906063394634356, |
|
"loss": 0.8786, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0001905433567637689, |
|
"loss": 0.9117, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.000190480174102646, |
|
"loss": 0.9182, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0001904167916196033, |
|
"loss": 0.9706, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.0001903532094546186, |
|
"loss": 0.8036, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0001902894277481105, |
|
"loss": 0.902, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.00019022544664093854, |
|
"loss": 0.9231, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.00019016126627440237, |
|
"loss": 0.9751, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0001900968867902419, |
|
"loss": 0.8373, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0001900323083306367, |
|
"loss": 0.8695, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0001899675310382057, |
|
"loss": 0.8654, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.00018990255505600706, |
|
"loss": 0.98, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.00018983738052753767, |
|
"loss": 0.7454, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.00018977200759673295, |
|
"loss": 0.829, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00018970643640796642, |
|
"loss": 0.8262, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.0001896406671060495, |
|
"loss": 1.0659, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00018957469983623112, |
|
"loss": 0.8551, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00018950853474419742, |
|
"loss": 0.7991, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.0001894421719760714, |
|
"loss": 0.8662, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.00018937561167841263, |
|
"loss": 0.8817, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.00018930885399821693, |
|
"loss": 1.0894, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.000189241899082916, |
|
"loss": 0.8225, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00018917474708037718, |
|
"loss": 0.9065, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00018910739813890302, |
|
"loss": 0.8779, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00018903985240723104, |
|
"loss": 0.7909, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00018897211003453328, |
|
"loss": 0.7649, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00018890417117041619, |
|
"loss": 0.9788, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00018883603596492004, |
|
"loss": 0.938, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00018876770456851877, |
|
"loss": 0.9032, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00018869917713211964, |
|
"loss": 0.9059, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00018863045380706274, |
|
"loss": 0.8896, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.0001885615347451209, |
|
"loss": 0.7614, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.0001884924200984991, |
|
"loss": 0.978, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.0001884231100198344, |
|
"loss": 0.9406, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.00018835360466219533, |
|
"loss": 0.7555, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.0001882839041790818, |
|
"loss": 0.9049, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00018821400872442458, |
|
"loss": 0.7041, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00018814391845258505, |
|
"loss": 0.8995, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.0001880736335183548, |
|
"loss": 0.7461, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00018800315407695539, |
|
"loss": 0.9954, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00018793248028403788, |
|
"loss": 0.9035, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.0001878616122956826, |
|
"loss": 0.9083, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00018779055026839868, |
|
"loss": 0.7286, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.0001877192943591239, |
|
"loss": 0.8001, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00018764784472522403, |
|
"loss": 0.8795, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.0001875762015244929, |
|
"loss": 0.8912, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00018750436491515163, |
|
"loss": 0.8848, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00018743233505584862, |
|
"loss": 0.8512, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00018736011210565898, |
|
"loss": 0.8537, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00018728769622408423, |
|
"loss": 0.8777, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00018721508757105202, |
|
"loss": 0.7849, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00018714228630691576, |
|
"loss": 0.9669, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.0001870692925924541, |
|
"loss": 0.9299, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 0.00018699610658887088, |
|
"loss": 1.0188, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 0.00018692272845779448, |
|
"loss": 0.8388, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 0.00018684915836127765, |
|
"loss": 0.7904, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 0.00018677539646179707, |
|
"loss": 0.9689, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 0.00018670144292225297, |
|
"loss": 0.7339, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 0.00018662729790596888, |
|
"loss": 0.7894, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 0.00018655296157669117, |
|
"loss": 0.7163, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 0.00018647843409858869, |
|
"loss": 0.8642, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 0.00018640371563625246, |
|
"loss": 0.9281, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 0.00018632880635469526, |
|
"loss": 0.834, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 0.00018625370641935129, |
|
"loss": 0.7316, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 0.00018617841599607586, |
|
"loss": 0.8504, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 0.00018610293525114492, |
|
"loss": 0.8731, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 0.00018602726435125474, |
|
"loss": 0.8803, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 0.0001859514034635215, |
|
"loss": 0.8417, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 0.000185875352755481, |
|
"loss": 0.8947, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 0.00018579911239508827, |
|
"loss": 0.8368, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 0.00018572268255071718, |
|
"loss": 0.8231, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 0.00018564606339116, |
|
"loss": 0.8576, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 0.0001855692550856272, |
|
"loss": 0.8753, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 0.00018549225780374685, |
|
"loss": 0.7778, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 0.00018541507171556445, |
|
"loss": 0.7516, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.0001853376969915425, |
|
"loss": 0.7466, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.00018526013380255999, |
|
"loss": 0.917, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.00018518238231991218, |
|
"loss": 0.9042, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.00018510444271531022, |
|
"loss": 0.8587, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.00018502631516088066, |
|
"loss": 0.9001, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.0001849479998291651, |
|
"loss": 0.7977, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.00018486949689311993, |
|
"loss": 0.8711, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.00018479080652611583, |
|
"loss": 0.7192, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.0001847119289019373, |
|
"loss": 0.9608, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 0.00018463286419478255, |
|
"loss": 0.7097, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 0.0001845536125792629, |
|
"loss": 0.7354, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 0.0001844741742304024, |
|
"loss": 0.8711, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 0.00018439454932363755, |
|
"loss": 0.8832, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.00018431473803481684, |
|
"loss": 0.932, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.00018423474054020034, |
|
"loss": 0.8394, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.00018415455701645942, |
|
"loss": 0.7698, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.00018407418764067627, |
|
"loss": 0.8856, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 0.00018399363259034347, |
|
"loss": 0.8529, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 0.00018391289204336368, |
|
"loss": 0.9898, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 0.00018383196617804926, |
|
"loss": 0.8312, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 0.00018375085517312182, |
|
"loss": 0.8234, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 0.00018366955920771184, |
|
"loss": 0.7871, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 0.00018358807846135825, |
|
"loss": 0.9814, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 0.00018350641311400812, |
|
"loss": 0.8183, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 0.0001834245633460161, |
|
"loss": 0.8961, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 0.00018334252933814427, |
|
"loss": 0.9166, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.00018326031127156148, |
|
"loss": 1.0031, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.00018317790932784317, |
|
"loss": 0.8171, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.0001830953236889707, |
|
"loss": 0.83, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.00018301255453733134, |
|
"loss": 0.8134, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 0.0001829296020557174, |
|
"loss": 0.8561, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 0.0001828464664273263, |
|
"loss": 0.8669, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 0.0001827631478357597, |
|
"loss": 1.003, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 0.00018267964646502357, |
|
"loss": 0.8715, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 0.00018259596249952731, |
|
"loss": 0.7434, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.00018251209612408373, |
|
"loss": 0.9163, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.00018242804752390844, |
|
"loss": 1.0639, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.00018234381688461942, |
|
"loss": 0.8266, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.00018225940439223684, |
|
"loss": 0.7582, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 0.0001821748102331823, |
|
"loss": 0.8547, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 0.0001820900345942787, |
|
"loss": 0.7908, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 0.00018200507766274977, |
|
"loss": 0.6203, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 0.0001819199396262195, |
|
"loss": 0.806, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 0.0001818346206727119, |
|
"loss": 0.8016, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 0.0001817491209906506, |
|
"loss": 0.8548, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 0.00018166344076885827, |
|
"loss": 0.9194, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 0.00018157758019655634, |
|
"loss": 0.8704, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 0.00018149153946336446, |
|
"loss": 0.8373, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 0.0001814053187593003, |
|
"loss": 0.8229, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 0.00018131891827477884, |
|
"loss": 0.8289, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 0.00018123233820061218, |
|
"loss": 0.7753, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 0.00018114557872800905, |
|
"loss": 1.029, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 0.0001810586400485743, |
|
"loss": 0.6198, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 0.0001809715223543087, |
|
"loss": 0.8418, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 0.00018088422583760813, |
|
"loss": 0.7421, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 0.0001807967506912636, |
|
"loss": 0.8032, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 0.00018070909710846052, |
|
"loss": 0.7956, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 0.00018062126528277844, |
|
"loss": 0.9013, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 0.00018053325540819045, |
|
"loss": 0.9582, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 0.00018044506767906295, |
|
"loss": 0.6845, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 0.00018035670229015507, |
|
"loss": 0.8731, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 0.0001802681594366183, |
|
"loss": 0.8369, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 0.00018017943931399603, |
|
"loss": 0.6557, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 0.00018009054211822324, |
|
"loss": 0.7997, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 0.0001800014680456259, |
|
"loss": 0.8348, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 0.0001799122172929206, |
|
"loss": 0.9043, |
|
"step": 500 |
|
}, |
|
{
"epoch": 1.15,
"learning_rate": 0.00017982279005721407,
"loss": 0.8499,
"step": 501
},
{
"epoch": 1.15,
"learning_rate": 0.00017973318653600293,
"loss": 0.8595,
"step": 502
},
{
"epoch": 1.15,
"learning_rate": 0.00017964340692717303,
"loss": 0.9468,
"step": 503
},
{
"epoch": 1.15,
"learning_rate": 0.0001795534514289991,
"loss": 0.9848,
"step": 504
},
{
"epoch": 1.16,
"learning_rate": 0.00017946332024014434,
"loss": 0.7326,
"step": 505
},
{
"epoch": 1.16,
"learning_rate": 0.00017937301355965996,
"loss": 0.8479,
"step": 506
},
{
"epoch": 1.16,
"learning_rate": 0.00017928253158698473,
"loss": 0.8669,
"step": 507
},
{
"epoch": 1.16,
"learning_rate": 0.00017919187452194454,
"loss": 0.8163,
"step": 508
},
{
"epoch": 1.17,
"learning_rate": 0.00017910104256475194,
"loss": 0.926,
"step": 509
},
{
"epoch": 1.17,
"learning_rate": 0.00017901003591600575,
"loss": 0.7956,
"step": 510
},
{
"epoch": 1.17,
"learning_rate": 0.00017891885477669064,
"loss": 0.9002,
"step": 511
},
{
"epoch": 1.17,
"learning_rate": 0.00017882749934817652,
"loss": 0.787,
"step": 512
},
{
"epoch": 1.18,
"learning_rate": 0.00017873596983221832,
"loss": 0.7519,
"step": 513
},
{
"epoch": 1.18,
"learning_rate": 0.0001786442664309554,
"loss": 0.8067,
"step": 514
},
{
"epoch": 1.18,
"learning_rate": 0.00017855238934691108,
"loss": 0.8824,
"step": 515
},
{
"epoch": 1.18,
"learning_rate": 0.0001784603387829923,
"loss": 0.8014,
"step": 516
},
{
"epoch": 1.18,
"learning_rate": 0.00017836811494248919,
"loss": 0.6672,
"step": 517
},
{
"epoch": 1.19,
"learning_rate": 0.00017827571802907444,
"loss": 0.8516,
"step": 518
},
{
"epoch": 1.19,
"learning_rate": 0.000178183148246803,
"loss": 0.8476,
"step": 519
},
{
"epoch": 1.19,
"learning_rate": 0.00017809040580011164,
"loss": 0.8493,
"step": 520
},
{
"epoch": 1.19,
"learning_rate": 0.0001779974908938184,
"loss": 0.7288,
"step": 521
},
{
"epoch": 1.2,
"learning_rate": 0.00017790440373312223,
"loss": 0.7443,
"step": 522
},
{
"epoch": 1.2,
"learning_rate": 0.00017781114452360245,
"loss": 0.8767,
"step": 523
},
{
"epoch": 1.2,
"learning_rate": 0.00017771771347121842,
"loss": 0.8025,
"step": 524
},
{
"epoch": 1.2,
"learning_rate": 0.0001776241107823089,
"loss": 0.8842,
"step": 525
},
{
"epoch": 1.21,
"learning_rate": 0.00017753033666359177,
"loss": 0.9648,
"step": 526
},
{
"epoch": 1.21,
"learning_rate": 0.00017743639132216353,
"loss": 0.7872,
"step": 527
},
{
"epoch": 1.21,
"learning_rate": 0.0001773422749654988,
"loss": 0.9122,
"step": 528
},
{
"epoch": 1.21,
"learning_rate": 0.00017724798780144983,
"loss": 0.7688,
"step": 529
},
{
"epoch": 1.21,
"learning_rate": 0.0001771535300382461,
"loss": 0.8938,
"step": 530
},
{
"epoch": 1.22,
"learning_rate": 0.00017705890188449394,
"loss": 0.7152,
"step": 531
},
{
"epoch": 1.22,
"learning_rate": 0.0001769641035491759,
"loss": 0.7077,
"step": 532
},
{
"epoch": 1.22,
"learning_rate": 0.00017686913524165036,
"loss": 0.8872,
"step": 533
},
{
"epoch": 1.22,
"learning_rate": 0.00017677399717165116,
"loss": 0.8775,
"step": 534
},
{
"epoch": 1.23,
"learning_rate": 0.00017667868954928694,
"loss": 0.8508,
"step": 535
},
{
"epoch": 1.23,
"learning_rate": 0.00017658321258504092,
"loss": 0.8589,
"step": 536
},
{
"epoch": 1.23,
"learning_rate": 0.00017648756648977018,
"loss": 0.6499,
"step": 537
},
{
"epoch": 1.23,
"learning_rate": 0.00017639175147470538,
"loss": 0.8927,
"step": 538
},
{
"epoch": 1.23,
"learning_rate": 0.00017629576775145026,
"loss": 0.8702,
"step": 539
},
{
"epoch": 1.24,
"learning_rate": 0.00017619961553198108,
"loss": 0.7958,
"step": 540
},
{
"epoch": 1.24,
"learning_rate": 0.00017610329502864625,
"loss": 0.8582,
"step": 541
},
{
"epoch": 1.24,
"learning_rate": 0.00017600680645416583,
"loss": 0.7905,
"step": 542
},
{
"epoch": 1.24,
"learning_rate": 0.0001759101500216311,
"loss": 0.7574,
"step": 543
},
{
"epoch": 1.25,
"learning_rate": 0.00017581332594450392,
"loss": 0.861,
"step": 544
},
{
"epoch": 1.25,
"learning_rate": 0.00017571633443661658,
"loss": 0.7682,
"step": 545
},
{
"epoch": 1.25,
"learning_rate": 0.00017561917571217093,
"loss": 0.7547,
"step": 546
},
{
"epoch": 1.25,
"learning_rate": 0.00017552184998573825,
"loss": 0.7852,
"step": 547
},
{
"epoch": 1.26,
"learning_rate": 0.0001754243574722586,
"loss": 0.7635,
"step": 548
},
{
"epoch": 1.26,
"learning_rate": 0.00017532669838704035,
"loss": 0.8714,
"step": 549
},
{
"epoch": 1.26,
"learning_rate": 0.00017522887294575977,
"loss": 0.7839,
"step": 550
},
{
"epoch": 1.26,
"learning_rate": 0.00017513088136446054,
"loss": 0.8551,
"step": 551
},
{
"epoch": 1.26,
"learning_rate": 0.00017503272385955318,
"loss": 0.7367,
"step": 552
},
{
"epoch": 1.27,
"learning_rate": 0.00017493440064781475,
"loss": 0.9257,
"step": 553
},
{
"epoch": 1.27,
"learning_rate": 0.00017483591194638817,
"loss": 0.8246,
"step": 554
},
{
"epoch": 1.27,
"learning_rate": 0.00017473725797278192,
"loss": 0.8319,
"step": 555
},
{
"epoch": 1.27,
"learning_rate": 0.00017463843894486937,
"loss": 0.8304,
"step": 556
},
{
"epoch": 1.28,
"learning_rate": 0.00017453945508088853,
"loss": 0.6536,
"step": 557
},
{
"epoch": 1.28,
"learning_rate": 0.00017444030659944138,
"loss": 0.7606,
"step": 558
},
{
"epoch": 1.28,
"learning_rate": 0.00017434099371949345,
"loss": 0.7084,
"step": 559
},
{
"epoch": 1.28,
"learning_rate": 0.00017424151666037329,
"loss": 0.8891,
"step": 560
},
{
"epoch": 1.29,
"learning_rate": 0.00017414187564177217,
"loss": 0.6199,
"step": 561
},
{
"epoch": 1.29,
"learning_rate": 0.00017404207088374333,
"loss": 0.8676,
"step": 562
},
{
"epoch": 1.29,
"learning_rate": 0.0001739421026067017,
"loss": 0.8477,
"step": 563
},
{
"epoch": 1.29,
"learning_rate": 0.00017384197103142328,
"loss": 0.9234,
"step": 564
},
{
"epoch": 1.29,
"learning_rate": 0.0001737416763790447,
"loss": 0.9103,
"step": 565
},
{
"epoch": 1.3,
"learning_rate": 0.00017364121887106286,
"loss": 0.7859,
"step": 566
},
{
"epoch": 1.3,
"learning_rate": 0.00017354059872933415,
"loss": 0.8623,
"step": 567
},
{
"epoch": 1.3,
"learning_rate": 0.00017343981617607424,
"loss": 0.6266,
"step": 568
},
{
"epoch": 1.3,
"learning_rate": 0.00017333887143385743,
"loss": 0.8105,
"step": 569
},
{
"epoch": 1.31,
"learning_rate": 0.00017323776472561627,
"loss": 0.7752,
"step": 570
},
{
"epoch": 1.31,
"learning_rate": 0.0001731364962746409,
"loss": 0.7873,
"step": 571
},
{
"epoch": 1.31,
"learning_rate": 0.0001730350663045788,
"loss": 0.8425,
"step": 572
},
{
"epoch": 1.31,
"learning_rate": 0.00017293347503943406,
"loss": 0.777,
"step": 573
},
{
"epoch": 1.32,
"learning_rate": 0.000172831722703567,
"loss": 0.7348,
"step": 574
},
{
"epoch": 1.32,
"learning_rate": 0.00017272980952169365,
"loss": 0.7797,
"step": 575
},
{
"epoch": 1.32,
"learning_rate": 0.0001726277357188853,
"loss": 0.8328,
"step": 576
},
{
"epoch": 1.32,
"learning_rate": 0.00017252550152056795,
"loss": 0.7109,
"step": 577
},
{
"epoch": 1.32,
"learning_rate": 0.0001724231071525218,
"loss": 0.7905,
"step": 578
},
{
"epoch": 1.33,
"learning_rate": 0.00017232055284088085,
"loss": 0.7541,
"step": 579
},
{
"epoch": 1.33,
"learning_rate": 0.0001722178388121322,
"loss": 0.8954,
"step": 580
},
{
"epoch": 1.33,
"learning_rate": 0.00017211496529311582,
"loss": 0.8362,
"step": 581
},
{
"epoch": 1.33,
"learning_rate": 0.00017201193251102382,
"loss": 0.8436,
"step": 582
},
{
"epoch": 1.34,
"learning_rate": 0.00017190874069340014,
"loss": 0.7594,
"step": 583
},
{
"epoch": 1.34,
"learning_rate": 0.0001718053900681397,
"loss": 0.9342,
"step": 584
},
{
"epoch": 1.34,
"learning_rate": 0.00017170188086348848,
"loss": 0.8934,
"step": 585
},
{
"epoch": 1.34,
"learning_rate": 0.00017159821330804236,
"loss": 0.831,
"step": 586
},
{
"epoch": 1.34,
"learning_rate": 0.0001714943876307472,
"loss": 0.8053,
"step": 587
},
{
"epoch": 1.35,
"learning_rate": 0.00017139040406089786,
"loss": 0.81,
"step": 588
},
{
"epoch": 1.35,
"learning_rate": 0.000171286262828138,
"loss": 0.8245,
"step": 589
},
{
"epoch": 1.35,
"learning_rate": 0.00017118196416245947,
"loss": 0.8232,
"step": 590
},
{
"epoch": 1.35,
"learning_rate": 0.00017107750829420176,
"loss": 0.8244,
"step": 591
},
{
"epoch": 1.36,
"learning_rate": 0.0001709728954540516,
"loss": 0.7863,
"step": 592
},
{
"epoch": 1.36,
"learning_rate": 0.00017086812587304234,
"loss": 0.8274,
"step": 593
},
{
"epoch": 1.36,
"learning_rate": 0.00017076319978255345,
"loss": 0.6595,
"step": 594
},
{
"epoch": 1.36,
"learning_rate": 0.0001706581174143101,
"loss": 0.8582,
"step": 595
},
{
"epoch": 1.37,
"learning_rate": 0.00017055287900038263,
"loss": 0.6873,
"step": 596
},
{
"epoch": 1.37,
"learning_rate": 0.00017044748477318593,
"loss": 0.8673,
"step": 597
},
{
"epoch": 1.37,
"learning_rate": 0.00017034193496547902,
"loss": 0.8055,
"step": 598
},
{
"epoch": 1.37,
"learning_rate": 0.00017023622981036455,
"loss": 0.8232,
"step": 599
},
{
"epoch": 1.37,
"learning_rate": 0.0001701303695412881,
"loss": 0.8745,
"step": 600
},
{
"epoch": 1.38,
"learning_rate": 0.00017002435439203808,
"loss": 0.8034,
"step": 601
},
{
"epoch": 1.38,
"learning_rate": 0.00016991818459674468,
"loss": 0.9006,
"step": 602
},
{
"epoch": 1.38,
"learning_rate": 0.0001698118603898798,
"loss": 0.7828,
"step": 603
},
{
"epoch": 1.38,
"learning_rate": 0.00016970538200625622,
"loss": 0.8413,
"step": 604
},
{
"epoch": 1.39,
"learning_rate": 0.00016959874968102735,
"loss": 0.8669,
"step": 605
},
{
"epoch": 1.39,
"learning_rate": 0.00016949196364968646,
"loss": 0.9277,
"step": 606
},
{
"epoch": 1.39,
"learning_rate": 0.00016938502414806634,
"loss": 0.9256,
"step": 607
},
{
"epoch": 1.39,
"learning_rate": 0.00016927793141233868,
"loss": 0.8613,
"step": 608
},
{
"epoch": 1.4,
"learning_rate": 0.00016917068567901358,
"loss": 0.9439,
"step": 609
},
{
"epoch": 1.4,
"learning_rate": 0.00016906328718493906,
"loss": 0.8606,
"step": 610
},
{
"epoch": 1.4,
"learning_rate": 0.00016895573616730044,
"loss": 0.7483,
"step": 611
},
{
"epoch": 1.4,
"learning_rate": 0.00016884803286362,
"loss": 0.8359,
"step": 612
},
{
"epoch": 1.4,
"learning_rate": 0.0001687401775117562,
"loss": 0.7764,
"step": 613
},
{
"epoch": 1.41,
"learning_rate": 0.00016863217034990342,
"loss": 0.9857,
"step": 614
},
{
"epoch": 1.41,
"learning_rate": 0.0001685240116165912,
"loss": 0.8706,
"step": 615
},
{
"epoch": 1.41,
"learning_rate": 0.0001684157015506839,
"loss": 0.867,
"step": 616
},
{
"epoch": 1.41,
"learning_rate": 0.00016830724039138003,
"loss": 0.7974,
"step": 617
},
{
"epoch": 1.42,
"learning_rate": 0.00016819862837821181,
"loss": 0.7835,
"step": 618
},
{
"epoch": 1.42,
"learning_rate": 0.00016808986575104465,
"loss": 0.7987,
"step": 619
},
{
"epoch": 1.42,
"learning_rate": 0.0001679809527500765,
"loss": 0.7383,
"step": 620
},
{
"epoch": 1.42,
"learning_rate": 0.0001678718896158375,
"loss": 0.9224,
"step": 621
},
{
"epoch": 1.42,
"learning_rate": 0.00016776267658918928,
"loss": 0.8959,
"step": 622
},
{
"epoch": 1.43,
"learning_rate": 0.00016765331391132456,
"loss": 0.6702,
"step": 623
},
{
"epoch": 1.43,
"learning_rate": 0.0001675438018237665,
"loss": 0.6911,
"step": 624
},
{
"epoch": 1.43,
"learning_rate": 0.00016743414056836825,
"loss": 0.9364,
"step": 625
},
{
"epoch": 1.43,
"learning_rate": 0.00016732433038731242,
"loss": 0.7902,
"step": 626
},
{
"epoch": 1.44,
"learning_rate": 0.00016721437152311054,
"loss": 0.8473,
"step": 627
},
{
"epoch": 1.44,
"learning_rate": 0.00016710426421860235,
"loss": 0.8765,
"step": 628
},
{
"epoch": 1.44,
"learning_rate": 0.00016699400871695555,
"loss": 0.7705,
"step": 629
},
{
"epoch": 1.44,
"learning_rate": 0.00016688360526166514,
"loss": 0.8653,
"step": 630
},
{
"epoch": 1.45,
"learning_rate": 0.0001667730540965528,
"loss": 0.9137,
"step": 631
},
{
"epoch": 1.45,
"learning_rate": 0.00016666235546576648,
"loss": 0.9772,
"step": 632
},
{
"epoch": 1.45,
"learning_rate": 0.0001665515096137797,
"loss": 0.6433,
"step": 633
},
{
"epoch": 1.45,
"learning_rate": 0.0001664405167853912,
"loss": 0.8096,
"step": 634
},
{
"epoch": 1.45,
"learning_rate": 0.00016632937722572434,
"loss": 0.7298,
"step": 635
},
{
"epoch": 1.46,
"learning_rate": 0.00016621809118022647,
"loss": 0.6841,
"step": 636
},
{
"epoch": 1.46,
"learning_rate": 0.00016610665889466838,
"loss": 0.9471,
"step": 637
},
{
"epoch": 1.46,
"learning_rate": 0.00016599508061514404,
"loss": 0.8396,
"step": 638
},
{
"epoch": 1.46,
"learning_rate": 0.00016588335658806962,
"loss": 0.8769,
"step": 639
},
{
"epoch": 1.47,
"learning_rate": 0.00016577148706018328,
"loss": 0.8328,
"step": 640
},
{
"epoch": 1.47,
"learning_rate": 0.0001656594722785445,
"loss": 0.8932,
"step": 641
},
{
"epoch": 1.47,
"learning_rate": 0.0001655473124905335,
"loss": 0.8203,
"step": 642
},
{
"epoch": 1.47,
"learning_rate": 0.00016543500794385084,
"loss": 0.8514,
"step": 643
},
{
"epoch": 1.48,
"learning_rate": 0.00016532255888651666,
"loss": 0.7396,
"step": 644
},
{
"epoch": 1.48,
"learning_rate": 0.00016520996556687028,
"loss": 0.9178,
"step": 645
},
{
"epoch": 1.48,
"learning_rate": 0.0001650972282335697,
"loss": 0.6308,
"step": 646
},
{
"epoch": 1.48,
"learning_rate": 0.00016498434713559088,
"loss": 0.9018,
"step": 647
},
{
"epoch": 1.48,
"learning_rate": 0.00016487132252222727,
"loss": 0.8658,
"step": 648
},
{
"epoch": 1.49,
"learning_rate": 0.00016475815464308933,
"loss": 0.8228,
"step": 649
},
{
"epoch": 1.49,
"learning_rate": 0.0001646448437481039,
"loss": 0.8944,
"step": 650
},
{
"epoch": 1.49,
"learning_rate": 0.0001645313900875136,
"loss": 0.8617,
"step": 651
},
{
"epoch": 1.49,
"learning_rate": 0.00016441779391187646,
"loss": 0.9726,
"step": 652
},
{
"epoch": 1.5,
"learning_rate": 0.00016430405547206516,
"loss": 0.693,
"step": 653
},
{
"epoch": 1.5,
"learning_rate": 0.00016419017501926656,
"loss": 0.8272,
"step": 654
},
{
"epoch": 1.5,
"learning_rate": 0.00016407615280498124,
"loss": 0.8523,
"step": 655
},
{
"epoch": 1.5,
"learning_rate": 0.00016396198908102272,
"loss": 0.7444,
"step": 656
},
{
"epoch": 1.51,
"learning_rate": 0.00016384768409951714,
"loss": 0.8366,
"step": 657
},
{
"epoch": 1.51,
"learning_rate": 0.0001637332381129026,
"loss": 0.7441,
"step": 658
},
{
"epoch": 1.51,
"learning_rate": 0.00016361865137392854,
"loss": 0.6694,
"step": 659
},
{
"epoch": 1.51,
"learning_rate": 0.0001635039241356553,
"loss": 0.8103,
"step": 660
},
{
"epoch": 1.51,
"learning_rate": 0.0001633890566514535,
"loss": 0.9135,
"step": 661
},
{
"epoch": 1.52,
"learning_rate": 0.00016327404917500346,
"loss": 0.7327,
"step": 662
},
{
"epoch": 1.52,
"learning_rate": 0.00016315890196029467,
"loss": 0.8425,
"step": 663
},
{
"epoch": 1.52,
"learning_rate": 0.00016304361526162534,
"loss": 0.8812,
"step": 664
},
{
"epoch": 1.52,
"learning_rate": 0.00016292818933360151,
"loss": 0.777,
"step": 665
},
{
"epoch": 1.53,
"learning_rate": 0.0001628126244311369,
"loss": 0.8864,
"step": 666
},
{
"epoch": 1.53,
"learning_rate": 0.00016269692080945198,
"loss": 0.9333,
"step": 667
},
{
"epoch": 1.53,
"learning_rate": 0.00016258107872407375,
"loss": 0.906,
"step": 668
},
{
"epoch": 1.53,
"learning_rate": 0.00016246509843083492,
"loss": 0.7346,
"step": 669
},
{
"epoch": 1.53,
"learning_rate": 0.00016234898018587337,
"loss": 0.8555,
"step": 670
},
{
"epoch": 1.54,
"learning_rate": 0.00016223272424563173,
"loss": 0.8449,
"step": 671
},
{
"epoch": 1.54,
"learning_rate": 0.00016211633086685664,
"loss": 0.8559,
"step": 672
},
{
"epoch": 1.54,
"learning_rate": 0.00016199980030659838,
"loss": 0.7468,
"step": 673
},
{
"epoch": 1.54,
"learning_rate": 0.00016188313282221008,
"loss": 0.7986,
"step": 674
},
{
"epoch": 1.55,
"learning_rate": 0.0001617663286713474,
"loss": 0.7757,
"step": 675
},
{
"epoch": 1.55,
"learning_rate": 0.00016164938811196757,
"loss": 0.8789,
"step": 676
},
{
"epoch": 1.55,
"learning_rate": 0.00016153231140232936,
"loss": 0.5499,
"step": 677
},
{
"epoch": 1.55,
"learning_rate": 0.00016141509880099206,
"loss": 0.9319,
"step": 678
},
{
"epoch": 1.56,
"learning_rate": 0.00016129775056681513,
"loss": 0.6904,
"step": 679
},
{
"epoch": 1.56,
"learning_rate": 0.0001611802669589575,
"loss": 0.8506,
"step": 680
},
{
"epoch": 1.56,
"learning_rate": 0.00016106264823687716,
"loss": 0.7242,
"step": 681
},
{
"epoch": 1.56,
"learning_rate": 0.00016094489466033043,
"loss": 0.6808,
"step": 682
},
{
"epoch": 1.56,
"learning_rate": 0.00016082700648937146,
"loss": 0.8017,
"step": 683
},
{
"epoch": 1.57,
"learning_rate": 0.00016070898398435167,
"loss": 0.9109,
"step": 684
},
{
"epoch": 1.57,
"learning_rate": 0.00016059082740591915,
"loss": 0.7277,
"step": 685
},
{
"epoch": 1.57,
"learning_rate": 0.00016047253701501808,
"loss": 0.8601,
"step": 686
},
{
"epoch": 1.57,
"learning_rate": 0.00016035411307288813,
"loss": 0.9118,
"step": 687
},
{
"epoch": 1.58,
"learning_rate": 0.0001602355558410639,
"loss": 0.8049,
"step": 688
},
{
"epoch": 1.58,
"learning_rate": 0.00016011686558137448,
"loss": 0.8174,
"step": 689
},
{
"epoch": 1.58,
"learning_rate": 0.00015999804255594258,
"loss": 0.8481,
"step": 690
},
{
"epoch": 1.58,
"learning_rate": 0.0001598790870271843,
"loss": 0.7052,
"step": 691
},
{
"epoch": 1.59,
"learning_rate": 0.00015975999925780813,
"loss": 0.8208,
"step": 692
},
{
"epoch": 1.59,
"learning_rate": 0.00015964077951081485,
"loss": 0.7257,
"step": 693
},
{
"epoch": 1.59,
"learning_rate": 0.00015952142804949652,
"loss": 0.858,
"step": 694
},
{
"epoch": 1.59,
"learning_rate": 0.00015940194513743624,
"loss": 0.9242,
"step": 695
},
{
"epoch": 1.59,
"learning_rate": 0.0001592823310385073,
"loss": 0.7924,
"step": 696
},
{
"epoch": 1.6,
"learning_rate": 0.00015916258601687274,
"loss": 0.8788,
"step": 697
},
{
"epoch": 1.6,
"learning_rate": 0.0001590427103369848,
"loss": 0.7946,
"step": 698
},
{
"epoch": 1.6,
"learning_rate": 0.00015892270426358414,
"loss": 0.8318,
"step": 699
},
{
"epoch": 1.6,
"learning_rate": 0.00015880256806169953,
"loss": 0.8983,
"step": 700
},
{
"epoch": 1.61,
"learning_rate": 0.00015868230199664711,
"loss": 0.8889,
"step": 701
},
{
"epoch": 1.61,
"learning_rate": 0.00015856190633402968,
"loss": 0.9692,
"step": 702
},
{
"epoch": 1.61,
"learning_rate": 0.0001584413813397364,
"loss": 0.7787,
"step": 703
},
{
"epoch": 1.61,
"learning_rate": 0.00015832072727994193,
"loss": 0.6455,
"step": 704
},
{
"epoch": 1.62,
"learning_rate": 0.00015819994442110616,
"loss": 1.0006,
"step": 705
},
{
"epoch": 1.62,
"learning_rate": 0.00015807903302997317,
"loss": 0.7384,
"step": 706
},
{
"epoch": 1.62,
"learning_rate": 0.00015795799337357114,
"loss": 0.8517,
"step": 707
},
{
"epoch": 1.62,
"learning_rate": 0.00015783682571921133,
"loss": 0.8446,
"step": 708
},
{
"epoch": 1.62,
"learning_rate": 0.00015771553033448775,
"loss": 0.8227,
"step": 709
},
{
"epoch": 1.63,
"learning_rate": 0.00015759410748727662,
"loss": 0.8374,
"step": 710
},
{
"epoch": 1.63,
"learning_rate": 0.0001574725574457354,
"loss": 0.7274,
"step": 711
},
{
"epoch": 1.63,
"learning_rate": 0.00015735088047830268,
"loss": 0.8728,
"step": 712
},
{
"epoch": 1.63,
"learning_rate": 0.00015722907685369723,
"loss": 1.0569,
"step": 713
},
{
"epoch": 1.64,
"learning_rate": 0.00015710714684091762,
"loss": 0.9775,
"step": 714
},
{
"epoch": 1.64,
"learning_rate": 0.0001569850907092415,
"loss": 0.6832,
"step": 715
},
{
"epoch": 1.64,
"learning_rate": 0.00015686290872822504,
"loss": 0.7358,
"step": 716
},
{
"epoch": 1.64,
"learning_rate": 0.00015674060116770236,
"loss": 0.9015,
"step": 717
},
{
"epoch": 1.64,
"learning_rate": 0.00015661816829778494,
"loss": 0.8516,
"step": 718
},
{
"epoch": 1.65,
"learning_rate": 0.00015649561038886094,
"loss": 0.8911,
"step": 719
},
{
"epoch": 1.65,
"learning_rate": 0.00015637292771159472,
"loss": 0.7098,
"step": 720
},
{
"epoch": 1.65,
"learning_rate": 0.00015625012053692615,
"loss": 0.955,
"step": 721
},
{
"epoch": 1.65,
"learning_rate": 0.0001561271891360701,
"loss": 0.6421,
"step": 722
},
{
"epoch": 1.66,
"learning_rate": 0.0001560041337805157,
"loss": 0.8807,
"step": 723
},
{
"epoch": 1.66,
"learning_rate": 0.00015588095474202595,
"loss": 0.722,
"step": 724
},
{
"epoch": 1.66,
"learning_rate": 0.00015575765229263686,
"loss": 0.8055,
"step": 725
},
{
"epoch": 1.66,
"learning_rate": 0.00015563422670465712,
"loss": 0.7822,
"step": 726
},
{
"epoch": 1.67,
"learning_rate": 0.00015551067825066728,
"loss": 0.8311,
"step": 727
},
{
"epoch": 1.67,
"learning_rate": 0.00015538700720351924,
"loss": 0.8519,
"step": 728
},
{
"epoch": 1.67,
"learning_rate": 0.00015526321383633568,
"loss": 0.7506,
"step": 729
},
{
"epoch": 1.67,
"learning_rate": 0.0001551392984225094,
"loss": 0.8056,
"step": 730
},
{
"epoch": 1.67,
"learning_rate": 0.00015501526123570277,
"loss": 0.6968,
"step": 731
},
{
"epoch": 1.68,
"learning_rate": 0.000154891102549847,
"loss": 0.829,
"step": 732
},
{
"epoch": 1.68,
"learning_rate": 0.0001547668226391417,
"loss": 0.6682,
"step": 733
},
{
"epoch": 1.68,
"learning_rate": 0.00015464242177805422,
"loss": 0.8295,
"step": 734
},
{
"epoch": 1.68,
"learning_rate": 0.00015451790024131895,
"loss": 0.6911,
"step": 735
},
{
"epoch": 1.69,
"learning_rate": 0.00015439325830393687,
"loss": 0.6785,
"step": 736
},
{
"epoch": 1.69,
"learning_rate": 0.00015426849624117472,
"loss": 0.81,
"step": 737
},
{
"epoch": 1.69,
"learning_rate": 0.00015414361432856475,
"loss": 0.9955,
"step": 738
},
{
"epoch": 1.69,
"learning_rate": 0.00015401861284190368,
"loss": 0.8433,
"step": 739
},
{
"epoch": 1.7,
"learning_rate": 0.00015389349205725242,
"loss": 0.618,
"step": 740
},
{
"epoch": 1.7,
"learning_rate": 0.00015376825225093537,
"loss": 0.7747,
"step": 741
},
{
"epoch": 1.7,
"learning_rate": 0.00015364289369953967,
"loss": 0.7673,
"step": 742
},
{
"epoch": 1.7,
"learning_rate": 0.0001535174166799148,
"loss": 0.8066,
"step": 743
},
{
"epoch": 1.7,
"learning_rate": 0.00015339182146917183,
"loss": 0.8392,
"step": 744
},
{
"epoch": 1.71,
"learning_rate": 0.0001532661083446829,
"loss": 0.7949,
"step": 745
},
{
"epoch": 1.71,
"learning_rate": 0.00015314027758408044,
"loss": 0.8698,
"step": 746
},
{
"epoch": 1.71,
"learning_rate": 0.00015301432946525684,
"loss": 0.7715,
"step": 747
},
{
"epoch": 1.71,
"learning_rate": 0.00015288826426636354,
"loss": 0.7583,
"step": 748
},
{
"epoch": 1.72,
"learning_rate": 0.00015276208226581064,
"loss": 0.8544,
"step": 749
},
{
"epoch": 1.72,
"learning_rate": 0.00015263578374226605,
"loss": 0.8272,
"step": 750
},
{
"epoch": 1.72,
"learning_rate": 0.0001525093689746552,
"loss": 0.857,
"step": 751
},
{
"epoch": 1.72,
"learning_rate": 0.00015238283824216015,
"loss": 0.9208,
"step": 752
},
{
"epoch": 1.73,
"learning_rate": 0.000152256191824219,
"loss": 0.8626,
"step": 753
},
{
"epoch": 1.73,
"learning_rate": 0.00015212943000052545,
"loss": 0.9418,
"step": 754
},
{
"epoch": 1.73,
"learning_rate": 0.00015200255305102803,
"loss": 0.8087,
"step": 755
},
{
"epoch": 1.73,
"learning_rate": 0.00015187556125592945,
"loss": 0.7913,
"step": 756
},
{
"epoch": 1.73,
"learning_rate": 0.00015174845489568622,
"loss": 0.8973,
"step": 757
},
{
"epoch": 1.74,
"learning_rate": 0.00015162123425100762,
"loss": 0.701,
"step": 758
},
{
"epoch": 1.74,
"learning_rate": 0.00015149389960285558,
"loss": 0.898,
"step": 759
},
{
"epoch": 1.74,
"learning_rate": 0.00015136645123244366,
"loss": 0.8809,
"step": 760
},
{
"epoch": 1.74,
"learning_rate": 0.00015123888942123652,
"loss": 0.7334,
"step": 761
},
{
"epoch": 1.75,
"learning_rate": 0.0001511112144509495,
"loss": 0.8506,
"step": 762
},
{
"epoch": 1.75,
"learning_rate": 0.00015098342660354775,
"loss": 0.8469,
"step": 763
},
{
"epoch": 1.75,
"learning_rate": 0.0001508555261612457,
"loss": 1.0353,
"step": 764
},
{
"epoch": 1.75,
"learning_rate": 0.0001507275134065065,
"loss": 0.6269,
"step": 765
},
{
"epoch": 1.75,
"learning_rate": 0.00015059938862204127,
"loss": 0.7825,
"step": 766
},
{
"epoch": 1.76,
"learning_rate": 0.0001504711520908086,
"loss": 0.8388,
"step": 767
},
{
"epoch": 1.76,
"learning_rate": 0.00015034280409601385,
"loss": 0.7383,
"step": 768
},
{
"epoch": 1.76,
"learning_rate": 0.00015021434492110852,
"loss": 0.8029,
"step": 769
},
{
"epoch": 1.76,
"learning_rate": 0.00015008577484978966,
"loss": 0.6527,
"step": 770
},
{
"epoch": 1.77,
"learning_rate": 0.00014995709416599926,
"loss": 0.9434,
"step": 771
},
{
"epoch": 1.77,
"learning_rate": 0.00014982830315392358,
"loss": 0.753,
"step": 772
},
{
"epoch": 1.77,
"learning_rate": 0.00014969940209799248,
"loss": 0.8143,
"step": 773
},
{
"epoch": 1.77,
"learning_rate": 0.00014957039128287892,
"loss": 0.8939,
"step": 774
},
{
"epoch": 1.78,
"learning_rate": 0.0001494412709934982,
"loss": 0.9265,
"step": 775
},
{
"epoch": 1.78,
"learning_rate": 0.00014931204151500747,
"loss": 0.8261,
"step": 776
},
{
"epoch": 1.78,
"learning_rate": 0.00014918270313280495,
"loss": 0.8555,
"step": 777
},
{
"epoch": 1.78,
"learning_rate": 0.00014905325613252937,
"loss": 0.8191,
"step": 778
},
{
"epoch": 1.78,
"learning_rate": 0.00014892370080005936,
"loss": 0.9159,
"step": 779
},
{
"epoch": 1.79,
"learning_rate": 0.00014879403742151283,
"loss": 0.7936,
"step": 780
},
{
"epoch": 1.79,
"learning_rate": 0.00014866426628324625,
"loss": 0.8782,
"step": 781
},
{
"epoch": 1.79,
"learning_rate": 0.00014853438767185412,
"loss": 0.6078,
"step": 782
},
{
"epoch": 1.79,
"learning_rate": 0.0001484044018741682,
"loss": 0.7182,
"step": 783
},
{
"epoch": 1.8,
"learning_rate": 0.00014827430917725712,
"loss": 0.7528,
"step": 784
},
{
"epoch": 1.8,
"learning_rate": 0.00014814410986842543,
"loss": 0.902,
"step": 785
},
{
"epoch": 1.8,
"learning_rate": 0.00014801380423521324,
"loss": 0.8765,
"step": 786
},
{
"epoch": 1.8,
"learning_rate": 0.00014788339256539544,
"loss": 0.6332,
"step": 787
},
{
"epoch": 1.81,
"learning_rate": 0.00014775287514698105,
"loss": 0.7258,
"step": 788
},
{
"epoch": 1.81,
"learning_rate": 0.00014762225226821273,
"loss": 0.7754,
"step": 789
},
{
"epoch": 1.81,
"learning_rate": 0.00014749152421756595,
"loss": 0.7039,
"step": 790
},
{
"epoch": 1.81,
"learning_rate": 0.0001473606912837485,
"loss": 0.8563,
"step": 791
},
{
"epoch": 1.81,
"learning_rate": 0.00014722975375569978,
"loss": 0.8956,
"step": 792
},
{
"epoch": 1.82,
"learning_rate": 0.00014709871192259026,
"loss": 0.8724,
"step": 793
},
{
"epoch": 1.82,
"learning_rate": 0.0001469675660738206,
"loss": 0.8885,
"step": 794
},
{
"epoch": 1.82,
"learning_rate": 0.00014683631649902132,
"loss": 0.7637,
"step": 795
},
{
"epoch": 1.82,
"learning_rate": 0.00014670496348805195,
"loss": 0.7596,
"step": 796
},
{
"epoch": 1.83,
"learning_rate": 0.00014657350733100047,
"loss": 0.8221,
"step": 797
},
{
"epoch": 1.83,
"learning_rate": 0.00014644194831818266,
"loss": 0.8475,
"step": 798
},
{
"epoch": 1.83,
"learning_rate": 0.00014631028674014142,
"loss": 0.7966,
"step": 799
},
{
"epoch": 1.83,
"learning_rate": 0.00014617852288764625,
"loss": 0.9186,
"step": 800
},
{
"epoch": 1.84,
"learning_rate": 0.00014604665705169237,
"loss": 0.9027,
"step": 801
},
{
"epoch": 1.84,
"learning_rate": 0.0001459146895235004,
"loss": 0.9357,
"step": 802
},
{
"epoch": 1.84,
"learning_rate": 0.00014578262059451537,
"loss": 0.9202,
"step": 803
},
{
"epoch": 1.84,
"learning_rate": 0.00014565045055640638,
"loss": 0.9226,
"step": 804
},
{
"epoch": 1.84,
"learning_rate": 0.0001455181797010658,
"loss": 0.8416,
"step": 805
},
{
"epoch": 1.85,
"learning_rate": 0.0001453858083206086,
"loss": 0.8192,
"step": 806
},
{
"epoch": 1.85,
"learning_rate": 0.0001452533367073718,
"loss": 0.8309,
"step": 807
},
{
"epoch": 1.85,
"learning_rate": 0.00014512076515391375,
"loss": 0.7646,
"step": 808
},
{
"epoch": 1.85,
"learning_rate": 0.00014498809395301356,
"loss": 0.9335,
"step": 809
},
{
"epoch": 1.86,
"learning_rate": 0.00014485532339767037,
"loss": 0.9696,
"step": 810
},
{
"epoch": 1.86,
"learning_rate": 0.00014472245378110277,
"loss": 0.7,
"step": 811
},
{
"epoch": 1.86,
"learning_rate": 0.000144589485396748,
"loss": 0.8206,
"step": 812
},
{
"epoch": 1.86,
"learning_rate": 0.0001444564185382617,
"loss": 0.7417,
"step": 813
},
{
"epoch": 1.86,
"learning_rate": 0.00014432325349951667,
"loss": 0.6384,
"step": 814
},
{
"epoch": 1.87,
"learning_rate": 0.00014418999057460276,
"loss": 0.7801,
"step": 815
},
{
"epoch": 1.87,
"learning_rate": 0.0001440566300578259,
"loss": 0.8459,
"step": 816
},
{
"epoch": 1.87,
"learning_rate": 0.0001439231722437075,
"loss": 0.8863,
"step": 817
},
{
"epoch": 1.87,
"learning_rate": 0.000143789617426984,
"loss": 0.8502,
"step": 818
},
{
"epoch": 1.88,
"learning_rate": 0.000143655965902606,
"loss": 0.8522,
"step": 819
},
{
"epoch": 1.88,
"learning_rate": 0.00014352221796573757,
"loss": 0.8612,
"step": 820
},
{
"epoch": 1.88,
"learning_rate": 0.00014338837391175582,
"loss": 0.8065,
"step": 821
},
{
"epoch": 1.88,
"learning_rate": 0.0001432544340362501,
"loss": 0.8777,
"step": 822
},
{
"epoch": 1.89,
"learning_rate": 0.00014312039863502145,
"loss": 0.7731,
"step": 823
},
{
"epoch": 1.89,
"learning_rate": 0.00014298626800408166,
"loss": 0.8791,
"step": 824
},
{
"epoch": 1.89,
"learning_rate": 0.00014285204243965306,
"loss": 0.9095,
"step": 825
},
{
"epoch": 1.89,
"learning_rate": 0.00014271772223816757,
"loss": 0.8846,
"step": 826
},
{
"epoch": 1.89,
"learning_rate": 0.00014258330769626606,
"loss": 0.701,
"step": 827
},
{
"epoch": 1.9,
"learning_rate": 0.00014244879911079779,
"loss": 0.7598,
"step": 828
},
{
"epoch": 1.9,
"learning_rate": 0.00014231419677881966,
"loss": 1.0411,
"step": 829
},
{
"epoch": 1.9,
"learning_rate": 0.00014217950099759569,
"loss": 0.6915,
"step": 830
},
{
"epoch": 1.9,
"learning_rate": 0.00014204471206459628,
"loss": 0.8048,
"step": 831
},
{
"epoch": 1.91,
"learning_rate": 0.0001419098302774974,
"loss": 0.7688,
"step": 832
},
{
"epoch": 1.91,
"learning_rate": 0.00014177485593418028,
"loss": 0.7863,
"step": 833
},
{
"epoch": 1.91,
"learning_rate": 0.0001416397893327304,
"loss": 0.7627,
"step": 834
},
{
"epoch": 1.91,
"learning_rate": 0.00014150463077143712,
"loss": 0.7423,
"step": 835
},
{
"epoch": 1.92,
"learning_rate": 0.00014136938054879283,
"loss": 0.7236,
"step": 836
},
{
"epoch": 1.92,
"learning_rate": 0.00014123403896349227,
"loss": 0.8978,
"step": 837
},
{
"epoch": 1.92,
"learning_rate": 0.00014109860631443213,
"loss": 0.9403,
"step": 838
},
{
"epoch": 1.92,
"learning_rate": 0.00014096308290071003,
"loss": 0.7267,
"step": 839
},
{
"epoch": 1.92,
"learning_rate": 0.00014082746902162414,
"loss": 0.7905,
"step": 840
},
{
"epoch": 1.93,
"learning_rate": 0.00014069176497667242,
"loss": 0.8848,
"step": 841
},
{
"epoch": 1.93,
"learning_rate": 0.00014055597106555192,
"loss": 0.9057,
"step": 842
},
{
"epoch": 1.93,
"learning_rate": 0.00014042008758815818,
"loss": 0.7363,
"step": 843
},
{
"epoch": 1.93,
"learning_rate": 0.00014028411484458454,
"loss": 0.8193,
"step": 844
},
{
"epoch": 1.94,
"learning_rate": 0.00014014805313512145,
"loss": 0.7387,
"step": 845
},
{
"epoch": 1.94,
"learning_rate": 0.00014001190276025593,
"loss": 0.8871,
"step": 846
},
{
"epoch": 1.94,
"learning_rate": 0.0001398756640206707,
"loss": 0.7342,
"step": 847
},
{
"epoch": 1.94,
"learning_rate": 0.00013973933721724363,
"loss": 0.8557,
"step": 848
},
{
"epoch": 1.95,
"learning_rate": 0.0001396029226510472,
"loss": 0.8778,
"step": 849
},
{
"epoch": 1.95,
"learning_rate": 0.00013946642062334766,
"loss": 0.7844,
"step": 850
},
{
"epoch": 1.95,
"learning_rate": 0.00013932983143560433,
"loss": 0.7941,
"step": 851
},
{
"epoch": 1.95,
"learning_rate": 0.00013919315538946905,
"loss": 0.7505,
"step": 852
},
{
"epoch": 1.95,
"learning_rate": 0.0001390563927867856,
"loss": 0.8371,
"step": 853
},
{
"epoch": 1.96,
"learning_rate": 0.00013891954392958878,
"loss": 0.8128,
"step": 854
},
{
"epoch": 1.96,
"learning_rate": 0.0001387826091201039,
"loss": 0.7127,
"step": 855
},
{
"epoch": 1.96,
"learning_rate": 0.00013864558866074622,
"loss": 0.8165,
"step": 856
},
{
"epoch": 1.96,
"learning_rate": 0.00013850848285411994,
"loss": 0.7103,
"step": 857
},
{
"epoch": 1.97,
"learning_rate": 0.00013837129200301794,
"loss": 0.8373,
"step": 858
},
{
"epoch": 1.97,
"learning_rate": 0.00013823401641042084,
"loss": 0.6908,
"step": 859
},
{
"epoch": 1.97,
"learning_rate": 0.00013809665637949637,
"loss": 0.7358,
"step": 860
},
{
"epoch": 1.97,
"learning_rate": 0.00013795921221359877,
"loss": 0.7545,
"step": 861
},
{
"epoch": 1.97,
"learning_rate": 0.00013782168421626816,
"loss": 0.7681,
"step": 862
},
{
"epoch": 1.98,
"learning_rate": 0.00013768407269122967,
"loss": 1.026,
"step": 863
},
{
"epoch": 1.98,
"learning_rate": 0.000137546377942393,
"loss": 0.761,
"step": 864
},
{
"epoch": 1.98,
"learning_rate": 0.0001374086002738516,
"loss": 0.8442,
"step": 865
},
{
"epoch": 1.98,
"learning_rate": 0.00013727073998988202,
"loss": 0.7959,
"step": 866
},
{
"epoch": 1.99,
"learning_rate": 0.00013713279739494333,
"loss": 0.8061,
"step": 867
},
{
"epoch": 1.99,
"learning_rate": 0.00013699477279367636,
"loss": 0.7434,
"step": 868
},
{
"epoch": 1.99,
"learning_rate": 0.000136856666490903,
"loss": 0.7159,
"step": 869
},
{
"epoch": 1.99,
"learning_rate": 0.00013671847879162562,
"loss": 0.867,
"step": 870
},
{
"epoch": 2.0,
"learning_rate": 0.00013658021000102636,
"loss": 0.9237,
"step": 871
},
{
"epoch": 2.0,
"learning_rate": 0.0001364418604244664,
"loss": 0.8545,
"step": 872
},
{
"epoch": 2.0,
"learning_rate": 0.00013630343036748535,
"loss": 0.893,
"step": 873
}
],
"max_steps": 2180,
"num_train_epochs": 5,
"total_flos": 236331181277184.0,
"trial_name": null,
"trial_params": null
}