{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.16,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00032,
      "grad_norm": 0.24713319540023804,
      "learning_rate": 0.0,
      "loss": 0.8884,
      "step": 1
    },
    {
      "epoch": 0.00064,
      "grad_norm": 0.20603826642036438,
      "learning_rate": 1.3333333333333334e-06,
      "loss": 0.8479,
      "step": 2
    },
    {
      "epoch": 0.00096,
      "grad_norm": 0.24543610215187073,
      "learning_rate": 2.666666666666667e-06,
      "loss": 0.9853,
      "step": 3
    },
    {
      "epoch": 0.00128,
      "grad_norm": 0.2051621973514557,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.8293,
      "step": 4
    },
    {
      "epoch": 0.0016,
      "grad_norm": 0.23469021916389465,
      "learning_rate": 5.333333333333334e-06,
      "loss": 0.9811,
      "step": 5
    },
    {
      "epoch": 0.00192,
      "grad_norm": 0.25180304050445557,
      "learning_rate": 6.666666666666667e-06,
      "loss": 1.039,
      "step": 6
    },
    {
      "epoch": 0.00224,
      "grad_norm": 0.23135970532894135,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.8688,
      "step": 7
    },
    {
      "epoch": 0.00256,
      "grad_norm": 0.22394850850105286,
      "learning_rate": 9.333333333333334e-06,
      "loss": 0.9214,
      "step": 8
    },
    {
      "epoch": 0.00288,
      "grad_norm": 0.25566354393959045,
      "learning_rate": 1.0666666666666667e-05,
      "loss": 0.8667,
      "step": 9
    },
    {
      "epoch": 0.0032,
      "grad_norm": 0.2881408631801605,
      "learning_rate": 1.2e-05,
      "loss": 0.9544,
      "step": 10
    },
    {
      "epoch": 0.00352,
      "grad_norm": 0.22555342316627502,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.9383,
      "step": 11
    },
    {
      "epoch": 0.00384,
      "grad_norm": 0.23086762428283691,
      "learning_rate": 1.4666666666666668e-05,
      "loss": 0.8595,
      "step": 12
    },
    {
      "epoch": 0.00416,
      "grad_norm": 0.2487766444683075,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.9469,
      "step": 13
    },
    {
      "epoch": 0.00448,
      "grad_norm": 0.3970007598400116,
      "learning_rate": 1.7333333333333336e-05,
      "loss": 1.1923,
      "step": 14
    },
    {
      "epoch": 0.0048,
      "grad_norm": 0.25204387307167053,
      "learning_rate": 1.866666666666667e-05,
      "loss": 0.8854,
      "step": 15
    },
    {
      "epoch": 0.00512,
      "grad_norm": 0.23282122611999512,
      "learning_rate": 2e-05,
      "loss": 0.932,
      "step": 16
    },
    {
      "epoch": 0.00544,
      "grad_norm": 0.2607545852661133,
      "learning_rate": 2.1333333333333335e-05,
      "loss": 0.9094,
      "step": 17
    },
    {
      "epoch": 0.00576,
      "grad_norm": 0.21402348577976227,
      "learning_rate": 2.2666666666666668e-05,
      "loss": 0.7879,
      "step": 18
    },
    {
      "epoch": 0.00608,
      "grad_norm": 0.23922377824783325,
      "learning_rate": 2.4e-05,
      "loss": 0.9193,
      "step": 19
    },
    {
      "epoch": 0.0064,
      "grad_norm": 0.18835392594337463,
      "learning_rate": 2.5333333333333337e-05,
      "loss": 0.9786,
      "step": 20
    },
    {
      "epoch": 0.00672,
      "grad_norm": 0.1775645613670349,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.933,
      "step": 21
    },
    {
      "epoch": 0.00704,
      "grad_norm": 0.20945614576339722,
      "learning_rate": 2.8000000000000003e-05,
      "loss": 0.8171,
      "step": 22
    },
    {
      "epoch": 0.00736,
      "grad_norm": 0.16134795546531677,
      "learning_rate": 2.9333333333333336e-05,
      "loss": 0.9665,
      "step": 23
    },
    {
      "epoch": 0.00768,
      "grad_norm": 0.1454283446073532,
      "learning_rate": 3.066666666666667e-05,
      "loss": 0.7238,
      "step": 24
    },
    {
      "epoch": 0.008,
      "grad_norm": 0.15920202434062958,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.7932,
      "step": 25
    },
    {
      "epoch": 0.00832,
      "grad_norm": 0.16204868257045746,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.8175,
      "step": 26
    },
    {
      "epoch": 0.00864,
      "grad_norm": 0.15179912745952606,
      "learning_rate": 3.466666666666667e-05,
      "loss": 0.9067,
      "step": 27
    },
    {
      "epoch": 0.00896,
      "grad_norm": 0.1287967711687088,
      "learning_rate": 3.6e-05,
      "loss": 0.8039,
      "step": 28
    },
    {
      "epoch": 0.00928,
      "grad_norm": 0.13528791069984436,
      "learning_rate": 3.733333333333334e-05,
      "loss": 0.8892,
      "step": 29
    },
    {
      "epoch": 0.0096,
      "grad_norm": 0.13059762120246887,
      "learning_rate": 3.866666666666667e-05,
      "loss": 0.7023,
      "step": 30
    },
    {
      "epoch": 0.00992,
      "grad_norm": 0.13119487464427948,
      "learning_rate": 4e-05,
      "loss": 0.7607,
      "step": 31
    },
    {
      "epoch": 0.01024,
      "grad_norm": 0.12565594911575317,
      "learning_rate": 4.133333333333333e-05,
      "loss": 0.808,
      "step": 32
    },
    {
      "epoch": 0.01056,
      "grad_norm": 0.1259136199951172,
      "learning_rate": 4.266666666666667e-05,
      "loss": 0.8704,
      "step": 33
    },
    {
      "epoch": 0.01088,
      "grad_norm": 0.1319507360458374,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.6421,
      "step": 34
    },
    {
      "epoch": 0.0112,
      "grad_norm": 0.1317446082830429,
      "learning_rate": 4.5333333333333335e-05,
      "loss": 0.688,
      "step": 35
    },
    {
      "epoch": 0.01152,
      "grad_norm": 0.12360246479511261,
      "learning_rate": 4.666666666666667e-05,
      "loss": 0.798,
      "step": 36
    },
    {
      "epoch": 0.01184,
      "grad_norm": 0.11974922567605972,
      "learning_rate": 4.8e-05,
      "loss": 0.829,
      "step": 37
    },
    {
      "epoch": 0.01216,
      "grad_norm": 0.13918066024780273,
      "learning_rate": 4.933333333333334e-05,
      "loss": 0.8626,
      "step": 38
    },
    {
      "epoch": 0.01248,
      "grad_norm": 0.11004938930273056,
      "learning_rate": 5.0666666666666674e-05,
      "loss": 0.8056,
      "step": 39
    },
    {
      "epoch": 0.0128,
      "grad_norm": 0.10854440927505493,
      "learning_rate": 5.2000000000000004e-05,
      "loss": 0.8773,
      "step": 40
    },
    {
      "epoch": 0.01312,
      "grad_norm": 0.10877286642789841,
      "learning_rate": 5.333333333333333e-05,
      "loss": 0.8947,
      "step": 41
    },
    {
      "epoch": 0.01344,
      "grad_norm": 0.12767033278942108,
      "learning_rate": 5.466666666666666e-05,
      "loss": 0.7892,
      "step": 42
    },
    {
      "epoch": 0.01376,
      "grad_norm": 0.11700518429279327,
      "learning_rate": 5.6000000000000006e-05,
      "loss": 0.8598,
      "step": 43
    },
    {
      "epoch": 0.01408,
      "grad_norm": 0.12549364566802979,
      "learning_rate": 5.7333333333333336e-05,
      "loss": 0.8498,
      "step": 44
    },
    {
      "epoch": 0.0144,
      "grad_norm": 0.11921705305576324,
      "learning_rate": 5.866666666666667e-05,
      "loss": 0.7979,
      "step": 45
    },
    {
      "epoch": 0.01472,
      "grad_norm": 0.11621945351362228,
      "learning_rate": 6e-05,
      "loss": 0.9252,
      "step": 46
    },
    {
      "epoch": 0.01504,
      "grad_norm": 0.10912485420703888,
      "learning_rate": 6.133333333333334e-05,
      "loss": 0.7674,
      "step": 47
    },
    {
      "epoch": 0.01536,
      "grad_norm": 0.11039536446332932,
      "learning_rate": 6.266666666666667e-05,
      "loss": 0.841,
      "step": 48
    },
    {
      "epoch": 0.01568,
      "grad_norm": 0.10852135717868805,
      "learning_rate": 6.400000000000001e-05,
      "loss": 0.7991,
      "step": 49
    },
    {
      "epoch": 0.016,
      "grad_norm": 0.09653560072183609,
      "learning_rate": 6.533333333333334e-05,
      "loss": 0.6292,
      "step": 50
    },
    {
      "epoch": 0.01632,
      "grad_norm": 0.11932919919490814,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.7327,
      "step": 51
    },
    {
      "epoch": 0.01664,
      "grad_norm": 0.10563201457262039,
      "learning_rate": 6.800000000000001e-05,
      "loss": 0.7041,
      "step": 52
    },
    {
      "epoch": 0.01696,
      "grad_norm": 0.1063474714756012,
      "learning_rate": 6.933333333333334e-05,
      "loss": 0.7953,
      "step": 53
    },
    {
      "epoch": 0.01728,
      "grad_norm": 0.11396024376153946,
      "learning_rate": 7.066666666666667e-05,
      "loss": 0.899,
      "step": 54
    },
    {
      "epoch": 0.0176,
      "grad_norm": 0.10861669480800629,
      "learning_rate": 7.2e-05,
      "loss": 0.9343,
      "step": 55
    },
    {
      "epoch": 0.01792,
      "grad_norm": 0.12476427853107452,
      "learning_rate": 7.333333333333333e-05,
      "loss": 0.8145,
      "step": 56
    },
    {
      "epoch": 0.01824,
      "grad_norm": 0.1240817978978157,
      "learning_rate": 7.466666666666667e-05,
      "loss": 0.7133,
      "step": 57
    },
    {
      "epoch": 0.01856,
      "grad_norm": 0.10977518558502197,
      "learning_rate": 7.6e-05,
      "loss": 0.7576,
      "step": 58
    },
    {
      "epoch": 0.01888,
      "grad_norm": 0.11463093012571335,
      "learning_rate": 7.733333333333333e-05,
      "loss": 0.8552,
      "step": 59
    },
    {
      "epoch": 0.0192,
      "grad_norm": 0.1143040657043457,
      "learning_rate": 7.866666666666666e-05,
      "loss": 0.9198,
      "step": 60
    },
    {
      "epoch": 0.01952,
      "grad_norm": 0.12839296460151672,
      "learning_rate": 8e-05,
      "loss": 0.6644,
      "step": 61
    },
    {
      "epoch": 0.01984,
      "grad_norm": 0.11903133243322372,
      "learning_rate": 8.133333333333334e-05,
      "loss": 0.9053,
      "step": 62
    },
    {
      "epoch": 0.02016,
      "grad_norm": 0.12296755611896515,
      "learning_rate": 8.266666666666667e-05,
      "loss": 0.7695,
      "step": 63
    },
    {
      "epoch": 0.02048,
      "grad_norm": 0.11100097000598907,
      "learning_rate": 8.4e-05,
      "loss": 0.8909,
      "step": 64
    },
    {
      "epoch": 0.0208,
      "grad_norm": 0.11848391592502594,
      "learning_rate": 8.533333333333334e-05,
      "loss": 0.8204,
      "step": 65
    },
    {
      "epoch": 0.02112,
      "grad_norm": 0.11903996020555496,
      "learning_rate": 8.666666666666667e-05,
      "loss": 0.7778,
      "step": 66
    },
    {
      "epoch": 0.02144,
      "grad_norm": 0.10964814573526382,
      "learning_rate": 8.800000000000001e-05,
      "loss": 0.6879,
      "step": 67
    },
    {
      "epoch": 0.02176,
      "grad_norm": 0.11036152392625809,
      "learning_rate": 8.933333333333334e-05,
      "loss": 0.8051,
      "step": 68
    },
    {
      "epoch": 0.02208,
      "grad_norm": 0.10888101160526276,
      "learning_rate": 9.066666666666667e-05,
      "loss": 0.759,
      "step": 69
    },
    {
      "epoch": 0.0224,
      "grad_norm": 0.11158827692270279,
      "learning_rate": 9.200000000000001e-05,
      "loss": 0.7412,
      "step": 70
    },
    {
      "epoch": 0.02272,
      "grad_norm": 0.12085185945034027,
      "learning_rate": 9.333333333333334e-05,
      "loss": 0.7978,
      "step": 71
    },
    {
      "epoch": 0.02304,
      "grad_norm": 0.12325625121593475,
      "learning_rate": 9.466666666666667e-05,
      "loss": 0.7175,
      "step": 72
    },
    {
      "epoch": 0.02336,
      "grad_norm": 0.10695890337228775,
      "learning_rate": 9.6e-05,
      "loss": 0.868,
      "step": 73
    },
    {
      "epoch": 0.02368,
      "grad_norm": 0.11983097344636917,
      "learning_rate": 9.733333333333335e-05,
      "loss": 0.7965,
      "step": 74
    },
    {
      "epoch": 0.024,
      "grad_norm": 0.11521439999341965,
      "learning_rate": 9.866666666666668e-05,
      "loss": 0.9305,
      "step": 75
    },
    {
      "epoch": 0.02432,
      "grad_norm": 0.1252303123474121,
      "learning_rate": 0.0001,
      "loss": 0.6807,
      "step": 76
    },
    {
      "epoch": 0.02464,
      "grad_norm": 0.1265154629945755,
      "learning_rate": 0.00010133333333333335,
      "loss": 0.718,
      "step": 77
    },
    {
      "epoch": 0.02496,
      "grad_norm": 0.1109502986073494,
      "learning_rate": 0.00010266666666666666,
      "loss": 0.769,
      "step": 78
    },
    {
      "epoch": 0.02528,
      "grad_norm": 0.11158560961484909,
      "learning_rate": 0.00010400000000000001,
      "loss": 0.8579,
      "step": 79
    },
    {
      "epoch": 0.0256,
      "grad_norm": 0.13526469469070435,
      "learning_rate": 0.00010533333333333332,
      "loss": 0.7704,
      "step": 80
    },
    {
      "epoch": 0.02592,
      "grad_norm": 0.12420251965522766,
      "learning_rate": 0.00010666666666666667,
      "loss": 0.673,
      "step": 81
    },
    {
      "epoch": 0.02624,
      "grad_norm": 0.11838296055793762,
      "learning_rate": 0.00010800000000000001,
      "loss": 0.8576,
      "step": 82
    },
    {
      "epoch": 0.02656,
      "grad_norm": 0.11958430707454681,
      "learning_rate": 0.00010933333333333333,
      "loss": 0.8521,
      "step": 83
    },
    {
      "epoch": 0.02688,
      "grad_norm": 0.12049078941345215,
      "learning_rate": 0.00011066666666666667,
      "loss": 0.6008,
      "step": 84
    },
    {
      "epoch": 0.0272,
      "grad_norm": 0.10999640822410583,
      "learning_rate": 0.00011200000000000001,
      "loss": 0.7351,
      "step": 85
    },
    {
      "epoch": 0.02752,
      "grad_norm": 0.11500285565853119,
      "learning_rate": 0.00011333333333333334,
      "loss": 0.7841,
      "step": 86
    },
    {
      "epoch": 0.02784,
      "grad_norm": 0.11917781829833984,
      "learning_rate": 0.00011466666666666667,
      "loss": 0.8275,
      "step": 87
    },
    {
      "epoch": 0.02816,
      "grad_norm": 0.11745665222406387,
      "learning_rate": 0.000116,
      "loss": 0.6096,
      "step": 88
    },
    {
      "epoch": 0.02848,
      "grad_norm": 0.1208786815404892,
      "learning_rate": 0.00011733333333333334,
      "loss": 0.6842,
      "step": 89
    },
    {
      "epoch": 0.0288,
      "grad_norm": 0.12839952111244202,
      "learning_rate": 0.00011866666666666669,
      "loss": 0.7467,
      "step": 90
    },
    {
      "epoch": 0.02912,
      "grad_norm": 0.12688350677490234,
      "learning_rate": 0.00012,
      "loss": 0.6507,
      "step": 91
    },
    {
      "epoch": 0.02944,
      "grad_norm": 0.11489767581224442,
      "learning_rate": 0.00012133333333333335,
      "loss": 0.8403,
      "step": 92
    },
    {
      "epoch": 0.02976,
      "grad_norm": 0.1305433064699173,
      "learning_rate": 0.00012266666666666668,
      "loss": 0.748,
      "step": 93
    },
    {
      "epoch": 0.03008,
      "grad_norm": 0.13992320001125336,
      "learning_rate": 0.000124,
      "loss": 0.796,
      "step": 94
    },
    {
      "epoch": 0.0304,
      "grad_norm": 0.12120400369167328,
      "learning_rate": 0.00012533333333333334,
      "loss": 0.6445,
      "step": 95
    },
    {
      "epoch": 0.03072,
      "grad_norm": 0.13254240155220032,
      "learning_rate": 0.00012666666666666666,
      "loss": 0.7488,
      "step": 96
    },
    {
      "epoch": 0.03104,
      "grad_norm": 0.13790103793144226,
      "learning_rate": 0.00012800000000000002,
      "loss": 0.8005,
      "step": 97
    },
    {
      "epoch": 0.03136,
      "grad_norm": 0.12862953543663025,
      "learning_rate": 0.00012933333333333332,
      "loss": 0.7975,
      "step": 98
    },
    {
      "epoch": 0.03168,
      "grad_norm": 0.14197330176830292,
      "learning_rate": 0.00013066666666666668,
      "loss": 0.9026,
      "step": 99
    },
    {
      "epoch": 0.032,
      "grad_norm": 0.12494270503520966,
      "learning_rate": 0.000132,
      "loss": 0.7353,
      "step": 100
    },
    {
      "epoch": 0.03232,
      "grad_norm": 0.12572987377643585,
      "learning_rate": 0.00013333333333333334,
      "loss": 0.9271,
      "step": 101
    },
    {
      "epoch": 0.03264,
      "grad_norm": 0.1314753144979477,
      "learning_rate": 0.00013466666666666667,
      "loss": 0.7816,
      "step": 102
    },
    {
      "epoch": 0.03296,
      "grad_norm": 0.12902331352233887,
      "learning_rate": 0.00013600000000000003,
      "loss": 0.7348,
      "step": 103
    },
    {
      "epoch": 0.03328,
      "grad_norm": 0.14086449146270752,
      "learning_rate": 0.00013733333333333333,
      "loss": 0.831,
      "step": 104
    },
    {
      "epoch": 0.0336,
      "grad_norm": 0.12464585155248642,
      "learning_rate": 0.00013866666666666669,
      "loss": 0.6945,
      "step": 105
    },
    {
      "epoch": 0.03392,
      "grad_norm": 0.11436092108488083,
      "learning_rate": 0.00014,
      "loss": 0.8192,
      "step": 106
    },
    {
      "epoch": 0.03424,
      "grad_norm": 0.11779410392045975,
      "learning_rate": 0.00014133333333333334,
      "loss": 0.7989,
      "step": 107
    },
    {
      "epoch": 0.03456,
      "grad_norm": 0.14601927995681763,
      "learning_rate": 0.00014266666666666667,
      "loss": 0.6256,
      "step": 108
    },
    {
      "epoch": 0.03488,
      "grad_norm": 0.13231013715267181,
      "learning_rate": 0.000144,
      "loss": 0.7445,
      "step": 109
    },
    {
      "epoch": 0.0352,
      "grad_norm": 0.11249889433383942,
      "learning_rate": 0.00014533333333333333,
      "loss": 0.7338,
      "step": 110
    },
    {
      "epoch": 0.03552,
      "grad_norm": 0.11275426298379898,
      "learning_rate": 0.00014666666666666666,
      "loss": 0.7993,
      "step": 111
    },
    {
      "epoch": 0.03584,
      "grad_norm": 0.11678344756364822,
      "learning_rate": 0.000148,
      "loss": 0.9234,
      "step": 112
    },
    {
      "epoch": 0.03616,
      "grad_norm": 0.10879474133253098,
      "learning_rate": 0.00014933333333333335,
      "loss": 0.8732,
      "step": 113
    },
    {
      "epoch": 0.03648,
      "grad_norm": 0.13673344254493713,
      "learning_rate": 0.00015066666666666668,
      "loss": 0.8698,
      "step": 114
    },
    {
      "epoch": 0.0368,
      "grad_norm": 0.12564410269260406,
      "learning_rate": 0.000152,
      "loss": 0.7913,
      "step": 115
    },
    {
      "epoch": 0.03712,
      "grad_norm": 0.12272034585475922,
      "learning_rate": 0.00015333333333333334,
      "loss": 0.7244,
      "step": 116
    },
    {
      "epoch": 0.03744,
      "grad_norm": 0.11841209977865219,
      "learning_rate": 0.00015466666666666667,
      "loss": 0.7648,
      "step": 117
    },
    {
      "epoch": 0.03776,
      "grad_norm": 0.11938042938709259,
      "learning_rate": 0.00015600000000000002,
      "loss": 0.8381,
      "step": 118
    },
    {
      "epoch": 0.03808,
      "grad_norm": 0.13476668298244476,
      "learning_rate": 0.00015733333333333333,
      "loss": 0.6931,
      "step": 119
    },
    {
      "epoch": 0.0384,
      "grad_norm": 0.1274396926164627,
      "learning_rate": 0.00015866666666666668,
      "loss": 0.854,
      "step": 120
    },
    {
      "epoch": 0.03872,
      "grad_norm": 0.11518494784832001,
      "learning_rate": 0.00016,
      "loss": 0.7591,
      "step": 121
    },
    {
      "epoch": 0.03904,
      "grad_norm": 0.12739014625549316,
      "learning_rate": 0.00016133333333333334,
      "loss": 0.6956,
      "step": 122
    },
    {
      "epoch": 0.03936,
      "grad_norm": 0.13384665548801422,
      "learning_rate": 0.00016266666666666667,
      "loss": 0.6487,
      "step": 123
    },
    {
      "epoch": 0.03968,
      "grad_norm": 0.11407941579818726,
      "learning_rate": 0.000164,
      "loss": 0.6822,
      "step": 124
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.12307750433683395,
      "learning_rate": 0.00016533333333333333,
      "loss": 0.7038,
      "step": 125
    },
    {
      "epoch": 0.04032,
      "grad_norm": 0.12420140206813812,
      "learning_rate": 0.0001666666666666667,
      "loss": 0.6589,
      "step": 126
    },
    {
      "epoch": 0.04064,
      "grad_norm": 0.1327451914548874,
      "learning_rate": 0.000168,
      "loss": 0.6002,
      "step": 127
    },
    {
      "epoch": 0.04096,
      "grad_norm": 0.1248873695731163,
      "learning_rate": 0.00016933333333333335,
      "loss": 0.8812,
      "step": 128
    },
    {
      "epoch": 0.04128,
      "grad_norm": 0.11638613790273666,
      "learning_rate": 0.00017066666666666668,
      "loss": 0.7455,
      "step": 129
    },
    {
      "epoch": 0.0416,
      "grad_norm": 0.12473749369382858,
      "learning_rate": 0.000172,
      "loss": 0.6218,
      "step": 130
    },
    {
      "epoch": 0.04192,
      "grad_norm": 0.1339467465877533,
      "learning_rate": 0.00017333333333333334,
      "loss": 0.729,
      "step": 131
    },
    {
      "epoch": 0.04224,
      "grad_norm": 0.11078035831451416,
      "learning_rate": 0.00017466666666666667,
      "loss": 0.8507,
      "step": 132
    },
    {
      "epoch": 0.04256,
      "grad_norm": 0.11885277926921844,
      "learning_rate": 0.00017600000000000002,
      "loss": 0.8817,
      "step": 133
    },
    {
      "epoch": 0.04288,
      "grad_norm": 0.13378757238388062,
      "learning_rate": 0.00017733333333333335,
      "loss": 0.705,
      "step": 134
    },
    {
      "epoch": 0.0432,
      "grad_norm": 0.11696331948041916,
      "learning_rate": 0.00017866666666666668,
      "loss": 0.7089,
      "step": 135
    },
    {
      "epoch": 0.04352,
      "grad_norm": 0.12444671988487244,
      "learning_rate": 0.00018,
      "loss": 0.699,
      "step": 136
    },
    {
      "epoch": 0.04384,
      "grad_norm": 0.11136961728334427,
      "learning_rate": 0.00018133333333333334,
      "loss": 0.7805,
      "step": 137
    },
    {
      "epoch": 0.04416,
      "grad_norm": 0.13461412489414215,
      "learning_rate": 0.00018266666666666667,
      "loss": 0.7581,
      "step": 138
    },
    {
      "epoch": 0.04448,
      "grad_norm": 0.11344683915376663,
      "learning_rate": 0.00018400000000000003,
      "loss": 0.7234,
      "step": 139
    },
    {
      "epoch": 0.0448,
      "grad_norm": 0.12247714400291443,
      "learning_rate": 0.00018533333333333333,
      "loss": 0.9326,
      "step": 140
    },
    {
      "epoch": 0.04512,
      "grad_norm": 0.12991179525852203,
      "learning_rate": 0.0001866666666666667,
      "loss": 0.7644,
      "step": 141
    },
    {
      "epoch": 0.04544,
      "grad_norm": 0.1182120144367218,
      "learning_rate": 0.000188,
      "loss": 0.8437,
      "step": 142
    },
    {
      "epoch": 0.04576,
      "grad_norm": 0.10737155377864838,
      "learning_rate": 0.00018933333333333335,
      "loss": 0.8047,
      "step": 143
    },
    {
      "epoch": 0.04608,
      "grad_norm": 0.11993440985679626,
      "learning_rate": 0.00019066666666666668,
      "loss": 0.7247,
      "step": 144
    },
    {
      "epoch": 0.0464,
      "grad_norm": 0.11705442517995834,
      "learning_rate": 0.000192,
      "loss": 0.7104,
      "step": 145
    },
    {
      "epoch": 0.04672,
      "grad_norm": 0.12065937370061874,
      "learning_rate": 0.00019333333333333333,
      "loss": 0.7243,
      "step": 146
    },
    {
      "epoch": 0.04704,
      "grad_norm": 0.1178629919886589,
      "learning_rate": 0.0001946666666666667,
      "loss": 0.6946,
      "step": 147
    },
    {
      "epoch": 0.04736,
      "grad_norm": 0.12744340300559998,
      "learning_rate": 0.000196,
      "loss": 0.6946,
      "step": 148
    },
    {
      "epoch": 0.04768,
      "grad_norm": 0.14068365097045898,
      "learning_rate": 0.00019733333333333335,
      "loss": 0.7905,
      "step": 149
    },
    {
      "epoch": 0.048,
      "grad_norm": 0.11114205420017242,
      "learning_rate": 0.00019866666666666668,
      "loss": 0.8352,
      "step": 150
    },
    {
      "epoch": 0.04832,
      "grad_norm": 0.10463167726993561,
      "learning_rate": 0.0002,
      "loss": 0.6664,
      "step": 151
    },
    {
      "epoch": 0.04864,
      "grad_norm": 0.12237433344125748,
      "learning_rate": 0.00019995876288659794,
      "loss": 0.8114,
      "step": 152
    },
    {
      "epoch": 0.04896,
      "grad_norm": 0.12069843709468842,
      "learning_rate": 0.0001999175257731959,
      "loss": 0.7901,
      "step": 153
    },
    {
      "epoch": 0.04928,
      "grad_norm": 0.11398322135210037,
      "learning_rate": 0.00019987628865979383,
      "loss": 0.7131,
      "step": 154
    },
    {
      "epoch": 0.0496,
      "grad_norm": 0.12183695286512375,
      "learning_rate": 0.00019983505154639176,
      "loss": 0.7609,
      "step": 155
    },
    {
      "epoch": 0.04992,
      "grad_norm": 0.12869080901145935,
      "learning_rate": 0.0001997938144329897,
      "loss": 0.8103,
      "step": 156
    },
    {
      "epoch": 0.05024,
      "grad_norm": 0.1119738221168518,
      "learning_rate": 0.00019975257731958762,
      "loss": 0.6731,
      "step": 157
    },
    {
      "epoch": 0.05056,
      "grad_norm": 0.12156844139099121,
      "learning_rate": 0.00019971134020618558,
      "loss": 0.7535,
      "step": 158
    },
    {
      "epoch": 0.05088,
      "grad_norm": 0.12900514900684357,
      "learning_rate": 0.00019967010309278351,
      "loss": 0.7378,
      "step": 159
    },
    {
      "epoch": 0.0512,
      "grad_norm": 0.12743592262268066,
      "learning_rate": 0.00019962886597938147,
      "loss": 0.6197,
      "step": 160
    },
    {
      "epoch": 0.05152,
      "grad_norm": 0.128151997923851,
      "learning_rate": 0.0001995876288659794,
      "loss": 0.7812,
      "step": 161
    },
    {
      "epoch": 0.05184,
      "grad_norm": 0.10753507912158966,
      "learning_rate": 0.00019954639175257733,
      "loss": 0.5774,
      "step": 162
    },
    {
      "epoch": 0.05216,
      "grad_norm": 0.11813949793577194,
      "learning_rate": 0.00019950515463917527,
      "loss": 0.6894,
      "step": 163
    },
    {
      "epoch": 0.05248,
      "grad_norm": 0.1340881586074829,
      "learning_rate": 0.0001994639175257732,
      "loss": 0.7193,
      "step": 164
    },
    {
      "epoch": 0.0528,
      "grad_norm": 0.10451763868331909,
      "learning_rate": 0.00019942268041237116,
      "loss": 0.572,
      "step": 165
    },
    {
      "epoch": 0.05312,
      "grad_norm": 0.10894517600536346,
      "learning_rate": 0.0001993814432989691,
      "loss": 0.7693,
      "step": 166
    },
    {
      "epoch": 0.05344,
      "grad_norm": 0.1243942379951477,
      "learning_rate": 0.00019934020618556702,
      "loss": 0.7805,
      "step": 167
    },
    {
      "epoch": 0.05376,
      "grad_norm": 0.14128854870796204,
      "learning_rate": 0.00019929896907216498,
      "loss": 0.8595,
      "step": 168
    },
    {
      "epoch": 0.05408,
      "grad_norm": 0.12148380279541016,
      "learning_rate": 0.00019925773195876288,
      "loss": 0.8954,
      "step": 169
    },
    {
      "epoch": 0.0544,
      "grad_norm": 0.10860492289066315,
      "learning_rate": 0.0001992164948453608,
      "loss": 0.8101,
      "step": 170
    },
    {
      "epoch": 0.05472,
      "grad_norm": 0.12292741239070892,
      "learning_rate": 0.00019917525773195877,
      "loss": 0.7965,
      "step": 171
    },
    {
      "epoch": 0.05504,
      "grad_norm": 0.13840395212173462,
      "learning_rate": 0.0001991340206185567,
      "loss": 0.7231,
      "step": 172
    },
    {
      "epoch": 0.05536,
      "grad_norm": 0.11291699856519699,
      "learning_rate": 0.00019909278350515466,
      "loss": 0.8247,
      "step": 173
    },
    {
      "epoch": 0.05568,
      "grad_norm": 0.11811841279268265,
      "learning_rate": 0.0001990515463917526,
      "loss": 0.8504,
      "step": 174
    },
    {
      "epoch": 0.056,
      "grad_norm": 0.11040613055229187,
      "learning_rate": 0.00019901030927835052,
      "loss": 0.6464,
      "step": 175
    },
    {
      "epoch": 0.05632,
      "grad_norm": 0.10370033234357834,
      "learning_rate": 0.00019896907216494845,
      "loss": 0.8016,
      "step": 176
    },
    {
      "epoch": 0.05664,
      "grad_norm": 0.11868111789226532,
      "learning_rate": 0.00019892783505154639,
      "loss": 0.7267,
      "step": 177
    },
    {
      "epoch": 0.05696,
      "grad_norm": 0.11893659085035324,
      "learning_rate": 0.00019888659793814434,
      "loss": 0.814,
      "step": 178
    },
    {
      "epoch": 0.05728,
      "grad_norm": 0.11829444020986557,
      "learning_rate": 0.00019884536082474227,
      "loss": 0.7852,
      "step": 179
    },
    {
      "epoch": 0.0576,
      "grad_norm": 0.10493418574333191,
      "learning_rate": 0.0001988041237113402,
      "loss": 0.6154,
      "step": 180
    },
    {
      "epoch": 0.05792,
      "grad_norm": 0.10895421355962753,
      "learning_rate": 0.00019876288659793816,
      "loss": 0.8663,
      "step": 181
    },
    {
      "epoch": 0.05824,
      "grad_norm": 0.11782484501600266,
      "learning_rate": 0.0001987216494845361,
      "loss": 0.7902,
      "step": 182
    },
    {
      "epoch": 0.05856,
      "grad_norm": 0.12639066576957703,
      "learning_rate": 0.00019868041237113403,
      "loss": 0.7381,
      "step": 183
    },
    {
      "epoch": 0.05888,
      "grad_norm": 0.1321602165699005,
      "learning_rate": 0.00019863917525773196,
      "loss": 0.9283,
      "step": 184
    },
    {
      "epoch": 0.0592,
      "grad_norm": 0.12468945235013962,
      "learning_rate": 0.0001985979381443299,
      "loss": 0.6641,
      "step": 185
    },
    {
      "epoch": 0.05952,
      "grad_norm": 0.12850762903690338,
      "learning_rate": 0.00019855670103092785,
      "loss": 0.8694,
      "step": 186
    },
    {
      "epoch": 0.05984,
      "grad_norm": 0.10586417466402054,
      "learning_rate": 0.00019851546391752578,
      "loss": 0.806,
      "step": 187
    },
    {
      "epoch": 0.06016,
      "grad_norm": 0.10202761739492416,
      "learning_rate": 0.00019847422680412374,
      "loss": 0.684,
      "step": 188
    },
    {
      "epoch": 0.06048,
      "grad_norm": 0.11566773056983948,
      "learning_rate": 0.00019843298969072167,
      "loss": 0.6737,
      "step": 189
    },
    {
      "epoch": 0.0608,
      "grad_norm": 0.12295495718717575,
      "learning_rate": 0.0001983917525773196,
      "loss": 0.6539,
      "step": 190
    },
    {
      "epoch": 0.06112,
      "grad_norm": 0.14126336574554443,
      "learning_rate": 0.00019835051546391753,
      "loss": 0.8539,
      "step": 191
    },
    {
      "epoch": 0.06144,
      "grad_norm": 0.1215134933590889,
      "learning_rate": 0.00019830927835051546,
      "loss": 0.7051,
      "step": 192
    },
    {
      "epoch": 0.06176,
      "grad_norm": 0.10785145312547684,
      "learning_rate": 0.00019826804123711342,
      "loss": 0.7877,
      "step": 193
    },
    {
      "epoch": 0.06208,
      "grad_norm": 0.1299588680267334,
      "learning_rate": 0.00019822680412371135,
      "loss": 0.7942,
      "step": 194
    },
    {
      "epoch": 0.0624,
      "grad_norm": 0.1072564497590065,
      "learning_rate": 0.00019818556701030928,
      "loss": 0.7055,
      "step": 195
    },
    {
      "epoch": 0.06272,
      "grad_norm": 0.12069325149059296,
      "learning_rate": 0.00019814432989690724,
      "loss": 0.9036,
      "step": 196
    },
    {
      "epoch": 0.06304,
      "grad_norm": 0.14454863965511322,
      "learning_rate": 0.00019810309278350517,
      "loss": 0.7567,
      "step": 197
    },
    {
      "epoch": 0.06336,
      "grad_norm": 0.11882466822862625,
      "learning_rate": 0.0001980618556701031,
      "loss": 0.5986,
      "step": 198
    },
    {
      "epoch": 0.06368,
      "grad_norm": 0.12222633510828018,
      "learning_rate": 0.00019802061855670104,
      "loss": 0.8493,
      "step": 199
    },
    {
      "epoch": 0.064,
      "grad_norm": 0.10818106681108475,
      "learning_rate": 0.00019797938144329897,
      "loss": 0.7932,
      "step": 200
    },
    {
      "epoch": 0.06432,
      "grad_norm": 0.1225774884223938,
      "learning_rate": 0.00019793814432989693,
      "loss": 0.7501,
      "step": 201
    },
    {
      "epoch": 0.06464,
      "grad_norm": 0.13827428221702576,
      "learning_rate": 0.00019789690721649486,
      "loss": 0.818,
      "step": 202
    },
    {
      "epoch": 0.06496,
      "grad_norm": 0.1145445853471756,
      "learning_rate": 0.0001978556701030928,
      "loss": 0.6433,
      "step": 203
    },
    {
      "epoch": 0.06528,
      "grad_norm": 0.11928769201040268,
      "learning_rate": 0.00019781443298969075,
      "loss": 0.7352,
      "step": 204
    },
    {
      "epoch": 0.0656,
      "grad_norm": 0.11696071177721024,
      "learning_rate": 0.00019777319587628865,
      "loss": 0.8499,
      "step": 205
    },
    {
      "epoch": 0.06592,
      "grad_norm": 0.1281200349330902,
      "learning_rate": 0.0001977319587628866,
      "loss": 0.7526,
      "step": 206
    },
    {
      "epoch": 0.06624,
      "grad_norm": 0.1270955204963684,
      "learning_rate": 0.00019769072164948454,
      "loss": 0.8456,
      "step": 207
    },
    {
      "epoch": 0.06656,
      "grad_norm": 0.12278109043836594,
      "learning_rate": 0.00019764948453608247,
      "loss": 0.5972,
      "step": 208
    },
    {
      "epoch": 0.06688,
      "grad_norm": 0.10781408101320267,
      "learning_rate": 0.00019760824742268043,
      "loss": 0.5967,
      "step": 209
    },
    {
      "epoch": 0.0672,
      "grad_norm": 0.12237494438886642,
      "learning_rate": 0.00019756701030927836,
      "loss": 0.7306,
      "step": 210
    },
    {
      "epoch": 0.06752,
      "grad_norm": 0.1100955531001091,
      "learning_rate": 0.00019752577319587632,
      "loss": 0.7579,
      "step": 211
    },
    {
      "epoch": 0.06784,
      "grad_norm": 0.11036559194326401,
      "learning_rate": 0.00019748453608247422,
      "loss": 0.6668,
      "step": 212
    },
    {
      "epoch": 0.06816,
      "grad_norm": 0.14552150666713715,
      "learning_rate": 0.00019744329896907216,
      "loss": 0.6668,
      "step": 213
    },
    {
      "epoch": 0.06848,
      "grad_norm": 0.11125028133392334,
      "learning_rate": 0.0001974020618556701,
      "loss": 0.7342,
      "step": 214
    },
    {
      "epoch": 0.0688,
      "grad_norm": 0.14329232275485992,
      "learning_rate": 0.00019736082474226804,
      "loss": 0.8708,
      "step": 215
    },
    {
      "epoch": 0.06912,
      "grad_norm": 0.11911016702651978,
      "learning_rate": 0.000197319587628866,
      "loss": 0.7224,
      "step": 216
    },
    {
      "epoch": 0.06944,
      "grad_norm": 0.12713277339935303,
      "learning_rate": 0.00019727835051546393,
      "loss": 0.7222,
      "step": 217
    },
    {
      "epoch": 0.06976,
      "grad_norm": 0.1140274852514267,
      "learning_rate": 0.00019723711340206187,
      "loss": 0.627,
      "step": 218
    },
    {
      "epoch": 0.07008,
      "grad_norm": 0.1070765033364296,
      "learning_rate": 0.0001971958762886598,
      "loss": 0.7514,
      "step": 219
    },
    {
      "epoch": 0.0704,
      "grad_norm": 0.09937503188848495,
      "learning_rate": 0.00019715463917525773,
      "loss": 0.7539,
      "step": 220
    },
    {
      "epoch": 0.07072,
      "grad_norm": 0.12137589603662491,
      "learning_rate": 0.0001971134020618557,
      "loss": 0.68,
      "step": 221
    },
    {
      "epoch": 0.07104,
      "grad_norm": 0.11091487109661102,
      "learning_rate": 0.00019707216494845362,
      "loss": 0.6904,
      "step": 222
    },
    {
      "epoch": 0.07136,
      "grad_norm": 0.10996535420417786,
      "learning_rate": 0.00019703092783505155,
      "loss": 0.6854,
      "step": 223
    },
    {
      "epoch": 0.07168,
      "grad_norm": 0.129950150847435,
      "learning_rate": 0.0001969896907216495,
      "loss": 0.7023,
      "step": 224
    },
    {
      "epoch": 0.072,
      "grad_norm": 0.13028115034103394,
      "learning_rate": 0.00019694845360824744,
      "loss": 0.6479,
      "step": 225
    },
    {
      "epoch": 0.07232,
      "grad_norm": 0.1166912168264389,
      "learning_rate": 0.00019690721649484537,
      "loss": 0.6498,
      "step": 226
    },
    {
      "epoch": 0.07264,
      "grad_norm": 0.1148250624537468,
      "learning_rate": 0.0001968659793814433,
      "loss": 0.7461,
      "step": 227
    },
    {
      "epoch": 0.07296,
      "grad_norm": 0.12639757990837097,
      "learning_rate": 0.00019682474226804123,
      "loss": 0.712,
      "step": 228
    },
    {
      "epoch": 0.07328,
      "grad_norm": 0.13229860365390778,
      "learning_rate": 0.0001967835051546392,
      "loss": 0.7447,
      "step": 229
    },
    {
      "epoch": 0.0736,
      "grad_norm": 0.11198769509792328,
      "learning_rate": 0.00019674226804123712,
      "loss": 0.7329,
      "step": 230
    },
    {
      "epoch": 0.07392,
      "grad_norm": 0.11425165086984634,
      "learning_rate": 0.00019670103092783505,
      "loss": 0.868,
      "step": 231
    },
    {
      "epoch": 0.07424,
      "grad_norm": 0.12034732848405838,
      "learning_rate": 0.000196659793814433,
      "loss": 0.7276,
      "step": 232
    },
    {
      "epoch": 0.07456,
      "grad_norm": 0.10866102576255798,
      "learning_rate": 0.00019661855670103094,
      "loss": 0.7338,
      "step": 233
    },
    {
      "epoch": 0.07488,
      "grad_norm": 0.12432979047298431,
      "learning_rate": 0.00019657731958762887,
      "loss": 0.8257,
      "step": 234
    },
    {
      "epoch": 0.0752,
      "grad_norm": 0.12356464564800262,
      "learning_rate": 0.0001965360824742268,
      "loss": 0.6813,
      "step": 235
    },
    {
      "epoch": 0.07552,
      "grad_norm": 0.1002034917473793,
      "learning_rate": 0.00019649484536082474,
      "loss": 0.7406,
      "step": 236
    },
    {
      "epoch": 0.07584,
      "grad_norm": 0.11528842151165009,
      "learning_rate": 0.0001964536082474227,
      "loss": 0.7597,
      "step": 237
    },
    {
      "epoch": 0.07616,
      "grad_norm": 0.11711521446704865,
      "learning_rate": 0.00019641237113402063,
      "loss": 0.6925,
      "step": 238
    },
    {
      "epoch": 0.07648,
      "grad_norm": 0.09948590397834778,
      "learning_rate": 0.00019637113402061859,
      "loss": 0.8017,
      "step": 239
    },
    {
      "epoch": 0.0768,
      "grad_norm": 0.11264682561159134,
      "learning_rate": 0.00019632989690721652,
      "loss": 0.6871,
      "step": 240
    },
    {
      "epoch": 0.07712,
      "grad_norm": 0.12238943576812744,
      "learning_rate": 0.00019628865979381442,
      "loss": 0.7113,
      "step": 241
    },
    {
      "epoch": 0.07744,
      "grad_norm": 0.11449657380580902,
      "learning_rate": 0.00019624742268041238,
      "loss": 0.7144,
      "step": 242
    },
    {
      "epoch": 0.07776,
      "grad_norm": 0.12212596088647842,
      "learning_rate": 0.0001962061855670103,
      "loss": 0.697,
      "step": 243
    },
    {
      "epoch": 0.07808,
      "grad_norm": 0.12231374531984329,
      "learning_rate": 0.00019616494845360827,
      "loss": 0.6955,
      "step": 244
    },
    {
      "epoch": 0.0784,
      "grad_norm": 0.11105109006166458,
      "learning_rate": 0.0001961237113402062,
      "loss": 0.8251,
      "step": 245
    },
    {
      "epoch": 0.07872,
      "grad_norm": 0.12369677424430847,
      "learning_rate": 0.00019608247422680413,
      "loss": 0.8814,
      "step": 246
    },
    {
      "epoch": 0.07904,
      "grad_norm": 0.12226764857769012,
      "learning_rate": 0.0001960412371134021,
      "loss": 0.7408,
      "step": 247
    },
    {
      "epoch": 0.07936,
      "grad_norm": 0.10316195338964462,
      "learning_rate": 0.000196,
      "loss": 0.792,
      "step": 248
    },
    {
      "epoch": 0.07968,
      "grad_norm": 0.12036872655153275,
      "learning_rate": 0.00019595876288659795,
      "loss": 0.6,
      "step": 249
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.1140509843826294,
      "learning_rate": 0.00019591752577319588,
      "loss": 0.7599,
      "step": 250
    },
    {
      "epoch": 0.08032,
      "grad_norm": 0.14200666546821594,
      "learning_rate": 0.00019587628865979381,
      "loss": 0.7723,
      "step": 251
    },
    {
      "epoch": 0.08064,
      "grad_norm": 0.11529050767421722,
      "learning_rate": 0.00019583505154639177,
      "loss": 0.8305,
      "step": 252
    },
    {
      "epoch": 0.08096,
      "grad_norm": 0.11324192583560944,
      "learning_rate": 0.0001957938144329897,
      "loss": 0.7913,
      "step": 253
    },
    {
      "epoch": 0.08128,
      "grad_norm": 0.12280263006687164,
      "learning_rate": 0.00019575257731958764,
      "loss": 0.8388,
      "step": 254
    },
    {
      "epoch": 0.0816,
      "grad_norm": 0.10810542851686478,
      "learning_rate": 0.00019571134020618557,
      "loss": 0.7025,
      "step": 255
    },
    {
      "epoch": 0.08192,
      "grad_norm": 0.11314940452575684,
      "learning_rate": 0.0001956701030927835,
      "loss": 0.6696,
      "step": 256
    },
    {
      "epoch": 0.08224,
      "grad_norm": 0.10368701070547104,
      "learning_rate": 0.00019562886597938146,
      "loss": 0.581,
      "step": 257
    },
    {
      "epoch": 0.08256,
      "grad_norm": 0.11146122217178345,
      "learning_rate": 0.0001955876288659794,
      "loss": 0.8221,
      "step": 258
    },
    {
      "epoch": 0.08288,
      "grad_norm": 0.11592487245798111,
      "learning_rate": 0.00019554639175257732,
      "loss": 0.8082,
      "step": 259
    },
    {
      "epoch": 0.0832,
      "grad_norm": 0.11007897555828094,
      "learning_rate": 0.00019550515463917528,
      "loss": 0.8943,
      "step": 260
    },
    {
      "epoch": 0.08352,
      "grad_norm": 0.11887793987989426,
      "learning_rate": 0.0001954639175257732,
      "loss": 0.7447,
      "step": 261
    },
    {
      "epoch": 0.08384,
      "grad_norm": 0.10715821385383606,
      "learning_rate": 0.00019542268041237114,
      "loss": 0.6405,
      "step": 262
    },
    {
      "epoch": 0.08416,
      "grad_norm": 0.11692032217979431,
      "learning_rate": 0.00019538144329896907,
      "loss": 0.863,
      "step": 263
    },
    {
      "epoch": 0.08448,
      "grad_norm": 0.11917733401060104,
      "learning_rate": 0.000195340206185567,
      "loss": 0.6976,
      "step": 264
    },
    {
      "epoch": 0.0848,
      "grad_norm": 0.1289709061384201,
      "learning_rate": 0.00019529896907216496,
      "loss": 0.9149,
      "step": 265
    },
    {
      "epoch": 0.08512,
      "grad_norm": 0.10539647191762924,
      "learning_rate": 0.0001952577319587629,
      "loss": 0.8449,
      "step": 266
    },
    {
      "epoch": 0.08544,
      "grad_norm": 0.10908807814121246,
      "learning_rate": 0.00019521649484536085,
      "loss": 0.7162,
      "step": 267
    },
    {
      "epoch": 0.08576,
      "grad_norm": 0.10654058307409286,
      "learning_rate": 0.00019517525773195878,
      "loss": 0.6285,
      "step": 268
    },
    {
      "epoch": 0.08608,
      "grad_norm": 0.111301951110363,
      "learning_rate": 0.0001951340206185567,
      "loss": 0.755,
      "step": 269
    },
    {
      "epoch": 0.0864,
      "grad_norm": 0.10679474472999573,
      "learning_rate": 0.00019509278350515464,
      "loss": 0.7782,
      "step": 270
    },
    {
      "epoch": 0.08672,
      "grad_norm": 0.10793033987283707,
      "learning_rate": 0.00019505154639175258,
      "loss": 0.7021,
      "step": 271
    },
    {
      "epoch": 0.08704,
      "grad_norm": 0.13048402965068817,
      "learning_rate": 0.00019501030927835053,
      "loss": 0.7575,
      "step": 272
    },
    {
      "epoch": 0.08736,
      "grad_norm": 0.11437942832708359,
      "learning_rate": 0.00019496907216494847,
      "loss": 0.7925,
      "step": 273
    },
    {
      "epoch": 0.08768,
      "grad_norm": 0.1390543431043625,
      "learning_rate": 0.0001949278350515464,
      "loss": 0.8013,
      "step": 274
    },
    {
      "epoch": 0.088,
      "grad_norm": 0.1289997398853302,
      "learning_rate": 0.00019488659793814435,
      "loss": 0.6053,
      "step": 275
    },
    {
      "epoch": 0.08832,
      "grad_norm": 0.09682965278625488,
      "learning_rate": 0.00019484536082474229,
      "loss": 0.8756,
      "step": 276
    },
    {
      "epoch": 0.08864,
      "grad_norm": 0.12752555310726166,
      "learning_rate": 0.00019480412371134022,
      "loss": 0.6314,
      "step": 277
    },
    {
      "epoch": 0.08896,
      "grad_norm": 0.11673011630773544,
      "learning_rate": 0.00019476288659793815,
      "loss": 0.8842,
      "step": 278
    },
    {
      "epoch": 0.08928,
      "grad_norm": 0.108588308095932,
      "learning_rate": 0.00019472164948453608,
      "loss": 0.6348,
      "step": 279
    },
    {
      "epoch": 0.0896,
      "grad_norm": 0.1195591390132904,
      "learning_rate": 0.00019468041237113404,
      "loss": 0.8097,
      "step": 280
    },
    {
      "epoch": 0.08992,
      "grad_norm": 0.11936333775520325,
      "learning_rate": 0.00019463917525773197,
      "loss": 0.7858,
      "step": 281
    },
    {
      "epoch": 0.09024,
      "grad_norm": 0.11627551168203354,
      "learning_rate": 0.00019459793814432993,
      "loss": 0.7322,
      "step": 282
    },
    {
      "epoch": 0.09056,
      "grad_norm": 0.10832615196704865,
      "learning_rate": 0.00019455670103092786,
      "loss": 0.6215,
      "step": 283
    },
    {
      "epoch": 0.09088,
      "grad_norm": 0.11125192046165466,
      "learning_rate": 0.00019451546391752576,
      "loss": 0.8058,
      "step": 284
    },
    {
      "epoch": 0.0912,
      "grad_norm": 0.11165905743837357,
      "learning_rate": 0.00019447422680412372,
      "loss": 0.7104,
      "step": 285
    },
    {
      "epoch": 0.09152,
      "grad_norm": 0.10910595208406448,
      "learning_rate": 0.00019443298969072165,
      "loss": 0.707,
      "step": 286
    },
    {
      "epoch": 0.09184,
      "grad_norm": 0.11235277354717255,
      "learning_rate": 0.0001943917525773196,
      "loss": 0.8161,
      "step": 287
    },
    {
      "epoch": 0.09216,
      "grad_norm": 0.11668789386749268,
      "learning_rate": 0.00019435051546391754,
      "loss": 0.6641,
      "step": 288
    },
    {
      "epoch": 0.09248,
      "grad_norm": 0.11274772882461548,
      "learning_rate": 0.00019430927835051547,
      "loss": 0.6987,
      "step": 289
    },
    {
      "epoch": 0.0928,
      "grad_norm": 0.12356577813625336,
      "learning_rate": 0.0001942680412371134,
      "loss": 0.7051,
      "step": 290
    },
    {
      "epoch": 0.09312,
      "grad_norm": 0.0987682193517685,
      "learning_rate": 0.00019422680412371134,
      "loss": 0.7218,
      "step": 291
    },
    {
      "epoch": 0.09344,
      "grad_norm": 0.12354995310306549,
      "learning_rate": 0.00019418556701030927,
      "loss": 0.7379,
      "step": 292
    },
    {
      "epoch": 0.09376,
      "grad_norm": 0.1022811159491539,
      "learning_rate": 0.00019414432989690723,
      "loss": 0.65,
      "step": 293
    },
    {
      "epoch": 0.09408,
      "grad_norm": 0.11507881432771683,
      "learning_rate": 0.00019410309278350516,
      "loss": 0.8681,
      "step": 294
    },
    {
      "epoch": 0.0944,
      "grad_norm": 0.12279893457889557,
      "learning_rate": 0.00019406185567010312,
      "loss": 0.6837,
      "step": 295
    },
    {
      "epoch": 0.09472,
      "grad_norm": 0.10548900812864304,
      "learning_rate": 0.00019402061855670105,
      "loss": 0.669,
      "step": 296
    },
    {
      "epoch": 0.09504,
      "grad_norm": 0.1302396059036255,
      "learning_rate": 0.00019397938144329898,
      "loss": 0.7969,
      "step": 297
    },
    {
      "epoch": 0.09536,
      "grad_norm": 0.11492603272199631,
      "learning_rate": 0.0001939381443298969,
      "loss": 1.0184,
      "step": 298
    },
    {
      "epoch": 0.09568,
      "grad_norm": 0.131149023771286,
      "learning_rate": 0.00019389690721649484,
      "loss": 0.8521,
      "step": 299
    },
    {
      "epoch": 0.096,
      "grad_norm": 0.1252460777759552,
      "learning_rate": 0.0001938556701030928,
      "loss": 0.8791,
      "step": 300
    },
    {
      "epoch": 0.09632,
      "grad_norm": 0.11322636157274246,
      "learning_rate": 0.00019381443298969073,
      "loss": 0.7305,
      "step": 301
    },
    {
      "epoch": 0.09664,
      "grad_norm": 0.10373307764530182,
      "learning_rate": 0.00019377319587628866,
      "loss": 0.7844,
      "step": 302
    },
    {
      "epoch": 0.09696,
      "grad_norm": 0.13182489573955536,
      "learning_rate": 0.00019373195876288662,
      "loss": 0.6581,
      "step": 303
    },
    {
      "epoch": 0.09728,
      "grad_norm": 0.1276710480451584,
      "learning_rate": 0.00019369072164948455,
      "loss": 0.6664,
      "step": 304
    },
    {
      "epoch": 0.0976,
      "grad_norm": 0.10770755261182785,
      "learning_rate": 0.00019364948453608248,
      "loss": 0.6315,
      "step": 305
    },
    {
      "epoch": 0.09792,
      "grad_norm": 0.11755330115556717,
      "learning_rate": 0.00019360824742268041,
      "loss": 0.7562,
      "step": 306
    },
    {
      "epoch": 0.09824,
      "grad_norm": 0.1252545267343521,
      "learning_rate": 0.00019356701030927835,
      "loss": 0.6606,
      "step": 307
    },
    {
      "epoch": 0.09856,
      "grad_norm": 0.12284315377473831,
      "learning_rate": 0.0001935257731958763,
      "loss": 0.8037,
      "step": 308
    },
    {
      "epoch": 0.09888,
      "grad_norm": 0.106113962829113,
      "learning_rate": 0.00019348453608247424,
      "loss": 0.6797,
      "step": 309
    },
    {
      "epoch": 0.0992,
      "grad_norm": 0.09285733103752136,
      "learning_rate": 0.0001934432989690722,
      "loss": 0.6927,
      "step": 310
    },
    {
      "epoch": 0.09952,
      "grad_norm": 0.10903103649616241,
      "learning_rate": 0.00019340206185567012,
      "loss": 0.7863,
      "step": 311
    },
    {
      "epoch": 0.09984,
      "grad_norm": 0.12015190720558167,
      "learning_rate": 0.00019336082474226806,
      "loss": 0.6961,
      "step": 312
    },
    {
      "epoch": 0.10016,
      "grad_norm": 0.12260061502456665,
      "learning_rate": 0.000193319587628866,
      "loss": 0.7965,
      "step": 313
    },
    {
      "epoch": 0.10048,
      "grad_norm": 0.10297476500272751,
      "learning_rate": 0.00019327835051546392,
      "loss": 0.6935,
      "step": 314
    },
    {
      "epoch": 0.1008,
      "grad_norm": 0.12614290416240692,
      "learning_rate": 0.00019323711340206188,
      "loss": 0.8751,
      "step": 315
    },
    {
      "epoch": 0.10112,
      "grad_norm": 0.10241006314754486,
      "learning_rate": 0.0001931958762886598,
      "loss": 0.5642,
      "step": 316
    },
    {
      "epoch": 0.10144,
      "grad_norm": 0.1053333505988121,
      "learning_rate": 0.00019315463917525774,
      "loss": 0.6425,
      "step": 317
    },
    {
      "epoch": 0.10176,
      "grad_norm": 0.1024412289261818,
      "learning_rate": 0.0001931134020618557,
      "loss": 0.6414,
      "step": 318
    },
    {
      "epoch": 0.10208,
      "grad_norm": 0.10628654062747955,
      "learning_rate": 0.0001930721649484536,
      "loss": 0.8259,
      "step": 319
    },
    {
      "epoch": 0.1024,
      "grad_norm": 0.1291695237159729,
      "learning_rate": 0.00019303092783505153,
      "loss": 0.6887,
      "step": 320
    },
    {
      "epoch": 0.10272,
      "grad_norm": 0.11315542459487915,
      "learning_rate": 0.0001929896907216495,
      "loss": 0.6316,
      "step": 321
    },
    {
      "epoch": 0.10304,
      "grad_norm": 0.11541493982076645,
      "learning_rate": 0.00019294845360824742,
      "loss": 0.8023,
      "step": 322
    },
    {
      "epoch": 0.10336,
      "grad_norm": 0.09769251197576523,
      "learning_rate": 0.00019290721649484538,
      "loss": 0.674,
      "step": 323
    },
    {
      "epoch": 0.10368,
      "grad_norm": 0.10549357533454895,
      "learning_rate": 0.0001928659793814433,
      "loss": 0.7102,
      "step": 324
    },
    {
      "epoch": 0.104,
      "grad_norm": 0.1344941407442093,
      "learning_rate": 0.00019282474226804124,
      "loss": 0.8861,
      "step": 325
    },
    {
      "epoch": 0.10432,
      "grad_norm": 0.13625279068946838,
      "learning_rate": 0.00019278350515463918,
      "loss": 0.8333,
      "step": 326
    },
    {
      "epoch": 0.10464,
      "grad_norm": 0.1136624813079834,
      "learning_rate": 0.0001927422680412371,
      "loss": 0.7187,
      "step": 327
    },
    {
      "epoch": 0.10496,
      "grad_norm": 0.12351350486278534,
      "learning_rate": 0.00019270103092783506,
      "loss": 0.6361,
      "step": 328
    },
    {
      "epoch": 0.10528,
      "grad_norm": 0.12927868962287903,
      "learning_rate": 0.000192659793814433,
      "loss": 0.6716,
      "step": 329
    },
    {
      "epoch": 0.1056,
      "grad_norm": 0.11619272083044052,
      "learning_rate": 0.00019261855670103093,
      "loss": 0.8108,
      "step": 330
    },
    {
      "epoch": 0.10592,
      "grad_norm": 0.12765595316886902,
      "learning_rate": 0.00019257731958762889,
      "loss": 0.77,
      "step": 331
    },
    {
      "epoch": 0.10624,
      "grad_norm": 0.13606730103492737,
      "learning_rate": 0.00019253608247422682,
      "loss": 0.7572,
      "step": 332
    },
    {
      "epoch": 0.10656,
      "grad_norm": 0.1074293777346611,
      "learning_rate": 0.00019249484536082475,
      "loss": 0.8137,
      "step": 333
    },
    {
      "epoch": 0.10688,
      "grad_norm": 0.12371189147233963,
      "learning_rate": 0.00019245360824742268,
      "loss": 0.7414,
      "step": 334
    },
    {
      "epoch": 0.1072,
      "grad_norm": 0.1253589391708374,
      "learning_rate": 0.0001924123711340206,
      "loss": 0.6796,
      "step": 335
    },
    {
      "epoch": 0.10752,
      "grad_norm": 0.11865826696157455,
      "learning_rate": 0.00019237113402061857,
      "loss": 0.7051,
      "step": 336
    },
    {
      "epoch": 0.10784,
      "grad_norm": 0.12180513888597488,
      "learning_rate": 0.0001923298969072165,
      "loss": 0.8126,
      "step": 337
    },
    {
      "epoch": 0.10816,
      "grad_norm": 0.11744590848684311,
      "learning_rate": 0.00019228865979381446,
      "loss": 0.7553,
      "step": 338
    },
    {
      "epoch": 0.10848,
      "grad_norm": 0.11796604096889496,
      "learning_rate": 0.0001922474226804124,
      "loss": 0.814,
      "step": 339
    },
    {
      "epoch": 0.1088,
      "grad_norm": 0.125954270362854,
      "learning_rate": 0.00019220618556701032,
      "loss": 0.64,
      "step": 340
    },
    {
      "epoch": 0.10912,
      "grad_norm": 0.12020507454872131,
      "learning_rate": 0.00019216494845360825,
      "loss": 0.6175,
      "step": 341
    },
    {
      "epoch": 0.10944,
      "grad_norm": 0.10076280683279037,
      "learning_rate": 0.00019212371134020618,
      "loss": 0.6176,
      "step": 342
    },
    {
      "epoch": 0.10976,
      "grad_norm": 0.11676807701587677,
      "learning_rate": 0.00019208247422680414,
      "loss": 0.677,
      "step": 343
    },
    {
      "epoch": 0.11008,
      "grad_norm": 0.11364617198705673,
      "learning_rate": 0.00019204123711340207,
      "loss": 0.5619,
      "step": 344
    },
    {
      "epoch": 0.1104,
      "grad_norm": 0.10261007398366928,
      "learning_rate": 0.000192,
      "loss": 0.614,
      "step": 345
    },
    {
      "epoch": 0.11072,
      "grad_norm": 0.09645744413137436,
      "learning_rate": 0.00019195876288659796,
      "loss": 0.7274,
      "step": 346
    },
    {
      "epoch": 0.11104,
      "grad_norm": 0.12451738864183426,
      "learning_rate": 0.0001919175257731959,
      "loss": 0.8735,
      "step": 347
    },
    {
      "epoch": 0.11136,
      "grad_norm": 0.11062555015087128,
      "learning_rate": 0.00019187628865979383,
      "loss": 0.6268,
      "step": 348
    },
    {
      "epoch": 0.11168,
      "grad_norm": 0.1201324462890625,
      "learning_rate": 0.00019183505154639176,
      "loss": 0.793,
      "step": 349
    },
    {
      "epoch": 0.112,
      "grad_norm": 0.12797708809375763,
      "learning_rate": 0.0001917938144329897,
      "loss": 0.6075,
      "step": 350
    },
    {
      "epoch": 0.11232,
      "grad_norm": 0.11388446390628815,
      "learning_rate": 0.00019175257731958765,
      "loss": 0.7343,
      "step": 351
    },
    {
      "epoch": 0.11264,
      "grad_norm": 0.11379895359277725,
      "learning_rate": 0.00019171134020618558,
      "loss": 0.6767,
      "step": 352
    },
    {
      "epoch": 0.11296,
      "grad_norm": 0.11296231299638748,
      "learning_rate": 0.0001916701030927835,
      "loss": 0.7473,
      "step": 353
    },
    {
      "epoch": 0.11328,
      "grad_norm": 0.1344781517982483,
      "learning_rate": 0.00019162886597938147,
      "loss": 0.6533,
      "step": 354
    },
    {
      "epoch": 0.1136,
      "grad_norm": 0.12438254803419113,
      "learning_rate": 0.00019158762886597937,
      "loss": 0.7511,
      "step": 355
    },
    {
      "epoch": 0.11392,
      "grad_norm": 0.11343064159154892,
      "learning_rate": 0.00019154639175257733,
      "loss": 0.6876,
      "step": 356
    },
    {
      "epoch": 0.11424,
      "grad_norm": 0.11751387268304825,
      "learning_rate": 0.00019150515463917526,
      "loss": 0.7198,
      "step": 357
    },
    {
      "epoch": 0.11456,
      "grad_norm": 0.1194872185587883,
      "learning_rate": 0.0001914639175257732,
      "loss": 0.6149,
      "step": 358
    },
    {
      "epoch": 0.11488,
      "grad_norm": 0.11200791597366333,
      "learning_rate": 0.00019142268041237115,
      "loss": 0.8605,
      "step": 359
    },
    {
      "epoch": 0.1152,
      "grad_norm": 0.11708471924066544,
      "learning_rate": 0.00019138144329896908,
      "loss": 0.6603,
      "step": 360
    },
    {
      "epoch": 0.11552,
      "grad_norm": 0.116248220205307,
      "learning_rate": 0.00019134020618556704,
      "loss": 0.7141,
      "step": 361
    },
    {
      "epoch": 0.11584,
      "grad_norm": 0.11516683548688889,
      "learning_rate": 0.00019129896907216494,
      "loss": 0.7316,
      "step": 362
    },
    {
      "epoch": 0.11616,
      "grad_norm": 0.10961434990167618,
      "learning_rate": 0.00019125773195876288,
      "loss": 0.6173,
      "step": 363
    },
    {
      "epoch": 0.11648,
      "grad_norm": 0.12037073075771332,
      "learning_rate": 0.00019121649484536083,
| "loss": 0.6659, | |
| "step": 364 | |
| }, | |
| { | |
| "epoch": 0.1168, | |
| "grad_norm": 0.12053418904542923, | |
| "learning_rate": 0.00019117525773195877, | |
| "loss": 0.6544, | |
| "step": 365 | |
| }, | |
| { | |
| "epoch": 0.11712, | |
| "grad_norm": 0.12387484312057495, | |
| "learning_rate": 0.00019113402061855672, | |
| "loss": 0.6406, | |
| "step": 366 | |
| }, | |
| { | |
| "epoch": 0.11744, | |
| "grad_norm": 0.12213553488254547, | |
| "learning_rate": 0.00019109278350515466, | |
| "loss": 0.6592, | |
| "step": 367 | |
| }, | |
| { | |
| "epoch": 0.11776, | |
| "grad_norm": 0.1237708106637001, | |
| "learning_rate": 0.0001910515463917526, | |
| "loss": 0.779, | |
| "step": 368 | |
| }, | |
| { | |
| "epoch": 0.11808, | |
| "grad_norm": 0.10254320502281189, | |
| "learning_rate": 0.00019101030927835052, | |
| "loss": 0.7242, | |
| "step": 369 | |
| }, | |
| { | |
| "epoch": 0.1184, | |
| "grad_norm": 0.11179213225841522, | |
| "learning_rate": 0.00019096907216494845, | |
| "loss": 0.765, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.11872, | |
| "grad_norm": 0.11824121326208115, | |
| "learning_rate": 0.0001909278350515464, | |
| "loss": 0.6971, | |
| "step": 371 | |
| }, | |
| { | |
| "epoch": 0.11904, | |
| "grad_norm": 0.13179510831832886, | |
| "learning_rate": 0.00019088659793814434, | |
| "loss": 0.7179, | |
| "step": 372 | |
| }, | |
| { | |
| "epoch": 0.11936, | |
| "grad_norm": 0.1161201074719429, | |
| "learning_rate": 0.00019084536082474227, | |
| "loss": 0.6249, | |
| "step": 373 | |
| }, | |
| { | |
| "epoch": 0.11968, | |
| "grad_norm": 0.12930230796337128, | |
| "learning_rate": 0.00019080412371134023, | |
| "loss": 0.6636, | |
| "step": 374 | |
| }, | |
| { | |
| "epoch": 0.12, | |
| "grad_norm": 0.12602809071540833, | |
| "learning_rate": 0.00019076288659793816, | |
| "loss": 0.7044, | |
| "step": 375 | |
| }, | |
| { | |
| "epoch": 0.12032, | |
| "grad_norm": 0.1393459141254425, | |
| "learning_rate": 0.0001907216494845361, | |
| "loss": 0.6518, | |
| "step": 376 | |
| }, | |
| { | |
| "epoch": 0.12064, | |
| "grad_norm": 0.11460117250680923, | |
| "learning_rate": 0.00019068041237113402, | |
| "loss": 0.7716, | |
| "step": 377 | |
| }, | |
| { | |
| "epoch": 0.12096, | |
| "grad_norm": 0.14296174049377441, | |
| "learning_rate": 0.00019063917525773195, | |
| "loss": 0.7899, | |
| "step": 378 | |
| }, | |
| { | |
| "epoch": 0.12128, | |
| "grad_norm": 0.14445006847381592, | |
| "learning_rate": 0.0001905979381443299, | |
| "loss": 0.7617, | |
| "step": 379 | |
| }, | |
| { | |
| "epoch": 0.1216, | |
| "grad_norm": 0.12754163146018982, | |
| "learning_rate": 0.00019055670103092784, | |
| "loss": 0.7888, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.12192, | |
| "grad_norm": 0.11828102916479111, | |
| "learning_rate": 0.00019051546391752577, | |
| "loss": 0.6641, | |
| "step": 381 | |
| }, | |
| { | |
| "epoch": 0.12224, | |
| "grad_norm": 0.11433565616607666, | |
| "learning_rate": 0.00019047422680412373, | |
| "loss": 0.5984, | |
| "step": 382 | |
| }, | |
| { | |
| "epoch": 0.12256, | |
| "grad_norm": 0.11538711935281754, | |
| "learning_rate": 0.00019043298969072166, | |
| "loss": 0.7464, | |
| "step": 383 | |
| }, | |
| { | |
| "epoch": 0.12288, | |
| "grad_norm": 0.10493431240320206, | |
| "learning_rate": 0.0001903917525773196, | |
| "loss": 0.7104, | |
| "step": 384 | |
| }, | |
| { | |
| "epoch": 0.1232, | |
| "grad_norm": 0.13114306330680847, | |
| "learning_rate": 0.00019035051546391753, | |
| "loss": 0.9104, | |
| "step": 385 | |
| }, | |
| { | |
| "epoch": 0.12352, | |
| "grad_norm": 0.11584331095218658, | |
| "learning_rate": 0.00019030927835051546, | |
| "loss": 0.7205, | |
| "step": 386 | |
| }, | |
| { | |
| "epoch": 0.12384, | |
| "grad_norm": 0.1123526394367218, | |
| "learning_rate": 0.00019026804123711342, | |
| "loss": 0.7061, | |
| "step": 387 | |
| }, | |
| { | |
| "epoch": 0.12416, | |
| "grad_norm": 0.11471172422170639, | |
| "learning_rate": 0.00019022680412371135, | |
| "loss": 0.7491, | |
| "step": 388 | |
| }, | |
| { | |
| "epoch": 0.12448, | |
| "grad_norm": 0.1361629068851471, | |
| "learning_rate": 0.0001901855670103093, | |
| "loss": 0.8168, | |
| "step": 389 | |
| }, | |
| { | |
| "epoch": 0.1248, | |
| "grad_norm": 0.1272808462381363, | |
| "learning_rate": 0.00019014432989690724, | |
| "loss": 0.6907, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.12512, | |
| "grad_norm": 0.12672847509384155, | |
| "learning_rate": 0.00019010309278350514, | |
| "loss": 0.7511, | |
| "step": 391 | |
| }, | |
| { | |
| "epoch": 0.12544, | |
| "grad_norm": 0.12205267697572708, | |
| "learning_rate": 0.0001900618556701031, | |
| "loss": 0.8164, | |
| "step": 392 | |
| }, | |
| { | |
| "epoch": 0.12576, | |
| "grad_norm": 0.12806954979896545, | |
| "learning_rate": 0.00019002061855670103, | |
| "loss": 0.8247, | |
| "step": 393 | |
| }, | |
| { | |
| "epoch": 0.12608, | |
| "grad_norm": 0.11856722086668015, | |
| "learning_rate": 0.000189979381443299, | |
| "loss": 0.7324, | |
| "step": 394 | |
| }, | |
| { | |
| "epoch": 0.1264, | |
| "grad_norm": 0.11742661893367767, | |
| "learning_rate": 0.00018993814432989692, | |
| "loss": 0.8151, | |
| "step": 395 | |
| }, | |
| { | |
| "epoch": 0.12672, | |
| "grad_norm": 0.11036640405654907, | |
| "learning_rate": 0.00018989690721649485, | |
| "loss": 0.5336, | |
| "step": 396 | |
| }, | |
| { | |
| "epoch": 0.12704, | |
| "grad_norm": 0.11546391993761063, | |
| "learning_rate": 0.0001898556701030928, | |
| "loss": 0.8078, | |
| "step": 397 | |
| }, | |
| { | |
| "epoch": 0.12736, | |
| "grad_norm": 0.11293994635343552, | |
| "learning_rate": 0.00018981443298969071, | |
| "loss": 0.7818, | |
| "step": 398 | |
| }, | |
| { | |
| "epoch": 0.12768, | |
| "grad_norm": 0.11465566605329514, | |
| "learning_rate": 0.00018977319587628867, | |
| "loss": 0.8291, | |
| "step": 399 | |
| }, | |
| { | |
| "epoch": 0.128, | |
| "grad_norm": 0.1251228302717209, | |
| "learning_rate": 0.0001897319587628866, | |
| "loss": 0.7199, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.12832, | |
| "grad_norm": 0.11204738169908524, | |
| "learning_rate": 0.00018969072164948454, | |
| "loss": 0.7193, | |
| "step": 401 | |
| }, | |
| { | |
| "epoch": 0.12864, | |
| "grad_norm": 0.1072632372379303, | |
| "learning_rate": 0.0001896494845360825, | |
| "loss": 0.7763, | |
| "step": 402 | |
| }, | |
| { | |
| "epoch": 0.12896, | |
| "grad_norm": 0.11516649276018143, | |
| "learning_rate": 0.00018960824742268043, | |
| "loss": 0.6853, | |
| "step": 403 | |
| }, | |
| { | |
| "epoch": 0.12928, | |
| "grad_norm": 0.1176360473036766, | |
| "learning_rate": 0.00018956701030927836, | |
| "loss": 0.6184, | |
| "step": 404 | |
| }, | |
| { | |
| "epoch": 0.1296, | |
| "grad_norm": 0.11547468602657318, | |
| "learning_rate": 0.0001895257731958763, | |
| "loss": 0.8048, | |
| "step": 405 | |
| }, | |
| { | |
| "epoch": 0.12992, | |
| "grad_norm": 0.11308492720127106, | |
| "learning_rate": 0.00018948453608247422, | |
| "loss": 0.7213, | |
| "step": 406 | |
| }, | |
| { | |
| "epoch": 0.13024, | |
| "grad_norm": 0.11973299831151962, | |
| "learning_rate": 0.00018944329896907218, | |
| "loss": 0.8809, | |
| "step": 407 | |
| }, | |
| { | |
| "epoch": 0.13056, | |
| "grad_norm": 0.1213008239865303, | |
| "learning_rate": 0.0001894020618556701, | |
| "loss": 0.7794, | |
| "step": 408 | |
| }, | |
| { | |
| "epoch": 0.13088, | |
| "grad_norm": 0.13353341817855835, | |
| "learning_rate": 0.00018936082474226804, | |
| "loss": 0.6674, | |
| "step": 409 | |
| }, | |
| { | |
| "epoch": 0.1312, | |
| "grad_norm": 0.1155238002538681, | |
| "learning_rate": 0.000189319587628866, | |
| "loss": 0.7668, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.13152, | |
| "grad_norm": 0.13428455591201782, | |
| "learning_rate": 0.00018927835051546393, | |
| "loss": 0.6864, | |
| "step": 411 | |
| }, | |
| { | |
| "epoch": 0.13184, | |
| "grad_norm": 0.13777495920658112, | |
| "learning_rate": 0.00018923711340206186, | |
| "loss": 0.8271, | |
| "step": 412 | |
| }, | |
| { | |
| "epoch": 0.13216, | |
| "grad_norm": 0.12846726179122925, | |
| "learning_rate": 0.0001891958762886598, | |
| "loss": 0.9027, | |
| "step": 413 | |
| }, | |
| { | |
| "epoch": 0.13248, | |
| "grad_norm": 0.11394570022821426, | |
| "learning_rate": 0.00018915463917525772, | |
| "loss": 0.7531, | |
| "step": 414 | |
| }, | |
| { | |
| "epoch": 0.1328, | |
| "grad_norm": 0.11797945201396942, | |
| "learning_rate": 0.00018911340206185568, | |
| "loss": 0.7393, | |
| "step": 415 | |
| }, | |
| { | |
| "epoch": 0.13312, | |
| "grad_norm": 0.11778296530246735, | |
| "learning_rate": 0.0001890721649484536, | |
| "loss": 0.8172, | |
| "step": 416 | |
| }, | |
| { | |
| "epoch": 0.13344, | |
| "grad_norm": 0.12857432663440704, | |
| "learning_rate": 0.00018903092783505157, | |
| "loss": 0.5693, | |
| "step": 417 | |
| }, | |
| { | |
| "epoch": 0.13376, | |
| "grad_norm": 0.1395915150642395, | |
| "learning_rate": 0.0001889896907216495, | |
| "loss": 0.7187, | |
| "step": 418 | |
| }, | |
| { | |
| "epoch": 0.13408, | |
| "grad_norm": 0.12571577727794647, | |
| "learning_rate": 0.00018894845360824743, | |
| "loss": 0.7347, | |
| "step": 419 | |
| }, | |
| { | |
| "epoch": 0.1344, | |
| "grad_norm": 0.11062180995941162, | |
| "learning_rate": 0.00018890721649484537, | |
| "loss": 0.8797, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.13472, | |
| "grad_norm": 0.12011471390724182, | |
| "learning_rate": 0.0001888659793814433, | |
| "loss": 0.7638, | |
| "step": 421 | |
| }, | |
| { | |
| "epoch": 0.13504, | |
| "grad_norm": 0.11402884870767593, | |
| "learning_rate": 0.00018882474226804126, | |
| "loss": 0.6198, | |
| "step": 422 | |
| }, | |
| { | |
| "epoch": 0.13536, | |
| "grad_norm": 0.11654789745807648, | |
| "learning_rate": 0.00018878350515463919, | |
| "loss": 0.8276, | |
| "step": 423 | |
| }, | |
| { | |
| "epoch": 0.13568, | |
| "grad_norm": 0.110030896961689, | |
| "learning_rate": 0.00018874226804123712, | |
| "loss": 0.7828, | |
| "step": 424 | |
| }, | |
| { | |
| "epoch": 0.136, | |
| "grad_norm": 0.1173168271780014, | |
| "learning_rate": 0.00018870103092783508, | |
| "loss": 0.6445, | |
| "step": 425 | |
| }, | |
| { | |
| "epoch": 0.13632, | |
| "grad_norm": 0.11195468157529831, | |
| "learning_rate": 0.000188659793814433, | |
| "loss": 0.7336, | |
| "step": 426 | |
| }, | |
| { | |
| "epoch": 0.13664, | |
| "grad_norm": 0.1138012707233429, | |
| "learning_rate": 0.00018861855670103094, | |
| "loss": 0.7085, | |
| "step": 427 | |
| }, | |
| { | |
| "epoch": 0.13696, | |
| "grad_norm": 0.11757896840572357, | |
| "learning_rate": 0.00018857731958762887, | |
| "loss": 0.593, | |
| "step": 428 | |
| }, | |
| { | |
| "epoch": 0.13728, | |
| "grad_norm": 0.13230633735656738, | |
| "learning_rate": 0.0001885360824742268, | |
| "loss": 0.705, | |
| "step": 429 | |
| }, | |
| { | |
| "epoch": 0.1376, | |
| "grad_norm": 0.13503113389015198, | |
| "learning_rate": 0.00018849484536082476, | |
| "loss": 0.7134, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.13792, | |
| "grad_norm": 0.11426805704832077, | |
| "learning_rate": 0.0001884536082474227, | |
| "loss": 0.6781, | |
| "step": 431 | |
| }, | |
| { | |
| "epoch": 0.13824, | |
| "grad_norm": 0.11934298276901245, | |
| "learning_rate": 0.00018841237113402065, | |
| "loss": 0.7727, | |
| "step": 432 | |
| }, | |
| { | |
| "epoch": 0.13856, | |
| "grad_norm": 0.12700718641281128, | |
| "learning_rate": 0.00018837113402061858, | |
| "loss": 0.7153, | |
| "step": 433 | |
| }, | |
| { | |
| "epoch": 0.13888, | |
| "grad_norm": 0.15089921653270721, | |
| "learning_rate": 0.00018832989690721648, | |
| "loss": 0.7326, | |
| "step": 434 | |
| }, | |
| { | |
| "epoch": 0.1392, | |
| "grad_norm": 0.10799750685691833, | |
| "learning_rate": 0.00018828865979381444, | |
| "loss": 0.7738, | |
| "step": 435 | |
| }, | |
| { | |
| "epoch": 0.13952, | |
| "grad_norm": 0.1076594665646553, | |
| "learning_rate": 0.00018824742268041237, | |
| "loss": 0.545, | |
| "step": 436 | |
| }, | |
| { | |
| "epoch": 0.13984, | |
| "grad_norm": 0.1204753890633583, | |
| "learning_rate": 0.00018820618556701033, | |
| "loss": 0.73, | |
| "step": 437 | |
| }, | |
| { | |
| "epoch": 0.14016, | |
| "grad_norm": 0.11598300188779831, | |
| "learning_rate": 0.00018816494845360826, | |
| "loss": 0.635, | |
| "step": 438 | |
| }, | |
| { | |
| "epoch": 0.14048, | |
| "grad_norm": 0.11010400205850601, | |
| "learning_rate": 0.0001881237113402062, | |
| "loss": 0.7891, | |
| "step": 439 | |
| }, | |
| { | |
| "epoch": 0.1408, | |
| "grad_norm": 0.12184228003025055, | |
| "learning_rate": 0.00018808247422680413, | |
| "loss": 0.7332, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.14112, | |
| "grad_norm": 0.11696261167526245, | |
| "learning_rate": 0.00018804123711340206, | |
| "loss": 0.8299, | |
| "step": 441 | |
| }, | |
| { | |
| "epoch": 0.14144, | |
| "grad_norm": 0.11464802920818329, | |
| "learning_rate": 0.000188, | |
| "loss": 0.7107, | |
| "step": 442 | |
| }, | |
| { | |
| "epoch": 0.14176, | |
| "grad_norm": 0.12120334804058075, | |
| "learning_rate": 0.00018795876288659795, | |
| "loss": 0.7729, | |
| "step": 443 | |
| }, | |
| { | |
| "epoch": 0.14208, | |
| "grad_norm": 0.11571822315454483, | |
| "learning_rate": 0.00018791752577319588, | |
| "loss": 0.6551, | |
| "step": 444 | |
| }, | |
| { | |
| "epoch": 0.1424, | |
| "grad_norm": 0.11872228980064392, | |
| "learning_rate": 0.00018787628865979384, | |
| "loss": 0.698, | |
| "step": 445 | |
| }, | |
| { | |
| "epoch": 0.14272, | |
| "grad_norm": 0.12505149841308594, | |
| "learning_rate": 0.00018783505154639177, | |
| "loss": 0.751, | |
| "step": 446 | |
| }, | |
| { | |
| "epoch": 0.14304, | |
| "grad_norm": 0.10618813335895538, | |
| "learning_rate": 0.0001877938144329897, | |
| "loss": 0.7299, | |
| "step": 447 | |
| }, | |
| { | |
| "epoch": 0.14336, | |
| "grad_norm": 0.11824540048837662, | |
| "learning_rate": 0.00018775257731958763, | |
| "loss": 0.6796, | |
| "step": 448 | |
| }, | |
| { | |
| "epoch": 0.14368, | |
| "grad_norm": 0.10695328563451767, | |
| "learning_rate": 0.00018771134020618556, | |
| "loss": 0.7424, | |
| "step": 449 | |
| }, | |
| { | |
| "epoch": 0.144, | |
| "grad_norm": 0.12764234840869904, | |
| "learning_rate": 0.00018767010309278352, | |
| "loss": 0.7301, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.14432, | |
| "grad_norm": 0.12354765832424164, | |
| "learning_rate": 0.00018762886597938145, | |
| "loss": 0.7945, | |
| "step": 451 | |
| }, | |
| { | |
| "epoch": 0.14464, | |
| "grad_norm": 0.12733551859855652, | |
| "learning_rate": 0.00018758762886597938, | |
| "loss": 0.7565, | |
| "step": 452 | |
| }, | |
| { | |
| "epoch": 0.14496, | |
| "grad_norm": 0.1533960998058319, | |
| "learning_rate": 0.00018754639175257734, | |
| "loss": 0.7964, | |
| "step": 453 | |
| }, | |
| { | |
| "epoch": 0.14528, | |
| "grad_norm": 0.11748246848583221, | |
| "learning_rate": 0.00018750515463917527, | |
| "loss": 0.7715, | |
| "step": 454 | |
| }, | |
| { | |
| "epoch": 0.1456, | |
| "grad_norm": 0.12309489399194717, | |
| "learning_rate": 0.0001874639175257732, | |
| "loss": 0.7758, | |
| "step": 455 | |
| }, | |
| { | |
| "epoch": 0.14592, | |
| "grad_norm": 0.10743677616119385, | |
| "learning_rate": 0.00018742268041237114, | |
| "loss": 0.8084, | |
| "step": 456 | |
| }, | |
| { | |
| "epoch": 0.14624, | |
| "grad_norm": 0.15399467945098877, | |
| "learning_rate": 0.00018738144329896907, | |
| "loss": 0.7689, | |
| "step": 457 | |
| }, | |
| { | |
| "epoch": 0.14656, | |
| "grad_norm": 0.1381152868270874, | |
| "learning_rate": 0.00018734020618556702, | |
| "loss": 0.7902, | |
| "step": 458 | |
| }, | |
| { | |
| "epoch": 0.14688, | |
| "grad_norm": 0.12086453288793564, | |
| "learning_rate": 0.00018729896907216496, | |
| "loss": 0.739, | |
| "step": 459 | |
| }, | |
| { | |
| "epoch": 0.1472, | |
| "grad_norm": 0.14193426072597504, | |
| "learning_rate": 0.00018725773195876291, | |
| "loss": 0.8245, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.14752, | |
| "grad_norm": 0.1121336966753006, | |
| "learning_rate": 0.00018721649484536085, | |
| "loss": 0.6877, | |
| "step": 461 | |
| }, | |
| { | |
| "epoch": 0.14784, | |
| "grad_norm": 0.12167156487703323, | |
| "learning_rate": 0.00018717525773195878, | |
| "loss": 0.7017, | |
| "step": 462 | |
| }, | |
| { | |
| "epoch": 0.14816, | |
| "grad_norm": 0.1287817507982254, | |
| "learning_rate": 0.0001871340206185567, | |
| "loss": 0.6229, | |
| "step": 463 | |
| }, | |
| { | |
| "epoch": 0.14848, | |
| "grad_norm": 0.128828227519989, | |
| "learning_rate": 0.00018709278350515464, | |
| "loss": 0.7917, | |
| "step": 464 | |
| }, | |
| { | |
| "epoch": 0.1488, | |
| "grad_norm": 0.13771773874759674, | |
| "learning_rate": 0.0001870515463917526, | |
| "loss": 0.6883, | |
| "step": 465 | |
| }, | |
| { | |
| "epoch": 0.14912, | |
| "grad_norm": 0.10570216923952103, | |
| "learning_rate": 0.00018701030927835053, | |
| "loss": 0.7791, | |
| "step": 466 | |
| }, | |
| { | |
| "epoch": 0.14944, | |
| "grad_norm": 0.1250445395708084, | |
| "learning_rate": 0.00018696907216494846, | |
| "loss": 0.7435, | |
| "step": 467 | |
| }, | |
| { | |
| "epoch": 0.14976, | |
| "grad_norm": 0.1064952090382576, | |
| "learning_rate": 0.00018692783505154642, | |
| "loss": 0.7621, | |
| "step": 468 | |
| }, | |
| { | |
| "epoch": 0.15008, | |
| "grad_norm": 0.10514791309833527, | |
| "learning_rate": 0.00018688659793814432, | |
| "loss": 0.6403, | |
| "step": 469 | |
| }, | |
| { | |
| "epoch": 0.1504, | |
| "grad_norm": 0.10278081893920898, | |
| "learning_rate": 0.00018684536082474225, | |
| "loss": 0.8246, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.15072, | |
| "grad_norm": 0.11813073605298996, | |
| "learning_rate": 0.0001868041237113402, | |
| "loss": 0.8403, | |
| "step": 471 | |
| }, | |
| { | |
| "epoch": 0.15104, | |
| "grad_norm": 0.10654576867818832, | |
| "learning_rate": 0.00018676288659793814, | |
| "loss": 0.7483, | |
| "step": 472 | |
| }, | |
| { | |
| "epoch": 0.15136, | |
| "grad_norm": 0.11642228811979294, | |
| "learning_rate": 0.0001867216494845361, | |
| "loss": 0.6114, | |
| "step": 473 | |
| }, | |
| { | |
| "epoch": 0.15168, | |
| "grad_norm": 0.12933100759983063, | |
| "learning_rate": 0.00018668041237113403, | |
| "loss": 0.7902, | |
| "step": 474 | |
| }, | |
| { | |
| "epoch": 0.152, | |
| "grad_norm": 0.1200195848941803, | |
| "learning_rate": 0.00018663917525773196, | |
| "loss": 0.7298, | |
| "step": 475 | |
| }, | |
| { | |
| "epoch": 0.15232, | |
| "grad_norm": 0.10747142881155014, | |
| "learning_rate": 0.0001865979381443299, | |
| "loss": 0.7376, | |
| "step": 476 | |
| }, | |
| { | |
| "epoch": 0.15264, | |
| "grad_norm": 0.10868332535028458, | |
| "learning_rate": 0.00018655670103092783, | |
| "loss": 0.7013, | |
| "step": 477 | |
| }, | |
| { | |
| "epoch": 0.15296, | |
| "grad_norm": 0.12750379741191864, | |
| "learning_rate": 0.00018651546391752579, | |
| "loss": 0.7718, | |
| "step": 478 | |
| }, | |
| { | |
| "epoch": 0.15328, | |
| "grad_norm": 0.10860800743103027, | |
| "learning_rate": 0.00018647422680412372, | |
| "loss": 0.5887, | |
| "step": 479 | |
| }, | |
| { | |
| "epoch": 0.1536, | |
| "grad_norm": 0.11445030570030212, | |
| "learning_rate": 0.00018643298969072165, | |
| "loss": 0.7955, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 0.15392, | |
| "grad_norm": 0.11774427443742752, | |
| "learning_rate": 0.0001863917525773196, | |
| "loss": 0.7095, | |
| "step": 481 | |
| }, | |
| { | |
| "epoch": 0.15424, | |
| "grad_norm": 0.13853546977043152, | |
| "learning_rate": 0.00018635051546391754, | |
| "loss": 0.9395, | |
| "step": 482 | |
| }, | |
| { | |
| "epoch": 0.15456, | |
| "grad_norm": 0.120772585272789, | |
| "learning_rate": 0.00018630927835051547, | |
| "loss": 0.8, | |
| "step": 483 | |
| }, | |
| { | |
| "epoch": 0.15488, | |
| "grad_norm": 0.13263949751853943, | |
| "learning_rate": 0.0001862680412371134, | |
| "loss": 0.9118, | |
| "step": 484 | |
| }, | |
| { | |
| "epoch": 0.1552, | |
| "grad_norm": 0.12179119139909744, | |
| "learning_rate": 0.00018622680412371133, | |
| "loss": 0.6567, | |
| "step": 485 | |
| }, | |
| { | |
| "epoch": 0.15552, | |
| "grad_norm": 0.1244128867983818, | |
| "learning_rate": 0.0001861855670103093, | |
| "loss": 0.7702, | |
| "step": 486 | |
| }, | |
| { | |
| "epoch": 0.15584, | |
| "grad_norm": 0.12456309050321579, | |
| "learning_rate": 0.00018614432989690722, | |
| "loss": 0.7687, | |
| "step": 487 | |
| }, | |
| { | |
| "epoch": 0.15616, | |
| "grad_norm": 0.12738095223903656, | |
| "learning_rate": 0.00018610309278350518, | |
| "loss": 0.7489, | |
| "step": 488 | |
| }, | |
| { | |
| "epoch": 0.15648, | |
| "grad_norm": 0.11148428916931152, | |
| "learning_rate": 0.0001860618556701031, | |
| "loss": 0.7362, | |
| "step": 489 | |
| }, | |
| { | |
| "epoch": 0.1568, | |
| "grad_norm": 0.1029975637793541, | |
| "learning_rate": 0.00018602061855670104, | |
| "loss": 0.5808, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 0.15712, | |
| "grad_norm": 0.12028798460960388, | |
| "learning_rate": 0.00018597938144329897, | |
| "loss": 0.8541, | |
| "step": 491 | |
| }, | |
| { | |
| "epoch": 0.15744, | |
| "grad_norm": 0.12013960629701614, | |
| "learning_rate": 0.0001859381443298969, | |
| "loss": 0.6814, | |
| "step": 492 | |
| }, | |
| { | |
| "epoch": 0.15776, | |
| "grad_norm": 0.12026184052228928, | |
| "learning_rate": 0.00018589690721649486, | |
| "loss": 0.8444, | |
| "step": 493 | |
| }, | |
| { | |
| "epoch": 0.15808, | |
| "grad_norm": 0.11788841336965561, | |
| "learning_rate": 0.0001858556701030928, | |
| "loss": 0.5718, | |
| "step": 494 | |
| }, | |
| { | |
| "epoch": 0.1584, | |
| "grad_norm": 0.11122492700815201, | |
| "learning_rate": 0.00018581443298969073, | |
| "loss": 0.5172, | |
| "step": 495 | |
| }, | |
| { | |
| "epoch": 0.15872, | |
| "grad_norm": 0.12228988111019135, | |
| "learning_rate": 0.00018577319587628868, | |
| "loss": 0.724, | |
| "step": 496 | |
| }, | |
| { | |
| "epoch": 0.15904, | |
| "grad_norm": 0.12170293927192688, | |
| "learning_rate": 0.00018573195876288662, | |
| "loss": 0.6863, | |
| "step": 497 | |
| }, | |
| { | |
| "epoch": 0.15936, | |
| "grad_norm": 0.12947559356689453, | |
| "learning_rate": 0.00018569072164948455, | |
| "loss": 0.897, | |
| "step": 498 | |
| }, | |
| { | |
| "epoch": 0.15968, | |
| "grad_norm": 0.13217338919639587, | |
| "learning_rate": 0.00018564948453608248, | |
| "loss": 0.7, | |
| "step": 499 | |
| }, | |
| { | |
| "epoch": 0.16, | |
| "grad_norm": 0.12307247519493103, | |
| "learning_rate": 0.0001856082474226804, | |
| "loss": 0.6686, | |
| "step": 500 | |
| } | |
| ], | |
| "logging_steps": 1, | |
| "max_steps": 5000, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 2, | |
| "save_steps": 250, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 2.1517889587206144e+17, | |
| "train_batch_size": 2, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
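
The object above is the on-disk state a Hugging Face `Trainer` writes alongside each checkpoint (`trainer_state.json`): `log_history` holds one record per optimizer step (here `logging_steps` is 1, so steps 321–500 of a `max_steps`-capped 5000-step run), and checkpoints land every `save_steps` (250) steps. The per-step learning-rate decrement in this segment, about 4.12e-08, equals 2e-4 / 4850, consistent with a linear decay over the 4850 steps remaining after warmup. Below is a minimal sketch for inspecting such a file; the file path and the smoothing window are illustrative assumptions, not part of the log itself.

```python
# Minimal sketch for summarizing a Hugging Face trainer_state.json like the one above.
# Assumptions: the file sits at "trainer_state.json" in the working directory, and every
# log_history record carries "step", "loss", and "learning_rate" (true here, since
# logging_steps is 1 and no eval records are interleaved).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]
steps = [r["step"] for r in history]
losses = [r["loss"] for r in history]
lrs = [r["learning_rate"] for r in history]

# Per-step loss is noisy, so smooth it with a trailing moving average
# (the window size of 25 is an arbitrary choice for illustration).
window = 25
smoothed = [
    sum(losses[max(0, i - window + 1): i + 1]) / (i - max(0, i - window + 1) + 1)
    for i in range(len(losses))
]

print(f"steps logged: {len(steps)} (last = {steps[-1]} of {state['max_steps']})")
print(f"final learning rate: {lrs[-1]:.3e}")
print(f"final smoothed loss (window={window}): {smoothed[-1]:.4f}")
```

Run against this state file, the script would report the last logged step (500 of 5000), the learning rate at that step (~1.856e-04), and a trailing-average loss in the low 0.7s, which is an easier signal to read than the raw per-step values that swing between roughly 0.52 and 0.94 in this segment.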