{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 27705,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02707092582566324,
      "grad_norm": 0.9713733196258545,
      "learning_rate": 9.910124526258799e-05,
      "loss": 3.4945,
      "step": 250
    },
    {
      "epoch": 0.05414185165132648,
      "grad_norm": 0.9521434307098389,
      "learning_rate": 9.819888106839921e-05,
      "loss": 3.5097,
      "step": 500
    },
    {
      "epoch": 0.05414185165132648,
      "eval_loss": 2.95473575592041,
      "eval_runtime": 87.829,
      "eval_samples_per_second": 112.15,
      "eval_steps_per_second": 7.014,
      "step": 500
    },
    {
      "epoch": 0.08121277747698971,
      "grad_norm": 0.9334385395050049,
      "learning_rate": 9.729651687421044e-05,
      "loss": 3.4922,
      "step": 750
    },
    {
      "epoch": 0.10828370330265295,
      "grad_norm": 1.2239603996276855,
      "learning_rate": 9.639415268002166e-05,
      "loss": 3.477,
      "step": 1000
    },
    {
      "epoch": 0.10828370330265295,
      "eval_loss": 2.9319565296173096,
      "eval_runtime": 87.8964,
      "eval_samples_per_second": 112.064,
      "eval_steps_per_second": 7.008,
      "step": 1000
    },
    {
      "epoch": 0.1353546291283162,
      "grad_norm": 1.1727226972579956,
      "learning_rate": 9.549178848583288e-05,
      "loss": 3.493,
      "step": 1250
    },
    {
      "epoch": 0.16242555495397942,
      "grad_norm": 0.966973602771759,
      "learning_rate": 9.458942429164411e-05,
      "loss": 3.5013,
      "step": 1500
    },
    {
      "epoch": 0.16242555495397942,
      "eval_loss": 2.9275825023651123,
      "eval_runtime": 87.899,
      "eval_samples_per_second": 112.06,
      "eval_steps_per_second": 7.008,
      "step": 1500
    },
    {
      "epoch": 0.18949648077964265,
      "grad_norm": 0.9813922047615051,
      "learning_rate": 9.368706009745533e-05,
      "loss": 3.4907,
      "step": 1750
    },
    {
      "epoch": 0.2165674066053059,
      "grad_norm": 1.027085542678833,
      "learning_rate": 9.278469590326656e-05,
      "loss": 3.4588,
      "step": 2000
    },
    {
      "epoch": 0.2165674066053059,
      "eval_loss": 2.9044992923736572,
      "eval_runtime": 87.8776,
      "eval_samples_per_second": 112.088,
      "eval_steps_per_second": 7.01,
      "step": 2000
    },
    {
      "epoch": 0.24363833243096913,
      "grad_norm": 1.0764214992523193,
      "learning_rate": 9.188233170907778e-05,
      "loss": 3.4531,
      "step": 2250
    },
    {
      "epoch": 0.2707092582566324,
      "grad_norm": 1.0297119617462158,
      "learning_rate": 9.0979967514889e-05,
      "loss": 3.4445,
      "step": 2500
    },
    {
      "epoch": 0.2707092582566324,
      "eval_loss": 2.8855738639831543,
      "eval_runtime": 87.8728,
      "eval_samples_per_second": 112.094,
      "eval_steps_per_second": 7.01,
      "step": 2500
    },
    {
      "epoch": 0.2977801840822956,
      "grad_norm": 0.9697523713111877,
      "learning_rate": 9.007760332070024e-05,
      "loss": 3.4349,
      "step": 2750
    },
    {
      "epoch": 0.32485110990795885,
      "grad_norm": 0.9611329436302185,
      "learning_rate": 8.917523912651147e-05,
      "loss": 3.4213,
      "step": 3000
    },
    {
      "epoch": 0.32485110990795885,
      "eval_loss": 2.8725759983062744,
      "eval_runtime": 87.9054,
      "eval_samples_per_second": 112.052,
      "eval_steps_per_second": 7.008,
      "step": 3000
    },
    {
      "epoch": 0.3519220357336221,
      "grad_norm": 1.000977873802185,
      "learning_rate": 8.827287493232269e-05,
      "loss": 3.4239,
      "step": 3250
    },
    {
      "epoch": 0.3789929615592853,
      "grad_norm": 1.0573837757110596,
      "learning_rate": 8.737051073813391e-05,
      "loss": 3.4015,
      "step": 3500
    },
    {
      "epoch": 0.3789929615592853,
      "eval_loss": 2.8721706867218018,
      "eval_runtime": 87.8848,
      "eval_samples_per_second": 112.078,
      "eval_steps_per_second": 7.009,
      "step": 3500
    },
    {
      "epoch": 0.4060638873849486,
      "grad_norm": 0.9532322883605957,
      "learning_rate": 8.646814654394514e-05,
      "loss": 3.401,
      "step": 3750
    },
    {
      "epoch": 0.4331348132106118,
      "grad_norm": 1.0036464929580688,
      "learning_rate": 8.556578234975636e-05,
      "loss": 3.3984,
      "step": 4000
    },
    {
      "epoch": 0.4331348132106118,
      "eval_loss": 2.863417863845825,
      "eval_runtime": 87.8316,
      "eval_samples_per_second": 112.146,
      "eval_steps_per_second": 7.013,
      "step": 4000
    },
    {
      "epoch": 0.46020573903627504,
      "grad_norm": 0.8860335350036621,
      "learning_rate": 8.466341815556758e-05,
      "loss": 3.4081,
      "step": 4250
    },
    {
      "epoch": 0.48727666486193827,
      "grad_norm": 1.4852999448776245,
      "learning_rate": 8.376105396137882e-05,
      "loss": 3.4105,
      "step": 4500
    },
    {
      "epoch": 0.48727666486193827,
      "eval_loss": 2.8409109115600586,
      "eval_runtime": 87.9025,
      "eval_samples_per_second": 112.056,
      "eval_steps_per_second": 7.008,
      "step": 4500
    },
    {
      "epoch": 0.5143475906876015,
      "grad_norm": 0.936912477016449,
      "learning_rate": 8.285868976719005e-05,
      "loss": 3.3952,
      "step": 4750
    },
    {
      "epoch": 0.5414185165132648,
      "grad_norm": 1.0409356355667114,
      "learning_rate": 8.195632557300127e-05,
      "loss": 3.4039,
      "step": 5000
    },
    {
      "epoch": 0.5414185165132648,
      "eval_loss": 2.8329713344573975,
      "eval_runtime": 87.8287,
      "eval_samples_per_second": 112.15,
      "eval_steps_per_second": 7.014,
      "step": 5000
    },
    {
      "epoch": 0.568489442338928,
      "grad_norm": 0.9606480002403259,
      "learning_rate": 8.10539613788125e-05,
      "loss": 3.3819,
      "step": 5250
    },
    {
      "epoch": 0.5955603681645912,
      "grad_norm": 1.142527461051941,
      "learning_rate": 8.015159718462372e-05,
      "loss": 3.3898,
      "step": 5500
    },
    {
      "epoch": 0.5955603681645912,
      "eval_loss": 2.8344321250915527,
      "eval_runtime": 87.8057,
      "eval_samples_per_second": 112.18,
      "eval_steps_per_second": 7.015,
      "step": 5500
    },
    {
      "epoch": 0.6226312939902545,
      "grad_norm": 1.2294474840164185,
      "learning_rate": 7.924923299043494e-05,
      "loss": 3.3769,
      "step": 5750
    },
    {
      "epoch": 0.6497022198159177,
      "grad_norm": 1.0300960540771484,
      "learning_rate": 7.834686879624617e-05,
      "loss": 3.3621,
      "step": 6000
    },
    {
      "epoch": 0.6497022198159177,
      "eval_loss": 2.8137757778167725,
      "eval_runtime": 87.8367,
      "eval_samples_per_second": 112.14,
      "eval_steps_per_second": 7.013,
      "step": 6000
    },
    {
      "epoch": 0.676773145641581,
      "grad_norm": 1.076827883720398,
      "learning_rate": 7.74445046020574e-05,
      "loss": 3.3704,
      "step": 6250
    },
    {
      "epoch": 0.7038440714672441,
      "grad_norm": 1.4118528366088867,
      "learning_rate": 7.654214040786863e-05,
      "loss": 3.367,
      "step": 6500
    },
    {
      "epoch": 0.7038440714672441,
      "eval_loss": 2.8218181133270264,
      "eval_runtime": 87.9044,
      "eval_samples_per_second": 112.053,
      "eval_steps_per_second": 7.008,
      "step": 6500
    },
    {
      "epoch": 0.7309149972929074,
      "grad_norm": 1.0568158626556396,
      "learning_rate": 7.563977621367985e-05,
      "loss": 3.3509,
      "step": 6750
    },
    {
      "epoch": 0.7579859231185706,
      "grad_norm": 1.0436064004898071,
      "learning_rate": 7.473741201949108e-05,
      "loss": 3.3496,
      "step": 7000
    },
    {
      "epoch": 0.7579859231185706,
      "eval_loss": 2.8048973083496094,
      "eval_runtime": 87.8448,
      "eval_samples_per_second": 112.13,
      "eval_steps_per_second": 7.012,
      "step": 7000
    },
    {
      "epoch": 0.7850568489442339,
      "grad_norm": 0.9466687440872192,
      "learning_rate": 7.38350478253023e-05,
      "loss": 3.3381,
      "step": 7250
    },
    {
      "epoch": 0.8121277747698972,
      "grad_norm": 0.9336101412773132,
      "learning_rate": 7.293268363111352e-05,
      "loss": 3.3394,
      "step": 7500
    },
    {
      "epoch": 0.8121277747698972,
      "eval_loss": 2.80117130279541,
      "eval_runtime": 87.8737,
      "eval_samples_per_second": 112.093,
      "eval_steps_per_second": 7.01,
      "step": 7500
    },
    {
      "epoch": 0.8391987005955603,
      "grad_norm": 1.0283994674682617,
      "learning_rate": 7.203031943692475e-05,
      "loss": 3.3379,
      "step": 7750
    },
    {
      "epoch": 0.8662696264212236,
      "grad_norm": 0.9297059774398804,
      "learning_rate": 7.112795524273597e-05,
      "loss": 3.3196,
      "step": 8000
    },
    {
      "epoch": 0.8662696264212236,
      "eval_loss": 2.8071343898773193,
      "eval_runtime": 87.7691,
      "eval_samples_per_second": 112.226,
      "eval_steps_per_second": 7.018,
      "step": 8000
    },
    {
      "epoch": 0.8933405522468868,
      "grad_norm": 1.1025398969650269,
      "learning_rate": 7.02255910485472e-05,
      "loss": 3.3135,
      "step": 8250
    },
    {
      "epoch": 0.9204114780725501,
      "grad_norm": 0.9545220136642456,
      "learning_rate": 6.932322685435842e-05,
      "loss": 3.3235,
      "step": 8500
    },
    {
      "epoch": 0.9204114780725501,
      "eval_loss": 2.783069610595703,
      "eval_runtime": 88.0325,
      "eval_samples_per_second": 111.891,
      "eval_steps_per_second": 6.997,
      "step": 8500
    },
    {
      "epoch": 0.9474824038982134,
      "grad_norm": 1.0582618713378906,
      "learning_rate": 6.842086266016964e-05,
      "loss": 3.3076,
      "step": 8750
    },
    {
      "epoch": 0.9745533297238765,
      "grad_norm": 0.9848728775978088,
      "learning_rate": 6.751849846598087e-05,
      "loss": 3.3111,
      "step": 9000
    },
    {
      "epoch": 0.9745533297238765,
      "eval_loss": 2.7730352878570557,
      "eval_runtime": 87.8671,
      "eval_samples_per_second": 112.101,
      "eval_steps_per_second": 7.011,
      "step": 9000
    },
    {
      "epoch": 1.0016242555495398,
      "grad_norm": 1.0229711532592773,
      "learning_rate": 6.661613427179209e-05,
      "loss": 3.3042,
      "step": 9250
    },
    {
      "epoch": 1.028695181375203,
      "grad_norm": 1.1280988454818726,
      "learning_rate": 6.571377007760331e-05,
      "loss": 3.3033,
      "step": 9500
    },
    {
      "epoch": 1.028695181375203,
      "eval_loss": 2.7774875164031982,
      "eval_runtime": 85.5937,
      "eval_samples_per_second": 115.079,
      "eval_steps_per_second": 7.197,
      "step": 9500
    },
    {
      "epoch": 1.0557661072008662,
      "grad_norm": 1.2061575651168823,
      "learning_rate": 6.481140588341454e-05,
      "loss": 3.2889,
      "step": 9750
    },
    {
      "epoch": 1.0828370330265296,
      "grad_norm": 1.1995490789413452,
      "learning_rate": 6.390904168922578e-05,
      "loss": 3.2796,
      "step": 10000
    },
    {
      "epoch": 1.0828370330265296,
      "eval_loss": 2.773512363433838,
      "eval_runtime": 86.6895,
      "eval_samples_per_second": 113.624,
      "eval_steps_per_second": 7.106,
      "step": 10000
    },
    {
      "epoch": 1.1099079588521927,
      "grad_norm": 1.0674035549163818,
      "learning_rate": 6.3006677495037e-05,
      "loss": 3.2893,
      "step": 10250
    },
    {
      "epoch": 1.136978884677856,
      "grad_norm": 1.193318486213684,
      "learning_rate": 6.210431330084822e-05,
      "loss": 3.2957,
      "step": 10500
    },
    {
      "epoch": 1.136978884677856,
      "eval_loss": 2.768921375274658,
      "eval_runtime": 86.6142,
      "eval_samples_per_second": 113.723,
      "eval_steps_per_second": 7.112,
      "step": 10500
    },
    {
      "epoch": 1.1640498105035193,
      "grad_norm": 1.0185372829437256,
      "learning_rate": 6.120194910665945e-05,
      "loss": 3.2763,
      "step": 10750
    },
    {
      "epoch": 1.1911207363291825,
      "grad_norm": 0.9858824014663696,
      "learning_rate": 6.029958491247067e-05,
      "loss": 3.2935,
      "step": 11000
    },
    {
      "epoch": 1.1911207363291825,
      "eval_loss": 2.7698659896850586,
      "eval_runtime": 86.7972,
      "eval_samples_per_second": 113.483,
      "eval_steps_per_second": 7.097,
      "step": 11000
    },
    {
      "epoch": 1.2181916621548456,
      "grad_norm": 1.0175237655639648,
      "learning_rate": 5.9397220718281895e-05,
      "loss": 3.2765,
      "step": 11250
    },
    {
      "epoch": 1.245262587980509,
      "grad_norm": 0.9483986496925354,
      "learning_rate": 5.849485652409312e-05,
      "loss": 3.2714,
      "step": 11500
    },
    {
      "epoch": 1.245262587980509,
      "eval_loss": 2.758622884750366,
      "eval_runtime": 87.0052,
      "eval_samples_per_second": 113.212,
      "eval_steps_per_second": 7.08,
      "step": 11500
    },
    {
      "epoch": 1.2723335138061722,
      "grad_norm": 1.1170575618743896,
      "learning_rate": 5.759249232990436e-05,
      "loss": 3.2834,
      "step": 11750
    },
    {
      "epoch": 1.2994044396318354,
      "grad_norm": 1.1345117092132568,
      "learning_rate": 5.669012813571558e-05,
      "loss": 3.2633,
      "step": 12000
    },
    {
      "epoch": 1.2994044396318354,
      "eval_loss": 2.760470390319824,
      "eval_runtime": 87.1216,
      "eval_samples_per_second": 113.06,
      "eval_steps_per_second": 7.071,
      "step": 12000
    },
    {
      "epoch": 1.3264753654574988,
      "grad_norm": 0.9705148339271545,
      "learning_rate": 5.5787763941526805e-05,
      "loss": 3.2687,
      "step": 12250
    },
    {
      "epoch": 1.353546291283162,
      "grad_norm": 1.2483344078063965,
      "learning_rate": 5.488539974733803e-05,
      "loss": 3.2641,
      "step": 12500
    },
    {
      "epoch": 1.353546291283162,
      "eval_loss": 2.750833034515381,
      "eval_runtime": 87.0498,
      "eval_samples_per_second": 113.154,
      "eval_steps_per_second": 7.076,
      "step": 12500
    },
    {
      "epoch": 1.3806172171088251,
      "grad_norm": 0.9966532588005066,
      "learning_rate": 5.398303555314925e-05,
      "loss": 3.2619,
      "step": 12750
    },
    {
      "epoch": 1.4076881429344883,
      "grad_norm": 1.0225645303726196,
      "learning_rate": 5.3080671358960476e-05,
      "loss": 3.2647,
      "step": 13000
    },
    {
      "epoch": 1.4076881429344883,
      "eval_loss": 2.745871067047119,
      "eval_runtime": 87.2201,
      "eval_samples_per_second": 112.933,
      "eval_steps_per_second": 7.063,
      "step": 13000
    },
    {
      "epoch": 1.4347590687601515,
      "grad_norm": 0.9746631383895874,
      "learning_rate": 5.21783071647717e-05,
      "loss": 3.2489,
      "step": 13250
    },
    {
      "epoch": 1.4618299945858149,
      "grad_norm": 1.0829297304153442,
      "learning_rate": 5.1275942970582924e-05,
      "loss": 3.2456,
      "step": 13500
    },
    {
      "epoch": 1.4618299945858149,
      "eval_loss": 2.743380546569824,
      "eval_runtime": 86.2917,
      "eval_samples_per_second": 114.148,
      "eval_steps_per_second": 4.392,
      "step": 13500
    },
    {
      "epoch": 1.488900920411478,
      "grad_norm": 1.0580862760543823,
      "learning_rate": 5.037357877639416e-05,
      "loss": 3.2568,
      "step": 13750
    },
    {
      "epoch": 1.5159718462371412,
      "grad_norm": 0.9426606893539429,
      "learning_rate": 4.9471214582205386e-05,
      "loss": 3.2413,
      "step": 14000
    },
    {
      "epoch": 1.5159718462371412,
      "eval_loss": 2.7432825565338135,
      "eval_runtime": 86.3742,
      "eval_samples_per_second": 114.039,
      "eval_steps_per_second": 4.388,
      "step": 14000
    },
    {
      "epoch": 1.5430427720628046,
      "grad_norm": 1.001214623451233,
      "learning_rate": 4.856885038801661e-05,
      "loss": 3.2606,
      "step": 14250
    },
    {
      "epoch": 1.5701136978884678,
      "grad_norm": 1.0681531429290771,
      "learning_rate": 4.7666486193827834e-05,
      "loss": 3.2395,
      "step": 14500
    },
    {
      "epoch": 1.5701136978884678,
      "eval_loss": 2.7390074729919434,
      "eval_runtime": 86.2807,
      "eval_samples_per_second": 114.162,
      "eval_steps_per_second": 4.393,
      "step": 14500
    },
    {
      "epoch": 1.597184623714131,
      "grad_norm": 1.1323778629302979,
      "learning_rate": 4.676412199963906e-05,
      "loss": 3.2469,
      "step": 14750
    },
    {
      "epoch": 1.6242555495397943,
      "grad_norm": 1.0971120595932007,
      "learning_rate": 4.586175780545028e-05,
      "loss": 3.2362,
      "step": 15000
    },
    {
      "epoch": 1.6242555495397943,
      "eval_loss": 2.742565393447876,
      "eval_runtime": 86.3237,
      "eval_samples_per_second": 114.105,
      "eval_steps_per_second": 4.39,
      "step": 15000
    },
    {
      "epoch": 1.6513264753654575,
      "grad_norm": 0.8698033094406128,
      "learning_rate": 4.4959393611261505e-05,
      "loss": 3.2348,
      "step": 15250
    },
    {
      "epoch": 1.6783974011911207,
      "grad_norm": 1.107132077217102,
      "learning_rate": 4.405702941707273e-05,
      "loss": 3.2453,
      "step": 15500
    },
    {
      "epoch": 1.6783974011911207,
      "eval_loss": 2.73408842086792,
      "eval_runtime": 86.2136,
      "eval_samples_per_second": 114.251,
      "eval_steps_per_second": 4.396,
      "step": 15500
    },
    {
      "epoch": 1.705468327016784,
      "grad_norm": 1.015751838684082,
      "learning_rate": 4.315466522288395e-05,
      "loss": 3.242,
      "step": 15750
    },
    {
      "epoch": 1.7325392528424473,
      "grad_norm": 1.1737513542175293,
      "learning_rate": 4.2252301028695184e-05,
      "loss": 3.2309,
      "step": 16000
    },
    {
      "epoch": 1.7325392528424473,
      "eval_loss": 2.729858160018921,
      "eval_runtime": 86.1955,
      "eval_samples_per_second": 114.275,
      "eval_steps_per_second": 4.397,
      "step": 16000
    },
    {
      "epoch": 1.7596101786681104,
      "grad_norm": 1.0125904083251953,
      "learning_rate": 4.134993683450641e-05,
      "loss": 3.2313,
      "step": 16250
    },
    {
      "epoch": 1.7866811044937738,
      "grad_norm": 1.0134389400482178,
      "learning_rate": 4.044757264031763e-05,
      "loss": 3.227,
      "step": 16500
    },
    {
      "epoch": 1.7866811044937738,
      "eval_loss": 2.7314817905426025,
      "eval_runtime": 90.6312,
      "eval_samples_per_second": 108.682,
      "eval_steps_per_second": 3.63,
      "step": 16500
    },
    {
      "epoch": 1.8137520303194368,
      "grad_norm": 0.9060974717140198,
      "learning_rate": 3.954520844612886e-05,
      "loss": 3.2259,
      "step": 16750
    },
    {
      "epoch": 1.8408229561451002,
      "grad_norm": 1.01957368850708,
      "learning_rate": 3.8642844251940086e-05,
      "loss": 3.2254,
      "step": 17000
    },
    {
      "epoch": 1.8408229561451002,
      "eval_loss": 2.7291440963745117,
      "eval_runtime": 90.6696,
      "eval_samples_per_second": 108.636,
      "eval_steps_per_second": 3.629,
      "step": 17000
    },
    {
      "epoch": 1.8678938819707636,
      "grad_norm": 0.9996930360794067,
      "learning_rate": 3.774048005775131e-05,
      "loss": 3.2189,
      "step": 17250
    },
    {
      "epoch": 1.8949648077964265,
      "grad_norm": 1.0718858242034912,
      "learning_rate": 3.6838115863562534e-05,
      "loss": 3.208,
      "step": 17500
    },
    {
      "epoch": 1.8949648077964265,
      "eval_loss": 2.724837303161621,
      "eval_runtime": 90.2749,
      "eval_samples_per_second": 109.111,
      "eval_steps_per_second": 3.644,
      "step": 17500
    },
    {
      "epoch": 1.92203573362209,
      "grad_norm": 1.085183024406433,
      "learning_rate": 3.5935751669373765e-05,
      "loss": 3.2267,
      "step": 17750
    },
    {
      "epoch": 1.949106659447753,
      "grad_norm": 1.1373517513275146,
      "learning_rate": 3.503338747518499e-05,
      "loss": 3.2044,
      "step": 18000
    },
    {
      "epoch": 1.949106659447753,
      "eval_loss": 2.7174291610717773,
      "eval_runtime": 90.6301,
      "eval_samples_per_second": 108.684,
      "eval_steps_per_second": 3.63,
      "step": 18000
    },
    {
      "epoch": 1.9761775852734162,
      "grad_norm": 1.235625147819519,
      "learning_rate": 3.413102328099621e-05,
      "loss": 3.2136,
      "step": 18250
    },
    {
      "epoch": 2.0032485110990796,
      "grad_norm": 1.1179406642913818,
      "learning_rate": 3.322865908680744e-05,
      "loss": 3.2067,
      "step": 18500
    },
    {
      "epoch": 2.0032485110990796,
      "eval_loss": 2.718658447265625,
      "eval_runtime": 90.6713,
      "eval_samples_per_second": 108.634,
      "eval_steps_per_second": 3.628,
      "step": 18500
    },
    {
      "epoch": 2.030319436924743,
      "grad_norm": 1.0269511938095093,
      "learning_rate": 3.232629489261866e-05,
      "loss": 3.2035,
      "step": 18750
    },
    {
      "epoch": 2.057390362750406,
      "grad_norm": 0.9589810967445374,
      "learning_rate": 3.1423930698429885e-05,
      "loss": 3.2099,
      "step": 19000
    },
    {
      "epoch": 2.057390362750406,
      "eval_loss": 2.721846103668213,
      "eval_runtime": 90.6473,
      "eval_samples_per_second": 108.663,
      "eval_steps_per_second": 3.629,
      "step": 19000
    },
    {
      "epoch": 2.0844612885760694,
      "grad_norm": 1.1196249723434448,
      "learning_rate": 3.052156650424111e-05,
      "loss": 3.2153,
      "step": 19250
    },
    {
      "epoch": 2.1115322144017323,
      "grad_norm": 1.0364315509796143,
      "learning_rate": 2.961920231005234e-05,
      "loss": 3.2019,
      "step": 19500
    },
    {
      "epoch": 2.1115322144017323,
      "eval_loss": 2.7143051624298096,
      "eval_runtime": 88.2104,
      "eval_samples_per_second": 111.665,
      "eval_steps_per_second": 3.73,
      "step": 19500
    },
    {
      "epoch": 2.1386031402273957,
      "grad_norm": 1.0353423357009888,
      "learning_rate": 2.8716838115863563e-05,
      "loss": 3.1951,
      "step": 19750
    },
    {
      "epoch": 2.165674066053059,
      "grad_norm": 1.054973840713501,
      "learning_rate": 2.7814473921674787e-05,
      "loss": 3.2101,
      "step": 20000
    },
    {
      "epoch": 2.165674066053059,
      "eval_loss": 2.721579074859619,
      "eval_runtime": 88.2213,
      "eval_samples_per_second": 111.651,
      "eval_steps_per_second": 3.729,
      "step": 20000
    },
    {
      "epoch": 2.192744991878722,
      "grad_norm": 1.2565995454788208,
      "learning_rate": 2.691210972748601e-05,
      "loss": 3.2131,
      "step": 20250
    },
    {
      "epoch": 2.2198159177043855,
      "grad_norm": 1.0481986999511719,
      "learning_rate": 2.6009745533297242e-05,
      "loss": 3.1898,
      "step": 20500
    },
    {
      "epoch": 2.2198159177043855,
      "eval_loss": 2.7174248695373535,
      "eval_runtime": 88.1012,
      "eval_samples_per_second": 111.803,
      "eval_steps_per_second": 3.734,
      "step": 20500
    },
    {
      "epoch": 2.246886843530049,
      "grad_norm": 0.9449788331985474,
      "learning_rate": 2.5107381339108466e-05,
      "loss": 3.191,
      "step": 20750
    },
    {
      "epoch": 2.273957769355712,
      "grad_norm": 1.0603032112121582,
      "learning_rate": 2.420501714491969e-05,
      "loss": 3.1985,
      "step": 21000
    },
    {
      "epoch": 2.273957769355712,
      "eval_loss": 2.7089896202087402,
      "eval_runtime": 88.0285,
      "eval_samples_per_second": 111.896,
      "eval_steps_per_second": 3.737,
      "step": 21000
    },
    {
      "epoch": 2.301028695181375,
      "grad_norm": 1.096023440361023,
      "learning_rate": 2.3302652950730917e-05,
      "loss": 3.1891,
      "step": 21250
    },
    {
      "epoch": 2.3280996210070386,
      "grad_norm": 1.216874599456787,
      "learning_rate": 2.2400288756542144e-05,
      "loss": 3.1974,
      "step": 21500
    },
    {
      "epoch": 2.3280996210070386,
      "eval_loss": 2.707638740539551,
      "eval_runtime": 88.1174,
      "eval_samples_per_second": 111.783,
      "eval_steps_per_second": 3.734,
      "step": 21500
    },
    {
      "epoch": 2.3551705468327016,
      "grad_norm": 1.0820770263671875,
      "learning_rate": 2.1497924562353368e-05,
      "loss": 3.1982,
      "step": 21750
    },
    {
      "epoch": 2.382241472658365,
      "grad_norm": 1.223590612411499,
      "learning_rate": 2.0595560368164592e-05,
      "loss": 3.1779,
      "step": 22000
    },
    {
      "epoch": 2.382241472658365,
      "eval_loss": 2.7064244747161865,
      "eval_runtime": 88.1632,
      "eval_samples_per_second": 111.725,
      "eval_steps_per_second": 3.732,
      "step": 22000
    },
    {
      "epoch": 2.4093123984840283,
      "grad_norm": 1.0039288997650146,
      "learning_rate": 1.9693196173975816e-05,
      "loss": 3.1909,
      "step": 22250
    },
    {
      "epoch": 2.4363833243096913,
      "grad_norm": 1.0038957595825195,
      "learning_rate": 1.8790831979787043e-05,
      "loss": 3.1904,
      "step": 22500
    },
    {
      "epoch": 2.4363833243096913,
      "eval_loss": 2.706280469894409,
      "eval_runtime": 88.2453,
      "eval_samples_per_second": 111.621,
      "eval_steps_per_second": 3.728,
      "step": 22500
    },
    {
      "epoch": 2.4634542501353547,
      "grad_norm": 1.0922913551330566,
      "learning_rate": 1.7888467785598267e-05,
      "loss": 3.194,
      "step": 22750
    },
    {
      "epoch": 2.490525175961018,
      "grad_norm": 1.0635244846343994,
      "learning_rate": 1.6986103591409495e-05,
      "loss": 3.1961,
      "step": 23000
    },
    {
      "epoch": 2.490525175961018,
      "eval_loss": 2.708953619003296,
      "eval_runtime": 88.2167,
      "eval_samples_per_second": 111.657,
      "eval_steps_per_second": 3.729,
      "step": 23000
    },
    {
      "epoch": 2.517596101786681,
      "grad_norm": 1.0230063199996948,
      "learning_rate": 1.608373939722072e-05,
      "loss": 3.1841,
      "step": 23250
    },
    {
      "epoch": 2.5446670276123444,
      "grad_norm": 1.0655814409255981,
      "learning_rate": 1.5181375203031944e-05,
      "loss": 3.2059,
      "step": 23500
    },
    {
      "epoch": 2.5446670276123444,
      "eval_loss": 2.7019364833831787,
      "eval_runtime": 88.9998,
      "eval_samples_per_second": 110.674,
      "eval_steps_per_second": 3.697,
      "step": 23500
    },
    {
      "epoch": 2.5717379534380074,
      "grad_norm": 1.1926429271697998,
      "learning_rate": 1.4279011008843171e-05,
      "loss": 3.187,
      "step": 23750
    },
    {
      "epoch": 2.5988088792636708,
      "grad_norm": 1.09559166431427,
      "learning_rate": 1.3376646814654395e-05,
      "loss": 3.1913,
      "step": 24000
    },
    {
      "epoch": 2.5988088792636708,
      "eval_loss": 2.7079548835754395,
      "eval_runtime": 88.7957,
      "eval_samples_per_second": 110.929,
      "eval_steps_per_second": 3.705,
      "step": 24000
    },
    {
      "epoch": 2.625879805089334,
      "grad_norm": 1.0533467531204224,
      "learning_rate": 1.2474282620465621e-05,
      "loss": 3.189,
      "step": 24250
    },
    {
      "epoch": 2.6529507309149976,
      "grad_norm": 1.0677307844161987,
      "learning_rate": 1.1571918426276845e-05,
      "loss": 3.1908,
      "step": 24500
    },
    {
      "epoch": 2.6529507309149976,
      "eval_loss": 2.7036521434783936,
      "eval_runtime": 89.1696,
      "eval_samples_per_second": 110.464,
      "eval_steps_per_second": 3.69,
      "step": 24500
    },
    {
      "epoch": 2.6800216567406605,
      "grad_norm": 1.1612430810928345,
      "learning_rate": 1.066955423208807e-05,
      "loss": 3.2007,
      "step": 24750
    },
    {
      "epoch": 2.707092582566324,
      "grad_norm": 0.9524983763694763,
      "learning_rate": 9.767190037899298e-06,
      "loss": 3.1934,
      "step": 25000
    },
    {
      "epoch": 2.707092582566324,
      "eval_loss": 2.704775333404541,
      "eval_runtime": 89.0167,
      "eval_samples_per_second": 110.653,
      "eval_steps_per_second": 3.696,
      "step": 25000
    },
    {
      "epoch": 2.734163508391987,
      "grad_norm": 1.1597352027893066,
      "learning_rate": 8.864825843710522e-06,
      "loss": 3.1883,
      "step": 25250
    },
    {
      "epoch": 2.7612344342176502,
      "grad_norm": 1.1218962669372559,
      "learning_rate": 7.962461649521747e-06,
      "loss": 3.1889,
      "step": 25500
    },
    {
      "epoch": 2.7612344342176502,
      "eval_loss": 2.701597213745117,
      "eval_runtime": 89.0323,
      "eval_samples_per_second": 110.634,
      "eval_steps_per_second": 3.695,
      "step": 25500
    },
    {
      "epoch": 2.7883053600433136,
      "grad_norm": 1.1327321529388428,
      "learning_rate": 7.060097455332973e-06,
      "loss": 3.1874,
      "step": 25750
    },
    {
      "epoch": 2.8153762858689766,
      "grad_norm": 1.2180895805358887,
      "learning_rate": 6.157733261144198e-06,
      "loss": 3.1883,
      "step": 26000
    },
    {
      "epoch": 2.8153762858689766,
      "eval_loss": 2.703519105911255,
      "eval_runtime": 88.8695,
      "eval_samples_per_second": 110.837,
      "eval_steps_per_second": 3.702,
      "step": 26000
    },
    {
      "epoch": 2.84244721169464,
      "grad_norm": 0.9983986020088196,
      "learning_rate": 5.2553690669554234e-06,
      "loss": 3.1859,
      "step": 26250
    },
    {
      "epoch": 2.869518137520303,
      "grad_norm": 1.367304801940918,
      "learning_rate": 4.353004872766649e-06,
      "loss": 3.181,
      "step": 26500
    },
    {
      "epoch": 2.869518137520303,
      "eval_loss": 2.7028160095214844,
      "eval_runtime": 89.1113,
      "eval_samples_per_second": 110.536,
      "eval_steps_per_second": 3.692,
      "step": 26500
    },
    {
      "epoch": 2.8965890633459663,
      "grad_norm": 1.0023523569107056,
      "learning_rate": 3.450640678577874e-06,
      "loss": 3.1741,
      "step": 26750
    },
    {
      "epoch": 2.9236599891716297,
      "grad_norm": 1.055477499961853,
      "learning_rate": 2.5482764843890995e-06,
      "loss": 3.1904,
      "step": 27000
    },
    {
      "epoch": 2.9236599891716297,
      "eval_loss": 2.702355146408081,
      "eval_runtime": 89.0095,
      "eval_samples_per_second": 110.662,
      "eval_steps_per_second": 3.696,
      "step": 27000
    },
    {
      "epoch": 2.950730914997293,
      "grad_norm": 1.1611043214797974,
      "learning_rate": 1.645912290200325e-06,
      "loss": 3.1703,
      "step": 27250
    },
    {
      "epoch": 2.977801840822956,
      "grad_norm": 1.0057547092437744,
      "learning_rate": 7.435480960115503e-07,
      "loss": 3.1838,
      "step": 27500
    },
    {
      "epoch": 2.977801840822956,
      "eval_loss": 2.7012827396392822,
      "eval_runtime": 89.1882,
      "eval_samples_per_second": 110.441,
      "eval_steps_per_second": 3.689,
      "step": 27500
    }
  ],
  "logging_steps": 250,
  "max_steps": 27705,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0002326224896e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}