{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 375,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.16,
      "grad_norm": 6.21875,
      "learning_rate": 1.0526315789473684e-05,
      "loss": 0.9907,
      "step": 20
    },
    {
      "epoch": 0.32,
      "grad_norm": 6.875,
      "learning_rate": 1.9998261969639324e-05,
      "loss": 0.6341,
      "step": 40
    },
    {
      "epoch": 0.48,
      "grad_norm": 4.6875,
      "learning_rate": 1.979042835741503e-05,
      "loss": 0.5906,
      "step": 60
    },
    {
      "epoch": 0.64,
      "grad_norm": 4.75,
      "learning_rate": 1.9243248381877605e-05,
      "loss": 0.514,
      "step": 80
    },
    {
      "epoch": 0.8,
      "grad_norm": 4.53125,
      "learning_rate": 1.8375687866379988e-05,
      "loss": 0.5274,
      "step": 100
    },
    {
      "epoch": 0.96,
      "grad_norm": 4.875,
      "learning_rate": 1.721781735483921e-05,
      "loss": 0.5112,
      "step": 120
    },
    {
      "epoch": 1.12,
      "grad_norm": 4.28125,
      "learning_rate": 1.580976983561235e-05,
      "loss": 0.3529,
      "step": 140
    },
    {
      "epoch": 1.28,
      "grad_norm": 4.09375,
      "learning_rate": 1.4200349690650654e-05,
      "loss": 0.2895,
      "step": 160
    },
    {
      "epoch": 1.44,
      "grad_norm": 3.859375,
      "learning_rate": 1.24453410851916e-05,
      "loss": 0.3192,
      "step": 180
    },
    {
      "epoch": 1.6,
      "grad_norm": 4.40625,
      "learning_rate": 1.0605574430949983e-05,
      "loss": 0.3116,
      "step": 200
    },
    {
      "epoch": 1.76,
      "grad_norm": 3.25,
      "learning_rate": 8.744817941191862e-06,
      "loss": 0.2836,
      "step": 220
    },
    {
      "epoch": 1.92,
      "grad_norm": 3.03125,
      "learning_rate": 6.92756735857107e-06,
      "loss": 0.2987,
      "step": 240
    },
    {
      "epoch": 2.08,
      "grad_norm": 2.953125,
      "learning_rate": 5.216810466045448e-06,
      "loss": 0.2291,
      "step": 260
    },
    {
      "epoch": 2.24,
      "grad_norm": 2.8125,
      "learning_rate": 3.671843865234238e-06,
      "loss": 0.1602,
      "step": 280
    },
    {
      "epoch": 2.4,
      "grad_norm": 2.515625,
      "learning_rate": 2.346217694934847e-06,
      "loss": 0.1832,
      "step": 300
    },
    {
      "epoch": 2.56,
      "grad_norm": 3.21875,
      "learning_rate": 1.2858795279787517e-06,
      "loss": 0.1625,
      "step": 320
    },
    {
      "epoch": 2.7199999999999998,
      "grad_norm": 4.25,
      "learning_rate": 5.275817808796013e-07,
      "loss": 0.1814,
      "step": 340
    },
    {
      "epoch": 2.88,
      "grad_norm": 2.890625,
      "learning_rate": 9.760783710056176e-08,
      "loss": 0.1686,
      "step": 360
    },
    {
      "epoch": 3.0,
      "step": 375,
      "total_flos": 6.516282451872973e+16,
      "train_loss": 0.36450403785705565,
      "train_runtime": 1387.1769,
      "train_samples_per_second": 4.325,
      "train_steps_per_second": 0.27
    }
  ],
  "logging_steps": 20,
  "max_steps": 375,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 5000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.516282451872973e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}