{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9969751966122202,
  "eval_steps": 100.0,
  "global_step": 206,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04839685420447671,
      "grad_norm": 1.21875,
      "learning_rate": 0.00019883937098250963,
      "loss": 1.4872,
      "step": 10
    },
    {
      "epoch": 0.09679370840895342,
      "grad_norm": 2.359375,
      "learning_rate": 0.00019538442512436328,
      "loss": 0.9191,
      "step": 20
    },
    {
      "epoch": 0.14519056261343014,
      "grad_norm": 2.578125,
      "learning_rate": 0.00018971536063389744,
      "loss": 0.7246,
      "step": 30
    },
    {
      "epoch": 0.19358741681790684,
      "grad_norm": 0.2578125,
      "learning_rate": 0.00018196377112610526,
      "loss": 0.6294,
      "step": 40
    },
    {
      "epoch": 0.24198427102238354,
      "grad_norm": 0.44921875,
      "learning_rate": 0.00017230959099527512,
      "loss": 0.5323,
      "step": 50
    },
    {
      "epoch": 0.29038112522686027,
      "grad_norm": 0.361328125,
      "learning_rate": 0.00016097691867340545,
      "loss": 0.5302,
      "step": 60
    },
    {
      "epoch": 0.338777979431337,
      "grad_norm": 0.287109375,
      "learning_rate": 0.00014822881472734562,
      "loss": 0.4468,
      "step": 70
    },
    {
      "epoch": 0.3871748336358137,
      "grad_norm": 0.26953125,
      "learning_rate": 0.00013436119554425133,
      "loss": 0.4287,
      "step": 80
    },
    {
      "epoch": 0.4355716878402904,
      "grad_norm": 0.35546875,
      "learning_rate": 0.00011969596434867063,
      "loss": 0.4989,
      "step": 90
    },
    {
      "epoch": 0.4839685420447671,
      "grad_norm": 0.408203125,
      "learning_rate": 0.00010457353899807946,
      "loss": 0.4332,
      "step": 100
    },
    {
      "epoch": 0.5323653962492438,
      "grad_norm": 0.283203125,
      "learning_rate": 8.93449500060124e-05,
      "loss": 0.4339,
      "step": 110
    },
    {
      "epoch": 0.5807622504537205,
      "grad_norm": 0.42578125,
      "learning_rate": 7.436369221806201e-05,
      "loss": 0.4827,
      "step": 120
    },
    {
      "epoch": 0.6291591046581972,
      "grad_norm": 0.30078125,
      "learning_rate": 5.9977519284372194e-05,
      "loss": 0.4237,
      "step": 130
    },
    {
      "epoch": 0.677555958862674,
      "grad_norm": 0.2890625,
      "learning_rate": 4.652037140009259e-05,
      "loss": 0.414,
      "step": 140
    },
    {
      "epoch": 0.7259528130671506,
      "grad_norm": 0.275390625,
      "learning_rate": 3.430462369176619e-05,
      "loss": 0.3736,
      "step": 150
    },
    {
      "epoch": 0.7743496672716274,
      "grad_norm": 0.267578125,
      "learning_rate": 2.3613835184605525e-05,
      "loss": 0.3812,
      "step": 160
    },
    {
      "epoch": 0.822746521476104,
      "grad_norm": 0.361328125,
      "learning_rate": 1.4696166665835853e-05,
      "loss": 0.4415,
      "step": 170
    },
    {
      "epoch": 0.8711433756805808,
      "grad_norm": 0.189453125,
      "learning_rate": 7.758620232482084e-06,
      "loss": 0.4423,
      "step": 180
    },
    {
      "epoch": 0.9195402298850575,
      "grad_norm": 0.2890625,
      "learning_rate": 2.9622342385589254e-06,
      "loss": 0.4125,
      "step": 190
    },
    {
      "epoch": 0.9679370840895342,
      "grad_norm": 0.31640625,
      "learning_rate": 4.1834517933907467e-07,
      "loss": 0.4855,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 206,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.488159952764928e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}