One-Shot-CFT-Math-Qwen-7B / trainer_state.json
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 29.78527607361963,
"eval_steps": 2,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.7852760736196319,
"grad_norm": 16.91557013833436,
"learning_rate": 6.25e-07,
"loss": 0.6924901008605957,
"memory(GiB)": 59.48,
"step": 1,
"token_acc": 0.8249132429587075,
"train_speed(iter/s)": 0.004929
},
{
"epoch": 1.7852760736196318,
"grad_norm": 32.093819128778975,
"learning_rate": 1.25e-06,
"loss": 1.3804800510406494,
"memory(GiB)": 65.24,
"step": 2,
"token_acc": 0.8298509781294038,
"train_speed(iter/s)": 0.004777
},
{
"epoch": 2.785276073619632,
"grad_norm": 28.105492367287425,
"learning_rate": 1.8750000000000003e-06,
"loss": 1.368883490562439,
"memory(GiB)": 65.24,
"step": 3,
"token_acc": 0.8234861950468065,
"train_speed(iter/s)": 0.004504
},
{
"epoch": 3.785276073619632,
"grad_norm": 23.4948914824423,
"learning_rate": 2.5e-06,
"loss": 1.3871572017669678,
"memory(GiB)": 65.24,
"step": 4,
"token_acc": 0.8268231152830742,
"train_speed(iter/s)": 0.004539
},
{
"epoch": 4.785276073619632,
"grad_norm": 22.70640438340761,
"learning_rate": 3.125e-06,
"loss": 1.3857779502868652,
"memory(GiB)": 65.24,
"step": 5,
"token_acc": 0.8242320404393895,
"train_speed(iter/s)": 0.004446
},
{
"epoch": 5.785276073619632,
"grad_norm": 15.466734654904698,
"learning_rate": 3.7500000000000005e-06,
"loss": 1.2761751413345337,
"memory(GiB)": 65.24,
"step": 6,
"token_acc": 0.8388591983556012,
"train_speed(iter/s)": 0.004472
},
{
"epoch": 6.785276073619632,
"grad_norm": 16.008101850251936,
"learning_rate": 4.3750000000000005e-06,
"loss": 1.254455804824829,
"memory(GiB)": 65.24,
"step": 7,
"token_acc": 0.8327785368277233,
"train_speed(iter/s)": 0.004412
},
{
"epoch": 7.785276073619632,
"grad_norm": 14.437159936379576,
"learning_rate": 5e-06,
"loss": 1.2218172550201416,
"memory(GiB)": 65.24,
"step": 8,
"token_acc": 0.8436573882508192,
"train_speed(iter/s)": 0.004441
},
{
"epoch": 8.785276073619633,
"grad_norm": 11.827036693336053,
"learning_rate": 4.987961816680493e-06,
"loss": 1.0844422578811646,
"memory(GiB)": 65.24,
"step": 9,
"token_acc": 0.8532927916376647,
"train_speed(iter/s)": 0.004399
},
{
"epoch": 9.785276073619633,
"grad_norm": 8.677216453543807,
"learning_rate": 4.9519632010080765e-06,
"loss": 1.0037188529968262,
"memory(GiB)": 65.24,
"step": 10,
"token_acc": 0.8562772549919669,
"train_speed(iter/s)": 0.00442
},
{
"epoch": 10.785276073619633,
"grad_norm": 9.608357831993152,
"learning_rate": 4.8923508393305224e-06,
"loss": 0.8877495527267456,
"memory(GiB)": 65.24,
"step": 11,
"token_acc": 0.8673956568612352,
"train_speed(iter/s)": 0.004387
},
{
"epoch": 11.785276073619633,
"grad_norm": 5.865247304013574,
"learning_rate": 4.809698831278217e-06,
"loss": 0.8756588697433472,
"memory(GiB)": 65.24,
"step": 12,
"token_acc": 0.8741366742033928,
"train_speed(iter/s)": 0.004405
},
{
"epoch": 12.785276073619633,
"grad_norm": 8.181094202272746,
"learning_rate": 4.704803160870888e-06,
"loss": 0.8439725637435913,
"memory(GiB)": 65.24,
"step": 13,
"token_acc": 0.8747614672415012,
"train_speed(iter/s)": 0.00438
},
{
"epoch": 13.785276073619633,
"grad_norm": 3.0779863644676038,
"learning_rate": 4.578674030756364e-06,
"loss": 0.8301749229431152,
"memory(GiB)": 65.24,
"step": 14,
"token_acc": 0.8825180146971391,
"train_speed(iter/s)": 0.0044
},
{
"epoch": 14.785276073619633,
"grad_norm": 2.2519308642126976,
"learning_rate": 4.432526133406843e-06,
"loss": 0.7788468599319458,
"memory(GiB)": 65.24,
"step": 15,
"token_acc": 0.8834006700827749,
"train_speed(iter/s)": 0.004377
},
{
"epoch": 15.785276073619633,
"grad_norm": 2.472550949106497,
"learning_rate": 4.267766952966369e-06,
"loss": 0.7373002767562866,
"memory(GiB)": 65.24,
"step": 16,
"token_acc": 0.8850886438730918,
"train_speed(iter/s)": 0.004393
},
{
"epoch": 16.78527607361963,
"grad_norm": 2.167954580123407,
"learning_rate": 4.085983210409114e-06,
"loss": 0.7213550806045532,
"memory(GiB)": 65.24,
"step": 17,
"token_acc": 0.8929335420724931,
"train_speed(iter/s)": 0.004374
},
{
"epoch": 17.78527607361963,
"grad_norm": 7.407922053021935,
"learning_rate": 3.888925582549006e-06,
"loss": 0.6829527616500854,
"memory(GiB)": 65.24,
"step": 18,
"token_acc": 0.898971411837915,
"train_speed(iter/s)": 0.004387
},
{
"epoch": 18.78527607361963,
"grad_norm": 3.205996489689816,
"learning_rate": 3.6784918420649952e-06,
"loss": 0.6937679648399353,
"memory(GiB)": 65.24,
"step": 19,
"token_acc": 0.89389552274522,
"train_speed(iter/s)": 0.004367
},
{
"epoch": 19.78527607361963,
"grad_norm": 1.8062200639280714,
"learning_rate": 3.4567085809127247e-06,
"loss": 0.6299235224723816,
"memory(GiB)": 65.24,
"step": 20,
"token_acc": 0.8986795675864391,
"train_speed(iter/s)": 0.004381
},
{
"epoch": 20.78527607361963,
"grad_norm": 1.8367029186994497,
"learning_rate": 3.225711693136156e-06,
"loss": 0.6152043342590332,
"memory(GiB)": 65.24,
"step": 21,
"token_acc": 0.8981181532678849,
"train_speed(iter/s)": 0.004366
},
{
"epoch": 21.78527607361963,
"grad_norm": 1.6858306482437981,
"learning_rate": 2.9877258050403214e-06,
"loss": 0.5852848887443542,
"memory(GiB)": 65.24,
"step": 22,
"token_acc": 0.9046662374852932,
"train_speed(iter/s)": 0.004377
},
{
"epoch": 22.78527607361963,
"grad_norm": 1.5396750222326772,
"learning_rate": 2.7450428508239024e-06,
"loss": 0.5857734680175781,
"memory(GiB)": 65.24,
"step": 23,
"token_acc": 0.9069728121195726,
"train_speed(iter/s)": 0.004362
},
{
"epoch": 23.78527607361963,
"grad_norm": 2.726801161598347,
"learning_rate": 2.5e-06,
"loss": 0.594467043876648,
"memory(GiB)": 65.24,
"step": 24,
"token_acc": 0.9061053767793295,
"train_speed(iter/s)": 0.004374
},
{
"epoch": 24.78527607361963,
"grad_norm": 1.3473993763783847,
"learning_rate": 2.2549571491760985e-06,
"loss": 0.5740076303482056,
"memory(GiB)": 65.24,
"step": 25,
"token_acc": 0.9047997112955611,
"train_speed(iter/s)": 0.004361
},
{
"epoch": 25.78527607361963,
"grad_norm": 1.4244563323397152,
"learning_rate": 2.01227419495968e-06,
"loss": 0.5681454539299011,
"memory(GiB)": 65.24,
"step": 26,
"token_acc": 0.9087721742360918,
"train_speed(iter/s)": 0.00437
},
{
"epoch": 26.78527607361963,
"grad_norm": 1.239553560433712,
"learning_rate": 1.7742883068638447e-06,
"loss": 0.5257890224456787,
"memory(GiB)": 65.24,
"step": 27,
"token_acc": 0.9064646632419414,
"train_speed(iter/s)": 0.00436
},
{
"epoch": 27.78527607361963,
"grad_norm": 1.1788437383399384,
"learning_rate": 1.5432914190872757e-06,
"loss": 0.5807796716690063,
"memory(GiB)": 65.24,
"step": 28,
"token_acc": 0.9147543418525945,
"train_speed(iter/s)": 0.004368
},
{
"epoch": 28.78527607361963,
"grad_norm": 1.1910633482768642,
"learning_rate": 1.3215081579350058e-06,
"loss": 0.5537985563278198,
"memory(GiB)": 65.24,
"step": 29,
"token_acc": 0.9150303118131536,
"train_speed(iter/s)": 0.004357
},
{
"epoch": 29.78527607361963,
"grad_norm": 1.2237195172674145,
"learning_rate": 1.1110744174509952e-06,
"loss": 0.5225300788879395,
"memory(GiB)": 65.24,
"step": 30,
"token_acc": 0.9138848454453157,
"train_speed(iter/s)": 0.004366
}
],
"logging_steps": 1,
"max_steps": 40,
"num_input_tokens_seen": 0,
"num_train_epochs": 40,
"save_steps": 2,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 47704180129792.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
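
For reference, a minimal sketch of how the state above can be read back programmatically, assuming a local copy of this file saved as trainer_state.json (the path is an assumption; any downloaded copy works). It loads the JSON with the standard library and prints the per-step loss, token accuracy, and learning rate recorded in log_history; since logging_steps is 1, each entry corresponds to one optimizer step.

import json

# "trainer_state.json" is an assumed local path to a copy of this file.
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

print(f"global_step={state['global_step']}  "
      f"epoch={state['epoch']:.2f}  "
      f"max_steps={state['max_steps']}")

# One log entry per optimizer step (logging_steps = 1 in this run).
for entry in state["log_history"]:
    print(f"step {entry['step']:>2}: "
          f"loss={entry['loss']:.4f}  "
          f"token_acc={entry['token_acc']:.4f}  "
          f"lr={entry['learning_rate']:.2e}")

This reads the file as plain JSON rather than through any Trainer API, so it works on any machine with Python installed and makes no assumptions about the training framework beyond the field names visible above.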