{
  "best_global_step": 1600,
  "best_metric": 1.9110984802246094,
  "best_model_checkpoint": "./hyperclova-deobfuscation-lora/checkpoint-1600",
  "epoch": 2.9955555555555557,
  "eval_steps": 200,
  "global_step": 1686,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.017777777777777778,
      "grad_norm": 3.3687641620635986,
      "learning_rate": 1.8e-05,
      "loss": 4.1361,
      "mean_token_accuracy": 0.3493226237595081,
      "num_tokens": 22106.0,
      "step": 10
    },
    {
      "epoch": 0.035555555555555556,
      "grad_norm": 2.5920090675354004,
      "learning_rate": 3.8e-05,
      "loss": 3.7165,
      "mean_token_accuracy": 0.4088538818061352,
      "num_tokens": 44943.0,
      "step": 20
    },
    {
      "epoch": 0.05333333333333334,
      "grad_norm": 2.5703377723693848,
      "learning_rate": 5.8e-05,
      "loss": 3.3356,
      "mean_token_accuracy": 0.4755532510578632,
      "num_tokens": 67397.0,
      "step": 30
    },
    {
      "epoch": 0.07111111111111111,
      "grad_norm": 1.698912262916565,
      "learning_rate": 7.800000000000001e-05,
      "loss": 2.9874,
      "mean_token_accuracy": 0.508383595943451,
      "num_tokens": 89803.0,
      "step": 40
    },
    {
      "epoch": 0.08888888888888889,
      "grad_norm": 1.4602556228637695,
      "learning_rate": 9.8e-05,
      "loss": 2.7854,
      "mean_token_accuracy": 0.5358646497130394,
      "num_tokens": 112364.0,
      "step": 50
    },
    {
      "epoch": 0.10666666666666667,
      "grad_norm": 1.5916705131530762,
      "learning_rate": 0.000118,
      "loss": 2.6546,
      "mean_token_accuracy": 0.5485944993793964,
      "num_tokens": 134028.0,
      "step": 60
    },
    {
      "epoch": 0.12444444444444444,
      "grad_norm": 1.6815338134765625,
      "learning_rate": 0.000138,
      "loss": 2.606,
      "mean_token_accuracy": 0.5535938143730164,
      "num_tokens": 156703.0,
      "step": 70
    },
    {
      "epoch": 0.14222222222222222,
      "grad_norm": 1.8009140491485596,
      "learning_rate": 0.00015800000000000002,
      "loss": 2.5307,
      "mean_token_accuracy": 0.5640750013291835,
      "num_tokens": 178986.0,
      "step": 80
    },
    {
      "epoch": 0.16,
      "grad_norm": 1.4582855701446533,
      "learning_rate": 0.00017800000000000002,
      "loss": 2.5633,
      "mean_token_accuracy": 0.5567230455577373,
      "num_tokens": 201989.0,
      "step": 90
    },
    {
      "epoch": 0.17777777777777778,
      "grad_norm": 1.663874626159668,
      "learning_rate": 0.00019800000000000002,
      "loss": 2.4672,
      "mean_token_accuracy": 0.5688358306884765,
      "num_tokens": 223936.0,
      "step": 100
    },
    {
      "epoch": 0.19555555555555557,
      "grad_norm": 1.6701704263687134,
      "learning_rate": 0.00019886506935687262,
      "loss": 2.4388,
      "mean_token_accuracy": 0.5760447531938553,
      "num_tokens": 246101.0,
      "step": 110
    },
    {
      "epoch": 0.21333333333333335,
      "grad_norm": 1.5731302499771118,
      "learning_rate": 0.00019760403530895334,
      "loss": 2.4377,
      "mean_token_accuracy": 0.5711787067353725,
      "num_tokens": 269187.0,
      "step": 120
    },
    {
      "epoch": 0.2311111111111111,
      "grad_norm": 1.4479353427886963,
      "learning_rate": 0.00019634300126103406,
      "loss": 2.3596,
      "mean_token_accuracy": 0.5830569051206111,
      "num_tokens": 291454.0,
      "step": 130
    },
    {
      "epoch": 0.24888888888888888,
      "grad_norm": 1.3653457164764404,
      "learning_rate": 0.00019508196721311475,
      "loss": 2.3648,
      "mean_token_accuracy": 0.5807973451912403,
      "num_tokens": 314204.0,
      "step": 140
    },
    {
      "epoch": 0.26666666666666666,
      "grad_norm": 1.4210327863693237,
      "learning_rate": 0.00019382093316519546,
      "loss": 2.3186,
      "mean_token_accuracy": 0.5878118917346,
      "num_tokens": 337167.0,
      "step": 150
    },
    {
      "epoch": 0.28444444444444444,
      "grad_norm": 1.532408356666565,
      "learning_rate": 0.00019255989911727615,
      "loss": 2.3637,
      "mean_token_accuracy": 0.5761628717184066,
      "num_tokens": 360272.0,
      "step": 160
    },
    {
      "epoch": 0.3022222222222222,
      "grad_norm": 1.4010679721832275,
      "learning_rate": 0.00019129886506935687,
      "loss": 2.2701,
      "mean_token_accuracy": 0.598077318072319,
      "num_tokens": 382779.0,
      "step": 170
    },
    {
      "epoch": 0.32,
      "grad_norm": 1.5830323696136475,
      "learning_rate": 0.0001900378310214376,
      "loss": 2.2861,
      "mean_token_accuracy": 0.5928302705287933,
      "num_tokens": 405438.0,
      "step": 180
    },
    {
      "epoch": 0.3377777777777778,
      "grad_norm": 1.4623483419418335,
      "learning_rate": 0.00018877679697351828,
      "loss": 2.3192,
      "mean_token_accuracy": 0.5854370579123497,
      "num_tokens": 428660.0,
      "step": 190
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 1.4850527048110962,
      "learning_rate": 0.000187515762925599,
      "loss": 2.256,
      "step": 200
    },
    {
      "epoch": 0.35555555555555557,
      "eval_loss": 2.254753351211548,
      "eval_mean_token_accuracy": 0.5965457199811935,
      "eval_num_tokens": 450808.0,
      "eval_runtime": 30.9386,
      "eval_samples_per_second": 32.322,
      "eval_steps_per_second": 8.081,
      "step": 200
    },
    {
      "epoch": 0.37333333333333335,
      "grad_norm": 1.4195237159729004,
      "learning_rate": 0.00018625472887767968,
      "loss": 2.2607,
      "mean_token_accuracy": 0.594056948274374,
      "num_tokens": 473434.0,
      "step": 210
    },
    {
      "epoch": 0.39111111111111113,
      "grad_norm": 1.3114796876907349,
      "learning_rate": 0.0001849936948297604,
      "loss": 2.2947,
      "mean_token_accuracy": 0.5898103177547455,
      "num_tokens": 496482.0,
      "step": 220
    },
    {
      "epoch": 0.4088888888888889,
      "grad_norm": 1.4004285335540771,
      "learning_rate": 0.00018373266078184112,
      "loss": 2.2542,
      "mean_token_accuracy": 0.5970372915267944,
      "num_tokens": 519379.0,
      "step": 230
    },
    {
      "epoch": 0.4266666666666667,
      "grad_norm": 1.3860116004943848,
      "learning_rate": 0.0001824716267339218,
      "loss": 2.2636,
      "mean_token_accuracy": 0.59425338357687,
      "num_tokens": 542631.0,
      "step": 240
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 1.3675146102905273,
      "learning_rate": 0.00018121059268600253,
      "loss": 2.2412,
      "mean_token_accuracy": 0.5928545072674751,
      "num_tokens": 565400.0,
      "step": 250
    },
    {
      "epoch": 0.4622222222222222,
      "grad_norm": 1.4246889352798462,
      "learning_rate": 0.00017994955863808322,
      "loss": 2.1577,
      "mean_token_accuracy": 0.6061514511704444,
      "num_tokens": 588003.0,
      "step": 260
    },
    {
      "epoch": 0.48,
      "grad_norm": 1.4046531915664673,
      "learning_rate": 0.00017868852459016393,
      "loss": 2.1862,
      "mean_token_accuracy": 0.6008762732148171,
      "num_tokens": 610974.0,
      "step": 270
    },
    {
      "epoch": 0.49777777777777776,
      "grad_norm": 1.4038338661193848,
      "learning_rate": 0.00017742749054224465,
      "loss": 2.2219,
      "mean_token_accuracy": 0.5970636487007142,
      "num_tokens": 634093.0,
      "step": 280
    },
    {
      "epoch": 0.5155555555555555,
      "grad_norm": 1.3291988372802734,
      "learning_rate": 0.00017616645649432534,
      "loss": 2.131,
      "mean_token_accuracy": 0.6172704175114632,
      "num_tokens": 656188.0,
      "step": 290
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 1.444318413734436,
      "learning_rate": 0.00017490542244640606,
      "loss": 2.1691,
      "mean_token_accuracy": 0.6066021353006363,
      "num_tokens": 678769.0,
      "step": 300
    },
    {
      "epoch": 0.5511111111111111,
      "grad_norm": 1.3459752798080444,
      "learning_rate": 0.00017364438839848675,
      "loss": 2.1413,
      "mean_token_accuracy": 0.6139265760779381,
      "num_tokens": 701734.0,
      "step": 310
    },
    {
      "epoch": 0.5688888888888889,
      "grad_norm": 1.3597490787506104,
      "learning_rate": 0.00017238335435056746,
      "loss": 2.1271,
      "mean_token_accuracy": 0.6106095433235168,
      "num_tokens": 724815.0,
      "step": 320
    },
    {
      "epoch": 0.5866666666666667,
      "grad_norm": 1.4757016897201538,
      "learning_rate": 0.00017112232030264818,
      "loss": 2.133,
      "mean_token_accuracy": 0.6147415205836296,
      "num_tokens": 746903.0,
      "step": 330
    },
    {
      "epoch": 0.6044444444444445,
      "grad_norm": 1.4856476783752441,
      "learning_rate": 0.00016986128625472887,
      "loss": 2.1201,
      "mean_token_accuracy": 0.6161383926868439,
      "num_tokens": 768982.0,
      "step": 340
    },
    {
      "epoch": 0.6222222222222222,
      "grad_norm": 1.2596303224563599,
      "learning_rate": 0.0001686002522068096,
      "loss": 2.1392,
      "mean_token_accuracy": 0.6150005847215653,
      "num_tokens": 791061.0,
      "step": 350
    },
    {
      "epoch": 0.64,
      "grad_norm": 1.3324636220932007,
      "learning_rate": 0.00016733921815889028,
      "loss": 2.1201,
      "mean_token_accuracy": 0.6171063780784607,
      "num_tokens": 813112.0,
      "step": 360
    },
    {
      "epoch": 0.6577777777777778,
      "grad_norm": 1.419053316116333,
      "learning_rate": 0.000166078184110971,
      "loss": 2.1237,
      "mean_token_accuracy": 0.6111394688487053,
      "num_tokens": 835469.0,
      "step": 370
    },
    {
      "epoch": 0.6755555555555556,
      "grad_norm": 1.4507274627685547,
      "learning_rate": 0.0001648171500630517,
      "loss": 2.1387,
      "mean_token_accuracy": 0.604290933907032,
      "num_tokens": 857795.0,
      "step": 380
    },
    {
      "epoch": 0.6933333333333334,
      "grad_norm": 1.284505844116211,
      "learning_rate": 0.0001635561160151324,
      "loss": 2.1,
      "mean_token_accuracy": 0.6181465938687325,
      "num_tokens": 879659.0,
      "step": 390
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 1.5179046392440796,
      "learning_rate": 0.00016229508196721312,
      "loss": 2.0813,
      "step": 400
    },
    {
      "epoch": 0.7111111111111111,
      "eval_loss": 2.0953471660614014,
      "eval_mean_token_accuracy": 0.618859866142273,
      "eval_num_tokens": 902240.0,
      "eval_runtime": 30.5153,
      "eval_samples_per_second": 32.77,
      "eval_steps_per_second": 8.193,
      "step": 400
    },
    {
      "epoch": 0.7288888888888889,
      "grad_norm": 1.3377336263656616,
      "learning_rate": 0.0001610340479192938,
      "loss": 2.1049,
      "mean_token_accuracy": 0.6189975582063199,
      "num_tokens": 925091.0,
      "step": 410
    },
    {
      "epoch": 0.7466666666666667,
      "grad_norm": 1.406614065170288,
      "learning_rate": 0.00015977301387137452,
      "loss": 2.1343,
      "mean_token_accuracy": 0.6101128354668617,
      "num_tokens": 948151.0,
      "step": 420
    },
    {
      "epoch": 0.7644444444444445,
      "grad_norm": 1.3494964838027954,
      "learning_rate": 0.00015851197982345524,
      "loss": 2.0506,
      "mean_token_accuracy": 0.6257941454648972,
      "num_tokens": 970339.0,
      "step": 430
    },
    {
      "epoch": 0.7822222222222223,
      "grad_norm": 1.3070355653762817,
      "learning_rate": 0.00015725094577553593,
      "loss": 2.0955,
      "mean_token_accuracy": 0.6162661850452423,
      "num_tokens": 993552.0,
      "step": 440
    },
    {
      "epoch": 0.8,
      "grad_norm": 1.3954617977142334,
      "learning_rate": 0.00015598991172761665,
      "loss": 2.1119,
      "mean_token_accuracy": 0.6154530435800553,
      "num_tokens": 1015564.0,
      "step": 450
    },
    {
      "epoch": 0.8177777777777778,
      "grad_norm": 1.4015129804611206,
      "learning_rate": 0.00015472887767969734,
      "loss": 2.0153,
      "mean_token_accuracy": 0.6296211943030358,
      "num_tokens": 1037721.0,
      "step": 460
    },
    {
      "epoch": 0.8355555555555556,
      "grad_norm": 1.41290283203125,
      "learning_rate": 0.00015346784363177806,
      "loss": 2.0914,
      "mean_token_accuracy": 0.6156619966030121,
      "num_tokens": 1060627.0,
      "step": 470
    },
    {
      "epoch": 0.8533333333333334,
      "grad_norm": 1.3715571165084839,
      "learning_rate": 0.00015220680958385877,
      "loss": 2.0674,
      "mean_token_accuracy": 0.6202241629362106,
      "num_tokens": 1082672.0,
      "step": 480
    },
    {
      "epoch": 0.8711111111111111,
      "grad_norm": 1.3797943592071533,
      "learning_rate": 0.00015094577553593946,
      "loss": 2.0677,
      "mean_token_accuracy": 0.6200241416692733,
      "num_tokens": 1104857.0,
      "step": 490
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 1.3080323934555054,
      "learning_rate": 0.00014968474148802018,
      "loss": 2.068,
      "mean_token_accuracy": 0.618759186565876,
      "num_tokens": 1127612.0,
      "step": 500
    },
    {
      "epoch": 0.9066666666666666,
      "grad_norm": 1.4698944091796875,
      "learning_rate": 0.0001484237074401009,
      "loss": 2.0736,
      "mean_token_accuracy": 0.6208444744348526,
      "num_tokens": 1150411.0,
      "step": 510
    },
    {
      "epoch": 0.9244444444444444,
      "grad_norm": 1.3741239309310913,
      "learning_rate": 0.0001471626733921816,
      "loss": 2.0887,
      "mean_token_accuracy": 0.6161769673228263,
      "num_tokens": 1172683.0,
      "step": 520
    },
    {
      "epoch": 0.9422222222222222,
      "grad_norm": 1.3237783908843994,
      "learning_rate": 0.0001459016393442623,
      "loss": 1.9793,
      "mean_token_accuracy": 0.6360917523503303,
      "num_tokens": 1194160.0,
      "step": 530
    },
    {
      "epoch": 0.96,
      "grad_norm": 1.3243825435638428,
      "learning_rate": 0.000144640605296343,
      "loss": 2.0095,
      "mean_token_accuracy": 0.6338530048727989,
      "num_tokens": 1215760.0,
      "step": 540
    },
    {
      "epoch": 0.9777777777777777,
      "grad_norm": 1.3875395059585571,
      "learning_rate": 0.0001433795712484237,
      "loss": 2.0715,
      "mean_token_accuracy": 0.6245882242918015,
      "num_tokens": 1238191.0,
      "step": 550
    },
    {
      "epoch": 0.9955555555555555,
      "grad_norm": 1.390081524848938,
      "learning_rate": 0.00014211853720050443,
      "loss": 2.0421,
      "mean_token_accuracy": 0.6229756608605385,
      "num_tokens": 1260429.0,
      "step": 560
    },
    {
      "epoch": 1.0124444444444445,
      "grad_norm": 1.2626862525939941,
      "learning_rate": 0.00014085750315258512,
      "loss": 1.9614,
      "mean_token_accuracy": 0.6359066555374547,
      "num_tokens": 1281232.0,
      "step": 570
    },
    {
      "epoch": 1.0302222222222222,
      "grad_norm": 1.3941477537155151,
      "learning_rate": 0.00013959646910466583,
      "loss": 1.8782,
      "mean_token_accuracy": 0.6482988312840462,
      "num_tokens": 1304130.0,
      "step": 580
    },
    {
      "epoch": 1.048,
      "grad_norm": 1.4020227193832397,
      "learning_rate": 0.00013833543505674652,
      "loss": 1.8602,
      "mean_token_accuracy": 0.65641980022192,
      "num_tokens": 1326753.0,
      "step": 590
    },
    {
      "epoch": 1.0657777777777777,
      "grad_norm": 1.285709023475647,
      "learning_rate": 0.00013707440100882724,
      "loss": 1.8661,
      "step": 600
    },
    {
      "epoch": 1.0657777777777777,
      "eval_loss": 2.018383264541626,
      "eval_mean_token_accuracy": 0.6295498251914978,
      "eval_num_tokens": 1348985.0,
      "eval_runtime": 30.5245,
      "eval_samples_per_second": 32.761,
      "eval_steps_per_second": 8.19,
      "step": 600
    },
    {
      "epoch": 1.0835555555555556,
      "grad_norm": 1.2745097875595093,
      "learning_rate": 0.00013581336696090796,
      "loss": 1.8705,
      "mean_token_accuracy": 0.650456714630127,
      "num_tokens": 1371318.0,
      "step": 610
    },
    {
      "epoch": 1.1013333333333333,
      "grad_norm": 1.3518744707107544,
      "learning_rate": 0.00013455233291298865,
      "loss": 1.9056,
      "mean_token_accuracy": 0.6455502569675445,
      "num_tokens": 1393816.0,
      "step": 620
    },
    {
      "epoch": 1.1191111111111112,
      "grad_norm": 1.4413272142410278,
      "learning_rate": 0.00013329129886506937,
      "loss": 1.8994,
      "mean_token_accuracy": 0.6459770023822784,
      "num_tokens": 1416529.0,
      "step": 630
    },
    {
      "epoch": 1.1368888888888888,
      "grad_norm": 1.3811439275741577,
      "learning_rate": 0.00013203026481715006,
      "loss": 1.9138,
      "mean_token_accuracy": 0.6459063500165939,
      "num_tokens": 1438970.0,
      "step": 640
    },
    {
      "epoch": 1.1546666666666667,
      "grad_norm": 1.3642174005508423,
      "learning_rate": 0.00013076923076923077,
      "loss": 1.8892,
      "mean_token_accuracy": 0.6444340243935585,
      "num_tokens": 1461324.0,
      "step": 650
    },
    {
      "epoch": 1.1724444444444444,
      "grad_norm": 1.4544634819030762,
      "learning_rate": 0.0001295081967213115,
      "loss": 1.9248,
      "mean_token_accuracy": 0.6391839399933815,
      "num_tokens": 1484246.0,
      "step": 660
    },
    {
      "epoch": 1.1902222222222223,
      "grad_norm": 1.3715091943740845,
      "learning_rate": 0.00012824716267339218,
      "loss": 1.9105,
      "mean_token_accuracy": 0.6393024668097496,
      "num_tokens": 1507305.0,
      "step": 670
    },
    {
      "epoch": 1.208,
      "grad_norm": 1.3897929191589355,
      "learning_rate": 0.0001269861286254729,
      "loss": 1.8714,
      "mean_token_accuracy": 0.6521286174654961,
      "num_tokens": 1529082.0,
      "step": 680
    },
    {
      "epoch": 1.2257777777777779,
      "grad_norm": 1.3576809167861938,
      "learning_rate": 0.00012572509457755359,
      "loss": 1.8677,
      "mean_token_accuracy": 0.6498221024870873,
      "num_tokens": 1551159.0,
      "step": 690
    },
    {
      "epoch": 1.2435555555555555,
      "grad_norm": 1.3156862258911133,
      "learning_rate": 0.0001244640605296343,
      "loss": 1.8996,
      "mean_token_accuracy": 0.6485181763768196,
      "num_tokens": 1573348.0,
      "step": 700
    },
    {
      "epoch": 1.2613333333333334,
      "grad_norm": 1.4738845825195312,
      "learning_rate": 0.00012320302648171502,
      "loss": 1.8953,
      "mean_token_accuracy": 0.6465991452336312,
      "num_tokens": 1595989.0,
      "step": 710
    },
    {
      "epoch": 1.279111111111111,
      "grad_norm": 1.5254158973693848,
      "learning_rate": 0.00012194199243379571,
      "loss": 1.9236,
      "mean_token_accuracy": 0.6474427729845047,
      "num_tokens": 1617895.0,
      "step": 720
    },
    {
      "epoch": 1.2968888888888888,
      "grad_norm": 1.4867346286773682,
      "learning_rate": 0.00012068095838587643,
      "loss": 1.8766,
      "mean_token_accuracy": 0.6491386488080024,
      "num_tokens": 1640415.0,
      "step": 730
    },
    {
      "epoch": 1.3146666666666667,
      "grad_norm": 1.3776379823684692,
      "learning_rate": 0.00011941992433795712,
      "loss": 1.8644,
      "mean_token_accuracy": 0.6499749347567558,
      "num_tokens": 1662713.0,
      "step": 740
    },
    {
      "epoch": 1.3324444444444445,
      "grad_norm": 1.420027256011963,
      "learning_rate": 0.00011815889029003783,
      "loss": 1.8874,
      "mean_token_accuracy": 0.648992708325386,
      "num_tokens": 1684783.0,
      "step": 750
    },
    {
      "epoch": 1.3502222222222222,
      "grad_norm": 1.356441855430603,
      "learning_rate": 0.00011689785624211855,
      "loss": 1.8937,
      "mean_token_accuracy": 0.6503370434045792,
      "num_tokens": 1706623.0,
      "step": 760
    },
    {
      "epoch": 1.3679999999999999,
      "grad_norm": 1.4901665449142456,
      "learning_rate": 0.00011563682219419924,
      "loss": 1.9094,
      "mean_token_accuracy": 0.6417872324585915,
      "num_tokens": 1729494.0,
      "step": 770
    },
    {
      "epoch": 1.3857777777777778,
      "grad_norm": 1.3679572343826294,
      "learning_rate": 0.00011437578814627996,
      "loss": 1.8841,
      "mean_token_accuracy": 0.6478032737970352,
      "num_tokens": 1752045.0,
      "step": 780
    },
    {
      "epoch": 1.4035555555555557,
      "grad_norm": 1.3518086671829224,
      "learning_rate": 0.00011311475409836065,
      "loss": 1.9021,
      "mean_token_accuracy": 0.6460829824209213,
      "num_tokens": 1775601.0,
      "step": 790
    },
    {
      "epoch": 1.4213333333333333,
      "grad_norm": 1.400870442390442,
      "learning_rate": 0.00011185372005044137,
      "loss": 1.8566,
      "step": 800
    },
    {
      "epoch": 1.4213333333333333,
      "eval_loss": 1.9762645959854126,
      "eval_mean_token_accuracy": 0.6358254022598266,
      "eval_num_tokens": 1798633.0,
      "eval_runtime": 30.7115,
      "eval_samples_per_second": 32.561,
      "eval_steps_per_second": 8.14,
      "step": 800
    },
    {
      "epoch": 1.439111111111111,
      "grad_norm": 1.4487619400024414,
      "learning_rate": 0.00011059268600252208,
      "loss": 1.8417,
      "mean_token_accuracy": 0.6496566243469715,
      "num_tokens": 1820656.0,
      "step": 810
    },
    {
      "epoch": 1.456888888888889,
      "grad_norm": 1.4507944583892822,
      "learning_rate": 0.00010933165195460277,
      "loss": 1.8829,
      "mean_token_accuracy": 0.647446171939373,
      "num_tokens": 1842871.0,
      "step": 820
    },
    {
      "epoch": 1.4746666666666668,
      "grad_norm": 1.3563170433044434,
      "learning_rate": 0.00010807061790668349,
      "loss": 1.8508,
      "mean_token_accuracy": 0.6544376760721207,
      "num_tokens": 1865652.0,
      "step": 830
    },
    {
      "epoch": 1.4924444444444445,
      "grad_norm": 1.366861343383789,
      "learning_rate": 0.00010680958385876418,
      "loss": 1.8756,
      "mean_token_accuracy": 0.648781743645668,
      "num_tokens": 1888455.0,
      "step": 840
    },
    {
      "epoch": 1.5102222222222221,
      "grad_norm": 1.5031019449234009,
      "learning_rate": 0.0001055485498108449,
      "loss": 1.8461,
      "mean_token_accuracy": 0.6583632439374923,
      "num_tokens": 1910676.0,
      "step": 850
    },
    {
      "epoch": 1.528,
      "grad_norm": 1.5248113870620728,
      "learning_rate": 0.00010428751576292561,
      "loss": 1.8857,
      "mean_token_accuracy": 0.6470584884285927,
      "num_tokens": 1933253.0,
      "step": 860
    },
    {
      "epoch": 1.545777777777778,
      "grad_norm": 1.4354236125946045,
      "learning_rate": 0.0001030264817150063,
      "loss": 1.892,
      "mean_token_accuracy": 0.6445729210972786,
      "num_tokens": 1955820.0,
      "step": 870
    },
    {
      "epoch": 1.5635555555555556,
      "grad_norm": 1.4288746118545532,
      "learning_rate": 0.00010176544766708702,
      "loss": 1.878,
      "mean_token_accuracy": 0.6476826578378677,
      "num_tokens": 1978120.0,
      "step": 880
    },
    {
      "epoch": 1.5813333333333333,
      "grad_norm": 1.433902382850647,
      "learning_rate": 0.00010050441361916771,
      "loss": 1.8199,
      "mean_token_accuracy": 0.6561270505189896,
      "num_tokens": 2000508.0,
      "step": 890
    },
    {
      "epoch": 1.5991111111111111,
      "grad_norm": 1.332987904548645,
      "learning_rate": 9.924337957124843e-05,
      "loss": 1.8555,
      "mean_token_accuracy": 0.6499203637242317,
      "num_tokens": 2023356.0,
      "step": 900
    },
    {
      "epoch": 1.616888888888889,
      "grad_norm": 1.3830794095993042,
      "learning_rate": 9.798234552332913e-05,
      "loss": 1.8108,
      "mean_token_accuracy": 0.6618377715349197,
      "num_tokens": 2045965.0,
      "step": 910
    },
    {
      "epoch": 1.6346666666666667,
      "grad_norm": 1.3988080024719238,
      "learning_rate": 9.672131147540983e-05,
      "loss": 1.8791,
      "mean_token_accuracy": 0.6456288158893585,
      "num_tokens": 2069108.0,
      "step": 920
    },
    {
      "epoch": 1.6524444444444444,
      "grad_norm": 1.398549199104309,
      "learning_rate": 9.546027742749055e-05,
      "loss": 1.8885,
      "mean_token_accuracy": 0.6464410901069642,
      "num_tokens": 2091755.0,
      "step": 930
    },
    {
      "epoch": 1.6702222222222223,
      "grad_norm": 1.5381189584732056,
      "learning_rate": 9.419924337957125e-05,
      "loss": 1.853,
      "mean_token_accuracy": 0.6534279838204384,
      "num_tokens": 2114496.0,
      "step": 940
    },
    {
      "epoch": 1.688,
      "grad_norm": 1.4101791381835938,
      "learning_rate": 9.293820933165196e-05,
      "loss": 1.8696,
      "mean_token_accuracy": 0.6475102975964546,
      "num_tokens": 2137041.0,
      "step": 950
    },
    {
      "epoch": 1.7057777777777776,
      "grad_norm": 1.496955156326294,
      "learning_rate": 9.167717528373266e-05,
      "loss": 1.8752,
      "mean_token_accuracy": 0.6469297721982002,
      "num_tokens": 2159711.0,
      "step": 960
    },
    {
      "epoch": 1.7235555555555555,
      "grad_norm": 1.4269644021987915,
      "learning_rate": 9.041614123581336e-05,
      "loss": 1.8773,
      "mean_token_accuracy": 0.6508476585149765,
      "num_tokens": 2181675.0,
      "step": 970
    },
    {
      "epoch": 1.7413333333333334,
      "grad_norm": 1.4438400268554688,
      "learning_rate": 8.915510718789408e-05,
      "loss": 1.8433,
      "mean_token_accuracy": 0.656335887312889,
      "num_tokens": 2204671.0,
      "step": 980
    },
    {
      "epoch": 1.759111111111111,
      "grad_norm": 1.3846147060394287,
      "learning_rate": 8.789407313997479e-05,
      "loss": 1.8649,
      "mean_token_accuracy": 0.6451441869139671,
      "num_tokens": 2227866.0,
      "step": 990
    },
    {
      "epoch": 1.7768888888888887,
      "grad_norm": 1.5432794094085693,
      "learning_rate": 8.663303909205549e-05,
      "loss": 1.8435,
      "step": 1000
    },
    {
      "epoch": 1.7768888888888887,
      "eval_loss": 1.9385051727294922,
      "eval_mean_token_accuracy": 0.6399562013149261,
      "eval_num_tokens": 2250379.0,
      "eval_runtime": 30.7428,
      "eval_samples_per_second": 32.528,
      "eval_steps_per_second": 8.132,
      "step": 1000
    },
    {
      "epoch": 1.7946666666666666,
      "grad_norm": 1.4225345849990845,
      "learning_rate": 8.537200504413619e-05,
      "loss": 1.8767,
      "mean_token_accuracy": 0.6512193940579891,
      "num_tokens": 2272999.0,
      "step": 1010
    },
    {
      "epoch": 1.8124444444444445,
      "grad_norm": 1.3732675313949585,
      "learning_rate": 8.41109709962169e-05,
      "loss": 1.845,
      "mean_token_accuracy": 0.6550421059131623,
      "num_tokens": 2295110.0,
      "step": 1020
    },
    {
      "epoch": 1.8302222222222222,
      "grad_norm": 1.3867266178131104,
      "learning_rate": 8.284993694829761e-05,
      "loss": 1.8236,
      "mean_token_accuracy": 0.6565809994935989,
      "num_tokens": 2317432.0,
      "step": 1030
    },
    {
      "epoch": 1.8479999999999999,
      "grad_norm": 1.3360997438430786,
      "learning_rate": 8.158890290037832e-05,
      "loss": 1.8642,
      "mean_token_accuracy": 0.6463637053966522,
      "num_tokens": 2340305.0,
      "step": 1040
    },
    {
      "epoch": 1.8657777777777778,
      "grad_norm": 1.4467201232910156,
      "learning_rate": 8.032786885245902e-05,
      "loss": 1.8661,
      "mean_token_accuracy": 0.653101560473442,
      "num_tokens": 2362746.0,
      "step": 1050
    },
    {
      "epoch": 1.8835555555555556,
      "grad_norm": 1.3943202495574951,
      "learning_rate": 7.906683480453972e-05,
      "loss": 1.8399,
      "mean_token_accuracy": 0.6510771587491035,
      "num_tokens": 2385647.0,
      "step": 1060
    },
    {
      "epoch": 1.9013333333333333,
      "grad_norm": 1.4589335918426514,
      "learning_rate": 7.780580075662043e-05,
      "loss": 1.8522,
      "mean_token_accuracy": 0.6478627189993859,
      "num_tokens": 2408732.0,
      "step": 1070
    },
    {
      "epoch": 1.919111111111111,
      "grad_norm": 1.5676307678222656,
      "learning_rate": 7.654476670870114e-05,
      "loss": 1.8316,
      "mean_token_accuracy": 0.656819324195385,
      "num_tokens": 2431137.0,
      "step": 1080
    },
    {
      "epoch": 1.9368888888888889,
      "grad_norm": 1.3882263898849487,
      "learning_rate": 7.528373266078185e-05,
      "loss": 1.7965,
      "mean_token_accuracy": 0.6616187065839767,
      "num_tokens": 2453517.0,
      "step": 1090
    },
    {
      "epoch": 1.9546666666666668,
      "grad_norm": 1.5195387601852417,
      "learning_rate": 7.402269861286255e-05,
      "loss": 1.8346,
      "mean_token_accuracy": 0.6537548035383225,
      "num_tokens": 2475737.0,
      "step": 1100
    },
    {
      "epoch": 1.9724444444444444,
      "grad_norm": 1.3485065698623657,
      "learning_rate": 7.276166456494325e-05,
      "loss": 1.835,
      "mean_token_accuracy": 0.6512499779462815,
      "num_tokens": 2497880.0,
      "step": 1110
    },
    {
      "epoch": 1.9902222222222221,
      "grad_norm": 1.5932726860046387,
      "learning_rate": 7.150063051702396e-05,
      "loss": 1.8432,
      "mean_token_accuracy": 0.6523947417736053,
      "num_tokens": 2519974.0,
      "step": 1120
    },
    {
      "epoch": 2.007111111111111,
      "grad_norm": 1.3682020902633667,
      "learning_rate": 7.023959646910467e-05,
      "loss": 1.7383,
      "mean_token_accuracy": 0.6761166123967421,
      "num_tokens": 2540296.0,
      "step": 1130
    },
    {
      "epoch": 2.024888888888889,
      "grad_norm": 1.4417686462402344,
      "learning_rate": 6.897856242118538e-05,
      "loss": 1.697,
      "mean_token_accuracy": 0.6766574695706368,
      "num_tokens": 2561919.0,
      "step": 1140
    },
    {
      "epoch": 2.042666666666667,
      "grad_norm": 1.375542163848877,
      "learning_rate": 6.771752837326608e-05,
      "loss": 1.7361,
      "mean_token_accuracy": 0.6702861517667771,
      "num_tokens": 2584915.0,
      "step": 1150
    },
    {
      "epoch": 2.0604444444444443,
      "grad_norm": 1.4783133268356323,
      "learning_rate": 6.645649432534678e-05,
      "loss": 1.6927,
      "mean_token_accuracy": 0.6790769457817077,
      "num_tokens": 2606857.0,
      "step": 1160
    },
    {
      "epoch": 2.078222222222222,
      "grad_norm": 1.5346624851226807,
      "learning_rate": 6.519546027742749e-05,
      "loss": 1.6938,
      "mean_token_accuracy": 0.6730351656675339,
      "num_tokens": 2629737.0,
      "step": 1170
    },
    {
      "epoch": 2.096,
      "grad_norm": 1.430298089981079,
      "learning_rate": 6.39344262295082e-05,
      "loss": 1.6476,
      "mean_token_accuracy": 0.6835471093654633,
      "num_tokens": 2651936.0,
      "step": 1180
    },
    {
      "epoch": 2.113777777777778,
      "grad_norm": 1.4968252182006836,
      "learning_rate": 6.267339218158891e-05,
      "loss": 1.7242,
      "mean_token_accuracy": 0.6697919353842735,
      "num_tokens": 2675241.0,
      "step": 1190
    },
    {
      "epoch": 2.1315555555555554,
      "grad_norm": 1.3892192840576172,
      "learning_rate": 6.141235813366961e-05,
      "loss": 1.6916,
      "step": 1200
    },
    {
      "epoch": 2.1315555555555554,
      "eval_loss": 1.934017539024353,
      "eval_mean_token_accuracy": 0.6421641361713409,
      "eval_num_tokens": 2698219.0,
      "eval_runtime": 30.3979,
      "eval_samples_per_second": 32.897,
      "eval_steps_per_second": 8.224,
      "step": 1200
    },
    {
      "epoch": 2.1493333333333333,
      "grad_norm": 1.4893920421600342,
      "learning_rate": 6.0151324085750316e-05,
      "loss": 1.7047,
      "mean_token_accuracy": 0.6755686655640603,
      "num_tokens": 2721580.0,
      "step": 1210
    },
    {
      "epoch": 2.167111111111111,
      "grad_norm": 1.50564444065094,
      "learning_rate": 5.889029003783102e-05,
      "loss": 1.7058,
      "mean_token_accuracy": 0.6746152400970459,
      "num_tokens": 2744110.0,
      "step": 1220
    },
    {
      "epoch": 2.1848888888888887,
      "grad_norm": 1.461367130279541,
      "learning_rate": 5.7629255989911736e-05,
      "loss": 1.684,
      "mean_token_accuracy": 0.6813082948327065,
      "num_tokens": 2765844.0,
      "step": 1230
    },
    {
      "epoch": 2.2026666666666666,
      "grad_norm": 1.553553819656372,
      "learning_rate": 5.636822194199244e-05,
      "loss": 1.6848,
      "mean_token_accuracy": 0.677204079926014,
      "num_tokens": 2788070.0,
      "step": 1240
    },
    {
      "epoch": 2.2204444444444444,
      "grad_norm": 1.4453001022338867,
      "learning_rate": 5.510718789407314e-05,
      "loss": 1.7182,
      "mean_token_accuracy": 0.6765570789575577,
      "num_tokens": 2810964.0,
      "step": 1250
    },
    {
      "epoch": 2.2382222222222223,
      "grad_norm": 1.5605733394622803,
      "learning_rate": 5.384615384615385e-05,
      "loss": 1.6772,
      "mean_token_accuracy": 0.678339496254921,
      "num_tokens": 2833176.0,
      "step": 1260
    },
    {
      "epoch": 2.2560000000000002,
      "grad_norm": 1.514710783958435,
      "learning_rate": 5.258511979823455e-05,
      "loss": 1.7192,
      "mean_token_accuracy": 0.6710417225956917,
      "num_tokens": 2855440.0,
      "step": 1270
    },
    {
      "epoch": 2.2737777777777777,
      "grad_norm": 1.510834813117981,
      "learning_rate": 5.132408575031527e-05,
      "loss": 1.6599,
      "mean_token_accuracy": 0.6822926640510559,
      "num_tokens": 2877713.0,
      "step": 1280
    },
    {
      "epoch": 2.2915555555555556,
      "grad_norm": 1.3550519943237305,
      "learning_rate": 5.006305170239597e-05,
      "loss": 1.7072,
      "mean_token_accuracy": 0.6754195600748062,
      "num_tokens": 2899934.0,
      "step": 1290
    },
    {
      "epoch": 2.3093333333333335,
      "grad_norm": 1.5602107048034668,
      "learning_rate": 4.8802017654476674e-05,
      "loss": 1.7111,
      "mean_token_accuracy": 0.6702851369976998,
      "num_tokens": 2923244.0,
      "step": 1300
    },
    {
      "epoch": 2.327111111111111,
      "grad_norm": 1.5889501571655273,
      "learning_rate": 4.754098360655738e-05,
      "loss": 1.6858,
      "mean_token_accuracy": 0.6781487062573432,
      "num_tokens": 2945580.0,
      "step": 1310
    },
    {
      "epoch": 2.344888888888889,
      "grad_norm": 1.4793872833251953,
      "learning_rate": 4.627994955863809e-05,
      "loss": 1.6799,
      "mean_token_accuracy": 0.6740961462259293,
      "num_tokens": 2969470.0,
      "step": 1320
    },
    {
      "epoch": 2.3626666666666667,
      "grad_norm": 1.6188234090805054,
      "learning_rate": 4.501891551071879e-05,
      "loss": 1.6838,
      "mean_token_accuracy": 0.6732241719961166,
      "num_tokens": 2991982.0,
      "step": 1330
    },
    {
      "epoch": 2.3804444444444446,
      "grad_norm": 1.474108338356018,
      "learning_rate": 4.37578814627995e-05,
      "loss": 1.7024,
      "mean_token_accuracy": 0.675683145225048,
      "num_tokens": 3014206.0,
      "step": 1340
    },
    {
      "epoch": 2.398222222222222,
      "grad_norm": 1.4645053148269653,
      "learning_rate": 4.2496847414880205e-05,
      "loss": 1.6564,
      "mean_token_accuracy": 0.6787498995661736,
      "num_tokens": 3036651.0,
      "step": 1350
    },
    {
      "epoch": 2.416,
      "grad_norm": 1.498451828956604,
      "learning_rate": 4.1235813366960915e-05,
      "loss": 1.694,
      "mean_token_accuracy": 0.6752896070480346,
      "num_tokens": 3058745.0,
      "step": 1360
    },
    {
      "epoch": 2.433777777777778,
      "grad_norm": 1.5558826923370361,
      "learning_rate": 3.997477931904162e-05,
      "loss": 1.7106,
      "mean_token_accuracy": 0.6742441862821579,
      "num_tokens": 3081726.0,
      "step": 1370
    },
    {
      "epoch": 2.4515555555555557,
      "grad_norm": 1.5872586965560913,
      "learning_rate": 3.871374527112232e-05,
      "loss": 1.6848,
      "mean_token_accuracy": 0.6764188826084137,
      "num_tokens": 3104292.0,
      "step": 1380
    },
    {
      "epoch": 2.469333333333333,
      "grad_norm": 1.551299810409546,
      "learning_rate": 3.745271122320303e-05,
      "loss": 1.6909,
      "mean_token_accuracy": 0.6762390181422233,
      "num_tokens": 3126334.0,
      "step": 1390
    },
    {
      "epoch": 2.487111111111111,
      "grad_norm": 1.57632315158844,
      "learning_rate": 3.6191677175283736e-05,
      "loss": 1.7113,
      "step": 1400
    },
    {
      "epoch": 2.487111111111111,
      "eval_loss": 1.922593355178833,
      "eval_mean_token_accuracy": 0.6445384075641633,
      "eval_num_tokens": 3149845.0,
      "eval_runtime": 30.0209,
      "eval_samples_per_second": 33.31,
      "eval_steps_per_second": 8.328,
      "step": 1400
    },
    {
      "epoch": 2.504888888888889,
      "grad_norm": 1.487930178642273,
      "learning_rate": 3.4930643127364446e-05,
      "loss": 1.7087,
      "mean_token_accuracy": 0.6737867616117,
      "num_tokens": 3172117.0,
      "step": 1410
    },
    {
      "epoch": 2.522666666666667,
      "grad_norm": 1.5210868120193481,
      "learning_rate": 3.366960907944515e-05,
      "loss": 1.7009,
      "mean_token_accuracy": 0.6799842938780785,
      "num_tokens": 3194261.0,
      "step": 1420
    },
    {
      "epoch": 2.5404444444444443,
      "grad_norm": 1.6295726299285889,
      "learning_rate": 3.240857503152585e-05,
      "loss": 1.6027,
      "mean_token_accuracy": 0.6899775773286819,
      "num_tokens": 3216455.0,
      "step": 1430
    },
    {
      "epoch": 2.558222222222222,
      "grad_norm": 1.561673879623413,
      "learning_rate": 3.114754098360656e-05,
      "loss": 1.7273,
      "mean_token_accuracy": 0.6699303150177002,
      "num_tokens": 3238359.0,
      "step": 1440
    },
    {
      "epoch": 2.576,
      "grad_norm": 1.5006392002105713,
      "learning_rate": 2.9886506935687263e-05,
      "loss": 1.7243,
      "mean_token_accuracy": 0.6692202746868133,
      "num_tokens": 3261568.0,
      "step": 1450
    },
    {
      "epoch": 2.5937777777777775,
      "grad_norm": 1.602378249168396,
      "learning_rate": 2.8625472887767974e-05,
      "loss": 1.7255,
      "mean_token_accuracy": 0.6683675542473793,
      "num_tokens": 3284517.0,
      "step": 1460
    },
    {
      "epoch": 2.6115555555555554,
      "grad_norm": 1.6410186290740967,
      "learning_rate": 2.7364438839848677e-05,
      "loss": 1.6826,
      "mean_token_accuracy": 0.6792753636837006,
      "num_tokens": 3306623.0,
      "step": 1470
    },
    {
      "epoch": 2.6293333333333333,
      "grad_norm": 1.4993571043014526,
      "learning_rate": 2.610340479192938e-05,
      "loss": 1.6629,
      "mean_token_accuracy": 0.6814461290836334,
      "num_tokens": 3329270.0,
      "step": 1480
    },
    {
      "epoch": 2.647111111111111,
      "grad_norm": 1.4495617151260376,
      "learning_rate": 2.484237074401009e-05,
      "loss": 1.6848,
      "mean_token_accuracy": 0.6769454509019852,
      "num_tokens": 3352533.0,
      "step": 1490
    },
    {
      "epoch": 2.664888888888889,
      "grad_norm": 1.5677741765975952,
      "learning_rate": 2.3581336696090794e-05,
      "loss": 1.6679,
      "mean_token_accuracy": 0.6842619329690933,
      "num_tokens": 3374132.0,
      "step": 1500
    },
    {
      "epoch": 2.6826666666666665,
      "grad_norm": 1.5430514812469482,
      "learning_rate": 2.23203026481715e-05,
      "loss": 1.7122,
      "mean_token_accuracy": 0.6701778277754784,
      "num_tokens": 3397176.0,
      "step": 1510
    },
    {
      "epoch": 2.7004444444444444,
      "grad_norm": 1.5498685836791992,
      "learning_rate": 2.1059268600252208e-05,
      "loss": 1.6631,
      "mean_token_accuracy": 0.6801098987460137,
      "num_tokens": 3418961.0,
      "step": 1520
    },
    {
      "epoch": 2.7182222222222223,
      "grad_norm": 1.5674185752868652,
      "learning_rate": 1.9798234552332915e-05,
      "loss": 1.6677,
      "mean_token_accuracy": 0.679096283018589,
      "num_tokens": 3441815.0,
      "step": 1530
    },
    {
      "epoch": 2.7359999999999998,
      "grad_norm": 1.4433151483535767,
      "learning_rate": 1.8537200504413622e-05,
      "loss": 1.6903,
      "mean_token_accuracy": 0.6747847631573677,
      "num_tokens": 3464582.0,
      "step": 1540
    },
    {
      "epoch": 2.7537777777777777,
      "grad_norm": 1.4974557161331177,
      "learning_rate": 1.7276166456494325e-05,
      "loss": 1.6533,
      "mean_token_accuracy": 0.683028981089592,
      "num_tokens": 3486694.0,
      "step": 1550
    },
    {
      "epoch": 2.7715555555555556,
      "grad_norm": 1.4934104681015015,
      "learning_rate": 1.6015132408575032e-05,
      "loss": 1.699,
      "mean_token_accuracy": 0.6773065477609634,
      "num_tokens": 3508908.0,
      "step": 1560
    },
    {
      "epoch": 2.7893333333333334,
      "grad_norm": 1.5283113718032837,
      "learning_rate": 1.4754098360655739e-05,
      "loss": 1.6329,
      "mean_token_accuracy": 0.6867676630616188,
      "num_tokens": 3530306.0,
      "step": 1570
    },
    {
      "epoch": 2.8071111111111113,
      "grad_norm": 1.6120567321777344,
      "learning_rate": 1.3493064312736444e-05,
      "loss": 1.7164,
      "mean_token_accuracy": 0.6712423786520958,
      "num_tokens": 3552880.0,
      "step": 1580
    },
    {
      "epoch": 2.824888888888889,
      "grad_norm": 1.5219435691833496,
      "learning_rate": 1.223203026481715e-05,
      "loss": 1.7018,
      "mean_token_accuracy": 0.6758274272084236,
      "num_tokens": 3575800.0,
      "step": 1590
    },
    {
      "epoch": 2.8426666666666667,
      "grad_norm": 1.5157614946365356,
      "learning_rate": 1.0970996216897856e-05,
      "loss": 1.6983,
      "step": 1600
    },
    {
      "epoch": 2.8426666666666667,
      "eval_loss": 1.9110984802246094,
      "eval_mean_token_accuracy": 0.6461525177955627,
      "eval_num_tokens": 3598214.0,
      "eval_runtime": 30.748,
      "eval_samples_per_second": 32.522,
      "eval_steps_per_second": 8.131,
      "step": 1600
    },
    {
      "epoch": 2.8604444444444446,
      "grad_norm": 1.5881015062332153,
      "learning_rate": 9.709962168978563e-06,
      "loss": 1.679,
      "mean_token_accuracy": 0.6803914837539196,
      "num_tokens": 3620353.0,
      "step": 1610
    },
    {
      "epoch": 2.878222222222222,
      "grad_norm": 1.5353198051452637,
      "learning_rate": 8.448928121059268e-06,
      "loss": 1.681,
      "mean_token_accuracy": 0.6778546258807182,
      "num_tokens": 3642281.0,
      "step": 1620
    },
    {
      "epoch": 2.896,
      "grad_norm": 1.5839284658432007,
      "learning_rate": 7.187894073139975e-06,
      "loss": 1.6397,
      "mean_token_accuracy": 0.6827977553009987,
      "num_tokens": 3664853.0,
      "step": 1630
    },
    {
      "epoch": 2.913777777777778,
      "grad_norm": 1.6904590129852295,
      "learning_rate": 5.926860025220681e-06,
      "loss": 1.7405,
      "mean_token_accuracy": 0.6691433653235436,
      "num_tokens": 3688123.0,
      "step": 1640
    },
    {
      "epoch": 2.9315555555555557,
      "grad_norm": 1.5607562065124512,
      "learning_rate": 4.665825977301387e-06,
      "loss": 1.7285,
      "mean_token_accuracy": 0.6671519264578819,
      "num_tokens": 3710934.0,
      "step": 1650
    },
    {
      "epoch": 2.9493333333333336,
      "grad_norm": 1.613752841949463,
      "learning_rate": 3.404791929382094e-06,
      "loss": 1.7251,
      "mean_token_accuracy": 0.676729716360569,
      "num_tokens": 3733298.0,
      "step": 1660
    },
    {
      "epoch": 2.967111111111111,
      "grad_norm": 1.4459993839263916,
      "learning_rate": 2.1437578814628e-06,
      "loss": 1.729,
      "mean_token_accuracy": 0.6713886946439743,
      "num_tokens": 3755641.0,
      "step": 1670
    },
    {
      "epoch": 2.984888888888889,
      "grad_norm": 1.6163533926010132,
      "learning_rate": 8.827238335435058e-07,
      "loss": 1.6633,
      "mean_token_accuracy": 0.6792998388409615,
      "num_tokens": 3777938.0,
      "step": 1680
    }
  ],
  "logging_steps": 10,
  "max_steps": 1686,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3348842111787008e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}