| { |
| "best_metric": null, |
| "best_model_checkpoint": null, |
| "epoch": 0.9999074502545118, |
| "eval_steps": 100, |
| "global_step": 2701, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.0018509949097639982, |
| "grad_norm": 2.749824922426202, |
| "learning_rate": 3.690036900369004e-07, |
| "loss": 1.1027, |
| "step": 5 |
| }, |
| { |
| "epoch": 0.0037019898195279964, |
| "grad_norm": 2.7133127995482353, |
| "learning_rate": 7.380073800738008e-07, |
| "loss": 1.107, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.005552984729291994, |
| "grad_norm": 2.5060043757547894, |
| "learning_rate": 1.1070110701107011e-06, |
| "loss": 1.0411, |
| "step": 15 |
| }, |
| { |
| "epoch": 0.007403979639055993, |
| "grad_norm": 2.580301155440442, |
| "learning_rate": 1.4760147601476015e-06, |
| "loss": 1.0394, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.00925497454881999, |
| "grad_norm": 2.101103627946831, |
| "learning_rate": 1.845018450184502e-06, |
| "loss": 1.025, |
| "step": 25 |
| }, |
| { |
| "epoch": 0.011105969458583989, |
| "grad_norm": 1.7798274009053514, |
| "learning_rate": 2.2140221402214023e-06, |
| "loss": 1.0642, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.012956964368347987, |
| "grad_norm": 1.4788395927397677, |
| "learning_rate": 2.5830258302583027e-06, |
| "loss": 0.9636, |
| "step": 35 |
| }, |
| { |
| "epoch": 0.014807959278111986, |
| "grad_norm": 1.4694159866777265, |
| "learning_rate": 2.952029520295203e-06, |
| "loss": 1.0495, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.016658954187875982, |
| "grad_norm": 1.529975198759594, |
| "learning_rate": 3.3210332103321034e-06, |
| "loss": 0.9308, |
| "step": 45 |
| }, |
| { |
| "epoch": 0.01850994909763998, |
| "grad_norm": 1.4286077278093599, |
| "learning_rate": 3.690036900369004e-06, |
| "loss": 0.9909, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.02036094400740398, |
| "grad_norm": 1.336405308330729, |
| "learning_rate": 4.059040590405905e-06, |
| "loss": 0.948, |
| "step": 55 |
| }, |
| { |
| "epoch": 0.022211938917167977, |
| "grad_norm": 1.411657382339046, |
| "learning_rate": 4.428044280442805e-06, |
| "loss": 0.9434, |
| "step": 60 |
| }, |
| { |
| "epoch": 0.024062933826931976, |
| "grad_norm": 1.3370491817510695, |
| "learning_rate": 4.797047970479705e-06, |
| "loss": 0.9112, |
| "step": 65 |
| }, |
| { |
| "epoch": 0.025913928736695974, |
| "grad_norm": 1.238804041054237, |
| "learning_rate": 5.166051660516605e-06, |
| "loss": 0.864, |
| "step": 70 |
| }, |
| { |
| "epoch": 0.027764923646459973, |
| "grad_norm": 1.5683504507074115, |
| "learning_rate": 5.535055350553506e-06, |
| "loss": 0.927, |
| "step": 75 |
| }, |
| { |
| "epoch": 0.02961591855622397, |
| "grad_norm": 1.3407079562724307, |
| "learning_rate": 5.904059040590406e-06, |
| "loss": 0.8787, |
| "step": 80 |
| }, |
| { |
| "epoch": 0.03146691346598797, |
| "grad_norm": 1.1838954340614056, |
| "learning_rate": 6.273062730627307e-06, |
| "loss": 0.9208, |
| "step": 85 |
| }, |
| { |
| "epoch": 0.033317908375751965, |
| "grad_norm": 1.5881922549502974, |
| "learning_rate": 6.642066420664207e-06, |
| "loss": 0.8529, |
| "step": 90 |
| }, |
| { |
| "epoch": 0.035168903285515966, |
| "grad_norm": 1.4451601927412367, |
| "learning_rate": 7.011070110701108e-06, |
| "loss": 0.9031, |
| "step": 95 |
| }, |
| { |
| "epoch": 0.03701989819527996, |
| "grad_norm": 1.2272450676249622, |
| "learning_rate": 7.380073800738008e-06, |
| "loss": 0.8384, |
| "step": 100 |
| }, |
| { |
| "epoch": 0.03701989819527996, |
| "eval_loss": 0.8794825673103333, |
| "eval_runtime": 125.7829, |
| "eval_samples_per_second": 1.018, |
| "eval_steps_per_second": 0.509, |
| "step": 100 |
| }, |
| { |
| "epoch": 0.03887089310504396, |
| "grad_norm": 1.1893073354157337, |
| "learning_rate": 7.749077490774908e-06, |
| "loss": 0.889, |
| "step": 105 |
| }, |
| { |
| "epoch": 0.04072188801480796, |
| "grad_norm": 1.3052090560256295, |
| "learning_rate": 8.11808118081181e-06, |
| "loss": 0.8133, |
| "step": 110 |
| }, |
| { |
| "epoch": 0.04257288292457196, |
| "grad_norm": 1.37698008425289, |
| "learning_rate": 8.48708487084871e-06, |
| "loss": 0.865, |
| "step": 115 |
| }, |
| { |
| "epoch": 0.044423877834335955, |
| "grad_norm": 1.303834447656376, |
| "learning_rate": 8.85608856088561e-06, |
| "loss": 0.846, |
| "step": 120 |
| }, |
| { |
| "epoch": 0.04627487274409996, |
| "grad_norm": 1.2327474858734326, |
| "learning_rate": 9.22509225092251e-06, |
| "loss": 0.7942, |
| "step": 125 |
| }, |
| { |
| "epoch": 0.04812586765386395, |
| "grad_norm": 1.2058003679999099, |
| "learning_rate": 9.59409594095941e-06, |
| "loss": 0.8417, |
| "step": 130 |
| }, |
| { |
| "epoch": 0.04997686256362795, |
| "grad_norm": 1.2290254882715699, |
| "learning_rate": 9.963099630996312e-06, |
| "loss": 0.8701, |
| "step": 135 |
| }, |
| { |
| "epoch": 0.05182785747339195, |
| "grad_norm": 1.1760267952662564, |
| "learning_rate": 1.033210332103321e-05, |
| "loss": 0.8252, |
| "step": 140 |
| }, |
| { |
| "epoch": 0.053678852383155944, |
| "grad_norm": 1.269131376133505, |
| "learning_rate": 1.0701107011070112e-05, |
| "loss": 0.8302, |
| "step": 145 |
| }, |
| { |
| "epoch": 0.055529847292919945, |
| "grad_norm": 1.1597954970515953, |
| "learning_rate": 1.1070110701107012e-05, |
| "loss": 0.8479, |
| "step": 150 |
| }, |
| { |
| "epoch": 0.05738084220268394, |
| "grad_norm": 1.2448895450141289, |
| "learning_rate": 1.1439114391143913e-05, |
| "loss": 0.8521, |
| "step": 155 |
| }, |
| { |
| "epoch": 0.05923183711244794, |
| "grad_norm": 1.1897405950550726, |
| "learning_rate": 1.1808118081180812e-05, |
| "loss": 0.7592, |
| "step": 160 |
| }, |
| { |
| "epoch": 0.06108283202221194, |
| "grad_norm": 1.314365342475427, |
| "learning_rate": 1.2177121771217713e-05, |
| "loss": 0.8289, |
| "step": 165 |
| }, |
| { |
| "epoch": 0.06293382693197594, |
| "grad_norm": 1.3600021570595577, |
| "learning_rate": 1.2546125461254614e-05, |
| "loss": 0.8119, |
| "step": 170 |
| }, |
| { |
| "epoch": 0.06478482184173993, |
| "grad_norm": 1.2381592999546391, |
| "learning_rate": 1.2915129151291515e-05, |
| "loss": 0.8081, |
| "step": 175 |
| }, |
| { |
| "epoch": 0.06663581675150393, |
| "grad_norm": 1.2650464435907114, |
| "learning_rate": 1.3284132841328414e-05, |
| "loss": 0.8173, |
| "step": 180 |
| }, |
| { |
| "epoch": 0.06848681166126794, |
| "grad_norm": 1.2523135217657222, |
| "learning_rate": 1.3653136531365315e-05, |
| "loss": 0.8105, |
| "step": 185 |
| }, |
| { |
| "epoch": 0.07033780657103193, |
| "grad_norm": 1.1760807838355647, |
| "learning_rate": 1.4022140221402215e-05, |
| "loss": 0.8644, |
| "step": 190 |
| }, |
| { |
| "epoch": 0.07218880148079593, |
| "grad_norm": 1.2557569983889052, |
| "learning_rate": 1.4391143911439116e-05, |
| "loss": 0.8031, |
| "step": 195 |
| }, |
| { |
| "epoch": 0.07403979639055992, |
| "grad_norm": 1.2032960225457723, |
| "learning_rate": 1.4760147601476015e-05, |
| "loss": 0.8639, |
| "step": 200 |
| }, |
| { |
| "epoch": 0.07403979639055992, |
| "eval_loss": 0.8400982022285461, |
| "eval_runtime": 125.9532, |
| "eval_samples_per_second": 1.016, |
| "eval_steps_per_second": 0.508, |
| "step": 200 |
| }, |
| { |
| "epoch": 0.07589079130032392, |
| "grad_norm": 1.394672996108549, |
| "learning_rate": 1.5129151291512916e-05, |
| "loss": 0.8164, |
| "step": 205 |
| }, |
| { |
| "epoch": 0.07774178621008793, |
| "grad_norm": 1.3016173569055736, |
| "learning_rate": 1.5498154981549817e-05, |
| "loss": 0.8478, |
| "step": 210 |
| }, |
| { |
| "epoch": 0.07959278111985192, |
| "grad_norm": 1.406780438265723, |
| "learning_rate": 1.5867158671586716e-05, |
| "loss": 0.841, |
| "step": 215 |
| }, |
| { |
| "epoch": 0.08144377602961592, |
| "grad_norm": 1.2377690323264836, |
| "learning_rate": 1.623616236162362e-05, |
| "loss": 0.8287, |
| "step": 220 |
| }, |
| { |
| "epoch": 0.08329477093937991, |
| "grad_norm": 1.39607937433599, |
| "learning_rate": 1.6605166051660518e-05, |
| "loss": 0.8176, |
| "step": 225 |
| }, |
| { |
| "epoch": 0.08514576584914392, |
| "grad_norm": 1.3980920756979587, |
| "learning_rate": 1.697416974169742e-05, |
| "loss": 0.7867, |
| "step": 230 |
| }, |
| { |
| "epoch": 0.08699676075890792, |
| "grad_norm": 1.2563235907029433, |
| "learning_rate": 1.734317343173432e-05, |
| "loss": 0.8447, |
| "step": 235 |
| }, |
| { |
| "epoch": 0.08884775566867191, |
| "grad_norm": 1.4806853438242649, |
| "learning_rate": 1.771217712177122e-05, |
| "loss": 0.807, |
| "step": 240 |
| }, |
| { |
| "epoch": 0.0906987505784359, |
| "grad_norm": 1.2447141754207756, |
| "learning_rate": 1.8081180811808117e-05, |
| "loss": 0.7962, |
| "step": 245 |
| }, |
| { |
| "epoch": 0.09254974548819991, |
| "grad_norm": 1.3080325661809735, |
| "learning_rate": 1.845018450184502e-05, |
| "loss": 0.7745, |
| "step": 250 |
| }, |
| { |
| "epoch": 0.09440074039796391, |
| "grad_norm": 1.179323447687895, |
| "learning_rate": 1.8819188191881922e-05, |
| "loss": 0.8574, |
| "step": 255 |
| }, |
| { |
| "epoch": 0.0962517353077279, |
| "grad_norm": 1.2896705705328326, |
| "learning_rate": 1.918819188191882e-05, |
| "loss": 0.8296, |
| "step": 260 |
| }, |
| { |
| "epoch": 0.0981027302174919, |
| "grad_norm": 1.3644542064844416, |
| "learning_rate": 1.955719557195572e-05, |
| "loss": 0.8251, |
| "step": 265 |
| }, |
| { |
| "epoch": 0.0999537251272559, |
| "grad_norm": 1.286282268357473, |
| "learning_rate": 1.9926199261992623e-05, |
| "loss": 0.7754, |
| "step": 270 |
| }, |
| { |
| "epoch": 0.1018047200370199, |
| "grad_norm": 1.2329112299042364, |
| "learning_rate": 1.999986628620426e-05, |
| "loss": 0.7915, |
| "step": 275 |
| }, |
| { |
| "epoch": 0.1036557149467839, |
| "grad_norm": 1.3173883195813032, |
| "learning_rate": 1.9999323080037623e-05, |
| "loss": 0.866, |
| "step": 280 |
| }, |
| { |
| "epoch": 0.10550670985654789, |
| "grad_norm": 1.2611947477770145, |
| "learning_rate": 1.9998362047068548e-05, |
| "loss": 0.83, |
| "step": 285 |
| }, |
| { |
| "epoch": 0.10735770476631189, |
| "grad_norm": 1.1796656758558173, |
| "learning_rate": 1.9996983227454284e-05, |
| "loss": 0.8357, |
| "step": 290 |
| }, |
| { |
| "epoch": 0.1092086996760759, |
| "grad_norm": 1.1353613990508302, |
| "learning_rate": 1.9995186678809513e-05, |
| "loss": 0.7181, |
| "step": 295 |
| }, |
| { |
| "epoch": 0.11105969458583989, |
| "grad_norm": 1.2567062023606574, |
| "learning_rate": 1.999297247620393e-05, |
| "loss": 0.8598, |
| "step": 300 |
| }, |
| { |
| "epoch": 0.11105969458583989, |
| "eval_loss": 0.8292834758758545, |
| "eval_runtime": 125.837, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.509, |
| "step": 300 |
| }, |
| { |
| "epoch": 0.11291068949560389, |
| "grad_norm": 1.3854824655270053, |
| "learning_rate": 1.999034071215912e-05, |
| "loss": 0.869, |
| "step": 305 |
| }, |
| { |
| "epoch": 0.11476168440536788, |
| "grad_norm": 1.4180966128082728, |
| "learning_rate": 1.998729149664468e-05, |
| "loss": 0.7802, |
| "step": 310 |
| }, |
| { |
| "epoch": 0.11661267931513189, |
| "grad_norm": 1.2687340435719516, |
| "learning_rate": 1.9983824957073632e-05, |
| "loss": 0.7751, |
| "step": 315 |
| }, |
| { |
| "epoch": 0.11846367422489588, |
| "grad_norm": 1.2012101992327227, |
| "learning_rate": 1.99799412382971e-05, |
| "loss": 0.7831, |
| "step": 320 |
| }, |
| { |
| "epoch": 0.12031466913465988, |
| "grad_norm": 1.2085415925635907, |
| "learning_rate": 1.9975640502598243e-05, |
| "loss": 0.788, |
| "step": 325 |
| }, |
| { |
| "epoch": 0.12216566404442387, |
| "grad_norm": 1.2183184035114982, |
| "learning_rate": 1.9970922929685496e-05, |
| "loss": 0.7998, |
| "step": 330 |
| }, |
| { |
| "epoch": 0.12401665895418787, |
| "grad_norm": 1.2657715995907186, |
| "learning_rate": 1.996578871668504e-05, |
| "loss": 0.7978, |
| "step": 335 |
| }, |
| { |
| "epoch": 0.12586765386395188, |
| "grad_norm": 1.1898542144979154, |
| "learning_rate": 1.996023807813258e-05, |
| "loss": 0.7861, |
| "step": 340 |
| }, |
| { |
| "epoch": 0.12771864877371586, |
| "grad_norm": 1.2193829841446657, |
| "learning_rate": 1.995427124596437e-05, |
| "loss": 0.8067, |
| "step": 345 |
| }, |
| { |
| "epoch": 0.12956964368347987, |
| "grad_norm": 1.1563840168462576, |
| "learning_rate": 1.9947888469507527e-05, |
| "loss": 0.7666, |
| "step": 350 |
| }, |
| { |
| "epoch": 0.13142063859324388, |
| "grad_norm": 1.3445764875998873, |
| "learning_rate": 1.9941090015469614e-05, |
| "loss": 0.7855, |
| "step": 355 |
| }, |
| { |
| "epoch": 0.13327163350300786, |
| "grad_norm": 1.2515125913797396, |
| "learning_rate": 1.9933876167927494e-05, |
| "loss": 0.8211, |
| "step": 360 |
| }, |
| { |
| "epoch": 0.13512262841277187, |
| "grad_norm": 1.2363244217826992, |
| "learning_rate": 1.9926247228315455e-05, |
| "loss": 0.7726, |
| "step": 365 |
| }, |
| { |
| "epoch": 0.13697362332253588, |
| "grad_norm": 1.1398366050989162, |
| "learning_rate": 1.9918203515412616e-05, |
| "loss": 0.7809, |
| "step": 370 |
| }, |
| { |
| "epoch": 0.13882461823229986, |
| "grad_norm": 1.1609837477400817, |
| "learning_rate": 1.9909745365329623e-05, |
| "loss": 0.7981, |
| "step": 375 |
| }, |
| { |
| "epoch": 0.14067561314206387, |
| "grad_norm": 1.1428050730123682, |
| "learning_rate": 1.990087313149457e-05, |
| "loss": 0.8055, |
| "step": 380 |
| }, |
| { |
| "epoch": 0.14252660805182785, |
| "grad_norm": 1.1811407177495057, |
| "learning_rate": 1.9891587184638274e-05, |
| "loss": 0.7898, |
| "step": 385 |
| }, |
| { |
| "epoch": 0.14437760296159186, |
| "grad_norm": 1.1089125886531135, |
| "learning_rate": 1.9881887912778738e-05, |
| "loss": 0.8435, |
| "step": 390 |
| }, |
| { |
| "epoch": 0.14622859787135586, |
| "grad_norm": 1.1264567214650973, |
| "learning_rate": 1.9871775721204973e-05, |
| "loss": 0.8027, |
| "step": 395 |
| }, |
| { |
| "epoch": 0.14807959278111985, |
| "grad_norm": 1.2984452085681775, |
| "learning_rate": 1.9861251032460053e-05, |
| "loss": 0.8238, |
| "step": 400 |
| }, |
| { |
| "epoch": 0.14807959278111985, |
| "eval_loss": 0.8187944889068604, |
| "eval_runtime": 125.8941, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.508, |
| "step": 400 |
| }, |
| { |
| "epoch": 0.14993058769088385, |
| "grad_norm": 1.262457467293623, |
| "learning_rate": 1.985031428632345e-05, |
| "loss": 0.7656, |
| "step": 405 |
| }, |
| { |
| "epoch": 0.15178158260064784, |
| "grad_norm": 1.222235652953353, |
| "learning_rate": 1.9838965939792666e-05, |
| "loss": 0.8033, |
| "step": 410 |
| }, |
| { |
| "epoch": 0.15363257751041184, |
| "grad_norm": 1.0872174045726444, |
| "learning_rate": 1.9827206467064133e-05, |
| "loss": 0.8079, |
| "step": 415 |
| }, |
| { |
| "epoch": 0.15548357242017585, |
| "grad_norm": 1.188894944380026, |
| "learning_rate": 1.9815036359513408e-05, |
| "loss": 0.7931, |
| "step": 420 |
| }, |
| { |
| "epoch": 0.15733456732993983, |
| "grad_norm": 1.0474799030998936, |
| "learning_rate": 1.980245612567462e-05, |
| "loss": 0.7752, |
| "step": 425 |
| }, |
| { |
| "epoch": 0.15918556223970384, |
| "grad_norm": 1.0714711020851349, |
| "learning_rate": 1.9789466291219246e-05, |
| "loss": 0.7787, |
| "step": 430 |
| }, |
| { |
| "epoch": 0.16103655714946785, |
| "grad_norm": 1.2916090137573928, |
| "learning_rate": 1.9776067398934122e-05, |
| "loss": 0.8464, |
| "step": 435 |
| }, |
| { |
| "epoch": 0.16288755205923183, |
| "grad_norm": 1.0837760464706834, |
| "learning_rate": 1.9762260008698787e-05, |
| "loss": 0.742, |
| "step": 440 |
| }, |
| { |
| "epoch": 0.16473854696899584, |
| "grad_norm": 1.0638766335715402, |
| "learning_rate": 1.974804469746206e-05, |
| "loss": 0.7909, |
| "step": 445 |
| }, |
| { |
| "epoch": 0.16658954187875982, |
| "grad_norm": 1.1650137243413088, |
| "learning_rate": 1.9733422059217954e-05, |
| "loss": 0.8142, |
| "step": 450 |
| }, |
| { |
| "epoch": 0.16844053678852383, |
| "grad_norm": 1.1494193680521951, |
| "learning_rate": 1.9718392704980852e-05, |
| "loss": 0.7187, |
| "step": 455 |
| }, |
| { |
| "epoch": 0.17029153169828784, |
| "grad_norm": 1.141831785166932, |
| "learning_rate": 1.9702957262759964e-05, |
| "loss": 0.7338, |
| "step": 460 |
| }, |
| { |
| "epoch": 0.17214252660805182, |
| "grad_norm": 1.1597583259361357, |
| "learning_rate": 1.9687116377533102e-05, |
| "loss": 0.802, |
| "step": 465 |
| }, |
| { |
| "epoch": 0.17399352151781583, |
| "grad_norm": 1.1829435674368602, |
| "learning_rate": 1.9670870711219708e-05, |
| "loss": 0.7982, |
| "step": 470 |
| }, |
| { |
| "epoch": 0.1758445164275798, |
| "grad_norm": 1.2456510178235929, |
| "learning_rate": 1.9654220942653223e-05, |
| "loss": 0.796, |
| "step": 475 |
| }, |
| { |
| "epoch": 0.17769551133734382, |
| "grad_norm": 1.1776301109970682, |
| "learning_rate": 1.9637167767552687e-05, |
| "loss": 0.7754, |
| "step": 480 |
| }, |
| { |
| "epoch": 0.17954650624710783, |
| "grad_norm": 1.116849386127364, |
| "learning_rate": 1.9619711898493707e-05, |
| "loss": 0.7919, |
| "step": 485 |
| }, |
| { |
| "epoch": 0.1813975011568718, |
| "grad_norm": 1.090344008956795, |
| "learning_rate": 1.9601854064878645e-05, |
| "loss": 0.84, |
| "step": 490 |
| }, |
| { |
| "epoch": 0.18324849606663582, |
| "grad_norm": 1.0862481361440726, |
| "learning_rate": 1.9583595012906173e-05, |
| "loss": 0.7697, |
| "step": 495 |
| }, |
| { |
| "epoch": 0.18509949097639983, |
| "grad_norm": 1.142567519924341, |
| "learning_rate": 1.956493550554006e-05, |
| "loss": 0.7635, |
| "step": 500 |
| }, |
| { |
| "epoch": 0.18509949097639983, |
| "eval_loss": 0.8105577230453491, |
| "eval_runtime": 125.9442, |
| "eval_samples_per_second": 1.016, |
| "eval_steps_per_second": 0.508, |
| "step": 500 |
| }, |
| { |
| "epoch": 0.1869504858861638, |
| "grad_norm": 1.023479803471901, |
| "learning_rate": 1.954587632247732e-05, |
| "loss": 0.7747, |
| "step": 505 |
| }, |
| { |
| "epoch": 0.18880148079592782, |
| "grad_norm": 1.063385408309198, |
| "learning_rate": 1.9526418260115615e-05, |
| "loss": 0.75, |
| "step": 510 |
| }, |
| { |
| "epoch": 0.1906524757056918, |
| "grad_norm": 1.1699474105718337, |
| "learning_rate": 1.9506562131519978e-05, |
| "loss": 0.7914, |
| "step": 515 |
| }, |
| { |
| "epoch": 0.1925034706154558, |
| "grad_norm": 1.2205261625534392, |
| "learning_rate": 1.9486308766388843e-05, |
| "loss": 0.8228, |
| "step": 520 |
| }, |
| { |
| "epoch": 0.19435446552521982, |
| "grad_norm": 1.2493323461591765, |
| "learning_rate": 1.9465659011019384e-05, |
| "loss": 0.7494, |
| "step": 525 |
| }, |
| { |
| "epoch": 0.1962054604349838, |
| "grad_norm": 1.083050859566808, |
| "learning_rate": 1.9444613728272127e-05, |
| "loss": 0.7478, |
| "step": 530 |
| }, |
| { |
| "epoch": 0.1980564553447478, |
| "grad_norm": 1.122573251625821, |
| "learning_rate": 1.9423173797534924e-05, |
| "loss": 0.8088, |
| "step": 535 |
| }, |
| { |
| "epoch": 0.1999074502545118, |
| "grad_norm": 1.2528116581295516, |
| "learning_rate": 1.9401340114686187e-05, |
| "loss": 0.7854, |
| "step": 540 |
| }, |
| { |
| "epoch": 0.2017584451642758, |
| "grad_norm": 1.2245250113750425, |
| "learning_rate": 1.9379113592057468e-05, |
| "loss": 0.7745, |
| "step": 545 |
| }, |
| { |
| "epoch": 0.2036094400740398, |
| "grad_norm": 1.3603783721746314, |
| "learning_rate": 1.9356495158395317e-05, |
| "loss": 0.8214, |
| "step": 550 |
| }, |
| { |
| "epoch": 0.20546043498380379, |
| "grad_norm": 1.1676680192314415, |
| "learning_rate": 1.9333485758822495e-05, |
| "loss": 0.7731, |
| "step": 555 |
| }, |
| { |
| "epoch": 0.2073114298935678, |
| "grad_norm": 1.2142076942764124, |
| "learning_rate": 1.931008635479847e-05, |
| "loss": 0.8128, |
| "step": 560 |
| }, |
| { |
| "epoch": 0.2091624248033318, |
| "grad_norm": 1.1799986315552842, |
| "learning_rate": 1.9286297924079244e-05, |
| "loss": 0.7536, |
| "step": 565 |
| }, |
| { |
| "epoch": 0.21101341971309578, |
| "grad_norm": 1.0965045706780743, |
| "learning_rate": 1.92621214606765e-05, |
| "loss": 0.7744, |
| "step": 570 |
| }, |
| { |
| "epoch": 0.2128644146228598, |
| "grad_norm": 1.2434542202635104, |
| "learning_rate": 1.9237557974816063e-05, |
| "loss": 0.7965, |
| "step": 575 |
| }, |
| { |
| "epoch": 0.21471540953262377, |
| "grad_norm": 1.2091983359476026, |
| "learning_rate": 1.921260849289568e-05, |
| "loss": 0.7837, |
| "step": 580 |
| }, |
| { |
| "epoch": 0.21656640444238778, |
| "grad_norm": 1.1940311597580997, |
| "learning_rate": 1.9187274057442153e-05, |
| "loss": 0.774, |
| "step": 585 |
| }, |
| { |
| "epoch": 0.2184173993521518, |
| "grad_norm": 1.0743701754676238, |
| "learning_rate": 1.916155572706776e-05, |
| "loss": 0.8083, |
| "step": 590 |
| }, |
| { |
| "epoch": 0.22026839426191577, |
| "grad_norm": 1.119330031173641, |
| "learning_rate": 1.913545457642601e-05, |
| "loss": 0.7692, |
| "step": 595 |
| }, |
| { |
| "epoch": 0.22211938917167978, |
| "grad_norm": 1.1838482419139507, |
| "learning_rate": 1.9108971696166778e-05, |
| "loss": 0.7388, |
| "step": 600 |
| }, |
| { |
| "epoch": 0.22211938917167978, |
| "eval_loss": 0.8023950457572937, |
| "eval_runtime": 125.9122, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.508, |
| "step": 600 |
| }, |
| { |
| "epoch": 0.22397038408144376, |
| "grad_norm": 1.1918846048877687, |
| "learning_rate": 1.9082108192890673e-05, |
| "loss": 0.806, |
| "step": 605 |
| }, |
| { |
| "epoch": 0.22582137899120777, |
| "grad_norm": 1.1474696064483147, |
| "learning_rate": 1.905486518910286e-05, |
| "loss": 0.7756, |
| "step": 610 |
| }, |
| { |
| "epoch": 0.22767237390097178, |
| "grad_norm": 1.1873077682787359, |
| "learning_rate": 1.9027243823166107e-05, |
| "loss": 0.7807, |
| "step": 615 |
| }, |
| { |
| "epoch": 0.22952336881073576, |
| "grad_norm": 1.1370410152260748, |
| "learning_rate": 1.8999245249253247e-05, |
| "loss": 0.8518, |
| "step": 620 |
| }, |
| { |
| "epoch": 0.23137436372049977, |
| "grad_norm": 1.1592714123770727, |
| "learning_rate": 1.8970870637298936e-05, |
| "loss": 0.7947, |
| "step": 625 |
| }, |
| { |
| "epoch": 0.23322535863026378, |
| "grad_norm": 1.167379979369555, |
| "learning_rate": 1.8942121172950765e-05, |
| "loss": 0.8175, |
| "step": 630 |
| }, |
| { |
| "epoch": 0.23507635354002776, |
| "grad_norm": 1.0148428174894528, |
| "learning_rate": 1.8912998057519735e-05, |
| "loss": 0.7813, |
| "step": 635 |
| }, |
| { |
| "epoch": 0.23692734844979177, |
| "grad_norm": 1.2294496173862697, |
| "learning_rate": 1.8883502507930044e-05, |
| "loss": 0.8282, |
| "step": 640 |
| }, |
| { |
| "epoch": 0.23877834335955575, |
| "grad_norm": 1.153834698104633, |
| "learning_rate": 1.885363575666823e-05, |
| "loss": 0.819, |
| "step": 645 |
| }, |
| { |
| "epoch": 0.24062933826931976, |
| "grad_norm": 1.0699702689958812, |
| "learning_rate": 1.8823399051731698e-05, |
| "loss": 0.7333, |
| "step": 650 |
| }, |
| { |
| "epoch": 0.24248033317908377, |
| "grad_norm": 1.0339682532907033, |
| "learning_rate": 1.8792793656576544e-05, |
| "loss": 0.7723, |
| "step": 655 |
| }, |
| { |
| "epoch": 0.24433132808884775, |
| "grad_norm": 1.083476975643199, |
| "learning_rate": 1.876182085006478e-05, |
| "loss": 0.7932, |
| "step": 660 |
| }, |
| { |
| "epoch": 0.24618232299861176, |
| "grad_norm": 0.9842008335663365, |
| "learning_rate": 1.873048192641088e-05, |
| "loss": 0.7978, |
| "step": 665 |
| }, |
| { |
| "epoch": 0.24803331790837574, |
| "grad_norm": 1.0859880941267326, |
| "learning_rate": 1.8698778195127715e-05, |
| "loss": 0.7903, |
| "step": 670 |
| }, |
| { |
| "epoch": 0.24988431281813975, |
| "grad_norm": 1.0920881647401042, |
| "learning_rate": 1.866671098097183e-05, |
| "loss": 0.7805, |
| "step": 675 |
| }, |
| { |
| "epoch": 0.25173530772790376, |
| "grad_norm": 1.0447814725747497, |
| "learning_rate": 1.863428162388808e-05, |
| "loss": 0.8156, |
| "step": 680 |
| }, |
| { |
| "epoch": 0.25358630263766774, |
| "grad_norm": 1.122277511505867, |
| "learning_rate": 1.860149147895366e-05, |
| "loss": 0.7803, |
| "step": 685 |
| }, |
| { |
| "epoch": 0.2554372975474317, |
| "grad_norm": 1.1085629206709198, |
| "learning_rate": 1.856834191632144e-05, |
| "loss": 0.8045, |
| "step": 690 |
| }, |
| { |
| "epoch": 0.25728829245719576, |
| "grad_norm": 1.087490803470686, |
| "learning_rate": 1.8534834321162778e-05, |
| "loss": 0.822, |
| "step": 695 |
| }, |
| { |
| "epoch": 0.25913928736695974, |
| "grad_norm": 1.0114699174712363, |
| "learning_rate": 1.850097009360958e-05, |
| "loss": 0.7234, |
| "step": 700 |
| }, |
| { |
| "epoch": 0.25913928736695974, |
| "eval_loss": 0.7961795926094055, |
| "eval_runtime": 125.8984, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.508, |
| "step": 700 |
| }, |
| { |
| "epoch": 0.2609902822767237, |
| "grad_norm": 1.1666771129689475, |
| "learning_rate": 1.8466750648695826e-05, |
| "loss": 0.8284, |
| "step": 705 |
| }, |
| { |
| "epoch": 0.26284127718648775, |
| "grad_norm": 1.0304043488373609, |
| "learning_rate": 1.843217741629843e-05, |
| "loss": 0.7025, |
| "step": 710 |
| }, |
| { |
| "epoch": 0.26469227209625174, |
| "grad_norm": 1.0509818064952419, |
| "learning_rate": 1.83972518410775e-05, |
| "loss": 0.739, |
| "step": 715 |
| }, |
| { |
| "epoch": 0.2665432670060157, |
| "grad_norm": 0.9979057999152209, |
| "learning_rate": 1.836197538241596e-05, |
| "loss": 0.7452, |
| "step": 720 |
| }, |
| { |
| "epoch": 0.26839426191577975, |
| "grad_norm": 1.0805505088606497, |
| "learning_rate": 1.8326349514358594e-05, |
| "loss": 0.7787, |
| "step": 725 |
| }, |
| { |
| "epoch": 0.27024525682554373, |
| "grad_norm": 1.091857948129845, |
| "learning_rate": 1.8290375725550417e-05, |
| "loss": 0.8327, |
| "step": 730 |
| }, |
| { |
| "epoch": 0.2720962517353077, |
| "grad_norm": 1.0581119370365102, |
| "learning_rate": 1.8254055519174502e-05, |
| "loss": 0.7103, |
| "step": 735 |
| }, |
| { |
| "epoch": 0.27394724664507175, |
| "grad_norm": 1.1460921794947827, |
| "learning_rate": 1.821739041288915e-05, |
| "loss": 0.7927, |
| "step": 740 |
| }, |
| { |
| "epoch": 0.27579824155483573, |
| "grad_norm": 0.9651613777166085, |
| "learning_rate": 1.818038193876448e-05, |
| "loss": 0.7406, |
| "step": 745 |
| }, |
| { |
| "epoch": 0.2776492364645997, |
| "grad_norm": 0.9502996970486759, |
| "learning_rate": 1.8143031643218413e-05, |
| "loss": 0.7027, |
| "step": 750 |
| }, |
| { |
| "epoch": 0.2795002313743637, |
| "grad_norm": 1.1463924114726602, |
| "learning_rate": 1.8105341086952052e-05, |
| "loss": 0.8102, |
| "step": 755 |
| }, |
| { |
| "epoch": 0.28135122628412773, |
| "grad_norm": 1.1234130601114334, |
| "learning_rate": 1.806731184488447e-05, |
| "loss": 0.7802, |
| "step": 760 |
| }, |
| { |
| "epoch": 0.2832022211938917, |
| "grad_norm": 1.0971581054230324, |
| "learning_rate": 1.8028945506086898e-05, |
| "loss": 0.7516, |
| "step": 765 |
| }, |
| { |
| "epoch": 0.2850532161036557, |
| "grad_norm": 1.107825346177917, |
| "learning_rate": 1.799024367371631e-05, |
| "loss": 0.7435, |
| "step": 770 |
| }, |
| { |
| "epoch": 0.28690421101341973, |
| "grad_norm": 1.1319764102322498, |
| "learning_rate": 1.795120796494848e-05, |
| "loss": 0.8346, |
| "step": 775 |
| }, |
| { |
| "epoch": 0.2887552059231837, |
| "grad_norm": 1.054146904567116, |
| "learning_rate": 1.791184001091035e-05, |
| "loss": 0.7668, |
| "step": 780 |
| }, |
| { |
| "epoch": 0.2906062008329477, |
| "grad_norm": 0.9546703867947438, |
| "learning_rate": 1.7872141456611904e-05, |
| "loss": 0.7729, |
| "step": 785 |
| }, |
| { |
| "epoch": 0.29245719574271173, |
| "grad_norm": 1.0520354517409871, |
| "learning_rate": 1.7832113960877445e-05, |
| "loss": 0.7666, |
| "step": 790 |
| }, |
| { |
| "epoch": 0.2943081906524757, |
| "grad_norm": 1.0339039958969705, |
| "learning_rate": 1.779175919627624e-05, |
| "loss": 0.7879, |
| "step": 795 |
| }, |
| { |
| "epoch": 0.2961591855622397, |
| "grad_norm": 1.0374069302809197, |
| "learning_rate": 1.775107884905266e-05, |
| "loss": 0.7693, |
| "step": 800 |
| }, |
| { |
| "epoch": 0.2961591855622397, |
| "eval_loss": 0.7911367416381836, |
| "eval_runtime": 125.8736, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.508, |
| "step": 800 |
| }, |
| { |
| "epoch": 0.2980101804720037, |
| "grad_norm": 1.023031947891097, |
| "learning_rate": 1.7710074619055707e-05, |
| "loss": 0.7701, |
| "step": 805 |
| }, |
| { |
| "epoch": 0.2998611753817677, |
| "grad_norm": 1.02308459176092, |
| "learning_rate": 1.7668748219668007e-05, |
| "loss": 0.7079, |
| "step": 810 |
| }, |
| { |
| "epoch": 0.3017121702915317, |
| "grad_norm": 1.0193146042109342, |
| "learning_rate": 1.7627101377734176e-05, |
| "loss": 0.7814, |
| "step": 815 |
| }, |
| { |
| "epoch": 0.30356316520129567, |
| "grad_norm": 1.1175824255386086, |
| "learning_rate": 1.7585135833488692e-05, |
| "loss": 0.8352, |
| "step": 820 |
| }, |
| { |
| "epoch": 0.3054141601110597, |
| "grad_norm": 0.9758167149410084, |
| "learning_rate": 1.7542853340483175e-05, |
| "loss": 0.7525, |
| "step": 825 |
| }, |
| { |
| "epoch": 0.3072651550208237, |
| "grad_norm": 0.9971503389642156, |
| "learning_rate": 1.7500255665513112e-05, |
| "loss": 0.7279, |
| "step": 830 |
| }, |
| { |
| "epoch": 0.30911614993058767, |
| "grad_norm": 1.169901204414489, |
| "learning_rate": 1.7457344588544018e-05, |
| "loss": 0.7755, |
| "step": 835 |
| }, |
| { |
| "epoch": 0.3109671448403517, |
| "grad_norm": 1.0595523773603162, |
| "learning_rate": 1.7414121902637083e-05, |
| "loss": 0.7673, |
| "step": 840 |
| }, |
| { |
| "epoch": 0.3128181397501157, |
| "grad_norm": 1.1751687012571783, |
| "learning_rate": 1.7370589413874226e-05, |
| "loss": 0.741, |
| "step": 845 |
| }, |
| { |
| "epoch": 0.31466913465987967, |
| "grad_norm": 1.1835719361646306, |
| "learning_rate": 1.7326748941282638e-05, |
| "loss": 0.7788, |
| "step": 850 |
| }, |
| { |
| "epoch": 0.3165201295696437, |
| "grad_norm": 1.0111560919225886, |
| "learning_rate": 1.7282602316758774e-05, |
| "loss": 0.7678, |
| "step": 855 |
| }, |
| { |
| "epoch": 0.3183711244794077, |
| "grad_norm": 1.0835946252135704, |
| "learning_rate": 1.72381513849918e-05, |
| "loss": 0.7476, |
| "step": 860 |
| }, |
| { |
| "epoch": 0.32022211938917167, |
| "grad_norm": 1.1099985814534863, |
| "learning_rate": 1.7193398003386514e-05, |
| "loss": 0.779, |
| "step": 865 |
| }, |
| { |
| "epoch": 0.3220731142989357, |
| "grad_norm": 0.9066789182044317, |
| "learning_rate": 1.7148344041985736e-05, |
| "loss": 0.6901, |
| "step": 870 |
| }, |
| { |
| "epoch": 0.3239241092086997, |
| "grad_norm": 1.1422072479983323, |
| "learning_rate": 1.710299138339217e-05, |
| "loss": 0.806, |
| "step": 875 |
| }, |
| { |
| "epoch": 0.32577510411846367, |
| "grad_norm": 0.9742950243183045, |
| "learning_rate": 1.7057341922689733e-05, |
| "loss": 0.7739, |
| "step": 880 |
| }, |
| { |
| "epoch": 0.32762609902822765, |
| "grad_norm": 1.0227221722499922, |
| "learning_rate": 1.701139756736436e-05, |
| "loss": 0.7406, |
| "step": 885 |
| }, |
| { |
| "epoch": 0.3294770939379917, |
| "grad_norm": 1.0785612634689687, |
| "learning_rate": 1.696516023722431e-05, |
| "loss": 0.7728, |
| "step": 890 |
| }, |
| { |
| "epoch": 0.33132808884775566, |
| "grad_norm": 1.0880856661959288, |
| "learning_rate": 1.691863186431996e-05, |
| "loss": 0.7462, |
| "step": 895 |
| }, |
| { |
| "epoch": 0.33317908375751965, |
| "grad_norm": 1.0817764375919297, |
| "learning_rate": 1.6871814392863037e-05, |
| "loss": 0.8385, |
| "step": 900 |
| }, |
| { |
| "epoch": 0.33317908375751965, |
| "eval_loss": 0.7849360704421997, |
| "eval_runtime": 125.8514, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.509, |
| "step": 900 |
| }, |
| { |
| "epoch": 0.3350300786672837, |
| "grad_norm": 1.0848147512517947, |
| "learning_rate": 1.682470977914541e-05, |
| "loss": 0.7653, |
| "step": 905 |
| }, |
| { |
| "epoch": 0.33688107357704766, |
| "grad_norm": 1.1609482279651488, |
| "learning_rate": 1.6777319991457325e-05, |
| "loss": 0.786, |
| "step": 910 |
| }, |
| { |
| "epoch": 0.33873206848681164, |
| "grad_norm": 1.2171008251999773, |
| "learning_rate": 1.6729647010005175e-05, |
| "loss": 0.8124, |
| "step": 915 |
| }, |
| { |
| "epoch": 0.3405830633965757, |
| "grad_norm": 0.9446888141489528, |
| "learning_rate": 1.6681692826828743e-05, |
| "loss": 0.8249, |
| "step": 920 |
| }, |
| { |
| "epoch": 0.34243405830633966, |
| "grad_norm": 1.099582146653851, |
| "learning_rate": 1.6633459445717973e-05, |
| "loss": 0.7901, |
| "step": 925 |
| }, |
| { |
| "epoch": 0.34428505321610364, |
| "grad_norm": 0.9771996099399858, |
| "learning_rate": 1.6584948882129238e-05, |
| "loss": 0.759, |
| "step": 930 |
| }, |
| { |
| "epoch": 0.3461360481258677, |
| "grad_norm": 1.0185009245061947, |
| "learning_rate": 1.653616316310112e-05, |
| "loss": 0.7295, |
| "step": 935 |
| }, |
| { |
| "epoch": 0.34798704303563166, |
| "grad_norm": 1.0804952434925557, |
| "learning_rate": 1.6487104327169702e-05, |
| "loss": 0.755, |
| "step": 940 |
| }, |
| { |
| "epoch": 0.34983803794539564, |
| "grad_norm": 1.0894047826173676, |
| "learning_rate": 1.6437774424283414e-05, |
| "loss": 0.7916, |
| "step": 945 |
| }, |
| { |
| "epoch": 0.3516890328551596, |
| "grad_norm": 0.9805181725379623, |
| "learning_rate": 1.6388175515717336e-05, |
| "loss": 0.7285, |
| "step": 950 |
| }, |
| { |
| "epoch": 0.35354002776492366, |
| "grad_norm": 1.356858685263938, |
| "learning_rate": 1.63383096739871e-05, |
| "loss": 0.7864, |
| "step": 955 |
| }, |
| { |
| "epoch": 0.35539102267468764, |
| "grad_norm": 0.9525556743311706, |
| "learning_rate": 1.6288178982762287e-05, |
| "loss": 0.742, |
| "step": 960 |
| }, |
| { |
| "epoch": 0.3572420175844516, |
| "grad_norm": 1.0831204564195782, |
| "learning_rate": 1.6237785536779322e-05, |
| "loss": 0.7386, |
| "step": 965 |
| }, |
| { |
| "epoch": 0.35909301249421566, |
| "grad_norm": 0.9806687465833773, |
| "learning_rate": 1.618713144175399e-05, |
| "loss": 0.7399, |
| "step": 970 |
| }, |
| { |
| "epoch": 0.36094400740397964, |
| "grad_norm": 0.9492684447869085, |
| "learning_rate": 1.6136218814293422e-05, |
| "loss": 0.763, |
| "step": 975 |
| }, |
| { |
| "epoch": 0.3627950023137436, |
| "grad_norm": 1.05964205886519, |
| "learning_rate": 1.6085049781807656e-05, |
| "loss": 0.7658, |
| "step": 980 |
| }, |
| { |
| "epoch": 0.36464599722350766, |
| "grad_norm": 1.007330323233069, |
| "learning_rate": 1.603362648242076e-05, |
| "loss": 0.7371, |
| "step": 985 |
| }, |
| { |
| "epoch": 0.36649699213327164, |
| "grad_norm": 0.9531551141111623, |
| "learning_rate": 1.5981951064881456e-05, |
| "loss": 0.7538, |
| "step": 990 |
| }, |
| { |
| "epoch": 0.3683479870430356, |
| "grad_norm": 1.0737293889710073, |
| "learning_rate": 1.5930025688473353e-05, |
| "loss": 0.7896, |
| "step": 995 |
| }, |
| { |
| "epoch": 0.37019898195279966, |
| "grad_norm": 1.0915409073010398, |
| "learning_rate": 1.5877852522924733e-05, |
| "loss": 0.7926, |
| "step": 1000 |
| }, |
| { |
| "epoch": 0.37019898195279966, |
| "eval_loss": 0.7791726589202881, |
| "eval_runtime": 125.9827, |
| "eval_samples_per_second": 1.016, |
| "eval_steps_per_second": 0.508, |
| "step": 1000 |
| }, |
| { |
| "epoch": 0.37204997686256364, |
| "grad_norm": 1.1973665058190375, |
| "learning_rate": 1.5825433748317857e-05, |
| "loss": 0.7215, |
| "step": 1005 |
| }, |
| { |
| "epoch": 0.3739009717723276, |
| "grad_norm": 1.0701603196497034, |
| "learning_rate": 1.577277155499789e-05, |
| "loss": 0.7396, |
| "step": 1010 |
| }, |
| { |
| "epoch": 0.3757519666820916, |
| "grad_norm": 1.0987041230123638, |
| "learning_rate": 1.5719868143481385e-05, |
| "loss": 0.7429, |
| "step": 1015 |
| }, |
| { |
| "epoch": 0.37760296159185563, |
| "grad_norm": 1.0534343239222719, |
| "learning_rate": 1.5666725724364296e-05, |
| "loss": 0.7545, |
| "step": 1020 |
| }, |
| { |
| "epoch": 0.3794539565016196, |
| "grad_norm": 1.028797489541622, |
| "learning_rate": 1.5613346518229652e-05, |
| "loss": 0.7264, |
| "step": 1025 |
| }, |
| { |
| "epoch": 0.3813049514113836, |
| "grad_norm": 1.0217316773582072, |
| "learning_rate": 1.5559732755554734e-05, |
| "loss": 0.7177, |
| "step": 1030 |
| }, |
| { |
| "epoch": 0.38315594632114763, |
| "grad_norm": 1.047042319865492, |
| "learning_rate": 1.5505886676617902e-05, |
| "loss": 0.7714, |
| "step": 1035 |
| }, |
| { |
| "epoch": 0.3850069412309116, |
| "grad_norm": 1.0810668413004525, |
| "learning_rate": 1.545181053140495e-05, |
| "loss": 0.715, |
| "step": 1040 |
| }, |
| { |
| "epoch": 0.3868579361406756, |
| "grad_norm": 1.0363170821457612, |
| "learning_rate": 1.539750657951513e-05, |
| "loss": 0.7664, |
| "step": 1045 |
| }, |
| { |
| "epoch": 0.38870893105043963, |
| "grad_norm": 1.0452365868797338, |
| "learning_rate": 1.5342977090066696e-05, |
| "loss": 0.7236, |
| "step": 1050 |
| }, |
| { |
| "epoch": 0.3905599259602036, |
| "grad_norm": 0.9982499977285321, |
| "learning_rate": 1.52882243416021e-05, |
| "loss": 0.7731, |
| "step": 1055 |
| }, |
| { |
| "epoch": 0.3924109208699676, |
| "grad_norm": 1.030850130209849, |
| "learning_rate": 1.523325062199281e-05, |
| "loss": 0.7617, |
| "step": 1060 |
| }, |
| { |
| "epoch": 0.39426191577973163, |
| "grad_norm": 1.1101345309616182, |
| "learning_rate": 1.5178058228343662e-05, |
| "loss": 0.7552, |
| "step": 1065 |
| }, |
| { |
| "epoch": 0.3961129106894956, |
| "grad_norm": 0.9346869642461403, |
| "learning_rate": 1.5122649466896899e-05, |
| "loss": 0.7606, |
| "step": 1070 |
| }, |
| { |
| "epoch": 0.3979639055992596, |
| "grad_norm": 0.9976085317381093, |
| "learning_rate": 1.5067026652935823e-05, |
| "loss": 0.7303, |
| "step": 1075 |
| }, |
| { |
| "epoch": 0.3998149005090236, |
| "grad_norm": 0.8951591117484915, |
| "learning_rate": 1.5011192110688008e-05, |
| "loss": 0.6998, |
| "step": 1080 |
| }, |
| { |
| "epoch": 0.4016658954187876, |
| "grad_norm": 1.0337985910400393, |
| "learning_rate": 1.4955148173228209e-05, |
| "loss": 0.7814, |
| "step": 1085 |
| }, |
| { |
| "epoch": 0.4035168903285516, |
| "grad_norm": 1.0299336976844324, |
| "learning_rate": 1.4898897182380872e-05, |
| "loss": 0.7532, |
| "step": 1090 |
| }, |
| { |
| "epoch": 0.4053678852383156, |
| "grad_norm": 0.9541582795109266, |
| "learning_rate": 1.4842441488622266e-05, |
| "loss": 0.7609, |
| "step": 1095 |
| }, |
| { |
| "epoch": 0.4072188801480796, |
| "grad_norm": 1.1428468537841259, |
| "learning_rate": 1.4785783450982276e-05, |
| "loss": 0.7463, |
| "step": 1100 |
| }, |
| { |
| "epoch": 0.4072188801480796, |
| "eval_loss": 0.7768411636352539, |
| "eval_runtime": 125.9173, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.508, |
| "step": 1100 |
| }, |
| { |
| "epoch": 0.4090698750578436, |
| "grad_norm": 0.9432900818713729, |
| "learning_rate": 1.4728925436945838e-05, |
| "loss": 0.7225, |
| "step": 1105 |
| }, |
| { |
| "epoch": 0.41092086996760757, |
| "grad_norm": 0.9568114894842158, |
| "learning_rate": 1.4671869822353993e-05, |
| "loss": 0.7414, |
| "step": 1110 |
| }, |
| { |
| "epoch": 0.4127718648773716, |
| "grad_norm": 1.1008061214147924, |
| "learning_rate": 1.4614618991304625e-05, |
| "loss": 0.78, |
| "step": 1115 |
| }, |
| { |
| "epoch": 0.4146228597871356, |
| "grad_norm": 1.0503790052814121, |
| "learning_rate": 1.4557175336052844e-05, |
| "loss": 0.7223, |
| "step": 1120 |
| }, |
| { |
| "epoch": 0.41647385469689957, |
| "grad_norm": 0.9583451918719977, |
| "learning_rate": 1.4499541256911008e-05, |
| "loss": 0.7495, |
| "step": 1125 |
| }, |
| { |
| "epoch": 0.4183248496066636, |
| "grad_norm": 1.0275134379520088, |
| "learning_rate": 1.4441719162148433e-05, |
| "loss": 0.7653, |
| "step": 1130 |
| }, |
| { |
| "epoch": 0.4201758445164276, |
| "grad_norm": 1.0425892623904864, |
| "learning_rate": 1.4383711467890776e-05, |
| "loss": 0.7055, |
| "step": 1135 |
| }, |
| { |
| "epoch": 0.42202683942619157, |
| "grad_norm": 1.069154922576832, |
| "learning_rate": 1.4325520598019049e-05, |
| "loss": 0.7837, |
| "step": 1140 |
| }, |
| { |
| "epoch": 0.42387783433595555, |
| "grad_norm": 0.9896521412449755, |
| "learning_rate": 1.4267148984068359e-05, |
| "loss": 0.7702, |
| "step": 1145 |
| }, |
| { |
| "epoch": 0.4257288292457196, |
| "grad_norm": 1.025563581357004, |
| "learning_rate": 1.4208599065126292e-05, |
| "loss": 0.7604, |
| "step": 1150 |
| }, |
| { |
| "epoch": 0.42757982415548357, |
| "grad_norm": 1.001282739654851, |
| "learning_rate": 1.4149873287731005e-05, |
| "loss": 0.7874, |
| "step": 1155 |
| }, |
| { |
| "epoch": 0.42943081906524755, |
| "grad_norm": 1.0858912388862354, |
| "learning_rate": 1.4090974105768977e-05, |
| "loss": 0.8066, |
| "step": 1160 |
| }, |
| { |
| "epoch": 0.4312818139750116, |
| "grad_norm": 0.9891342220670727, |
| "learning_rate": 1.4031903980372503e-05, |
| "loss": 0.7532, |
| "step": 1165 |
| }, |
| { |
| "epoch": 0.43313280888477557, |
| "grad_norm": 1.1089360136811275, |
| "learning_rate": 1.3972665379816821e-05, |
| "loss": 0.7344, |
| "step": 1170 |
| }, |
| { |
| "epoch": 0.43498380379453955, |
| "grad_norm": 1.0804929458235695, |
| "learning_rate": 1.3913260779416999e-05, |
| "loss": 0.7127, |
| "step": 1175 |
| }, |
| { |
| "epoch": 0.4368347987043036, |
| "grad_norm": 0.9487754961773092, |
| "learning_rate": 1.3853692661424485e-05, |
| "loss": 0.7544, |
| "step": 1180 |
| }, |
| { |
| "epoch": 0.43868579361406757, |
| "grad_norm": 1.1689376731656378, |
| "learning_rate": 1.3793963514923398e-05, |
| "loss": 0.7571, |
| "step": 1185 |
| }, |
| { |
| "epoch": 0.44053678852383155, |
| "grad_norm": 1.1079751308984782, |
| "learning_rate": 1.3734075835726515e-05, |
| "loss": 0.7632, |
| "step": 1190 |
| }, |
| { |
| "epoch": 0.4423877834335956, |
| "grad_norm": 0.9701136035154755, |
| "learning_rate": 1.3674032126270982e-05, |
| "loss": 0.7155, |
| "step": 1195 |
| }, |
| { |
| "epoch": 0.44423877834335956, |
| "grad_norm": 1.0783370064152846, |
| "learning_rate": 1.3613834895513748e-05, |
| "loss": 0.8017, |
| "step": 1200 |
| }, |
| { |
| "epoch": 0.44423877834335956, |
| "eval_loss": 0.7741294503211975, |
| "eval_runtime": 125.9108, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.508, |
| "step": 1200 |
| }, |
| { |
| "epoch": 0.44608977325312354, |
| "grad_norm": 1.039233538517332, |
| "learning_rate": 1.355348665882673e-05, |
| "loss": 0.7332, |
| "step": 1205 |
| }, |
| { |
| "epoch": 0.4479407681628875, |
| "grad_norm": 1.1306337428774784, |
| "learning_rate": 1.3492989937891694e-05, |
| "loss": 0.7475, |
| "step": 1210 |
| }, |
| { |
| "epoch": 0.44979176307265156, |
| "grad_norm": 1.0587177357723303, |
| "learning_rate": 1.3432347260594911e-05, |
| "loss": 0.7603, |
| "step": 1215 |
| }, |
| { |
| "epoch": 0.45164275798241554, |
| "grad_norm": 1.0290685165693072, |
| "learning_rate": 1.3371561160921507e-05, |
| "loss": 0.7646, |
| "step": 1220 |
| }, |
| { |
| "epoch": 0.4534937528921795, |
| "grad_norm": 1.34640973701702, |
| "learning_rate": 1.3310634178849583e-05, |
| "loss": 0.6996, |
| "step": 1225 |
| }, |
| { |
| "epoch": 0.45534474780194356, |
| "grad_norm": 0.9941580643005229, |
| "learning_rate": 1.3249568860244081e-05, |
| "loss": 0.7254, |
| "step": 1230 |
| }, |
| { |
| "epoch": 0.45719574271170754, |
| "grad_norm": 1.0409471890052142, |
| "learning_rate": 1.3188367756750413e-05, |
| "loss": 0.7774, |
| "step": 1235 |
| }, |
| { |
| "epoch": 0.4590467376214715, |
| "grad_norm": 0.9723468054375276, |
| "learning_rate": 1.312703342568782e-05, |
| "loss": 0.7169, |
| "step": 1240 |
| }, |
| { |
| "epoch": 0.46089773253123556, |
| "grad_norm": 0.9902905902420863, |
| "learning_rate": 1.306556842994254e-05, |
| "loss": 0.7297, |
| "step": 1245 |
| }, |
| { |
| "epoch": 0.46274872744099954, |
| "grad_norm": 0.9601290745180161, |
| "learning_rate": 1.3003975337860686e-05, |
| "loss": 0.7538, |
| "step": 1250 |
| }, |
| { |
| "epoch": 0.4645997223507635, |
| "grad_norm": 0.9053050963379172, |
| "learning_rate": 1.2942256723140951e-05, |
| "loss": 0.7109, |
| "step": 1255 |
| }, |
| { |
| "epoch": 0.46645071726052756, |
| "grad_norm": 1.0874571881287778, |
| "learning_rate": 1.2880415164727058e-05, |
| "loss": 0.7919, |
| "step": 1260 |
| }, |
| { |
| "epoch": 0.46830171217029154, |
| "grad_norm": 1.1451121612817037, |
| "learning_rate": 1.2818453246699981e-05, |
| "loss": 0.7391, |
| "step": 1265 |
| }, |
| { |
| "epoch": 0.4701527070800555, |
| "grad_norm": 0.9681114397766631, |
| "learning_rate": 1.2756373558169992e-05, |
| "loss": 0.743, |
| "step": 1270 |
| }, |
| { |
| "epoch": 0.4720037019898195, |
| "grad_norm": 0.9223359776123135, |
| "learning_rate": 1.2694178693168468e-05, |
| "loss": 0.72, |
| "step": 1275 |
| }, |
| { |
| "epoch": 0.47385469689958354, |
| "grad_norm": 1.0469506863925069, |
| "learning_rate": 1.2631871250539478e-05, |
| "loss": 0.728, |
| "step": 1280 |
| }, |
| { |
| "epoch": 0.4757056918093475, |
| "grad_norm": 1.0014451232715824, |
| "learning_rate": 1.2569453833831222e-05, |
| "loss": 0.717, |
| "step": 1285 |
| }, |
| { |
| "epoch": 0.4775566867191115, |
| "grad_norm": 0.9639634997081045, |
| "learning_rate": 1.250692905118721e-05, |
| "loss": 0.7507, |
| "step": 1290 |
| }, |
| { |
| "epoch": 0.47940768162887554, |
| "grad_norm": 0.9929137343346536, |
| "learning_rate": 1.2444299515237298e-05, |
| "loss": 0.7643, |
| "step": 1295 |
| }, |
| { |
| "epoch": 0.4812586765386395, |
| "grad_norm": 1.0240687855090735, |
| "learning_rate": 1.238156784298851e-05, |
| "loss": 0.7072, |
| "step": 1300 |
| }, |
| { |
| "epoch": 0.4812586765386395, |
| "eval_loss": 0.7685373425483704, |
| "eval_runtime": 125.8483, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.509, |
| "step": 1300 |
| }, |
| { |
| "epoch": 0.4831096714484035, |
| "grad_norm": 1.0317937768510643, |
| "learning_rate": 1.2318736655715689e-05, |
| "loss": 0.7638, |
| "step": 1305 |
| }, |
| { |
| "epoch": 0.48496066635816754, |
| "grad_norm": 0.9255922281298357, |
| "learning_rate": 1.2255808578851958e-05, |
| "loss": 0.7467, |
| "step": 1310 |
| }, |
| { |
| "epoch": 0.4868116612679315, |
| "grad_norm": 0.9721165058296256, |
| "learning_rate": 1.2192786241879033e-05, |
| "loss": 0.7546, |
| "step": 1315 |
| }, |
| { |
| "epoch": 0.4886626561776955, |
| "grad_norm": 0.9644367734588111, |
| "learning_rate": 1.2129672278217328e-05, |
| "loss": 0.7169, |
| "step": 1320 |
| }, |
| { |
| "epoch": 0.49051365108745953, |
| "grad_norm": 1.0130538871266401, |
| "learning_rate": 1.2066469325115927e-05, |
| "loss": 0.7369, |
| "step": 1325 |
| }, |
| { |
| "epoch": 0.4923646459972235, |
| "grad_norm": 0.9834288208571581, |
| "learning_rate": 1.2003180023542375e-05, |
| "loss": 0.7325, |
| "step": 1330 |
| }, |
| { |
| "epoch": 0.4942156409069875, |
| "grad_norm": 0.960205484882582, |
| "learning_rate": 1.1939807018072345e-05, |
| "loss": 0.7277, |
| "step": 1335 |
| }, |
| { |
| "epoch": 0.4960666358167515, |
| "grad_norm": 1.000713485591568, |
| "learning_rate": 1.1876352956779114e-05, |
| "loss": 0.7671, |
| "step": 1340 |
| }, |
| { |
| "epoch": 0.4979176307265155, |
| "grad_norm": 0.9972457796952376, |
| "learning_rate": 1.1812820491122918e-05, |
| "loss": 0.7496, |
| "step": 1345 |
| }, |
| { |
| "epoch": 0.4997686256362795, |
| "grad_norm": 0.9176925753783508, |
| "learning_rate": 1.1749212275840159e-05, |
| "loss": 0.7719, |
| "step": 1350 |
| }, |
| { |
| "epoch": 0.5016196205460435, |
| "grad_norm": 0.9486097980237569, |
| "learning_rate": 1.1685530968832474e-05, |
| "loss": 0.7286, |
| "step": 1355 |
| }, |
| { |
| "epoch": 0.5034706154558075, |
| "grad_norm": 1.0669908596524535, |
| "learning_rate": 1.1621779231055677e-05, |
| "loss": 0.7279, |
| "step": 1360 |
| }, |
| { |
| "epoch": 0.5053216103655715, |
| "grad_norm": 1.0656718017579419, |
| "learning_rate": 1.1557959726408567e-05, |
| "loss": 0.7338, |
| "step": 1365 |
| }, |
| { |
| "epoch": 0.5071726052753355, |
| "grad_norm": 1.2390741577593356, |
| "learning_rate": 1.1494075121621618e-05, |
| "loss": 0.7997, |
| "step": 1370 |
| }, |
| { |
| "epoch": 0.5090236001850995, |
| "grad_norm": 1.0260656127221468, |
| "learning_rate": 1.1430128086145542e-05, |
| "loss": 0.7257, |
| "step": 1375 |
| }, |
| { |
| "epoch": 0.5108745950948634, |
| "grad_norm": 0.981403662935476, |
| "learning_rate": 1.1366121292039756e-05, |
| "loss": 0.6885, |
| "step": 1380 |
| }, |
| { |
| "epoch": 0.5127255900046275, |
| "grad_norm": 0.9506854040681342, |
| "learning_rate": 1.1302057413860717e-05, |
| "loss": 0.7523, |
| "step": 1385 |
| }, |
| { |
| "epoch": 0.5145765849143915, |
| "grad_norm": 0.9571399024989548, |
| "learning_rate": 1.1237939128550167e-05, |
| "loss": 0.7406, |
| "step": 1390 |
| }, |
| { |
| "epoch": 0.5164275798241554, |
| "grad_norm": 0.9379988174297818, |
| "learning_rate": 1.1173769115323276e-05, |
| "loss": 0.7152, |
| "step": 1395 |
| }, |
| { |
| "epoch": 0.5182785747339195, |
| "grad_norm": 0.9799140133111384, |
| "learning_rate": 1.1109550055556704e-05, |
| "loss": 0.6862, |
| "step": 1400 |
| }, |
| { |
| "epoch": 0.5182785747339195, |
| "eval_loss": 0.762523889541626, |
| "eval_runtime": 125.8824, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.508, |
| "step": 1400 |
| }, |
| { |
| "epoch": 0.5201295696436835, |
| "grad_norm": 1.0128868126138035, |
| "learning_rate": 1.1045284632676535e-05, |
| "loss": 0.7491, |
| "step": 1405 |
| }, |
| { |
| "epoch": 0.5219805645534474, |
| "grad_norm": 1.1054871656808112, |
| "learning_rate": 1.098097553204616e-05, |
| "loss": 0.7193, |
| "step": 1410 |
| }, |
| { |
| "epoch": 0.5238315594632115, |
| "grad_norm": 1.0534251043687004, |
| "learning_rate": 1.0916625440854074e-05, |
| "loss": 0.7263, |
| "step": 1415 |
| }, |
| { |
| "epoch": 0.5256825543729755, |
| "grad_norm": 1.0586326058499738, |
| "learning_rate": 1.0852237048001568e-05, |
| "loss": 0.771, |
| "step": 1420 |
| }, |
| { |
| "epoch": 0.5275335492827394, |
| "grad_norm": 1.0689916226823393, |
| "learning_rate": 1.0787813043990405e-05, |
| "loss": 0.7936, |
| "step": 1425 |
| }, |
| { |
| "epoch": 0.5293845441925035, |
| "grad_norm": 0.9447698094104972, |
| "learning_rate": 1.0723356120810367e-05, |
| "loss": 0.7594, |
| "step": 1430 |
| }, |
| { |
| "epoch": 0.5312355391022675, |
| "grad_norm": 1.0832488716625996, |
| "learning_rate": 1.0658868971826785e-05, |
| "loss": 0.7371, |
| "step": 1435 |
| }, |
| { |
| "epoch": 0.5330865340120314, |
| "grad_norm": 1.0138170678877134, |
| "learning_rate": 1.0594354291667995e-05, |
| "loss": 0.7389, |
| "step": 1440 |
| }, |
| { |
| "epoch": 0.5349375289217955, |
| "grad_norm": 1.0133195189163617, |
| "learning_rate": 1.0529814776112724e-05, |
| "loss": 0.7443, |
| "step": 1445 |
| }, |
| { |
| "epoch": 0.5367885238315595, |
| "grad_norm": 1.062091180379715, |
| "learning_rate": 1.046525312197747e-05, |
| "loss": 0.7549, |
| "step": 1450 |
| }, |
| { |
| "epoch": 0.5386395187413234, |
| "grad_norm": 0.9945798468589808, |
| "learning_rate": 1.0400672027003795e-05, |
| "loss": 0.7033, |
| "step": 1455 |
| }, |
| { |
| "epoch": 0.5404905136510875, |
| "grad_norm": 1.0798176885934982, |
| "learning_rate": 1.0336074189745617e-05, |
| "loss": 0.7873, |
| "step": 1460 |
| }, |
| { |
| "epoch": 0.5423415085608515, |
| "grad_norm": 0.949930038696299, |
| "learning_rate": 1.027146230945643e-05, |
| "loss": 0.7655, |
| "step": 1465 |
| }, |
| { |
| "epoch": 0.5441925034706154, |
| "grad_norm": 0.9128607712688908, |
| "learning_rate": 1.0206839085976528e-05, |
| "loss": 0.7045, |
| "step": 1470 |
| }, |
| { |
| "epoch": 0.5460434983803795, |
| "grad_norm": 0.9894531926372522, |
| "learning_rate": 1.014220721962018e-05, |
| "loss": 0.7288, |
| "step": 1475 |
| }, |
| { |
| "epoch": 0.5478944932901435, |
| "grad_norm": 1.0508653514368567, |
| "learning_rate": 1.0077569411062804e-05, |
| "loss": 0.7393, |
| "step": 1480 |
| }, |
| { |
| "epoch": 0.5497454881999074, |
| "grad_norm": 1.0144705488797956, |
| "learning_rate": 1.001292836122812e-05, |
| "loss": 0.7388, |
| "step": 1485 |
| }, |
| { |
| "epoch": 0.5515964831096715, |
| "grad_norm": 1.0862811301549016, |
| "learning_rate": 9.948286771175288e-06, |
| "loss": 0.7666, |
| "step": 1490 |
| }, |
| { |
| "epoch": 0.5534474780194355, |
| "grad_norm": 0.9773752334499499, |
| "learning_rate": 9.883647341986032e-06, |
| "loss": 0.7572, |
| "step": 1495 |
| }, |
| { |
| "epoch": 0.5552984729291994, |
| "grad_norm": 0.965789789651361, |
| "learning_rate": 9.819012774651788e-06, |
| "loss": 0.7466, |
| "step": 1500 |
| }, |
| { |
| "epoch": 0.5552984729291994, |
| "eval_loss": 0.759286105632782, |
| "eval_runtime": 125.877, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.508, |
| "step": 1500 |
| }, |
| { |
| "epoch": 0.5571494678389635, |
| "grad_norm": 1.0286512648148671, |
| "learning_rate": 9.75438576996084e-06, |
| "loss": 0.7419, |
| "step": 1505 |
| }, |
| { |
| "epoch": 0.5590004627487274, |
| "grad_norm": 0.9779662642913994, |
| "learning_rate": 9.689769028385463e-06, |
| "loss": 0.7594, |
| "step": 1510 |
| }, |
| { |
| "epoch": 0.5608514576584914, |
| "grad_norm": 0.9878313389871681, |
| "learning_rate": 9.625165249969075e-06, |
| "loss": 0.7236, |
| "step": 1515 |
| }, |
| { |
| "epoch": 0.5627024525682555, |
| "grad_norm": 1.020571296588044, |
| "learning_rate": 9.56057713421343e-06, |
| "loss": 0.7498, |
| "step": 1520 |
| }, |
| { |
| "epoch": 0.5645534474780194, |
| "grad_norm": 0.9995998586225845, |
| "learning_rate": 9.496007379965801e-06, |
| "loss": 0.7606, |
| "step": 1525 |
| }, |
| { |
| "epoch": 0.5664044423877834, |
| "grad_norm": 1.060622141268455, |
| "learning_rate": 9.431458685306227e-06, |
| "loss": 0.6984, |
| "step": 1530 |
| }, |
| { |
| "epoch": 0.5682554372975475, |
| "grad_norm": 0.9493673992150904, |
| "learning_rate": 9.366933747434758e-06, |
| "loss": 0.6911, |
| "step": 1535 |
| }, |
| { |
| "epoch": 0.5701064322073114, |
| "grad_norm": 1.0167667579449835, |
| "learning_rate": 9.302435262558748e-06, |
| "loss": 0.6778, |
| "step": 1540 |
| }, |
| { |
| "epoch": 0.5719574271170754, |
| "grad_norm": 1.0392115654580312, |
| "learning_rate": 9.237965925780207e-06, |
| "loss": 0.7691, |
| "step": 1545 |
| }, |
| { |
| "epoch": 0.5738084220268395, |
| "grad_norm": 1.0593025980336248, |
| "learning_rate": 9.173528430983167e-06, |
| "loss": 0.738, |
| "step": 1550 |
| }, |
| { |
| "epoch": 0.5756594169366034, |
| "grad_norm": 0.971968443552632, |
| "learning_rate": 9.109125470721141e-06, |
| "loss": 0.7421, |
| "step": 1555 |
| }, |
| { |
| "epoch": 0.5775104118463674, |
| "grad_norm": 0.932647291828516, |
| "learning_rate": 9.044759736104584e-06, |
| "loss": 0.7055, |
| "step": 1560 |
| }, |
| { |
| "epoch": 0.5793614067561315, |
| "grad_norm": 0.8632798438231859, |
| "learning_rate": 8.98043391668847e-06, |
| "loss": 0.6961, |
| "step": 1565 |
| }, |
| { |
| "epoch": 0.5812124016658954, |
| "grad_norm": 0.9441485921057498, |
| "learning_rate": 8.916150700359896e-06, |
| "loss": 0.7468, |
| "step": 1570 |
| }, |
| { |
| "epoch": 0.5830633965756594, |
| "grad_norm": 0.9939474443131038, |
| "learning_rate": 8.85191277322575e-06, |
| "loss": 0.7149, |
| "step": 1575 |
| }, |
| { |
| "epoch": 0.5849143914854235, |
| "grad_norm": 1.0783126296675078, |
| "learning_rate": 8.787722819500513e-06, |
| "loss": 0.7135, |
| "step": 1580 |
| }, |
| { |
| "epoch": 0.5867653863951874, |
| "grad_norm": 0.9705292869514378, |
| "learning_rate": 8.723583521394054e-06, |
| "loss": 0.7109, |
| "step": 1585 |
| }, |
| { |
| "epoch": 0.5886163813049514, |
| "grad_norm": 0.9367179465636712, |
| "learning_rate": 8.659497558999579e-06, |
| "loss": 0.7114, |
| "step": 1590 |
| }, |
| { |
| "epoch": 0.5904673762147155, |
| "grad_norm": 1.0408124857679764, |
| "learning_rate": 8.595467610181638e-06, |
| "loss": 0.7161, |
| "step": 1595 |
| }, |
| { |
| "epoch": 0.5923183711244794, |
| "grad_norm": 0.971797951572304, |
| "learning_rate": 8.53149635046421e-06, |
| "loss": 0.7508, |
| "step": 1600 |
| }, |
| { |
| "epoch": 0.5923183711244794, |
| "eval_loss": 0.7539975643157959, |
| "eval_runtime": 125.8675, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.508, |
| "step": 1600 |
| }, |
| { |
| "epoch": 0.5941693660342434, |
| "grad_norm": 0.9800435183445545, |
| "learning_rate": 8.467586452918942e-06, |
| "loss": 0.715, |
| "step": 1605 |
| }, |
| { |
| "epoch": 0.5960203609440075, |
| "grad_norm": 1.0113663434618827, |
| "learning_rate": 8.403740588053408e-06, |
| "loss": 0.6989, |
| "step": 1610 |
| }, |
| { |
| "epoch": 0.5978713558537714, |
| "grad_norm": 1.0355947843868953, |
| "learning_rate": 8.339961423699563e-06, |
| "loss": 0.7193, |
| "step": 1615 |
| }, |
| { |
| "epoch": 0.5997223507635354, |
| "grad_norm": 1.0175465647016573, |
| "learning_rate": 8.276251624902235e-06, |
| "loss": 0.7059, |
| "step": 1620 |
| }, |
| { |
| "epoch": 0.6015733456732995, |
| "grad_norm": 1.0190101018280562, |
| "learning_rate": 8.212613853807783e-06, |
| "loss": 0.7302, |
| "step": 1625 |
| }, |
| { |
| "epoch": 0.6034243405830634, |
| "grad_norm": 1.053793043329184, |
| "learning_rate": 8.149050769552856e-06, |
| "loss": 0.7198, |
| "step": 1630 |
| }, |
| { |
| "epoch": 0.6052753354928274, |
| "grad_norm": 1.0096414981283535, |
| "learning_rate": 8.085565028153261e-06, |
| "loss": 0.7581, |
| "step": 1635 |
| }, |
| { |
| "epoch": 0.6071263304025913, |
| "grad_norm": 0.9812300345845569, |
| "learning_rate": 8.02215928239301e-06, |
| "loss": 0.7472, |
| "step": 1640 |
| }, |
| { |
| "epoch": 0.6089773253123554, |
| "grad_norm": 1.0007016748169104, |
| "learning_rate": 7.958836181713445e-06, |
| "loss": 0.7582, |
| "step": 1645 |
| }, |
| { |
| "epoch": 0.6108283202221194, |
| "grad_norm": 0.9909841301661106, |
| "learning_rate": 7.895598372102547e-06, |
| "loss": 0.7247, |
| "step": 1650 |
| }, |
| { |
| "epoch": 0.6126793151318833, |
| "grad_norm": 1.0341673384965404, |
| "learning_rate": 7.832448495984368e-06, |
| "loss": 0.6798, |
| "step": 1655 |
| }, |
| { |
| "epoch": 0.6145303100416474, |
| "grad_norm": 0.9331106658706707, |
| "learning_rate": 7.769389192108608e-06, |
| "loss": 0.7123, |
| "step": 1660 |
| }, |
| { |
| "epoch": 0.6163813049514114, |
| "grad_norm": 0.9435545677177092, |
| "learning_rate": 7.706423095440367e-06, |
| "loss": 0.7243, |
| "step": 1665 |
| }, |
| { |
| "epoch": 0.6182322998611753, |
| "grad_norm": 0.991710730848484, |
| "learning_rate": 7.643552837050026e-06, |
| "loss": 0.7433, |
| "step": 1670 |
| }, |
| { |
| "epoch": 0.6200832947709394, |
| "grad_norm": 1.0668190323716082, |
| "learning_rate": 7.580781044003324e-06, |
| "loss": 0.7375, |
| "step": 1675 |
| }, |
| { |
| "epoch": 0.6219342896807034, |
| "grad_norm": 1.0279605405376717, |
| "learning_rate": 7.518110339251568e-06, |
| "loss": 0.7234, |
| "step": 1680 |
| }, |
| { |
| "epoch": 0.6237852845904673, |
| "grad_norm": 0.9640733339122612, |
| "learning_rate": 7.455543341522042e-06, |
| "loss": 0.7168, |
| "step": 1685 |
| }, |
| { |
| "epoch": 0.6256362795002314, |
| "grad_norm": 0.9907817877992142, |
| "learning_rate": 7.393082665208587e-06, |
| "loss": 0.7053, |
| "step": 1690 |
| }, |
| { |
| "epoch": 0.6274872744099954, |
| "grad_norm": 1.0177580646696514, |
| "learning_rate": 7.33073092026233e-06, |
| "loss": 0.7772, |
| "step": 1695 |
| }, |
| { |
| "epoch": 0.6293382693197593, |
| "grad_norm": 1.0511047158796323, |
| "learning_rate": 7.268490712082667e-06, |
| "loss": 0.7027, |
| "step": 1700 |
| }, |
| { |
| "epoch": 0.6293382693197593, |
| "eval_loss": 0.7508743405342102, |
| "eval_runtime": 125.8608, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.508, |
| "step": 1700 |
| }, |
| { |
| "epoch": 0.6311892642295234, |
| "grad_norm": 0.9786722954600374, |
| "learning_rate": 7.206364641408358e-06, |
| "loss": 0.754, |
| "step": 1705 |
| }, |
| { |
| "epoch": 0.6330402591392874, |
| "grad_norm": 1.001046944711236, |
| "learning_rate": 7.144355304208877e-06, |
| "loss": 0.7351, |
| "step": 1710 |
| }, |
| { |
| "epoch": 0.6348912540490513, |
| "grad_norm": 0.9068727644526208, |
| "learning_rate": 7.082465291575928e-06, |
| "loss": 0.7213, |
| "step": 1715 |
| }, |
| { |
| "epoch": 0.6367422489588154, |
| "grad_norm": 0.9817053840255833, |
| "learning_rate": 7.02069718961518e-06, |
| "loss": 0.7405, |
| "step": 1720 |
| }, |
| { |
| "epoch": 0.6385932438685794, |
| "grad_norm": 0.9963090460429577, |
| "learning_rate": 6.959053579338202e-06, |
| "loss": 0.7329, |
| "step": 1725 |
| }, |
| { |
| "epoch": 0.6404442387783433, |
| "grad_norm": 1.0063439847490308, |
| "learning_rate": 6.8975370365546055e-06, |
| "loss": 0.7466, |
| "step": 1730 |
| }, |
| { |
| "epoch": 0.6422952336881074, |
| "grad_norm": 1.0756259763530167, |
| "learning_rate": 6.836150131764434e-06, |
| "loss": 0.7328, |
| "step": 1735 |
| }, |
| { |
| "epoch": 0.6441462285978714, |
| "grad_norm": 1.0006380504007437, |
| "learning_rate": 6.7748954300507405e-06, |
| "loss": 0.7291, |
| "step": 1740 |
| }, |
| { |
| "epoch": 0.6459972235076353, |
| "grad_norm": 1.0476443249218472, |
| "learning_rate": 6.713775490972396e-06, |
| "loss": 0.722, |
| "step": 1745 |
| }, |
| { |
| "epoch": 0.6478482184173994, |
| "grad_norm": 0.9299325627763796, |
| "learning_rate": 6.652792868457159e-06, |
| "loss": 0.7259, |
| "step": 1750 |
| }, |
| { |
| "epoch": 0.6496992133271634, |
| "grad_norm": 1.0107437601362794, |
| "learning_rate": 6.591950110694928e-06, |
| "loss": 0.7313, |
| "step": 1755 |
| }, |
| { |
| "epoch": 0.6515502082369273, |
| "grad_norm": 1.0116149889736052, |
| "learning_rate": 6.531249760031304e-06, |
| "loss": 0.7495, |
| "step": 1760 |
| }, |
| { |
| "epoch": 0.6534012031466914, |
| "grad_norm": 1.060084766737352, |
| "learning_rate": 6.4706943528613135e-06, |
| "loss": 0.7406, |
| "step": 1765 |
| }, |
| { |
| "epoch": 0.6552521980564553, |
| "grad_norm": 0.9893182231305682, |
| "learning_rate": 6.410286419523457e-06, |
| "loss": 0.7236, |
| "step": 1770 |
| }, |
| { |
| "epoch": 0.6571031929662193, |
| "grad_norm": 1.0249584716250184, |
| "learning_rate": 6.350028484193971e-06, |
| "loss": 0.7735, |
| "step": 1775 |
| }, |
| { |
| "epoch": 0.6589541878759834, |
| "grad_norm": 0.9922083712983304, |
| "learning_rate": 6.2899230647813315e-06, |
| "loss": 0.7092, |
| "step": 1780 |
| }, |
| { |
| "epoch": 0.6608051827857473, |
| "grad_norm": 1.079323750602494, |
| "learning_rate": 6.229972672821081e-06, |
| "loss": 0.6999, |
| "step": 1785 |
| }, |
| { |
| "epoch": 0.6626561776955113, |
| "grad_norm": 0.9678468916444759, |
| "learning_rate": 6.170179813370838e-06, |
| "loss": 0.7215, |
| "step": 1790 |
| }, |
| { |
| "epoch": 0.6645071726052754, |
| "grad_norm": 0.9400337665409387, |
| "learning_rate": 6.110546984905661e-06, |
| "loss": 0.7189, |
| "step": 1795 |
| }, |
| { |
| "epoch": 0.6663581675150393, |
| "grad_norm": 0.9282443073802316, |
| "learning_rate": 6.051076679213632e-06, |
| "loss": 0.7228, |
| "step": 1800 |
| }, |
| { |
| "epoch": 0.6663581675150393, |
| "eval_loss": 0.7472089529037476, |
| "eval_runtime": 125.9422, |
| "eval_samples_per_second": 1.016, |
| "eval_steps_per_second": 0.508, |
| "step": 1800 |
| }, |
| { |
| "epoch": 0.6682091624248033, |
| "grad_norm": 0.9554957232923886, |
| "learning_rate": 5.991771381291727e-06, |
| "loss": 0.7502, |
| "step": 1805 |
| }, |
| { |
| "epoch": 0.6700601573345674, |
| "grad_norm": 1.0695199061812564, |
| "learning_rate": 5.932633569242e-06, |
| "loss": 0.7725, |
| "step": 1810 |
| }, |
| { |
| "epoch": 0.6719111522443313, |
| "grad_norm": 0.9775812687514566, |
| "learning_rate": 5.8736657141680066e-06, |
| "loss": 0.7201, |
| "step": 1815 |
| }, |
| { |
| "epoch": 0.6737621471540953, |
| "grad_norm": 1.0498313112511333, |
| "learning_rate": 5.814870280071581e-06, |
| "loss": 0.7066, |
| "step": 1820 |
| }, |
| { |
| "epoch": 0.6756131420638594, |
| "grad_norm": 0.8996562202890858, |
| "learning_rate": 5.756249723749847e-06, |
| "loss": 0.6839, |
| "step": 1825 |
| }, |
| { |
| "epoch": 0.6774641369736233, |
| "grad_norm": 0.9343357642622071, |
| "learning_rate": 5.697806494692575e-06, |
| "loss": 0.7006, |
| "step": 1830 |
| }, |
| { |
| "epoch": 0.6793151318833873, |
| "grad_norm": 0.9122261783600852, |
| "learning_rate": 5.6395430349798376e-06, |
| "loss": 0.71, |
| "step": 1835 |
| }, |
| { |
| "epoch": 0.6811661267931514, |
| "grad_norm": 1.0003802308196195, |
| "learning_rate": 5.581461779179924e-06, |
| "loss": 0.7352, |
| "step": 1840 |
| }, |
| { |
| "epoch": 0.6830171217029153, |
| "grad_norm": 0.9577445500508659, |
| "learning_rate": 5.5235651542476745e-06, |
| "loss": 0.7187, |
| "step": 1845 |
| }, |
| { |
| "epoch": 0.6848681166126793, |
| "grad_norm": 0.9048825285696591, |
| "learning_rate": 5.465855579423012e-06, |
| "loss": 0.6786, |
| "step": 1850 |
| }, |
| { |
| "epoch": 0.6867191115224434, |
| "grad_norm": 0.9658805694805159, |
| "learning_rate": 5.4083354661298816e-06, |
| "loss": 0.7012, |
| "step": 1855 |
| }, |
| { |
| "epoch": 0.6885701064322073, |
| "grad_norm": 0.9198866529754813, |
| "learning_rate": 5.351007217875493e-06, |
| "loss": 0.7193, |
| "step": 1860 |
| }, |
| { |
| "epoch": 0.6904211013419713, |
| "grad_norm": 0.9860185880901673, |
| "learning_rate": 5.293873230149851e-06, |
| "loss": 0.6928, |
| "step": 1865 |
| }, |
| { |
| "epoch": 0.6922720962517354, |
| "grad_norm": 0.9158051945254762, |
| "learning_rate": 5.236935890325717e-06, |
| "loss": 0.7283, |
| "step": 1870 |
| }, |
| { |
| "epoch": 0.6941230911614993, |
| "grad_norm": 1.0229541633123165, |
| "learning_rate": 5.180197577558792e-06, |
| "loss": 0.6934, |
| "step": 1875 |
| }, |
| { |
| "epoch": 0.6959740860712633, |
| "grad_norm": 1.058766779890904, |
| "learning_rate": 5.123660662688352e-06, |
| "loss": 0.7485, |
| "step": 1880 |
| }, |
| { |
| "epoch": 0.6978250809810274, |
| "grad_norm": 0.8745098195983263, |
| "learning_rate": 5.067327508138148e-06, |
| "loss": 0.7205, |
| "step": 1885 |
| }, |
| { |
| "epoch": 0.6996760758907913, |
| "grad_norm": 0.9669286665541216, |
| "learning_rate": 5.0112004678177e-06, |
| "loss": 0.7461, |
| "step": 1890 |
| }, |
| { |
| "epoch": 0.7015270708005553, |
| "grad_norm": 0.9439248557044554, |
| "learning_rate": 4.955281887023955e-06, |
| "loss": 0.7075, |
| "step": 1895 |
| }, |
| { |
| "epoch": 0.7033780657103192, |
| "grad_norm": 0.9612004193161535, |
| "learning_rate": 4.899574102343247e-06, |
| "loss": 0.7206, |
| "step": 1900 |
| }, |
| { |
| "epoch": 0.7033780657103192, |
| "eval_loss": 0.7448270320892334, |
| "eval_runtime": 125.9022, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.508, |
| "step": 1900 |
| }, |
| { |
| "epoch": 0.7052290606200833, |
| "grad_norm": 0.9770375482265843, |
| "learning_rate": 4.844079441553717e-06, |
| "loss": 0.7312, |
| "step": 1905 |
| }, |
| { |
| "epoch": 0.7070800555298473, |
| "grad_norm": 0.9346202648428155, |
| "learning_rate": 4.7888002235279915e-06, |
| "loss": 0.7138, |
| "step": 1910 |
| }, |
| { |
| "epoch": 0.7089310504396112, |
| "grad_norm": 1.043008859124202, |
| "learning_rate": 4.733738758136327e-06, |
| "loss": 0.7201, |
| "step": 1915 |
| }, |
| { |
| "epoch": 0.7107820453493753, |
| "grad_norm": 0.9898196140644209, |
| "learning_rate": 4.678897346150067e-06, |
| "loss": 0.666, |
| "step": 1920 |
| }, |
| { |
| "epoch": 0.7126330402591393, |
| "grad_norm": 1.030447829309688, |
| "learning_rate": 4.624278279145509e-06, |
| "loss": 0.6968, |
| "step": 1925 |
| }, |
| { |
| "epoch": 0.7144840351689032, |
| "grad_norm": 1.0002038057708351, |
| "learning_rate": 4.56988383940817e-06, |
| "loss": 0.6815, |
| "step": 1930 |
| }, |
| { |
| "epoch": 0.7163350300786673, |
| "grad_norm": 0.8768349207452324, |
| "learning_rate": 4.515716299837376e-06, |
| "loss": 0.7143, |
| "step": 1935 |
| }, |
| { |
| "epoch": 0.7181860249884313, |
| "grad_norm": 0.9922854914505618, |
| "learning_rate": 4.461777923851337e-06, |
| "loss": 0.7168, |
| "step": 1940 |
| }, |
| { |
| "epoch": 0.7200370198981952, |
| "grad_norm": 0.9051677770762211, |
| "learning_rate": 4.408070965292534e-06, |
| "loss": 0.6871, |
| "step": 1945 |
| }, |
| { |
| "epoch": 0.7218880148079593, |
| "grad_norm": 0.9724233713765718, |
| "learning_rate": 4.354597668333551e-06, |
| "loss": 0.7079, |
| "step": 1950 |
| }, |
| { |
| "epoch": 0.7237390097177233, |
| "grad_norm": 1.0146841138791294, |
| "learning_rate": 4.30136026738332e-06, |
| "loss": 0.754, |
| "step": 1955 |
| }, |
| { |
| "epoch": 0.7255900046274872, |
| "grad_norm": 0.9260856251952512, |
| "learning_rate": 4.2483609869937115e-06, |
| "loss": 0.7366, |
| "step": 1960 |
| }, |
| { |
| "epoch": 0.7274409995372513, |
| "grad_norm": 0.9416530579594775, |
| "learning_rate": 4.195602041766638e-06, |
| "loss": 0.6743, |
| "step": 1965 |
| }, |
| { |
| "epoch": 0.7292919944470153, |
| "grad_norm": 0.9416273470678079, |
| "learning_rate": 4.143085636261452e-06, |
| "loss": 0.7548, |
| "step": 1970 |
| }, |
| { |
| "epoch": 0.7311429893567792, |
| "grad_norm": 0.9500826070023658, |
| "learning_rate": 4.090813964902889e-06, |
| "loss": 0.7318, |
| "step": 1975 |
| }, |
| { |
| "epoch": 0.7329939842665433, |
| "grad_norm": 1.044688650303936, |
| "learning_rate": 4.038789211889329e-06, |
| "loss": 0.691, |
| "step": 1980 |
| }, |
| { |
| "epoch": 0.7348449791763073, |
| "grad_norm": 0.9723998488803949, |
| "learning_rate": 3.987013551101543e-06, |
| "loss": 0.7205, |
| "step": 1985 |
| }, |
| { |
| "epoch": 0.7366959740860712, |
| "grad_norm": 1.0125026598661098, |
| "learning_rate": 3.9354891460118695e-06, |
| "loss": 0.7348, |
| "step": 1990 |
| }, |
| { |
| "epoch": 0.7385469689958353, |
| "grad_norm": 0.9786383589933646, |
| "learning_rate": 3.884218149593776e-06, |
| "loss": 0.6945, |
| "step": 1995 |
| }, |
| { |
| "epoch": 0.7403979639055993, |
| "grad_norm": 1.0768006764875049, |
| "learning_rate": 3.833202704231944e-06, |
| "loss": 0.7419, |
| "step": 2000 |
| }, |
| { |
| "epoch": 0.7403979639055993, |
| "eval_loss": 0.7422940135002136, |
| "eval_runtime": 125.9877, |
| "eval_samples_per_second": 1.016, |
| "eval_steps_per_second": 0.508, |
| "step": 2000 |
| }, |
| { |
| "epoch": 0.7422489588153632, |
| "grad_norm": 0.9714783620843155, |
| "learning_rate": 3.7824449416327123e-06, |
| "loss": 0.7486, |
| "step": 2005 |
| }, |
| { |
| "epoch": 0.7440999537251273, |
| "grad_norm": 0.9621418965839199, |
| "learning_rate": 3.7319469827350117e-06, |
| "loss": 0.7492, |
| "step": 2010 |
| }, |
| { |
| "epoch": 0.7459509486348913, |
| "grad_norm": 0.8838654671554304, |
| "learning_rate": 3.6817109376217574e-06, |
| "loss": 0.6973, |
| "step": 2015 |
| }, |
| { |
| "epoch": 0.7478019435446552, |
| "grad_norm": 1.0141612675604508, |
| "learning_rate": 3.631738905431641e-06, |
| "loss": 0.633, |
| "step": 2020 |
| }, |
| { |
| "epoch": 0.7496529384544193, |
| "grad_norm": 1.2478799462658503, |
| "learning_rate": 3.5820329742714666e-06, |
| "loss": 0.723, |
| "step": 2025 |
| }, |
| { |
| "epoch": 0.7515039333641832, |
| "grad_norm": 1.2113929317378798, |
| "learning_rate": 3.532595221128843e-06, |
| "loss": 0.7189, |
| "step": 2030 |
| }, |
| { |
| "epoch": 0.7533549282739472, |
| "grad_norm": 1.076623825378696, |
| "learning_rate": 3.483427711785449e-06, |
| "loss": 0.6935, |
| "step": 2035 |
| }, |
| { |
| "epoch": 0.7552059231837113, |
| "grad_norm": 1.0039638646776863, |
| "learning_rate": 3.4345325007306752e-06, |
| "loss": 0.7032, |
| "step": 2040 |
| }, |
| { |
| "epoch": 0.7570569180934752, |
| "grad_norm": 1.0481522906376972, |
| "learning_rate": 3.38591163107579e-06, |
| "loss": 0.7537, |
| "step": 2045 |
| }, |
| { |
| "epoch": 0.7589079130032392, |
| "grad_norm": 1.0167538112300836, |
| "learning_rate": 3.337567134468579e-06, |
| "loss": 0.7492, |
| "step": 2050 |
| }, |
| { |
| "epoch": 0.7607589079130033, |
| "grad_norm": 1.11799756175693, |
| "learning_rate": 3.2895010310084174e-06, |
| "loss": 0.687, |
| "step": 2055 |
| }, |
| { |
| "epoch": 0.7626099028227672, |
| "grad_norm": 1.026751398004723, |
| "learning_rate": 3.241715329161903e-06, |
| "loss": 0.7888, |
| "step": 2060 |
| }, |
| { |
| "epoch": 0.7644608977325312, |
| "grad_norm": 0.898992232633679, |
| "learning_rate": 3.1942120256788966e-06, |
| "loss": 0.7056, |
| "step": 2065 |
| }, |
| { |
| "epoch": 0.7663118926422953, |
| "grad_norm": 0.9452341682541248, |
| "learning_rate": 3.146993105509104e-06, |
| "loss": 0.6935, |
| "step": 2070 |
| }, |
| { |
| "epoch": 0.7681628875520592, |
| "grad_norm": 1.0381440177041223, |
| "learning_rate": 3.10006054171913e-06, |
| "loss": 0.6718, |
| "step": 2075 |
| }, |
| { |
| "epoch": 0.7700138824618232, |
| "grad_norm": 0.90151266081136, |
| "learning_rate": 3.0534162954100264e-06, |
| "loss": 0.734, |
| "step": 2080 |
| }, |
| { |
| "epoch": 0.7718648773715873, |
| "grad_norm": 0.9562956991505516, |
| "learning_rate": 3.0070623156353685e-06, |
| "loss": 0.7341, |
| "step": 2085 |
| }, |
| { |
| "epoch": 0.7737158722813512, |
| "grad_norm": 0.8801270739522954, |
| "learning_rate": 2.9610005393197707e-06, |
| "loss": 0.6526, |
| "step": 2090 |
| }, |
| { |
| "epoch": 0.7755668671911152, |
| "grad_norm": 0.9441163162257625, |
| "learning_rate": 2.9152328911780027e-06, |
| "loss": 0.6828, |
| "step": 2095 |
| }, |
| { |
| "epoch": 0.7774178621008793, |
| "grad_norm": 0.9634570912343101, |
| "learning_rate": 2.869761283634526e-06, |
| "loss": 0.7299, |
| "step": 2100 |
| }, |
| { |
| "epoch": 0.7774178621008793, |
| "eval_loss": 0.7405505180358887, |
| "eval_runtime": 125.9141, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.508, |
| "step": 2100 |
| }, |
| { |
| "epoch": 0.7792688570106432, |
| "grad_norm": 0.9315181675274804, |
| "learning_rate": 2.8245876167435924e-06, |
| "loss": 0.7134, |
| "step": 2105 |
| }, |
| { |
| "epoch": 0.7811198519204072, |
| "grad_norm": 0.899902372156068, |
| "learning_rate": 2.779713778109867e-06, |
| "loss": 0.7112, |
| "step": 2110 |
| }, |
| { |
| "epoch": 0.7829708468301713, |
| "grad_norm": 0.9552279346547553, |
| "learning_rate": 2.7351416428095157e-06, |
| "loss": 0.7195, |
| "step": 2115 |
| }, |
| { |
| "epoch": 0.7848218417399352, |
| "grad_norm": 0.9400225445949076, |
| "learning_rate": 2.6908730733119025e-06, |
| "loss": 0.7465, |
| "step": 2120 |
| }, |
| { |
| "epoch": 0.7866728366496992, |
| "grad_norm": 0.9800712358563762, |
| "learning_rate": 2.6469099194017144e-06, |
| "loss": 0.739, |
| "step": 2125 |
| }, |
| { |
| "epoch": 0.7885238315594633, |
| "grad_norm": 0.9495123514350187, |
| "learning_rate": 2.603254018101715e-06, |
| "loss": 0.7729, |
| "step": 2130 |
| }, |
| { |
| "epoch": 0.7903748264692272, |
| "grad_norm": 0.981224001693797, |
| "learning_rate": 2.5599071935959495e-06, |
| "loss": 0.6977, |
| "step": 2135 |
| }, |
| { |
| "epoch": 0.7922258213789912, |
| "grad_norm": 0.979970394782048, |
| "learning_rate": 2.5168712571535305e-06, |
| "loss": 0.7, |
| "step": 2140 |
| }, |
| { |
| "epoch": 0.7940768162887553, |
| "grad_norm": 1.0931405844387208, |
| "learning_rate": 2.4741480070529657e-06, |
| "loss": 0.7212, |
| "step": 2145 |
| }, |
| { |
| "epoch": 0.7959278111985192, |
| "grad_norm": 0.9525278212316626, |
| "learning_rate": 2.4317392285069885e-06, |
| "loss": 0.7107, |
| "step": 2150 |
| }, |
| { |
| "epoch": 0.7977788061082832, |
| "grad_norm": 0.9587323014669643, |
| "learning_rate": 2.3896466935879957e-06, |
| "loss": 0.6903, |
| "step": 2155 |
| }, |
| { |
| "epoch": 0.7996298010180471, |
| "grad_norm": 0.8878613939635316, |
| "learning_rate": 2.3478721611539712e-06, |
| "loss": 0.755, |
| "step": 2160 |
| }, |
| { |
| "epoch": 0.8014807959278112, |
| "grad_norm": 0.9724146216671814, |
| "learning_rate": 2.3064173767750055e-06, |
| "loss": 0.695, |
| "step": 2165 |
| }, |
| { |
| "epoch": 0.8033317908375752, |
| "grad_norm": 1.0468722007497135, |
| "learning_rate": 2.265284072660362e-06, |
| "loss": 0.7233, |
| "step": 2170 |
| }, |
| { |
| "epoch": 0.8051827857473391, |
| "grad_norm": 1.0316210745516678, |
| "learning_rate": 2.224473967586073e-06, |
| "loss": 0.7607, |
| "step": 2175 |
| }, |
| { |
| "epoch": 0.8070337806571032, |
| "grad_norm": 1.0548409137457446, |
| "learning_rate": 2.1839887668231486e-06, |
| "loss": 0.697, |
| "step": 2180 |
| }, |
| { |
| "epoch": 0.8088847755668672, |
| "grad_norm": 1.0039170894073437, |
| "learning_rate": 2.1438301620662994e-06, |
| "loss": 0.7324, |
| "step": 2185 |
| }, |
| { |
| "epoch": 0.8107357704766311, |
| "grad_norm": 0.9588651745318834, |
| "learning_rate": 2.103999831363258e-06, |
| "loss": 0.6748, |
| "step": 2190 |
| }, |
| { |
| "epoch": 0.8125867653863952, |
| "grad_norm": 0.8996370636189378, |
| "learning_rate": 2.064499439044657e-06, |
| "loss": 0.73, |
| "step": 2195 |
| }, |
| { |
| "epoch": 0.8144377602961592, |
| "grad_norm": 0.9239380519912793, |
| "learning_rate": 2.0253306356544843e-06, |
| "loss": 0.7057, |
| "step": 2200 |
| }, |
| { |
| "epoch": 0.8144377602961592, |
| "eval_loss": 0.738842248916626, |
| "eval_runtime": 125.9296, |
| "eval_samples_per_second": 1.016, |
| "eval_steps_per_second": 0.508, |
| "step": 2200 |
| }, |
| { |
| "epoch": 0.8162887552059231, |
| "grad_norm": 0.9429244883085828, |
| "learning_rate": 1.986495057881126e-06, |
| "loss": 0.6772, |
| "step": 2205 |
| }, |
| { |
| "epoch": 0.8181397501156872, |
| "grad_norm": 1.0195318731984253, |
| "learning_rate": 1.947994328488949e-06, |
| "loss": 0.7356, |
| "step": 2210 |
| }, |
| { |
| "epoch": 0.8199907450254512, |
| "grad_norm": 0.9756633823952066, |
| "learning_rate": 1.9098300562505266e-06, |
| "loss": 0.7717, |
| "step": 2215 |
| }, |
| { |
| "epoch": 0.8218417399352151, |
| "grad_norm": 0.9865779961666936, |
| "learning_rate": 1.872003835879389e-06, |
| "loss": 0.6844, |
| "step": 2220 |
| }, |
| { |
| "epoch": 0.8236927348449792, |
| "grad_norm": 1.1283985725895185, |
| "learning_rate": 1.8345172479633977e-06, |
| "loss": 0.6513, |
| "step": 2225 |
| }, |
| { |
| "epoch": 0.8255437297547432, |
| "grad_norm": 1.008863839428332, |
| "learning_rate": 1.79737185889871e-06, |
| "loss": 0.6812, |
| "step": 2230 |
| }, |
| { |
| "epoch": 0.8273947246645071, |
| "grad_norm": 0.9300553415281679, |
| "learning_rate": 1.7605692208242953e-06, |
| "loss": 0.6909, |
| "step": 2235 |
| }, |
| { |
| "epoch": 0.8292457195742712, |
| "grad_norm": 0.9710044483167964, |
| "learning_rate": 1.7241108715571197e-06, |
| "loss": 0.718, |
| "step": 2240 |
| }, |
| { |
| "epoch": 0.8310967144840352, |
| "grad_norm": 1.015816894080233, |
| "learning_rate": 1.6879983345278528e-06, |
| "loss": 0.6821, |
| "step": 2245 |
| }, |
| { |
| "epoch": 0.8329477093937991, |
| "grad_norm": 0.9542344503119925, |
| "learning_rate": 1.652233118717229e-06, |
| "loss": 0.719, |
| "step": 2250 |
| }, |
| { |
| "epoch": 0.8347987043035632, |
| "grad_norm": 0.9708933601771149, |
| "learning_rate": 1.6168167185929883e-06, |
| "loss": 0.7041, |
| "step": 2255 |
| }, |
| { |
| "epoch": 0.8366496992133272, |
| "grad_norm": 1.005762211417554, |
| "learning_rate": 1.5817506140474248e-06, |
| "loss": 0.7491, |
| "step": 2260 |
| }, |
| { |
| "epoch": 0.8385006941230911, |
| "grad_norm": 1.1324500609372297, |
| "learning_rate": 1.5470362703355678e-06, |
| "loss": 0.7599, |
| "step": 2265 |
| }, |
| { |
| "epoch": 0.8403516890328552, |
| "grad_norm": 1.0774483423911148, |
| "learning_rate": 1.5126751380139203e-06, |
| "loss": 0.7304, |
| "step": 2270 |
| }, |
| { |
| "epoch": 0.8422026839426192, |
| "grad_norm": 0.9746075541108256, |
| "learning_rate": 1.4786686528798878e-06, |
| "loss": 0.7859, |
| "step": 2275 |
| }, |
| { |
| "epoch": 0.8440536788523831, |
| "grad_norm": 0.9132206341810313, |
| "learning_rate": 1.4450182359117493e-06, |
| "loss": 0.6567, |
| "step": 2280 |
| }, |
| { |
| "epoch": 0.8459046737621472, |
| "grad_norm": 0.9843969970761268, |
| "learning_rate": 1.4117252932093007e-06, |
| "loss": 0.7079, |
| "step": 2285 |
| }, |
| { |
| "epoch": 0.8477556686719111, |
| "grad_norm": 0.909900305880051, |
| "learning_rate": 1.3787912159350903e-06, |
| "loss": 0.7092, |
| "step": 2290 |
| }, |
| { |
| "epoch": 0.8496066635816751, |
| "grad_norm": 0.9862835059860517, |
| "learning_rate": 1.3462173802562917e-06, |
| "loss": 0.688, |
| "step": 2295 |
| }, |
| { |
| "epoch": 0.8514576584914392, |
| "grad_norm": 0.9817682414409103, |
| "learning_rate": 1.3140051472872062e-06, |
| "loss": 0.7146, |
| "step": 2300 |
| }, |
| { |
| "epoch": 0.8514576584914392, |
| "eval_loss": 0.7375185489654541, |
| "eval_runtime": 125.8616, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.508, |
| "step": 2300 |
| }, |
| { |
| "epoch": 0.8533086534012031, |
| "grad_norm": 0.97635505354809, |
| "learning_rate": 1.282155863032377e-06, |
| "loss": 0.6785, |
| "step": 2305 |
| }, |
| { |
| "epoch": 0.8551596483109671, |
| "grad_norm": 0.9432652702625532, |
| "learning_rate": 1.2506708583303495e-06, |
| "loss": 0.6829, |
| "step": 2310 |
| }, |
| { |
| "epoch": 0.8570106432207312, |
| "grad_norm": 1.0500105672657278, |
| "learning_rate": 1.2195514487980643e-06, |
| "loss": 0.7097, |
| "step": 2315 |
| }, |
| { |
| "epoch": 0.8588616381304951, |
| "grad_norm": 0.9677123461437018, |
| "learning_rate": 1.188798934775881e-06, |
| "loss": 0.7271, |
| "step": 2320 |
| }, |
| { |
| "epoch": 0.8607126330402591, |
| "grad_norm": 0.9822692297793124, |
| "learning_rate": 1.158414601273251e-06, |
| "loss": 0.74, |
| "step": 2325 |
| }, |
| { |
| "epoch": 0.8625636279500232, |
| "grad_norm": 1.02828865110176, |
| "learning_rate": 1.1283997179149987e-06, |
| "loss": 0.7406, |
| "step": 2330 |
| }, |
| { |
| "epoch": 0.8644146228597871, |
| "grad_norm": 1.0596259638715508, |
| "learning_rate": 1.0987555388883042e-06, |
| "loss": 0.6883, |
| "step": 2335 |
| }, |
| { |
| "epoch": 0.8662656177695511, |
| "grad_norm": 0.9898076031142375, |
| "learning_rate": 1.0694833028902686e-06, |
| "loss": 0.7012, |
| "step": 2340 |
| }, |
| { |
| "epoch": 0.8681166126793152, |
| "grad_norm": 0.9639230942285071, |
| "learning_rate": 1.0405842330761651e-06, |
| "loss": 0.7491, |
| "step": 2345 |
| }, |
| { |
| "epoch": 0.8699676075890791, |
| "grad_norm": 0.9865850178730345, |
| "learning_rate": 1.012059537008332e-06, |
| "loss": 0.7295, |
| "step": 2350 |
| }, |
| { |
| "epoch": 0.8718186024988431, |
| "grad_norm": 0.9302298993217906, |
| "learning_rate": 9.839104066057025e-07, |
| "loss": 0.7505, |
| "step": 2355 |
| }, |
| { |
| "epoch": 0.8736695974086072, |
| "grad_norm": 0.9246047712950533, |
| "learning_rate": 9.561380180940182e-07, |
| "loss": 0.7165, |
| "step": 2360 |
| }, |
| { |
| "epoch": 0.8755205923183711, |
| "grad_norm": 0.9341328111976939, |
| "learning_rate": 9.287435319566618e-07, |
| "loss": 0.6928, |
| "step": 2365 |
| }, |
| { |
| "epoch": 0.8773715872281351, |
| "grad_norm": 1.0029919467384116, |
| "learning_rate": 9.017280928861727e-07, |
| "loss": 0.7503, |
| "step": 2370 |
| }, |
| { |
| "epoch": 0.8792225821378992, |
| "grad_norm": 0.924763857124508, |
| "learning_rate": 8.750928297364192e-07, |
| "loss": 0.6788, |
| "step": 2375 |
| }, |
| { |
| "epoch": 0.8810735770476631, |
| "grad_norm": 0.9276137090660882, |
| "learning_rate": 8.488388554754223e-07, |
| "loss": 0.6975, |
| "step": 2380 |
| }, |
| { |
| "epoch": 0.8829245719574271, |
| "grad_norm": 0.9511441358772591, |
| "learning_rate": 8.229672671388578e-07, |
| "loss": 0.7379, |
| "step": 2385 |
| }, |
| { |
| "epoch": 0.8847755668671912, |
| "grad_norm": 0.9703292177752127, |
| "learning_rate": 7.974791457842012e-07, |
| "loss": 0.6863, |
| "step": 2390 |
| }, |
| { |
| "epoch": 0.8866265617769551, |
| "grad_norm": 0.9665966427045252, |
| "learning_rate": 7.723755564455771e-07, |
| "loss": 0.7569, |
| "step": 2395 |
| }, |
| { |
| "epoch": 0.8884775566867191, |
| "grad_norm": 0.9199955573458448, |
| "learning_rate": 7.476575480892357e-07, |
| "loss": 0.7246, |
| "step": 2400 |
| }, |
| { |
| "epoch": 0.8884775566867191, |
| "eval_loss": 0.7366934418678284, |
| "eval_runtime": 125.8212, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.509, |
| "step": 2400 |
| }, |
| { |
| "epoch": 0.8903285515964832, |
| "grad_norm": 0.9866170842794822, |
| "learning_rate": 7.233261535697333e-07, |
| "loss": 0.6831, |
| "step": 2405 |
| }, |
| { |
| "epoch": 0.8921795465062471, |
| "grad_norm": 0.9020922896178015, |
| "learning_rate": 6.99382389586769e-07, |
| "loss": 0.7543, |
| "step": 2410 |
| }, |
| { |
| "epoch": 0.8940305414160111, |
| "grad_norm": 0.8891310119602659, |
| "learning_rate": 6.758272566427027e-07, |
| "loss": 0.6992, |
| "step": 2415 |
| }, |
| { |
| "epoch": 0.895881536325775, |
| "grad_norm": 1.0258150846316885, |
| "learning_rate": 6.526617390007506e-07, |
| "loss": 0.7159, |
| "step": 2420 |
| }, |
| { |
| "epoch": 0.8977325312355391, |
| "grad_norm": 0.9315994997755779, |
| "learning_rate": 6.298868046438533e-07, |
| "loss": 0.7339, |
| "step": 2425 |
| }, |
| { |
| "epoch": 0.8995835261453031, |
| "grad_norm": 0.8794790525509596, |
| "learning_rate": 6.075034052342288e-07, |
| "loss": 0.6877, |
| "step": 2430 |
| }, |
| { |
| "epoch": 0.901434521055067, |
| "grad_norm": 1.0216931119406394, |
| "learning_rate": 5.855124760736119e-07, |
| "loss": 0.7273, |
| "step": 2435 |
| }, |
| { |
| "epoch": 0.9032855159648311, |
| "grad_norm": 0.9162022348716465, |
| "learning_rate": 5.63914936064165e-07, |
| "loss": 0.6129, |
| "step": 2440 |
| }, |
| { |
| "epoch": 0.9051365108745951, |
| "grad_norm": 1.0548470483887789, |
| "learning_rate": 5.427116876700877e-07, |
| "loss": 0.7462, |
| "step": 2445 |
| }, |
| { |
| "epoch": 0.906987505784359, |
| "grad_norm": 0.9119109915842329, |
| "learning_rate": 5.219036168798986e-07, |
| "loss": 0.7091, |
| "step": 2450 |
| }, |
| { |
| "epoch": 0.9088385006941231, |
| "grad_norm": 0.9773112801791846, |
| "learning_rate": 5.014915931694253e-07, |
| "loss": 0.7402, |
| "step": 2455 |
| }, |
| { |
| "epoch": 0.9106894956038871, |
| "grad_norm": 0.967120980381721, |
| "learning_rate": 4.81476469465465e-07, |
| "loss": 0.7563, |
| "step": 2460 |
| }, |
| { |
| "epoch": 0.912540490513651, |
| "grad_norm": 1.0900216384603985, |
| "learning_rate": 4.618590821101432e-07, |
| "loss": 0.7257, |
| "step": 2465 |
| }, |
| { |
| "epoch": 0.9143914854234151, |
| "grad_norm": 0.9410281325856403, |
| "learning_rate": 4.4264025082597084e-07, |
| "loss": 0.726, |
| "step": 2470 |
| }, |
| { |
| "epoch": 0.9162424803331791, |
| "grad_norm": 0.9555645068242209, |
| "learning_rate": 4.2382077868159286e-07, |
| "loss": 0.6977, |
| "step": 2475 |
| }, |
| { |
| "epoch": 0.918093475242943, |
| "grad_norm": 1.1057620347138868, |
| "learning_rate": 4.054014520582283e-07, |
| "loss": 0.7611, |
| "step": 2480 |
| }, |
| { |
| "epoch": 0.9199444701527071, |
| "grad_norm": 0.9023240417887544, |
| "learning_rate": 3.8738304061681107e-07, |
| "loss": 0.7097, |
| "step": 2485 |
| }, |
| { |
| "epoch": 0.9217954650624711, |
| "grad_norm": 0.8961027836341736, |
| "learning_rate": 3.6976629726583115e-07, |
| "loss": 0.7533, |
| "step": 2490 |
| }, |
| { |
| "epoch": 0.923646459972235, |
| "grad_norm": 0.9463370594210976, |
| "learning_rate": 3.525519581298731e-07, |
| "loss": 0.7657, |
| "step": 2495 |
| }, |
| { |
| "epoch": 0.9254974548819991, |
| "grad_norm": 0.8857551112046717, |
| "learning_rate": 3.357407425188541e-07, |
| "loss": 0.6637, |
| "step": 2500 |
| }, |
| { |
| "epoch": 0.9254974548819991, |
| "eval_loss": 0.736148476600647, |
| "eval_runtime": 125.8161, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.509, |
| "step": 2500 |
| }, |
| { |
| "epoch": 0.9273484497917631, |
| "grad_norm": 0.9557036435732803, |
| "learning_rate": 3.1933335289797364e-07, |
| "loss": 0.7211, |
| "step": 2505 |
| }, |
| { |
| "epoch": 0.929199444701527, |
| "grad_norm": 0.9411968516873396, |
| "learning_rate": 3.033304748583543e-07, |
| "loss": 0.6711, |
| "step": 2510 |
| }, |
| { |
| "epoch": 0.9310504396112911, |
| "grad_norm": 0.9529612292006595, |
| "learning_rate": 2.877327770883964e-07, |
| "loss": 0.7352, |
| "step": 2515 |
| }, |
| { |
| "epoch": 0.9329014345210551, |
| "grad_norm": 0.9072504944314087, |
| "learning_rate": 2.7254091134583995e-07, |
| "loss": 0.702, |
| "step": 2520 |
| }, |
| { |
| "epoch": 0.934752429430819, |
| "grad_norm": 1.0216263771503715, |
| "learning_rate": 2.577555124305209e-07, |
| "loss": 0.6984, |
| "step": 2525 |
| }, |
| { |
| "epoch": 0.9366034243405831, |
| "grad_norm": 0.9271796888513735, |
| "learning_rate": 2.433771981578581e-07, |
| "loss": 0.7594, |
| "step": 2530 |
| }, |
| { |
| "epoch": 0.9384544192503471, |
| "grad_norm": 0.9325885923721386, |
| "learning_rate": 2.2940656933302607e-07, |
| "loss": 0.721, |
| "step": 2535 |
| }, |
| { |
| "epoch": 0.940305414160111, |
| "grad_norm": 1.0530858890236963, |
| "learning_rate": 2.1584420972586174e-07, |
| "loss": 0.7108, |
| "step": 2540 |
| }, |
| { |
| "epoch": 0.9421564090698751, |
| "grad_norm": 1.9244924879459369, |
| "learning_rate": 2.0269068604646058e-07, |
| "loss": 0.7149, |
| "step": 2545 |
| }, |
| { |
| "epoch": 0.944007403979639, |
| "grad_norm": 1.0361671451409022, |
| "learning_rate": 1.8994654792150125e-07, |
| "loss": 0.7285, |
| "step": 2550 |
| }, |
| { |
| "epoch": 0.945858398889403, |
| "grad_norm": 0.9757871793671443, |
| "learning_rate": 1.7761232787127936e-07, |
| "loss": 0.6992, |
| "step": 2555 |
| }, |
| { |
| "epoch": 0.9477093937991671, |
| "grad_norm": 0.9675283656651317, |
| "learning_rate": 1.6568854128745537e-07, |
| "loss": 0.7044, |
| "step": 2560 |
| }, |
| { |
| "epoch": 0.949560388708931, |
| "grad_norm": 0.9846806445819078, |
| "learning_rate": 1.5417568641151848e-07, |
| "loss": 0.6704, |
| "step": 2565 |
| }, |
| { |
| "epoch": 0.951411383618695, |
| "grad_norm": 0.9281209052786342, |
| "learning_rate": 1.4307424431396654e-07, |
| "loss": 0.7176, |
| "step": 2570 |
| }, |
| { |
| "epoch": 0.9532623785284591, |
| "grad_norm": 0.8999935681059046, |
| "learning_rate": 1.323846788742078e-07, |
| "loss": 0.7003, |
| "step": 2575 |
| }, |
| { |
| "epoch": 0.955113373438223, |
| "grad_norm": 0.9213878579995839, |
| "learning_rate": 1.2210743676117188e-07, |
| "loss": 0.6906, |
| "step": 2580 |
| }, |
| { |
| "epoch": 0.956964368347987, |
| "grad_norm": 0.9250556197998173, |
| "learning_rate": 1.1224294741464914e-07, |
| "loss": 0.6794, |
| "step": 2585 |
| }, |
| { |
| "epoch": 0.9588153632577511, |
| "grad_norm": 0.9221148649933307, |
| "learning_rate": 1.0279162302734624e-07, |
| "loss": 0.6645, |
| "step": 2590 |
| }, |
| { |
| "epoch": 0.960666358167515, |
| "grad_norm": 0.9308662826828088, |
| "learning_rate": 9.375385852765983e-08, |
| "loss": 0.7139, |
| "step": 2595 |
| }, |
| { |
| "epoch": 0.962517353077279, |
| "grad_norm": 0.9640193787486492, |
| "learning_rate": 8.513003156317978e-08, |
| "loss": 0.7416, |
| "step": 2600 |
| }, |
| { |
| "epoch": 0.962517353077279, |
| "eval_loss": 0.7359679937362671, |
| "eval_runtime": 125.8349, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.509, |
| "step": 2600 |
| }, |
| { |
| "epoch": 0.9643683479870431, |
| "grad_norm": 0.9534219021183425, |
| "learning_rate": 7.692050248490291e-08, |
| "loss": 0.7339, |
| "step": 2605 |
| }, |
| { |
| "epoch": 0.966219342896807, |
| "grad_norm": 0.9352372053413327, |
| "learning_rate": 6.912561433217946e-08, |
| "loss": 0.7221, |
| "step": 2610 |
| }, |
| { |
| "epoch": 0.968070337806571, |
| "grad_norm": 0.9840909103193446, |
| "learning_rate": 6.174569281837573e-08, |
| "loss": 0.7012, |
| "step": 2615 |
| }, |
| { |
| "epoch": 0.9699213327163351, |
| "grad_norm": 0.9413628052352356, |
| "learning_rate": 5.4781046317267103e-08, |
| "loss": 0.7021, |
| "step": 2620 |
| }, |
| { |
| "epoch": 0.971772327626099, |
| "grad_norm": 0.9862336464985173, |
| "learning_rate": 4.823196585015066e-08, |
| "loss": 0.6618, |
| "step": 2625 |
| }, |
| { |
| "epoch": 0.973623322535863, |
| "grad_norm": 0.9484184901249422, |
| "learning_rate": 4.209872507368706e-08, |
| "loss": 0.6719, |
| "step": 2630 |
| }, |
| { |
| "epoch": 0.9754743174456271, |
| "grad_norm": 1.0274897071656908, |
| "learning_rate": 3.6381580268463056e-08, |
| "loss": 0.7218, |
| "step": 2635 |
| }, |
| { |
| "epoch": 0.977325312355391, |
| "grad_norm": 0.9664338709751774, |
| "learning_rate": 3.108077032828116e-08, |
| "loss": 0.6997, |
| "step": 2640 |
| }, |
| { |
| "epoch": 0.979176307265155, |
| "grad_norm": 1.1238393315125719, |
| "learning_rate": 2.6196516750183198e-08, |
| "loss": 0.7338, |
| "step": 2645 |
| }, |
| { |
| "epoch": 0.9810273021749191, |
| "grad_norm": 0.8933118725313647, |
| "learning_rate": 2.1729023625189916e-08, |
| "loss": 0.7035, |
| "step": 2650 |
| }, |
| { |
| "epoch": 0.982878297084683, |
| "grad_norm": 0.9704540461821111, |
| "learning_rate": 1.767847762977337e-08, |
| "loss": 0.7668, |
| "step": 2655 |
| }, |
| { |
| "epoch": 0.984729291994447, |
| "grad_norm": 0.9466080459329079, |
| "learning_rate": 1.4045048018059837e-08, |
| "loss": 0.6771, |
| "step": 2660 |
| }, |
| { |
| "epoch": 0.9865802869042111, |
| "grad_norm": 0.9644726251473568, |
| "learning_rate": 1.0828886614754342e-08, |
| "loss": 0.7539, |
| "step": 2665 |
| }, |
| { |
| "epoch": 0.988431281813975, |
| "grad_norm": 0.8818176818424777, |
| "learning_rate": 8.030127808797972e-09, |
| "loss": 0.719, |
| "step": 2670 |
| }, |
| { |
| "epoch": 0.990282276723739, |
| "grad_norm": 0.8961966562928385, |
| "learning_rate": 5.648888547750142e-09, |
| "loss": 0.697, |
| "step": 2675 |
| }, |
| { |
| "epoch": 0.992133271633503, |
| "grad_norm": 1.2004341644367225, |
| "learning_rate": 3.6852683329058336e-09, |
| "loss": 0.7457, |
| "step": 2680 |
| }, |
| { |
| "epoch": 0.993984266543267, |
| "grad_norm": 0.9729347850621183, |
| "learning_rate": 2.1393492151333684e-09, |
| "loss": 0.6743, |
| "step": 2685 |
| }, |
| { |
| "epoch": 0.995835261453031, |
| "grad_norm": 0.9956973839014348, |
| "learning_rate": 1.0111957914515914e-09, |
| "loss": 0.6976, |
| "step": 2690 |
| }, |
| { |
| "epoch": 0.997686256362795, |
| "grad_norm": 0.9334887689919787, |
| "learning_rate": 3.008552023242572e-10, |
| "loss": 0.7241, |
| "step": 2695 |
| }, |
| { |
| "epoch": 0.999537251272559, |
| "grad_norm": 0.9781490083976876, |
| "learning_rate": 8.357129693825628e-12, |
| "loss": 0.7024, |
| "step": 2700 |
| }, |
| { |
| "epoch": 0.999537251272559, |
| "eval_loss": 0.7359436750411987, |
| "eval_runtime": 125.9053, |
| "eval_samples_per_second": 1.017, |
| "eval_steps_per_second": 0.508, |
| "step": 2700 |
| }, |
| { |
| "epoch": 0.9999074502545118, |
| "step": 2701, |
| "total_flos": 76938190258176.0, |
| "train_loss": 0.7557382643200741, |
| "train_runtime": 83163.2816, |
| "train_samples_per_second": 0.26, |
| "train_steps_per_second": 0.032 |
| } |
| ], |
| "logging_steps": 5, |
| "max_steps": 2701, |
| "num_input_tokens_seen": 0, |
| "num_train_epochs": 1, |
| "save_steps": 500, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": false, |
| "should_training_stop": false |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 76938190258176.0, |
| "train_batch_size": 1, |
| "trial_name": null, |
| "trial_params": null |
| } |