{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 100,
  "global_step": 574,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003484320557491289,
      "grad_norm": 497.3575626103625,
      "learning_rate": 1.5517241379310344e-08,
      "logits/chosen": -2.5345611572265625,
      "logits/rejected": -2.581700563430786,
      "logps/chosen": -60.002105712890625,
      "logps/rejected": -99.98374938964844,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03484320557491289,
      "grad_norm": 462.4792154122565,
      "learning_rate": 1.5517241379310344e-07,
      "logits/chosen": -2.563889503479004,
      "logits/rejected": -2.562492609024048,
      "logps/chosen": -59.64662551879883,
      "logps/rejected": -73.37532043457031,
      "loss": 0.6974,
      "rewards/accuracies": 0.2083333283662796,
      "rewards/chosen": 0.011204251088202,
      "rewards/margins": 0.00639230664819479,
      "rewards/rejected": 0.004811946302652359,
      "step": 10
    },
    {
      "epoch": 0.06968641114982578,
      "grad_norm": 537.1826669031543,
      "learning_rate": 3.103448275862069e-07,
      "logits/chosen": -2.6071362495422363,
      "logits/rejected": -2.565964698791504,
      "logps/chosen": -103.9787826538086,
      "logps/rejected": -94.84336853027344,
      "loss": 0.6775,
      "rewards/accuracies": 0.3375000059604645,
      "rewards/chosen": 0.12751302123069763,
      "rewards/margins": 0.08386950194835663,
      "rewards/rejected": 0.0436435230076313,
      "step": 20
    },
    {
      "epoch": 0.10452961672473868,
      "grad_norm": 586.0340590697643,
      "learning_rate": 4.6551724137931035e-07,
      "logits/chosen": -2.5952959060668945,
      "logits/rejected": -2.5755515098571777,
      "logps/chosen": -81.9345932006836,
      "logps/rejected": -91.35636901855469,
      "loss": 0.6673,
      "rewards/accuracies": 0.33125001192092896,
      "rewards/chosen": 0.48329487442970276,
      "rewards/margins": 0.3252241015434265,
      "rewards/rejected": 0.15807083249092102,
      "step": 30
    },
    {
      "epoch": 0.13937282229965156,
      "grad_norm": 508.2061569040689,
      "learning_rate": 6.206896551724138e-07,
      "logits/chosen": -2.495903491973877,
      "logits/rejected": -2.4941375255584717,
      "logps/chosen": -78.31864929199219,
      "logps/rejected": -74.04932403564453,
      "loss": 0.6704,
      "rewards/accuracies": 0.3187499940395355,
      "rewards/chosen": -0.3442053496837616,
      "rewards/margins": 0.5570718050003052,
      "rewards/rejected": -0.9012771844863892,
      "step": 40
    },
    {
      "epoch": 0.17421602787456447,
      "grad_norm": 371.6317110957719,
      "learning_rate": 7.758620689655172e-07,
      "logits/chosen": -2.522507667541504,
      "logits/rejected": -2.5270426273345947,
      "logps/chosen": -63.30371856689453,
      "logps/rejected": -75.92526245117188,
      "loss": 0.7487,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": 0.5464689135551453,
      "rewards/margins": 0.3932306468486786,
      "rewards/rejected": 0.15323826670646667,
      "step": 50
    },
    {
      "epoch": 0.20905923344947736,
      "grad_norm": 345.88352506142644,
      "learning_rate": 8.965116279069767e-07,
      "logits/chosen": -2.493328094482422,
      "logits/rejected": -2.4874939918518066,
      "logps/chosen": -70.6145248413086,
      "logps/rejected": -66.24369049072266,
      "loss": 0.7394,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": 2.2291111946105957,
      "rewards/margins": 0.3673287332057953,
      "rewards/rejected": 1.8617826700210571,
      "step": 60
    },
    {
      "epoch": 0.24390243902439024,
      "grad_norm": 461.9022420517148,
      "learning_rate": 8.790697674418603e-07,
      "logits/chosen": -2.5233232975006104,
      "logits/rejected": -2.518942356109619,
      "logps/chosen": -61.38080978393555,
      "logps/rejected": -66.21412658691406,
      "loss": 0.8029,
      "rewards/accuracies": 0.2874999940395355,
      "rewards/chosen": 2.4556429386138916,
      "rewards/margins": 0.5528993606567383,
      "rewards/rejected": 1.9027433395385742,
      "step": 70
    },
    {
      "epoch": 0.2787456445993031,
      "grad_norm": 445.0623855333619,
      "learning_rate": 8.616279069767441e-07,
      "logits/chosen": -2.464928388595581,
      "logits/rejected": -2.4558563232421875,
      "logps/chosen": -72.87749481201172,
      "logps/rejected": -75.74545288085938,
      "loss": 0.8699,
      "rewards/accuracies": 0.3062500059604645,
      "rewards/chosen": 2.397139310836792,
      "rewards/margins": 0.8753674626350403,
      "rewards/rejected": 1.521772027015686,
      "step": 80
    },
    {
      "epoch": 0.313588850174216,
      "grad_norm": 523.0685685145581,
      "learning_rate": 8.441860465116279e-07,
      "logits/chosen": -2.514057159423828,
      "logits/rejected": -2.529362201690674,
      "logps/chosen": -63.073158264160156,
      "logps/rejected": -67.64947509765625,
      "loss": 0.9667,
      "rewards/accuracies": 0.2874999940395355,
      "rewards/chosen": 2.5018441677093506,
      "rewards/margins": 0.38231611251831055,
      "rewards/rejected": 2.11952805519104,
      "step": 90
    },
    {
      "epoch": 0.34843205574912894,
      "grad_norm": 538.3596051073214,
      "learning_rate": 8.267441860465116e-07,
      "logits/chosen": -2.5051705837249756,
      "logits/rejected": -2.5038981437683105,
      "logps/chosen": -72.73633575439453,
      "logps/rejected": -79.80201721191406,
      "loss": 0.9734,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": 2.179957389831543,
      "rewards/margins": 1.1052770614624023,
      "rewards/rejected": 1.0746803283691406,
      "step": 100
    },
    {
      "epoch": 0.34843205574912894,
      "eval_logits/chosen": -2.5853304862976074,
      "eval_logits/rejected": -2.569460868835449,
      "eval_logps/chosen": -72.08978271484375,
      "eval_logps/rejected": -79.57595825195312,
      "eval_loss": 0.9202933311462402,
      "eval_rewards/accuracies": 0.329365074634552,
      "eval_rewards/chosen": 2.065248489379883,
      "eval_rewards/margins": 0.730187714099884,
      "eval_rewards/rejected": 1.3350608348846436,
      "eval_runtime": 113.407,
      "eval_samples_per_second": 17.636,
      "eval_steps_per_second": 0.556,
      "step": 100
    },
    {
      "epoch": 0.3832752613240418,
      "grad_norm": 565.1538822918781,
      "learning_rate": 8.093023255813954e-07,
      "logits/chosen": -2.517366409301758,
      "logits/rejected": -2.486677646636963,
      "logps/chosen": -71.63627624511719,
      "logps/rejected": -61.792930603027344,
      "loss": 0.9796,
      "rewards/accuracies": 0.2562499940395355,
      "rewards/chosen": 1.3190182447433472,
      "rewards/margins": 0.27183371782302856,
      "rewards/rejected": 1.0471845865249634,
      "step": 110
    },
    {
      "epoch": 0.4181184668989547,
      "grad_norm": 415.3172615536407,
      "learning_rate": 7.91860465116279e-07,
      "logits/chosen": -2.5475993156433105,
      "logits/rejected": -2.518019437789917,
      "logps/chosen": -75.4908676147461,
      "logps/rejected": -65.90876770019531,
      "loss": 0.9023,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": 1.6948095560073853,
      "rewards/margins": 1.0387893915176392,
      "rewards/rejected": 0.6560201644897461,
      "step": 120
    },
    {
      "epoch": 0.4529616724738676,
      "grad_norm": 654.650382698047,
      "learning_rate": 7.744186046511627e-07,
      "logits/chosen": -2.57270884513855,
      "logits/rejected": -2.5545814037323,
      "logps/chosen": -82.57270812988281,
      "logps/rejected": -87.82828521728516,
      "loss": 1.0166,
      "rewards/accuracies": 0.34375,
      "rewards/chosen": 1.1249371767044067,
      "rewards/margins": 1.3999314308166504,
      "rewards/rejected": -0.2749941945075989,
      "step": 130
    },
    {
      "epoch": 0.4878048780487805,
      "grad_norm": 392.83640221764773,
      "learning_rate": 7.569767441860465e-07,
      "logits/chosen": -2.4829936027526855,
      "logits/rejected": -2.474769353866577,
      "logps/chosen": -78.97613525390625,
      "logps/rejected": -70.12017059326172,
      "loss": 0.9314,
      "rewards/accuracies": 0.32499998807907104,
      "rewards/chosen": 1.8655914068222046,
      "rewards/margins": 1.3116776943206787,
      "rewards/rejected": 0.5539135932922363,
      "step": 140
    },
    {
      "epoch": 0.5226480836236934,
      "grad_norm": 496.0616334612732,
      "learning_rate": 7.395348837209303e-07,
      "logits/chosen": -2.5784010887145996,
      "logits/rejected": -2.5391345024108887,
      "logps/chosen": -77.13214111328125,
      "logps/rejected": -78.45569610595703,
      "loss": 1.0681,
      "rewards/accuracies": 0.28125,
      "rewards/chosen": 2.022892713546753,
      "rewards/margins": 1.0811107158660889,
      "rewards/rejected": 0.9417816400527954,
      "step": 150
    },
    {
      "epoch": 0.5574912891986062,
      "grad_norm": 572.3657252794802,
      "learning_rate": 7.220930232558139e-07,
      "logits/chosen": -2.579862117767334,
      "logits/rejected": -2.5984551906585693,
      "logps/chosen": -62.170875549316406,
      "logps/rejected": -70.7127914428711,
      "loss": 0.996,
      "rewards/accuracies": 0.28125,
      "rewards/chosen": 2.024418354034424,
      "rewards/margins": 0.9407089948654175,
      "rewards/rejected": 1.0837091207504272,
      "step": 160
    },
    {
      "epoch": 0.5923344947735192,
      "grad_norm": 568.8903494255211,
      "learning_rate": 7.046511627906976e-07,
      "logits/chosen": -2.5988354682922363,
      "logits/rejected": -2.5848004817962646,
      "logps/chosen": -66.76710510253906,
      "logps/rejected": -75.21239471435547,
      "loss": 0.9987,
      "rewards/accuracies": 0.25,
      "rewards/chosen": 1.8922548294067383,
      "rewards/margins": 0.944841206073761,
      "rewards/rejected": 0.9474137425422668,
      "step": 170
    },
    {
      "epoch": 0.627177700348432,
      "grad_norm": 521.2606597836349,
      "learning_rate": 6.872093023255814e-07,
      "logits/chosen": -2.6132636070251465,
      "logits/rejected": -2.602483034133911,
      "logps/chosen": -89.0395736694336,
      "logps/rejected": -84.7410659790039,
      "loss": 1.0952,
      "rewards/accuracies": 0.3375000059604645,
      "rewards/chosen": 2.4231231212615967,
      "rewards/margins": 0.9014472961425781,
      "rewards/rejected": 1.5216760635375977,
      "step": 180
    },
    {
      "epoch": 0.662020905923345,
      "grad_norm": 416.32272877425527,
      "learning_rate": 6.697674418604651e-07,
      "logits/chosen": -2.6020960807800293,
      "logits/rejected": -2.594320297241211,
      "logps/chosen": -68.85299682617188,
      "logps/rejected": -79.95862579345703,
      "loss": 1.0472,
      "rewards/accuracies": 0.2874999940395355,
      "rewards/chosen": 1.8906943798065186,
      "rewards/margins": 0.5408729314804077,
      "rewards/rejected": 1.3498214483261108,
      "step": 190
    },
    {
      "epoch": 0.6968641114982579,
      "grad_norm": 736.2428171588583,
      "learning_rate": 6.523255813953487e-07,
      "logits/chosen": -2.6261801719665527,
      "logits/rejected": -2.6321043968200684,
      "logps/chosen": -86.841064453125,
      "logps/rejected": -90.11552429199219,
      "loss": 0.9883,
      "rewards/accuracies": 0.36250001192092896,
      "rewards/chosen": 3.2811522483825684,
      "rewards/margins": 1.6169086694717407,
      "rewards/rejected": 1.6642436981201172,
      "step": 200
    },
    {
      "epoch": 0.6968641114982579,
      "eval_logits/chosen": -2.5874733924865723,
      "eval_logits/rejected": -2.5707614421844482,
      "eval_logps/chosen": -71.6690673828125,
      "eval_logps/rejected": -79.74640655517578,
      "eval_loss": 1.0967082977294922,
      "eval_rewards/accuracies": 0.3373015820980072,
      "eval_rewards/chosen": 2.4270544052124023,
      "eval_rewards/margins": 1.2385817766189575,
      "eval_rewards/rejected": 1.1884726285934448,
      "eval_runtime": 113.2711,
      "eval_samples_per_second": 17.657,
      "eval_steps_per_second": 0.556,
      "step": 200
    },
    {
      "epoch": 0.7317073170731707,
      "grad_norm": 760.3151343554975,
      "learning_rate": 6.348837209302325e-07,
      "logits/chosen": -2.594675064086914,
      "logits/rejected": -2.570430278778076,
      "logps/chosen": -66.980224609375,
      "logps/rejected": -62.78594207763672,
      "loss": 1.0072,
      "rewards/accuracies": 0.3375000059604645,
      "rewards/chosen": 2.011408805847168,
      "rewards/margins": 1.4795125722885132,
      "rewards/rejected": 0.5318960547447205,
      "step": 210
    },
    {
      "epoch": 0.7665505226480837,
      "grad_norm": 481.77954463638264,
      "learning_rate": 6.174418604651163e-07,
      "logits/chosen": -2.645392894744873,
      "logits/rejected": -2.6270740032196045,
      "logps/chosen": -70.76057434082031,
      "logps/rejected": -69.80204772949219,
      "loss": 1.1057,
      "rewards/accuracies": 0.24375000596046448,
      "rewards/chosen": 2.319939136505127,
      "rewards/margins": 0.7193705439567566,
      "rewards/rejected": 1.6005685329437256,
      "step": 220
    },
    {
      "epoch": 0.8013937282229965,
      "grad_norm": 765.046306381631,
      "learning_rate": 6e-07,
      "logits/chosen": -2.6732935905456543,
      "logits/rejected": -2.65371036529541,
      "logps/chosen": -86.65088653564453,
      "logps/rejected": -87.35969543457031,
      "loss": 1.1788,
      "rewards/accuracies": 0.3375000059604645,
      "rewards/chosen": 2.8807787895202637,
      "rewards/margins": 2.20210599899292,
      "rewards/rejected": 0.6786726713180542,
      "step": 230
    },
    {
      "epoch": 0.8362369337979094,
      "grad_norm": 548.8932956260087,
      "learning_rate": 5.825581395348838e-07,
      "logits/chosen": -2.6739866733551025,
      "logits/rejected": -2.6417675018310547,
      "logps/chosen": -83.05066680908203,
      "logps/rejected": -77.82533264160156,
      "loss": 1.1372,
      "rewards/accuracies": 0.34375,
      "rewards/chosen": 3.1742072105407715,
      "rewards/margins": 1.1869542598724365,
      "rewards/rejected": 1.9872528314590454,
      "step": 240
    },
    {
      "epoch": 0.8710801393728222,
      "grad_norm": 616.7927885159357,
      "learning_rate": 5.651162790697674e-07,
      "logits/chosen": -2.671238899230957,
      "logits/rejected": -2.637321949005127,
      "logps/chosen": -92.645263671875,
      "logps/rejected": -88.57808685302734,
      "loss": 0.8737,
      "rewards/accuracies": 0.3499999940395355,
      "rewards/chosen": 2.4114432334899902,
      "rewards/margins": 1.2348735332489014,
      "rewards/rejected": 1.1765693426132202,
      "step": 250
    },
    {
      "epoch": 0.9059233449477352,
      "grad_norm": 478.21518650366113,
      "learning_rate": 5.476744186046511e-07,
      "logits/chosen": -2.555266857147217,
      "logits/rejected": -2.5690598487854004,
      "logps/chosen": -57.567100524902344,
      "logps/rejected": -65.19673919677734,
      "loss": 1.1295,
      "rewards/accuracies": 0.3062500059604645,
      "rewards/chosen": 1.2747440338134766,
      "rewards/margins": 0.9036104083061218,
      "rewards/rejected": 0.37113362550735474,
      "step": 260
    },
    {
      "epoch": 0.9407665505226481,
      "grad_norm": 730.4342654590606,
      "learning_rate": 5.302325581395349e-07,
      "logits/chosen": -2.6346664428710938,
      "logits/rejected": -2.633521556854248,
      "logps/chosen": -67.13279724121094,
      "logps/rejected": -82.27578735351562,
      "loss": 1.0712,
      "rewards/accuracies": 0.3062500059604645,
      "rewards/chosen": 1.5027388334274292,
      "rewards/margins": 1.282596468925476,
      "rewards/rejected": 0.2201424092054367,
      "step": 270
    },
    {
      "epoch": 0.975609756097561,
      "grad_norm": 595.5490958688126,
      "learning_rate": 5.127906976744186e-07,
      "logits/chosen": -2.5305819511413574,
      "logits/rejected": -2.510344982147217,
      "logps/chosen": -65.64894104003906,
      "logps/rejected": -70.33460998535156,
      "loss": 0.9263,
      "rewards/accuracies": 0.32499998807907104,
      "rewards/chosen": 1.8794810771942139,
      "rewards/margins": 1.4496572017669678,
      "rewards/rejected": 0.42982417345046997,
      "step": 280
    },
    {
      "epoch": 1.0104529616724738,
      "grad_norm": 23.979212075374758,
      "learning_rate": 4.953488372093023e-07,
      "logits/chosen": -2.573585033416748,
      "logits/rejected": -2.5460705757141113,
      "logps/chosen": -66.83503723144531,
      "logps/rejected": -65.80335998535156,
      "loss": 0.7697,
      "rewards/accuracies": 0.38749998807907104,
      "rewards/chosen": 4.500079154968262,
      "rewards/margins": 5.2650299072265625,
      "rewards/rejected": -0.7649505734443665,
      "step": 290
    },
    {
      "epoch": 1.0452961672473868,
      "grad_norm": 44.13210545620442,
      "learning_rate": 4.779069767441861e-07,
      "logits/chosen": -2.6247591972351074,
      "logits/rejected": -2.610288381576538,
      "logps/chosen": -56.94911575317383,
      "logps/rejected": -78.56959533691406,
      "loss": 0.4215,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": 8.255823135375977,
      "rewards/margins": 16.339366912841797,
      "rewards/rejected": -8.08354377746582,
      "step": 300
    },
    {
      "epoch": 1.0452961672473868,
      "eval_logits/chosen": -2.656022548675537,
      "eval_logits/rejected": -2.6403489112854004,
      "eval_logps/chosen": -70.90100860595703,
      "eval_logps/rejected": -79.04632568359375,
      "eval_loss": 1.1233958005905151,
      "eval_rewards/accuracies": 0.3313491940498352,
      "eval_rewards/chosen": 3.0875895023345947,
      "eval_rewards/margins": 1.2970443964004517,
      "eval_rewards/rejected": 1.790545105934143,
      "eval_runtime": 113.4658,
      "eval_samples_per_second": 17.626,
      "eval_steps_per_second": 0.555,
      "step": 300
    },
    {
      "epoch": 1.0801393728222997,
      "grad_norm": 40.17101348318783,
      "learning_rate": 4.604651162790698e-07,
      "logits/chosen": -2.611358165740967,
      "logits/rejected": -2.6116092205047607,
      "logps/chosen": -58.17817306518555,
      "logps/rejected": -85.24242401123047,
      "loss": 0.4163,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 8.134294509887695,
      "rewards/margins": 16.730432510375977,
      "rewards/rejected": -8.596138000488281,
      "step": 310
    },
    {
      "epoch": 1.1149825783972125,
      "grad_norm": 153.73635089362784,
      "learning_rate": 4.4302325581395346e-07,
      "logits/chosen": -2.647315740585327,
      "logits/rejected": -2.6344220638275146,
      "logps/chosen": -64.29966735839844,
      "logps/rejected": -88.07272338867188,
      "loss": 0.3886,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": 8.3077974319458,
      "rewards/margins": 16.785139083862305,
      "rewards/rejected": -8.477343559265137,
      "step": 320
    },
    {
      "epoch": 1.1498257839721253,
      "grad_norm": 196.76703226808354,
      "learning_rate": 4.255813953488372e-07,
      "logits/chosen": -2.6384284496307373,
      "logits/rejected": -2.6142385005950928,
      "logps/chosen": -73.04707336425781,
      "logps/rejected": -88.73326873779297,
      "loss": 0.4236,
      "rewards/accuracies": 0.518750011920929,
      "rewards/chosen": 9.5239896774292,
      "rewards/margins": 18.215595245361328,
      "rewards/rejected": -8.691606521606445,
      "step": 330
    },
    {
      "epoch": 1.1846689895470384,
      "grad_norm": 16.30501354867415,
      "learning_rate": 4.081395348837209e-07,
      "logits/chosen": -2.6152892112731934,
      "logits/rejected": -2.617434024810791,
      "logps/chosen": -72.81196594238281,
      "logps/rejected": -107.9176254272461,
      "loss": 0.4009,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 9.404792785644531,
      "rewards/margins": 20.270700454711914,
      "rewards/rejected": -10.8659086227417,
      "step": 340
    },
    {
      "epoch": 1.2195121951219512,
      "grad_norm": 253.70590340563848,
      "learning_rate": 3.9069767441860464e-07,
      "logits/chosen": -2.627079486846924,
      "logits/rejected": -2.594177007675171,
      "logps/chosen": -57.3041877746582,
      "logps/rejected": -77.43646240234375,
      "loss": 0.4007,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": 9.71183967590332,
      "rewards/margins": 19.823144912719727,
      "rewards/rejected": -10.111306190490723,
      "step": 350
    },
    {
      "epoch": 1.254355400696864,
      "grad_norm": 137.93033306808474,
      "learning_rate": 3.7325581395348837e-07,
      "logits/chosen": -2.603165626525879,
      "logits/rejected": -2.573925495147705,
      "logps/chosen": -62.46228790283203,
      "logps/rejected": -73.45440673828125,
      "loss": 0.374,
      "rewards/accuracies": 0.4437499940395355,
      "rewards/chosen": 8.073513984680176,
      "rewards/margins": 14.77312183380127,
      "rewards/rejected": -6.699607849121094,
      "step": 360
    },
    {
      "epoch": 1.289198606271777,
      "grad_norm": 2.3357390678870833,
      "learning_rate": 3.5581395348837204e-07,
      "logits/chosen": -2.562800407409668,
      "logits/rejected": -2.5790553092956543,
      "logps/chosen": -61.26555633544922,
      "logps/rejected": -86.61184692382812,
      "loss": 0.4449,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": 8.428425788879395,
      "rewards/margins": 18.987430572509766,
      "rewards/rejected": -10.559003829956055,
      "step": 370
    },
    {
      "epoch": 1.32404181184669,
      "grad_norm": 240.07418332327254,
      "learning_rate": 3.383720930232558e-07,
      "logits/chosen": -2.55873966217041,
      "logits/rejected": -2.556434392929077,
      "logps/chosen": -77.422607421875,
      "logps/rejected": -101.52474212646484,
      "loss": 0.4144,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 8.128644943237305,
      "rewards/margins": 21.078929901123047,
      "rewards/rejected": -12.950288772583008,
      "step": 380
    },
    {
      "epoch": 1.3588850174216027,
      "grad_norm": 26.553648537118264,
      "learning_rate": 3.2093023255813955e-07,
      "logits/chosen": -2.6296944618225098,
      "logits/rejected": -2.6129908561706543,
      "logps/chosen": -56.90552520751953,
      "logps/rejected": -84.36430358886719,
      "loss": 0.378,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 8.677267074584961,
      "rewards/margins": 19.946393966674805,
      "rewards/rejected": -11.269128799438477,
      "step": 390
    },
    {
      "epoch": 1.3937282229965158,
      "grad_norm": 11.915044642787674,
      "learning_rate": 3.034883720930232e-07,
      "logits/chosen": -2.6192545890808105,
      "logits/rejected": -2.591175079345703,
      "logps/chosen": -77.75074768066406,
      "logps/rejected": -112.25565338134766,
      "loss": 0.393,
      "rewards/accuracies": 0.518750011920929,
      "rewards/chosen": 7.220816135406494,
      "rewards/margins": 19.133440017700195,
      "rewards/rejected": -11.912622451782227,
      "step": 400
    },
    {
      "epoch": 1.3937282229965158,
      "eval_logits/chosen": -2.625042676925659,
      "eval_logits/rejected": -2.609307050704956,
      "eval_logps/chosen": -74.64740753173828,
      "eval_logps/rejected": -83.32994842529297,
      "eval_loss": 1.223362684249878,
      "eval_rewards/accuracies": 0.32341268658638,
      "eval_rewards/chosen": -0.1343114674091339,
      "eval_rewards/margins": 1.759063720703125,
      "eval_rewards/rejected": -1.893375277519226,
      "eval_runtime": 113.3813,
      "eval_samples_per_second": 17.64,
      "eval_steps_per_second": 0.556,
      "step": 400
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 265.4416418102539,
      "learning_rate": 2.8604651162790695e-07,
      "logits/chosen": -2.6114742755889893,
      "logits/rejected": -2.601598024368286,
      "logps/chosen": -73.08380889892578,
      "logps/rejected": -94.53117370605469,
      "loss": 0.6924,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": 8.195378303527832,
      "rewards/margins": 20.368560791015625,
      "rewards/rejected": -12.17318344116211,
      "step": 410
    },
    {
      "epoch": 1.4634146341463414,
      "grad_norm": 110.44055155377453,
      "learning_rate": 2.6860465116279067e-07,
      "logits/chosen": -2.651491641998291,
      "logits/rejected": -2.65342378616333,
      "logps/chosen": -69.68745422363281,
      "logps/rejected": -97.7309341430664,
      "loss": 0.4257,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": 5.508909702301025,
      "rewards/margins": 16.567991256713867,
      "rewards/rejected": -11.059081077575684,
      "step": 420
    },
    {
      "epoch": 1.4982578397212545,
      "grad_norm": 0.6603991710359594,
      "learning_rate": 2.511627906976744e-07,
      "logits/chosen": -2.606182336807251,
      "logits/rejected": -2.5925304889678955,
      "logps/chosen": -62.015174865722656,
      "logps/rejected": -85.03865814208984,
      "loss": 0.3861,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": 5.7165632247924805,
      "rewards/margins": 16.509204864501953,
      "rewards/rejected": -10.792640686035156,
      "step": 430
    },
    {
      "epoch": 1.533101045296167,
      "grad_norm": 171.49719572579374,
      "learning_rate": 2.3372093023255815e-07,
      "logits/chosen": -2.5812370777130127,
      "logits/rejected": -2.5880465507507324,
      "logps/chosen": -62.13433074951172,
      "logps/rejected": -84.38856506347656,
      "loss": 0.4158,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": 6.746127128601074,
      "rewards/margins": 16.110437393188477,
      "rewards/rejected": -9.364312171936035,
      "step": 440
    },
    {
      "epoch": 1.5679442508710801,
      "grad_norm": 28.499265540380463,
      "learning_rate": 2.1627906976744185e-07,
      "logits/chosen": -2.641430139541626,
      "logits/rejected": -2.6030945777893066,
      "logps/chosen": -79.95354461669922,
      "logps/rejected": -103.62571716308594,
      "loss": 0.4177,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 9.364060401916504,
      "rewards/margins": 24.554065704345703,
      "rewards/rejected": -15.1900053024292,
      "step": 450
    },
    {
      "epoch": 1.6027874564459932,
      "grad_norm": 208.9071332592839,
      "learning_rate": 1.9883720930232558e-07,
      "logits/chosen": -2.6206653118133545,
      "logits/rejected": -2.597672939300537,
      "logps/chosen": -66.5615463256836,
      "logps/rejected": -93.40229797363281,
      "loss": 0.4043,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 8.271448135375977,
      "rewards/margins": 22.373382568359375,
      "rewards/rejected": -14.101933479309082,
      "step": 460
    },
    {
      "epoch": 1.6376306620209058,
      "grad_norm": 0.3844536104459883,
      "learning_rate": 1.8139534883720928e-07,
      "logits/chosen": -2.5973286628723145,
      "logits/rejected": -2.5908195972442627,
      "logps/chosen": -54.78232955932617,
      "logps/rejected": -81.273681640625,
      "loss": 0.4316,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": 5.607256889343262,
      "rewards/margins": 13.865686416625977,
      "rewards/rejected": -8.258430480957031,
      "step": 470
    },
    {
      "epoch": 1.6724738675958188,
      "grad_norm": 150.7317542423224,
      "learning_rate": 1.63953488372093e-07,
      "logits/chosen": -2.599632501602173,
      "logits/rejected": -2.584341049194336,
      "logps/chosen": -49.672950744628906,
      "logps/rejected": -57.88109588623047,
      "loss": 0.4093,
      "rewards/accuracies": 0.35624998807907104,
      "rewards/chosen": 3.7741661071777344,
      "rewards/margins": 8.84811782836914,
      "rewards/rejected": -5.073951721191406,
      "step": 480
    },
    {
      "epoch": 1.7073170731707317,
      "grad_norm": 51.065123641891695,
      "learning_rate": 1.4651162790697676e-07,
      "logits/chosen": -2.571542978286743,
      "logits/rejected": -2.561433792114258,
      "logps/chosen": -66.06110382080078,
      "logps/rejected": -81.25625610351562,
      "loss": 0.5041,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": 6.158200740814209,
      "rewards/margins": 15.785115242004395,
      "rewards/rejected": -9.626913070678711,
      "step": 490
    },
    {
      "epoch": 1.7421602787456445,
      "grad_norm": 9.459943139553493,
      "learning_rate": 1.2906976744186046e-07,
      "logits/chosen": -2.496741771697998,
      "logits/rejected": -2.4906914234161377,
      "logps/chosen": -68.98286437988281,
      "logps/rejected": -96.44841003417969,
      "loss": 0.3986,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": 6.282340049743652,
      "rewards/margins": 17.225933074951172,
      "rewards/rejected": -10.943593978881836,
      "step": 500
    },
    {
      "epoch": 1.7421602787456445,
      "eval_logits/chosen": -2.607004404067993,
      "eval_logits/rejected": -2.5908732414245605,
      "eval_logps/chosen": -74.26602935791016,
      "eval_logps/rejected": -83.2776107788086,
      "eval_loss": 1.2247282266616821,
      "eval_rewards/accuracies": 0.3214285671710968,
      "eval_rewards/chosen": 0.1936810314655304,
      "eval_rewards/margins": 2.0420379638671875,
      "eval_rewards/rejected": -1.848357081413269,
      "eval_runtime": 113.3979,
      "eval_samples_per_second": 17.637,
      "eval_steps_per_second": 0.556,
      "step": 500
    },
    {
      "epoch": 1.7770034843205575,
      "grad_norm": 22.381813019053016,
      "learning_rate": 1.1162790697674417e-07,
      "logits/chosen": -2.572063446044922,
      "logits/rejected": -2.551802635192871,
      "logps/chosen": -62.93878173828125,
      "logps/rejected": -80.62296295166016,
      "loss": 0.4109,
      "rewards/accuracies": 0.45625001192092896,
      "rewards/chosen": 6.723170280456543,
      "rewards/margins": 16.216602325439453,
      "rewards/rejected": -9.49343204498291,
      "step": 510
    },
    {
      "epoch": 1.8118466898954704,
      "grad_norm": 34.84061860816185,
      "learning_rate": 9.418604651162791e-08,
      "logits/chosen": -2.5822184085845947,
      "logits/rejected": -2.5742483139038086,
      "logps/chosen": -66.30496978759766,
      "logps/rejected": -88.59693908691406,
      "loss": 0.3956,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 6.850976467132568,
      "rewards/margins": 16.641326904296875,
      "rewards/rejected": -9.790349006652832,
      "step": 520
    },
    {
      "epoch": 1.8466898954703832,
      "grad_norm": 26.726116225430992,
      "learning_rate": 7.674418604651163e-08,
      "logits/chosen": -2.5606324672698975,
      "logits/rejected": -2.5552823543548584,
      "logps/chosen": -62.88715744018555,
      "logps/rejected": -84.60001373291016,
      "loss": 0.3962,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 7.2661237716674805,
      "rewards/margins": 18.649456024169922,
      "rewards/rejected": -11.383334159851074,
      "step": 530
    },
    {
      "epoch": 1.8815331010452963,
      "grad_norm": 746.1912709837381,
      "learning_rate": 5.930232558139535e-08,
      "logits/chosen": -2.5763189792633057,
      "logits/rejected": -2.589749336242676,
      "logps/chosen": -54.900634765625,
      "logps/rejected": -89.78245544433594,
      "loss": 0.4804,
      "rewards/accuracies": 0.45625001192092896,
      "rewards/chosen": 7.59392786026001,
      "rewards/margins": 21.733322143554688,
      "rewards/rejected": -14.139394760131836,
      "step": 540
    },
    {
      "epoch": 1.916376306620209,
      "grad_norm": 0.6251683362748027,
      "learning_rate": 4.1860465116279067e-08,
      "logits/chosen": -2.5647377967834473,
      "logits/rejected": -2.5423054695129395,
      "logps/chosen": -79.24058532714844,
      "logps/rejected": -106.3379898071289,
      "loss": 0.4013,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 9.971015930175781,
      "rewards/margins": 26.699609756469727,
      "rewards/rejected": -16.728595733642578,
      "step": 550
    },
    {
      "epoch": 1.951219512195122,
      "grad_norm": 252.84212758390947,
      "learning_rate": 2.441860465116279e-08,
      "logits/chosen": -2.560770034790039,
      "logits/rejected": -2.567601442337036,
      "logps/chosen": -57.37895584106445,
      "logps/rejected": -80.76757049560547,
      "loss": 0.4091,
      "rewards/accuracies": 0.4437499940395355,
      "rewards/chosen": 5.453306674957275,
      "rewards/margins": 14.458730697631836,
      "rewards/rejected": -9.005423545837402,
      "step": 560
    },
    {
      "epoch": 1.986062717770035,
      "grad_norm": 110.2396617228431,
      "learning_rate": 6.976744186046511e-09,
      "logits/chosen": -2.637852191925049,
      "logits/rejected": -2.608257293701172,
      "logps/chosen": -58.60048294067383,
      "logps/rejected": -71.56431579589844,
      "loss": 0.3934,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 5.686958312988281,
      "rewards/margins": 13.428431510925293,
      "rewards/rejected": -7.7414727210998535,
      "step": 570
    },
    {
      "epoch": 2.0,
      "step": 574,
      "total_flos": 0.0,
      "train_loss": 0.6794358088995106,
      "train_runtime": 6413.3344,
      "train_samples_per_second": 5.719,
      "train_steps_per_second": 0.09
    }
  ],
  "logging_steps": 10,
  "max_steps": 574,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}