{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.6872160934458145,
  "eval_steps": 25,
  "global_step": 325,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.12978585334198572,
      "grad_norm": 0.0001288286439375952,
      "learning_rate": 9.985564567238237e-05,
      "logits/chosen": -2.892515182495117,
      "logits/rejected": -2.8839669227600098,
      "logps/chosen": -65.34242248535156,
      "logps/rejected": -120.23748779296875,
      "loss": 0.2047,
      "rewards/accuracies": 0.7599999904632568,
      "rewards/chosen": -2.3054304122924805,
      "rewards/margins": 7.21456241607666,
      "rewards/rejected": -9.519991874694824,
      "step": 25
    },
    {
      "epoch": 0.12978585334198572,
      "eval_logits/chosen": -3.1952435970306396,
      "eval_logits/rejected": -3.1321091651916504,
      "eval_logps/chosen": -126.43878936767578,
      "eval_logps/rejected": -265.6844177246094,
      "eval_loss": 4.6241984819062054e-07,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -8.37295913696289,
      "eval_rewards/margins": 15.654376983642578,
      "eval_rewards/rejected": -24.02733612060547,
      "eval_runtime": 140.1846,
      "eval_samples_per_second": 0.585,
      "eval_steps_per_second": 0.585,
      "step": 25
    },
    {
      "epoch": 0.25957170668397145,
      "grad_norm": 1.2395206795190461e-05,
      "learning_rate": 9.795296239506012e-05,
      "logits/chosen": -2.2458553314208984,
      "logits/rejected": -2.1915533542633057,
      "logps/chosen": -99.26245880126953,
      "logps/rejected": -254.2440643310547,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -5.69963264465332,
      "rewards/margins": 17.1381778717041,
      "rewards/rejected": -22.837810516357422,
      "step": 50
    },
    {
      "epoch": 0.25957170668397145,
      "eval_logits/chosen": -1.7933237552642822,
      "eval_logits/rejected": -1.7323336601257324,
      "eval_logps/chosen": -89.24785614013672,
      "eval_logps/rejected": -249.6978302001953,
      "eval_loss": 4.8683162390261714e-08,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -4.653865337371826,
      "eval_rewards/margins": 17.77481460571289,
      "eval_rewards/rejected": -22.428680419921875,
      "eval_runtime": 140.0068,
      "eval_samples_per_second": 0.586,
      "eval_steps_per_second": 0.586,
      "step": 50
    },
    {
      "epoch": 0.3893575600259572,
      "grad_norm": 3.164160079904832e-05,
      "learning_rate": 9.392069159975199e-05,
      "logits/chosen": -1.7290905714035034,
      "logits/rejected": -1.6691365242004395,
      "logps/chosen": -87.64579010009766,
      "logps/rejected": -252.55418395996094,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.562257766723633,
      "rewards/margins": 18.078048706054688,
      "rewards/rejected": -22.640310287475586,
      "step": 75
    },
    {
      "epoch": 0.3893575600259572,
      "eval_logits/chosen": -1.7083513736724854,
      "eval_logits/rejected": -1.6462993621826172,
      "eval_logps/chosen": -86.74995422363281,
      "eval_logps/rejected": -249.47621154785156,
      "eval_loss": 3.741008924862399e-08,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -4.404075622558594,
      "eval_rewards/margins": 18.002437591552734,
      "eval_rewards/rejected": -22.406513214111328,
      "eval_runtime": 140.046,
      "eval_samples_per_second": 0.586,
      "eval_steps_per_second": 0.586,
      "step": 75
    },
    {
      "epoch": 0.5191434133679429,
      "grad_norm": 9.557259545545094e-06,
      "learning_rate": 8.793790613463955e-05,
      "logits/chosen": -1.706992506980896,
      "logits/rejected": -1.6317412853240967,
      "logps/chosen": -85.46414184570312,
      "logps/rejected": -248.94418334960938,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.233969211578369,
      "rewards/margins": 18.118690490722656,
      "rewards/rejected": -22.3526611328125,
      "step": 100
    },
    {
      "epoch": 0.5191434133679429,
      "eval_logits/chosen": -1.701735019683838,
      "eval_logits/rejected": -1.6400282382965088,
      "eval_logps/chosen": -86.22808837890625,
      "eval_logps/rejected": -250.09986877441406,
      "eval_loss": 3.298516659810957e-08,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -4.3518877029418945,
      "eval_rewards/margins": 18.11699104309082,
      "eval_rewards/rejected": -22.468881607055664,
      "eval_runtime": 140.2981,
      "eval_samples_per_second": 0.584,
      "eval_steps_per_second": 0.584,
      "step": 100
    },
    {
      "epoch": 0.6489292667099286,
      "grad_norm": 6.237313300516689e-06,
      "learning_rate": 8.027030106031836e-05,
      "logits/chosen": -1.6746500730514526,
      "logits/rejected": -1.6049107313156128,
      "logps/chosen": -85.8425521850586,
      "logps/rejected": -251.0710906982422,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.243345260620117,
      "rewards/margins": 18.34173011779785,
      "rewards/rejected": -22.585073471069336,
      "step": 125
    },
    {
      "epoch": 0.6489292667099286,
      "eval_logits/chosen": -1.7027819156646729,
      "eval_logits/rejected": -1.64132559299469,
      "eval_logps/chosen": -85.93473052978516,
      "eval_logps/rejected": -250.78919982910156,
      "eval_loss": 2.9771682008572498e-08,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -4.32255220413208,
      "eval_rewards/margins": 18.21526336669922,
      "eval_rewards/rejected": -22.537818908691406,
      "eval_runtime": 140.288,
      "eval_samples_per_second": 0.585,
      "eval_steps_per_second": 0.585,
      "step": 125
    },
    {
      "epoch": 0.7787151200519143,
      "grad_norm": 1.6801877791294828e-05,
      "learning_rate": 7.125839415178204e-05,
      "logits/chosen": -1.7362980842590332,
      "logits/rejected": -1.6591914892196655,
      "logps/chosen": -86.65206146240234,
      "logps/rejected": -249.38253784179688,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.383853912353516,
      "rewards/margins": 18.103958129882812,
      "rewards/rejected": -22.487812042236328,
      "step": 150
    },
    {
      "epoch": 0.7787151200519143,
      "eval_logits/chosen": -1.7042571306228638,
      "eval_logits/rejected": -1.6435108184814453,
      "eval_logps/chosen": -85.64505004882812,
      "eval_logps/rejected": -251.6405029296875,
      "eval_loss": 2.621119676859962e-08,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -4.293583393096924,
      "eval_rewards/margins": 18.329362869262695,
      "eval_rewards/rejected": -22.622947692871094,
      "eval_runtime": 140.1349,
      "eval_samples_per_second": 0.585,
      "eval_steps_per_second": 0.585,
      "step": 150
    },
    {
      "epoch": 0.9085009733939,
      "grad_norm": 9.597943972039502e-06,
      "learning_rate": 6.130240352918675e-05,
      "logits/chosen": -1.6720119714736938,
      "logits/rejected": -1.6064784526824951,
      "logps/chosen": -85.91622161865234,
      "logps/rejected": -251.5516815185547,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.345089435577393,
      "rewards/margins": 18.316930770874023,
      "rewards/rejected": -22.662019729614258,
      "step": 175
    },
    {
      "epoch": 0.9085009733939,
      "eval_logits/chosen": -1.7073944807052612,
      "eval_logits/rejected": -1.6472467184066772,
      "eval_logps/chosen": -85.54946899414062,
      "eval_logps/rejected": -252.8058319091797,
      "eval_loss": 2.286074263224691e-08,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -4.284026145935059,
      "eval_rewards/margins": 18.455448150634766,
      "eval_rewards/rejected": -22.739477157592773,
      "eval_runtime": 140.0268,
      "eval_samples_per_second": 0.586,
      "eval_steps_per_second": 0.586,
      "step": 175
    },
    {
      "epoch": 1.0382868267358858,
      "grad_norm": 6.195426067279186e-06,
      "learning_rate": 5.084447400069655e-05,
      "logits/chosen": -1.6958116292953491,
      "logits/rejected": -1.639534592628479,
      "logps/chosen": -84.86266326904297,
      "logps/rejected": -252.62850952148438,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.275487899780273,
      "rewards/margins": 18.430957794189453,
      "rewards/rejected": -22.706445693969727,
      "step": 200
    },
    {
      "epoch": 1.0382868267358858,
      "eval_logits/chosen": -1.7099387645721436,
      "eval_logits/rejected": -1.6504337787628174,
      "eval_logps/chosen": -85.46736907958984,
      "eval_logps/rejected": -253.481689453125,
      "eval_loss": 2.1164542118867757e-08,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -4.275816440582275,
      "eval_rewards/margins": 18.531248092651367,
      "eval_rewards/rejected": -22.807064056396484,
      "eval_runtime": 140.2558,
      "eval_samples_per_second": 0.585,
      "eval_steps_per_second": 0.585,
      "step": 200
    },
    {
      "epoch": 1.1680726800778716,
      "grad_norm": 5.08538914800738e-06,
      "learning_rate": 4.034904144421135e-05,
      "logits/chosen": -1.7136883735656738,
      "logits/rejected": -1.6375319957733154,
      "logps/chosen": -85.2287826538086,
      "logps/rejected": -254.05406188964844,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.193078517913818,
      "rewards/margins": 18.700963973999023,
      "rewards/rejected": -22.894041061401367,
      "step": 225
    },
    {
      "epoch": 1.1680726800778716,
      "eval_logits/chosen": -1.7104138135910034,
      "eval_logits/rejected": -1.6512484550476074,
      "eval_logps/chosen": -85.35152435302734,
      "eval_logps/rejected": -253.81373596191406,
      "eval_loss": 2.0240927156578437e-08,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -4.264232158660889,
      "eval_rewards/margins": 18.57603645324707,
      "eval_rewards/rejected": -22.840267181396484,
      "eval_runtime": 140.3068,
      "eval_samples_per_second": 0.584,
      "eval_steps_per_second": 0.584,
      "step": 225
    },
    {
      "epoch": 1.2978585334198574,
      "grad_norm": 2.7552045139600523e-05,
      "learning_rate": 3.0282207244334082e-05,
      "logits/chosen": -1.7763639688491821,
      "logits/rejected": -1.7230497598648071,
      "logps/chosen": -85.6513900756836,
      "logps/rejected": -256.676513671875,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.309081554412842,
      "rewards/margins": 18.76219940185547,
      "rewards/rejected": -23.071279525756836,
      "step": 250
    },
    {
      "epoch": 1.2978585334198574,
      "eval_logits/chosen": -1.7111458778381348,
      "eval_logits/rejected": -1.6523288488388062,
      "eval_logps/chosen": -85.29007720947266,
      "eval_logps/rejected": -254.06683349609375,
      "eval_loss": 1.954951045490816e-08,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -4.258088111877441,
      "eval_rewards/margins": 18.607492446899414,
      "eval_rewards/rejected": -22.865581512451172,
      "eval_runtime": 140.2669,
      "eval_samples_per_second": 0.585,
      "eval_steps_per_second": 0.585,
      "step": 250
    },
    {
      "epoch": 1.427644386761843,
      "grad_norm": 3.1899812711344566e-06,
      "learning_rate": 2.109103876430864e-05,
      "logits/chosen": -1.7086889743804932,
      "logits/rejected": -1.6506984233856201,
      "logps/chosen": -86.3566665649414,
      "logps/rejected": -254.89923095703125,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.3109564781188965,
      "rewards/margins": 18.709728240966797,
      "rewards/rejected": -23.02068328857422,
      "step": 275
    },
    {
      "epoch": 1.427644386761843,
      "eval_logits/chosen": -1.711836814880371,
      "eval_logits/rejected": -1.6533582210540771,
      "eval_logps/chosen": -85.26038360595703,
      "eval_logps/rejected": -254.31178283691406,
      "eval_loss": 1.9007002194371125e-08,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -4.255118370056152,
      "eval_rewards/margins": 18.634958267211914,
      "eval_rewards/rejected": -22.89007568359375,
      "eval_runtime": 140.4683,
      "eval_samples_per_second": 0.584,
      "eval_steps_per_second": 0.584,
      "step": 275
    },
    {
      "epoch": 1.5574302401038287,
      "grad_norm": 1.0732557711889967e-05,
      "learning_rate": 1.3183715117440142e-05,
      "logits/chosen": -1.6755353212356567,
      "logits/rejected": -1.6350597143173218,
      "logps/chosen": -84.75931549072266,
      "logps/rejected": -254.6855926513672,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.289437770843506,
      "rewards/margins": 18.664514541625977,
      "rewards/rejected": -22.95395278930664,
      "step": 300
    },
    {
      "epoch": 1.5574302401038287,
      "eval_logits/chosen": -1.7125476598739624,
      "eval_logits/rejected": -1.6541738510131836,
      "eval_logps/chosen": -85.2017822265625,
      "eval_logps/rejected": -254.44964599609375,
      "eval_loss": 1.8585970096296478e-08,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -4.249257564544678,
      "eval_rewards/margins": 18.65460205078125,
      "eval_rewards/rejected": -22.903860092163086,
      "eval_runtime": 140.3494,
      "eval_samples_per_second": 0.584,
      "eval_steps_per_second": 0.584,
      "step": 300
    },
    {
      "epoch": 1.6872160934458145,
      "grad_norm": 2.5711274247441906e-06,
      "learning_rate": 6.911399962822518e-06,
      "logits/chosen": -1.7367602586746216,
      "logits/rejected": -1.6708805561065674,
      "logps/chosen": -84.29496765136719,
      "logps/rejected": -253.51669311523438,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.2055463790893555,
      "rewards/margins": 18.59516143798828,
      "rewards/rejected": -22.800708770751953,
      "step": 325
    },
    {
      "epoch": 1.6872160934458145,
      "eval_logits/chosen": -1.7123589515686035,
      "eval_logits/rejected": -1.6541532278060913,
      "eval_logps/chosen": -85.1590347290039,
      "eval_logps/rejected": -254.51121520996094,
      "eval_loss": 1.8418553793253523e-08,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -4.244982719421387,
      "eval_rewards/margins": 18.665035247802734,
      "eval_rewards/rejected": -22.910018920898438,
      "eval_runtime": 139.9542,
      "eval_samples_per_second": 0.586,
      "eval_steps_per_second": 0.586,
      "step": 325
    }
  ],
  "logging_steps": 25,
  "max_steps": 384,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}