From 2a94bfff261c30b3b87ef2b43feca39d6c695e8d Mon Sep 17 00:00:00 2001
From: celestinoalan <celestino.alan@gmail.com>
Date: Wed, 23 Oct 2024 23:52:51 -0300
Subject: [PATCH] Append per-epoch val. loss rather than best val. loss to val_loss

**Problem**
Currently, we call `val_loss.append(best_val_loss)` in each epoch. This is misleading because `train_loss`, `train_prep`, and `val_prep` all record per-epoch quantities, not the best value across epochs. It is also inconvenient, since one often wants to plot both the train and validation losses as a function of epoch to check for overfitting.

**Solution**
Append the per-epoch validation loss instead: `val_loss.append(eval_epoch_loss)`.
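
With this change, the overfitting check mentioned above becomes straightforward. A minimal sketch (the loss values below are hypothetical, purely for illustration):

```python
import matplotlib.pyplot as plt

# Hypothetical per-epoch histories, as now recorded by train()
train_loss = [1.20, 0.85, 0.62, 0.48, 0.41]
val_loss   = [1.10, 0.90, 0.80, 0.82, 0.95]  # rises after epoch 3 -> overfitting

epochs = range(1, len(train_loss) + 1)
plt.plot(epochs, train_loss, label="train loss")
plt.plot(epochs, val_loss, label="validation loss")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curves.png")
```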
---
 src/llama_recipes/utils/train_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llama_recipes/utils/train_utils.py b/src/llama_recipes/utils/train_utils.py
index a2facea5..d3b42ae1 100644
--- a/src/llama_recipes/utils/train_utils.py
+++ b/src/llama_recipes/utils/train_utils.py
@@ -288,7 +288,7 @@ def train(model, train_dataloader,eval_dataloader, tokenizer, optimizer, lr_sche
                         print(f"best eval loss on epoch {epoch+1} is {best_val_loss}")
                 else:
                         print(f"best eval loss on epoch {epoch+1} is {best_val_loss}")
-            val_loss.append(float(best_val_loss))
+            val_loss.append(float(eval_epoch_loss))
             val_prep.append(float(eval_ppl))
         if train_config.enable_fsdp:
             if rank==0:
-- 
GitLab