From b4e1a420c83bbfd92994caa7b1cd7929e6415cb5 Mon Sep 17 00:00:00 2001
From: Merovingian <99087793+tokenizer-decode@users.noreply.github.com>
Date: Wed, 15 May 2024 19:24:08 +0300
Subject: [PATCH] Fix freeze layer bug: pass model to freeze_transformer_layers

---
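freeze_transformer_layers needs the model as its first argument; the old call
passed only train_config.num_freeze_layers. Below is a minimal sketch of what
such a helper typically looks like, assuming it iterates over
model.model.layers as the llama_recipes training utilities do. It is
illustrative only, not the authoritative definition in this repository, and is
included to show why the model must be passed explicitly at the call site.

    def freeze_transformer_layers(model, num_layer):
        # Freeze the first `num_layer` decoder layers so their weights are
        # excluded from gradient updates; later layers stay trainable.
        for i, layer in enumerate(model.model.layers):
            if i < num_layer:
                for param in layer.parameters():
                    param.requires_grad = False

    # Usage matching the fixed call site:
    # freeze_transformer_layers(model, train_config.num_freeze_layers)
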
 src/llama_recipes/finetuning.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/llama_recipes/finetuning.py b/src/llama_recipes/finetuning.py
index f581f7d1..3f5079c4 100644
--- a/src/llama_recipes/finetuning.py
+++ b/src/llama_recipes/finetuning.py
@@ -166,8 +166,7 @@ def main(**kwargs):
     #setting up FSDP if enable_fsdp is enabled
     if train_config.enable_fsdp:
         if not train_config.use_peft and train_config.freeze_layers:
-
-            freeze_transformer_layers(train_config.num_freeze_layers)
+            freeze_transformer_layers(model, train_config.num_freeze_layers)
 
         mixed_precision_policy, wrapping_policy = get_policies(fsdp_config, rank)
         my_auto_wrapping_policy = fsdp_auto_wrap_policy(model, LlamaDecoderLayer)
@@ -217,7 +216,7 @@ def main(**kwargs):
         split="test",
     )
     if not train_config.enable_fsdp or rank == 0:
-            print(f"--> Validation Set Length = {len(dataset_val)}")
+        print(f"--> Validation Set Length = {len(dataset_val)}")
 
     if train_config.batching_strategy == "packing":
         dataset_train = ConcatDataset(dataset_train, chunk_size=train_config.context_length)
-- 
GitLab