diff --git a/src/llama_recipes/finetuning.py b/src/llama_recipes/finetuning.py
index e1d702e2005b557650e63f5d6c424a03bf686daf..2a86234c30857d58e562736813f229d8171f39f5 100644
--- a/src/llama_recipes/finetuning.py
+++ b/src/llama_recipes/finetuning.py
@@ -237,7 +237,8 @@ def main(**kwargs):
             
         if not train_config.use_peft and train_config.freeze_LLM_only and config.model_type == "mllama":
             freeze_LLM_only(model)
-            
+        
+        print_model_size(model, train_config, rank if train_config.enable_fsdp else 0)
 
         mixed_precision_policy, wrapping_policy = get_policies(fsdp_config, rank)
         # Create the FSDP wrapper for MllamaSelfAttentionDecoderLayer, MllamaCrossAttentionDecoderLayer, MllamaVisionEncoderLayer in vision models
@@ -306,8 +307,6 @@ def main(**kwargs):
         dataset_processer = processor
     else:
         dataset_processer = tokenizer
-
-    print_model_size(model, train_config, rank if train_config.enable_fsdp else 0)
     
     # Load and preprocess the dataset for training and validation
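
The relocated print_model_size call now runs before the FSDP policies and auto-wrap are applied, so the reported count presumably covers the full model rather than the local shard each rank holds after wrapping. Below is a minimal sketch of that kind of size report; the helper name report_model_size and its output format are assumptions for illustration, not the exact print_model_size implementation in llama_recipes.utils.train_utils.

import torch.nn as nn

def report_model_size(model: nn.Module, model_name: str, rank: int = 0) -> None:
    # Count every parameter of the still-unsharded model; print from rank 0
    # only so the report appears once per job instead of once per process.
    if rank == 0:
        total_params = sum(p.numel() for p in model.parameters())
        print(f"--> {model_name} has {total_params / 1e6:.2f} million parameters")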