diff --git a/docs/multi_gpu.md b/docs/multi_gpu.md
index 8e1064cb0331b8e39a12d44e1d697a1279b37068..56913103309fb5ea5c3dfd92c2f0255e020b84cc 100644
--- a/docs/multi_gpu.md
+++ b/docs/multi_gpu.md
@@ -34,7 +34,7 @@ The args used in the command above are:
 
 * `--use_peft` boolean flag to enable PEFT methods in the script
 
-* `--peft_method` to specify the PEFT method, here we use `lora` other options are `llama_adapter`, `prefix`.
+* `--peft_method` to specify the PEFT method; here we use `lora`, and the other available option is `llama_adapter`.
 
 We use `torchrun` here to spawn multiple processes for FSDP.
 
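As a brief illustration of the `--peft_method` options named in the hunk above, here is a minimal sketch of how the two remaining values might map onto `peft` config classes. The mapping and hyperparameters below are illustrative assumptions, not the repo's actual `generate_peft_config` logic:

```python
from peft import AdaptionPromptConfig, LoraConfig

# Illustrative only: the real mapping lives in llama_recipes' own config
# dataclasses; the values below are placeholder defaults.
PEFT_CONFIGS = {
    "lora": LoraConfig(r=8, lora_alpha=32, target_modules=["q_proj", "v_proj"]),
    "llama_adapter": AdaptionPromptConfig(adapter_len=10, adapter_layers=30),
}

peft_config = PEFT_CONFIGS["lora"]  # corresponds to --peft_method lora
```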
diff --git a/docs/single_gpu.md b/docs/single_gpu.md
index 850d886ca428fa445e58c636d5bfefdd072fa466..168acadd9387033dd798056958f18b7e85e044d8 100644
--- a/docs/single_gpu.md
+++ b/docs/single_gpu.md
@@ -27,7 +27,7 @@ The args used in the command above are:
 
 * `--use_peft` boolean flag to enable PEFT methods in the script
 
-* `--peft_method` to specify the PEFT method, here we use `lora` other options are `llama_adapter`, `prefix`.
+* `--peft_method` to specify the PEFT method; here we use `lora`, and the other available option is `llama_adapter`.
 
 * `--quantization` boolean flag to enable int8 quantization
 
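As an aside on the `--quantization` flag documented in this hunk: in the transformers API this kind of flag typically translates into loading the base model with 8-bit weights through bitsandbytes. A minimal sketch, assuming that path (the model name is a placeholder, and `bitsandbytes` plus a CUDA device are required):

```python
from transformers import LlamaForCausalLM

# 8-bit weight loading via bitsandbytes; placeholder model name.
model = LlamaForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",
    load_in_8bit=True,
    device_map="auto",
)
```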
diff --git a/src/llama_recipes/finetuning.py b/src/llama_recipes/finetuning.py
index 32cf743b988f0cebe50e3dbc890033f72eb8bb4d..3fef7222b486b8087a29d7b65710a03dfbcc424d 100644
--- a/src/llama_recipes/finetuning.py
+++ b/src/llama_recipes/finetuning.py
@@ -154,12 +154,13 @@ def main(**kwargs):
         # Load the pre-trained peft model checkpoint and setup its configuration
         if train_config.from_peft_checkpoint:
             model = PeftModel.from_pretrained(model, train_config.from_peft_checkpoint, is_trainable=True)
+            peft_config = model.peft_config  # dict attribute on PeftModel, not a method
         # Generate the peft config and start fine-tuning from original model
         else:
             peft_config = generate_peft_config(train_config, kwargs)
             model = get_peft_model(model, peft_config)
-            if wandb_run:
-                wandb_run.config.update(peft_config)
+        if wandb_run:
+            wandb_run.config.update(peft_config)
         model.print_trainable_parameters()
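The finetuning.py hunk above fixes two things: it binds `peft_config` when resuming from a PEFT checkpoint (note that `PeftModel.peft_config` is a dict attribute mapping adapter names to configs, not a method), and it dedents the `wandb` update so it runs on both paths. A standalone sketch of the resulting control flow, with placeholder model and checkpoint names:

```python
from peft import LoraConfig, PeftModel, get_peft_model
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")  # placeholder
from_peft_checkpoint = ""  # placeholder: path to a saved PEFT checkpoint, if resuming

if from_peft_checkpoint:
    # Resuming: the adapter config travels with the checkpoint, so read it
    # back off the wrapped model instead of generating a new one.
    model = PeftModel.from_pretrained(model, from_peft_checkpoint, is_trainable=True)
    peft_config = model.peft_config  # dict: adapter name -> PeftConfig
else:
    # Fresh run: build a config and wrap the base model with it.
    peft_config = LoraConfig(r=8, lora_alpha=32, target_modules=["q_proj", "v_proj"])
    model = get_peft_model(model, peft_config)

# This point is reached on both paths, so peft_config must be bound on both
# branches before any shared use of it (wandb_run.config.update in the script).
model.print_trainable_parameters()
```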