From a6208317620acd89fa9984234ea34770a5c500eb Mon Sep 17 00:00:00 2001 From: Huang Zhihong <59925853+24kMengXin@users.noreply.github.com> Date: Wed, 9 Oct 2024 01:35:49 +0800 Subject: [PATCH] Fix the bug when continuing the PEFT. (#717) --- src/llama_recipes/finetuning.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llama_recipes/finetuning.py b/src/llama_recipes/finetuning.py index 4cec952a..0e140a79 100644 --- a/src/llama_recipes/finetuning.py +++ b/src/llama_recipes/finetuning.py @@ -167,7 +167,7 @@ def main(**kwargs): # Load the pre-trained peft model checkpoint and setup its configuration if train_config.from_peft_checkpoint: model = PeftModel.from_pretrained(model, train_config.from_peft_checkpoint, is_trainable=True) - peft_config = model.peft_config() + peft_config = model.peft_config # Generate the peft config and start fine-tuning from original model else: peft_config = generate_peft_config(train_config, kwargs) -- GitLab