diff --git a/src/llama_recipes/finetuning.py b/src/llama_recipes/finetuning.py
index 6b5650b20534f57bf3fd7d8873b8741414b050c8..39b974f62f711683c80daa734234783ba33af0ee 100644
--- a/src/llama_recipes/finetuning.py
+++ b/src/llama_recipes/finetuning.py
@@ -9,7 +9,7 @@ import fire
 import random
 import torch
 import torch.optim as optim
-from peft import get_peft_model, prepare_model_for_int8_training
+from peft import get_peft_model, prepare_model_for_kbit_training
 from torch.distributed.fsdp import (
     FullyShardedDataParallel as FSDP,
     ShardingStrategy
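
Context for this rename: in recent peft releases, prepare_model_for_int8_training was deprecated (and later removed) in favor of prepare_model_for_kbit_training, which covers both 8-bit and 4-bit quantized models; the call site is otherwise unchanged. A minimal usage sketch follows, assuming a quantized base model; the checkpoint name and LoRA settings are illustrative, not taken from this repo:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

# Load the base model in 8-bit (4-bit works the same way via load_in_4bit=True).
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",  # illustrative checkpoint, not from the diff
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    torch_dtype=torch.float16,
)

# Drop-in replacement for the removed int8 helper: freezes the base model,
# casts small fp params (e.g. layernorms) to fp32 for stability, and enables
# gradient checkpointing by default so LoRA can train on top of the frozen net.
model = prepare_model_for_kbit_training(model)

# Attach LoRA adapters as before; get_peft_model itself is unchanged.
model = get_peft_model(model, LoraConfig(task_type="CAUSAL_LM"))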