diff --git a/src/llama_recipes/finetuning.py b/src/llama_recipes/finetuning.py
index 3b4973cf19cbe2e74623961b7144b064566f9d1c..d0a72400ed8b82c128050a36e88c484074d17860 100644
--- a/src/llama_recipes/finetuning.py
+++ b/src/llama_recipes/finetuning.py
@@ -94,7 +94,7 @@ def main(**kwargs):
                 load_in_8bit=True if train_config.quantization else None,
                 device_map="auto" if train_config.quantization else None,
                 use_cache=use_cache,
-                attn_implementation="eager" if train_config.use_fast_kernels else None,
+                attn_implementation="sdpa" if train_config.use_fast_kernels else None,
             )
         else:
             llama_config = LlamaConfig.from_pretrained(train_config.model_name)
@@ -108,7 +108,7 @@ def main(**kwargs):
             load_in_8bit=True if train_config.quantization else None,
             device_map="auto" if train_config.quantization else None,
             use_cache=use_cache,
-            attn_implementation="eager" if train_config.use_fast_kernels else None,
+            attn_implementation="sdpa" if train_config.use_fast_kernels else None,
         )

     # Load the tokenizer and add special tokens
diff --git a/src/llama_recipes/inference/model_utils.py b/src/llama_recipes/inference/model_utils.py
index cc7cc6d20c81517f902ba27a1d29e8ef5fa51e92..e70cb82c5c138ebeb02c7a59416000ff1097bb0b 100644
--- a/src/llama_recipes/inference/model_utils.py
+++ b/src/llama_recipes/inference/model_utils.py
@@ -13,7 +13,7 @@ def load_model(model_name, quantization, use_fast_kernels):
         load_in_8bit=quantization,
         device_map="auto",
         low_cpu_mem_usage=True,
-        attn_implementation="eager" if use_fast_kernels else None,
+        attn_implementation="sdpa" if use_fast_kernels else None,
     )
     return model
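
Note: the change swaps the attention backend selected when `use_fast_kernels` is enabled from "eager" (plain PyTorch attention) to "sdpa" (PyTorch scaled_dot_product_attention fused kernels). A minimal sketch of the resulting load call, assuming a Transformers release recent enough to accept `attn_implementation` in `from_pretrained`; the model id shown is hypothetical and not part of the diff:

    # Sketch only: mirrors the pattern in the diff, not a verbatim excerpt.
    from transformers import LlamaForCausalLM

    use_fast_kernels = True  # corresponds to train_config.use_fast_kernels / the load_model flag

    model = LlamaForCausalLM.from_pretrained(
        "meta-llama/Llama-2-7b-hf",  # hypothetical model id
        # "sdpa" routes attention through torch.nn.functional.scaled_dot_product_attention;
        # passing None leaves the library's default backend selection in place.
        attn_implementation="sdpa" if use_fast_kernels else None,
    )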