diff --git a/inference/code-llama/code_completion_example.py b/inference/code-llama/code_completion_example.py
index 1aeb47ad8a5f79edd4306f3c158c6a77a673fb67..47e2dacf49d5264c08979630eb7a28cb526f0933 100644
--- a/inference/code-llama/code_completion_example.py
+++ b/inference/code-llama/code_completion_example.py
@@ -10,7 +10,7 @@
 import sys
 import time
 from typing import List
-from transformers import CodeLlamaTokenizer
+from transformers import AutoTokenizer
 sys.path.append("..")
 from safety_utils import get_safety_checker
 from model_utils import load_model, load_peft_model, load_llama_from_config
@@ -69,7 +69,7 @@ def main(
     except ImportError:
         print("Module 'optimum' not found. Please install 'optimum' it before proceeding.")
 
-    tokenizer = CodeLlamaTokenizer.from_pretrained(model_name)
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
 
     tokenizer.add_special_tokens(
         {
diff --git a/inference/code-llama/code_infilling_example.py b/inference/code-llama/code_infilling_example.py
index 8f6c6fb376426a4deac568b25c605d717c3a122e..f27c628ad4f31f11bc747231bb6dc507f7316348 100644
--- a/inference/code-llama/code_infilling_example.py
+++ b/inference/code-llama/code_infilling_example.py
@@ -10,7 +10,7 @@
 import sys
 import time
 from typing import List
-from transformers import CodeLlamaTokenizer
+from transformers import AutoTokenizer
 sys.path.append("..")
 from safety_utils import get_safety_checker
 from model_utils import load_model, load_peft_model, load_llama_from_config
@@ -46,7 +46,6 @@ def main(
     else:
         print("No user prompt provided. Exiting.")
         sys.exit(1)
-
     # Set the seeds for reproducibility
     torch.cuda.manual_seed(seed)
     torch.manual_seed(seed)
@@ -70,7 +69,7 @@ def main(
     except ImportError:
         print("Module 'optimum' not found. Please install 'optimum' it before proceeding.")
 
-    tokenizer = CodeLlamaTokenizer.from_pretrained(model_name)
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
 
     tokenizer.add_special_tokens(
         {
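The change above swaps the hard-coded `CodeLlamaTokenizer` import for `AutoTokenizer`, which inspects the checkpoint's config and resolves to the appropriate tokenizer class on its own. A minimal sketch of why this is behavior-preserving is below; the checkpoint name is an assumption for illustration, not taken from the diff.

```python
# Minimal sketch (not part of the diff): AutoTokenizer resolves to the
# Code Llama tokenizer class for a Code Llama checkpoint, so the scripts
# load the same tokenizer as before without pinning the class at import time.
# NOTE: "codellama/CodeLlama-7b-hf" is an assumed example checkpoint.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("codellama/CodeLlama-7b-hf")
print(type(tokenizer).__name__)  # resolved from the checkpoint's config,
                                 # e.g. a Code Llama tokenizer (fast) class

# Same pattern the scripts apply right after loading:
tokenizer.add_special_tokens({"pad_token": "<PAD>"})
```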