From 277a292fbcc2d1b87c4f5cf40655575f0a7e9b56 Mon Sep 17 00:00:00 2001
From: Hamid Shojanazeri <hamid.nazeri2010@gmail.com>
Date: Mon, 28 Aug 2023 19:42:36 +0000
Subject: [PATCH] Use AutoTokenizer instead of CodeLlamaTokenizer in code-llama inference examples

---
 inference/code-llama/code_completion_example.py | 4 ++--
 inference/code-llama/code_infilling_example.py  | 5 ++---
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/inference/code-llama/code_completion_example.py b/inference/code-llama/code_completion_example.py
index 1aeb47ad..47e2dacf 100644
--- a/inference/code-llama/code_completion_example.py
+++ b/inference/code-llama/code_completion_example.py
@@ -10,7 +10,7 @@ import sys
 import time
 from typing import List
 
-from transformers import CodeLlamaTokenizer
+from transformers import AutoTokenizer
 sys.path.append("..")
 from safety_utils import get_safety_checker
 from model_utils import load_model, load_peft_model, load_llama_from_config
@@ -69,7 +69,7 @@ def main(
         except ImportError:
             print("Module 'optimum' not found. Please install 'optimum' it before proceeding.")
 
-    tokenizer = CodeLlamaTokenizer.from_pretrained(model_name)
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
     tokenizer.add_special_tokens(
         {
          
diff --git a/inference/code-llama/code_infilling_example.py b/inference/code-llama/code_infilling_example.py
index 8f6c6fb3..f27c628a 100644
--- a/inference/code-llama/code_infilling_example.py
+++ b/inference/code-llama/code_infilling_example.py
@@ -10,7 +10,7 @@ import sys
 import time
 from typing import List
 
-from transformers import CodeLlamaTokenizer
+from transformers import AutoTokenizer
 sys.path.append("..")
 from safety_utils import get_safety_checker
 from model_utils import load_model, load_peft_model, load_llama_from_config
@@ -46,7 +46,6 @@ def main(
     else:
         print("No user prompt provided. Exiting.")
         sys.exit(1)
-
     # Set the seeds for reproducibility
     torch.cuda.manual_seed(seed)
     torch.manual_seed(seed)
@@ -70,7 +69,7 @@ def main(
         except ImportError:
             print("Module 'optimum' not found. Please install 'optimum' it before proceeding.")
 
-    tokenizer = CodeLlamaTokenizer.from_pretrained(model_name)
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
     tokenizer.add_special_tokens(
         {
          
-- 
GitLab