diff --git a/examples/inference.py b/examples/inference.py
index 28952920178ab4ba87c714d68b4c142743ded365..ab4e7139f1dcd631fcc5992a8a7df97359b79713 100644
--- a/examples/inference.py
+++ b/examples/inference.py
@@ -72,13 +72,8 @@ def main(
             print("Module 'optimum' not found. Please install 'optimum' it before proceeding.")
 
     tokenizer = LlamaTokenizer.from_pretrained(model_name)
-    tokenizer.add_special_tokens(
-        {
-         
-            "pad_token": "<PAD>",
-        }
-    )
-    model.resize_token_embeddings(model.config.vocab_size + 1) 
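+    # Reuse the EOS token as the padding token, so no new <PAD> token is added and the embedding matrix does not need resizing.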
+    tokenizer.pad_token = tokenizer.eos_token
     
     safety_checker = get_safety_checker(enable_azure_content_safety,
                                         enable_sensitive_topics,