diff --git a/inference/chat_completion.py b/inference/chat_completion.py
index 274f381f9c0f88c75b11c671665c452ec39b4483..bc5311d62ac735ced4043357ebae0144cf335884 100644
--- a/inference/chat_completion.py
+++ b/inference/chat_completion.py
@@ -62,13 +62,11 @@ def main(
     tokenizer = LlamaTokenizer.from_pretrained(model_name)
     tokenizer.add_special_tokens(
         {
-            "eos_token": "</s>",
-            "bos_token": "</s>",
-            "unk_token": "</s>",
-            "pad_token": "[PAD]",
+
+            "pad_token": "<PAD>",
         }
     )
-
+
     chats = format_tokens(dialogs, tokenizer)
 
     with torch.no_grad():