diff --git a/inference/inference.py b/inference/inference.py
index 60b1cd6e9e07675bb7dd9dbdda175ff1b1fd6b7b..58c094a5f4c2adc362a45a6a253378295ecdfe7d 100644
--- a/inference/inference.py
+++ b/inference/inference.py
@@ -52,7 +52,8 @@ def main(
     torch.manual_seed(seed)
     
     model = load_model(model_name, quantization)
-    model.config.pretraining_tp=8
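+    # Leave model.config.pretraining_tp at its default of 1; larger values only
+    # emulate tensor parallelism and slow generation down.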
     tokenizer = LlamaTokenizer.from_pretrained(model_name)
     tokenizer.add_special_tokens(
         {
diff --git a/llama_finetuning.py b/llama_finetuning.py
index 02aebdecb721776659cdaadd6de93e7ca4ef907b..4e324140781dabc6fd31a8915fe6293951346b9f 100644
--- a/llama_finetuning.py
+++ b/llama_finetuning.py
@@ -109,13 +109,11 @@ def main(**kwargs):
     # Load the tokenizer and add special tokens
     tokenizer = LlamaTokenizer.from_pretrained(train_config.model_name)
     tokenizer.add_special_tokens(
-        {
-            "eos_token": "</s>",
-            "bos_token": "</s>",
-            "unk_token": "</s>",
-            "pad_token": '[PAD]',
-        }
-    )
+        {
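+            # BOS/EOS/UNK are already defined by LlamaTokenizer; only a pad token is missing.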
+                "pad_token": "<PAD>",
+            }
+        )
     if train_config.use_peft:
         peft_config = generate_peft_config(train_config, kwargs)
         model = get_peft_model(model, peft_config)