From e554c1c8bf3cc1149c14a0e4f6582d23cacd748e Mon Sep 17 00:00:00 2001
From: Yuanhao <wuyhthu@gmail.com>
Date: Thu, 17 Aug 2023 16:10:28 +0800
Subject: [PATCH] Append eos_token_id manually, since the tokenizer does not add eos_token by default

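A minimal sketch of the behavior this patch works around, assuming a
Hugging Face LlamaTokenizer loaded from a Llama 2 chat checkpoint (the
model id below is only illustrative):

    from transformers import LlamaTokenizer

    # Illustrative checkpoint; substitute your own Llama 2 model path.
    tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")

    ids = tokenizer.encode("[INST] Hello [/INST] Hi there ")

    # encode() prepends bos_token_id but does not append eos_token_id,
    # so each completed user/assistant turn must be terminated manually.
    assert ids[0] == tokenizer.bos_token_id
    assert ids[-1] != tokenizer.eos_token_id

    ids = ids + [tokenizer.eos_token_id]  # what format_tokens now does per turn

The intent is to match the Llama 2 chat format, where every completed
answer ends with </s> before the next <s>[INST] turn begins.
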
---
 src/llama_recipes/inference/chat_utils.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/llama_recipes/inference/chat_utils.py b/src/llama_recipes/inference/chat_utils.py
index 8d781e31..530fdcf7 100644
--- a/src/llama_recipes/inference/chat_utils.py
+++ b/src/llama_recipes/inference/chat_utils.py
@@ -44,7 +44,7 @@ def format_tokens(dialogs, tokenizer):
             [
                 tokenizer.encode(
                     f"{B_INST} {(prompt['content']).strip()} {E_INST} {(answer['content']).strip()} ",
-                )
+                ) + [tokenizer.eos_token_id]
                 for prompt, answer in zip(dialog[::2], dialog[1::2])
             ],
             [],
@@ -62,4 +62,4 @@ def format_tokens(dialogs, tokenizer):
 def read_dialogs_from_file(file_path):
     with open(file_path, 'r') as file:
         dialogs = json.load(file)
-    return dialogs
\ No newline at end of file
+    return dialogs
-- 
GitLab