diff --git a/src/llama_recipes/inference/chat_utils.py b/src/llama_recipes/inference/chat_utils.py
index 8d781e31a66ff08d4c7ef1fd06cf08fd0654743f..530fdcf7d9ee0a4b397be00edbc3eadce937f388 100644
--- a/src/llama_recipes/inference/chat_utils.py
+++ b/src/llama_recipes/inference/chat_utils.py
@@ -44,7 +44,7 @@ def format_tokens(dialogs, tokenizer):
             [
                 tokenizer.encode(
                     f"{B_INST} {(prompt['content']).strip()} {E_INST} {(answer['content']).strip()} ",
-                )
+                ) + [tokenizer.eos_token_id]
                 for prompt, answer in zip(dialog[::2], dialog[1::2])
             ],
             [],
@@ -62,4 +62,4 @@ def format_tokens(dialogs, tokenizer):
 def read_dialogs_from_file(file_path):
     with open(file_path, 'r') as file:
         dialogs = json.load(file)
-    return dialogs
\ No newline at end of file
+    return dialogs
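
For context, here is a minimal sketch (not part of the patch) of what the one-token change does: each encoded (prompt, answer) turn now ends with the tokenizer's EOS id, matching the Llama 2 chat format in which every completed assistant reply is terminated by </s>. ToyTokenizer below is a hypothetical stand-in for a Hugging Face tokenizer; only its encode method and eos_token_id attribute mirror the interface format_tokens relies on.

B_INST, E_INST = "[INST]", "[/INST]"

class ToyTokenizer:
    eos_token_id = 2  # </s> in the Llama 2 vocabulary

    def encode(self, text):
        # Stand-in for real subword tokenization: one fake id per word.
        return [hash(w) % 1000 for w in text.split()]

def format_dialog(dialog, tokenizer):
    # Mirrors the patched list comprehension: encode each (prompt, answer)
    # turn, then append eos_token_id so the turn is marked as finished.
    return sum(
        [
            tokenizer.encode(
                f"{B_INST} {prompt['content'].strip()} {E_INST} {answer['content'].strip()} ",
            ) + [tokenizer.eos_token_id]
            for prompt, answer in zip(dialog[::2], dialog[1::2])
        ],
        [],
    )

dialog = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
]
tokens = format_dialog(dialog, ToyTokenizer())
assert tokens[-1] == ToyTokenizer.eos_token_id  # each completed turn now ends with EOS

The practical effect: without the trailing EOS id, earlier turns in a multi-turn dialog are never marked as complete, so the model gets no signal that an assistant reply ended before the next user message begins.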