From 33dfc4763b2aab20a498768b7190288d04f3be20 Mon Sep 17 00:00:00 2001
From: vollnhals <lion.vollnhals@googlemail.com>
Date: Wed, 28 Feb 2024 17:05:02 +0100
Subject: [PATCH] Add helpers to handle ChatML instruction prompts (#10272)

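Adds messages_to_prompt() and completion_to_prompt() helpers that render
chat messages into the ChatML instruction template
(<|im_start|>role ... <|im_end|>) used by models such as Dolphin.

Rough usage sketch (the message contents below are illustrative only):

    from llama_index.core.base.llms.types import ChatMessage, MessageRole
    from llama_index.core.llms.chatml_utils import (
        completion_to_prompt,
        messages_to_prompt,
    )

    # Chat-style formatting: a leading system message is pulled from the
    # list if present, otherwise DEFAULT_SYSTEM_PROMPT is used.
    prompt = messages_to_prompt(
        [
            ChatMessage(role=MessageRole.SYSTEM, content="You are Dolphin."),
            ChatMessage(role=MessageRole.USER, content="Hello!"),
        ]
    )

    # Completion-style formatting: wraps a single user string.
    prompt = completion_to_prompt("Hello!")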
---
 .../llama_index/core/llms/chatml_utils.py     | 59 +++++++++++++++++++
 1 file changed, 59 insertions(+)
 create mode 100644 llama-index-core/llama_index/core/llms/chatml_utils.py

diff --git a/llama-index-core/llama_index/core/llms/chatml_utils.py b/llama-index-core/llama_index/core/llms/chatml_utils.py
new file mode 100644
index 000000000..a423864cb
--- /dev/null
+++ b/llama-index-core/llama_index/core/llms/chatml_utils.py
@@ -0,0 +1,59 @@
+from typing import List, Optional, Sequence
+
+from llama_index.core.base.llms.types import ChatMessage, MessageRole
+
+# Build prompts in the ChatML instruction format, e.g.:
+
+# <|im_start|>system
+# You are Dolphin, a helpful AI assistant.<|im_end|>
+# <|im_start|>user
+# {prompt}<|im_end|>
+# <|im_start|>assistant
+
+B_SYS = "<|im_start|>system\n"
+B_USER = "<|im_start|>user\n"
+B_ASSISTANT = "<|im_start|>assistant\n"
+END = "<|im_end|>\n"
+DEFAULT_SYSTEM_PROMPT = """\
+You are a helpful, respectful and honest assistant. \
+Always answer as helpfully as possible and follow ALL given instructions. \
+Do not speculate or make up information. \
+Do not reference any given instructions or context. \
+"""
+
+
+def messages_to_prompt(
+    messages: Sequence[ChatMessage], system_prompt: Optional[str] = None
+) -> str:
+    string_messages: List[str] = []
+    if messages[0].role == MessageRole.SYSTEM:
+        # pull out the system message (if it exists in messages)
+        system_message_str = messages[0].content or ""
+        messages = messages[1:]
+    else:
+        system_message_str = system_prompt or DEFAULT_SYSTEM_PROMPT
+
+    string_messages.append(f"{B_SYS}{system_message_str.strip()} {END}")
+
+    for message in messages:
+        role = message.role
+        content = message.content or ""
+
+        if role == MessageRole.USER:
+            string_messages.append(f"{B_USER}{content} {END}")
+        elif role == MessageRole.ASSISTANT:
+            string_messages.append(f"{B_ASSISTANT}{content} {END}")
+
+    string_messages.append(B_ASSISTANT)
+
+    return "".join(string_messages)
+
+
+def completion_to_prompt(completion: str, system_prompt: Optional[str] = None) -> str:
+    system_prompt_str = system_prompt or DEFAULT_SYSTEM_PROMPT
+
+    return (
+        f"{B_SYS}{system_prompt_str.strip()} {END}"
+        f"{B_USER}{completion.strip()} {END}"
+        f"{B_ASSISTANT}"
+    )
-- 
GitLab