From b18b7600ebb319a11297a74667ad07aa2fb38a61 Mon Sep 17 00:00:00 2001
From: Paavo Pohndorff <paavo@pohndorff.com>
Date: Wed, 13 Mar 2024 22:02:38 +0100
Subject: [PATCH] Fixed import error for ChatMessage (#11902)

---
 .../llama_index/llms/mistralai/base.py             | 8 ++++----
 .../llms/llama-index-llms-mistralai/pyproject.toml | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/llama-index-integrations/llms/llama-index-llms-mistralai/llama_index/llms/mistralai/base.py b/llama-index-integrations/llms/llama-index-llms-mistralai/llama_index/llms/mistralai/base.py
index 05977726e8..78f9f58bc0 100644
--- a/llama-index-integrations/llms/llama-index-llms-mistralai/llama_index/llms/mistralai/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-mistralai/llama_index/llms/mistralai/base.py
@@ -173,7 +173,7 @@ class MistralAI(LLM):
     @llm_chat_callback()
     def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
         # convert messages to mistral ChatMessage
-        from mistralai.client import ChatMessage as mistral_chatmessage
+        from mistralai.models.chat_completion import ChatMessage as mistral_chatmessage
 
         messages = [
             mistral_chatmessage(role=x.role, content=x.content) for x in messages
@@ -199,7 +199,7 @@ class MistralAI(LLM):
         self, messages: Sequence[ChatMessage], **kwargs: Any
     ) -> ChatResponseGen:
         # convert messages to mistral ChatMessage
-        from mistralai.client import ChatMessage as mistral_chatmessage
+        from mistralai.models.chat_completion import ChatMessage as mistral_chatmessage
 
         messages = [
             mistral_chatmessage(role=message.role, content=message.content)
@@ -237,7 +237,7 @@ class MistralAI(LLM):
         self, messages: Sequence[ChatMessage], **kwargs: Any
     ) -> ChatResponse:
         # convert messages to mistral ChatMessage
-        from mistralai.client import ChatMessage as mistral_chatmessage
+        from mistralai.models.chat_completion import ChatMessage as mistral_chatmessage
 
         messages = [
             mistral_chatmessage(role=message.role, content=message.content)
@@ -264,7 +264,7 @@ class MistralAI(LLM):
         self, messages: Sequence[ChatMessage], **kwargs: Any
     ) -> ChatResponseAsyncGen:
         # convert messages to mistral ChatMessage
-        from mistralai.client import ChatMessage as mistral_chatmessage
+        from mistralai.models.chat_completion import ChatMessage as mistral_chatmessage
 
         messages = [
             mistral_chatmessage(role=x.role, content=x.content) for x in messages
diff --git a/llama-index-integrations/llms/llama-index-llms-mistralai/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-mistralai/pyproject.toml
index d538db4ae3..1069342f72 100644
--- a/llama-index-integrations/llms/llama-index-llms-mistralai/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-mistralai/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-mistralai"
 readme = "README.md"
-version = "0.1.5"
+version = "0.1.6"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
--
GitLab
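
For reference, a minimal standalone sketch of the corrected import path, assuming the mistralai 0.x SDK this patch targets (the API key and model name are placeholders, not part of the patch):

# Minimal sketch (assumes mistralai 0.x, where mistralai.models.chat_completion
# exists; API key and model name are placeholders).
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage

client = MistralClient(api_key="YOUR_API_KEY")  # placeholder credential
messages = [ChatMessage(role="user", content="Say hello.")]
# The same ChatMessage construction the patched wrapper performs before
# forwarding messages to the client's chat endpoint.
response = client.chat(model="mistral-tiny", messages=messages)
print(response.choices[0].message.content)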