From fadef5f31ef6acd9b39b72103931b5eb62f98585 Mon Sep 17 00:00:00 2001
From: Sam Tsao <sam89881@gmail.com>
Date: Wed, 13 Dec 2023 13:24:48 +0800
Subject: [PATCH] Make Ollama support additional_kwargs correctly (#9455)

Make ollama_additional_kwargs work: nest it (and the other model kwargs)
under the "options" key of the request body, where the Ollama API expects
model parameters, and set the context window via "num_ctx" instead of the
unsupported "max_length" key.
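
For reference, a minimal sketch of the request body the client now sends
(the model name and parameter values below are illustrative, not taken
from the code):

    POST {base_url}/api/generate/
    {
        "prompt": "Why is the sky blue?",
        "model": "llama2",
        "options": {"temperature": 0.75, "num_ctx": 3900}
    }

Previously these kwargs were spread at the top level of the JSON body,
where the Ollama server silently ignores them.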
---
 llama_index/embeddings/ollama_embedding.py | 2 +-
 llama_index/llms/ollama.py                 | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/llama_index/embeddings/ollama_embedding.py b/llama_index/embeddings/ollama_embedding.py
index d35f2d9f7e..de26ca1c63 100644
--- a/llama_index/embeddings/ollama_embedding.py
+++ b/llama_index/embeddings/ollama_embedding.py
@@ -83,7 +83,7 @@ class OllamaEmbedding(BaseEmbedding):
         ollama_request_body = {
             "prompt": prompt,
             "model": self.model_name,
-            **self.ollama_additional_kwargs,
+            "options": self.ollama_additional_kwargs,
         }
 
         response = requests.post(
diff --git a/llama_index/llms/ollama.py b/llama_index/llms/ollama.py
index 078e2f7a3c..3cf5af31c5 100644
--- a/llama_index/llms/ollama.py
+++ b/llama_index/llms/ollama.py
@@ -63,7 +63,7 @@ class Ollama(CustomLLM):
     def _model_kwargs(self) -> Dict[str, Any]:
         base_kwargs = {
             "temperature": self.temperature,
-            "max_length": self.context_window,
+            "num_ctx": self.context_window,
         }
         return {
             **base_kwargs,
@@ -117,7 +117,7 @@ class Ollama(CustomLLM):
         response = requests.post(
             url=f"{self.base_url}/api/generate/",
             headers={"Content-Type": "application/json"},
-            json={"prompt": prompt, "model": self.model, **all_kwargs},
+            json={"prompt": prompt, "model": self.model, "options": all_kwargs},
             stream=True,
         )
         response.encoding = "utf-8"
-- 
GitLab