diff --git a/llama_index/embeddings/ollama_embedding.py b/llama_index/embeddings/ollama_embedding.py
index d35f2d9f7e18175075e6bed725e7595d42412a68..de26ca1c63e9e1ffc804ac7a697e281b9c269002 100644
--- a/llama_index/embeddings/ollama_embedding.py
+++ b/llama_index/embeddings/ollama_embedding.py
@@ -83,7 +83,7 @@ class OllamaEmbedding(BaseEmbedding):
         ollama_request_body = {
             "prompt": prompt,
             "model": self.model_name,
-            **self.ollama_additional_kwargs,
+            "options": self.ollama_additional_kwargs,
         }
 
         response = requests.post(
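The change above nests the user-supplied kwargs under an `"options"` key rather than spreading them into the top level of the request body; the Ollama REST API reads model parameters for `/api/embeddings` from that nested object, and unrecognized top-level keys are silently ignored. A minimal sketch of the corrected request, assuming a local Ollama server on the default port; the model name and the `mirostat` option are illustrative values, not part of the patch:

```python
import requests

base_url = "http://localhost:11434"  # assumed default Ollama address
ollama_additional_kwargs = {"mirostat": 0}  # hypothetical model options

response = requests.post(
    url=f"{base_url}/api/embeddings",
    headers={"Content-Type": "application/json"},
    json={
        "prompt": "a piece of text to embed",
        "model": "llama2",
        # Model parameters must sit under "options"; spreading them at
        # the top level (the old behavior) drops them silently.
        "options": ollama_additional_kwargs,
    },
)
response.raise_for_status()
embedding = response.json()["embedding"]  # list[float]
```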
diff --git a/llama_index/llms/ollama.py b/llama_index/llms/ollama.py
index 078e2f7a3c6ea76ccd5290303a17aee6a05b28fb..3cf5af31c56bd86edb7bb662c64706590545fc50 100644
--- a/llama_index/llms/ollama.py
+++ b/llama_index/llms/ollama.py
@@ -63,7 +63,7 @@ class Ollama(CustomLLM):
     def _model_kwargs(self) -> Dict[str, Any]:
         base_kwargs = {
             "temperature": self.temperature,
-            "max_length": self.context_window,
+            "num_ctx": self.context_window,
         }
         return {
             **base_kwargs,
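Ollama has no `max_length` parameter; the context window size is controlled by `num_ctx`, the same option name used in a Modelfile. A sketch of what `_model_kwargs` now produces, with illustrative values standing in for the instance attributes:

```python
from typing import Any, Dict

temperature = 0.75       # assumed value of self.temperature
context_window = 4096    # assumed value of self.context_window
additional_kwargs: Dict[str, Any] = {}  # assumed empty for this sketch

base_kwargs = {
    "temperature": temperature,
    # "num_ctx" is the option Ollama actually recognizes for the
    # context window; "max_length" was never a valid key.
    "num_ctx": context_window,
}
model_kwargs = {**base_kwargs, **additional_kwargs}
assert model_kwargs == {"temperature": 0.75, "num_ctx": 4096}
```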
@@ -117,7 +117,7 @@ class Ollama(CustomLLM):
         response = requests.post(
             url=f"{self.base_url}/api/generate/",
             headers={"Content-Type": "application/json"},
-            json={"prompt": prompt, "model": self.model, **all_kwargs},
+            json={"prompt": prompt, "model": self.model, "options": all_kwargs},
             stream=True,
         )
         response.encoding = "utf-8"
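The same nesting fix applies to `/api/generate`: aside from documented top-level fields such as `prompt`, `model`, and `stream`, sampler and model parameters belong under `"options"`. A hedged sketch of the corrected streaming call, consuming Ollama's newline-delimited JSON response; the URL, prompt, and option values are assumptions for illustration:

```python
import json
import requests

base_url = "http://localhost:11434"  # assumed default Ollama address
all_kwargs = {"temperature": 0.75, "num_ctx": 4096}  # from _model_kwargs

response = requests.post(
    url=f"{base_url}/api/generate/",
    headers={"Content-Type": "application/json"},
    json={
        "prompt": "Why is the sky blue?",
        "model": "llama2",
        # As in the embeddings request, parameters go under "options".
        "options": all_kwargs,
    },
    stream=True,
)
response.encoding = "utf-8"
for line in response.iter_lines(decode_unicode=True):
    if line:  # each non-empty line is one JSON chunk of the stream
        chunk = json.loads(line)
        print(chunk.get("response", ""), end="", flush=True)
```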