From 4927b3d27bd79d383c862bb30a61c83d98fb2126 Mon Sep 17 00:00:00 2001
From: Ofer Mendelevitch <ofermend@gmail.com>
Date: Fri, 2 Feb 2024 22:15:35 -0800
Subject: [PATCH] bug fix - make sure no OpenAI embedding model is required
 (#10426)

bug fix: pass embed_model=None when building the ServiceContext so that
VectaraIndex does not require a local embedding model (and therefore no
OpenAI API key), since Vectara computes embeddings server-side.
---
 llama_index/indices/managed/vectara/base.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/llama_index/indices/managed/vectara/base.py b/llama_index/indices/managed/vectara/base.py
index d6fd6c5ff9..d49e33f85f 100644
--- a/llama_index/indices/managed/vectara/base.py
+++ b/llama_index/indices/managed/vectara/base.py
@@ -70,7 +70,9 @@ class VectaraIndex(BaseManagedIndex):
         super().__init__(
             show_progress=show_progress,
             index_struct=index_struct,
-            service_context=ServiceContext.from_defaults(llm=None, llm_predictor=None),
+            service_context=ServiceContext.from_defaults(
+                llm=None, llm_predictor=None, embed_model=None
+            ),
             **kwargs,
         )
         self._vectara_customer_id = vectara_customer_id or os.environ.get(
-- 
GitLab
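
For reference, this change lets a VectaraIndex be constructed without any local
embedding model (and thus without an OpenAI API key), since Vectara embeds
documents on the server side. The sketch below illustrates the intended usage;
the vectara_customer_id keyword appears in the diff context, while
vectara_corpus_id, vectara_api_key, from_documents, and as_query_engine are
assumed from the surrounding llama_index 0.9.x API and may differ in other
versions.

# Minimal usage sketch (assumed API, llama_index 0.9.x layout): no local
# embedding model is configured, so no OpenAI key is needed.
from llama_index import Document
from llama_index.indices.managed.vectara import VectaraIndex

docs = [Document(text="Vectara stores and embeds documents server-side.")]

index = VectaraIndex.from_documents(
    docs,
    vectara_customer_id="<customer-id>",  # parameter visible in the diff context
    vectara_corpus_id="<corpus-id>",      # assumed parameter name
    vectara_api_key="<api-key>",          # assumed parameter name
)

# Queries run against Vectara's managed retrieval; no embedding is computed locally.
query_engine = index.as_query_engine()
print(query_engine.query("What does this index require for embeddings?"))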