diff --git a/llama-index-integrations/indices/llama-index-indices-managed-vectara/llama_index/indices/managed/vectara/retriever.py b/llama-index-integrations/indices/llama-index-indices-managed-vectara/llama_index/indices/managed/vectara/retriever.py
index 0018475f6c0eee84667cdd6ec45404d1ce0ad3f8..705fcf6e879ed1ca157b3c6cdea514caf2946812 100644
--- a/llama-index-integrations/indices/llama-index-indices-managed-vectara/llama_index/indices/managed/vectara/retriever.py
+++ b/llama-index-integrations/indices/llama-index-indices-managed-vectara/llama_index/indices/managed/vectara/retriever.py
@@ -89,6 +89,9 @@ class VectaraRetriever(BaseRetriever):
         summary_response_lang: language to use for summary generation.
         summary_num_results: number of results to use for summary generation.
         summary_prompt_name: name of the prompt to use for summary generation.
+        prompt_text: the custom prompt, using appropriate prompt variables and functions.
+            See (https://docs.vectara.com/docs/1.0/prompts/custom-prompts-with-metadata)
+            for more details.
         citations_style: The style of the citations in the summary generation,
             either "numeric", "html", "markdown", or "none".
             This is a Vectara Scale only feature. Defaults to None.
@@ -118,6 +121,7 @@ class VectaraRetriever(BaseRetriever):
         summary_response_lang: str = "eng",
         summary_num_results: int = 7,
         summary_prompt_name: str = "vectara-summary-ext-24-05-sml",
+        prompt_text: Optional[str] = None,
         citations_style: Optional[str] = None,
         citations_url_pattern: Optional[str] = None,
         citations_text_pattern: Optional[str] = None,
@@ -132,6 +136,7 @@ class VectaraRetriever(BaseRetriever):
         self._n_sentences_before = n_sentences_before
         self._n_sentences_after = n_sentences_after
         self._filter = filter
+        self._prompt_text = prompt_text
         self._citations_style = citations_style.upper() if citations_style else None
         self._citations_url_pattern = citations_url_pattern
         self._citations_text_pattern = citations_text_pattern
@@ -286,6 +291,8 @@ class VectaraRetriever(BaseRetriever):
                 "summarizerPromptName": self._summary_prompt_name,
             }
             data["query"][0]["summary"] = [summary_config]
+            if self._prompt_text:
+                data["query"][0]["summary"][0]["promptText"] = self._prompt_text
             if chat:
                 data["query"][0]["summary"][0]["chat"] = {
                     "store": True,
diff --git a/llama-index-integrations/indices/llama-index-indices-managed-vectara/pyproject.toml b/llama-index-integrations/indices/llama-index-indices-managed-vectara/pyproject.toml
index c15ff6ade811ed4e22053f7354394be293224fae..590de33e61bc291368ff3283b919ede09d23bb6b 100644
--- a/llama-index-integrations/indices/llama-index-indices-managed-vectara/pyproject.toml
+++ b/llama-index-integrations/indices/llama-index-indices-managed-vectara/pyproject.toml
@@ -31,7 +31,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-indices-managed-vectara"
 readme = "README.md"
-version = "0.2.4"
+version = "0.2.5"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/indices/llama-index-indices-managed-vectara/tests/test_indices_managed_vectara.py b/llama-index-integrations/indices/llama-index-indices-managed-vectara/tests/test_indices_managed_vectara.py
index 04b4ef3ec20cacb02eaabe0ecb1d98ed6a71b5c2..b15e8ae9640aeb66b75ffc12a46db789e9daa8a7 100644
--- a/llama-index-integrations/indices/llama-index-indices-managed-vectara/tests/test_indices_managed_vectara.py
+++ b/llama-index-integrations/indices/llama-index-indices-managed-vectara/tests/test_indices_managed_vectara.py
@@ -214,6 +214,26 @@ def test_chain_rerank_retrieval(vectara1) -> None:
     assert res[0].node.get_content() == docs[2].text
 
 
+def test_custom_prompt(vectara1) -> None:
+    docs = get_docs()
+
+    qe = vectara1.as_query_engine(
+        similarity_top_k=3,
+        n_sentences_before=0,
+        n_sentences_after=0,
+        reranker="mmr",
+        mmr_diversity_bias=0.2,
+        summary_enabled=True,
+        prompt_text='[\n  {"role": "system", "content": "You are an expert in summarizing the future of Vectara\'s integration with LlamaIndex. Your summaries are insightful, concise, and highlight key innovations and changes."},\n  #foreach ($result in $vectaraQueryResults)\n    {"role": "user", "content": "What are the key points in result number $vectaraIdxWord[$foreach.index] about Vectara\'s LlamaIndex integration?"},\n    {"role": "assistant", "content": "In result number $vectaraIdxWord[$foreach.index], the key points are: ${result.getText()}"},\n  #end\n  {"role": "user", "content": "Can you generate a comprehensive summary on \'Vectara\'s LlamaIndex Integration\' incorporating all the key points discussed?"}\n]\n',
+    )
+
+    res = qe.query("How will Vectara's integration look in the future?")
+    assert "integration" in str(res).lower()
+    assert "llamaindex" in str(res).lower()
+    assert "vectara" in str(res).lower()
+    assert "first" in str(res).lower()
+
+
 @pytest.fixture()
 def vectara2():
     try: