diff --git a/docs/docs/examples/managed/zcpDemo.ipynb b/docs/docs/examples/managed/zcpDemo.ipynb
index 0d691a3b00c676b6e23bc3280f6be9f28cd0fd7d..10e194c7b49e606b0055b94475369e4c5aefd5e7 100644
--- a/docs/docs/examples/managed/zcpDemo.ipynb
+++ b/docs/docs/examples/managed/zcpDemo.ipynb
@@ -47,7 +47,7 @@
    "id": "3fc94b2f",
    "metadata": {},
    "source": [
-    "2. Configure credentials of your [OpenAI](https://platform.openai.com) & [Zilliz Cloud](https://cloud.zilliz.com/signup?utm_source=twitter&utm_medium=social%20&utm_campaign=2023-12-22_social_pipeline-llamaindex_twitter) accounts."
+    "2. Configure credentials of your [Zilliz Cloud](https://cloud.zilliz.com/signup?utm_source=twitter&utm_medium=social%20&utm_campaign=2023-12-22_social_pipeline-llamaindex_twitter) accounts."
    ]
   },
   {
@@ -81,50 +81,61 @@
    "source": [
     "## Indexing documents\n",
     "\n",
+    "> It is optional to add metadata for each document. The metadata can be used to filter doc data during retrieval.\n",
+    "\n",
     "### From Signed URL\n",
     "\n",
-    "Zilliz Cloud Pipelines accepts files from AWS S3 and Google Cloud Storage. You can generate a presigned url from the Object Storage and use `from_document_url()` or `insert_doc_url()` to ingest the file. It can automatically index the document and store the doc chunks as vectors on Zilliz Cloud."
+    "Zilliz Cloud Pipelines accepts files from AWS S3 and Google Cloud Storage. You can generate a presigned url from the Object Storage and use `from_document_url()` to ingest the file. It can automatically index the document and store the doc chunks as vectors on Zilliz Cloud."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "97d5c934",
+   "id": "f08ed6de",
    "metadata": {},
    "outputs": [
     {
-     "data": {
-      "text/plain": [
-       "{'token_usage': 984, 'doc_name': 'milvus_doc_22.md', 'num_chunks': 3}"
-      ]
-     },
-     "execution_count": null,
-     "metadata": {},
-     "output_type": "execute_result"
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{'INGESTION': 'pipe-d639f220f27320e2e381de', 'SEARCH': 'pipe-47bd43fe8fd54502874a08', 'DELETION': 'pipe-bd434c99e064282f1a28e8'}\n"
+     ]
     }
    ],
    "source": [
     "from llama_index.indices.managed.zilliz import ZillizCloudPipelineIndex\n",
     "\n",
-    "zcp_index = ZillizCloudPipelineIndex.from_document_url(\n",
-    "    # a public or pre-signed url of a file stored on AWS S3 or Google Cloud Storage\n",
-    "    url=\"https://publicdataset.zillizcloud.com/milvus_doc.md\",\n",
+    "# Create pipelines: skip this step if you have prepared valid pipelines\n",
+    "pipeline_ids = ZillizCloudPipelineIndex.create_pipelines(\n",
     "    project_id=ZILLIZ_PROJECT_ID,\n",
     "    cluster_id=ZILLIZ_CLUSTER_ID,\n",
-    "    token=ZILLIZ_TOKEN,\n",
-    "    # optional\n",
-    "    metadata={\"version\": \"2.3\"},  # used for filtering\n",
-    "    collection_name=\"zcp_llamalection\",  # change this value will specify customized collection name\n",
+    "    api_key=ZILLIZ_TOKEN,\n",
+    "    data_type=\"doc\",\n",
+    "    collection_name=\"zcp_llamalection_doc\",  # change this value will customize collection name\n",
+    "    metadata_schema={\"user_id\": \"VarChar\"},\n",
     ")\n",
-    "\n",
-    "# Insert more docs, eg. a Milvus v2.2 document\n",
-    "zcp_index.insert_doc_url(\n",
-    "    url=\"https://publicdataset.zillizcloud.com/milvus_doc_22.md\",\n",
-    "    metadata={\"version\": \"2.2\"},\n",
+    "print(pipeline_ids)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "97d5c934",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "zcp_doc_index = ZillizCloudPipelineIndex.from_document_url(\n",
+    "    # a public or pre-signed url of a file stored on AWS S3 or Google Cloud Storage\n",
+    "    url=\"https://publicdataset.zillizcloud.com/milvus_doc.md\",\n",
+    "    pipeline_ids=pipeline_ids,\n",
+    "    api_key=ZILLIZ_TOKEN,\n",
+    "    metadata={\n",
+    "        \"user_id\": \"user-001\"\n",
+    "    },  # optional, which can be used for filtering\n",
     ")\n",
     "\n",
     "# # Delete docs by doc name\n",
-    "# zcp_index.delete_by_doc_name(doc_name=\"milvus_doc_22.md\")"
+    "# zcp_doc_index.delete_by_expression(expression=\"doc_name == 'milvus_doc_22.md'\")"
    ]
   },
   {
@@ -132,11 +143,56 @@
    "id": "d16a498e",
    "metadata": {},
    "source": [
-    "> It is optional to add metadata for each document. The metadata can be used to filter doc chunks during retrieval.\n",
+    "### From Document Nodes\n",
+    "\n",
+    "Zilliz Cloud Pipelines support text as data input as well. The following example prepares data with a sample document node."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "904b6cce",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{'INGESTION': 'pipe-2bbab10f273a57eb987024', 'SEARCH': 'pipe-e1914a072ec5e6f83e446a', 'DELETION': 'pipe-72bbabf273a51af0b0c447'}\n"
+     ]
+    }
+   ],
+   "source": [
+    "from llama_index.core import Document\n",
+    "from llama_index.indices.managed.zilliz import ZillizCloudPipelineIndex\n",
     "\n",
-    "### From Raw Text\n",
+    "# prepare documents\n",
+    "documents = [Document(text=\"The number that is being searched for is ten.\")]\n",
     "\n",
-    "Coming soon."
+    "# create pipelines: skip this step if you have prepared valid pipelines\n",
+    "pipeline_ids = ZillizCloudPipelineIndex.create_pipelines(\n",
+    "    project_id=ZILLIZ_PROJECT_ID,\n",
+    "    cluster_id=ZILLIZ_CLUSTER_ID,\n",
+    "    api_key=ZILLIZ_TOKEN,\n",
+    "    data_type=\"text\",\n",
+    "    collection_name=\"zcp_llamalection_text\",  # change this value will customize collection name\n",
+    ")\n",
+    "print(pipeline_ids)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2d06a204",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "zcp_text_index = ZillizCloudPipelineIndex.from_documents(\n",
+    "    # a public or pre-signed url of a file stored on AWS S3 or Google Cloud Storage\n",
+    "    documents=documents,\n",
+    "    pipeline_ids=pipeline_ids,\n",
+    "    api_key=ZILLIZ_TOKEN,\n",
+    ")"
    ]
   },
   {
@@ -171,18 +227,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters\n",
-    "\n",
-    "query_engine_milvus23 = zcp_index.as_query_engine(\n",
-    "    search_top_k=3,\n",
-    "    filters=MetadataFilters(\n",
-    "        filters=[\n",
-    "            ExactMatchFilter(key=\"version\", value=\"2.3\")\n",
-    "        ]  # version == \"2.3\"\n",
-    "    ),\n",
-    "    output_metadata=[\"version\"],\n",
-    "    llm=None,\n",
-    ")"
+    "query_engine = zcp_doc_index.as_query_engine(search_top_k=3)"
    ]
   },
   {
@@ -205,24 +250,16 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "[NodeWithScore(node=TextNode(id_='448986959334710210', embedding=None, metadata={'version': '2.3'}, excluded_embed_metadata_keys=[], excluded_llm_metadata_keys=[], relationships={}, text='# Delete Entities\\nThis topic describes how to delete entities in Milvus.  \\nMilvus supports deleting entities by primary key or complex boolean expressions. Deleting entities by primary key is much faster and lighter than deleting them by complex boolean expressions. This is because Milvus executes queries first when deleting data by complex boolean expressions.  \\nDeleted entities can still be retrieved immediately after the deletion if the consistency level is set lower than Strong.\\nEntities deleted beyond the pre-specified span of time for Time Travel cannot be retrieved again.\\nFrequent deletion operations will impact the system performance.  \\nBefore deleting entities by comlpex boolean expressions, make sure the collection has been loaded.\\nDeleting entities by complex boolean expressions is not an atomic operation. Therefore, if it fails halfway through, some data may still be deleted.\\nDeleting entities by complex boolean expressions is supported only when the consistency is set to Bounded. For details, see Consistency.\\\\\\n\\\\\\n# Delete Entities\\n## Prepare boolean expression\\nPrepare the boolean expression that filters the entities to delete.  \\nMilvus supports deleting entities by primary key or complex boolean expressions. For more information on expression rules and supported operators, see Boolean Expression Rules.\\\\\\n\\\\\\n# Delete Entities\\n## Prepare boolean expression\\n### Simple boolean expression\\nUse a simple expression to filter data with primary key values of 0 and 1:  \\n```python\\nexpr = \"book_id in [0,1]\"\\n```\\\\\\n\\\\\\n# Delete Entities\\n## Prepare boolean expression\\n### Complex boolean expression\\nTo filter entities that meet specific conditions, define complex boolean expressions.  \\nFilter entities whose word_count is greater than or equal to 11000:  \\n```python\\nexpr = \"word_count >= 11000\"\\n```  \\nFilter entities whose book_name is not Unknown:  \\n```python\\nexpr = \"book_name != Unknown\"\\n```  \\nFilter entities whose primary key values are greater than 5 and word_count is smaller than or equal to 9999:  \\n```python\\nexpr = \"book_id > 5 && word_count <= 9999\"\\n```', start_char_idx=None, end_char_idx=None, text_template='{metadata_str}\\n\\n{content}', metadata_template='{key}: {value}', metadata_seperator='\\n'), score=0.742070198059082), NodeWithScore(node=TextNode(id_='448986959334710211', embedding=None, metadata={'version': '2.3'}, excluded_embed_metadata_keys=[], excluded_llm_metadata_keys=[], relationships={}, text='# Delete Entities\\n## Delete entities\\nDelete the entities with the boolean expression you created. Milvus returns the ID list of the deleted entities.\\n```python\\nfrom pymilvus import Collection\\ncollection = Collection(\"book\")      # Get an existing collection.\\ncollection.delete(expr)\\n```  \\nParameter\\tDescription\\nexpr\\tBoolean expression that specifies the entities to delete.\\npartition_name (optional)\\tName of the partition to delete entities from.\\\\\\n\\\\\\n# Upsert Entities\\nThis topic describes how to upsert entities in Milvus.  \\nUpserting is a combination of insert and delete operations. 
In the context of a Milvus vector database, an upsert is a data-level operation that will overwrite an existing entity if a specified field already exists in a collection, and insert a new entity if the specified value doesn’t already exist.  \\nThe following example upserts 3,000 rows of randomly generated data as the example data. When performing upsert operations, it\\'s important to note that the operation may compromise performance. This is because the operation involves deleting data during execution.\\\\\\n\\\\\\n# Upsert Entities\\n## Prepare data\\nFirst, prepare the data to upsert. The type of data to upsert must match the schema of the collection, otherwise Milvus will raise an exception.  \\nMilvus supports default values for scalar fields, excluding a primary key field. This indicates that some fields can be left empty during data inserts or upserts. For more information, refer to Create a Collection.  \\n```python\\n# Generate data to upsert\\n\\nimport random\\nnb = 3000\\ndim = 8\\nvectors = [[random.random() for _ in range(dim)] for _ in range(nb)]\\ndata = [\\n[i for i in range(nb)],\\n[str(i) for i in range(nb)],\\n[i for i in range(10000, 10000+nb)],\\nvectors,\\n[str(\"dy\"*i) for i in range(nb)]\\n]\\n```', start_char_idx=None, end_char_idx=None, text_template='{metadata_str}\\n\\n{content}', metadata_template='{key}: {value}', metadata_seperator='\\n'), score=0.6409814953804016), NodeWithScore(node=TextNode(id_='448986959334710212', embedding=None, metadata={'version': '2.3'}, excluded_embed_metadata_keys=[], excluded_llm_metadata_keys=[], relationships={}, text='# Upsert Entities\\n## Upsert data\\nUpsert the data to the collection.  \\n```python\\nfrom pymilvus import Collection\\ncollection = Collection(\"book\") # Get an existing collection.\\nmr = collection.upsert(data)\\n```  \\nParameter\\tDescription\\ndata\\tData to upsert into Milvus.\\npartition_name (optional)\\tName of the partition to upsert data into.\\ntimeout (optional)\\tAn optional duration of time in seconds to allow for the RPC. If it is set to None, the client keeps waiting until the server responds or error occurs.\\nAfter upserting entities into a collection that has previously been indexed, you do not need to re-index the collection, as Milvus will automatically create an index for the newly upserted data. For more information, refer to Can indexes be created after inserting vectors?\\\\\\n\\\\\\n# Upsert Entities\\n## Flush data\\nWhen data is upserted into Milvus it is updated and inserted into segments. Segments have to reach a certain size to be sealed and indexed. Unsealed segments will be searched brute force. In order to avoid this with any remainder data, it is best to call flush(). The flush() call will seal any remaining segments and send them for indexing. It is important to only call this method at the end of an upsert session. Calling it too often will cause fragmented data that will need to be cleaned later on.\\\\\\n\\\\\\n# Upsert Entities\\n## Limits\\nUpdating primary key fields is not supported by upsert().\\nupsert() is not applicable and an error can occur if autoID is set to True for primary key fields.', start_char_idx=None, end_char_idx=None, text_template='{metadata_str}\\n\\n{content}', metadata_template='{key}: {value}', metadata_seperator='\\n'), score=0.5456743240356445)]\n"
+      "[NodeWithScore(node=TextNode(id_='449755997496672548', embedding=None, metadata={}, excluded_embed_metadata_keys=[], excluded_llm_metadata_keys=[], relationships={}, text='# Delete Entities\\nThis topic describes how to delete entities in Milvus.  \\nMilvus supports deleting entities by primary key or complex boolean expressions. Deleting entities by primary key is much faster and lighter than deleting them by complex boolean expressions. This is because Milvus executes queries first when deleting data by complex boolean expressions.  \\nDeleted entities can still be retrieved immediately after the deletion if the consistency level is set lower than Strong.\\nEntities deleted beyond the pre-specified span of time for Time Travel cannot be retrieved again.\\nFrequent deletion operations will impact the system performance.  \\nBefore deleting entities by comlpex boolean expressions, make sure the collection has been loaded.\\nDeleting entities by complex boolean expressions is not an atomic operation. Therefore, if it fails halfway through, some data may still be deleted.\\nDeleting entities by complex boolean expressions is supported only when the consistency is set to Bounded. For details, see Consistency.\\\\\\n\\\\\\n# Delete Entities\\n## Prepare boolean expression\\nPrepare the boolean expression that filters the entities to delete.  \\nMilvus supports deleting entities by primary key or complex boolean expressions. For more information on expression rules and supported operators, see Boolean Expression Rules.\\\\\\n\\\\\\n# Delete Entities\\n## Prepare boolean expression\\n### Simple boolean expression\\nUse a simple expression to filter data with primary key values of 0 and 1:  \\n```python\\nexpr = \"book_id in [0,1]\"\\n```\\\\\\n\\\\\\n# Delete Entities\\n## Prepare boolean expression\\n### Complex boolean expression\\nTo filter entities that meet specific conditions, define complex boolean expressions.  \\nFilter entities whose word_count is greater than or equal to 11000:  \\n```python\\nexpr = \"word_count >= 11000\"\\n```  \\nFilter entities whose book_name is not Unknown:  \\n```python\\nexpr = \"book_name != Unknown\"\\n```  \\nFilter entities whose primary key values are greater than 5 and word_count is smaller than or equal to 9999:  \\n```python\\nexpr = \"book_id > 5 && word_count <= 9999\"\\n```', start_char_idx=None, end_char_idx=None, text_template='{metadata_str}\\n\\n{content}', metadata_template='{key}: {value}', metadata_seperator='\\n'), score=0.742070198059082), NodeWithScore(node=TextNode(id_='449755997496672549', embedding=None, metadata={}, excluded_embed_metadata_keys=[], excluded_llm_metadata_keys=[], relationships={}, text='# Delete Entities\\n## Delete entities\\nDelete the entities with the boolean expression you created. Milvus returns the ID list of the deleted entities.\\n```python\\nfrom pymilvus import Collection\\ncollection = Collection(\"book\")      # Get an existing collection.\\ncollection.delete(expr)\\n```  \\nParameter\\tDescription\\nexpr\\tBoolean expression that specifies the entities to delete.\\npartition_name (optional)\\tName of the partition to delete entities from.\\\\\\n\\\\\\n# Upsert Entities\\nThis topic describes how to upsert entities in Milvus.  \\nUpserting is a combination of insert and delete operations. 
In the context of a Milvus vector database, an upsert is a data-level operation that will overwrite an existing entity if a specified field already exists in a collection, and insert a new entity if the specified value doesn’t already exist.  \\nThe following example upserts 3,000 rows of randomly generated data as the example data. When performing upsert operations, it\\'s important to note that the operation may compromise performance. This is because the operation involves deleting data during execution.\\\\\\n\\\\\\n# Upsert Entities\\n## Prepare data\\nFirst, prepare the data to upsert. The type of data to upsert must match the schema of the collection, otherwise Milvus will raise an exception.  \\nMilvus supports default values for scalar fields, excluding a primary key field. This indicates that some fields can be left empty during data inserts or upserts. For more information, refer to Create a Collection.  \\n```python\\n# Generate data to upsert\\n\\nimport random\\nnb = 3000\\ndim = 8\\nvectors = [[random.random() for _ in range(dim)] for _ in range(nb)]\\ndata = [\\n[i for i in range(nb)],\\n[str(i) for i in range(nb)],\\n[i for i in range(10000, 10000+nb)],\\nvectors,\\n[str(\"dy\"*i) for i in range(nb)]\\n]\\n```', start_char_idx=None, end_char_idx=None, text_template='{metadata_str}\\n\\n{content}', metadata_template='{key}: {value}', metadata_seperator='\\n'), score=0.6409814953804016), NodeWithScore(node=TextNode(id_='449755997496672550', embedding=None, metadata={}, excluded_embed_metadata_keys=[], excluded_llm_metadata_keys=[], relationships={}, text='# Upsert Entities\\n## Upsert data\\nUpsert the data to the collection.  \\n```python\\nfrom pymilvus import Collection\\ncollection = Collection(\"book\") # Get an existing collection.\\nmr = collection.upsert(data)\\n```  \\nParameter\\tDescription\\ndata\\tData to upsert into Milvus.\\npartition_name (optional)\\tName of the partition to upsert data into.\\ntimeout (optional)\\tAn optional duration of time in seconds to allow for the RPC. If it is set to None, the client keeps waiting until the server responds or error occurs.\\nAfter upserting entities into a collection that has previously been indexed, you do not need to re-index the collection, as Milvus will automatically create an index for the newly upserted data. For more information, refer to Can indexes be created after inserting vectors?\\\\\\n\\\\\\n# Upsert Entities\\n## Flush data\\nWhen data is upserted into Milvus it is updated and inserted into segments. Segments have to reach a certain size to be sealed and indexed. Unsealed segments will be searched brute force. In order to avoid this with any remainder data, it is best to call flush(). The flush() call will seal any remaining segments and send them for indexing. It is important to only call this method at the end of an upsert session. Calling it too often will cause fragmented data that will need to be cleaned later on.\\\\\\n\\\\\\n# Upsert Entities\\n## Limits\\nUpdating primary key fields is not supported by upsert().\\nupsert() is not applicable and an error can occur if autoID is set to True for primary key fields.', start_char_idx=None, end_char_idx=None, text_template='{metadata_str}\\n\\n{content}', metadata_template='{key}: {value}', metadata_seperator='\\n'), score=0.5456743240356445)]\n"
      ]
     }
    ],
    "source": [
     "question = \"Can users delete entities by filtering non-primary fields?\"\n",
-    "retrieved_nodes = query_engine_milvus23.retrieve(question)\n",
+    "retrieved_nodes = query_engine.retrieve(question)\n",
     "print(retrieved_nodes)"
    ]
   },
-  {
-   "cell_type": "markdown",
-   "id": "d47d322d",
-   "metadata": {},
-   "source": [
-    "> The query engine with filters retrieves only text nodes with \\\"version 2.3\\\" tag."
-   ]
-  },
   {
    "cell_type": "markdown",
    "id": "e91c5896",
@@ -246,96 +283,16 @@
     }
    ],
    "source": [
-    "response = query_engine_milvus23.query(question)\n",
+    "response = query_engine.query(question)\n",
     "print(response.response)"
    ]
   },
-  {
-   "cell_type": "markdown",
-   "id": "4c3c239b",
-   "metadata": {},
-   "source": [
-    "## Advanced\n",
-    "\n",
-    "You are able to get the managed index without running data ingestion. In order to get ready with Zilliz Cloud Pipelines, you need to provide either pipeline ids or collection name:\n",
-    "\n",
-    "- pipeline_ids: The dictionary of pipeline ids for INGESTION, SEARCH, DELETION. Defaults to None. For example: {\"INGESTION\": \"pipe-xx1\", \"SEARCH\": \"pipe-xx2\", \"DELETION\": \"pipe-xx3\"}.\n",
-    "- collection_name: The collection name, defaults to 'zcp_llamalection'. If no pipeline_ids is given, the index will try to get pipelines with collection_name."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "857746c4",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "No available pipelines. Please create pipelines first.\n"
-     ]
-    }
-   ],
-   "source": [
-    "from llama_index.indices.managed.zilliz import ZillizCloudPipelineIndex\n",
-    "\n",
-    "\n",
-    "advanced_zcp_index = ZillizCloudPipelineIndex(\n",
-    "    project_id=ZILLIZ_PROJECT_ID,\n",
-    "    cluster_id=ZILLIZ_CLUSTER_ID,\n",
-    "    token=ZILLIZ_TOKEN,\n",
-    "    collection_name=\"zcp_llamalection_advanced\",\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "91f99675",
-   "metadata": {},
-   "source": [
-    "### Customize Pipelines\n",
-    "\n",
-    "If no pipelines are provided or found, then you can manually create and customize pipelines with the following **optional** parameters:\n",
-    "\n",
-    "- **metadata_schema**: A dictionary of metadata schema with field name as key and data type as value. For example, {\"user_id\": \"VarChar\"}.\n",
-    "- **chunkSize**: An integer of chunk size using token as unit. If no chunk size is specified, then Zilliz Cloud Pipeline will use a built-in default chunk size (500 tokens) to split documents.\n",
-    "- **(others)**: Refer to [Zilliz Cloud Pipelines](https://docs.zilliz.com/docs/pipelines) for more available pipeline parameters."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "51079a30",
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "{'INGESTION': 'pipe-9b58a7a79b25ae31467fa4',\n",
-       " 'SEARCH': 'pipe-ea117c9922961a565929eb',\n",
-       " 'DELETION': 'pipe-26d76179b259b67e641b33'}"
-      ]
-     },
-     "execution_count": null,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "advanced_zcp_index.create_pipelines(\n",
-    "    metadata_schema={\"user_id\": \"VarChar\"},\n",
-    "    chunkSize=350,\n",
-    "    # other pipeline params\n",
-    ")"
-   ]
-  },
   {
    "cell_type": "markdown",
    "id": "a6efa3b9",
    "metadata": {},
    "source": [
-    "### Multi-Tenancy\n",
+    "## Multi-Tenancy\n",
     "\n",
     "With the tenant-specific value (eg. user id) as metadata, the managed index is able to achieve multi-tenancy by applying metadata filters.\n",
     "\n",
@@ -351,7 +308,7 @@
     {
      "data": {
       "text/plain": [
-       "{'token_usage': 1247, 'doc_name': 'milvus_doc.md', 'num_chunks': 4}"
+       "{'token_usage': 984, 'doc_name': 'milvus_doc_22.md', 'num_chunks': 3}"
       ]
      },
      "execution_count": null,
@@ -360,9 +317,9 @@
     }
    ],
    "source": [
-    "advanced_zcp_index.insert_doc_url(\n",
-    "    url=\"https://publicdataset.zillizcloud.com/milvus_doc.md\",\n",
-    "    metadata={\"user_id\": \"user_001\"},\n",
+    "zcp_doc_index._insert_doc_url(\n",
+    "    url=\"https://publicdataset.zillizcloud.com/milvus_doc_22.md\",\n",
+    "    metadata={\"user_id\": \"user_002\"},\n",
     ")"
    ]
   },
@@ -383,10 +340,10 @@
    "source": [
     "from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters\n",
     "\n",
-    "query_engine_for_user_001 = advanced_zcp_index.as_query_engine(\n",
+    "query_engine_for_user_002 = zcp_doc_index.as_query_engine(\n",
     "    search_top_k=3,\n",
     "    filters=MetadataFilters(\n",
-    "        filters=[ExactMatchFilter(key=\"user_id\", value=\"user_001\")]\n",
+    "        filters=[ExactMatchFilter(key=\"user_id\", value=\"user_002\")]\n",
     "    ),\n",
     "    output_metadata=[\"user_id\"],  # optional, display user_id in outputs\n",
     ")"
@@ -410,15 +367,15 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Yes, you can delete entities by filtering non-primary fields using complex boolean expressions in Milvus.\n"
+      "Milvus only supports deleting entities by primary key filtered with boolean expressions. Other operators can be used only in query or scalar filtering in vector search.\n"
      ]
     }
    ],
    "source": [
     "question = \"Can I delete entities by filtering non-primary fields?\"\n",
     "\n",
-    "# search_results = query_engine_for_user_001.retrieve(question)\n",
-    "response = query_engine_for_user_001.query(question)\n",
+    "# search_results = query_engine_for_user_002.retrieve(question)\n",
+    "response = query_engine_for_user_002.query(question)\n",
     "print(response.response)"
    ]
   }
diff --git a/docs/docs/examples/vector_stores/MilvusIndexDemo.ipynb b/docs/docs/examples/vector_stores/MilvusIndexDemo.ipynb
index 66d6c69b440feed1b480dbcac58b8e8e4b3bace6..f6146bd26ab6bb595d3db63b632ba8be69839477 100644
--- a/docs/docs/examples/vector_stores/MilvusIndexDemo.ipynb
+++ b/docs/docs/examples/vector_stores/MilvusIndexDemo.ipynb
@@ -94,7 +94,7 @@
    "source": [
     "import openai\n",
     "\n",
-    "openai.api_key = \"sk-\""
+    "openai.api_key = \"sk-***********\""
    ]
   },
   {
@@ -137,7 +137,7 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Document ID: 4d28b248-a306-4a42-9ace-8fd88df7e484\n"
+      "Document ID: 11c3a6fe-799e-4e40-8122-2339936c2722\n"
      ]
     }
    ],
@@ -157,7 +157,7 @@
     "### Create an index across the data\n",
     "Now that we have a document, we can can create an index and insert the document. For the index we will use a GPTMilvusIndex. GPTMilvusIndex takes in a few arguments:\n",
     "\n",
-    "- `uri (str, optional)`: The URI to connect to, comes in the form of \"https://address:port\" if using Milvus or Zilliz Cloud service, or \"path/to/local/milvus.db\" is using a lite local Milvus. Defaults to \"http://localhost:19530\".\n",
+    "- `uri (str, optional)`: The URI to connect to, comes in the form of \"https://address:port\" if using Milvus or Zilliz Cloud service, or \"path/to/local/milvus.db\" if using a lite local Milvus. Defaults to \"./milvus_llamaindex.db\".\n",
     "- `token (str, optional)`: The token for log in. Empty if not using rbac, if using rbac it will most likely be \"username:password\". Defaults to \"\".\n",
     "- `collection_name (str, optional)`: The name of the collection where data will be stored. Defaults to \"llamalection\".\n",
     "- `dim (int, optional)`: The dimension of the embeddings. If it is not provided, collection creation will be done on first insert. Defaults to None.\n",
@@ -168,7 +168,9 @@
     "- `overwrite (bool, optional)`: Whether to overwrite existing collection with same name. Defaults to False.\n",
     "- `text_key (str, optional)`: What key text is stored in in the passed collection. Used when bringing your own collection. Defaults to None.\n",
     "- `index_config (dict, optional)`: The configuration used for building the Milvus index. Defaults to None.\n",
-    "- `search_config (dict, optional)`: The configuration used for searching the Milvus index. Note that this must be compatible with the index type specified by index_config. Defaults to None."
+    "- `search_config (dict, optional)`: The configuration used for searching the Milvus index. Note that this must be compatible with the index type specified by index_config. Defaults to None.\n",
+    "\n",
+    "> Please note that **Milvus Lite** requires `pymilvus>=2.4.2`."
    ]
   },
   {
@@ -211,13 +213,12 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "The author learned about programming on early computers like the IBM 1401, where programs were typed\n",
-      "on punch cards, loaded into memory, and run to produce output. Later, with the advent of\n",
-      "microcomputers like the TRS-80, the author was able to have a computer directly in front of them,\n",
-      "responding to keystrokes in real-time. This shift in technology allowed the author to write simple\n",
-      "games, predictive programs, and even a word processor. Additionally, the author explored philosophy\n",
-      "in college but found it lacking in ultimate truths, leading to a switch to studying AI, inspired by\n",
-      "works like Heinlein's \"The Moon is a Harsh Mistress\" and seeing SHRDLU in a PBS documentary.\n"
+      "The author learned about programming on early computers like the IBM 1401 using Fortran, the\n",
+      "limitations of early computing technology, the transition to microcomputers, and the excitement of\n",
+      "having a personal computer like the TRS-80. Additionally, the author explored different academic\n",
+      "paths, initially planning to study philosophy but eventually switching to AI due to a lack of\n",
+      "interest in philosophy courses. Later on, the author pursued art education, attending RISD and the\n",
+      "Accademia di Belli Arti in Florence, where they encountered a different approach to teaching art.\n"
      ]
     }
    ],
@@ -237,9 +238,8 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "The author experienced a difficult moment when their mother had a stroke and was put in a nursing\n",
-      "home. The stroke destroyed her balance, and the author and their sister were determined to help her\n",
-      "get out of the nursing home and back to her house.\n"
+      "Dealing with the stress and challenges related to managing Hacker News was a difficult moment for\n",
+      "the author.\n"
      ]
     }
    ],
@@ -267,12 +267,14 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Res: The author is the one who wrote the text or created the content.\n"
+      "Res: The author is the individual who created the content or work in question.\n"
      ]
     }
    ],
    "source": [
-    "vector_store = MilvusVectorStore(dim=1536, overwrite=True)\n",
+    "vector_store = MilvusVectorStore(\n",
+    "    uri=\"./milvus_demo.db\", dim=1536, overwrite=True\n",
+    ")\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
     "index = VectorStoreIndex.from_documents(\n",
     "    [Document(text=\"The number that is being searched for is ten.\")],\n",
@@ -309,7 +311,7 @@
    "source": [
     "del index, vector_store, storage_context, query_engine\n",
     "\n",
-    "vector_store = MilvusVectorStore(overwrite=False)\n",
+    "vector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", overwrite=False)\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
     "index = VectorStoreIndex.from_documents(\n",
     "    documents, storage_context=storage_context\n",
diff --git a/llama-index-integrations/indices/llama-index-indices-managed-zilliz/llama_index/indices/managed/zilliz/base.py b/llama-index-integrations/indices/llama-index-indices-managed-zilliz/llama_index/indices/managed/zilliz/base.py
index 10df554dda5e50bc2d98131a72eacbe3cb60bc25..145724bddd04ad4a92434248691903003eb22a65 100644
--- a/llama-index-integrations/indices/llama-index-indices-managed-zilliz/llama_index/indices/managed/zilliz/base.py
+++ b/llama-index-integrations/indices/llama-index-indices-managed-zilliz/llama_index/indices/managed/zilliz/base.py
@@ -6,20 +6,13 @@ interfaces a managed service.
 """
 
 import logging
-from typing import Any, Dict, List, Optional, Sequence, Type
+from typing import Any, Dict, Optional, Sequence, Type
 
 import requests
 from llama_index.core.base.base_retriever import BaseRetriever
-from llama_index.core.callbacks.base import CallbackManager
 from llama_index.core.data_structs.data_structs import IndexDict, IndexStructType
-from llama_index.core.indices.managed.base import (
-    BaseManagedIndex,
-    IndexType,
-    TransformComponent,
-)
+from llama_index.core.indices.managed.base import BaseManagedIndex, IndexType
 from llama_index.core.schema import BaseNode, Document
-from llama_index.core.service_context import ServiceContext
-from llama_index.core.storage.storage_context import StorageContext
 
 logger = logging.getLogger(__name__)
 
@@ -56,43 +49,43 @@ class ZillizCloudPipelineIndex(BaseManagedIndex):
     The Zilliz Cloud Pipeline's index implements a managed index that uses Zilliz Cloud Pipelines as the backend.
 
     Args:
-        project_id (str): Zilliz Cloud's project ID.
-        cluster_id (str): Zilliz Cloud's cluster ID.
-        token (str): Zilliz Cloud's token.
+        pipeline_ids (dict): A dictionary of pipeline ids for INGESTION, SEARCH, DELETION.
+        api_key (str): Zilliz Cloud's API key.
         cloud_region (str='gcp-us-west1'): The region of Zilliz Cloud's cluster. Defaults to 'gcp-us-west1'.
-        pipeline_ids (dict=None): A dictionary of pipeline ids for INGESTION, SEARCH, DELETION. Defaults to None.
-        collection_name (str='zcp_llamalection'): A collection name, defaults to 'zcp_llamalection'. If no pipeline_ids is given, get pipelines with collection_name.
         show_progress (bool): Whether to show tqdm progress bars. Defaults to False.
     """
 
     def __init__(
         self,
-        project_id: str,
-        cluster_id: str,
-        token: str,
+        pipeline_ids: Dict,
+        api_key: Optional[str] = None,
         cloud_region: str = "gcp-us-west1",
-        pipeline_ids: Optional[Dict] = None,
-        collection_name: str = "zcp_llamalection",
         show_progress: bool = False,
         **kwargs: Any,
     ) -> None:
-        self.project_id = project_id
-        self.cluster_id = cluster_id
-        self.token = token
+        self.token = api_key
         self.cloud_region = cloud_region
-        self.collection_name = collection_name
         self.domain = (
             f"https://controller.api.{cloud_region}.zillizcloud.com/v1/pipelines"
         )
         self.headers = {
-            "Authorization": f"Bearer {token}",
+            "Authorization": f"Bearer {self.token}",
             "Accept": "application/json",
             "Content-Type": "application/json",
         }
-        self.pipeline_ids = pipeline_ids or self.get_pipeline_ids()
+        self.pipeline_ids = pipeline_ids or {}
+
+        if len(self.pipeline_ids) == 0:
+            print(
+                "Pipeline ids are required. You can use the classmethod `ZillizCloudPipelineIndex.create_pipelines` to create pipelines and get pipeline ids."
+            )
+        else:
+            assert set(PIPELINE_TYPES).issubset(
+                set(self.pipeline_ids.keys())
+            ), f"Missing pipeline(s): {set(PIPELINE_TYPES) - set(self.pipeline_ids.keys())}"
 
         index_struct = ZillizCloudPipelineIndexStruct(
-            index_id=collection_name,
+            index_id="-".join(pipeline_ids.values()),
             summary="Zilliz Cloud Pipeline Index",
         )
 
@@ -100,36 +93,32 @@ class ZillizCloudPipelineIndex(BaseManagedIndex):
             show_progress=show_progress, index_struct=index_struct, **kwargs
         )
 
-        if len(self.pipeline_ids) == 0:
-            print("No available pipelines. Please create pipelines first.")
-        else:
-            assert set(PIPELINE_TYPES).issubset(
-                set(self.pipeline_ids.keys())
-            ), f"Missing pipeline(s): {set(PIPELINE_TYPES) - set(self.pipeline_ids.keys())}"
-
-    def insert_doc_url(self, url: str, metadata: Optional[Dict] = None) -> None:
-        """Insert doc from url with an initialized index.
+    def _insert_doc_url(self, url: str, metadata: Optional[Dict] = None) -> None:
+        """Insert doc from url with an initialized index using doc pipelines."""
+        ingest_pipe_id = self.pipeline_ids.get("INGESTION")
+        ingestion_url = f"{self.domain}/{ingest_pipe_id}/run"
 
+        if metadata is None:
+            metadata = {}
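+        # Build the ingestion run payload: the document URL plus any metadata fields flattened into "data".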
+        params = {"data": {"doc_url": url}}
+        params["data"].update(metadata)
+        response = requests.post(ingestion_url, headers=self.headers, json=params)
+        if response.status_code != 200:
+            raise RuntimeError(response.text)
+        response_dict = response.json()
+        if response_dict["code"] != 200:
+            raise RuntimeError(response_dict)
+        return response_dict["data"]
 
-        Example:
-        >>> from llama_index.indices import ZillizCloudPipelineIndex
-        >>> index = ZillizCloudPipelineIndex(
-        >>>     project_id='YOUR_ZILLIZ_CLOUD_PROJECT_ID',
-        >>>     cluster_id='YOUR_ZILLIZ_CLOUD_CLUSTER_ID',
-        >>>     token='YOUR_ZILLIZ_CLOUD_API_KEY',
-        >>>     collection_name='your_collection_name'
-        >>> )
-        >>> index.insert_doc_url(
-        >>>     url='https://oss_bucket.test_doc.ext',
-        >>>     metadata={'year': 2023, 'author': 'zilliz'}  # only required when the Index was created with metadata schemas
-        >>> )
-        """
+    def _insert(self, nodes: Sequence[BaseNode], metadata: Optional[Dict] = None):
+        """Insert doc from text nodes with an initialized index using text pipelines."""
         ingest_pipe_id = self.pipeline_ids.get("INGESTION")
         ingestion_url = f"{self.domain}/{ingest_pipe_id}/run"
 
+        text_list = [n.get_content() for n in nodes]
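+        # Flatten the node texts and any metadata fields into the "data" payload for the text ingestion run.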
         if metadata is None:
             metadata = {}
-        params = {"data": {"doc_url": url}}
+        params = {"data": {"text_list": text_list}}
         params["data"].update(metadata)
         response = requests.post(ingestion_url, headers=self.headers, json=params)
         if response.status_code != 200:
@@ -139,7 +128,22 @@ class ZillizCloudPipelineIndex(BaseManagedIndex):
             raise RuntimeError(response_dict)
         return response_dict["data"]
 
-    def delete_by_doc_name(self, doc_name: str) -> int:
+    def delete_by_expression(self, expression: str):
+        """Delete data by Milvus boolean expression if using the corresponding deletion pipeline."""
+        deletion_pipe_id = self.pipeline_ids.get("DELETION")
+        deletion_url = f"{self.domain}/{deletion_pipe_id}/run"
+
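+        # Run the DELETION pipeline with a Milvus boolean expression, e.g. "doc_name == 'milvus_doc_22.md'".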
+        params = {"data": {"expression": expression}}
+        response = requests.post(deletion_url, headers=self.headers, json=params)
+        if response.status_code != 200:
+            raise RuntimeError(response.text)
+        response_dict = response.json()
+        if response_dict["code"] != 200:
+            raise RuntimeError(response_dict)
+        return response_dict["data"]
+
+    def delete_by_doc_name(self, doc_name: str):
+        """Delete data by doc name if using the corresponding deletion pipeline."""
         deletion_pipe_id = self.pipeline_ids.get("DELETION")
         deletion_url = f"{self.domain}/{deletion_pipe_id}/run"
 
@@ -150,10 +154,19 @@ class ZillizCloudPipelineIndex(BaseManagedIndex):
         response_dict = response.json()
         if response_dict["code"] != 200:
             raise RuntimeError(response_dict)
-        try:
-            return response_dict["data"]
-        except Exception as e:
-            raise RuntimeError(f"Run Zilliz Cloud Pipelines failed: {e}")
+        return response_dict["data"]
+
+    def delete_ref_doc(
+        self, ref_doc_id: str, delete_from_docstore: bool = False, **delete_kwargs: Any
+    ) -> None:
+        raise NotImplementedError(
+            "Deleting a reference document is not yet supported with Zilliz Cloud Pipeline."
+        )
+
+    def update_ref_doc(self, document: Document, **update_kwargs: Any) -> None:
+        raise NotImplementedError(
+            "Updating referenced document is not yet supported with Zilliz Cloud Pipeline."
+        )
 
     def as_retriever(self, **kwargs: Any) -> BaseRetriever:
         """Return a retriever."""
@@ -163,59 +176,37 @@ class ZillizCloudPipelineIndex(BaseManagedIndex):
 
         return ZillizCloudPipelineRetriever(self, **kwargs)
 
-    def get_pipeline_ids(self) -> dict:
-        """Get pipeline ids."""
-        url = f"{self.domain}?projectId={self.project_id}"
-
-        # Get pipelines
-        response = requests.get(url, headers=self.headers)
-        if response.status_code != 200:
-            raise RuntimeError(response.text)
-        response_dict = response.json()
-        if response_dict["code"] != 200:
-            raise RuntimeError(response_dict)
-        data = response_dict["data"]
-        pipeline_ids = {}
-        for pipe_info in data:
-            pipe_id = pipe_info["pipelineId"]
-            pipe_type = pipe_info["type"]
-
-            if pipe_type == "SEARCH":
-                pipe_clusters = [x["clusterId"] for x in pipe_info["functions"]]
-                pipe_collections = [x["collectionName"] for x in pipe_info["functions"]]
-                if (
-                    self.cluster_id in pipe_clusters
-                    and self.collection_name in pipe_collections
-                ):
-                    pipeline_ids[pipe_type] = pipe_id
-            elif pipe_type == "INGESTION":
-                if (
-                    self.cluster_id == pipe_info["clusterId"]
-                    and self.collection_name == pipe_info["newCollectionName"]
-                ):
-                    pipeline_ids[pipe_type] = pipe_id
-            elif pipe_type == "DELETION":
-                if (
-                    self.cluster_id == pipe_info["clusterId"]
-                    and self.collection_name == pipe_info["collectionName"]
-                ):
-                    pipeline_ids[pipe_type] = pipe_id
-        return pipeline_ids
-
+    @staticmethod
     def create_pipelines(
-        self, metadata_schema: Optional[Dict] = None, **kwargs: str
+        project_id: str,
+        cluster_id: str,
+        cloud_region: str = "gcp-us-west1",
+        api_key: Optional[str] = None,
+        collection_name: str = "zcp_llamalection",
+        data_type: str = "text",
+        metadata_schema: Optional[Dict] = None,
+        **kwargs: Any,
     ) -> dict:
         """Create INGESTION, SEARCH, DELETION pipelines using self.collection_name.
 
         Args:
+            project_id (str): Zilliz Cloud's project ID.
+            cluster_id (str): Zilliz Cloud's cluster ID.
+            api_key (str=None): Zilliz Cloud's API key. Defaults to None.
+            cloud_region (str='gcp-us-west1'): The region of Zilliz Cloud's cluster. Defaults to 'gcp-us-west1'.
+            collection_name (str="zcp_llamalection"): A collection name, defaults to 'zcp_llamalection'.
+            data_type (str="text"): The data type of pipelines, defaults to "text". Currently only "text" or "doc" are supported.
             metadata_schema (Dict=None): A dictionary of metadata schema, defaults to None. Use metadata name as key and the corresponding data type as value: {'field_name': 'field_type'}.
                 Only support the following values as the field type: 'Bool', 'Int8', 'Int16', 'Int32', 'Int64', 'Float', 'Double', 'VarChar'.
-            kwargs: optional parameters to create ingestion pipeline
-                - chunkSize: An integer within range [20, 500] to customize chunk size.
+            kwargs: optional function parameters to create ingestion & search pipelines.
                 - language: The language of documents. Available options: "ENGLISH", "CHINESE".
+                - embedding: The embedding service used in both the ingestion & search pipelines.
+                - reranker: The reranker service used in the search function.
+                - chunkSize: The chunk size to split a document. Only for doc data.
+                - splitBy: The separators used to chunk a document. Only for doc data.
 
         Returns:
-            A dictionary of pipeline ids for INGESTION, SEARCH, and DELETION pipelines.
+            A dictionary of pipeline ids for the created INGESTION, SEARCH, and DELETION pipelines.
 
         Example:
             >>> from llama_index.indices import ZillizCloudPipelineIndex
@@ -229,20 +220,29 @@ class ZillizCloudPipelineIndex(BaseManagedIndex):
             >>>     metadata_schema={'year': 'Int32', 'author': 'VarChar'}  # optional, defaults to None
             >>> )
         """
-        if len(self.pipeline_ids) > 0:
-            raise RuntimeError(
-                f"Pipelines already exist for collection {self.collection_name}: {self.pipeline_ids}"
-            )
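+        # Map the requested data type onto Zilliz Cloud Pipelines function actions for ingestion and search.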
+        if data_type == "text":
+            ingest_action = "INDEX_TEXT"
+            search_action = "SEARCH_TEXT"
+        elif data_type == "doc":
+            ingest_action = "INDEX_DOC"
+            search_action = "SEARCH_DOC_CHUNK"
+        else:
+            raise ValueError("Only 'text' or 'doc' is supported as the data type.")
 
         params_dict = {}
-        index_doc_func = {
-            "name": "index_my_doc",
-            "action": "INDEX_DOC",
-            "inputField": "doc_url",
-            "language": "ENGLISH",
+        additional_params = kwargs or {}
+
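+        # Pop shared options out of kwargs; anything left over is forwarded to the ingestion function below.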
+        language = additional_params.pop("language", "ENGLISH")
+        embedding = additional_params.pop("embedding", "zilliz/bge-base-en-v1.5")
+        reranker = additional_params.pop("reranker", None)
+        index_func = {
+            "name": "llamaindex_index",
+            "action": ingest_action,
+            "language": language,
+            "embedding": embedding,
         }
-        index_doc_func.update(kwargs)
-        functions = [index_doc_func]
+        index_func.update(additional_params)
+        ingest_functions = [index_func]
         if metadata_schema:
             for k, v in metadata_schema.items():
                 preserve_func = {
@@ -252,67 +252,71 @@ class ZillizCloudPipelineIndex(BaseManagedIndex):
                     "outputField": k,
                     "fieldType": v,
                 }
-                functions.append(preserve_func)
+                ingest_functions.append(preserve_func)
         params_dict["INGESTION"] = {
-            "name": f"{self.collection_name}_ingestion",
-            "projectId": self.project_id,
-            "clusterId": self.cluster_id,
-            "newCollectionName": self.collection_name,
+            "name": f"{collection_name}_ingestion",
+            "projectId": project_id,
+            "clusterId": cluster_id,
+            "collectionName": collection_name,
             "type": "INGESTION",
-            "functions": functions,
+            "functions": ingest_functions,
         }
 
+        search_function = {
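+            # Reuse the embedding service chosen at ingestion so query vectors match the indexed vectors.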
+            "name": "llamaindex_search",
+            "action": search_action,
+            "clusterId": cluster_id,
+            "collectionName": collection_name,
+            "embedding": embedding,
+        }
+        if reranker:
+            search_function["reranker"] = reranker
         params_dict["SEARCH"] = {
-            "name": f"{self.collection_name}_search",
-            "projectId": self.project_id,
+            "name": f"{collection_name}_search",
+            "projectId": project_id,
             "type": "SEARCH",
-            "functions": [
-                {
-                    "name": "search_chunk_text",
-                    "action": "SEARCH_DOC_CHUNK",
-                    "inputField": "query_text",
-                    "clusterId": self.cluster_id,
-                    "collectionName": self.collection_name,
-                }
-            ],
+            "functions": [search_function],
         }
 
         params_dict["DELETION"] = {
-            "name": f"{self.collection_name}_deletion",
+            "name": f"{collection_name}_deletion",
             "type": "DELETION",
             "functions": [
                 {
-                    "name": "purge_chunks_by_doc_name",
-                    "action": "PURGE_DOC_INDEX",
-                    "inputField": "doc_name",
+                    "name": "purge_by_expression",
+                    "action": "PURGE_BY_EXPRESSION",
                 }
             ],
-            "projectId": self.project_id,
-            "clusterId": self.cluster_id,
-            "collectionName": self.collection_name,
+            "projectId": project_id,
+            "clusterId": cluster_id,
+            "collectionName": collection_name,
         }
 
+        domain = f"https://controller.api.{cloud_region}.zillizcloud.com/v1/pipelines"
+        headers = {
+            "Authorization": f"Bearer {api_key}",
+            "Accept": "application/json",
+            "Content-Type": "application/json",
+        }
+        pipeline_ids = {}
+
         for k, v in params_dict.items():
-            response = requests.post(self.domain, headers=self.headers, json=v)
+            response = requests.post(domain, headers=headers, json=v)
             if response.status_code != 200:
                 raise RuntimeError(response.text)
             response_dict = response.json()
             if response_dict["code"] != 200:
                 raise RuntimeError(response_dict)
-            self.pipeline_ids[k] = response_dict["data"]["pipelineId"]
+            pipeline_ids[k] = response_dict["data"]["pipelineId"]
 
-        return self.pipeline_ids
+        return pipeline_ids
 
     @classmethod
     def from_document_url(
         cls,
         url: str,
-        project_id: str,
-        cluster_id: str,
-        token: str,
-        cloud_region: str = "gcp-us-west1",
         pipeline_ids: Optional[Dict] = None,
-        collection_name: str = "zcp_llamalection",
+        api_key: Optional[str] = None,
         metadata: Optional[Dict] = None,
         show_progress: bool = False,
         **kwargs: Any,
@@ -321,12 +325,8 @@ class ZillizCloudPipelineIndex(BaseManagedIndex):
 
         Args:
             url: a gcs or s3 signed url.
-            project_id (str): Zilliz Cloud's project ID.
-            cluster_id (str): Zilliz Cloud's cluster ID.
-            token (str): Zilliz Cloud's token.
-            cloud_region (str='gcp-us-west1'): The region of Zilliz Cloud's cluster. Defaults to 'gcp-us-west1'.
             pipeline_ids (dict=None): A dictionary of pipeline ids for INGESTION, SEARCH, DELETION. Defaults to None.
-            collection_name (str='zcp_llamalection'): A collection name, defaults to 'zcp_llamalection'. If no pipeline_ids is given, get or create pipelines with collection_name.
+            api_key (str): Zilliz Cloud's API Key.
             metadata (Dict=None): A dictionary of metadata. Defaults to None. The key must be string and the value must be a string, float, integer, or boolean.
             show_progress (bool): Whether to show tqdm progress bars. Defaults to False.
 
@@ -335,73 +335,86 @@ class ZillizCloudPipelineIndex(BaseManagedIndex):
 
         Example:
             >>> from llama_index.indices import ZillizCloudPipelineIndex
-            >>> index = ZillizCloudPipelineIndex.from_document_url(
+            >>> api_key = "{YOUR_ZILLIZ_CLOUD_API_KEY}"
+            >>> pipeline_ids = ZillizCloudPipelineIndex.create_pipelines(
+            >>>     project_id="{YOUR_ZILLIZ_PROJECT_ID}",
+            >>>     cluster_id="{YOUR_ZILLIZ_CLUSTER_ID}",
+            >>>     api_key=api_key,
+            >>>     data_type="doc"
+            >>> )
+            >>> ZillizCloudPipelineIndex.from_document_url(
             >>>     url='https://oss_bucket.test_doc.ext',
-            >>>     project_id='YOUR_ZILLIZ_CLOUD_PROJECT_ID',
-            >>>     cluster_id='YOUR_ZILLIZ_CLOUD_CLUSTER_ID',
-            >>>     token='YOUR_ZILLIZ_CLOUD_API_KEY',
-            >>>     collection_name='your_collection_name'
+            >>>     pipeline_ids=pipeline_ids,
+            >>>     api_key=api_key
             >>> )
         """
         metadata = metadata or {}
         index = cls(
-            project_id=project_id,
-            cluster_id=cluster_id,
-            token=token,
-            cloud_region=cloud_region,
             pipeline_ids=pipeline_ids,
-            collection_name=collection_name,
+            api_key=api_key,
             show_progress=show_progress,
             **kwargs,
         )
-        if len(index.pipeline_ids) == 0:
-            index.pipeline_ids = index.create_pipelines(
-                metadata_schema={k: get_zcp_type(v) for k, v in metadata.items()}
-            )
-            print("Pipelines are automatically created.")
 
         try:
-            index.insert_doc_url(url=url, metadata=metadata)
+            index._insert_doc_url(url=url, metadata=metadata)
         except Exception as e:
             logger.error(
                 "Failed to build managed index given document url (%s):\n%s", url, e
             )
         return index
 
-    def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
-        raise NotImplementedError(
-            "Inserting nodes is not yet supported with Zilliz Cloud Pipeline."
-        )
-
-    def delete_ref_doc(
-        self, ref_doc_id: str, delete_from_docstore: bool = False, **delete_kwargs: Any
-    ) -> None:
-        raise NotImplementedError(
-            "Deleting a reference document is not yet supported with Zilliz Cloud Pipeline."
-        )
-
-    def update_ref_doc(self, document: Document, **update_kwargs: Any) -> None:
-        raise NotImplementedError(
-            "Updating referenced document is not yet supported with Zilliz Cloud Pipeline."
-        )
-
     @classmethod
     def from_documents(
         cls: Type[IndexType],
         documents: Sequence[Document],
-        storage_context: Optional[StorageContext] = None,
+        pipeline_ids: Optional[Dict] = None,
+        api_key: Optional[str] = None,
         show_progress: bool = False,
-        callback_manager: Optional[CallbackManager] = None,
-        transformations: Optional[List[TransformComponent]] = None,
-        # deprecated
-        service_context: Optional[ServiceContext] = None,
+        metadata: Optional[Dict] = None,
         **kwargs: Any,
     ) -> IndexType:
-        """Build a Zilliz Cloud Pipeline index from a sequence of documents."""
-        raise NotImplementedError(
-            "Loading from document texts is not yet supported with Zilliz Cloud Pipeline."
+        """Build a Zilliz Cloud Pipeline index from a sequence of documents.
+
+        Args:
+            documents: a sequence of llamaindex documents.
+            pipeline_ids (dict=None): A dictionary of pipeline ids for INGESTION, SEARCH, DELETION. Defaults to None.
+            api_key (str): Zilliz Cloud's API Key.
+            metadata (Dict=None): A dictionary of metadata. Defaults to None. The key must be string and the value must be a string, float, integer, or boolean.
+            show_progress (bool): Whether to show tqdm progress bars. Defaults to False.
+
+        Returns:
+            An initialized ZillizCloudPipelineIndex
+
+        Example:
+            >>> from llama_index.indices import ZillizCloudPipelineIndex
+            >>> api_key = "{YOUR_ZILLIZ_CLOUD_API_KEY}"
+            >>> pipeline_ids = ZillizCloudPipelineIndex.create_pipelines(
+            >>>     project_id="{YOUR_ZILLIZ_PROJECT_ID}",
+            >>>     cluster_id="{YOUR_ZILLIZ_CLUSTER_ID}",
+            >>>     api_key=api_key,
+            >>>     data_type="text"
+            >>> )
+            >>> ZillizCloudPipelineIndex.from_documents(
+            >>>     documents=my_documents,
+            >>>     pipeline_ids=pipeline_ids,
+            >>>     api_key=api_key
+            >>> )
+        """
+        metadata = metadata or {}
+        index = cls(
+            pipeline_ids=pipeline_ids,
+            api_key=api_key,
+            show_progress=show_progress,
+            **kwargs,
         )
 
+        try:
+            index._insert(nodes=documents, metadata=metadata)
+        except Exception as e:
+            logger.error("Failed to build managed index given documents:\n%s", e)
+        return index
+
     def _build_index_from_nodes(self, nodes: Sequence[BaseNode]) -> IndexDict:
         raise NotImplementedError(
             "Building index from nodes is not yet supported with Zilliz Cloud Pipeline."
diff --git a/llama-index-integrations/indices/llama-index-indices-managed-zilliz/pyproject.toml b/llama-index-integrations/indices/llama-index-indices-managed-zilliz/pyproject.toml
index 3b3177eb95486c350512957b2ac915acec7c4ecd..902a66c76c075f8709660746669e58e5a7b56939 100644
--- a/llama-index-integrations/indices/llama-index-indices-managed-zilliz/pyproject.toml
+++ b/llama-index-integrations/indices/llama-index-indices-managed-zilliz/pyproject.toml
@@ -30,7 +30,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-indices-managed-zilliz"
 readme = "README.md"
-version = "0.1.3"
+version = "0.1.4"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
diff --git a/llama-index-integrations/vector_stores/llama-index-vector-stores-milvus/llama_index/vector_stores/milvus/base.py b/llama-index-integrations/vector_stores/llama-index-vector-stores-milvus/llama_index/vector_stores/milvus/base.py
index a9b4a8fa36043095581cc3d1d758970090ccb370..2655abb4d43009a26e6532f2892d91b5017d0931 100644
--- a/llama-index-integrations/vector_stores/llama-index-vector-stores-milvus/llama_index/vector_stores/milvus/base.py
+++ b/llama-index-integrations/vector_stores/llama-index-vector-stores-milvus/llama_index/vector_stores/milvus/base.py
@@ -74,7 +74,7 @@ class MilvusVectorStore(BasePydanticVectorStore):
         uri (str, optional): The URI to connect to, comes in the form of
             "https://address:port" for Milvus or Zilliz Cloud service,
             or "path/to/local/milvus.db" for the lite local Milvus. Defaults to
-            "http://localhost:19530".
+            "./milvus_llamaindex.db".
         token (str, optional): The token for log in. Empty if not using rbac, if
             using rbac it will most likely be "username:password".
         collection_name (str, optional): The name of the collection where data will be
@@ -148,7 +148,7 @@ class MilvusVectorStore(BasePydanticVectorStore):
     stores_text: bool = True
     stores_node: bool = True
 
-    uri: str = "http://localhost:19530"
+    uri: str = "./milvus_llamaindex.db"
     token: str = ""
     collection_name: str = "llamacollection"
     dim: Optional[int]
@@ -173,7 +173,7 @@ class MilvusVectorStore(BasePydanticVectorStore):
 
     def __init__(
         self,
-        uri: str = "http://localhost:19530",
+        uri: str = "./milvus_llamaindex.db",
         token: str = "",
         collection_name: str = "llamacollection",
         dim: Optional[int] = None,
diff --git a/llama-index-integrations/vector_stores/llama-index-vector-stores-milvus/pyproject.toml b/llama-index-integrations/vector_stores/llama-index-vector-stores-milvus/pyproject.toml
index 7f040dd42ca4add3a90ae30469647ec70457cc79..d83ef581340cfaa53fe0a51daa33f362b2c08d83 100644
--- a/llama-index-integrations/vector_stores/llama-index-vector-stores-milvus/pyproject.toml
+++ b/llama-index-integrations/vector_stores/llama-index-vector-stores-milvus/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-vector-stores-milvus"
 readme = "README.md"
-version = "0.1.12"
+version = "0.1.13"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"