diff --git a/examples/composable_indices/city_analysis/PineconeDemo-CityAnalysis.ipynb b/examples/composable_indices/city_analysis/PineconeDemo-CityAnalysis.ipynb
index 685476174652ed0bdc1a1424063b72722cc2a64e..7dfe0ceeb0b87e8e98d2856b0a3f0c45d00cb6ff 100644
--- a/examples/composable_indices/city_analysis/PineconeDemo-CityAnalysis.ipynb
+++ b/examples/composable_indices/city_analysis/PineconeDemo-CityAnalysis.ipynb
@@ -31,7 +31,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 25,
+   "execution_count": null,
    "id": "d35ov8dk_6WP",
    "metadata": {
     "id": "d35ov8dk_6WP"
@@ -45,7 +45,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "id": "fa0e62b6",
    "metadata": {
     "id": "fa0e62b6",
@@ -62,7 +62,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 16,
+   "execution_count": null,
    "id": "e27b0473-4bda-47f0-b6ed-fd482eac1a13",
    "metadata": {
     "id": "e27b0473-4bda-47f0-b6ed-fd482eac1a13",
@@ -73,13 +73,11 @@
     "from llama_index import (\n",
     "    GPTPineconeIndex, \n",
     "    GPTSimpleKeywordTableIndex, \n",
-    "    GPTListIndex, \n",
     "    SimpleDirectoryReader,\n",
     "    LLMPredictor,\n",
     "    ServiceContext\n",
     ")\n",
-    "from langchain.llms.openai import OpenAIChat, OpenAI\n",
-    "import requests"
+    "from langchain.llms.openai import OpenAIChat"
    ]
   },
   {
@@ -96,7 +94,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "id": "fc4692a1",
    "metadata": {
     "id": "fc4692a1",
@@ -110,7 +108,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": null,
    "id": "9ec16a8b-6aae-4bf7-9b83-b82087b4ea52",
    "metadata": {
     "id": "9ec16a8b-6aae-4bf7-9b83-b82087b4ea52",
@@ -146,7 +144,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "id": "39c00aeb-adef-4ce3-8134-031de18e64ea",
    "metadata": {
     "id": "39c00aeb-adef-4ce3-8134-031de18e64ea",
@@ -157,7 +155,7 @@
     "# Load all wiki documents\n",
     "city_docs = {}\n",
     "for wiki_title in wiki_titles:\n",
-    "    city_docs[wiki_title] = SimpleDirectoryReader(input_files=[f\"data/{wiki_title}.txt\"]).load_data()\n"
+    "    city_docs[wiki_title] = SimpleDirectoryReader(input_files=[f\"data/{wiki_title}.txt\"]).load_data()"
    ]
   },
   {
@@ -165,142 +163,164 @@
    "id": "84bfcaa1-db15-45ba-8af1-fee548354965",
    "metadata": {},
    "source": [
-    "### Initialize Pinecone Indexes"
+    "## Initialize Pinecone Indexes"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": null,
    "id": "06b261d7-f2a3-47b3-89ce-5f6103926174",
    "metadata": {
     "tags": []
    },
    "outputs": [],
    "source": [
-    "import pinecone"
+    "import pinecone\n",
+    "import os"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
-   "id": "73a0cf0f-62b4-4f52-9c7f-726838d71f9a",
-   "metadata": {
-    "tags": []
-   },
+   "execution_count": null,
+   "id": "e2fc4bfb",
+   "metadata": {},
    "outputs": [],
    "source": [
     "api_key = \"\"\n",
-    "pinecone.init(api_key=api_key, environment=\"us-west1-gcp\")"
+    "environment = \"eu-west1-gcp\"\n",
+    "index_name = \"quickstart\""
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
-   "id": "7bfbbf9d-4432-4af9-94ff-01e084c0cde0",
+   "execution_count": null,
+   "id": "7df2f613",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "os.environ['PINECONE_API_KEY'] = api_key"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "M0GylZB-C2zL",
    "metadata": {
+    "id": "M0GylZB-C2zL",
+    "scrolled": true,
     "tags": []
    },
    "outputs": [],
    "source": [
-    "index = pinecone.Index(\"quickstart\")"
+    "# LLM Predictor (gpt-3.5-turbo)\n",
+    "llm_predictor_chatgpt = LLMPredictor(\n",
+    "    llm=OpenAIChat(temperature=0, model_name=\"gpt-3.5-turbo\")\n",
+    ")\n",
+    "service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor_chatgpt)"
    ]
   },
   {
    "cell_type": "markdown",
-   "id": "f1782198-c0de-4679-8951-1297c21b8639",
+   "id": "b373db78",
+   "metadata": {},
+   "source": [
+    "### Recommended Option:  Pass API key via env variable, and index_name & environment as argument"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "cd7e6946",
    "metadata": {
-    "id": "f1782198-c0de-4679-8951-1297c21b8639"
+    "scrolled": true
    },
+   "outputs": [],
+   "source": [
+    "# Build city document index\n",
+    "city_indices = {}\n",
+    "for pinecone_title, wiki_title in zip(pinecone_titles, wiki_titles):\n",
+    "    metadata_filters = {\"wiki_title\": wiki_title}\n",
+    "    city_indices[wiki_title] = GPTPineconeIndex.from_documents(\n",
+    "        city_docs[wiki_title], index_name=index_name, environment=environment, metadata_filters=metadata_filters\n",
+    "    )\n",
+    "    # set summary text for city\n",
+    "    city_indices[wiki_title].index_struct.index_id = pinecone_title\n",
+    "    city_indices[wiki_title].save_to_disk(f'index_{wiki_title}.json')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "c35d3c6b",
+   "metadata": {},
    "source": [
-    "### Building the document indices\n",
-    "Build a vector index for the wiki pages about cities and persons, and PG essay"
+    "**NOTE**: Directly load the index without specifying `pinecon_index` again!"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 17,
-   "id": "M0GylZB-C2zL",
+   "execution_count": null,
+   "id": "9b555dac",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# If indices already saved, try loading\n",
+    "city_indices = {}\n",
+    "for wiki_title in wiki_titles:\n",
+    "    city_indices[wiki_title] = GPTPineconeIndex.load_from_disk(\n",
+    "      f'index_{wiki_title}.json'\n",
+    "    )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "b1d69b03",
+   "metadata": {},
+   "source": [
+    "### Alternative Option: instantiate pinecone client first, then pass to GPTPineconeIndex"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "73a0cf0f-62b4-4f52-9c7f-726838d71f9a",
    "metadata": {
-    "id": "M0GylZB-C2zL",
     "tags": []
    },
    "outputs": [],
    "source": [
-    "# LLM Predictor (gpt-3.5-turbo)\n",
-    "llm_predictor_chatgpt = LLMPredictor(llm=OpenAIChat(temperature=0, model_name=\"gpt-3.5-turbo\"))\n",
-    "service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor_chatgpt)"
+    "pinecone.init(api_key=api_key, environment=environment)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "5431e83e-428b-4473-bad1-24b7a6c4db38",
+   "id": "7bfbbf9d-4432-4af9-94ff-01e084c0cde0",
    "metadata": {
-    "colab": {
-     "base_uri": "https://localhost:8080/",
-     "height": 183,
-     "referenced_widgets": [
-      "b5566e3db2914ddebd80d7bde75b2559",
-      "208d404f405a42a3b06d65ad67fb7322",
-      "7da29a2b6508494282acbc459eccbb96",
-      "47838fa763ca40598b2622a9d1e79444",
-      "ff32a3f12e814740a1cd5dd12bd731d4",
-      "3fef46c902524717b377dee6c1dfc929",
-      "fd8b887c1f7149f2876cf8a31e534ad6",
-      "7438aea716f44d85ad1c2b49a93acd83",
-      "fe39f994fa9b4d7daa232e1dcd2b0e8b",
-      "b102e756f9b848a98f58396fc825be84",
-      "fbd7219af1924d2ead5310eb7b35aab0",
-      "3b4c1066797b43a586611ec2d63e7ca1",
-      "c06865c1e01a441698dacf48600dd03c",
-      "9d229e5dd56e4d539ca2c1b9f0a37812",
-      "868aa268dd28498d902782215e53c6fa",
-      "46f644cf589e4a48a6fad1742f0c0575",
-      "adb40ef11f094594b14776e238955224",
-      "7b47c78391a4431aa2d3f84677f24046",
-      "398f1c0f56fe4f218d999df138adfdac",
-      "f1839e86863948f68314f81ba6bca4c9",
-      "3c37e72850c746ce9c919add5340dede",
-      "2053e6adef1b4dba89f861eaf3d916fd",
-      "eab4127882d24acfa9518ebff6f4e22a",
-      "64b754f563834be0a6963349b1f2dcf2",
-      "c7636a6d7380465895b8c86d34caf500",
-      "f7803dea63994cc2a31acf805bd19e67",
-      "380a0c11434241b191b17421e395be8b",
-      "a02534c347aa4865ab4ab3de3a3ee2f5",
-      "b0ccb9d9d96e4ed8bec4d540c34d337c",
-      "f22e9615de674e05978f332eb88750cf",
-      "b53e8481f6d64018988dc03081bf2765",
-      "b458d6fa793d4fa080b9f1e5013af3de",
-      "119d6d7a8d524aa49170f5784ebc6b9e",
-      "d55f842766484d299c75f74e31e7aa6a",
-      "1bdaf4dab16f48dbaeed3fb9bf268e45",
-      "026cc1a42e154f1f92b5236869311929",
-      "a2edbc4195d843e0acfba83726a08e78",
-      "40e148c291ad4f739998a7eac55a8af6",
-      "028aa5d1f7a74d538b5c606d4a6d146f",
-      "c078fe9a056a473dab7d474cd7907154",
-      "4cc9ec6ba46647aba2d53e352f91c137",
-      "f2a1c5087d0e44909139697ed90474e8",
-      "7b24b46d6c3643e581ba003a9c473745",
-      "3f748152b9274556afad2555572aa9f4"
-     ]
-    },
-    "id": "5431e83e-428b-4473-bad1-24b7a6c4db38",
-    "outputId": "5721e863-d460-4f5c-9e36-5a586180b669",
     "tags": []
    },
    "outputs": [],
+   "source": [
+    "pinecone_index = pinecone.Index(index_name)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3cb9e7ec",
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
    "source": [
     "# Build city document index\n",
     "city_indices = {}\n",
     "for pinecone_title, wiki_title in zip(pinecone_titles, wiki_titles):\n",
     "    metadata_filters = {\"wiki_title\": wiki_title}\n",
     "    city_indices[wiki_title] = GPTPineconeIndex.from_documents(\n",
-    "        city_docs[wiki_title], pinecone_index=index, metadata_filters=metadata_filters\n",
+    "        city_docs[wiki_title], pinecone_index=pinecone_index, metadata_filters=metadata_filters\n",
     "    )\n",
     "    # set summary text for city\n",
-    "    city_indices[wiki_title].index_struct.doc_id = pinecone_title\n",
+    "    city_indices[wiki_title].index_struct.index_id = pinecone_title\n",
     "    city_indices[wiki_title].save_to_disk(f'index_{wiki_title}.json')"
    ]
   },
@@ -311,16 +331,16 @@
     "id": "b8aaf556-df77-4fac-812b-0b6c6d1da0ef"
    },
    "source": [
-    "### Loading the indices\n",
-    "Build a vector index for the NYC wiki page and PG essay"
+    "**NOTE**: When we load the indices, we must specify `pinecone_index=pinecone_index` again."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 18,
+   "execution_count": null,
    "id": "98068ef8-aead-46e7-8dac-0d05b5a86e6a",
    "metadata": {
-    "id": "98068ef8-aead-46e7-8dac-0d05b5a86e6a"
+    "id": "98068ef8-aead-46e7-8dac-0d05b5a86e6a",
+    "scrolled": true
    },
    "outputs": [],
    "source": [
@@ -328,7 +348,7 @@
     "city_indices = {}\n",
     "for wiki_title in wiki_titles:\n",
     "    city_indices[wiki_title] = GPTPineconeIndex.load_from_disk(\n",
-    "      f'index_{wiki_title}.json', pinecone_index=index\n",
+    "      f'index_{wiki_title}.json', pinecone_index=pinecone_index\n",
     "    )"
    ]
   },
@@ -342,23 +362,12 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 20,
+   "execution_count": null,
    "id": "d5865649-16c2-4681-a6cf-ccee589dcaa7",
    "metadata": {
     "tags": []
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "INFO:gpt_index.token_counter.token_counter:> [query] Total LLM token usage: 4381 tokens\n",
-      "> [query] Total LLM token usage: 4381 tokens\n",
-      "INFO:gpt_index.token_counter.token_counter:> [query] Total embedding token usage: 9 tokens\n",
-      "> [query] Total embedding token usage: 9 tokens\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "response = city_indices[\"Boston\"].query(\n",
     "    \"Tell me about the arts and culture of Boston\",\n",
@@ -368,23 +377,12 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 22,
+   "execution_count": null,
    "id": "f8ec6f33-73d1-46cb-93f0-d76f9c42d78d",
    "metadata": {
     "tags": []
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Boston has a rich arts and culture scene, with numerous art galleries and museums such as the Institute of Contemporary Art, Boston Children's Museum, Museum of Science, and the New England Aquarium. The city is also home to several historic churches, including the oldest church in Boston, First Church in Boston, and King's Chapel, the city's first Anglican church. Boston has a strong religious presence, with the Roman Catholic Archdiocese of Boston and the Episcopal Diocese of Massachusetts both based in the city. The city also has a love for sports, with teams in the four major North American men's professional sports leagues plus Major League Soccer, and has won 39 championships in these leagues. Boston Common, the oldest public park in the United States, and the adjacent Boston Public Garden are part of the Emerald Necklace, a string of parks designed by Frederick Law Olmsted to encircle the city. The city's park system is well-reputed nationally, with Boston tied with Sacramento and San Francisco for having the third-best park system among the 50 most populous US cities. Additionally, Boston has a thriving media scene, with a variety of publications serving different communities, such as the teen publication Teens in Print, the lifestyle magazine The Improper Bostonian, and several Spanish-language newspapers like El Planeta and La Semana. The city's large LGBT population is served by publications like The Rainbow Times. Boston also has a vibrant radio and television market, with major stations like WBZ and WEEI serving the area, as well as college and university radio stations.\n",
-      "> Source (Doc id: None): \n",
-      "\n",
-      "Institute of Contemporary Art is housed in a contemporary building designed by Diller Scofidio ...\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "print(str(response))\n",
     "print(response.get_formatted_sources())"
@@ -405,19 +403,19 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 24,
+   "execution_count": null,
    "id": "6d68750c-e5ae-481a-8b03-6173020c9bf3",
    "metadata": {
     "id": "6d68750c-e5ae-481a-8b03-6173020c9bf3"
    },
    "outputs": [],
    "source": [
-    "from llama_index.composability.graph import ComposableGraph"
+    "from gpt_index.indices.composability.graph import ComposableGraph"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 25,
+   "execution_count": null,
    "id": "2c01db2c-07b1-4e9b-bfda-e25b8953cde9",
    "metadata": {
     "tags": []
@@ -433,32 +431,12 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 26,
+   "execution_count": null,
    "id": "586b404a-5cb6-465f-8a0f-dfb3b27cd80a",
    "metadata": {
     "tags": []
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "INFO:gpt_index.token_counter.token_counter:> [build_index_from_nodes] Total LLM token usage: 0 tokens\n",
-      "> [build_index_from_nodes] Total LLM token usage: 0 tokens\n",
-      "INFO:gpt_index.token_counter.token_counter:> [build_index_from_nodes] Total embedding token usage: 0 tokens\n",
-      "> [build_index_from_nodes] Total embedding token usage: 0 tokens\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "[nltk_data] Downloading package stopwords to\n",
-      "[nltk_data]     /Users/jerryliu/nltk_data...\n",
-      "[nltk_data]   Package stopwords is already up-to-date!\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "graph = ComposableGraph.from_indices(\n",
     "    GPTSimpleKeywordTableIndex,\n",
@@ -470,7 +448,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 27,
+   "execution_count": null,
    "id": "ae127943-afac-48b4-b22d-84a37e553e4b",
    "metadata": {
     "id": "ae127943-afac-48b4-b22d-84a37e553e4b"
@@ -483,7 +461,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 28,
+   "execution_count": null,
    "id": "dca2b64b-9af1-456f-8dab-822bfdc5d0ac",
    "metadata": {
     "id": "dca2b64b-9af1-456f-8dab-822bfdc5d0ac"
@@ -491,12 +469,23 @@
    "outputs": [],
    "source": [
     "# [optional] load from disk\n",
-    "graph = ComposableGraph.load_from_disk(\"index_multi_doc_graph.json\")"
+    "\n",
+    "# NOTE: only necessary if we passed in pinecone_index when constructing the vector store indexes\n",
+    "query_context_kwargs = {\n",
+    "    index.index_struct.index_id: {\n",
+    "        'vector_store': {\n",
+    "            'pinecone_index': pinecone_index, \n",
+    "        }\n",
+    "    }\n",
+    "    for index in city_indices.values()\n",
+    "}\n",
+    "\n",
+    "graph = ComposableGraph.load_from_disk(\"index_multi_doc_graph.json\", query_context_kwargs=query_context_kwargs)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 29,
+   "execution_count": null,
    "id": "76c251ca-b06b-42e9-ac99-aa0a0a5187d4",
    "metadata": {
     "id": "76c251ca-b06b-42e9-ac99-aa0a0a5187d4"
@@ -506,14 +495,13 @@
     "# set query config\n",
     "# NOTE: we need to specify a query config for every pinecone index \n",
     "query_configs = []\n",
-    "for pinecone_title, wiki_title in zip(pinecone_title, wiki_title):\n",
+    "for pinecone_title, wiki_title in zip(pinecone_titles, wiki_titles):\n",
     "    query_config = {\n",
     "        \"index_struct_id\": pinecone_title,\n",
     "        \"index_struct_type\": \"pinecone\",\n",
     "        \"query_mode\": \"default\",\n",
     "        \"query_kwargs\": {\n",
     "            \"similarity_top_k\": 1,\n",
-    "            \"pinecone_index\": index,\n",
     "        }\n",
     "    }\n",
     "    query_configs.append(query_config)\n",
@@ -549,28 +537,13 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 31,
+   "execution_count": null,
    "id": "OVnzf3myEz88",
    "metadata": {
     "id": "OVnzf3myEz88",
     "tags": []
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "INFO:gpt_index.indices.query.keyword_table.query:> Starting query: Tell me more about Boston\n",
-      "> Starting query: Tell me more about Boston\n",
-      "INFO:gpt_index.indices.query.keyword_table.query:query keywords: ['tell', 'boston']\n",
-      "query keywords: ['tell', 'boston']\n",
-      "INFO:gpt_index.indices.query.keyword_table.query:> Extracted keywords: ['boston']\n",
-      "> Extracted keywords: ['boston']\n",
-      "INFO:gpt_index.indices.common.tree.base:> Building index from nodes: 0 chunks\n",
-      "> Building index from nodes: 0 chunks\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "query_str = \"Tell me more about Boston\"\n",
     "response_chatgpt = graph.query(query_str, query_configs=query_configs, service_context=service_context)"
@@ -578,26 +551,13 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 32,
+   "execution_count": null,
    "id": "29f32345-6f28-4545-afa9-e3c5849dfb82",
    "metadata": {
     "id": "29f32345-6f28-4545-afa9-e3c5849dfb82",
     "outputId": "904002ea-f062-4f7d-8fe6-3e6b7b13b420"
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Boston is a historic city located in the northeastern United States and is the capital of Massachusetts. It has a population of over 700,000 people and played a significant role in the American Revolution, with events such as the Boston Tea Party and the Battle of Bunker Hill taking place there. Boston is also known for its universities, including Harvard and MIT, and its thriving economy, which is driven by industries such as finance, healthcare, and technology. The city underwent urban renewal projects in the mid-20th century, which aimed to revitalize the city but also resulted in the displacement of many low-income residents. However, the city experienced an economic recovery in the 1970s and 1980s, which led to the development of new businesses and cultural institutions. Today, Boston is a vibrant and diverse city with a rich history and a bright future.\n",
-      "> Source (Doc id: None): The existing answer provides a comprehensive overview of Boston, including its history, populatio...\n",
-      "\n",
-      "> Source (Doc id: None): \n",
-      "\n",
-      "Boston (US: ), officially the City of Boston, is the capital and largest city of the Commonweal...\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "print(response_chatgpt)\n",
     "print(response_chatgpt.get_formatted_sources())"
@@ -606,7 +566,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "9d84142e-7985-42b2-b0dd-445861f8617d",
+   "id": "38438507",
    "metadata": {},
    "outputs": [],
    "source": []
@@ -617,9 +577,9 @@
    "provenance": []
   },
   "kernelspec": {
-   "display_name": "llama_index",
+   "display_name": "Python 3 (ipykernel)",
    "language": "python",
-   "name": "llama_index"
+   "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
@@ -631,7 +591,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.10"
+   "version": "3.11.0"
   },
   "vscode": {
    "interpreter": {
diff --git a/gpt_index/constants.py b/gpt_index/constants.py
index 122b616fed36e25fad10884fcda599e4ddc1eb7e..6e5471faf3ef01e94248a49b5753c3c0be197b16 100644
--- a/gpt_index/constants.py
+++ b/gpt_index/constants.py
@@ -7,7 +7,8 @@ NUM_OUTPUTS = 256
 
 INDEX_STRUCT_KEY = "index_struct"
 DOCSTORE_KEY = "docstore"
-VECTOR_STORE_CONFIG_DICT_KEY = "vector_store"
+VECTOR_STORE_KEY = "vector_store"
+ADDITIONAL_QUERY_CONTEXT_KEY = "query_context"
 
 TYPE_KEY = "__type__"
 DATA_KEY = "__data__"
diff --git a/gpt_index/indices/base.py b/gpt_index/indices/base.py
index 4458a1c7cb15486c6b008798fe6521fa7292ffa1..5d5028f68624040bf7121a5df4a616aa0544e055 100644
--- a/gpt_index/indices/base.py
+++ b/gpt_index/indices/base.py
@@ -194,6 +194,16 @@ class BaseGPTIndex(Generic[IS], ABC):
 
         return refreshed_documents
 
+    @property
+    def query_context(self) -> Dict[str, Any]:
+        """Additional context necessary for making a query.
+
+        This should capture any index-specific clients, services, etc.,
+        that are not captured by the index struct, docstore, or service context.
+        For example, a vector store index would pass its vector store.
+        """
+        return {}
+
     def _preprocess_query(self, mode: QueryMode, query_kwargs: Dict) -> None:
         """Preprocess query.
 
@@ -235,6 +245,7 @@ class BaseGPTIndex(Generic[IS], ABC):
         query_runner = QueryRunner(
             index_struct=self._index_struct,
             service_context=self._service_context,
+            query_context={self._index_struct.index_id: self.query_context},
             docstore=self._docstore,
             query_configs=[query_config],
             query_transform=query_transform,
@@ -276,9 +287,10 @@ class BaseGPTIndex(Generic[IS], ABC):
             query_kwargs=query_kwargs,
         )
         query_runner = QueryRunner(
-            self._index_struct,
-            self._service_context,
-            self._docstore,
+            index_struct=self._index_struct,
+            service_context=self._service_context,
+            query_context={self._index_struct.index_id: self.query_context},
+            docstore=self._docstore,
             query_configs=[query_config],
             query_transform=query_transform,
             recursive=False,
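The new `query_context` hook gives subclasses one place to surface
non-serializable clients: `BaseGPTIndex` forwards `{index_id: query_context}`
to the `QueryRunner`, which merges it into the query kwargs. A hypothetical
subclass (illustrative only, not part of this diff) would just override the
property:

    from typing import Any, Dict

    class GPTMyClientIndex(BaseGPTIndex):
        """Hypothetical index backed by an external client (sketch only)."""

        # ... other required methods omitted ...

        @property
        def query_context(self) -> Dict[str, Any]:
            # everything returned here is injected into query_kwargs at query time
            return {"my_client": self._my_client}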
diff --git a/gpt_index/indices/composability/graph.py b/gpt_index/indices/composability/graph.py
index a76cf8ca14cca8c6129cd821f5cc0b119301a099..7a4013dda2bf0abc86e9c7782045e20992b49860 100644
--- a/gpt_index/indices/composability/graph.py
+++ b/gpt_index/indices/composability/graph.py
@@ -3,13 +3,21 @@
 import json
 from typing import Any, Dict, List, Optional, Sequence, Type, Union, cast
 
-from gpt_index.constants import DOCSTORE_KEY, INDEX_STRUCT_KEY
+from gpt_index.constants import (
+    ADDITIONAL_QUERY_CONTEXT_KEY,
+    DOCSTORE_KEY,
+    INDEX_STRUCT_KEY,
+)
 from gpt_index.data_structs.data_structs_v2 import CompositeIndex
 from gpt_index.data_structs.data_structs_v2 import V2IndexStruct
 from gpt_index.data_structs.data_structs_v2 import V2IndexStruct as IndexStruct
 from gpt_index.data_structs.node_v2 import IndexNode, DocumentRelationship
 from gpt_index.docstore import DocumentStore
 from gpt_index.indices.base import BaseGPTIndex
+from gpt_index.indices.composability.utils import (
+    load_query_context_from_dict,
+    save_query_context_to_dict,
+)
 from gpt_index.indices.query.query_runner import QueryRunner
 from gpt_index.indices.query.query_transform.base import BaseQueryTransform
 from gpt_index.indices.query.schema import QueryBundle, QueryConfig
@@ -28,11 +36,14 @@ class ComposableGraph:
         index_struct: CompositeIndex,
         docstore: DocumentStore,
         service_context: Optional[ServiceContext] = None,
+        query_context: Optional[Dict[str, Dict[str, Any]]] = None,
+        **kwargs: Any,
     ) -> None:
         """Init params."""
         self._docstore = docstore
         self._index_struct = index_struct
         self._service_context = service_context or ServiceContext.from_defaults()
+        self._query_context = query_context or {}
 
     @property
     def index_struct(self) -> CompositeIndex:
@@ -48,7 +59,8 @@ class ComposableGraph:
         all_index_structs: Dict[str, IndexStruct],
         root_id: str,
         docstores: Sequence[DocumentStore],
-        **kwargs: Any,
+        query_context: Optional[Dict[str, Dict[str, Any]]] = None,
+        service_context: Optional[ServiceContext] = None,
     ) -> "ComposableGraph":
         composite_index_struct = CompositeIndex(
             all_index_structs=all_index_structs,
@@ -56,7 +68,10 @@ class ComposableGraph:
         )
         merged_docstore = DocumentStore.merge(docstores)
         return cls(
-            index_struct=composite_index_struct, docstore=merged_docstore, **kwargs
+            index_struct=composite_index_struct,
+            docstore=merged_docstore,
+            query_context=query_context,
+            service_context=service_context,
         )
 
     @classmethod
@@ -67,12 +82,7 @@ class ComposableGraph:
         index_summaries: Optional[Sequence[str]] = None,
         **kwargs: Any,
     ) -> "ComposableGraph":  # type: ignore
-        """Create composable graph using this index class as the root.
-
-        NOTE: this is mostly syntactic sugar,
-        roughly equivalent to directly calling `ComposableGraph.from_indices`.
-
-        """
+        """Create composable graph using this index class as the root."""
         if index_summaries is None:
             for index in children_indices:
                 if index.index_struct.summary is None:
@@ -116,6 +126,13 @@ class ComposableGraph:
             root_index
         ]
 
+        # collect query context, e.g. vector stores
+        query_context: Dict[str, Dict[str, Any]] = {}
+        for index in list(children_indices) + [root_index]:
+            assert isinstance(index.index_struct, V2IndexStruct)
+            index_id = index.index_struct.index_id
+            query_context[index_id] = index.query_context
+
         return cls.from_index_structs_and_docstores(
             all_index_structs={
                 index.index_struct.index_id: index.index_struct for index in all_indices
@@ -123,6 +140,7 @@ class ComposableGraph:
             root_id=root_index.index_struct.index_id,
             docstores=[index.docstore for index in all_indices],
             service_context=root_index.service_context,
+            query_context=query_context,
         )
 
     def query(
@@ -137,6 +155,7 @@ class ComposableGraph:
         query_runner = QueryRunner(
             index_struct=self._index_struct,
             service_context=service_context,
+            query_context=self._query_context,
             docstore=self._docstore,
             query_configs=query_configs,
             query_transform=query_transform,
@@ -156,6 +175,7 @@ class ComposableGraph:
         query_runner = QueryRunner(
             index_struct=self._index_struct,
             service_context=service_context,
+            query_context=self._query_context,
             docstore=self._docstore,
             query_configs=query_configs,
             query_transform=query_transform,
@@ -193,11 +213,24 @@ class ComposableGraph:
         # lazy load registry
         from gpt_index.indices.registry import load_index_struct_from_dict
 
-        result_dict = json.loads(index_string)
-        index_struct = load_index_struct_from_dict(result_dict["index_struct"])
-        docstore = DocumentStore.load_from_dict(result_dict["docstore"])
+        result_dict: Dict[str, Any] = json.loads(index_string)
+        index_struct = load_index_struct_from_dict(result_dict[INDEX_STRUCT_KEY])
+        docstore = DocumentStore.load_from_dict(result_dict[DOCSTORE_KEY])
+
+        # NOTE: this allows users to pass in kwargs at load time
+        #       e.g. passing in vector store client
+        query_context_kwargs = kwargs.pop("query_context_kwargs", None)
+        query_context = load_query_context_from_dict(
+            result_dict.get(ADDITIONAL_QUERY_CONTEXT_KEY, {}),
+            query_context_kwargs=query_context_kwargs,
+        )
         assert isinstance(index_struct, CompositeIndex)
-        return cls(index_struct, docstore, **kwargs)
+        return cls(
+            index_struct=index_struct,
+            docstore=docstore,
+            query_context=query_context,
+            **kwargs,
+        )
 
     @classmethod
     def load_from_disk(cls, save_path: str, **kwargs: Any) -> "ComposableGraph":
@@ -231,6 +264,9 @@ class ComposableGraph:
         out_dict: Dict[str, Any] = {
             INDEX_STRUCT_KEY: self._index_struct.to_dict(),
             DOCSTORE_KEY: self._docstore.serialize_to_dict(),
+            ADDITIONAL_QUERY_CONTEXT_KEY: save_query_context_to_dict(
+                self._query_context
+            ),
         }
         return json.dumps(out_dict)
 
diff --git a/gpt_index/indices/composability/utils.py b/gpt_index/indices/composability/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ff7154502bd74aa147c2d06aa925ec14d8d1f2e
--- /dev/null
+++ b/gpt_index/indices/composability/utils.py
@@ -0,0 +1,70 @@
+from typing import Any, Dict, Optional, Type
+from gpt_index.constants import VECTOR_STORE_KEY
+from gpt_index.vector_stores.registry import (
+    VectorStoreType,
+    load_vector_store_from_dict,
+    save_vector_store_to_dict,
+)
+from gpt_index.vector_stores.types import VectorStore
+
+
+def save_query_context_to_dict(
+    query_context: Dict[str, Dict[str, Any]],
+    vector_store_cls_to_type: Optional[Dict[Type[VectorStore], VectorStoreType]] = None,
+) -> Dict[str, Dict[str, Any]]:
+    """Save index-specific query context dict to JSON dict.
+
+    Example query context dict to save:
+    query_context = {
+        <index_id>: {
+            'vector_store': <vector_store>
+        }
+    }
+
+    NOTE: Right now, we only consider vector stores.
+    """
+    save_dict = {}
+    for index_id, index_context_dict in query_context.items():
+        index_save_dict = {}
+        for key, val in index_context_dict.items():
+            if isinstance(val, VectorStore):
+                index_save_dict[key] = save_vector_store_to_dict(
+                    val, cls_to_type=vector_store_cls_to_type
+                )
+        save_dict[index_id] = index_save_dict
+    return save_dict
+
+
+def load_query_context_from_dict(
+    save_dict: Dict[str, Dict[str, Any]],
+    vector_store_type_to_cls: Optional[Dict[VectorStoreType, Type[VectorStore]]] = None,
+    query_context_kwargs: Optional[Dict[str, Dict[str, Any]]] = None,
+) -> Dict[str, Dict[str, Any]]:
+    """Load index-specific query context from JSON dict.
+
+    Example loaded query context dict:
+    query_context = {
+        <index_id>: {
+            'vector_store': <vector_store>
+        }
+    }
+
+    NOTE: Right now, we only consider vector stores.
+    """
+    if query_context_kwargs is None:
+        query_context_kwargs = {}
+
+    context_dict = {}
+    for index_id, index_save_dict in save_dict.items():
+        index_context_dict = {}
+        index_kwargs = query_context_kwargs.get(index_id, {})
+        for key, val in index_save_dict.items():
+            if key == VECTOR_STORE_KEY:
+                key_kwargs = index_kwargs.get(key, {})
+                index_context_dict[key] = load_vector_store_from_dict(
+                    val, vector_store_type_to_cls, **key_kwargs
+                )
+            else:
+                index_context_dict[key] = val
+        context_dict[index_id] = index_context_dict
+    return context_dict
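A minimal round-trip through the two helpers, assuming a `SimpleVectorStore`
(registered with the vector store registry, fully serializable, and
constructible with defaults):

    from gpt_index.vector_stores.simple import SimpleVectorStore

    query_context = {"index-1": {"vector_store": SimpleVectorStore()}}
    save_dict = save_query_context_to_dict(query_context)  # JSON-safe dict
    restored = load_query_context_from_dict(save_dict)     # stores rebuilt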
diff --git a/gpt_index/indices/query/query_runner.py b/gpt_index/indices/query/query_runner.py
index 17778b99588673f0587b0f60aa26dec2f96ab826..f9c17b6635478eedc4366cedcc1ac76b0c48da5b 100644
--- a/gpt_index/indices/query/query_runner.py
+++ b/gpt_index/indices/query/query_runner.py
@@ -85,6 +85,7 @@ class QueryRunner:
         index_struct: IndexStruct,
         service_context: ServiceContext,
         docstore: DocumentStore,
+        query_context: Dict[str, Dict[str, Any]],
         query_configs: Optional[List[QUERY_CONFIG_TYPE]] = None,
         query_transform: Optional[BaseQueryTransform] = None,
         query_combiner: Optional[BaseQueryCombiner] = None,
@@ -96,6 +97,7 @@ class QueryRunner:
         self._index_struct = index_struct
         self._service_context = service_context
         self._docstore = docstore
+        self._query_context = query_context
 
         # query configurations and transformation
         self._query_config_map = _get_query_config_map(query_configs)
@@ -164,6 +166,11 @@ class QueryRunner:
 
         query_cls = INDEX_STRUT_TYPE_TO_QUERY_MAP[index_struct_type][mode]
         query_kwargs = self._get_query_kwargs(config)
+
+        # Inject additional query context into query kwargs
+        query_context = self._query_context.get(index_struct.index_id, {})
+        query_kwargs.update(query_context)
+
         query_obj = query_cls(
             index_struct=index_struct,
             docstore=self._docstore,
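For a Pinecone-backed sub-index, the injection above means a query config of
`{"similarity_top_k": 1}` reaches the query class roughly as (illustrative
values):

    query_kwargs = {
        "similarity_top_k": 1,                   # from the query config
        "vector_store": pinecone_vector_store,   # injected via query_context
    }

This is why the notebook no longer passes `pinecone_index` inside
`query_kwargs`.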
diff --git a/gpt_index/indices/vector_store/base.py b/gpt_index/indices/vector_store/base.py
index e8ec402fd3adaa0446fa963b1468928689a77c4b..e07198fa391fccd8950a0b45282031609dec4a5e 100644
--- a/gpt_index/indices/vector_store/base.py
+++ b/gpt_index/indices/vector_store/base.py
@@ -7,7 +7,7 @@ An index that is built on top of an existing vector store.
 from typing import Any, Dict, List, Optional, Sequence, Set, Tuple
 
 from gpt_index.async_utils import run_async_tasks
-from gpt_index.constants import VECTOR_STORE_CONFIG_DICT_KEY
+from gpt_index.constants import VECTOR_STORE_KEY
 from gpt_index.data_structs.data_structs_v2 import IndexDict
 from gpt_index.data_structs.node_v2 import Node
 from gpt_index.indices.base import BaseGPTIndex, QueryMap
@@ -15,6 +15,10 @@ from gpt_index.indices.query.schema import QueryMode
 from gpt_index.indices.service_context import ServiceContext
 from gpt_index.indices.vector_store.base_query import GPTVectorStoreIndexQuery
 from gpt_index.token_counter.token_counter import llm_token_counter
+from gpt_index.vector_stores.registry import (
+    load_vector_store_from_dict,
+    save_vector_store_to_dict,
+)
 from gpt_index.vector_stores.simple import SimpleVectorStore
 from gpt_index.vector_stores.types import NodeEmbeddingResult, VectorStore
 
@@ -246,10 +250,10 @@ class GPTVectorStoreIndex(BaseGPTIndex[IndexDict]):
             BaseGPTIndex: The loaded index.
 
         """
-        config_dict = {}
-        if VECTOR_STORE_CONFIG_DICT_KEY in result_dict:
-            config_dict = result_dict[VECTOR_STORE_CONFIG_DICT_KEY]
-        return super().load_from_dict(result_dict, **config_dict, **kwargs)
+        vector_store = load_vector_store_from_dict(
+            result_dict[VECTOR_STORE_KEY], **kwargs
+        )
+        return super().load_from_dict(result_dict, vector_store=vector_store, **kwargs)
 
     def save_to_dict(self, **save_kwargs: Any) -> dict:
         """Save to string.
@@ -265,11 +269,9 @@ class GPTVectorStoreIndex(BaseGPTIndex[IndexDict]):
 
         """
         out_dict = super().save_to_dict()
-        out_dict[VECTOR_STORE_CONFIG_DICT_KEY] = self._vector_store.config_dict
+        out_dict[VECTOR_STORE_KEY] = save_vector_store_to_dict(self._vector_store)
         return out_dict
 
-    def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None:
-        super()._preprocess_query(mode, query_kwargs)
-        # NOTE: Pass along vector store instance to query objects
-        # TODO: refactor this to be more explicit
-        query_kwargs["vector_store"] = self._vector_store
+    @property
+    def query_context(self) -> Dict[str, Any]:
+        return {"vector_store": self._vector_store}
diff --git a/gpt_index/indices/vector_store/queries.py b/gpt_index/indices/vector_store/queries.py
deleted file mode 100644
index f4918dba8b00ee5ba7a119e754b4ec4521202173..0000000000000000000000000000000000000000
--- a/gpt_index/indices/vector_store/queries.py
+++ /dev/null
@@ -1,260 +0,0 @@
-"""Vector-store specific query classes."""
-
-
-from typing import Any, Dict, Optional
-
-from gpt_index.data_structs.data_structs_v2 import IndexDict
-from gpt_index.indices.vector_store.base_query import GPTVectorStoreIndexQuery
-from gpt_index.vector_stores import (
-    ChatGPTRetrievalPluginClient,
-    ChromaVectorStore,
-    FaissVectorStore,
-    OpensearchVectorStore,
-    PineconeVectorStore,
-    QdrantVectorStore,
-    SimpleVectorStore,
-    WeaviateVectorStore,
-)
-from gpt_index.vector_stores.opensearch import OpensearchVectorClient
-from requests.adapters import Retry
-
-
-class GPTSimpleVectorIndexQuery(GPTVectorStoreIndexQuery):
-    """GPT simple vector index query.
-
-    Args:
-        embed_model (Optional[BaseEmbedding]): embedding model
-        similarity_top_k (int): number of top k results to return
-        simple_vector_store_data_dict: (Optional[dict]): simple vector store data dict,
-
-    """
-
-    def __init__(
-        self,
-        index_struct: IndexDict,
-        simple_vector_store_data_dict: Optional[Dict] = None,
-        **kwargs: Any,
-    ) -> None:
-        """Initialize params."""
-        # TODO: this is a temporary hack to allow composable
-        # indices to work for simple vector stores
-        # Our composability framework at the moment only allows for storage
-        # of index_struct, not vector_store. Therefore in order to
-        # allow simple vector indices to be composed, we need to "infer"
-        # the vector store from the index struct.
-        # NOTE: the next refactor would be to allow users to pass in
-        # the vector store during query-time. However this is currently
-        # not complete in our composability framework because the configs
-        # are keyed on index type, not index id (which means that users
-        # can't pass in distinct vector stores for different subindices).
-        # NOTE: composability on top of other vector stores (pinecone/weaviate)
-        # was already broken in this form.
-        if simple_vector_store_data_dict is None:
-            if len(index_struct.embeddings_dict) > 0:
-                simple_vector_store_data_dict = {
-                    "embedding_dict": index_struct.embeddings_dict,
-                }
-                vector_store = SimpleVectorStore(
-                    simple_vector_store_data_dict=simple_vector_store_data_dict
-                )
-            else:
-                raise ValueError("Vector store is required for vector store query.")
-        else:
-            vector_store = SimpleVectorStore(
-                simple_vector_store_data_dict=simple_vector_store_data_dict
-            )
-        super().__init__(index_struct=index_struct, vector_store=vector_store, **kwargs)
-
-
-class GPTFaissIndexQuery(GPTVectorStoreIndexQuery):
-    """GPT faiss vector index query.
-
-    Args:
-        embed_model (Optional[BaseEmbedding]): embedding model
-        similarity_top_k (int): number of top k results to return
-        faiss_index (faiss.Index): A Faiss Index object (required). Note: the index
-            will be reset during index construction.
-
-    """
-
-    def __init__(
-        self,
-        index_struct: IndexDict,
-        faiss_index: Optional[Any] = None,
-        **kwargs: Any,
-    ) -> None:
-        """Initialize params."""
-        if faiss_index is None:
-            raise ValueError("faiss_index is required.")
-        vector_store = FaissVectorStore(faiss_index)
-        super().__init__(index_struct=index_struct, vector_store=vector_store, **kwargs)
-
-
-class GPTPineconeIndexQuery(GPTVectorStoreIndexQuery):
-    """GPT pinecone vector index query.
-
-    Args:
-        embed_model (Optional[BaseEmbedding]): embedding model
-        similarity_top_k (int): number of top k results to return
-        pinecone_index (Optional[pinecone.Index]): Pinecone index instance
-        pinecone_kwargs (Optional[dict]): Pinecone index kwargs
-
-    """
-
-    def __init__(
-        self,
-        index_struct: IndexDict,
-        pinecone_index: Optional[Any] = None,
-        metadata_filters: Optional[Dict[str, Any]] = None,
-        pinecone_kwargs: Optional[Dict] = None,
-        insert_kwargs: Optional[Dict] = None,
-        query_kwargs: Optional[Dict] = None,
-        delete_kwargs: Optional[Dict] = None,
-        **kwargs: Any,
-    ) -> None:
-        """Initialize params."""
-        if pinecone_index is None and pinecone_kwargs is None:
-            raise ValueError("pinecone_index or pinecone_kwargs is required.")
-        vector_store = PineconeVectorStore(
-            pinecone_index=pinecone_index,
-            metadata_filters=metadata_filters,
-            pinecone_kwargs=pinecone_kwargs,
-            insert_kwargs=insert_kwargs,
-            query_kwargs=query_kwargs,
-            delete_kwargs=delete_kwargs,
-        )
-        super().__init__(index_struct=index_struct, vector_store=vector_store, **kwargs)
-
-
-class GPTWeaviateIndexQuery(GPTVectorStoreIndexQuery):
-    """GPT Weaviate vector index query.
-
-    Args:
-        embed_model (Optional[BaseEmbedding]): embedding model
-        similarity_top_k (int): number of top k results to return
-        weaviate_client (Optional[Any]): Weaviate client instance
-        class_prefix (Optional[str]): Weaviate class prefix
-
-    """
-
-    def __init__(
-        self,
-        index_struct: IndexDict,
-        weaviate_client: Optional[Any] = None,
-        class_prefix: Optional[str] = None,
-        **kwargs: Any,
-    ) -> None:
-        """Initialize params."""
-        if weaviate_client is None:
-            raise ValueError("weaviate_client is required.")
-        vector_store = WeaviateVectorStore(
-            weaviate_client=weaviate_client, class_prefix=class_prefix
-        )
-        super().__init__(index_struct=index_struct, vector_store=vector_store, **kwargs)
-
-
-class GPTQdrantIndexQuery(GPTVectorStoreIndexQuery):
-    """GPT Qdrant vector index query.
-
-    Args:
-        embed_model (Optional[BaseEmbedding]): embedding model
-        similarity_top_k (int): number of top k results to return
-        client (Optional[Any]): QdrantClient instance from `qdrant-client` package
-        collection_name: (Optional[str]): name of the Qdrant collection
-
-    """
-
-    def __init__(
-        self,
-        index_struct: IndexDict,
-        client: Optional[Any] = None,
-        collection_name: Optional[str] = None,
-        **kwargs: Any,
-    ) -> None:
-        """Initialize params."""
-        if client is None:
-            raise ValueError("client is required.")
-        if collection_name is None:
-            raise ValueError("collection_name is required.")
-        vector_store = QdrantVectorStore(client=client, collection_name=collection_name)
-        super().__init__(index_struct=index_struct, vector_store=vector_store, **kwargs)
-
-
-class GPTChromaIndexQuery(GPTVectorStoreIndexQuery):
-    """GPT Chroma vector index query.
-
-    Args:
-        text_qa_template (Optional[QuestionAnswerPrompt]): A Question-Answer Prompt
-            (see :ref:`Prompt-Templates`).
-        embed_model (Optional[BaseEmbedding]): Embedding model to use for
-            embedding similarity.
-        chroma_collection (Optional[Any]): Collection instance from `chromadb` package.
-
-    """
-
-    def __init__(
-        self,
-        index_struct: IndexDict,
-        chroma_collection: Optional[Any] = None,
-        **kwargs: Any,
-    ) -> None:
-        """Initialize params."""
-        if chroma_collection is None:
-            raise ValueError("chroma_collection is required.")
-        vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
-        super().__init__(index_struct=index_struct, vector_store=vector_store, **kwargs)
-
-
-class GPTOpensearchIndexQuery(GPTVectorStoreIndexQuery):
-    """GPT Opensearch vector index query.
-
-    Args:
-        text_qa_template (Optional[QuestionAnswerPrompt]): A Question-Answer Prompt
-            (see :ref:`Prompt-Templates`).
-        embed_model (Optional[BaseEmbedding]): Embedding model to use for
-            embedding similarity.
-        client (Optional[OpensearchVectorClient]): Opensearch vector client.
-
-    """
-
-    def __init__(
-        self,
-        index_struct: IndexDict,
-        client: Optional[OpensearchVectorClient] = None,
-        **kwargs: Any,
-    ) -> None:
-        """Initialize params."""
-        if client is None:
-            raise ValueError("OpensearchVectorClient client is required.")
-        vector_store = OpensearchVectorStore(client=client)
-        super().__init__(index_struct=index_struct, vector_store=vector_store, **kwargs)
-
-
-class ChatGPTRetrievalPluginQuery(GPTVectorStoreIndexQuery):
-    """GPT retrieval plugin query.
-
-    Args:
-        text_qa_template (Optional[QuestionAnswerPrompt]): A Question-Answer Prompt
-            (see :ref:`Prompt-Templates`).
-        embed_model (Optional[BaseEmbedding]): Embedding model to use for
-            embedding similarity.
-
-    """
-
-    def __init__(
-        self,
-        index_struct: IndexDict,
-        endpoint_url: str,
-        bearer_token: Optional[str] = None,
-        retries: Optional[Retry] = None,
-        batch_size: int = 100,
-        **kwargs: Any,
-    ) -> None:
-        """Initialize params."""
-        vector_store = ChatGPTRetrievalPluginClient(
-            endpoint_url=endpoint_url,
-            bearer_token=bearer_token,
-            retries=retries,
-            batch_size=batch_size,
-        )
-        super().__init__(index_struct=index_struct, vector_store=vector_store, **kwargs)
diff --git a/gpt_index/indices/vector_store/vector_indices.py b/gpt_index/indices/vector_store/vector_indices.py
index 32a2615e05cf8565238affddbf9175282fa8cdd2..253b0ec789b2e5868d8a141166e8c0a0ad035e4d 100644
--- a/gpt_index/indices/vector_store/vector_indices.py
+++ b/gpt_index/indices/vector_store/vector_indices.py
@@ -1,6 +1,6 @@
 """Deprecated vector store indices."""
 
-from typing import Any, Dict, Optional, Sequence, Type, cast
+from typing import Any, Dict, Optional, Sequence, Type
 
 from requests.adapters import Retry
 
@@ -16,18 +16,7 @@ from gpt_index.data_structs.data_structs_v2 import (
     WeaviateIndexDict,
 )
 from gpt_index.data_structs.node_v2 import Node
-from gpt_index.indices.base import BaseGPTIndex, QueryMap
-from gpt_index.indices.query.schema import QueryMode
-from gpt_index.indices.vector_store.queries import (
-    ChatGPTRetrievalPluginQuery,
-    GPTChromaIndexQuery,
-    GPTFaissIndexQuery,
-    GPTOpensearchIndexQuery,
-    GPTPineconeIndexQuery,
-    GPTQdrantIndexQuery,
-    GPTSimpleVectorIndexQuery,
-    GPTWeaviateIndexQuery,
-)
+from gpt_index.indices.base import BaseGPTIndex
 from gpt_index.indices.service_context import ServiceContext
 from gpt_index.indices.vector_store.base import GPTVectorStoreIndex
 from gpt_index.vector_stores import (
@@ -71,21 +60,10 @@ class GPTSimpleVectorIndex(GPTVectorStoreIndex):
         nodes: Optional[Sequence[Node]] = None,
         index_struct: Optional[IndexDict] = None,
         service_context: Optional[ServiceContext] = None,
-        simple_vector_store_data_dict: Optional[dict] = None,
+        vector_store: Optional[SimpleVectorStore] = None,
         **kwargs: Any,
     ) -> None:
         """Init params."""
-        # TODO: temporary hack to "infer" vector store from
-        # index struct if index_struct exists
-        if index_struct is not None and len(index_struct.embeddings_dict) > 0:
-            simple_vector_store_data_dict = {
-                "embedding_dict": index_struct.embeddings_dict,
-            }
-
-        vector_store = SimpleVectorStore(
-            simple_vector_store_data_dict=simple_vector_store_data_dict
-        )
-
         super().__init__(
             nodes=nodes,
             index_struct=index_struct,
@@ -94,25 +72,6 @@ class GPTSimpleVectorIndex(GPTVectorStoreIndex):
             **kwargs,
         )
 
-        # TODO: Temporary hack to also store embeddings in index_struct
-        embedding_dict = vector_store._data.embedding_dict
-        self._index_struct.embeddings_dict = embedding_dict
-
-    @classmethod
-    def get_query_map(self) -> QueryMap:
-        """Get query map."""
-        return {
-            QueryMode.DEFAULT: GPTSimpleVectorIndexQuery,
-            QueryMode.EMBEDDING: GPTSimpleVectorIndexQuery,
-        }
-
-    def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None:
-        """Preprocess query."""
-        super()._preprocess_query(mode, query_kwargs)
-        del query_kwargs["vector_store"]
-        vector_store = cast(SimpleVectorStore, self._vector_store)
-        query_kwargs["simple_vector_store_data_dict"] = vector_store._data
-
 
 class GPTFaissIndex(GPTVectorStoreIndex):
     """GPT Faiss Index.
@@ -157,21 +116,6 @@ class GPTFaissIndex(GPTVectorStoreIndex):
             **kwargs,
         )
 
-    @classmethod
-    def get_query_map(self) -> QueryMap:
-        """Get query map."""
-        return {
-            QueryMode.DEFAULT: GPTFaissIndexQuery,
-            QueryMode.EMBEDDING: GPTFaissIndexQuery,
-        }
-
-    def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None:
-        """Preprocess query."""
-        super()._preprocess_query(mode, query_kwargs)
-        del query_kwargs["vector_store"]
-        vector_store = cast(FaissVectorStore, self._vector_store)
-        query_kwargs["faiss_index"] = vector_store._faiss_index
-
     @classmethod
     def load_from_disk(
         cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
@@ -260,6 +204,8 @@ class GPTPineconeIndex(GPTVectorStoreIndex):
         self,
         nodes: Optional[Sequence[Node]] = None,
         pinecone_index: Optional[Any] = None,
+        index_name: Optional[str] = None,
+        environment: Optional[str] = None,
         metadata_filters: Optional[Dict[str, Any]] = None,
         pinecone_kwargs: Optional[Dict] = None,
         insert_kwargs: Optional[Dict] = None,
@@ -267,25 +213,24 @@ class GPTPineconeIndex(GPTVectorStoreIndex):
         delete_kwargs: Optional[Dict] = None,
         index_struct: Optional[IndexDict] = None,
         service_context: Optional[ServiceContext] = None,
+        vector_store: Optional[PineconeVectorStore] = None,
         **kwargs: Any,
     ) -> None:
         """Init params."""
-        if pinecone_index is None:
-            raise ValueError("pinecone_index is required.")
-        if pinecone_kwargs is None:
-            pinecone_kwargs = {}
-
-        vector_store = kwargs.pop(
-            "vector_store",
-            PineconeVectorStore(
+        pinecone_kwargs = pinecone_kwargs or {}
+
+        if vector_store is None:
+            vector_store = PineconeVectorStore(
                 pinecone_index=pinecone_index,
+                index_name=index_name,
+                environment=environment,
                 metadata_filters=metadata_filters,
                 pinecone_kwargs=pinecone_kwargs,
                 insert_kwargs=insert_kwargs,
                 query_kwargs=query_kwargs,
                 delete_kwargs=delete_kwargs,
-            ),
-        )
+            )
+        assert vector_store is not None
 
         super().__init__(
             nodes=nodes,
@@ -295,26 +240,6 @@ class GPTPineconeIndex(GPTVectorStoreIndex):
             **kwargs,
         )
 
-    @classmethod
-    def get_query_map(self) -> QueryMap:
-        """Get query map."""
-        return {
-            QueryMode.DEFAULT: GPTPineconeIndexQuery,
-            QueryMode.EMBEDDING: GPTPineconeIndexQuery,
-        }
-
-    def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None:
-        """Preprocess query."""
-        super()._preprocess_query(mode, query_kwargs)
-        del query_kwargs["vector_store"]
-        vector_store = cast(PineconeVectorStore, self._vector_store)
-        query_kwargs["pinecone_index"] = vector_store._pinecone_index
-        query_kwargs["metadata_filters"] = vector_store._metadata_filters
-        query_kwargs["pinecone_kwargs"] = vector_store._pinecone_kwargs
-        query_kwargs["insert_kwargs"] = vector_store._insert_kwargs
-        query_kwargs["query_kwargs"] = vector_store._query_kwargs
-        query_kwargs["delete_kwargs"] = vector_store._delete_kwargs
-
 
 class GPTWeaviateIndex(GPTVectorStoreIndex):
     """GPT Weaviate Index.
@@ -360,22 +285,6 @@ class GPTWeaviateIndex(GPTVectorStoreIndex):
             **kwargs,
         )
 
-    @classmethod
-    def get_query_map(self) -> QueryMap:
-        """Get query map."""
-        return {
-            QueryMode.DEFAULT: GPTWeaviateIndexQuery,
-            QueryMode.EMBEDDING: GPTWeaviateIndexQuery,
-        }
-
-    def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None:
-        """Preprocess query."""
-        super()._preprocess_query(mode, query_kwargs)
-        del query_kwargs["vector_store"]
-        vector_store = cast(WeaviateVectorStore, self._vector_store)
-        query_kwargs["weaviate_client"] = vector_store._client
-        query_kwargs["class_prefix"] = vector_store._class_prefix
-
 
 class GPTQdrantIndex(GPTVectorStoreIndex):
     """GPT Qdrant Index.
@@ -423,22 +332,6 @@ class GPTQdrantIndex(GPTVectorStoreIndex):
             **kwargs,
         )
 
-    @classmethod
-    def get_query_map(self) -> QueryMap:
-        """Get query map."""
-        return {
-            QueryMode.DEFAULT: GPTQdrantIndexQuery,
-            QueryMode.EMBEDDING: GPTQdrantIndexQuery,
-        }
-
-    def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None:
-        """Preprocess query."""
-        super()._preprocess_query(mode, query_kwargs)
-        del query_kwargs["vector_store"]
-        vector_store = cast(QdrantVectorStore, self._vector_store)
-        query_kwargs["client"] = vector_store._client
-        query_kwargs["collection_name"] = vector_store._collection_name
-
 
 class GPTChromaIndex(GPTVectorStoreIndex):
     """GPT Chroma Index.
@@ -483,21 +376,6 @@ class GPTChromaIndex(GPTVectorStoreIndex):
             **kwargs,
         )
 
-    @classmethod
-    def get_query_map(self) -> QueryMap:
-        """Get query map."""
-        return {
-            QueryMode.DEFAULT: GPTChromaIndexQuery,
-            QueryMode.EMBEDDING: GPTChromaIndexQuery,
-        }
-
-    def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None:
-        """Preprocess query."""
-        super()._preprocess_query(mode, query_kwargs)
-        del query_kwargs["vector_store"]
-        vector_store = cast(ChromaVectorStore, self._vector_store)
-        query_kwargs["chroma_collection"] = vector_store._collection
-
 
 class GPTOpensearchIndex(GPTVectorStoreIndex):
     """GPT Opensearch Index.
@@ -546,21 +424,6 @@ class GPTOpensearchIndex(GPTVectorStoreIndex):
             **kwargs,
         )
 
-    @classmethod
-    def get_query_map(self) -> QueryMap:
-        """Get query map."""
-        return {
-            QueryMode.DEFAULT: GPTOpensearchIndexQuery,
-            QueryMode.EMBEDDING: GPTOpensearchIndexQuery,
-        }
-
-    def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None:
-        """Preprocess query."""
-        super()._preprocess_query(mode, query_kwargs)
-        del query_kwargs["vector_store"]
-        vector_store = cast(OpensearchVectorStore, self._vector_store)
-        query_kwargs["client"] = vector_store._client
-
 
 class ChatGPTRetrievalPluginIndex(GPTVectorStoreIndex):
     """ChatGPTRetrievalPlugin index.
@@ -609,21 +472,3 @@ class ChatGPTRetrievalPluginIndex(GPTVectorStoreIndex):
             vector_store=vector_store,
             **kwargs,
         )
-
-    @classmethod
-    def get_query_map(self) -> QueryMap:
-        """Get query map."""
-        return {
-            QueryMode.DEFAULT: ChatGPTRetrievalPluginQuery,
-            QueryMode.EMBEDDING: ChatGPTRetrievalPluginQuery,
-        }
-
-    def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None:
-        """Preprocess query."""
-        super()._preprocess_query(mode, query_kwargs)
-        del query_kwargs["vector_store"]
-        vector_store = cast(ChatGPTRetrievalPluginClient, self._vector_store)
-        query_kwargs["endpoint_url"] = vector_store._endpoint_url
-        query_kwargs["bearer_token"] = vector_store._bearer_token
-        query_kwargs["retries"] = vector_store._retries
-        query_kwargs["batch_size"] = vector_store._batch_size
diff --git a/gpt_index/vector_stores/chatgpt_plugin.py b/gpt_index/vector_stores/chatgpt_plugin.py
index 6055087e70f11f1f40fcb9616db02e6ece7fa314..8dbf0fc1638fc2547c1857e4591056d79536d284 100644
--- a/gpt_index/vector_stores/chatgpt_plugin.py
+++ b/gpt_index/vector_stores/chatgpt_plugin.py
@@ -81,6 +81,10 @@ class ChatGPTRetrievalPluginClient(VectorStore):
         self._s = requests.Session()
         self._s.mount("http://", HTTPAdapter(max_retries=self._retries))
 
+    @classmethod
+    def from_dict(cls, config_dict: Dict[str, Any]) -> "VectorStore":
+        return cls(**config_dict)
+
     @property
     def client(self) -> None:
         """Get client."""
@@ -89,7 +93,11 @@ class ChatGPTRetrievalPluginClient(VectorStore):
     @property
     def config_dict(self) -> dict:
         """Get config dict."""
-        return {"batch_size": self._batch_size}
+        return {
+            "endpoint_url": self._endpoint_url,
+            "batch_size": self._batch_size,
+            "retries": self._retries,
+        }
 
     def add(
         self,
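
# A minimal round-trip sketch for the from_dict/config_dict pair above.
# config_dict deliberately omits bearer_token (a secret), so the caller
# re-injects it at load time. Endpoint URL and token are hypothetical, and
# the constructor kwargs are assumed from the attributes used above.
from gpt_index.vector_stores.chatgpt_plugin import ChatGPTRetrievalPluginClient

store = ChatGPTRetrievalPluginClient(
    endpoint_url="http://localhost:8000", bearer_token="secret-token"
)
saved = store.config_dict  # {"endpoint_url": ..., "batch_size": ..., "retries": ...}
restored = ChatGPTRetrievalPluginClient.from_dict(
    {**saved, "bearer_token": "secret-token"}
)
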
diff --git a/gpt_index/vector_stores/chroma.py b/gpt_index/vector_stores/chroma.py
index 000baee6df23b3db71a7e9808b7ce9da221858b8..17ce84483465bc3febfa10b086403448f90e7204 100644
--- a/gpt_index/vector_stores/chroma.py
+++ b/gpt_index/vector_stores/chroma.py
@@ -1,7 +1,7 @@
 """Chroma vector store."""
 import logging
 import math
-from typing import Any, List, Optional, cast
+from typing import Any, Dict, List, Optional, cast
 
 from gpt_index.data_structs.node_v2 import DocumentRelationship, Node
 from gpt_index.utils import truncate_text
@@ -43,6 +43,12 @@ class ChromaVectorStore(VectorStore):
 
         self._collection = cast(Collection, chroma_collection)
 
+    @classmethod
+    def from_dict(cls, config_dict: Dict[str, Any]) -> "VectorStore":
+        if "chroma_collection" not in config_dict:
+            raise ValueError("Missing chroma collection!")
+        return cls(**config_dict)
+
     @property
     def config_dict(self) -> dict:
         """Return config dict."""
diff --git a/gpt_index/vector_stores/faiss.py b/gpt_index/vector_stores/faiss.py
index 123da926002e81da6f2eeff87a8ee2518f448060..b84e0e3a2b29b3b5f81a3dd86104e1c6a4deac69 100644
--- a/gpt_index/vector_stores/faiss.py
+++ b/gpt_index/vector_stores/faiss.py
@@ -4,7 +4,7 @@ An index that is built on top of an existing vector store.
 
 """
 
-from typing import Any, List, Optional, cast
+from typing import Any, Dict, List, Optional, cast
 
 import numpy as np
 
@@ -30,10 +30,7 @@ class FaissVectorStore(VectorStore):
 
     stores_text: bool = False
 
-    def __init__(
-        self,
-        faiss_index: Any,
-    ) -> None:
+    def __init__(self, faiss_index: Any, save_path: Optional[str] = None) -> None:
         """Initialize params."""
         import_err_msg = """
             `faiss` package not found. For instructions on
@@ -46,11 +43,24 @@ class FaissVectorStore(VectorStore):
             raise ImportError(import_err_msg)
 
         self._faiss_index = cast(faiss.Index, faiss_index)
+        self._save_path = save_path or "./faiss.json"
+
+    @classmethod
+    def from_dict(cls, config_dict: Dict[str, Any]) -> "FaissVectorStore":
+        if "faiss_index" in config_dict:
+            return cls(**config_dict)
+        else:
+            save_path = config_dict.get("save_path", None)
+            if save_path is not None:
+                return cls.load(save_path=save_path)
+            else:
+                raise ValueError("Missing both faiss index and save path!")
 
     @property
     def config_dict(self) -> dict:
         """Return config dict."""
-        return {}
+        self.save(self._save_path)
+        return {"save_path": self._save_path}
 
     def add(
         self,
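
# Round-trip sketch of the Faiss persistence pattern above: reading
# config_dict persists the index to save_path as a side effect and returns
# only the path, which from_dict then uses to rebuild the store via the
# save()/load() helpers referenced above (defined elsewhere in faiss.py).
# Keeping only the path makes config_dict JSON-serializable. Dimension and
# path here are hypothetical.
import faiss
from gpt_index.vector_stores.faiss import FaissVectorStore

store = FaissVectorStore(faiss.IndexFlatL2(128), save_path="./faiss.json")
saved = store.config_dict                     # writes ./faiss.json
restored = FaissVectorStore.from_dict(saved)  # reloads from "./faiss.json"
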
diff --git a/gpt_index/vector_stores/opensearch.py b/gpt_index/vector_stores/opensearch.py
index 658d9d7060e8323d80639e2cd3e6af477b512fdc..13f413f83f3509e9548b0461694f8f6e13760ec7 100644
--- a/gpt_index/vector_stores/opensearch.py
+++ b/gpt_index/vector_stores/opensearch.py
@@ -156,6 +156,12 @@ class OpensearchVectorStore(VectorStore):
             raise ImportError(import_err_msg)
         self._client = client
 
+    @classmethod
+    def from_dict(cls, config_dict: Dict[str, Any]) -> "VectorStore":
+        if "client" not in config_dict:
+            raise ValueError("Missing Opensearch client!")
+        return cls(**config_dict)
+
     @property
     def client(self) -> Any:
         """Get client."""
diff --git a/gpt_index/vector_stores/pinecone.py b/gpt_index/vector_stores/pinecone.py
index d0d56e98f89579e0b6c6bc72de8a4a9487d7d143..7b2c44907d0ba1878702e29947cc8d135606507a 100644
--- a/gpt_index/vector_stores/pinecone.py
+++ b/gpt_index/vector_stores/pinecone.py
@@ -4,6 +4,7 @@ An index that is built on top of an existing vector store.
 
 """
 
+import os
 from typing import Any, Dict, List, Optional, cast
 
 from gpt_index.data_structs.node_v2 import Node, DocumentRelationship
@@ -12,6 +13,9 @@ from gpt_index.vector_stores.types import (
     VectorStore,
     VectorStoreQueryResult,
 )
+import logging
+
+_logger = logging.getLogger(__name__)
 
 
 def get_metadata_from_node_info(
@@ -60,11 +64,14 @@ class PineconeVectorStore(VectorStore):
     def __init__(
         self,
         pinecone_index: Optional[Any] = None,
+        index_name: Optional[str] = None,
+        environment: Optional[str] = None,
         metadata_filters: Optional[Dict[str, Any]] = None,
         pinecone_kwargs: Optional[Dict] = None,
         insert_kwargs: Optional[Dict] = None,
         query_kwargs: Optional[Dict] = None,
         delete_kwargs: Optional[Dict] = None,
+        **kwargs: Any,
     ) -> None:
         """Initialize params."""
         import_err_msg = (
@@ -74,7 +81,32 @@ class PineconeVectorStore(VectorStore):
             import pinecone  # noqa: F401
         except ImportError:
             raise ImportError(import_err_msg)
-        self._pinecone_index = cast(pinecone.Index, pinecone_index)
+
+        self._index_name = index_name
+        self._environment = environment
+        if pinecone_index is not None:
+            self._pinecone_index = cast(pinecone.Index, pinecone_index)
+            _logger.warning(
+                "If directly passing in client, cannot automatically reconstruct "
+                "connection after save_to_disk/load_from_disk. "
+                "For automatic reload, store PINECONE_API_KEY in an env variable "
+                "and pass in index_name and environment instead."
+            )
+        else:
+            if "PINECONE_API_KEY" not in os.environ:
+                raise ValueError(
+                    "Must specify PINECONE_API_KEY via env variable "
+                    "if not directly passing in client."
+                )
+            if index_name is None or environment is None:
+                raise ValueError(
+                    "Must specify index_name and environment "
+                    "if not directly passing in client."
+                )
+
+            pinecone.init(environment=environment)
+            self._pinecone_index = pinecone.Index(index_name)
+
         self._metadata_filters = metadata_filters or {}
         self._pinecone_kwargs = pinecone_kwargs or {}
         if pinecone_kwargs and (insert_kwargs or query_kwargs or delete_kwargs):
@@ -91,10 +123,16 @@ class PineconeVectorStore(VectorStore):
             self._query_kwargs = query_kwargs or {}
             self._delete_kwargs = delete_kwargs or {}
 
+    @classmethod
+    def from_dict(cls, config_dict: Dict[str, Any]) -> "VectorStore":
+        return cls(**config_dict)
+
     @property
     def config_dict(self) -> dict:
         """Return config dict."""
         return {
+            "index_name": self._index_name,
+            "environment": self._environment,
             "metadata_filters": self._metadata_filters,
             "pinecone_kwargs": self._pinecone_kwargs,
             "insert_kwargs": self._insert_kwargs,
diff --git a/gpt_index/vector_stores/qdrant.py b/gpt_index/vector_stores/qdrant.py
index 0b2fd8efdc63a93e41c8eaddb3f60b279c6e702d..0ae5556bef09f4ab59f76f1546d4907bbd8ac544 100644
--- a/gpt_index/vector_stores/qdrant.py
+++ b/gpt_index/vector_stores/qdrant.py
@@ -4,7 +4,7 @@ An index that is built on top of an existing Qdrant collection.
 
 """
 import logging
-from typing import Any, List, Optional, cast
+from typing import Any, Dict, List, Optional, cast
 
 from gpt_index.data_structs.node_v2 import DocumentRelationship, Node
 from gpt_index.utils import get_new_id
@@ -46,12 +46,18 @@ class QdrantVectorStore(VectorStore):
             raise ImportError(import_err_msg)
 
         if client is None:
-            raise ValueError("client cannot be None.")
+            raise ValueError("Missing Qdrant client!")
 
         self._client = cast(qdrant_client.QdrantClient, client)
         self._collection_name = collection_name
         self._collection_initialized = self._collection_exists(collection_name)
 
+    @classmethod
+    def from_dict(cls, config_dict: Dict[str, Any]) -> "VectorStore":
+        if "client" not in config_dict:
+            raise ValueError("Missing Qdrant client!")
+        return cls(**config_dict)
+
     @property
     def config_dict(self) -> dict:
         """Return config dict."""
diff --git a/gpt_index/vector_stores/registry.py b/gpt_index/vector_stores/registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..8309628285666a1a35e59c181137c0159396a2e3
--- /dev/null
+++ b/gpt_index/vector_stores/registry.py
@@ -0,0 +1,67 @@
+from enum import Enum
+from typing import Any, Dict, Optional, Type
+from gpt_index.constants import DATA_KEY, TYPE_KEY
+from gpt_index.vector_stores.chatgpt_plugin import ChatGPTRetrievalPluginClient
+from gpt_index.vector_stores.chroma import ChromaVectorStore
+from gpt_index.vector_stores.faiss import FaissVectorStore
+from gpt_index.vector_stores.opensearch import OpensearchVectorStore
+from gpt_index.vector_stores.pinecone import PineconeVectorStore
+from gpt_index.vector_stores.qdrant import QdrantVectorStore
+from gpt_index.vector_stores.simple import SimpleVectorStore
+
+from gpt_index.vector_stores.types import VectorStore
+from gpt_index.vector_stores.weaviate import WeaviateVectorStore
+
+
+class VectorStoreType(str, Enum):
+    SIMPLE = "simple"
+    WEAVIATE = "weaviate"
+    QDRANT = "qdrant"
+    PINECONE = "pinecone"
+    OPENSEARCH = "opensearch"
+    FAISS = "faiss"
+    CHROMA = "chroma"
+    CHATGPT_PLUGIN = "chatgpt_plugin"
+
+
+VECTOR_STORE_TYPE_TO_VECTOR_STORE_CLASS: Dict[VectorStoreType, Type[VectorStore]] = {
+    VectorStoreType.SIMPLE: SimpleVectorStore,
+    VectorStoreType.WEAVIATE: WeaviateVectorStore,
+    VectorStoreType.QDRANT: QdrantVectorStore,
+    VectorStoreType.PINECONE: PineconeVectorStore,
+    VectorStoreType.OPENSEARCH: OpensearchVectorStore,
+    VectorStoreType.FAISS: FaissVectorStore,
+    VectorStoreType.CHROMA: ChromaVectorStore,
+    VectorStoreType.CHATGPT_PLUGIN: ChatGPTRetrievalPluginClient,
+}
+
+VECTOR_STORE_CLASS_TO_VECTOR_STORE_TYPE: Dict[Type[VectorStore], VectorStoreType] = {
+    cls_: type_ for type_, cls_ in VECTOR_STORE_TYPE_TO_VECTOR_STORE_CLASS.items()
+}
+
+
+def load_vector_store_from_dict(
+    vector_store_dict: Dict[str, Any],
+    type_to_cls: Optional[Dict[VectorStoreType, Type[VectorStore]]] = None,
+    **kwargs: Any,
+) -> VectorStore:
+    type_to_cls = type_to_cls or VECTOR_STORE_TYPE_TO_VECTOR_STORE_CLASS
+    type_ = vector_store_dict[TYPE_KEY]
+    config_dict: Dict[str, Any] = vector_store_dict[DATA_KEY]
+
+    # Inject kwargs into data dict.
+    # This allows us to explicitly pass in unserializable objects
+    # like the vector store client.
+    config_dict.update(kwargs)
+
+    cls = type_to_cls[type_]
+    return cls.from_dict(config_dict)
+
+
+def save_vector_store_to_dict(
+    vector_store: VectorStore,
+    cls_to_type: Optional[Dict[Type[VectorStore], VectorStoreType]] = None,
+) -> Dict[str, Any]:
+    cls_to_type = cls_to_type or VECTOR_STORE_CLASS_TO_VECTOR_STORE_TYPE
+    type_ = cls_to_type[type(vector_store)]
+    return {TYPE_KEY: type_, DATA_KEY: vector_store.config_dict}
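
# End-to-end sketch of the two registry helpers above, using SimpleVectorStore
# (fully serializable, so no kwargs injection is needed). This assumes
# SimpleVectorStore() with no args builds an empty store, per its __init__.
from gpt_index.vector_stores.registry import (
    load_vector_store_from_dict,
    save_vector_store_to_dict,
)
from gpt_index.vector_stores.simple import SimpleVectorStore

store = SimpleVectorStore()
blob = save_vector_store_to_dict(store)
# -> {TYPE_KEY: VectorStoreType.SIMPLE, DATA_KEY: store.config_dict}
restored = load_vector_store_from_dict(blob)
assert isinstance(restored, SimpleVectorStore)
# Stores wrapping live clients re-inject them via kwargs, e.g.:
#     load_vector_store_from_dict(blob, chroma_collection=collection)
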
diff --git a/gpt_index/vector_stores/simple.py b/gpt_index/vector_stores/simple.py
index 318c8289f76ed111a36a6ecd6cc4f3c40e1eb4c9..865cb77dd67dc071ee2225ced3fbdd14fe6039c5 100644
--- a/gpt_index/vector_stores/simple.py
+++ b/gpt_index/vector_stores/simple.py
@@ -51,6 +51,10 @@ class SimpleVectorStore(VectorStore):
         else:
             self._data = SimpleVectorStoreData.from_dict(simple_vector_store_data_dict)
 
+    @classmethod
+    def from_dict(cls, config_dict: Dict[str, Any]) -> "SimpleVectorStore":
+        return cls(**config_dict)
+
     @property
     def client(self) -> None:
         """Get client."""
diff --git a/gpt_index/vector_stores/types.py b/gpt_index/vector_stores/types.py
index 9b7bad5a258573aed0604259da112632e8b4899b..3485e10cbacca08903a81c2a04d9a7dbf72e5049 100644
--- a/gpt_index/vector_stores/types.py
+++ b/gpt_index/vector_stores/types.py
@@ -2,7 +2,7 @@
 
 
 from dataclasses import dataclass
-from typing import Any, List, Optional, Protocol
+from typing import Any, Dict, List, Optional, Protocol, runtime_checkable
 
 from gpt_index.data_structs.node_v2 import Node
 
@@ -34,12 +34,17 @@ class VectorStoreQueryResult:
     ids: Optional[List[str]] = None
 
 
+@runtime_checkable
 class VectorStore(Protocol):
     """Abstract vector store protocol."""
 
     stores_text: bool
     is_embedding_query: bool = True
 
+    @classmethod
+    def from_dict(cls, config_dict: Dict[str, Any]) -> "VectorStore":
+        ...
+
     @property
     def client(self) -> Any:
         """Get client."""
diff --git a/gpt_index/vector_stores/weaviate.py b/gpt_index/vector_stores/weaviate.py
index bdd3968268f3ba925b4c942d765166ad955c9854..65e68c559fed6d9dfcbebd012d9218468965241e 100644
--- a/gpt_index/vector_stores/weaviate.py
+++ b/gpt_index/vector_stores/weaviate.py
@@ -4,7 +4,7 @@ An index that is built on top of an existing vector store.
 
 """
 
-from typing import Any, List, Optional, cast
+from typing import Any, Dict, List, Optional, cast
 
 from gpt_index.readers.weaviate.data_structs import WeaviateNode
 from gpt_index.readers.weaviate.utils import get_default_class_prefix
@@ -49,6 +49,9 @@ class WeaviateVectorStore(VectorStore):
         except ImportError:
             raise ImportError(import_err_msg)
 
+        if weaviate_client is None:
+            raise ValueError("Missing Weaviate client!")
+
         self._client = cast(Client, weaviate_client)
         # validate class prefix starts with a capital letter
         if class_prefix is not None and not class_prefix[0].isupper():
@@ -59,6 +62,12 @@ class WeaviateVectorStore(VectorStore):
         # try to create schema
         WeaviateNode.create_schema(self._client, self._class_prefix)
 
+    @classmethod
+    def from_dict(cls, config_dict: Dict[str, Any]) -> "VectorStore":
+        if "weaviate_client" not in config_dict:
+            raise ValueError("Missing Weaviate client!")
+        return cls(**config_dict)
+
     @property
     def client(self) -> Any:
         """Get client."""
diff --git a/tests/indices/composability/__init__.py b/tests/indices/composability/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/indices/composability/test_utils.py b/tests/indices/composability/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f15715180c698f37f9d217cf1eb2ef7666d4d09
--- /dev/null
+++ b/tests/indices/composability/test_utils.py
@@ -0,0 +1,113 @@
+from typing import Any, Dict, List, Optional
+
+from gpt_index.constants import DATA_KEY, TYPE_KEY
+from gpt_index.indices.composability.utils import (
+    load_query_context_from_dict,
+    save_query_context_to_dict,
+)
+from gpt_index.vector_stores.types import (
+    NodeEmbeddingResult,
+    VectorStore,
+    VectorStoreQueryResult,
+)
+
+
+class MockVectorStore(VectorStore):
+    stores_text: bool = True
+
+    def __init__(self, config_dict: Optional[Dict[str, Any]] = None) -> None:
+        self._config_dict = config_dict or {
+            "attr1": 0,
+            "attr2": "attr2_val",
+        }
+
+    @classmethod
+    def from_dict(cls, config_dict: Dict[str, Any]) -> "MockVectorStore":
+        return cls(config_dict)
+
+    @property
+    def config_dict(self) -> Dict[str, Any]:
+        return self._config_dict
+
+    @property
+    def client(self) -> Any:
+        """Get client."""
+        return None
+
+    def add(
+        self,
+        embedding_results: List[NodeEmbeddingResult],
+    ) -> List[str]:
+        """Add embedding results to vector store."""
+        raise NotImplementedError()
+
+    def delete(self, doc_id: str, **delete_kwargs: Any) -> None:
+        """Delete doc."""
+        raise NotImplementedError()
+
+    def query(
+        self,
+        query_embedding: List[float],
+        similarity_top_k: int,
+        doc_ids: Optional[List[str]] = None,
+        query_str: Optional[str] = None,
+    ) -> VectorStoreQueryResult:
+        """Query vector store."""
+        raise NotImplementedError()
+
+
+def test_save_query_context_to_dict() -> None:
+    """Test save query context to dict."""
+    vector_store = MockVectorStore()
+    query_context = {"test_index_id": {"vector_store": vector_store}}
+
+    expected_dict = {
+        "test_index_id": {
+            "vector_store": {
+                TYPE_KEY: "mock_type",
+                DATA_KEY: vector_store.config_dict,
+            }
+        }
+    }
+
+    save_dict = save_query_context_to_dict(
+        query_context,
+        vector_store_cls_to_type={MockVectorStore: "mock_type"},  # type:ignore
+    )
+
+    assert save_dict == expected_dict
+
+
+def test_load_query_context_from_dict() -> None:
+    """Test load query context from dict."""
+    vector_store = MockVectorStore()
+
+    save_dict = {
+        "test_index_id": {
+            "vector_store": {
+                TYPE_KEY: "mock_type",
+                DATA_KEY: vector_store.config_dict,
+            }
+        }
+    }
+
+    # Test without kwargs
+    query_context = load_query_context_from_dict(
+        save_dict,
+        vector_store_type_to_cls={"mock_type": MockVectorStore},  # type:ignore
+    )
+    loaded_vector_store = query_context["test_index_id"]["vector_store"]
+    assert isinstance(loaded_vector_store, MockVectorStore)
+
+    # Test with kwargs
+    query_context_kwargs = {
+        "test_index_id": {"vector_store": {"extra_key": "extra_value"}}
+    }
+    query_context = load_query_context_from_dict(
+        save_dict,
+        vector_store_type_to_cls={"mock_type": MockVectorStore},  # type:ignore
+        query_context_kwargs=query_context_kwargs,
+    )
+    loaded_vector_store = query_context["test_index_id"]["vector_store"]
+    assert isinstance(loaded_vector_store, MockVectorStore)
+    assert loaded_vector_store.config_dict["extra_key"] == "extra_value"