diff --git a/docs/docs/examples/agent/agent_builder.ipynb b/docs/docs/examples/agent/agent_builder.ipynb
index 57f8f3f55daa67a8dbc6617dc8c600bb9a800041..ae451806bcfa7d361e5a050f8e2f93bf163540e9 100644
--- a/docs/docs/examples/agent/agent_builder.ipynb
+++ b/docs/docs/examples/agent/agent_builder.ipynb
@@ -49,7 +49,8 @@
     "from llama_index.llms.openai import OpenAI\n",
     "from llama_index.core import Settings\n",
     "\n",
-    "Settings.llm = OpenAI(model=\"gpt-4\")\n",
+    "llm = OpenAI(model=\"gpt-4\")\n",
+    "Settings.llm = llm\n",
     "Settings.embed_model = OpenAIEmbedding(model=\"text-embedding-3-small\")"
    ]
   },
@@ -193,13 +194,11 @@
    "source": [
     "# define an \"object\" index and retriever over these tools\n",
     "from llama_index.core import VectorStoreIndex\n",
-    "from llama_index.core.objects import ObjectIndex, SimpleToolNodeMapping\n",
+    "from llama_index.core.objects import ObjectIndex\n",
     "\n",
-    "tool_mapping = SimpleToolNodeMapping.from_objects(list(tool_dict.values()))\n",
     "tool_index = ObjectIndex.from_objects(\n",
     "    list(tool_dict.values()),\n",
-    "    tool_mapping,\n",
-    "    VectorStoreIndex,\n",
+    "    index_cls=VectorStoreIndex,\n",
     ")\n",
     "tool_retriever = tool_index.as_retriever(similarity_top_k=1)"
    ]
@@ -337,20 +336,19 @@
       "Calling function: create_system_prompt with args: {\n",
       "  \"task\": \"tell me about Toronto\"\n",
       "}\n",
-      "Got output: System Prompt: \n",
-      "\n",
-      "\"Sure! I can provide you with information about Toronto. Toronto is the capital city of the province of Ontario, Canada. It is the largest city in Canada and one of the most multicultural cities in the world. Known for its diverse population, vibrant arts and culture scene, and iconic landmarks, Toronto offers a unique blend of modernity and history.\n",
-      "\n",
-      "Toronto is home to the CN Tower, which is one of the tallest freestanding structures in the world and offers breathtaking views of the city. The city also boasts beautiful waterfront areas, such as the Harbourfront Centre and the Toronto Islands, where visitors can enjoy recreational activities and scenic views.\n",
-      "\n",
-      "In terms of arts and culture, Toronto is renowned for its theaters, including the Royal Alexandra Theatre and the Princess of Wales Theatre, which host a variety of Broadway shows and musicals. The city is also home to numerous museums and galleries, such as the Art Gallery of Ontario and the Royal Ontario Museum, where visitors can explore a wide range of art and historical artifacts.\n",
-      "\n",
-      "Toronto is a sports-loving city, with professional sports teams in hockey, basketball, baseball, and soccer. The Toronto Maple Leafs, Toronto Raptors, Toronto Blue Jays, and Toronto FC are some of the popular teams that attract passionate fans.\n",
+      "Got output: \"Generate a brief summary about Toronto, including its history, culture, landmarks, and notable features.\"\n",
+      "========================\n",
       "\n",
-      "The city's diverse culinary scene offers a wide range of international cuisines, reflecting the multiculturalism of its residents. From fine dining restaurants to street food vendors, Toronto has something to satisfy every palate.\n",
+      "=== Calling Function ===\n",
+      "Calling function: get_tools with args: {\n",
+      "  \"task\": \"tell me about Toronto\"\n",
+      "}\n",
+      "Got output: ['Toronto']\n",
+      "========================\n",
       "\n",
-      "Whether you're interested in exploring its vibrant neighborhoods, shopping in trendy boutiques, or attending exciting festivals and events, Toronto has something for everyone. Let me know if there's anything specific you'd like to know about Toronto!\"\n",
-      "  \"system_prompt\": \"Sure! I can provide you with information about Toronto. Toronto is the capital city of the province of Ontario, Canada. It is the largest city in Canada and one of the most multicultural cities in the world. Known for its diverse population, vibrant arts and culture scene, and iconic landmarks, Toronto offers a unique blend of modernity and history.\\n\\nToronto is home to the CN Tower, which is one of the tallest freestanding structures in the world and offers breathtaking views of the city. The city also boasts beautiful waterfront areas, such as the Harbourfront Centre and the Toronto Islands, where visitors can enjoy recreational activities and scenic views.\\n\\nIn terms of arts and culture, Toronto is renowned for its theaters, including the Royal Alexandra Theatre and the Princess of Wales Theatre, which host a variety of Broadway shows and musicals. The city is also home to numerous museums and galleries, such as the Art Gallery of Ontario and the Royal Ontario Museum, where visitors can explore a wide range of art and historical artifacts.\\n\\nToronto is a sports-loving city, with professional sports teams in hockey, basketball, baseball, and soccer. The Toronto Maple Leafs, Toronto Raptors, Toronto Blue Jays, and Toronto FC are some of the popular teams that attract passionate fans.\\n\\nThe city's diverse culinary scene offers a wide range of international cuisines, reflecting the multiculturalism of its residents. From fine dining restaurants to street food vendors, Toronto has something to satisfy every palate.\\n\\nWhether you're interested in exploring its vibrant neighborhoods, shopping in trendy boutiques, or attending exciting festivals and events, Toronto has something for everyone. Let me know if there's anything specific you'd like to know about Toronto!\",\n",
+      "=== Calling Function ===\n",
+      "Calling function: create_agent with args: {\n",
+      "  \"system_prompt\": \"Generate a brief summary about Toronto, including its history, culture, landmarks, and notable features.\",\n",
       "  \"tool_names\": [\"Toronto\"]\n",
       "}\n",
       "Got output: Agent created successfully.\n",
@@ -361,7 +359,7 @@
     {
      "data": {
       "text/plain": [
-       "Response(response='The agent has been successfully created. It can provide detailed information about Toronto, including its history, culture, landmarks, sports teams, and culinary scene. If you have any specific questions or need more information about Toronto, feel free to ask the agent.', source_nodes=[], metadata=None)"
+       "Response(response='The agent has been successfully created. It can now provide information about Toronto, including its history, culture, landmarks, and notable features.', source_nodes=[], metadata=None)"
       ]
      },
      "execution_count": null,
@@ -394,36 +392,29 @@
      "output_type": "stream",
      "text": [
       "Added user message to memory: Tell me about the parks in Toronto\n",
-      "=== Calling Function ===\n",
-      "Calling function: Toronto with args: {\n",
-      "  \"input\": \"parks in Toronto\"\n",
-      "}\n",
-      "Got output: Toronto has a diverse array of public spaces and parks. Some of the notable ones include Nathan Phillips Square, Yonge–Dundas Square, Harbourfront Square, and Mel Lastman Square. There are also large downtown parks like Allan Gardens, Christie Pits, Grange Park, Little Norway Park, Moss Park, Queen's Park, Riverdale Park and Trinity Bellwoods Park. Other parks include Tommy Thompson Park and the Toronto Islands. The outer areas of the city have parks like High Park, Humber Bay Park, Centennial Park, Downsview Park, Guild Park and Gardens, Sunnybrook Park and Morningside Park. Toronto also operates several public golf courses. Morningside Park is the largest park managed by the city. Parts of Rouge National Urban Park, the largest urban park in North America, is also in Toronto.\n",
-      "========================\n",
-      "\n",
-      "Toronto is known for its diverse array of public spaces and parks. Some of the notable ones include:\n",
+      "Toronto is known for its beautiful and diverse parks. Here are a few of the most popular ones:\n",
       "\n",
-      "1. **Nathan Phillips Square**: This vibrant city square features the Toronto City Hall, an ice rink, and a peace garden.\n",
+      "1. **High Park**: This is Toronto's largest public park featuring many hiking trails, sports facilities, a beautiful lakefront, convenient parking, easy public transit access, a dog park, a zoo, and playgrounds for children. It's also known for its spring cherry blossoms.\n",
       "\n",
-      "2. **Yonge–Dundas Square**: Often considered Toronto's Times Square, this public square hosts many public events, performances, and art displays.\n",
+      "2. **Toronto Islands**: A group of small islands located just off the shore of the city's downtown district, offering stunning views of the city skyline. The islands provide a great escape from the city with their car-free environment, picnic spots, swimming beaches, and Centreville Amusement Park.\n",
       "\n",
-      "3. **Harbourfront Square**: Located by the Lake Ontario, it's a key cultural district with theatres, galleries, and dining.\n",
+      "3. **Trinity Bellwoods Park**: A popular park in the downtown area, it's a great place for picnics, sports, dog-walking, or just relaxing. It also has a community recreation centre with a pool and gym.\n",
       "\n",
-      "4. **Mel Lastman Square**: Located in North York, it's a public square that hosts a variety of activities throughout the year including concerts, markets, and festivals.\n",
+      "4. **Rouge National Urban Park**: Located in the city's east end, this is Canada's first national urban park. It offers hiking, swimming, camping, and a chance to learn about the area's cultural and agricultural heritage.\n",
       "\n",
-      "5. **Allan Gardens**: A park and indoor botanical garden located downtown, known for its historic, cast-iron and glass domed \"Palm House\" built in 1910.\n",
+      "5. **Riverdale Farm**: This 7.5-acre farm in the heart of Toronto provides an opportunity to experience farm life and interact with a variety of farm animals.\n",
       "\n",
-      "6. **High Park**: It's the largest park in downtown Toronto featuring many hiking trails, sports facilities, a beautiful lakefront, a dog park, a zoo, and playgrounds.\n",
+      "6. **Evergreen Brick Works**: A former industrial site that has been transformed into an eco-friendly community center with a park, farmers market, and cultural events.\n",
       "\n",
-      "7. **Toronto Islands**: A group of small islands located offshore from the city centre, providing shelter for the Toronto Harbour and offering stunning views of the city skyline.\n",
+      "7. **Scarborough Bluffs Park**: Offers a unique natural environment with stunning views of Lake Ontario from atop the bluffs.\n",
       "\n",
-      "8. **Tommy Thompson Park**: An urban wilderness park located on the Leslie Street Spit, known for bird watching.\n",
+      "8. **Edwards Gardens**: A beautiful botanical garden located in North York, perfect for a peaceful walk surrounded by nature.\n",
       "\n",
-      "9. **Rouge National Urban Park**: The largest urban park in North America, it's located in the city's eastern portion.\n",
+      "9. **Sunnybrook Park**: A large public park that offers many recreational activities including horseback riding, sports fields, and picnic areas.\n",
       "\n",
-      "10. **Morningside Park**: The largest park managed by the city, it's located in Scarborough and features picnic areas, walking trails, and a creek.\n",
+      "10. **Cherry Beach**: Located on the waterfront, this park offers a sandy beach, picnic areas, and a dog off-leash area. It's a great spot for swimming, sunbathing, and barbecuing.\n",
       "\n",
-      "These parks offer a variety of recreational activities and provide a green respite from the bustling city.\n"
+      "These parks offer a variety of experiences, from urban amenities to natural beauty, making Toronto a great city for outdoor enthusiasts.\n"
      ]
     }
    ],
diff --git a/docs/docs/examples/agent/multi_document_agents-v1.ipynb b/docs/docs/examples/agent/multi_document_agents-v1.ipynb
index d29f2787109c09898308072f3e73196adc2aa815..51ceef40ca447a2c3bf28073d9bccfcc28a498cb 100644
--- a/docs/docs/examples/agent/multi_document_agents-v1.ipynb
+++ b/docs/docs/examples/agent/multi_document_agents-v1.ipynb
@@ -45,21 +45,13 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "%pip install llama-index-core\n",
     "%pip install llama-index-agent-openai\n",
     "%pip install llama-index-readers-file\n",
     "%pip install llama-index-postprocessor-cohere-rerank\n",
     "%pip install llama-index-llms-openai\n",
-    "%pip install llama-index-embeddings-openai"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "4eff88ab",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "!pip install llama-index llama-hub"
+    "%pip install llama-index-embeddings-openai\n",
+    "%pip install unstructured[html]"
    ]
   },
   {
@@ -139,7 +131,7 @@
     {
      "data": {
       "text/plain": [
-       "638"
+       "1219"
       ]
      },
      "execution_count": null,
@@ -214,8 +206,11 @@
     "from llama_index.embeddings.openai import OpenAIEmbedding\n",
     "from llama_index.core import Settings\n",
     "\n",
-    "Settings.llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
-    "Settings.embed_model = OpenAIEmbedding(model=\"text-embedding-3-small\")"
+    "llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
+    "Settings.llm = llm\n",
+    "Settings.embed_model = OpenAIEmbedding(\n",
+    "    model=\"text-embedding-3-small\", embed_batch_size=256\n",
+    ")"
    ]
   },
   {
@@ -364,129 +359,7 @@
    "execution_count": null,
    "id": "44748b46-dd6b-4d4f-bc70-7022ae96413f",
    "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "application/vnd.jupyter.widget-view+json": {
-       "model_id": "de5dca576251400083b450c6e1559c72",
-       "version_major": 2,
-       "version_minor": 0
-      },
-      "text/plain": [
-       "  0%|          | 0/101 [00:00<?, ?it/s]"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "latest_search\n",
-      "latest_genindex\n",
-      "latest_index\n",
-      "community_frequently_asked_questions\n",
-      "community_integrations\n",
-      "community_full_stack_projects\n",
-      "integrations_tonicvalidate\n",
-      "integrations_using_with_langchain\n",
-      "integrations_trulens\n",
-      "integrations_deepeval\n",
-      "integrations_managed_indices\n",
-      "integrations_chatgpt_plugins\n",
-      "integrations_graphsignal\n",
-      "integrations_lmformatenforcer\n",
-      "integrations_graph_stores\n",
-      "integrations_vector_stores\n",
-      "integrations_fleet_libraries_context\n",
-      "llama_packs_root\n",
-      "faq_vector_database\n",
-      "faq_query_engines\n",
-      "faq_embeddings\n",
-      "faq_chat_engines\n",
-      "faq_llms\n",
-      "getting_started_reading\n",
-      "getting_started_discover_llamaindex\n",
-      "getting_started_starter_example\n",
-      "getting_started_customization\n",
-      "getting_started_installation\n",
-      "getting_started_concepts\n",
-      "api_reference_storage\n",
-      "api_reference_multi_modal\n",
-      "api_reference_response\n",
-      "api_reference_callbacks\n",
-      "api_reference_node\n",
-      "api_reference_readers\n",
-      "api_reference_agents\n",
-      "api_reference_query\n",
-      "api_reference_example_notebooks\n",
-      "api_reference_node_postprocessor\n",
-      "api_reference_composability\n",
-      "api_reference_llm_predictor\n",
-      "api_reference_service_context\n",
-      "api_reference_prompts\n",
-      "api_reference_struct_store\n",
-      "api_reference_indices\n",
-      "api_reference_evaluation\n",
-      "api_reference_index\n",
-      "api_reference_memory\n",
-      "api_reference_llms\n",
-      "langchain_integrations_base\n",
-      "storage_kv_store\n",
-      "storage_vector_store\n",
-      "storage_index_store\n",
-      "storage_docstore\n",
-      "storage_indices_save_load\n",
-      "multi_modal_openai\n",
-      "query_query_transform\n",
-      "query_query_bundle\n",
-      "query_retrievers\n",
-      "query_response_synthesizer\n",
-      "query_query_engines\n",
-      "retrievers_vector_store\n",
-      "retrievers_empty\n",
-      "retrievers_transform\n",
-      "retrievers_tree\n",
-      "retrievers_kg\n",
-      "retrievers_table\n",
-      "query_engines_graph_query_engine\n",
-      "query_engines_citation_query_engine\n",
-      "query_engines_pandas_query_engine\n",
-      "query_engines_retriever_query_engine\n",
-      "query_engines_knowledge_graph_query_engine\n",
-      "query_engines_retriever_router_query_engine\n",
-      "query_engines_sub_question_query_engine\n",
-      "query_engines_flare_query_engine\n",
-      "query_engines_sql_query_engine\n",
-      "query_engines_transform_query_engine\n",
-      "query_engines_sql_join_query_engine\n",
-      "query_engines_router_query_engine\n",
-      "chat_engines_condense_question_chat_engine\n",
-      "chat_engines_simple_chat_engine\n",
-      "chat_engines_condense_plus_context_chat_engine\n",
-      "indices_vector_store\n",
-      "indices_tree\n",
-      "indices_kg\n",
-      "indices_list\n",
-      "indices_table\n",
-      "indices_struct_store\n",
-      "service_context_embeddings\n",
-      "service_context_prompt_helper\n",
-      "llms_xinference\n",
-      "llms_langchain\n",
-      "llms_replicate\n",
-      "llms_llama_cpp\n",
-      "llms_azure_openai\n",
-      "llms_openai\n",
-      "llms_predibase\n",
-      "llms_gradient_model_adapter\n",
-      "llms_anthropic\n",
-      "llms_openllm\n",
-      "llms_huggingface\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "agents_dict, extra_info_dict = await build_agents(docs)"
    ]
@@ -539,7 +412,7 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "ToolMetadata(description='This document provides examples and instructions for searching within the Llama Index documentation.', name='tool_latest_search', fn_schema=<class 'llama_index.tools.types.DefaultToolFnSchema'>)\n"
+      "ToolMetadata(description='This document provides examples and documentation for an agent on the llama index platform.', name='tool_latest_index', fn_schema=<class 'llama_index.core.tools.types.DefaultToolFnSchema'>)\n"
      ]
     }
    ],
@@ -558,50 +431,48 @@
     "from llama_index.core import VectorStoreIndex\n",
     "from llama_index.core.objects import (\n",
     "    ObjectIndex,\n",
-    "    SimpleToolNodeMapping,\n",
     "    ObjectRetriever,\n",
     ")\n",
-    "from llama_index.core.retrievers import BaseRetriever\n",
     "from llama_index.postprocessor.cohere_rerank import CohereRerank\n",
     "from llama_index.core.query_engine import SubQuestionQueryEngine\n",
+    "from llama_index.core.schema import QueryBundle\n",
     "from llama_index.llms.openai import OpenAI\n",
     "\n",
+    "\n",
     "llm = OpenAI(model_name=\"gpt-4-0613\")\n",
     "\n",
-    "tool_mapping = SimpleToolNodeMapping.from_objects(all_tools)\n",
     "obj_index = ObjectIndex.from_objects(\n",
     "    all_tools,\n",
-    "    tool_mapping,\n",
-    "    VectorStoreIndex,\n",
+    "    index_cls=VectorStoreIndex,\n",
+    ")\n",
+    "vector_node_retriever = obj_index.as_node_retriever(\n",
+    "    similarity_top_k=10,\n",
     ")\n",
-    "vector_node_retriever = obj_index.as_node_retriever(similarity_top_k=10)\n",
-    "\n",
-    "\n",
-    "# define a custom retriever with reranking\n",
-    "class CustomRetriever(BaseRetriever):\n",
-    "    def __init__(self, vector_retriever, postprocessor=None):\n",
-    "        self._vector_retriever = vector_retriever\n",
-    "        self._postprocessor = postprocessor or CohereRerank(top_n=5)\n",
-    "        super().__init__()\n",
-    "\n",
-    "    def _retrieve(self, query_bundle):\n",
-    "        retrieved_nodes = self._vector_retriever.retrieve(query_bundle)\n",
-    "        filtered_nodes = self._postprocessor.postprocess_nodes(\n",
-    "            retrieved_nodes, query_bundle=query_bundle\n",
-    "        )\n",
-    "\n",
-    "        return filtered_nodes\n",
     "\n",
     "\n",
     "# define a custom object retriever that adds in a query planning tool\n",
     "class CustomObjectRetriever(ObjectRetriever):\n",
-    "    def __init__(self, retriever, object_node_mapping, all_tools, llm=None):\n",
+    "    def __init__(\n",
+    "        self,\n",
+    "        retriever,\n",
+    "        object_node_mapping,\n",
+    "        node_postprocessors=None,\n",
+    "        llm=None,\n",
+    "    ):\n",
     "        self._retriever = retriever\n",
     "        self._object_node_mapping = object_node_mapping\n",
     "        self._llm = llm or OpenAI(\"gpt-4-0613\")\n",
+    "        self._node_postprocessors = node_postprocessors or []\n",
     "\n",
     "    def retrieve(self, query_bundle):\n",
+    "        if isinstance(query_bundle, str):\n",
+    "            query_bundle = QueryBundle(query_str=query_bundle)\n",
+    "\n",
     "        nodes = self._retriever.retrieve(query_bundle)\n",
+    "        for processor in self._node_postprocessors:\n",
+    "            nodes = processor.postprocess_nodes(\n",
+    "                nodes, query_bundle=query_bundle\n",
+    "            )\n",
     "        tools = [self._object_node_mapping.from_node(n.node) for n in nodes]\n",
     "\n",
     "        sub_question_engine = SubQuestionQueryEngine.from_defaults(\n",
@@ -628,11 +499,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "custom_node_retriever = CustomRetriever(vector_node_retriever)\n",
-    "\n",
     "# wrap it with ObjectRetriever to return objects\n",
     "custom_obj_retriever = CustomObjectRetriever(\n",
-    "    custom_node_retriever, tool_mapping, all_tools, llm=llm\n",
+    "    vector_node_retriever,\n",
+    "    obj_index.object_node_mapping,\n",
+    "    node_postprocessors=[CohereRerank(top_n=5)],\n",
+    "    llm=llm,\n",
     ")"
    ]
   },
@@ -652,6 +524,8 @@
    ],
    "source": [
     "tmps = custom_obj_retriever.retrieve(\"hello\")\n",
+    "\n",
+    "# should be 5 + 1 -- 5 from reranker, 1 from subquestion\n",
     "print(len(tmps))"
    ]
   },
@@ -662,11 +536,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.agent.openai_legacy import FnRetrieverOpenAIAgent\n",
+    "from llama_index.agent.openai import OpenAIAgent\n",
     "from llama_index.core.agent import ReActAgent\n",
     "\n",
-    "top_agent = FnRetrieverOpenAIAgent.from_retriever(\n",
-    "    custom_obj_retriever,\n",
+    "top_agent = OpenAIAgent.from_tools(\n",
+    "    tool_retriever=custom_obj_retriever,\n",
     "    system_prompt=\"\"\" \\\n",
     "You are an agent designed to answer queries about the documentation.\n",
     "Please always use the tools provided to answer a question. Do not rely on prior knowledge.\\\n",
@@ -743,33 +617,36 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
+      "Added user message to memory: What types of agents are available in LlamaIndex?\n",
       "=== Calling Function ===\n",
-      "Calling function: tool_api_reference_evaluation with args: {\n",
-      "  \"input\": \"types of evaluation\"\n",
-      "}\n",
+      "Calling function: tool_agents_index with args: {\"input\":\"types of agents\"}\n",
+      "Added user message to memory: types of agents\n",
       "=== Calling Function ===\n",
-      "Calling function: vector_tool_api_reference_evaluation with args: {\n",
-      "  \"input\": \"types of evaluation\"\n",
+      "Calling function: vector_tool_agents_index with args: {\n",
+      "  \"input\": \"types of agents\"\n",
       "}\n",
-      "Got output: The types of evaluation can include correctness evaluation, faithfulness evaluation, guideline evaluation, hit rate evaluation, MRR (Mean Reciprocal Rank) evaluation, pairwise comparison evaluation, relevancy evaluation, and response evaluation.\n",
+      "Got output: The types of agents mentioned in the provided context are ReActAgent, Native OpenAIAgent, OpenAIAgent with Query Engine Tools, OpenAIAgent Query Planning, OpenAI Assistant, OpenAI Assistant Cookbook, Forced Function Calling, Parallel Function Calling, and Context Retrieval.\n",
       "========================\n",
-      "Got output: The types of evaluation mentioned in the `api_reference_evaluation.html` part of the LlamaIndex docs include:\n",
       "\n",
-      "1. Correctness Evaluation\n",
-      "2. Faithfulness Evaluation\n",
-      "3. Guideline Evaluation\n",
-      "4. Hit Rate Evaluation\n",
-      "5. MRR (Mean Reciprocal Rank) Evaluation\n",
-      "6. Pairwise Comparison Evaluation\n",
-      "7. Relevancy Evaluation\n",
-      "8. Response Evaluation\n",
-      "========================\n"
+      "Got output: The types of agents mentioned in the `agents_index.html` part of the LlamaIndex docs are:\n",
+      "\n",
+      "1. ReActAgent\n",
+      "2. Native OpenAIAgent\n",
+      "3. OpenAIAgent with Query Engine Tools\n",
+      "4. OpenAIAgent Query Planning\n",
+      "5. OpenAI Assistant\n",
+      "6. OpenAI Assistant Cookbook\n",
+      "7. Forced Function Calling\n",
+      "8. Parallel Function Calling\n",
+      "9. Context Retrieval\n",
+      "========================\n",
+      "\n"
      ]
     }
    ],
    "source": [
     "response = top_agent.query(\n",
-    "    \"Tell me about the different types of evaluation in LlamaIndex\"\n",
+    "    \"What types of agents are available in LlamaIndex?\",\n",
     ")"
    ]
   },
@@ -783,25 +660,7 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "There are several types of evaluation in LlamaIndex:\n",
-      "\n",
-      "1. Correctness Evaluation: This type of evaluation measures the accuracy of the retrieval results. It checks if the retrieved documents are correct and relevant to the query.\n",
-      "\n",
-      "2. Faithfulness Evaluation: Faithfulness evaluation measures how faithfully the retrieved documents represent the original data. It checks if the retrieved documents accurately reflect the information in the original documents.\n",
-      "\n",
-      "3. Guideline Evaluation: Guideline evaluation involves comparing the retrieval results against a set of guidelines or ground truth. It checks if the retrieval results align with the expected or desired outcomes.\n",
-      "\n",
-      "4. Hit Rate Evaluation: Hit rate evaluation measures the percentage of queries that return at least one relevant document. It is a binary evaluation metric that indicates the effectiveness of the retrieval system in finding relevant documents.\n",
-      "\n",
-      "5. MRR (Mean Reciprocal Rank) Evaluation: MRR evaluation measures the average rank of the first relevant document in the retrieval results. It provides a single value that represents the effectiveness of the retrieval system in ranking relevant documents.\n",
-      "\n",
-      "6. Pairwise Comparison Evaluation: Pairwise comparison evaluation involves comparing the retrieval results of different systems or algorithms. It helps determine which system performs better in terms of retrieval accuracy and relevance.\n",
-      "\n",
-      "7. Relevancy Evaluation: Relevancy evaluation measures the relevance of the retrieved documents to the query. It can be done using various metrics such as precision, recall, and F1 score.\n",
-      "\n",
-      "8. Response Evaluation: Response evaluation measures the quality of the response generated by the retrieval system. It checks if the response is informative, accurate, and helpful to the user.\n",
-      "\n",
-      "These evaluation types help assess the performance and effectiveness of the retrieval system in LlamaIndex.\n"
+      "The types of agents available in LlamaIndex include ReActAgent, Native OpenAIAgent, OpenAIAgent with Query Engine Tools, OpenAIAgent Query Planning, OpenAI Assistant, OpenAI Assistant Cookbook, Forced Function Calling, Parallel Function Calling, and Context Retrieval.\n"
      ]
     }
    ],
@@ -819,14 +678,14 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "LlamaIndex utilizes various types of evaluation methods to assess its performance and effectiveness. These evaluation methods include RelevancyEvaluator, RetrieverEvaluator, SemanticSimilarityEvaluator, PairwiseComparisonEvaluator, CorrectnessEvaluator, FaithfulnessEvaluator, and GuidelineEvaluator. Each of these evaluators serves a specific purpose in evaluating different aspects of the LlamaIndex system.\n"
+      "The types of agents available in LlamaIndex are ReActAgent, Native OpenAIAgent, and OpenAIAgent.\n"
      ]
     }
    ],
    "source": [
     "# baseline\n",
     "response = base_query_engine.query(\n",
-    "    \"Tell me about the different types of evaluation in LlamaIndex\"\n",
+    "    \"What types of agents are available in LlamaIndex?\",\n",
     ")\n",
     "print(str(response))"
    ]
@@ -841,45 +700,41 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
+      "Added user message to memory: Compare the content in the agents page vs. tools page.\n",
       "=== Calling Function ===\n",
-      "Calling function: compare_tool with args: {\n",
-      "  \"input\": \"content in the contributions page vs. index page\"\n",
-      "}\n",
+      "Calling function: compare_tool with args: {\"input\":\"agents vs tools\"}\n",
       "Generated 2 sub questions.\n",
-      "\u001b[1;3;38;2;237;90;200m[tool_development_contributing] Q: What is the content of the contributions page?\n",
-      "\u001b[0m\u001b[1;3;38;2;90;149;237m[tool_latest_index] Q: What is the content of the index page?\n",
-      "\u001b[0m=== Calling Function ===\n",
-      "Calling function: summary_tool_development_contributing with args: {\n",
-      "  \"input\": \"development_contributing.html\"\n",
+      "\u001b[1;3;38;2;237;90;200m[tool_understanding_index] Q: What are the functionalities of agents in the Llama Index platform?\n",
+      "\u001b[0mAdded user message to memory: What are the functionalities of agents in the Llama Index platform?\n",
+      "\u001b[1;3;38;2;90;149;237m[tool_understanding_index] Q: How do agents differ from tools in the Llama Index platform?\n",
+      "\u001b[0mAdded user message to memory: How do agents differ from tools in the Llama Index platform?\n",
+      "=== Calling Function ===\n",
+      "Calling function: vector_tool_understanding_index with args: {\n",
+      "  \"input\": \"difference between agents and tools\"\n",
       "}\n",
       "=== Calling Function ===\n",
-      "Calling function: vector_tool_latest_index with args: {\n",
-      "  \"input\": \"content of the index page\"\n",
+      "Calling function: vector_tool_understanding_index with args: {\n",
+      "  \"input\": \"functionalities of agents\"\n",
       "}\n",
-      "Got output: The development_contributing.html file provides information on how to contribute to LlamaIndex. It includes guidelines on what to work on, such as extending core modules, fixing bugs, adding usage examples, adding experimental features, and improving code quality and documentation. The file also provides details on each module, including data loaders, node parsers, text splitters, document/index/KV stores, managed index, vector stores, retrievers, query engines, query transforms, token usage optimizers, node postprocessors, and output parsers. Additionally, the file includes a development guideline section that covers environment setup, validating changes, formatting/linting, testing, creating example notebooks, and creating a pull request.\n",
-      "========================\n",
-      "Got output: The content of the index page provides information about LlamaIndex, a data framework for LLM applications. It explains why LlamaIndex is useful for augmenting LLM models with private or domain-specific data that may be distributed across different applications and data stores. LlamaIndex offers tools such as data connectors, data indexes, engines, and data agents to ingest, structure, and access data. It is designed for beginners as well as advanced users who can customize and extend its modules. The page also provides installation instructions, tutorials, and links to the LlamaIndex ecosystem and associated projects.\n",
+      "Got output: Agents are typically individuals or entities that act on behalf of others, making decisions and taking actions based on predefined rules or instructions. On the other hand, tools are instruments or devices used to carry out specific functions or tasks, often under the control or direction of an agent.\n",
       "========================\n",
-      "\u001b[1;3;38;2;90;149;237m[tool_latest_index] A: The content of the `latest_index.html` page provides comprehensive information about LlamaIndex, a data framework for LLM applications. It explains the utility of LlamaIndex in augmenting LLM models with private or domain-specific data that may be distributed across different applications and data stores. \n",
-      "\n",
-      "The page details the tools offered by LlamaIndex, such as data connectors, data indexes, engines, and data agents, which are used to ingest, structure, and access data. It is designed to cater to both beginners and advanced users, with the flexibility to customize and extend its modules.\n",
       "\n",
-      "Additionally, the page provides installation instructions and tutorials for users. It also includes links to the LlamaIndex ecosystem and associated projects for further exploration and understanding.\n",
-      "\u001b[0m\u001b[1;3;38;2;237;90;200m[tool_development_contributing] A: The `development_contributing.html` page of the LlamaIndex docs provides comprehensive information on how to contribute to the project. It includes guidelines on the areas to focus on, such as extending core modules, fixing bugs, adding usage examples, adding experimental features, and improving code quality and documentation.\n",
-      "\n",
-      "The page also provides detailed information on each module, including data loaders, node parsers, text splitters, document/index/KV stores, managed index, vector stores, retrievers, query engines, query transforms, token usage optimizers, node postprocessors, and output parsers.\n",
+      "Got output: Agents typically have a range of functionalities that allow them to perform tasks autonomously or semi-autonomously. These functionalities may include data collection, analysis, decision-making, communication with other systems or users, and executing specific actions based on predefined rules or algorithms.\n",
+      "========================\n",
       "\n",
-      "In addition, there is a development guideline section that covers various aspects of the development process, including environment setup, validating changes, formatting/linting, testing, creating example notebooks, and creating a pull request.\n",
-      "\u001b[0mGot output: The content in the contributions page of the LlamaIndex documentation provides comprehensive information on how to contribute to the project, including guidelines on areas to focus on and detailed information on each module. It also covers various aspects of the development process. \n",
+      "\u001b[1;3;38;2;90;149;237m[tool_understanding_index] A: In the context of the Llama Index platform, agents are entities that make decisions and take actions based on predefined rules or instructions. They are designed to interact with users, understand their queries, and provide appropriate responses. \n",
       "\n",
-      "On the other hand, the content in the index page of the LlamaIndex documentation provides comprehensive information about LlamaIndex itself, explaining its utility in augmenting LLM models with private or domain-specific data. It details the tools offered by LlamaIndex and provides installation instructions, tutorials, and links to the LlamaIndex ecosystem and associated projects.\n",
-      "========================\n"
+      "On the other hand, tools are instruments or devices that are used to perform specific functions or tasks. They are typically controlled or directed by an agent and do not make decisions on their own. They are used to assist the agents in providing accurate and relevant responses to user queries.\n",
+      "\u001b[0m\u001b[1;3;38;2;237;90;200m[tool_understanding_index] A: In the Llama Index platform, agents have a variety of functionalities. They can perform tasks autonomously or semi-autonomously. These tasks include data collection and analysis, making decisions, communicating with other systems or users, and executing specific actions. These actions are based on predefined rules or algorithms.\n",
+      "\u001b[0mGot output: Agents in the Llama Index platform are responsible for making decisions and taking actions based on predefined rules or instructions. They interact with users, understand queries, and provide appropriate responses. On the other hand, tools in the platform are instruments or devices used to perform specific functions or tasks. Unlike agents, tools are typically controlled or directed by an agent and do not make decisions independently. Their role is to assist agents in delivering accurate and relevant responses to user queries.\n",
+      "========================\n",
+      "\n"
      ]
     }
    ],
    "source": [
     "response = top_agent.query(\n",
-    "    \"Compare the content in the contributions page vs. index page.\"\n",
+    "    \"Compare the content in the agents page vs. tools page.\"\n",
     ")"
    ]
   },
@@ -893,11 +748,7 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "The contributions page of the LlamaIndex documentation provides guidelines for contributing to LlamaIndex, including extending core modules, fixing bugs, adding usage examples, adding experimental features, and improving code quality and documentation. It also includes information on the environment setup, validating changes, formatting and linting, testing, creating example notebooks, and creating a pull request.\n",
-      "\n",
-      "On the other hand, the index page of the LlamaIndex documentation provides information about LlamaIndex itself. It explains that LlamaIndex is a data framework that allows LLM applications to ingest, structure, and access private or domain-specific data. It provides tools such as data connectors, data indexes, engines, data agents, and application integrations. The index page also mentions that LlamaIndex is designed for beginners, advanced users, and everyone in between, and offers both high-level and lower-level APIs for customization. It provides installation instructions, links to the GitHub and PyPi repositories, and information about the LlamaIndex community on Twitter and Discord.\n",
-      "\n",
-      "In summary, the contributions page focuses on contributing to LlamaIndex, while the index page provides an overview of LlamaIndex and its features.\n"
+      "The comparison between the content in the agents page and the tools page highlights the difference in their roles and functionalities. Agents on the Llama Index platform are responsible for decision-making and interacting with users, while tools are instruments used to perform specific functions or tasks, controlled by agents to assist in providing responses.\n"
      ]
     }
    ],
@@ -910,10 +761,75 @@
    "execution_count": null,
    "id": "a8d97266-8e22-43a8-adfe-b9a7f833c06d",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Added user message to memory: Can you compare the compact and tree_summarize response synthesizer response modes at a very high-level?\n",
+      "=== Calling Function ===\n",
+      "Calling function: compare_tool with args: {\"input\":\"Compare the compact and tree_summarize response synthesizer response modes at a very high-level.\"}\n",
+      "Generated 4 sub questions.\n",
+      "\u001b[1;3;38;2;237;90;200m[tool_querying_index] Q: What are the key differences between the compact and tree_summarize response synthesizer response modes?\n",
+      "\u001b[0mAdded user message to memory: What are the key differences between the compact and tree_summarize response synthesizer response modes?\n",
+      "\u001b[1;3;38;2;90;149;237m[tool_querying_index] Q: How does the compact response synthesizer response mode optimize query logic and response quality?\n",
+      "\u001b[0mAdded user message to memory: How does the compact response synthesizer response mode optimize query logic and response quality?\n",
+      "\u001b[1;3;38;2;11;159;203m[tool_querying_index] Q: How does the tree_summarize response synthesizer response mode optimize query logic and response quality?\n",
+      "\u001b[0mAdded user message to memory: How does the tree_summarize response synthesizer response mode optimize query logic and response quality?\n",
+      "\u001b[1;3;38;2;155;135;227m[tool_evaluating_index] Q: What are the guidelines for evaluating retrievals in the context of response synthesizer response modes?\n",
+      "\u001b[0mAdded user message to memory: What are the guidelines for evaluating retrievals in the context of response synthesizer response modes?\n",
+      "=== Calling Function ===\n",
+      "Calling function: vector_tool_querying_index with args: {\n",
+      "  \"input\": \"compact response synthesizer response mode\"\n",
+      "}\n",
+      "=== Calling Function ===\n",
+      "Calling function: summary_tool_querying_index with args: {\n",
+      "  \"input\": \"tree_summarize response synthesizer response mode\"\n",
+      "}\n",
+      "=== Calling Function ===\n",
+      "Calling function: vector_tool_querying_index with args: {\n",
+      "  \"input\": \"compact vs tree_summarize response synthesizer response modes\"\n",
+      "}\n",
+      "=== Calling Function ===\n",
+      "Calling function: vector_tool_evaluating_index with args: {\n",
+      "  \"input\": \"evaluating retrievals response synthesizer response modes\"\n",
+      "}\n",
+      "Got output: The response modes for the response synthesizer include \"compact\" and \"tree_summarize\".\n",
+      "========================\n",
+      "\n",
+      "Got output: The response mode \"tree_summarize\" in the response synthesizer configures the system to recursively construct a tree from a set of Node objects and the query, returning the root node as the final response. This mode is particularly useful for summarization purposes.\n",
+      "========================\n",
+      "\n",
+      "Got output: \"compact\" the prompt during each LLM call by stuffing as many Node text chunks that can fit within the maximum prompt size. If there are too many chunks to stuff in one prompt, \"create and refine\" an answer by going through multiple prompts.\n",
+      "========================\n",
+      "\n",
+      "=== Calling Function ===\n",
+      "Calling function: summary_tool_querying_index with args: {\n",
+      "  \"input\": \"compact vs tree_summarize response synthesizer response modes\"\n",
+      "}\n",
+      "Got output: Response synthesizer response modes can be evaluated by comparing what was retrieved for a query to a set of nodes that were expected to be retrieved. This evaluation process typically involves analyzing metrics such as Mean Reciprocal Rank (MRR) and Hit Rate. It is important to evaluate a batch of retrievals to get a comprehensive understanding of the performance. If you are making calls to a hosted, remote LLM, you may also want to consider analyzing the cost implications of your application.\n",
+      "========================\n",
+      "\n",
+      "Got output: The response modes for the response synthesizer include \"compact\" and \"tree_summarize\".\n",
+      "========================\n",
+      "\n",
+      "\u001b[1;3;38;2;90;149;237m[tool_querying_index] A: The compact response synthesizer response mode optimizes query logic and response quality by compacting the prompt during each LLM call. It does this by stuffing as many Node text chunks that can fit within the maximum prompt size. If there are too many chunks to fit in one prompt, it will \"create and refine\" an answer by going through multiple prompts. This approach allows for a more efficient use of the prompt space and can lead to more refined and accurate responses.\n",
+      "\u001b[0m\u001b[1;3;38;2;11;159;203m[tool_querying_index] A: The \"tree_summarize\" response synthesizer response mode optimizes query logic and response quality by recursively constructing a tree from a set of Node objects and the query. This approach allows the system to handle complex queries and generate comprehensive responses. The root node, which is returned as the final response, contains a summarized version of the information, making it easier for users to understand the response. This mode is particularly useful for summarization purposes, where the goal is to provide a concise yet comprehensive answer to a query.\n",
+      "\u001b[0m\u001b[1;3;38;2;155;135;227m[tool_evaluating_index] A: When evaluating retrievals in the context of response synthesizer response modes, you should compare what was retrieved for a query to a set of nodes that were expected to be retrieved. This evaluation process typically involves analyzing metrics such as Mean Reciprocal Rank (MRR) and Hit Rate. It's crucial to evaluate a batch of retrievals to get a comprehensive understanding of the performance. If you are making calls to a hosted, remote LLM, you may also want to consider analyzing the cost implications of your application.\n",
+      "\u001b[0m\u001b[1;3;38;2;237;90;200m[tool_querying_index] A: The \"compact\" and \"tree_summarize\" are two different response modes for the response synthesizer in LlamaIndex. \n",
+      "\n",
+      "The \"compact\" mode provides a more concise response, focusing on delivering the most relevant information in a compact format. This mode is useful when you want a brief and direct answer to your query.\n",
+      "\n",
+      "On the other hand, the \"tree_summarize\" mode provides a more detailed and structured response. It breaks down the information into a tree-like structure, making it easier to understand the relationships and hierarchy of the information. This mode is useful when you want a comprehensive understanding of the query topic.\n",
+      "\u001b[0mGot output: The \"compact\" response synthesizer mode focuses on providing a concise and direct response, while the \"tree_summarize\" mode offers a more detailed and structured response by breaking down information into a tree-like structure. The compact mode aims to deliver the most relevant information in a compact format, suitable for brief answers, whereas the tree_summarize mode is designed to provide a comprehensive understanding of the query topic by presenting information in a hierarchical manner.\n",
+      "========================\n",
+      "\n"
+     ]
+    }
+   ],
    "source": [
     "response = top_agent.query(\n",
-    "    \"Can you compare the tree index and list index at a very high-level?\"\n",
+    "    \"Can you compare the compact and tree_summarize response synthesizer response modes at a very high-level?\"\n",
     ")"
    ]
   },
@@ -927,13 +843,7 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "At a high level, the Tree Index and List Index are two different types of indexes used in the system. \n",
-      "\n",
-      "The Tree Index is a tree-structured index that is built specifically for each query. It allows for the construction of a query-specific tree from leaf nodes to return a response. The Tree Index is designed to provide a more optimized and efficient way of retrieving nodes based on a query.\n",
-      "\n",
-      "On the other hand, the List Index is a keyword table index that supports operations such as inserting and deleting documents, retrieving nodes based on a query, and refreshing the index with updated documents. The List Index is a simpler index that uses a keyword table approach for retrieval.\n",
-      "\n",
-      "Both indexes have their own advantages and use cases. The choice between them depends on the specific requirements and constraints of the system.\n"
+      "The \"compact\" response synthesizer mode provides concise and direct responses, while the \"tree_summarize\" mode offers detailed and structured responses in a tree-like format. The compact mode is suitable for brief answers, while the tree_summarize mode presents information hierarchically for a comprehensive understanding of the query topic.\n"
      ]
     }
    ],
diff --git a/docs/docs/examples/agent/multi_document_agents.ipynb b/docs/docs/examples/agent/multi_document_agents.ipynb
index b850defb2196c188e892533a4033c0fded61c561..bca60563e91d76af0791ee06696c710132afbd91 100644
--- a/docs/docs/examples/agent/multi_document_agents.ipynb
+++ b/docs/docs/examples/agent/multi_document_agents.ipynb
@@ -365,13 +365,11 @@
    "source": [
     "# define an \"object\" index and retriever over these tools\n",
     "from llama_index.core import VectorStoreIndex\n",
-    "from llama_index.core.objects import ObjectIndex, SimpleToolNodeMapping\n",
+    "from llama_index.core.objects import ObjectIndex\n",
     "\n",
-    "tool_mapping = SimpleToolNodeMapping.from_objects(all_tools)\n",
     "obj_index = ObjectIndex.from_objects(\n",
     "    all_tools,\n",
-    "    tool_mapping,\n",
-    "    VectorStoreIndex,\n",
+    "    index_cls=VectorStoreIndex,\n",
     ")"
    ]
   },
@@ -382,10 +380,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.agent.openai_legacy import FnRetrieverOpenAIAgent\n",
+    "from llama_index.agent.openai import OpenAIAgent\n",
     "\n",
-    "top_agent = FnRetrieverOpenAIAgent.from_retriever(\n",
-    "    obj_index.as_retriever(similarity_top_k=3),\n",
+    "top_agent = OpenAIAgent.from_tools(\n",
+    "    tool_retriever=obj_index.as_retriever(similarity_top_k=3),\n",
     "    system_prompt=\"\"\" \\\n",
     "You are an agent designed to answer queries about a set of given cities.\n",
     "Please always use the tools provided to answer a question. Do not rely on prior knowledge.\\\n",
diff --git a/docs/docs/examples/agent/openai_agent_retrieval.ipynb b/docs/docs/examples/agent/openai_agent_retrieval.ipynb
index 7b41f8fe9103673a01d1ec304ebffa9cb8448a47..b36809db960047b9ae616e988fe5131046ecd862 100644
--- a/docs/docs/examples/agent/openai_agent_retrieval.ipynb
+++ b/docs/docs/examples/agent/openai_agent_retrieval.ipynb
@@ -24,7 +24,7 @@
    "id": "673df1fe-eb6c-46ea-9a73-a96e7ae7942e",
    "metadata": {},
    "source": [
-    "In this tutorial, we show you how to use our `FnRetrieverOpenAI` implementation\n",
+    "In this tutorial, we show you how to use our `OpenAIAgent` implementation with a tool retriever, \n",
     "to build an agent on top of OpenAI's function API and store/index an arbitrary number of tools. Our indexing/retrieval modules help to remove the complexity of having too many functions to fit in the prompt."
    ]
   },
@@ -172,13 +172,11 @@
    "source": [
     "# define an \"object\" index over these tools\n",
     "from llama_index.core import VectorStoreIndex\n",
-    "from llama_index.core.objects import ObjectIndex, SimpleToolNodeMapping\n",
+    "from llama_index.core.objects import ObjectIndex\n",
     "\n",
-    "tool_mapping = SimpleToolNodeMapping.from_objects(all_tools)\n",
     "obj_index = ObjectIndex.from_objects(\n",
     "    all_tools,\n",
-    "    tool_mapping,\n",
-    "    VectorStoreIndex,\n",
+    "    index_cls=VectorStoreIndex,\n",
     ")"
    ]
   },
@@ -188,7 +186,7 @@
    "id": "707d30b8-6405-4187-a9ed-6146dcc42167",
    "metadata": {},
    "source": [
-    "## Our `FnRetrieverOpenAIAgent` Implementation "
+    "## `OpenAIAgent` w/ Tool Retrieval "
    ]
   },
   {
@@ -197,7 +195,7 @@
    "id": "798ca3fd-6711-4c0c-a853-d868dd14b484",
    "metadata": {},
    "source": [
-    "We provide a `FnRetrieverOpenAIAgent` implementation in LlamaIndex, which can take in an `ObjectRetriever` over a set of `BaseTool` objects.\n",
+    "We provide a `OpenAIAgent` implementation in LlamaIndex, which can take in an `ObjectRetriever` over a set of `BaseTool` objects.\n",
     "\n",
     "During query-time, we would first use the `ObjectRetriever` to retrieve a set of relevant Tools. These tools would then be passed into the agent; more specifically, their function signatures would be passed into the OpenAI Function calling API. "
    ]
@@ -209,7 +207,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.agent.openai_legacy import FnRetrieverOpenAIAgent"
+    "from llama_index.agent.openai import OpenAIAgent"
    ]
   },
   {
@@ -219,8 +217,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "agent = FnRetrieverOpenAIAgent.from_retriever(\n",
-    "    obj_index.as_retriever(), verbose=True\n",
+    "agent = OpenAIAgent.from_tools(\n",
+    "    tool_retriever=obj_index.as_retriever(similarity_top_k=2), verbose=True\n",
     ")"
    ]
   },
diff --git a/docs/docs/examples/objects/object_index.ipynb b/docs/docs/examples/objects/object_index.ipynb
index ab10c666a806b58cfb86d1d074fcc31e4d397f69..e3b99bd326dcd8869d4ffd1ff92582ef36cc2f10 100644
--- a/docs/docs/examples/objects/object_index.ipynb
+++ b/docs/docs/examples/objects/object_index.ipynb
@@ -24,6 +24,18 @@
     "In this notebook, we'll quickly cover how you can build an `ObjectIndex` using a `SimpleObjectNodeMapping`."
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "56660700",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index.core import Settings\n",
+    "\n",
+    "Settings.embed_model = \"local\""
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -40,13 +52,19 @@
     "obj3 = \"llamaindex is an awesome library!\"\n",
     "arbitrary_objects = [obj1, obj2, obj3]\n",
     "\n",
-    "# object-node mapping\n",
+    "# (optional) object-node mapping\n",
     "obj_node_mapping = SimpleObjectNodeMapping.from_objects(arbitrary_objects)\n",
     "nodes = obj_node_mapping.to_nodes(arbitrary_objects)\n",
     "\n",
     "# object index\n",
     "object_index = ObjectIndex(\n",
-    "    index=VectorStoreIndex(nodes=nodes), object_node_mapping=obj_node_mapping\n",
+    "    index=VectorStoreIndex(nodes=nodes),\n",
+    "    object_node_mapping=obj_node_mapping,\n",
+    ")\n",
+    "\n",
+    "# object index from_objects (default index_cls=VectorStoreIndex)\n",
+    "object_index = ObjectIndex.from_objects(\n",
+    "    arbitrary_objects, index_cls=VectorStoreIndex\n",
     ")"
    ]
   },
@@ -81,12 +99,258 @@
     "object_retriever.retrieve(\"llamaindex\")"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "db53c825",
+   "metadata": {},
+   "source": [
+    "We can also add node-postprocessors to an object index retriever, for easy convience to things like rerankers and more."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "df68a0d1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install llama-index-postprocessor-colbert-rerank"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f1fd4b6e",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "['llamaindex is an awesome library!']"
+      ]
+     },
+     "execution_count": null,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from llama_index.postprocessor.colbert_rerank import ColbertRerank\n",
+    "\n",
+    "retriever = object_index.as_retriever(\n",
+    "    similarity_top_k=2, node_postprocessors=[ColbertRerank(top_n=1)]\n",
+    ")\n",
+    "retriever.retrieve(\"a random list object\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "3c5d574c",
+   "metadata": {},
+   "source": [
+    "## Using a Storage Integration (i.e. Chroma)\n",
+    "\n",
+    "The object index supports integrations with any existing storage backend in LlamaIndex.\n",
+    "\n",
+    "The following section walks through how to set that up using `Chroma` as an example."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "86a6eb5e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install llama-index-vector-stores-chroma"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d3fa5b3f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index.core import StorageContext, VectorStoreIndex\n",
+    "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
+    "import chromadb\n",
+    "\n",
+    "db = chromadb.PersistentClient(path=\"./chroma_db\")\n",
+    "chroma_collection = db.get_or_create_collection(\"quickstart2\")\n",
+    "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)\n",
+    "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
+    "\n",
+    "object_index = ObjectIndex.from_objects(\n",
+    "    arbitrary_objects,\n",
+    "    index_cls=VectorStoreIndex,\n",
+    "    storage_context=storage_context,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "28cda697",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "['llamaindex is an awesome library!']"
+      ]
+     },
+     "execution_count": null,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "object_retriever = object_index.as_retriever(similarity_top_k=1)\n",
+    "object_retriever.retrieve(\"llamaindex\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "358994af",
+   "metadata": {},
+   "source": [
+    "Now, let's \"reload\" the index."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "61134380",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "db = chromadb.PersistentClient(path=\"./chroma_db\")\n",
+    "chroma_collection = db.get_or_create_collection(\"quickstart2\")\n",
+    "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)\n",
+    "\n",
+    "index = VectorStoreIndex.from_vector_store(vector_store=vector_store)\n",
+    "\n",
+    "object_index = ObjectIndex.from_objects_and_index(arbitrary_objects, index)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "644a32d0",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "['llamaindex is an awesome library!']"
+      ]
+     },
+     "execution_count": null,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "object_retriever = object_index.as_retriever(similarity_top_k=1)\n",
+    "object_retriever.retrieve(\"llamaindex\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "78bfe419",
+   "metadata": {},
+   "source": [
+    "Note that when we reload the index, we still have to pass the objects, since those are not saved in the actual index/vector db."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a578c2c9",
+   "metadata": {},
+   "source": [
+    "## [Advanced] Customizing the Mapping\n",
+    "\n",
+    "For specialized cases where you want full control over how objects are mapped to nodes, you can also provide `to_node_fn()` and `from_node_fn()` hooks.\n",
+    "\n",
+    "This is useful when you are converting specialized objects, or want to create objects dynamically at runtime rather than keeping them in memory.\n",
+    "\n",
+    "A small example is shown below."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1cdbd6c9",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "['llamaindex is an awesome library!']"
+      ]
+     },
+     "execution_count": null,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from llama_index.core.schema import TextNode\n",
+    "\n",
+    "my_objects = {str(hash(str(obj))): obj for obj in arbitrary_objects}\n",
+    "\n",
+    "\n",
+    "def from_node_fn(node):\n",
+    "    # look up the original object by node id\n",
+    "    return my_objects[node.id_]\n",
+    "\n",
+    "\n",
+    "def to_node_fn(obj):\n",
+    "    return TextNode(id_=str(hash(str(obj))), text=str(obj))\n",
+    "\n",
+    "\n",
+    "object_index = ObjectIndex.from_objects(\n",
+    "    arbitrary_objects,\n",
+    "    index_cls=VectorStoreIndex,\n",
+    "    from_node_fn=from_node_fn,\n",
+    "    to_node_fn=to_node_fn,\n",
+    ")\n",
+    "\n",
+    "object_retriever = object_index.as_retriever(similarity_top_k=1)\n",
+    "\n",
+    "object_retriever.retrieve(\"llamaindex\")"
+   ]
+  },
   {
    "cell_type": "markdown",
    "id": "0032d0e7-815d-414d-9fcc-384709b59484",
    "metadata": {},
    "source": [
-    "## Persisting `ObjectIndex`\n",
+    "## Persisting `ObjectIndex` to Disk with Objects\n",
     "\n",
     "When it comes to persisting the `ObjectIndex`, we have to handle both the index as well as the object-node mapping. Persisting the index is straightforward and can be handled by the usual means (e.g., see this [guide](https://docs.llamaindex.ai/en/stable/module_guides/storing/save_load.html#persisting-loading-data)). However, it's a bit of a different story when it comes to persisting the `ObjectNodeMapping`. Since we're indexing arbitrary Python objects with the `ObjectIndex`, it may be the case (and perhaps more often than we'd like) that the arbitrary objects are not serializable. In those cases, you can persist the index, but you would have to maintain a way to re-construct the `ObjectNodeMapping` in order to re-construct the `ObjectIndex`. For convenience, there are the `persist` and `from_persist_dir` methods on the `ObjectIndex` that will attempt to persist and load a previously saved `ObjectIndex`, respectively.\n",
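+    "\n",
+    "A minimal sketch of that round trip (the directory name is illustrative, and we assume the indexed objects are serializable):\n",
+    "\n",
+    "```python\n",
+    "# persist both the index and the object-node mapping\n",
+    "object_index.persist(persist_dir=\"./obj_index_storage\")\n",
+    "\n",
+    "# reload later -- assumes the object-node mapping was successfully persisted\n",
+    "object_index = ObjectIndex.from_persist_dir(persist_dir=\"./obj_index_storage\")\n",
+    "```"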
    ]
@@ -273,9 +537,9 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "llama_index_3.10",
+   "display_name": "venv",
    "language": "python",
-   "name": "llama_index_3.10"
+   "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
diff --git a/docs/docs/examples/query_engine/RetrieverRouterQueryEngine.ipynb b/docs/docs/examples/query_engine/RetrieverRouterQueryEngine.ipynb
index 6bfbc06d2cc17b83e32de60b99c55e35f13ceb01..f96c3ceab26b004ebead86a980c8d1e080d66299 100644
--- a/docs/docs/examples/query_engine/RetrieverRouterQueryEngine.ipynb
+++ b/docs/docs/examples/query_engine/RetrieverRouterQueryEngine.ipynb
@@ -266,13 +266,11 @@
    ],
    "source": [
     "from llama_index.core import VectorStoreIndex\n",
-    "from llama_index.core.objects import ObjectIndex, SimpleToolNodeMapping\n",
+    "from llama_index.core.objects import ObjectIndex\n",
     "\n",
-    "tool_mapping = SimpleToolNodeMapping.from_objects([list_tool, vector_tool])\n",
     "obj_index = ObjectIndex.from_objects(\n",
     "    [list_tool, vector_tool],\n",
-    "    tool_mapping,\n",
-    "    VectorStoreIndex,\n",
+    "    index_cls=VectorStoreIndex,\n",
     ")"
    ]
   },
diff --git a/docs/docs/module_guides/deploying/agents/usage_pattern.md b/docs/docs/module_guides/deploying/agents/usage_pattern.md
index f95c431e10c9f1b9d54d94430a193469114aa086..d75209479ed0bd75ee8e626f43f9d712d38e0d45 100644
--- a/docs/docs/module_guides/deploying/agents/usage_pattern.md
+++ b/docs/docs/module_guides/deploying/agents/usage_pattern.md
@@ -161,26 +161,26 @@ We first build an `ObjectIndex` over an existing set of Tools.
 ```python
 # define an "object" index over these tools
 from llama_index.core import VectorStoreIndex
-from llama_index.core.objects import ObjectIndex, SimpleToolNodeMapping
+from llama_index.core.objects import ObjectIndex
 
-tool_mapping = SimpleToolNodeMapping.from_objects(all_tools)
 obj_index = ObjectIndex.from_objects(
     all_tools,
-    tool_mapping,
-    VectorStoreIndex,
+    index_cls=VectorStoreIndex,
 )
 ```
 
-We then define our `FnRetrieverOpenAIAgent`:
+We then define our `OpenAIAgent`:
 
 ```python
-from llama_index.agent.openai_legacy import FnRetrieverOpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
-agent = FnRetrieverOpenAIAgent.from_retriever(
-    obj_index.as_retriever(), verbose=True
+agent = OpenAIAgent.from_tools(
+    tool_retriever=obj_index.as_retriever(similarity_top_k=2), verbose=True
 )
 ```
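+
+Once built, the agent retrieves the top-k most relevant tools for each query before deciding how to respond. A minimal usage sketch (the question itself is illustrative):
+
+```python
+response = agent.chat("What's 212 multiplied by 122? Make sure to use Tools")
+print(str(response))
+```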
 
+You can find more details on the object index in the [full guide](../../../examples/objects/object_index.ipynb).
+
 ### Context Retrieval Agents
 
 Our context-augmented OpenAI Agent will always perform retrieval before calling any tools.
diff --git a/llama-index-core/llama_index/core/objects/base.py b/llama-index-core/llama_index/core/objects/base.py
index 1fa576a6ae969ec26b7939499deadc8fde15d43b..15f7067173c31bce49670d34bd82c18306dd41a7 100644
--- a/llama-index-core/llama_index/core/objects/base.py
+++ b/llama-index-core/llama_index/core/objects/base.py
@@ -2,7 +2,7 @@
 
 import pickle
 import warnings
-from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar
+from typing import Any, Callable, Dict, Generic, List, Optional, Sequence, Type, TypeVar
 
 from llama_index.core.base.base_retriever import BaseRetriever
 from llama_index.core.base.query_pipeline.query import (
@@ -16,12 +16,13 @@ from llama_index.core.bridge.pydantic import Field
 from llama_index.core.callbacks.base import CallbackManager
 from llama_index.core.indices.base import BaseIndex
 from llama_index.core.indices.vector_store.base import VectorStoreIndex
+from llama_index.core.postprocessor.types import BaseNodePostprocessor
 from llama_index.core.objects.base_node_mapping import (
     DEFAULT_PERSIST_FNAME,
     BaseObjectNodeMapping,
     SimpleObjectNodeMapping,
 )
-from llama_index.core.schema import QueryType
+from llama_index.core.schema import QueryBundle, QueryType, TextNode
 from llama_index.core.storage.storage_context import (
     DEFAULT_PERSIST_DIR,
     StorageContext,
@@ -34,22 +35,56 @@ class ObjectRetriever(ChainableMixin, Generic[OT]):
     """Object retriever."""
 
     def __init__(
-        self, retriever: BaseRetriever, object_node_mapping: BaseObjectNodeMapping[OT]
+        self,
+        retriever: BaseRetriever,
+        object_node_mapping: BaseObjectNodeMapping[OT],
+        node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
     ):
         self._retriever = retriever
         self._object_node_mapping = object_node_mapping
+        self._node_postprocessors = node_postprocessors or []
 
     @property
     def retriever(self) -> BaseRetriever:
         """Retriever."""
         return self._retriever
 
+    @property
+    def object_node_mapping(self) -> BaseObjectNodeMapping[OT]:
+        """Object node mapping."""
+        return self._object_node_mapping
+
+    @property
+    def node_postprocessors(self) -> List[BaseNodePostprocessor]:
+        """Node postprocessors."""
+        return self._node_postprocessors
+
     def retrieve(self, str_or_query_bundle: QueryType) -> List[OT]:
-        nodes = self._retriever.retrieve(str_or_query_bundle)
+        if isinstance(str_or_query_bundle, str):
+            query_bundle = QueryBundle(query_str=str_or_query_bundle)
+        else:
+            query_bundle = str_or_query_bundle
+
+        nodes = self._retriever.retrieve(query_bundle)
+        for node_postprocessor in self._node_postprocessors:
+            nodes = node_postprocessor.postprocess_nodes(
+                nodes, query_bundle=query_bundle
+            )
+
         return [self._object_node_mapping.from_node(node.node) for node in nodes]
 
     async def aretrieve(self, str_or_query_bundle: QueryType) -> List[OT]:
-        nodes = await self._retriever.aretrieve(str_or_query_bundle)
+        if isinstance(str_or_query_bundle, str):
+            query_bundle = QueryBundle(query_str=str_or_query_bundle)
+        else:
+            query_bundle = str_or_query_bundle
+
+        nodes = await self._retriever.aretrieve(query_bundle)
+        for node_postprocessor in self._node_postprocessors:
+            nodes = node_postprocessor.postprocess_nodes(
+                nodes, query_bundle=query_bundle
+            )
+
         return [self._object_node_mapping.from_node(node.node) for node in nodes]
 
     def _as_query_component(self, **kwargs: Any) -> QueryComponent:
@@ -105,29 +140,75 @@ class ObjectIndex(Generic[OT]):
         self._index = index
         self._object_node_mapping = object_node_mapping
 
+    @property
+    def index(self) -> BaseIndex:
+        """Index."""
+        return self._index
+
+    @property
+    def object_node_mapping(self) -> BaseObjectNodeMapping:
+        """Object node mapping."""
+        return self._object_node_mapping
+
     @classmethod
     def from_objects(
         cls,
         objects: Sequence[OT],
         object_mapping: Optional[BaseObjectNodeMapping] = None,
+        from_node_fn: Optional[Callable[[TextNode], OT]] = None,
+        to_node_fn: Optional[Callable[[OT], TextNode]] = None,
         index_cls: Type[BaseIndex] = VectorStoreIndex,
         **index_kwargs: Any,
     ) -> "ObjectIndex":
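+        """Build an ObjectIndex from a sequence of objects."""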
+        from llama_index.core.objects.utils import get_object_mapping
+
+        # pick the best mapping if not provided
         if object_mapping is None:
-            object_mapping = SimpleObjectNodeMapping.from_objects(objects)
+            object_mapping = get_object_mapping(
+                objects,
+                from_node_fn=from_node_fn,
+                to_node_fn=to_node_fn,
+            )
+
         nodes = object_mapping.to_nodes(objects)
         index = index_cls(nodes, **index_kwargs)
         return cls(index, object_mapping)
 
+    @classmethod
+    def from_objects_and_index(
+        cls,
+        objects: Sequence[OT],
+        index: BaseIndex,
+        object_mapping: Optional[BaseObjectNodeMapping] = None,
+        from_node_fn: Optional[Callable[[TextNode], OT]] = None,
+        to_node_fn: Optional[Callable[[OT], TextNode]] = None,
+    ) -> "ObjectIndex":
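+        """Build an ObjectIndex from objects and an existing index."""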
+        from llama_index.core.objects.utils import get_object_mapping
+
+        # pick the best mapping if not provided
+        if object_mapping is None:
+            object_mapping = get_object_mapping(
+                objects,
+                from_node_fn=from_node_fn,
+                to_node_fn=to_node_fn,
+            )
+
+        return cls(index, object_mapping)
+
     def insert_object(self, obj: Any) -> None:
         self._object_node_mapping.add_object(obj)
         node = self._object_node_mapping.to_node(obj)
         self._index.insert_nodes([node])
 
-    def as_retriever(self, **kwargs: Any) -> ObjectRetriever:
+    def as_retriever(
+        self,
+        node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
+        **kwargs: Any,
+    ) -> ObjectRetriever:
         return ObjectRetriever(
             retriever=self._index.as_retriever(**kwargs),
             object_node_mapping=self._object_node_mapping,
+            node_postprocessors=node_postprocessors,
         )
 
     def as_node_retriever(self, **kwargs: Any) -> BaseRetriever:
diff --git a/llama-index-core/llama_index/core/objects/base_node_mapping.py b/llama-index-core/llama_index/core/objects/base_node_mapping.py
index 3a6f5afcbc6679e05bf3bfb4a080f009f6a5956e..eb4bfab6f818eb9fdd9862f236e1abd386351c5f 100644
--- a/llama-index-core/llama_index/core/objects/base_node_mapping.py
+++ b/llama-index-core/llama_index/core/objects/base_node_mapping.py
@@ -137,7 +137,7 @@ class SimpleObjectNodeMapping(BaseObjectNodeMapping[Any]):
         self._objs[hash(str(obj))] = obj
 
     def to_node(self, obj: Any) -> TextNode:
-        return TextNode(text=str(obj))
+        return TextNode(id_=str(hash(str(obj))), text=str(obj))
 
     def _from_node(self, node: BaseNode) -> Any:
         return self._objs[hash(node.get_content(metadata_mode=MetadataMode.NONE))]
diff --git a/llama-index-core/llama_index/core/objects/fn_node_mapping.py b/llama-index-core/llama_index/core/objects/fn_node_mapping.py
new file mode 100644
index 0000000000000000000000000000000000000000..41dd46ac682bdb9847b84a813082b8da83e92dd2
--- /dev/null
+++ b/llama-index-core/llama_index/core/objects/fn_node_mapping.py
@@ -0,0 +1,64 @@
+"""Function node mapping."""
+
+from typing import Any, Callable, Dict, Sequence
+
+from llama_index.core.objects.base_node_mapping import (
+    DEFAULT_PERSIST_DIR,
+    DEFAULT_PERSIST_FNAME,
+    BaseObjectNodeMapping,
+)
+from llama_index.core.schema import TextNode
+
+
+class FnNodeMapping(BaseObjectNodeMapping[Any]):
+    """Function-based node mapping, driven by user-provided conversion functions."""
+
+    def __init__(
+        self,
+        from_node_fn: Callable[[TextNode], Any],
+        to_node_fn: Callable[[Any], TextNode],
+    ) -> None:
+        self._to_node_fn = to_node_fn
+        self._from_node_fn = from_node_fn
+
+    @classmethod
+    def from_objects(
+        cls,
+        objs: Sequence[Any],
+        from_node_fn: Callable[[TextNode], Any],
+        to_node_fn: Callable[[Any], TextNode],
+        *args: Any,
+        **kwargs: Any,
+    ) -> "BaseObjectNodeMapping":
+        """Initialize node mapping."""
+        return cls(from_node_fn, to_node_fn)
+
+    def _add_object(self, obj: Any) -> None:
+        """Add object. NOTE: unused."""
+
+    def to_node(self, obj: Any) -> TextNode:
+        """To node."""
+        return self._to_node_fn(obj)
+
+    def _from_node(self, node: TextNode) -> Any:
+        """From node."""
+        return self._from_node_fn(node)
+
+    @property
+    def obj_node_mapping(self) -> Dict[int, Any]:
+        """The mapping data structure between node and object."""
+        raise NotImplementedError("FnNodeMapping does not support obj_node_mapping")
+
+    def persist(
+        self,
+        persist_dir: str = DEFAULT_PERSIST_DIR,
+        obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME,
+    ) -> None:
+        """Persist objects."""
+        raise NotImplementedError("FnNodeMapping does not support persisting.")
+
+    @classmethod
+    def from_persist_dir(
+        cls,
+        persist_dir: str = DEFAULT_PERSIST_DIR,
+        obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME,
+    ) -> "FnNodeMapping":
+        raise NotImplementedError("FnNodeMapping does not support loading from disk.")
diff --git a/llama-index-core/llama_index/core/objects/table_node_mapping.py b/llama-index-core/llama_index/core/objects/table_node_mapping.py
index 0b6ec0faecb25ab278b9ff2814c6e19196e002bc..8fdb295f8c33205d8f3b6e68a487cd0ab607a368 100644
--- a/llama-index-core/llama_index/core/objects/table_node_mapping.py
+++ b/llama-index-core/llama_index/core/objects/table_node_mapping.py
@@ -57,7 +57,10 @@ class SQLTableNodeMapping(BaseObjectNodeMapping[SQLTableSchema]):
             table_text += obj.context_str
             metadata["context"] = obj.context_str
 
+        table_identity = f"{obj.table_name}{obj.context_str}"
+
         return TextNode(
+            id_=str(hash(table_identity)),
             text=table_text,
             metadata=metadata,
             excluded_embed_metadata_keys=["name", "context"],
diff --git a/llama-index-core/llama_index/core/objects/tool_node_mapping.py b/llama-index-core/llama_index/core/objects/tool_node_mapping.py
index aac117e5f9eed66a8d262c44f047f05bc3f78df2..3f41bc3b77a8d03277c30d1bfb4fb2e52b906b4c 100644
--- a/llama-index-core/llama_index/core/objects/tool_node_mapping.py
+++ b/llama-index-core/llama_index/core/objects/tool_node_mapping.py
@@ -20,7 +20,13 @@ def convert_tool_to_node(tool: BaseTool) -> TextNode:
     )
     if tool.metadata.fn_schema is not None:
         node_text += f"Tool schema: {tool.metadata.fn_schema.schema()}\n"
+
+    tool_identity = (
+        f"{tool.metadata.name}{tool.metadata.description}{tool.metadata.fn_schema}"
+    )
+
     return TextNode(
+        id_=str(hash(tool_identity)),
         text=node_text,
         metadata={"name": tool.metadata.name},
         excluded_embed_metadata_keys=["name"],
diff --git a/llama-index-core/llama_index/core/objects/utils.py b/llama-index-core/llama_index/core/objects/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb83e77609c520f5ea03afbf7a59cd521d3655bc
--- /dev/null
+++ b/llama-index-core/llama_index/core/objects/utils.py
@@ -0,0 +1,22 @@
+from typing import Any, Callable, Optional, Sequence
+
+from llama_index.core.tools import BaseTool
+from llama_index.core.objects.base_node_mapping import (
+    BaseObjectNodeMapping,
+    SimpleObjectNodeMapping,
+)
+from llama_index.core.objects.fn_node_mapping import FnNodeMapping
+from llama_index.core.objects.tool_node_mapping import SimpleToolNodeMapping
+from llama_index.core.schema import TextNode
+
+
+def get_object_mapping(
+    objects: Sequence[Any],
+    from_node_fn: Optional[Callable[[TextNode], Any]] = None,
+    to_node_fn: Optional[Callable[[Any], TextNode]] = None,
+) -> BaseObjectNodeMapping:
+    """Pick the best object-node mapping for the given objects."""
+    if from_node_fn is not None and to_node_fn is not None:
+        return FnNodeMapping.from_objects(objects, from_node_fn, to_node_fn)
+    elif all(isinstance(obj, BaseTool) for obj in objects):
+        return SimpleToolNodeMapping.from_objects(objects)
+    else:
+        return SimpleObjectNodeMapping.from_objects(objects)
diff --git a/llama-index-core/tests/objects/test_base.py b/llama-index-core/tests/objects/test_base.py
index 75e86261a4fa81a447265ce3626f943821c8d496..ca2f96b3442a94b893e472ae0e26c0c2080f0c1f 100644
--- a/llama-index-core/tests/objects/test_base.py
+++ b/llama-index-core/tests/objects/test_base.py
@@ -6,6 +6,7 @@ from llama_index.core.objects.base_node_mapping import SimpleObjectNodeMapping
 from llama_index.core.objects.tool_node_mapping import SimpleToolNodeMapping
 from llama_index.core.service_context import ServiceContext
 from llama_index.core.tools.function_tool import FunctionTool
+from llama_index.core.schema import TextNode
 
 
 def test_object_index(mock_service_context: ServiceContext) -> None:
@@ -22,6 +23,43 @@ def test_object_index(mock_service_context: ServiceContext) -> None:
     assert obj_index.as_retriever().retrieve("test") == ["a", "b", "c", "d"]
 
 
+def test_object_index_default_mapping(mock_service_context: ServiceContext) -> None:
+    """Test object index."""
+    """Test object index with the default object mapping."""
+    # should just retrieve everything
+    assert obj_index.as_retriever().retrieve("test") == ["a", "b", "c"]
+
+    # test adding an object
+    obj_index.insert_object("d")
+    assert obj_index.as_retriever().retrieve("test") == ["a", "b", "c", "d"]
+
+
+def test_object_index_fn_mapping(mock_service_context: ServiceContext) -> None:
+    """Test object index with function-based node mappings."""
+    objects = {obj: obj for obj in ["a", "b", "c", "d"]}
+
+    def to_node_fn(obj: str) -> TextNode:
+        return TextNode(id_=obj, text=obj)
+
+    def from_node_fn(node: TextNode) -> str:
+        return objects[node.id_]
+
+    obj_index = ObjectIndex.from_objects(
+        ["a", "b", "c"],
+        index_cls=SummaryIndex,
+        from_node_fn=from_node_fn,
+        to_node_fn=to_node_fn,
+    )
+
+    # should just retrieve everything
+    assert obj_index.as_retriever().retrieve("test") == ["a", "b", "c"]
+
+    # test adding an object
+    obj_index.insert_object("d")
+    assert obj_index.as_retriever().retrieve("test") == ["a", "b", "c", "d"]
+
+
 def test_object_index_persist(mock_service_context: ServiceContext) -> None:
     """Test object index persist/load."""
     object_mapping = SimpleObjectNodeMapping.from_objects(["a", "b", "c"])