diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index afde6f03db2bf81ccf15189432e2d220eadd0404..68e9994d2ae6c3925398103443f6f50dccff33f8 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -16,15 +16,25 @@ repos:
       - id: mixed-line-ending
       - id: trailing-whitespace
   - repo: https://github.com/charliermarsh/ruff-pre-commit
-    rev: v0.0.292
+    rev: v0.1.0
 
     hooks:
       - id: ruff
         args: [--fix, --exit-non-zero-on-fix]
   - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: 23.9.1
+    rev: 23.10.0
     hooks:
       - id: black-jupyter
+        name: black-src
+        exclude: docs/
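+  # Run black twice: black-src skips docs/, black-docs formats docs/ at 79 columns.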
+  - repo: https://github.com/psf/black-pre-commit-mirror
+    rev: 23.10.0
+    hooks:
+      - id: black-jupyter
+        name: black-docs
+        files: docs/
+        args: [--line-length=79]
   - repo: https://github.com/pappasam/toml-sort
     rev: v0.23.1
     hooks:
diff --git a/Makefile b/Makefile
index bd2936e223ef1eabff0f25485413f0df4e958cf2..5c9a853807fc4d4331004c1cc1acd446ff88f7b7 100644
--- a/Makefile
+++ b/Makefile
@@ -4,7 +4,9 @@ help:	## Show all Makefile targets.
 	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}'
 
 format:	## Run code autoformatters (black).
-	black .
+	pre-commit install
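+# 'pre-commit run black-jupyter' invokes every hook with that id: black-src and black-docs.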
+	pre-commit run black-jupyter --all-files
 
 lint:	## Run linters: pre-commit (black, ruff, codespell) and mypy
 	pre-commit install && pre-commit run --all-files
diff --git a/docs/core_modules/data_modules/index/index_progress_bars.ipynb b/docs/core_modules/data_modules/index/index_progress_bars.ipynb
index 0d99aa71a2699d53ac5462c7df58c25f87698b01..70891b3690414383697d4d4f442a45d27a3c017e 100644
--- a/docs/core_modules/data_modules/index/index_progress_bars.ipynb
+++ b/docs/core_modules/data_modules/index/index_progress_bars.ipynb
@@ -69,7 +69,9 @@
    "outputs": [],
    "source": [
     "# Load documents\n",
-    "documents = SimpleDirectoryReader(\"../../../examples/data/paul_graham\").load_data()"
+    "documents = SimpleDirectoryReader(\n",
+    "    \"../../../examples/data/paul_graham\"\n",
+    ").load_data()"
    ]
   },
   {
@@ -226,11 +228,15 @@
    "source": [
     "llm_chatgpt = OpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
     "\n",
-    "service_context = ServiceContext.from_defaults(llm=llm_chatgpt, chunk_size=1024)\n",
+    "service_context = ServiceContext.from_defaults(\n",
+    "    llm=llm_chatgpt, chunk_size=1024\n",
+    ")\n",
     "\n",
     "print(\"\\nDocumentSummaryIndex with show_progress=True\\n\")\n",
     "response_synthesizer = get_response_synthesizer(\n",
-    "    response_mode=\"tree_summarize\", use_async=True, service_context=service_context\n",
+    "    response_mode=\"tree_summarize\",\n",
+    "    use_async=True,\n",
+    "    service_context=service_context,\n",
     ")\n",
     "DocumentSummaryIndex.from_documents(\n",
     "    documents,\n",
@@ -556,16 +562,24 @@
     "llm = MockLLM(max_tokens=256)\n",
     "service_context = ServiceContext.from_defaults(llm=llm)\n",
     "TreeIndex.from_documents(\n",
-    "    documents, service_context=service_context, show_progress=True, use_async=True\n",
+    "    documents,\n",
+    "    service_context=service_context,\n",
+    "    show_progress=True,\n",
+    "    use_async=True,\n",
     ")\n",
     "\n",
     "print(\"\\nTreeIndex with show_progress=True, use_async=False\\n\")\n",
     "TreeIndex.from_documents(\n",
-    "    documents, service_context=service_context, show_progress=True, use_async=False\n",
+    "    documents,\n",
+    "    service_context=service_context,\n",
+    "    show_progress=True,\n",
+    "    use_async=False,\n",
     ")\n",
     "\n",
     "print(\"\\nTreeIndex with show_progress=False, use_async=True\\n\")\n",
-    "TreeIndex.from_documents(documents, service_context=service_context, use_async=True)\n",
+    "TreeIndex.from_documents(\n",
+    "    documents, service_context=service_context, use_async=True\n",
+    ")\n",
     "\n",
     "print(\"\\nTreeIndex with show_progress=False, use_async=False\\n\")\n",
     "TreeIndex.from_documents(documents, service_context=service_context)"
diff --git a/docs/core_modules/data_modules/index/vector_store_guide.ipynb b/docs/core_modules/data_modules/index/vector_store_guide.ipynb
index a55f9e7f4ee199276f44d2271be922448463c9d3..c76ed500042d3e473a4a9e51dc350a082248c44f 100644
--- a/docs/core_modules/data_modules/index/vector_store_guide.ipynb
+++ b/docs/core_modules/data_modules/index/vector_store_guide.ipynb
@@ -36,7 +36,9 @@
     "from llama_index import VectorStoreIndex, SimpleDirectoryReader\n",
     "\n",
     "# Load documents and build index\n",
-    "documents = SimpleDirectoryReader(\"../../examples/data/paul_graham\").load_data()\n",
+    "documents = SimpleDirectoryReader(\n",
+    "    \"../../examples/data/paul_graham\"\n",
+    ").load_data()\n",
     "index = VectorStoreIndex.from_documents(documents)"
    ]
   },
@@ -63,7 +65,9 @@
     "\n",
     "# init pinecone\n",
     "pinecone.init(api_key=\"<api_key>\", environment=\"<environment>\")\n",
-    "pinecone.create_index(\"quickstart\", dimension=1536, metric=\"euclidean\", pod_type=\"p1\")\n",
+    "pinecone.create_index(\n",
+    "    \"quickstart\", dimension=1536, metric=\"euclidean\", pod_type=\"p1\"\n",
+    ")\n",
     "\n",
     "# construct vector store and customize storage context\n",
     "storage_context = StorageContext.from_defaults(\n",
@@ -71,8 +75,12 @@
     ")\n",
     "\n",
     "# Load documents and build index\n",
-    "documents = SimpleDirectoryReader(\"../../examples/data/paul_graham\").load_data()\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "documents = SimpleDirectoryReader(\n",
+    "    \"../../examples/data/paul_graham\"\n",
+    ").load_data()\n",
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -191,7 +199,9 @@
    "source": [
     "from llama_index import get_response_synthesizer\n",
     "from llama_index.indices.vector_store.retrievers import VectorIndexRetriever\n",
-    "from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine\n",
+    "from llama_index.query_engine.retriever_query_engine import (\n",
+    "    RetrieverQueryEngine,\n",
+    ")\n",
     "\n",
     "# build retriever\n",
     "retriever = VectorIndexRetriever(\n",
@@ -256,8 +266,12 @@
    "outputs": [],
    "source": [
     "from llama_index import get_response_synthesizer\n",
-    "from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever\n",
-    "from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine\n",
+    "from llama_index.indices.vector_store.retrievers import (\n",
+    "    VectorIndexAutoRetriever,\n",
+    ")\n",
+    "from llama_index.query_engine.retriever_query_engine import (\n",
+    "    RetrieverQueryEngine,\n",
+    ")\n",
     "from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n",
     "\n",
     "\n",
@@ -278,7 +292,9 @@
     ")\n",
     "\n",
     "# build retriever\n",
-    "retriever = VectorIndexAutoRetriever(index, vector_store_info=vector_store_info)\n",
+    "retriever = VectorIndexAutoRetriever(\n",
+    "    index, vector_store_info=vector_store_info\n",
+    ")\n",
     "\n",
     "# build query engine\n",
     "query_engine = RetrieverQueryEngine(\n",
@@ -286,7 +302,9 @@
     ")\n",
     "\n",
     "# query\n",
-    "response = query_engine.query(\"Tell me about two celebrities from United States\")"
+    "response = query_engine.query(\n",
+    "    \"Tell me about two celebrities from United States\"\n",
+    ")"
    ]
   }
  ],
diff --git a/docs/end_to_end_tutorials/structured_data/Airbyte_demo.ipynb b/docs/end_to_end_tutorials/structured_data/Airbyte_demo.ipynb
index 8cf3d9617f015545cb9a782409a8125ada61f73a..880480d0de14bc6e440aa1d62b2eca99b0e79391 100644
--- a/docs/end_to_end_tutorials/structured_data/Airbyte_demo.ipynb
+++ b/docs/end_to_end_tutorials/structured_data/Airbyte_demo.ipynb
@@ -311,7 +311,9 @@
     "\n",
     "    import snowflake.sqlalchemy.snowdialect\n",
     "\n",
-    "    snowflake.sqlalchemy.snowdialect.SnowflakeDialect.returns_unicode_strings = True\n",
+    "    snowflake.sqlalchemy.snowdialect.SnowflakeDialect.returns_unicode_strings = (\n",
+    "        True\n",
+    "    )\n",
     "\n",
     "    # make has_table() support the `info_cache` kwarg\n",
     "    import snowflake.sqlalchemy.snowdialect\n",
@@ -474,9 +476,7 @@
     "    sql_database=sql_database,\n",
     "    tables=[\"github_issues\", \"github_comments\", \"github_users\"],\n",
     ")\n",
-    "query_str = (\n",
-    "    \"Which issues have the most comments? Give the top 10 and use a join on url.\"\n",
-    ")\n",
+    "query_str = \"Which issues have the most comments? Give the top 10 and use a join on url.\"\n",
     "response = query_engine.query(query_str)\n",
     "display(Markdown(f\"<b>{response}</b>\"))"
    ]
@@ -606,8 +606,14 @@
     }
    ],
    "source": [
-    "from llama_index.indices.struct_store.sql_query import SQLTableRetrieverQueryEngine\n",
-    "from llama_index.objects import SQLTableNodeMapping, ObjectIndex, SQLTableSchema\n",
+    "from llama_index.indices.struct_store.sql_query import (\n",
+    "    SQLTableRetrieverQueryEngine,\n",
+    ")\n",
+    "from llama_index.objects import (\n",
+    "    SQLTableNodeMapping,\n",
+    "    ObjectIndex,\n",
+    "    SQLTableSchema,\n",
+    ")\n",
     "from llama_index import VectorStoreIndex\n",
     "\n",
     "table_node_mapping = SQLTableNodeMapping(sql_database)\n",
diff --git a/docs/examples/agent/Chatbot_SEC.ipynb b/docs/examples/agent/Chatbot_SEC.ipynb
index 1433f9e4bcbf26693341a0e34c72772330356da9..0d8ecac7e66d52cfcf4294e3151911b23d97444e 100644
--- a/docs/examples/agent/Chatbot_SEC.ipynb
+++ b/docs/examples/agent/Chatbot_SEC.ipynb
@@ -438,7 +438,9 @@
     "\n",
     "index_set = {}\n",
     "for year in years:\n",
-    "    storage_context = StorageContext.from_defaults(persist_dir=f\"./storage/{year}\")\n",
+    "    storage_context = StorageContext.from_defaults(\n",
+    "        persist_dir=f\"./storage/{year}\"\n",
+    "    )\n",
     "    cur_index = load_index_from_storage(\n",
     "        storage_context, service_context=service_context\n",
     "    )\n",
@@ -494,7 +496,10 @@
     "        query_engine=index_set[year].as_query_engine(),\n",
     "        metadata=ToolMetadata(\n",
     "            name=f\"vector_index_{year}\",\n",
-    "            description=f\"useful for when you want to answer queries about the {year} SEC 10-K for Uber\",\n",
+    "            description=(\n",
+    "                \"useful for when you want to answer queries about the\"\n",
+    "                f\" {year} SEC 10-K for Uber\"\n",
+    "            ),\n",
     "        ),\n",
     "    )\n",
     "    for year in years\n",
@@ -587,7 +592,10 @@
     "    query_engine=query_engine,\n",
     "    metadata=ToolMetadata(\n",
     "        name=\"sub_question_query_engine\",\n",
-    "        description=\"useful for when you want to answer queries that require analyzing multiple SEC 10-K documents for Uber\",\n",
+    "        description=(\n",
+    "            \"useful for when you want to answer queries that require analyzing\"\n",
+    "            \" multiple SEC 10-K documents for Uber\"\n",
+    "        ),\n",
     "    ),\n",
     ")"
    ]
@@ -788,7 +796,9 @@
     }
    ],
    "source": [
-    "response = agent.chat(\"What were some of the biggest risk factors in 2020 for Uber?\")\n",
+    "response = agent.chat(\n",
+    "    \"What were some of the biggest risk factors in 2020 for Uber?\"\n",
+    ")\n",
     "print(str(response))"
    ]
   },
@@ -887,7 +897,10 @@
     }
    ],
    "source": [
-    "cross_query_str = \"Compare/contrast the risk factors described in the Uber 10-K across years. Give answer in bullet points.\"\n",
+    "cross_query_str = (\n",
+    "    \"Compare/contrast the risk factors described in the Uber 10-K across\"\n",
+    "    \" years. Give answer in bullet points.\"\n",
+    ")\n",
     "\n",
     "response = agent.chat(cross_query_str)\n",
     "print(str(response))"
diff --git a/docs/examples/agent/multi_document_agents-v1.ipynb b/docs/examples/agent/multi_document_agents-v1.ipynb
index ca358336d2c5d8d1e8f8b80e7fb80d6719cf98ec..95564d643156f19940e1d6781e9dc5c585986220 100644
--- a/docs/examples/agent/multi_document_agents-v1.ipynb
+++ b/docs/examples/agent/multi_document_agents-v1.ipynb
@@ -263,7 +263,9 @@
     "\n",
     "    # define query engines\n",
     "    vector_query_engine = vector_index.as_query_engine()\n",
-    "    summary_query_engine = summary_index.as_query_engine(response_mode=\"tree_summarize\")\n",
+    "    summary_query_engine = summary_index.as_query_engine(\n",
+    "        response_mode=\"tree_summarize\"\n",
+    "    )\n",
     "\n",
     "    # extract a summary\n",
     "    if not os.path.exists(summary_out_path):\n",
@@ -410,7 +412,11 @@
    "source": [
     "# define an \"object\" index and retriever over these tools\n",
     "from llama_index import VectorStoreIndex\n",
-    "from llama_index.objects import ObjectIndex, SimpleToolNodeMapping, ObjectRetriever\n",
+    "from llama_index.objects import (\n",
+    "    ObjectIndex,\n",
+    "    SimpleToolNodeMapping,\n",
+    "    ObjectRetriever,\n",
+    ")\n",
     "from llama_index.retrievers import BaseRetriever\n",
     "from llama_index.indices.postprocessor import CohereRerank\n",
     "from llama_index.tools import QueryPlanTool\n",
@@ -557,7 +563,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "all_nodes = [n for extra_info in extra_info_dict.values() for n in extra_info[\"nodes\"]]"
+    "all_nodes = [\n",
+    "    n for extra_info in extra_info_dict.values() for n in extra_info[\"nodes\"]\n",
+    "]"
    ]
   },
   {
diff --git a/docs/examples/agent/multi_document_agents.ipynb b/docs/examples/agent/multi_document_agents.ipynb
index 6e224eec69ac4aa86777639b287bc62da63381c2..366fd19a433465d9b8fa7ea3959d52a4c78204f4 100644
--- a/docs/examples/agent/multi_document_agents.ipynb
+++ b/docs/examples/agent/multi_document_agents.ipynb
@@ -204,7 +204,9 @@
     "    if not os.path.exists(f\"./data/{wiki_title}\"):\n",
     "        # build vector index\n",
     "        vector_index = VectorStoreIndex(nodes, service_context=service_context)\n",
-    "        vector_index.storage_context.persist(persist_dir=f\"./data/{wiki_title}\")\n",
+    "        vector_index.storage_context.persist(\n",
+    "            persist_dir=f\"./data/{wiki_title}\"\n",
+    "        )\n",
     "    else:\n",
     "        vector_index = load_index_from_storage(\n",
     "            StorageContext.from_defaults(persist_dir=f\"./data/{wiki_title}\"),\n",
@@ -223,14 +225,22 @@
     "            query_engine=vector_query_engine,\n",
     "            metadata=ToolMetadata(\n",
     "                name=\"vector_tool\",\n",
-    "                description=f\"Useful for questions related to specific aspects of {wiki_title} (e.g. the history, arts and culture, sports, demographics, or more).\",\n",
+    "                description=(\n",
+    "                    \"Useful for questions related to specific aspects of\"\n",
+    "                    f\" {wiki_title} (e.g. the history, arts and culture,\"\n",
+    "                    \" sports, demographics, or more).\"\n",
+    "                ),\n",
     "            ),\n",
     "        ),\n",
     "        QueryEngineTool(\n",
     "            query_engine=summary_query_engine,\n",
     "            metadata=ToolMetadata(\n",
     "                name=\"summary_tool\",\n",
-    "                description=f\"Useful for any requests that require a holistic summary of EVERYTHING about {wiki_title}. For questions about more specific sections, please use the vector_tool.\",\n",
+    "                description=(\n",
+    "                    \"Useful for any requests that require a holistic summary\"\n",
+    "                    f\" of EVERYTHING about {wiki_title}. For questions about\"\n",
+    "                    \" more specific sections, please use the vector_tool.\"\n",
+    "                ),\n",
     "            ),\n",
     "        ),\n",
     "    ]\n",
@@ -248,7 +258,9 @@
     "    )\n",
     "\n",
     "    agents[wiki_title] = agent\n",
-    "    query_engines[wiki_title] = vector_index.as_query_engine(similarity_top_k=2)"
+    "    query_engines[wiki_title] = vector_index.as_query_engine(\n",
+    "        similarity_top_k=2\n",
+    "    )"
    ]
   },
   {
@@ -276,8 +288,8 @@
     "all_tools = []\n",
     "for wiki_title in wiki_titles:\n",
     "    wiki_summary = (\n",
-    "        f\"This content contains Wikipedia articles about {wiki_title}. \"\n",
-    "        f\"Use this tool if you want to answer any questions about {wiki_title}.\\n\"\n",
+    "        f\"This content contains Wikipedia articles about {wiki_title}. Use\"\n",
+    "        f\" this tool if you want to answer any questions about {wiki_title}.\\n\"\n",
     "    )\n",
     "    doc_tool = QueryEngineTool(\n",
     "        query_engine=agents[wiki_title],\n",
@@ -433,7 +445,9 @@
    ],
    "source": [
     "# baseline\n",
-    "response = base_query_engine.query(\"Tell me about the arts and culture in Boston\")\n",
+    "response = base_query_engine.query(\n",
+    "    \"Tell me about the arts and culture in Boston\"\n",
+    ")\n",
     "print(str(response))"
    ]
   },
@@ -476,7 +490,9 @@
    ],
    "source": [
     "# should use Houston agent -> vector tool\n",
-    "response = top_agent.query(\"Give me a summary of all the positive aspects of Houston\")"
+    "response = top_agent.query(\n",
+    "    \"Give me a summary of all the positive aspects of Houston\"\n",
+    ")"
    ]
   },
   {
@@ -587,7 +603,8 @@
    ],
    "source": [
     "response = top_agent.query(\n",
-    "    \"Tell the demographics of Houston, and then compare that with the demographics of Chicago\"\n",
+    "    \"Tell the demographics of Houston, and then compare that with the\"\n",
+    "    \" demographics of Chicago\"\n",
     ")"
    ]
   },
@@ -634,7 +651,8 @@
    "source": [
     "# baseline\n",
     "response = base_query_engine.query(\n",
-    "    \"Tell the demographics of Houston, and then compare that with the demographics of Chicago\"\n",
+    "    \"Tell the demographics of Houston, and then compare that with the\"\n",
+    "    \" demographics of Chicago\"\n",
     ")\n",
     "print(str(response))"
    ]
@@ -727,7 +745,8 @@
    ],
    "source": [
     "response = top_agent.query(\n",
-    "    \"Tell me the differences between Shanghai and Beijing in terms of history and current economy\"\n",
+    "    \"Tell me the differences between Shanghai and Beijing in terms of history\"\n",
+    "    \" and current economy\"\n",
     ")"
    ]
   },
@@ -776,7 +795,8 @@
    "source": [
     "# baseline\n",
     "response = base_query_engine.query(\n",
-    "    \"Tell me the differences between Shanghai and Beijing in terms of history and current economy\"\n",
+    "    \"Tell me the differences between Shanghai and Beijing in terms of history\"\n",
+    "    \" and current economy\"\n",
     ")\n",
     "print(str(response))"
    ]
diff --git a/docs/examples/agent/openai_agent.ipynb b/docs/examples/agent/openai_agent.ipynb
index 4969ed0e8673768161382c39d3c881a133f56ab0..86554b27f2c6db8ec66074a2b7fd9d3c1303df00 100644
--- a/docs/examples/agent/openai_agent.ipynb
+++ b/docs/examples/agent/openai_agent.ipynb
@@ -143,7 +143,8 @@
     "        chat_history = self._chat_history\n",
     "        chat_history.append(ChatMessage(role=\"user\", content=message))\n",
     "        functions = [\n",
-    "            tool.metadata.to_openai_function() for _, tool in self._tools.items()\n",
+    "            tool.metadata.to_openai_function()\n",
+    "            for _, tool in self._tools.items()\n",
     "        ]\n",
     "\n",
     "        ai_message = self._llm.chat(chat_history, functions=functions).message\n",
@@ -271,7 +272,9 @@
    "outputs": [],
    "source": [
     "llm = OpenAI(model=\"gpt-3.5-turbo-0613\")\n",
-    "agent = OpenAIAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True)"
+    "agent = OpenAIAgent.from_tools(\n",
+    "    [multiply_tool, add_tool], llm=llm, verbose=True\n",
+    ")"
    ]
   },
   {
@@ -416,7 +419,8 @@
    ],
    "source": [
     "response = agent.stream_chat(\n",
-    "    \"What is 121 * 2? Once you have the answer, use that number to write a story about a group of mice.\"\n",
+    "    \"What is 121 * 2? Once you have the answer, use that number to write a\"\n",
+    "    \" story about a group of mice.\"\n",
     ")\n",
     "\n",
     "response_gen = response.response_gen\n",
@@ -474,7 +478,8 @@
    ],
    "source": [
     "response = await agent.astream_chat(\n",
-    "    \"What is 121 + 8? Once you have the answer, use that number to write a story about a group of mice.\"\n",
+    "    \"What is 121 + 8? Once you have the answer, use that number to write a\"\n",
+    "    \" story about a group of mice.\"\n",
     ")\n",
     "\n",
     "response_gen = response.response_gen\n",
diff --git a/docs/examples/agent/openai_agent_context_retrieval.ipynb b/docs/examples/agent/openai_agent_context_retrieval.ipynb
index 0a8518d4c90b9685e906302fd6dac86417833246..795897da7fa49da0835d78855a368ca1500d23d9 100644
--- a/docs/examples/agent/openai_agent_context_retrieval.ipynb
+++ b/docs/examples/agent/openai_agent_context_retrieval.ipynb
@@ -64,13 +64,19 @@
    "outputs": [],
    "source": [
     "try:\n",
-    "    storage_context = StorageContext.from_defaults(persist_dir=\"./storage/march\")\n",
+    "    storage_context = StorageContext.from_defaults(\n",
+    "        persist_dir=\"./storage/march\"\n",
+    "    )\n",
     "    march_index = load_index_from_storage(storage_context)\n",
     "\n",
-    "    storage_context = StorageContext.from_defaults(persist_dir=\"./storage/june\")\n",
+    "    storage_context = StorageContext.from_defaults(\n",
+    "        persist_dir=\"./storage/june\"\n",
+    "    )\n",
     "    june_index = load_index_from_storage(storage_context)\n",
     "\n",
-    "    storage_context = StorageContext.from_defaults(persist_dir=\"./storage/sept\")\n",
+    "    storage_context = StorageContext.from_defaults(\n",
+    "        persist_dir=\"./storage/sept\"\n",
+    "    )\n",
     "    sept_index = load_index_from_storage(storage_context)\n",
     "\n",
     "    index_loaded = True\n",
@@ -134,24 +140,30 @@
     "        query_engine=march_engine,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"uber_march_10q\",\n",
-    "            description=\"Provides information about Uber 10Q filings for March 2022. \"\n",
-    "            \"Use a detailed plain text question as input to the tool.\",\n",
+    "            description=(\n",
+    "                \"Provides information about Uber 10Q filings for March 2022. \"\n",
+    "                \"Use a detailed plain text question as input to the tool.\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "    QueryEngineTool(\n",
     "        query_engine=june_engine,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"uber_june_10q\",\n",
-    "            description=\"Provides information about Uber financials for June 2021. \"\n",
-    "            \"Use a detailed plain text question as input to the tool.\",\n",
+    "            description=(\n",
+    "                \"Provides information about Uber financials for June 2021. \"\n",
+    "                \"Use a detailed plain text question as input to the tool.\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "    QueryEngineTool(\n",
     "        query_engine=sept_engine,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"uber_sept_10q\",\n",
-    "            description=\"Provides information about Uber financials for Sept 2021. \"\n",
-    "            \"Use a detailed plain text question as input to the tool.\",\n",
+    "            description=(\n",
+    "                \"Provides information about Uber financials for Sept 2021. \"\n",
+    "                \"Use a detailed plain text question as input to the tool.\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "]"
@@ -205,7 +217,9 @@
    "outputs": [],
    "source": [
     "context_agent = ContextRetrieverOpenAIAgent.from_tools_and_retriever(\n",
-    "    query_engine_tools, context_index.as_retriever(similarity_top_k=1), verbose=True\n",
+    "    query_engine_tools,\n",
+    "    context_index.as_retriever(similarity_top_k=1),\n",
+    "    verbose=True,\n",
     ")"
    ]
   },
@@ -490,7 +504,9 @@
     }
    ],
    "source": [
-    "response = context_agent.chat(\"Can you run MAGIC_FORMULA on Uber's revenue and cost?\")"
+    "response = context_agent.chat(\n",
+    "    \"Can you run MAGIC_FORMULA on Uber's revenue and cost?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/agent/openai_agent_query_cookbook.ipynb b/docs/examples/agent/openai_agent_query_cookbook.ipynb
index 98169787a39be359cab10b935c0a1777b70d96f4..42b0334a1bf16875ab86bc179118dfe9adaf4400 100644
--- a/docs/examples/agent/openai_agent_query_cookbook.ipynb
+++ b/docs/examples/agent/openai_agent_query_cookbook.ipynb
@@ -113,35 +113,56 @@
     "\n",
     "nodes = [\n",
     "    TextNode(\n",
-    "        text=\"Michael Jordan is a retired professional basketball player, widely regarded as one of the greatest basketball players of all time.\",\n",
+    "        text=(\n",
+    "            \"Michael Jordan is a retired professional basketball player,\"\n",
+    "            \" widely regarded as one of the greatest basketball players of all\"\n",
+    "            \" time.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Sports\",\n",
     "            \"country\": \"United States\",\n",
     "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Angelina Jolie is an American actress, filmmaker, and humanitarian. She has received numerous awards for her acting and is known for her philanthropic work.\",\n",
+    "        text=(\n",
+    "            \"Angelina Jolie is an American actress, filmmaker, and\"\n",
+    "            \" humanitarian. She has received numerous awards for her acting\"\n",
+    "            \" and is known for her philanthropic work.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Entertainment\",\n",
     "            \"country\": \"United States\",\n",
     "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Elon Musk is a business magnate, industrial designer, and engineer. He is the founder, CEO, and lead designer of SpaceX, Tesla, Inc., Neuralink, and The Boring Company.\",\n",
+    "        text=(\n",
+    "            \"Elon Musk is a business magnate, industrial designer, and\"\n",
+    "            \" engineer. He is the founder, CEO, and lead designer of SpaceX,\"\n",
+    "            \" Tesla, Inc., Neuralink, and The Boring Company.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Business\",\n",
     "            \"country\": \"United States\",\n",
     "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Rihanna is a Barbadian singer, actress, and businesswoman. She has achieved significant success in the music industry and is known for her versatile musical style.\",\n",
+    "        text=(\n",
+    "            \"Rihanna is a Barbadian singer, actress, and businesswoman. She\"\n",
+    "            \" has achieved significant success in the music industry and is\"\n",
+    "            \" known for her versatile musical style.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Music\",\n",
     "            \"country\": \"Barbados\",\n",
     "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Cristiano Ronaldo is a Portuguese professional footballer who is considered one of the greatest football players of all time. He has won numerous awards and set multiple records during his career.\",\n",
+    "        text=(\n",
+    "            \"Cristiano Ronaldo is a Portuguese professional footballer who is\"\n",
+    "            \" considered one of the greatest football players of all time. He\"\n",
+    "            \" has won numerous awards and set multiple records during his\"\n",
+    "            \" career.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Sports\",\n",
     "            \"country\": \"Portugal\",\n",
@@ -157,7 +178,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vector_store = PineconeVectorStore(pinecone_index=pinecone_index, namespace=\"test\")\n",
+    "vector_store = PineconeVectorStore(\n",
+    "    pinecone_index=pinecone_index, namespace=\"test\"\n",
+    ")\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)"
    ]
   },
@@ -224,12 +247,18 @@
     "        MetadataInfo(\n",
     "            name=\"category\",\n",
     "            type=\"str\",\n",
-    "            description=\"Category of the celebrity, one of [Sports, Entertainment, Business, Music]\",\n",
+    "            description=(\n",
+    "                \"Category of the celebrity, one of [Sports, Entertainment,\"\n",
+    "                \" Business, Music]\"\n",
+    "            ),\n",
     "        ),\n",
     "        MetadataInfo(\n",
     "            name=\"country\",\n",
     "            type=\"str\",\n",
-    "            description=\"Country of the celebrity, one of [United States, Barbados, Portugal]\",\n",
+    "            description=(\n",
+    "                \"Country of the celebrity, one of [United States, Barbados,\"\n",
+    "                \" Portugal]\"\n",
+    "            ),\n",
     "        ),\n",
     "    ],\n",
     ")\n",
@@ -244,7 +273,8 @@
     "    filter_value_list: List[str] = Field(\n",
     "        ...,\n",
     "        description=(\n",
-    "            \"List of metadata filter field values (corresponding to names specified in filter_key_list)\"\n",
+    "            \"List of metadata filter field values (corresponding to names\"\n",
+    "            \" specified in filter_key_list)\"\n",
     "        ),\n",
     "    )\n",
     "\n",
@@ -264,7 +294,9 @@
     "        for k, v in zip(filter_key_list, filter_value_list)\n",
     "    ]\n",
     "    retriever = VectorIndexRetriever(\n",
-    "        index, filters=MetadataFilters(filters=exact_match_filters), top_k=top_k\n",
+    "        index,\n",
+    "        filters=MetadataFilters(filters=exact_match_filters),\n",
+    "        top_k=top_k,\n",
     "    )\n",
     "    query_engine = RetrieverQueryEngine.from_args(retriever)\n",
     "\n",
@@ -306,7 +338,9 @@
     "from llama_index.llms import OpenAI\n",
     "\n",
     "agent = OpenAIAgent.from_tools(\n",
-    "    [auto_retrieve_tool], llm=OpenAI(temperature=0, model=\"gpt-4-0613\"), verbose=True\n",
+    "    [auto_retrieve_tool],\n",
+    "    llm=OpenAI(temperature=0, model=\"gpt-4-0613\"),\n",
+    "    verbose=True,\n",
     ")"
    ]
   },
@@ -549,7 +583,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index import WikipediaReader, SimpleDirectoryReader, VectorStoreIndex"
+    "from llama_index import (\n",
+    "    WikipediaReader,\n",
+    "    SimpleDirectoryReader,\n",
+    "    VectorStoreIndex,\n",
+    ")"
    ]
   },
   {
@@ -676,7 +714,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.query_engine import SQLAutoVectorQueryEngine, RetrieverQueryEngine\n",
+    "from llama_index.query_engine import (\n",
+    "    SQLAutoVectorQueryEngine,\n",
+    "    RetrieverQueryEngine,\n",
+    ")\n",
     "from llama_index.tools.query_engine import QueryEngineTool\n",
     "from llama_index.indices.vector_store import VectorIndexAutoRetriever"
    ]
@@ -688,15 +729,21 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever\n",
+    "from llama_index.indices.vector_store.retrievers import (\n",
+    "    VectorIndexAutoRetriever,\n",
+    ")\n",
     "from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n",
-    "from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine\n",
+    "from llama_index.query_engine.retriever_query_engine import (\n",
+    "    RetrieverQueryEngine,\n",
+    ")\n",
     "\n",
     "\n",
     "vector_store_info = VectorStoreInfo(\n",
     "    content_info=\"articles about different cities\",\n",
     "    metadata_info=[\n",
-    "        MetadataInfo(name=\"title\", type=\"str\", description=\"The name of the city\"),\n",
+    "        MetadataInfo(\n",
+    "            name=\"title\", type=\"str\", description=\"The name of the city\"\n",
+    "        ),\n",
     "    ],\n",
     ")\n",
     "vector_auto_retriever = VectorIndexAutoRetriever(\n",
@@ -719,14 +766,17 @@
     "    query_engine=query_engine,\n",
     "    name=\"sql_tool\",\n",
     "    description=(\n",
-    "        \"Useful for translating a natural language query into a SQL query over a table containing: \"\n",
-    "        \"city_stats, containing the population/country of each city\"\n",
+    "        \"Useful for translating a natural language query into a SQL query over\"\n",
+    "        \" a table containing: city_stats, containing the population/country of\"\n",
+    "        \" each city\"\n",
     "    ),\n",
     ")\n",
     "vector_tool = QueryEngineTool.from_defaults(\n",
     "    query_engine=retriever_query_engine,\n",
     "    name=\"vector_tool\",\n",
-    "    description=f\"Useful for answering semantic questions about different cities\",\n",
+    "    description=(\n",
+    "        f\"Useful for answering semantic questions about different cities\"\n",
+    "    ),\n",
     ")"
    ]
   },
@@ -750,7 +800,9 @@
     "from llama_index.llms import OpenAI\n",
     "\n",
     "agent = OpenAIAgent.from_tools(\n",
-    "    [sql_tool, vector_tool], llm=OpenAI(temperature=0, model=\"gpt-4-0613\"), verbose=True\n",
+    "    [sql_tool, vector_tool],\n",
+    "    llm=OpenAI(temperature=0, model=\"gpt-4-0613\"),\n",
+    "    verbose=True,\n",
     ")"
    ]
   },
@@ -793,7 +845,8 @@
    "source": [
     "# NOTE: gpt-3.5 gives the wrong answer, but gpt-4 is able to reason over both loops\n",
     "response = agent.chat(\n",
-    "    \"Tell me about the arts and culture of the city with the highest population\"\n",
+    "    \"Tell me about the arts and culture of the city with the highest\"\n",
+    "    \" population\"\n",
     ")\n",
     "print(str(response))"
    ]
@@ -866,7 +919,9 @@
     }
    ],
    "source": [
-    "response = agent.chat(\"Can you give me the country corresponding to each city?\")\n",
+    "response = agent.chat(\n",
+    "    \"Can you give me the country corresponding to each city?\"\n",
+    ")\n",
     "print(str(response))"
    ]
   }
diff --git a/docs/examples/agent/openai_agent_query_plan.ipynb b/docs/examples/agent/openai_agent_query_plan.ipynb
index f3ccddbed488713b9feed2d725bc59402fa6411a..cabc1fe15fb445cc342cc42ef87c0df0b93a3738 100644
--- a/docs/examples/agent/openai_agent_query_plan.ipynb
+++ b/docs/examples/agent/openai_agent_query_plan.ipynb
@@ -165,17 +165,26 @@
     "query_tool_sept = QueryEngineTool.from_defaults(\n",
     "    query_engine=sept_engine,\n",
     "    name=\"sept_2022\",\n",
-    "    description=f\"Provides information about Uber quarterly financials ending September 2022\",\n",
+    "    description=(\n",
+    "        f\"Provides information about Uber quarterly financials ending\"\n",
+    "        f\" September 2022\"\n",
+    "    ),\n",
     ")\n",
     "query_tool_june = QueryEngineTool.from_defaults(\n",
     "    query_engine=june_engine,\n",
     "    name=\"june_2022\",\n",
-    "    description=f\"Provides information about Uber quarterly financials ending June 2022\",\n",
+    "    description=(\n",
+    "        f\"Provides information about Uber quarterly financials ending June\"\n",
+    "        f\" 2022\"\n",
+    "    ),\n",
     ")\n",
     "query_tool_march = QueryEngineTool.from_defaults(\n",
     "    query_engine=march_engine,\n",
     "    name=\"march_2022\",\n",
-    "    description=f\"Provides information about Uber quarterly financials ending March 2022\",\n",
+    "    description=(\n",
+    "        f\"Provides information about Uber quarterly financials ending March\"\n",
+    "        f\" 2022\"\n",
+    "    ),\n",
     ")"
    ]
   },
@@ -190,7 +199,9 @@
     "from llama_index.tools import QueryPlanTool\n",
     "from llama_index import get_response_synthesizer\n",
     "\n",
-    "response_synthesizer = get_response_synthesizer(service_context=service_context)\n",
+    "response_synthesizer = get_response_synthesizer(\n",
+    "    service_context=service_context\n",
+    ")\n",
     "query_plan_tool = QueryPlanTool.from_defaults(\n",
     "    query_engine_tools=[query_tool_sept, query_tool_june, query_tool_march],\n",
     "    response_synthesizer=response_synthesizer,\n",
@@ -402,7 +413,9 @@
     }
    ],
    "source": [
-    "response = agent.query(\"Analyze Uber revenue growth in March, June, and September\")"
+    "response = agent.query(\n",
+    "    \"Analyze Uber revenue growth in March, June, and September\"\n",
+    ")"
    ]
   },
   {
@@ -504,7 +517,8 @@
    "source": [
     "response = agent.query(\n",
     "    \"First look at Uber's revenue growth and risk factors in March, \"\n",
-    "    + \"then revenue growth and risk factors in September, and then compare and contrast the two documents?\"\n",
+    "    + \"then revenue growth and risk factors in September, and then compare and\"\n",
+    "    \" contrast the two documents?\"\n",
     ")"
    ]
   },
diff --git a/docs/examples/agent/openai_agent_retrieval.ipynb b/docs/examples/agent/openai_agent_retrieval.ipynb
index 8d045649d868dbadf18da05ad94a613ae131e506..64868bf4d08d35887d6d0a91297a1379370c80c5 100644
--- a/docs/examples/agent/openai_agent_retrieval.ipynb
+++ b/docs/examples/agent/openai_agent_retrieval.ipynb
@@ -181,7 +181,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "agent = FnRetrieverOpenAIAgent.from_retriever(obj_index.as_retriever(), verbose=True)"
+    "agent = FnRetrieverOpenAIAgent.from_retriever(\n",
+    "    obj_index.as_retriever(), verbose=True\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/agent/openai_agent_with_query_engine.ipynb b/docs/examples/agent/openai_agent_with_query_engine.ipynb
index 39542ac064aae7f79ddd3e38afdac4ed844db67a..ea08ce93d3b99604b76de2276828aa1b6e5134ec 100644
--- a/docs/examples/agent/openai_agent_with_query_engine.ipynb
+++ b/docs/examples/agent/openai_agent_with_query_engine.ipynb
@@ -41,10 +41,14 @@
    "outputs": [],
    "source": [
     "try:\n",
-    "    storage_context = StorageContext.from_defaults(persist_dir=\"./storage/lyft\")\n",
+    "    storage_context = StorageContext.from_defaults(\n",
+    "        persist_dir=\"./storage/lyft\"\n",
+    "    )\n",
     "    lyft_index = load_index_from_storage(storage_context)\n",
     "\n",
-    "    storage_context = StorageContext.from_defaults(persist_dir=\"./storage/uber\")\n",
+    "    storage_context = StorageContext.from_defaults(\n",
+    "        persist_dir=\"./storage/uber\"\n",
+    "    )\n",
     "    uber_index = load_index_from_storage(storage_context)\n",
     "\n",
     "    index_loaded = True\n",
@@ -100,16 +104,20 @@
     "        query_engine=lyft_engine,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"lyft_10k\",\n",
-    "            description=\"Provides information about Lyft financials for year 2021. \"\n",
-    "            \"Use a detailed plain text question as input to the tool.\",\n",
+    "            description=(\n",
+    "                \"Provides information about Lyft financials for year 2021. \"\n",
+    "                \"Use a detailed plain text question as input to the tool.\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "    QueryEngineTool(\n",
     "        query_engine=uber_engine,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"uber_10k\",\n",
-    "            description=\"Provides information about Uber financials for year 2021. \"\n",
-    "            \"Use a detailed plain text question as input to the tool.\",\n",
+    "            description=(\n",
+    "                \"Provides information about Uber financials for year 2021. \"\n",
+    "                \"Use a detailed plain text question as input to the tool.\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "]"
diff --git a/docs/examples/agent/react_agent_with_query_engine.ipynb b/docs/examples/agent/react_agent_with_query_engine.ipynb
index 42972067c16c9048f0a7c0ac804c3ce66de056ff..326999b6b10a39264f16bbae409fe2a2c716390a 100644
--- a/docs/examples/agent/react_agent_with_query_engine.ipynb
+++ b/docs/examples/agent/react_agent_with_query_engine.ipynb
@@ -52,10 +52,14 @@
    "outputs": [],
    "source": [
     "try:\n",
-    "    storage_context = StorageContext.from_defaults(persist_dir=\"./storage/lyft\")\n",
+    "    storage_context = StorageContext.from_defaults(\n",
+    "        persist_dir=\"./storage/lyft\"\n",
+    "    )\n",
     "    lyft_index = load_index_from_storage(storage_context)\n",
     "\n",
-    "    storage_context = StorageContext.from_defaults(persist_dir=\"./storage/uber\")\n",
+    "    storage_context = StorageContext.from_defaults(\n",
+    "        persist_dir=\"./storage/uber\"\n",
+    "    )\n",
     "    uber_index = load_index_from_storage(storage_context)\n",
     "\n",
     "    index_loaded = True\n",
@@ -111,16 +115,20 @@
     "        query_engine=lyft_engine,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"lyft_10k\",\n",
-    "            description=\"Provides information about Lyft financials for year 2021. \"\n",
-    "            \"Use a detailed plain text question as input to the tool.\",\n",
+    "            description=(\n",
+    "                \"Provides information about Lyft financials for year 2021. \"\n",
+    "                \"Use a detailed plain text question as input to the tool.\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "    QueryEngineTool(\n",
     "        query_engine=uber_engine,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"uber_10k\",\n",
-    "            description=\"Provides information about Uber financials for year 2021. \"\n",
-    "            \"Use a detailed plain text question as input to the tool.\",\n",
+    "            description=(\n",
+    "                \"Provides information about Uber financials for year 2021. \"\n",
+    "                \"Use a detailed plain text question as input to the tool.\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "]"
@@ -219,7 +227,8 @@
    ],
    "source": [
     "response = agent.chat(\n",
-    "    \"Compare and contrast the revenue growth of Uber and Lyft in 2021, then give an analysis\"\n",
+    "    \"Compare and contrast the revenue growth of Uber and Lyft in 2021, then\"\n",
+    "    \" give an analysis\"\n",
     ")\n",
     "print(str(response))"
    ]
@@ -246,7 +255,8 @@
     "nest_asyncio.apply()\n",
     "\n",
     "response = await agent.achat(\n",
-    "    \"Compare and contrast the risks of Uber and Lyft in 2021, then give an analysis\"\n",
+    "    \"Compare and contrast the risks of Uber and Lyft in 2021, then give an\"\n",
+    "    \" analysis\"\n",
     ")\n",
     "print(str(response))"
    ]
@@ -341,7 +351,8 @@
    ],
    "source": [
     "response = agent.chat(\n",
-    "    \"Compare and contrast the revenue growth of Uber and Lyft in 2021, then give an analysis\"\n",
+    "    \"Compare and contrast the revenue growth of Uber and Lyft in 2021, then\"\n",
+    "    \" give an analysis\"\n",
     ")\n",
     "print(str(response))"
    ]
@@ -363,7 +374,8 @@
    ],
    "source": [
     "response = agent_instruct.chat(\n",
-    "    \"Compare and contrast the revenue growth of Uber and Lyft in 2021, then give an analysis\"\n",
+    "    \"Compare and contrast the revenue growth of Uber and Lyft in 2021, then\"\n",
+    "    \" give an analysis\"\n",
     ")\n",
     "print(str(response))"
    ]
@@ -393,7 +405,8 @@
    ],
    "source": [
     "response = agent.chat(\n",
-    "    \"Can you tell me about the risk factors of the company with the higher revenue?\"\n",
+    "    \"Can you tell me about the risk factors of the company with the higher\"\n",
+    "    \" revenue?\"\n",
     ")\n",
     "print(str(response))"
    ]
@@ -415,7 +428,8 @@
    ],
    "source": [
     "response = agent_instruct.query(\n",
-    "    \"Can you tell me about the risk factors of the company with the higher revenue?\"\n",
+    "    \"Can you tell me about the risk factors of the company with the higher\"\n",
+    "    \" revenue?\"\n",
     ")\n",
     "print(str(response))"
    ]
diff --git a/docs/examples/callbacks/AimCallback.ipynb b/docs/examples/callbacks/AimCallback.ipynb
index a2ce55b6d64e7bff1a747cacbc73e05531e15eac..0fcc6df16d6685e7c473a0e8d5a51038b371d942 100644
--- a/docs/examples/callbacks/AimCallback.ipynb
+++ b/docs/examples/callbacks/AimCallback.ipynb
@@ -92,7 +92,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "service_context = ServiceContext.from_defaults(callback_manager=callback_manager)\n",
+    "service_context = ServiceContext.from_defaults(\n",
+    "    callback_manager=callback_manager\n",
+    ")\n",
     "index = SummaryIndex.from_documents(docs, service_context=service_context)\n",
     "query_engine = index.as_query_engine()"
    ]
diff --git a/docs/examples/callbacks/HoneyHiveLlamaIndexTracer.ipynb b/docs/examples/callbacks/HoneyHiveLlamaIndexTracer.ipynb
index 0e7dd8f18d3bed774932be0f988ff0af8ee926dc..e139965c7c905e118540d1cf29efb62e046e0877 100644
--- a/docs/examples/callbacks/HoneyHiveLlamaIndexTracer.ipynb
+++ b/docs/examples/callbacks/HoneyHiveLlamaIndexTracer.ipynb
@@ -40,7 +40,8 @@
     "\n",
     "if os.getenv(\"OPENAI_API_KEY\") is None:\n",
     "    os.environ[\"OPENAI_API_KEY\"] = getpass(\n",
-    "        \"Paste your OpenAI key from: https://platform.openai.com/account/api-keys\\n\"\n",
+    "        \"Paste your OpenAI key from:\"\n",
+    "        \" https://platform.openai.com/account/api-keys\\n\"\n",
     "    )\n",
     "assert os.getenv(\"OPENAI_API_KEY\", \"\").startswith(\n",
     "    \"sk-\"\n",
@@ -76,7 +77,8 @@
     "\n",
     "if os.getenv(\"HONEYHIVE_API_KEY\") is None:\n",
     "    os.environ[\"HONEYHIVE_API_KEY\"] = getpass(\n",
-    "        \"Paste your HoneyHive key from: https://app.honeyhive.ai/settings/account\\n\"\n",
+    "        \"Paste your HoneyHive key from:\"\n",
+    "        \" https://app.honeyhive.ai/settings/account\\n\"\n",
     "    )\n",
     "print(\"HoneyHive API key configured\")"
    ]
@@ -239,7 +241,9 @@
     }
    ],
    "source": [
-    "index = GPTVectorStoreIndex.from_documents(docs, service_context=service_context)"
+    "index = GPTVectorStoreIndex.from_documents(\n",
+    "    docs, service_context=service_context\n",
+    ")"
    ]
   },
   {
@@ -370,7 +374,9 @@
    "source": [
     "# build NYC index\n",
     "nyc_index = GPTVectorStoreIndex.from_documents(\n",
-    "    nyc_documents, service_context=service_context, storage_context=storage_context\n",
+    "    nyc_documents,\n",
+    "    service_context=service_context,\n",
+    "    storage_context=storage_context,\n",
     ")"
    ]
   },
@@ -397,7 +403,9 @@
    "source": [
     "# build essay index\n",
     "essay_index = GPTVectorStoreIndex.from_documents(\n",
-    "    essay_documents, service_context=service_context, storage_context=storage_context\n",
+    "    essay_documents,\n",
+    "    service_context=service_context,\n",
+    "    storage_context=storage_context,\n",
     ")"
    ]
   },
@@ -522,7 +530,8 @@
     "query_engine = graph.as_query_engine()\n",
     "\n",
     "response = query_engine.query(\n",
-    "    \"What is the climate of New York City like? How cold is it during the winter?\",\n",
+    "    \"What is the climate of New York City like? How cold is it during the\"\n",
+    "    \" winter?\",\n",
     ")\n",
     "print(response, sep=\"\\n\")"
    ]
diff --git a/docs/examples/callbacks/LlamaDebugHandler.ipynb b/docs/examples/callbacks/LlamaDebugHandler.ipynb
index c1724c63c0b42630555672ec04f6ca6588a0f80e..da7cb2e0b87948ba47cba9a2811ff67a115128b6 100644
--- a/docs/examples/callbacks/LlamaDebugHandler.ipynb
+++ b/docs/examples/callbacks/LlamaDebugHandler.ipynb
@@ -22,7 +22,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType"
+    "from llama_index.callbacks import (\n",
+    "    CallbackManager,\n",
+    "    LlamaDebugHandler,\n",
+    "    CBEventType,\n",
+    ")"
    ]
   },
   {
@@ -258,7 +262,9 @@
     "# Now construct the agent\n",
     "from llama_index.agent import OpenAIAgent\n",
     "\n",
-    "agent = OpenAIAgent.from_tools(tools=[tool], llm=llm, callback_manager=callback_manager)"
+    "agent = OpenAIAgent.from_tools(\n",
+    "    tools=[tool], llm=llm, callback_manager=callback_manager\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/callbacks/OpenInferenceCallback.ipynb b/docs/examples/callbacks/OpenInferenceCallback.ipynb
index ba5de4daf94172baa01cfadff794efe940970282..f8507bdf80b6fccea50e7d4713febadbdbfea6cf 100644
--- a/docs/examples/callbacks/OpenInferenceCallback.ipynb
+++ b/docs/examples/callbacks/OpenInferenceCallback.ipynb
@@ -67,7 +67,10 @@
     "    VectorStoreIndex,\n",
     ")\n",
     "from llama_index.callbacks import CallbackManager, OpenInferenceCallbackHandler\n",
-    "from llama_index.callbacks.open_inference_callback import as_dataframe, QueryData\n",
+    "from llama_index.callbacks.open_inference_callback import (\n",
+    "    as_dataframe,\n",
+    "    QueryData,\n",
+    ")\n",
     "from llama_index.node_parser import SimpleNodeParser\n",
     "import pandas as pd\n",
     "from tqdm import tqdm"
@@ -527,7 +530,9 @@
    "source": [
     "callback_handler = OpenInferenceCallbackHandler()\n",
     "callback_manager = CallbackManager([callback_handler])\n",
-    "service_context = ServiceContext.from_defaults(callback_manager=callback_manager)"
+    "service_context = ServiceContext.from_defaults(\n",
+    "    callback_manager=callback_manager\n",
+    ")"
    ]
   },
   {
@@ -543,7 +548,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)\n",
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")\n",
     "query_engine = index.as_query_engine()"
    ]
   },
@@ -823,7 +830,9 @@
    "outputs": [],
    "source": [
     "class ParquetCallback:\n",
-    "    def __init__(self, data_path: Union[str, Path], max_buffer_length: int = 1000):\n",
+    "    def __init__(\n",
+    "        self, data_path: Union[str, Path], max_buffer_length: int = 1000\n",
+    "    ):\n",
     "        self._data_path = Path(data_path)\n",
     "        self._data_path.mkdir(parents=True, exist_ok=False)\n",
     "        self._max_buffer_length = max_buffer_length\n",
@@ -875,8 +884,12 @@
     ")\n",
     "callback_handler = OpenInferenceCallbackHandler(callback=parquet_writer)\n",
     "callback_manager = CallbackManager([callback_handler])\n",
-    "service_context = ServiceContext.from_defaults(callback_manager=callback_manager)\n",
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)\n",
+    "service_context = ServiceContext.from_defaults(\n",
+    "    callback_manager=callback_manager\n",
+    ")\n",
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")\n",
     "query_engine = index.as_query_engine()\n",
     "\n",
     "for query in tqdm(queries):\n",
diff --git a/docs/examples/callbacks/TokenCountingHandler.ipynb b/docs/examples/callbacks/TokenCountingHandler.ipynb
index 65420e85a084e6f90f63b4df1edbf26c6ff64a59..175211d05142166009896d69e151b3adf83d7f3d 100644
--- a/docs/examples/callbacks/TokenCountingHandler.ipynb
+++ b/docs/examples/callbacks/TokenCountingHandler.ipynb
@@ -307,7 +307,10 @@
    ],
    "source": [
     "print(\"Num LLM token count events: \", len(token_counter.llm_token_counts))\n",
-    "print(\"Num Embedding token count events: \", len(token_counter.embedding_token_counts))"
+    "print(\n",
+    "    \"Num Embedding token count events: \",\n",
+    "    len(token_counter.embedding_token_counts),\n",
+    ")"
    ]
   },
   {
@@ -348,10 +351,14 @@
    "source": [
     "print(\"prompt: \", token_counter.llm_token_counts[0].prompt[:100], \"...\\n\")\n",
     "print(\n",
-    "    \"prompt token count: \", token_counter.llm_token_counts[0].prompt_token_count, \"\\n\"\n",
+    "    \"prompt token count: \",\n",
+    "    token_counter.llm_token_counts[0].prompt_token_count,\n",
+    "    \"\\n\",\n",
     ")\n",
     "\n",
-    "print(\"completion: \", token_counter.llm_token_counts[0].completion[:100], \"...\\n\")\n",
+    "print(\n",
+    "    \"completion: \", token_counter.llm_token_counts[0].completion[:100], \"...\\n\"\n",
+    ")\n",
     "print(\n",
     "    \"completion token count: \",\n",
     "    token_counter.llm_token_counts[0].completion_token_count,\n",
diff --git a/docs/examples/callbacks/WandbCallbackHandler.ipynb b/docs/examples/callbacks/WandbCallbackHandler.ipynb
index f9bb3769856536f430efb0c554038542d2dd489c..d151cf8b7d4ffe6455f08e1bfd693c2281250677 100644
--- a/docs/examples/callbacks/WandbCallbackHandler.ipynb
+++ b/docs/examples/callbacks/WandbCallbackHandler.ipynb
@@ -32,7 +32,8 @@
     "\n",
     "if os.getenv(\"OPENAI_API_KEY\") is None:\n",
     "    os.environ[\"OPENAI_API_KEY\"] = getpass(\n",
-    "        \"Paste your OpenAI key from: https://platform.openai.com/account/api-keys\\n\"\n",
+    "        \"Paste your OpenAI key from:\"\n",
+    "        \" https://platform.openai.com/account/api-keys\\n\"\n",
     "    )\n",
     "assert os.getenv(\"OPENAI_API_KEY\", \"\").startswith(\n",
     "    \"sk-\"\n",
@@ -208,7 +209,9 @@
     }
    ],
    "source": [
-    "index = GPTVectorStoreIndex.from_documents(docs, service_context=service_context)"
+    "index = GPTVectorStoreIndex.from_documents(\n",
+    "    docs, service_context=service_context\n",
+    ")"
    ]
   },
   {
@@ -274,7 +277,9 @@
     ")\n",
     "\n",
     "# Load the index and initialize a query engine\n",
-    "index = load_index_from_storage(storage_context, service_context=service_context)"
+    "index = load_index_from_storage(\n",
+    "    storage_context, service_context=service_context\n",
+    ")"
    ]
   },
   {
@@ -424,7 +429,9 @@
    "source": [
     "# build NYC index\n",
     "nyc_index = GPTVectorStoreIndex.from_documents(\n",
-    "    nyc_documents, service_context=service_context, storage_context=storage_context\n",
+    "    nyc_documents,\n",
+    "    service_context=service_context,\n",
+    "    storage_context=storage_context,\n",
     ")"
    ]
   },
@@ -458,7 +465,9 @@
    "source": [
     "# build essay index\n",
     "essay_index = GPTVectorStoreIndex.from_documents(\n",
-    "    essay_documents, service_context=service_context, storage_context=storage_context\n",
+    "    essay_documents,\n",
+    "    service_context=service_context,\n",
+    "    storage_context=storage_context,\n",
     ")"
    ]
   },
@@ -670,7 +679,8 @@
     "query_engine = graph.as_query_engine()\n",
     "\n",
     "response = query_engine.query(\n",
-    "    \"What is the climate of New York City like? How cold is it during the winter?\",\n",
+    "    \"What is the climate of New York City like? How cold is it during the\"\n",
+    "    \" winter?\",\n",
     ")\n",
     "print(response, sep=\"\\n\")"
    ]
diff --git a/docs/examples/chat_engine/chat_engine_best.ipynb b/docs/examples/chat_engine/chat_engine_best.ipynb
index 179bdb7910cd0a501f82408d9a1bf2ec04ea15b7..b8a3ab9e76dc2cc7f0099ac6eb6dad6e11571a66 100644
--- a/docs/examples/chat_engine/chat_engine_best.ipynb
+++ b/docs/examples/chat_engine/chat_engine_best.ipynb
@@ -93,7 +93,9 @@
     }
    ],
    "source": [
-    "response = chat_engine.chat(\"What are the first programs Paul Graham tried writing?\")"
+    "response = chat_engine.chat(\n",
+    "    \"What are the first programs Paul Graham tried writing?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/chat_engine/chat_engine_context.ipynb b/docs/examples/chat_engine/chat_engine_context.ipynb
index 1507a70dd7de12747ea1ac775700170ffab4716b..a9664a3e895f0debc8c8dca7e3e10fdab7e21d71 100644
--- a/docs/examples/chat_engine/chat_engine_context.ipynb
+++ b/docs/examples/chat_engine/chat_engine_context.ipynb
@@ -110,7 +110,10 @@
     "chat_engine = index.as_chat_engine(\n",
     "    chat_mode=\"context\",\n",
     "    memory=memory,\n",
-    "    system_prompt=\"You are a chatbot, able to have normal interactions, as well as talk about an essay discussing Paul Grahams life.\",\n",
+    "    system_prompt=(\n",
+    "        \"You are a chatbot, able to have normal interactions, as well as talk\"\n",
+    "        \" about an essay discussing Paul Grahams life.\"\n",
+    "    ),\n",
     ")"
    ]
   },
diff --git a/docs/examples/chat_engine/chat_engine_openai.ipynb b/docs/examples/chat_engine/chat_engine_openai.ipynb
index ada3a8207076e1b82a1f1de5f1dac47be44c1009..29e19bd2e07380f5e53d2de29bd2caaf4da19adf 100644
--- a/docs/examples/chat_engine/chat_engine_openai.ipynb
+++ b/docs/examples/chat_engine/chat_engine_openai.ipynb
@@ -35,7 +35,9 @@
     "from llama_index.llms import OpenAI\n",
     "\n",
     "# Necessary to use the latest OpenAI models that support function calling API\n",
-    "service_context = ServiceContext.from_defaults(llm=OpenAI(model=\"gpt-3.5-turbo-0613\"))\n",
+    "service_context = ServiceContext.from_defaults(\n",
+    "    llm=OpenAI(model=\"gpt-3.5-turbo-0613\")\n",
+    ")\n",
     "data = SimpleDirectoryReader(input_dir=\"../data/paul_graham/\").load_data()\n",
     "index = VectorStoreIndex.from_documents(data, service_context=service_context)"
    ]
diff --git a/docs/examples/chat_engine/chat_engine_personality.ipynb b/docs/examples/chat_engine/chat_engine_personality.ipynb
index df033361995816f23479621d6d3502f521ddfa2a..95b2435f2d8276f39976f1ca97c6d359d2a72798 100644
--- a/docs/examples/chat_engine/chat_engine_personality.ipynb
+++ b/docs/examples/chat_engine/chat_engine_personality.ipynb
@@ -43,7 +43,9 @@
     "from llama_index.chat_engine import SimpleChatEngine\n",
     "\n",
     "chat_engine = SimpleChatEngine.from_defaults()\n",
-    "response = chat_engine.chat(\"Say something profound and romantic about fourth of July\")\n",
+    "response = chat_engine.chat(\n",
+    "    \"Say something profound and romantic about fourth of July\"\n",
+    ")\n",
     "print(response)"
    ]
   },
@@ -82,7 +84,9 @@
     "chat_engine = SimpleChatEngine.from_defaults(\n",
     "    system_prompt=SHAKESPEARE_WRITING_ASSISTANT\n",
     ")\n",
-    "response = chat_engine.chat(\"Say something profound and romantic about fourth of July\")\n",
+    "response = chat_engine.chat(\n",
+    "    \"Say something profound and romantic about fourth of July\"\n",
+    ")\n",
     "print(response)"
    ]
   },
@@ -112,8 +116,12 @@
     "from llama_index.chat_engine import SimpleChatEngine\n",
     "from llama_index.prompts.system import MARKETING_WRITING_ASSISTANT\n",
     "\n",
-    "chat_engine = SimpleChatEngine.from_defaults(system_prompt=MARKETING_WRITING_ASSISTANT)\n",
-    "response = chat_engine.chat(\"Say something profound and romantic about fourth of July\")\n",
+    "chat_engine = SimpleChatEngine.from_defaults(\n",
+    "    system_prompt=MARKETING_WRITING_ASSISTANT\n",
+    ")\n",
+    "response = chat_engine.chat(\n",
+    "    \"Say something profound and romantic about fourth of July\"\n",
+    ")\n",
     "print(response)"
    ]
   },
@@ -144,7 +152,9 @@
     "from llama_index.prompts.system import IRS_TAX_CHATBOT\n",
     "\n",
     "chat_engine = SimpleChatEngine.from_defaults(system_prompt=IRS_TAX_CHATBOT)\n",
-    "response = chat_engine.chat(\"Say something profound and romantic about fourth of July\")\n",
+    "response = chat_engine.chat(\n",
+    "    \"Say something profound and romantic about fourth of July\"\n",
+    ")\n",
     "print(response)"
    ]
   }
diff --git a/docs/examples/chat_engine/chat_engine_repl.ipynb b/docs/examples/chat_engine/chat_engine_repl.ipynb
index 78690f9654bac3042ca142c4504ce6c9c1cd2f97..e110470b7c974080283b36368a6f0a25551f8d1a 100644
--- a/docs/examples/chat_engine/chat_engine_repl.ipynb
+++ b/docs/examples/chat_engine/chat_engine_repl.ipynb
@@ -405,7 +405,9 @@
     }
    ],
    "source": [
-    "response = chat_engine.stream_chat(\"Write me a poem about raining cats and dogs.\")\n",
+    "response = chat_engine.stream_chat(\n",
+    "    \"Write me a poem about raining cats and dogs.\"\n",
+    ")\n",
     "for token in response.response_gen:\n",
     "    print(token, end=\"\")"
    ]
diff --git a/docs/examples/citation/pdf_page_reference.ipynb b/docs/examples/citation/pdf_page_reference.ipynb
index c89527d4d7f7f5623ab10b506f822174dc69ff86..9abcfa7a9f3afa2678183c953baa3efbfa84f8b2 100644
--- a/docs/examples/citation/pdf_page_reference.ipynb
+++ b/docs/examples/citation/pdf_page_reference.ipynb
@@ -114,7 +114,8 @@
    ],
    "source": [
     "response = query_engine.query(\n",
-    "    \"What was the impact of COVID? Show statements in bullet form and show page reference after each statement.\"\n",
+    "    \"What was the impact of COVID? Show statements in bullet form and show\"\n",
+    "    \" page reference after each statement.\"\n",
     ")\n",
     "response.print_response_stream()"
    ]
diff --git a/docs/examples/composable_indices/ComposableIndices-Prior.ipynb b/docs/examples/composable_indices/ComposableIndices-Prior.ipynb
index b565732e86ad7a9c528c8b437672ddcbda9a1d1c..c895e96fb4baa843334dbd1a70123d5ccb9fa2ca 100644
--- a/docs/examples/composable_indices/ComposableIndices-Prior.ipynb
+++ b/docs/examples/composable_indices/ComposableIndices-Prior.ipynb
@@ -75,7 +75,9 @@
    "outputs": [],
    "source": [
     "# load PG's essay\n",
-    "essay_documents = SimpleDirectoryReader(\"../paul_graham_essay/data/\").load_data()"
+    "essay_documents = SimpleDirectoryReader(\n",
+    "    \"../paul_graham_essay/data/\"\n",
+    ").load_data()"
    ]
   },
   {
@@ -101,7 +103,9 @@
     "\n",
     "# build essay index\n",
     "essay_index = VectorStoreIndex.from_documents(\n",
-    "    essay_documents, service_context=service_context, storage_context=storage_context\n",
+    "    essay_documents,\n",
+    "    service_context=service_context,\n",
+    "    storage_context=storage_context,\n",
     ")\n",
     "empty_index = EmptyIndex(\n",
     "    service_context=service_context, storage_context=storage_context\n",
@@ -181,7 +185,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "essay_index_summary = \"This document describes Paul Graham's life, from early adulthood to the present day.\"\n",
+    "essay_index_summary = (\n",
+    "    \"This document describes Paul Graham's life, from early adulthood to the\"\n",
+    "    \" present day.\"\n",
+    ")\n",
     "empty_index_summary = \"This can be used for general knowledge purposes.\""
    ]
   },
@@ -357,7 +364,9 @@
    "source": [
     "# set Logging to DEBUG for more detailed outputs\n",
     "# ask it a question about NYC\n",
-    "query_engine = graph2.as_query_engine(custom_query_engines=custom_query_engines)\n",
+    "query_engine = graph2.as_query_engine(\n",
+    "    custom_query_engines=custom_query_engines\n",
+    ")\n",
     "response = query_engine.query(\n",
     "    \"Tell me about what Paul Graham did growing up?\",\n",
     ")"
diff --git a/docs/examples/composable_indices/ComposableIndices-Weaviate.ipynb b/docs/examples/composable_indices/ComposableIndices-Weaviate.ipynb
index c932da0deeb3081d53d1d48a52d7b3df91299804..66b6697310c86766a3c10874286a5024beb9cc50 100644
--- a/docs/examples/composable_indices/ComposableIndices-Weaviate.ipynb
+++ b/docs/examples/composable_indices/ComposableIndices-Weaviate.ipynb
@@ -134,7 +134,9 @@
    "outputs": [],
    "source": [
     "# load PG's essay\n",
-    "essay_documents = SimpleDirectoryReader(\"../paul_graham_essay/data/\").load_data()"
+    "essay_documents = SimpleDirectoryReader(\n",
+    "    \"../paul_graham_essay/data/\"\n",
+    ").load_data()"
    ]
   },
   {
@@ -157,7 +159,9 @@
     "from llama_index.storage.storage_context import StorageContext\n",
     "\n",
     "\n",
-    "vector_store = WeaviateVectorStore(weaviate_client=client, index_name=\"Nyc_docs\")\n",
+    "vector_store = WeaviateVectorStore(\n",
+    "    weaviate_client=client, index_name=\"Nyc_docs\"\n",
+    ")\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
     "nyc_index = VectorStoreIndex.from_documents(\n",
     "    nyc_documents, storage_context=storage_context\n",
@@ -172,7 +176,9 @@
    "outputs": [],
    "source": [
     "# build essay index\n",
-    "vector_store = WeaviateVectorStore(weaviate_client=client, index_name=\"Essay_docs\")\n",
+    "vector_store = WeaviateVectorStore(\n",
+    "    weaviate_client=client, index_name=\"Essay_docs\"\n",
+    ")\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
     "essay_index = VectorStoreIndex.from_documents(\n",
     "    essay_documents, storage_context=storage_context\n",
@@ -306,7 +312,8 @@
     "# set Logging to DEBUG for more detailed outputs\n",
     "# ask it a question about NYC\n",
     "response = query_engine.query(\n",
-    "    \"What is the weather of New York City like? How cold is it during the winter?\",\n",
+    "    \"What is the weather of New York City like? How cold is it during the\"\n",
+    "    \" winter?\",\n",
     ")"
    ]
   },
diff --git a/docs/examples/composable_indices/ComposableIndices.ipynb b/docs/examples/composable_indices/ComposableIndices.ipynb
index 4aa5b1639548dec380045972d5f6319e9614c927..ce23ffb003f7804d4ca74a14cb73d08a6c900373 100644
--- a/docs/examples/composable_indices/ComposableIndices.ipynb
+++ b/docs/examples/composable_indices/ComposableIndices.ipynb
@@ -30,7 +30,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index import VectorStoreIndex, SimpleKeywordTableIndex, SimpleDirectoryReader"
+    "from llama_index import (\n",
+    "    VectorStoreIndex,\n",
+    "    SimpleKeywordTableIndex,\n",
+    "    SimpleDirectoryReader,\n",
+    ")"
    ]
   },
   {
@@ -96,7 +100,9 @@
    "outputs": [],
    "source": [
     "# load PG's essay\n",
-    "essay_documents = SimpleDirectoryReader(\"../paul_graham_essay/data/\").load_data()"
+    "essay_documents = SimpleDirectoryReader(\n",
+    "    \"../paul_graham_essay/data/\"\n",
+    ").load_data()"
    ]
   },
   {
@@ -288,7 +294,8 @@
     "# ask it a question about NYC\n",
     "query_engine = graph.as_query_engine()\n",
     "response = query_engine.query(\n",
-    "    \"What is the climate of New York City like? How cold is it during the winter?\",\n",
+    "    \"What is the climate of New York City like? How cold is it during the\"\n",
+    "    \" winter?\",\n",
     ")"
    ]
   },
diff --git a/docs/examples/composable_indices/city_analysis/City_Analysis-Decompose.ipynb b/docs/examples/composable_indices/city_analysis/City_Analysis-Decompose.ipynb
index f09e81acf63c17bb0b026461b852529ca0fd85dc..149ca39bff4efbfce0e25833d575987887c5f1cb 100644
--- a/docs/examples/composable_indices/city_analysis/City_Analysis-Decompose.ipynb
+++ b/docs/examples/composable_indices/city_analysis/City_Analysis-Decompose.ipynb
@@ -278,7 +278,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.query.query_transform.base import DecomposeQueryTransform\n",
+    "from llama_index.indices.query.query_transform.base import (\n",
+    "    DecomposeQueryTransform,\n",
+    ")\n",
     "\n",
     "decompose_transform = DecomposeQueryTransform(\n",
     "    service_context.llm_predictor, verbose=True\n",
@@ -302,7 +304,9 @@
    "outputs": [],
    "source": [
     "# with query decomposition in subindices\n",
-    "from llama_index.query_engine.transform_query_engine import TransformQueryEngine\n",
+    "from llama_index.query_engine.transform_query_engine import (\n",
+    "    TransformQueryEngine,\n",
+    ")\n",
     "\n",
     "\n",
     "custom_query_engines = {}\n",
@@ -310,11 +314,15 @@
     "    query_engine = index.as_query_engine(service_context=service_context)\n",
     "    transform_metadata = {\"index_summary\": index.index_struct.summary}\n",
     "    tranformed_query_engine = TransformQueryEngine(\n",
-    "        query_engine, decompose_transform, transform_metadata=transform_metadata\n",
+    "        query_engine,\n",
+    "        decompose_transform,\n",
+    "        transform_metadata=transform_metadata,\n",
     "    )\n",
     "    custom_query_engines[index.index_id] = tranformed_query_engine\n",
     "\n",
-    "custom_query_engines[graph.root_index.index_id] = graph.root_index.as_query_engine(\n",
+    "custom_query_engines[\n",
+    "    graph.root_index.index_id\n",
+    "] = graph.root_index.as_query_engine(\n",
     "    retriever_mode=\"simple\",\n",
     "    response_mode=\"tree_summarize\",\n",
     "    service_context=service_context,\n",
@@ -465,7 +473,9 @@
     "    query_engine = index.as_query_engine(service_context=service_context)\n",
     "    custom_query_engines[index.index_id] = query_engine\n",
     "\n",
-    "custom_query_engines[graph.root_index.index_id] = graph.root_index.as_query_engine(\n",
+    "custom_query_engines[\n",
+    "    graph.root_index.index_id\n",
+    "] = graph.root_index.as_query_engine(\n",
     "    retriever_mode=\"simple\",\n",
     "    response_mode=\"tree_summarize\",\n",
     "    service_context=service_context,\n",
diff --git a/docs/examples/composable_indices/city_analysis/City_Analysis-Unified-Query.ipynb b/docs/examples/composable_indices/city_analysis/City_Analysis-Unified-Query.ipynb
index e13c0570d0284cf43cc42799b050a442e6b53ef9..090fddd662613a66748b914d2f2cdd2d56b8f937 100644
--- a/docs/examples/composable_indices/city_analysis/City_Analysis-Unified-Query.ipynb
+++ b/docs/examples/composable_indices/city_analysis/City_Analysis-Unified-Query.ipynb
@@ -213,9 +213,10 @@
    "source": [
     "index_summaries = {\n",
     "    wiki_title: (\n",
-    "        f\"This content contains Wikipedia articles about {wiki_title}. \"\n",
-    "        f\"Use this index if you need to lookup specific facts about {wiki_title}.\\n\"\n",
-    "        \"Do not use this index if you want to analyze multiple cities.\"\n",
+    "        f\"This content contains Wikipedia articles about {wiki_title}. Use\"\n",
+    "        \" this index if you need to lookup specific facts about\"\n",
+    "        f\" {wiki_title}.\\nDo not use this index if you want to analyze\"\n",
+    "        \" multiple cities.\"\n",
     "    )\n",
     "    for wiki_title in wiki_titles\n",
     "}"
@@ -341,10 +342,14 @@
    "outputs": [],
    "source": [
     "# define decompose_transform\n",
-    "from llama_index.indices.query.query_transform.base import DecomposeQueryTransform\n",
+    "from llama_index.indices.query.query_transform.base import (\n",
+    "    DecomposeQueryTransform,\n",
+    ")\n",
     "from llama_index import LLMPredictor\n",
     "\n",
-    "decompose_transform = DecomposeQueryTransform(LLMPredictor(llm=chatgpt), verbose=True)"
+    "decompose_transform = DecomposeQueryTransform(\n",
+    "    LLMPredictor(llm=chatgpt), verbose=True\n",
+    ")"
    ]
   },
   {
@@ -355,7 +360,9 @@
    "outputs": [],
    "source": [
     "# define custom retrievers\n",
-    "from llama_index.query_engine.transform_query_engine import TransformQueryEngine\n",
+    "from llama_index.query_engine.transform_query_engine import (\n",
+    "    TransformQueryEngine,\n",
+    ")\n",
     "\n",
     "\n",
     "custom_query_engines = {}\n",
@@ -384,7 +391,9 @@
    "outputs": [],
    "source": [
     "# define graph\n",
-    "graph_query_engine = graph.as_query_engine(custom_query_engines=custom_query_engines)"
+    "graph_query_engine = graph.as_query_engine(\n",
+    "    custom_query_engines=custom_query_engines\n",
+    ")"
    ]
   },
   {
@@ -558,7 +567,9 @@
     "    summary = index_summaries[wiki_title]\n",
     "\n",
     "    query_engine = index.as_query_engine(service_context=service_context)\n",
-    "    vector_tool = QueryEngineTool.from_defaults(query_engine, description=summary)\n",
+    "    vector_tool = QueryEngineTool.from_defaults(\n",
+    "        query_engine, description=summary\n",
+    "    )\n",
     "    query_engine_tools.append(vector_tool)\n",
     "\n",
     "\n",
diff --git a/docs/examples/composable_indices/city_analysis/City_Analysis.ipynb b/docs/examples/composable_indices/city_analysis/City_Analysis.ipynb
index cd8f037a5baf184c4e7b494ea480ae9950101ddf..28c2ef19bc7e0eef5404b975cc2183e83551b0c7 100644
--- a/docs/examples/composable_indices/city_analysis/City_Analysis.ipynb
+++ b/docs/examples/composable_indices/city_analysis/City_Analysis.ipynb
@@ -342,7 +342,9 @@
     "# Build city document index\n",
     "city_indices = {}\n",
     "for wiki_title in wiki_titles:\n",
-    "    city_indices[wiki_title] = VectorStoreIndex.from_documents(city_docs[wiki_title])"
+    "    city_indices[wiki_title] = VectorStoreIndex.from_documents(\n",
+    "        city_docs[wiki_title]\n",
+    "    )"
    ]
   },
   {
@@ -533,9 +535,9 @@
    "outputs": [],
    "source": [
     "query_str = (\n",
-    "    \"Tell me the airports in Seattle, Houston, and Toronto. \"\n",
-    "    \"If only one city is provided, return the airport information for that city. \"\n",
-    "    \"If airports for multiple cities are provided, compare and contrast the airports. \"\n",
+    "    \"Tell me the airports in Seattle, Houston, and Toronto. If only one city\"\n",
+    "    \" is provided, return the airport information for that city. If airports\"\n",
+    "    \" for multiple cities are provided, compare and contrast the airports. \"\n",
     ")\n",
     "response_davinci = query_engine_davinci.query(query_str)\n",
     "response_chatgpt = query_engine_chatgpt.query(query_str)"
@@ -599,9 +601,10 @@
    "outputs": [],
    "source": [
     "query_str = (\n",
-    "    \"Look at Houston and Boston. \"\n",
-    "    \"If only one city is provided, provide information about the sports teams for that city. \"\n",
-    "    \"If context for multiple cities are provided, compare and contrast the sports environment of the cities. \"\n",
+    "    \"Look at Houston and Boston. If only one city is provided, provide\"\n",
+    "    \" information about the sports teams for that city. If context for\"\n",
+    "    \" multiple cities are provided, compare and contrast the sports\"\n",
+    "    \" environment of the cities. \"\n",
     ")\n",
     "response_davinci = query_engine_davinci.query(query_str)\n",
     "response_chatgpt = query_engine_chatgpt.query(query_str)"
@@ -665,9 +668,10 @@
    "outputs": [],
    "source": [
     "query_str = (\n",
-    "    \"Look at Houston and Boston. \"\n",
-    "    \"If only one city is provided, provide information about the arts and culture for that city. \"\n",
-    "    \"If context for multiple cities are provided, compare and contrast the arts and culture of the two cities. \"\n",
+    "    \"Look at Houston and Boston. If only one city is provided, provide\"\n",
+    "    \" information about the arts and culture for that city. If context for\"\n",
+    "    \" multiple cities are provided, compare and contrast the arts and culture\"\n",
+    "    \" of the two cities. \"\n",
     ")\n",
     "response_davinci = query_engine_davinci.query(query_str)\n",
     "response_chatgpt = query_engine_chatgpt.query(query_str)"
@@ -729,9 +733,10 @@
    "outputs": [],
    "source": [
     "query_str = (\n",
-    "    \"Look at Toronto and San Francisco. \"\n",
-    "    \"If only one city is provided, provide information about the demographics for that city. \"\n",
-    "    \"If context for multiple cities are provided, compare and contrast the demographics of the two cities. \"\n",
+    "    \"Look at Toronto and San Francisco. If only one city is provided, provide\"\n",
+    "    \" information about the demographics for that city. If context for\"\n",
+    "    \" multiple cities are provided, compare and contrast the demographics of\"\n",
+    "    \" the two cities. \"\n",
     ")\n",
     "response_davinci = query_engine_davinci.query(query_str)\n",
     "response_chatgpt = query_engine_chatgpt.query(query_str)"
diff --git a/docs/examples/composable_indices/financial_data_analysis/DeepLakeDemo-FinancialData.ipynb b/docs/examples/composable_indices/financial_data_analysis/DeepLakeDemo-FinancialData.ipynb
index 786abb13d535c378710bc4a659139e0b67f23087..1578d49c73e22d8f0469243179e0d9e2068a44af 100644
--- a/docs/examples/composable_indices/financial_data_analysis/DeepLakeDemo-FinancialData.ipynb
+++ b/docs/examples/composable_indices/financial_data_analysis/DeepLakeDemo-FinancialData.ipynb
@@ -806,7 +806,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.query.query_transform.base import DecomposeQueryTransform\n",
+    "from llama_index.indices.query.query_transform.base import (\n",
+    "    DecomposeQueryTransform,\n",
+    ")\n",
     "\n",
     "decompose_transform = DecomposeQueryTransform(\n",
     "    service_context.llm_predictor, verbose=True\n",
@@ -830,7 +832,9 @@
    "outputs": [],
    "source": [
     "# with query decomposition in subindices\n",
-    "from llama_index.query_engine.transform_query_engine import TransformQueryEngine\n",
+    "from llama_index.query_engine.transform_query_engine import (\n",
+    "    TransformQueryEngine,\n",
+    ")\n",
     "\n",
     "\n",
     "custom_query_engines = {}\n",
@@ -838,11 +842,15 @@
     "    query_engine = index.as_query_engine(service_context=service_context)\n",
     "    transform_metadata = {\"index_summary\": index.index_struct.summary}\n",
     "    tranformed_query_engine = TransformQueryEngine(\n",
-    "        query_engine, decompose_transform, transform_metadata=transform_metadata\n",
+    "        query_engine,\n",
+    "        decompose_transform,\n",
+    "        transform_metadata=transform_metadata,\n",
     "    )\n",
     "    custom_query_engines[index.index_id] = tranformed_query_engine\n",
     "\n",
-    "custom_query_engines[graph.root_index.index_id] = graph.root_index.as_query_engine(\n",
+    "custom_query_engines[\n",
+    "    graph.root_index.index_id\n",
+    "] = graph.root_index.as_query_engine(\n",
     "    retriever_mode=\"simple\",\n",
     "    response_mode=\"tree_summarize\",\n",
     "    service_context=service_context,\n",
@@ -859,7 +867,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.query.query_transform.base import DecomposeQueryTransform\n",
+    "from llama_index.indices.query.query_transform.base import (\n",
+    "    DecomposeQueryTransform,\n",
+    ")\n",
     "\n",
     "decompose_transform = DecomposeQueryTransform(\n",
     "    service_context.llm_predictor, verbose=True\n",
@@ -985,7 +995,9 @@
     }
    ],
    "source": [
-    "response_chatgpt = query_engine_decompose.query(\"Analyze revenue in Q1 of 2018.\")"
+    "response_chatgpt = query_engine_decompose.query(\n",
+    "    \"Analyze revenue in Q1 of 2018.\"\n",
+    ")"
    ]
   },
   {
@@ -1124,7 +1136,9 @@
     }
    ],
    "source": [
-    "response_chatgpt = query_engine_decompose.query(\"Analyze revenue in Q2 of 2018.\")"
+    "response_chatgpt = query_engine_decompose.query(\n",
+    "    \"Analyze revenue in Q2 of 2018.\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/customization/llms/SimpleIndexDemo-ChatGPT.ipynb b/docs/examples/customization/llms/SimpleIndexDemo-ChatGPT.ipynb
index 496ea3f991d0ee7b7cdc04d51ddf69528dac60fe..d9ac5bdd2e05ecd5df5885bdd1f1d80bf4db49c1 100644
--- a/docs/examples/customization/llms/SimpleIndexDemo-ChatGPT.ipynb
+++ b/docs/examples/customization/llms/SimpleIndexDemo-ChatGPT.ipynb
@@ -71,7 +71,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/customization/llms/SimpleIndexDemo-Huggingface_camel.ipynb b/docs/examples/customization/llms/SimpleIndexDemo-Huggingface_camel.ipynb
index 604626384dcd81fc2c3702c54e2ac1a2fe7f55d1..d8eb537291a0baef469d641e59aad902663edda3 100644
--- a/docs/examples/customization/llms/SimpleIndexDemo-Huggingface_camel.ipynb
+++ b/docs/examples/customization/llms/SimpleIndexDemo-Huggingface_camel.ipynb
@@ -134,7 +134,9 @@
     }
    ],
    "source": [
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/customization/llms/SimpleIndexDemo-Huggingface_stablelm.ipynb b/docs/examples/customization/llms/SimpleIndexDemo-Huggingface_stablelm.ipynb
index 8826a00e3bf04e37a6573c68b0fff0f4a7e5906f..1c7c7a7249a6d76e0b6d5a708827ae1d5a789cef 100644
--- a/docs/examples/customization/llms/SimpleIndexDemo-Huggingface_stablelm.ipynb
+++ b/docs/examples/customization/llms/SimpleIndexDemo-Huggingface_stablelm.ipynb
@@ -138,7 +138,9 @@
     }
    ],
    "source": [
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/customization/prompts/chat_prompts.ipynb b/docs/examples/customization/prompts/chat_prompts.ipynb
index 6a3362bff15277c241cc1c70b853177abb7fa039..698b93e668cdb011650ef36756a1d69434664acd 100644
--- a/docs/examples/customization/prompts/chat_prompts.ipynb
+++ b/docs/examples/customization/prompts/chat_prompts.ipynb
@@ -31,7 +31,9 @@
     "chat_text_qa_msgs = [\n",
     "    ChatMessage(\n",
     "        role=MessageRole.SYSTEM,\n",
-    "        content=\"Always answer the question, even if the context isn't helpful.\",\n",
+    "        content=(\n",
+    "            \"Always answer the question, even if the context isn't helpful.\"\n",
+    "        ),\n",
     "    ),\n",
     "    ChatMessage(\n",
     "        role=MessageRole.USER,\n",
@@ -51,7 +53,9 @@
     "chat_refine_msgs = [\n",
     "    ChatMessage(\n",
     "        role=MessageRole.SYSTEM,\n",
-    "        content=\"Always answer the question, even if the context isn't helpful.\",\n",
+    "        content=(\n",
+    "            \"Always answer the question, even if the context isn't helpful.\"\n",
+    "        ),\n",
     "    ),\n",
     "    ChatMessage(\n",
     "        role=MessageRole.USER,\n",
@@ -110,7 +114,9 @@
     "    llm=OpenAI(model=\"gpt-3.5-turbo\", temperature=0.1)\n",
     ")\n",
     "\n",
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/customization/prompts/completion_prompts.ipynb b/docs/examples/customization/prompts/completion_prompts.ipynb
index 52f1614eae7c5405972f039e83d434ee65bdb5fc..3b6073ab3a528d005acf6aec3eddbbfa1720c233 100644
--- a/docs/examples/customization/prompts/completion_prompts.ipynb
+++ b/docs/examples/customization/prompts/completion_prompts.ipynb
@@ -27,25 +27,20 @@
     "from llama_index.prompts import PromptTemplate\n",
     "\n",
     "text_qa_template_str = (\n",
-    "    \"Context information is below.\\n\"\n",
-    "    \"---------------------\\n\"\n",
-    "    \"{context_str}\\n\"\n",
-    "    \"---------------------\\n\"\n",
-    "    \"Using both the context information and also using your own knowledge, \"\n",
-    "    \"answer the question: {query_str}\\n\"\n",
-    "    \"If the context isn't helpful, you can also answer the question on your own.\\n\"\n",
+    "    \"Context information is\"\n",
+    "    \" below.\\n---------------------\\n{context_str}\\n---------------------\\nUsing\"\n",
+    "    \" both the context information and also using your own knowledge, answer\"\n",
+    "    \" the question: {query_str}\\nIf the context isn't helpful, you can also\"\n",
+    "    \" answer the question on your own.\\n\"\n",
     ")\n",
     "text_qa_template = PromptTemplate(text_qa_template_str)\n",
     "\n",
     "refine_template_str = (\n",
-    "    \"The original question is as follows: {query_str}\\n\"\n",
-    "    \"We have provided an existing answer: {existing_answer}\\n\"\n",
-    "    \"We have the opportunity to refine the existing answer \"\n",
-    "    \"(only if needed) with some more context below.\\n\"\n",
-    "    \"------------\\n\"\n",
-    "    \"{context_msg}\\n\"\n",
-    "    \"------------\\n\"\n",
-    "    \"Using both the new context and your own knowledge, update or repeat the existing answer.\\n\"\n",
+    "    \"The original question is as follows: {query_str}\\nWe have provided an\"\n",
+    "    \" existing answer: {existing_answer}\\nWe have the opportunity to refine\"\n",
+    "    \" the existing answer (only if needed) with some more context\"\n",
+    "    \" below.\\n------------\\n{context_msg}\\n------------\\nUsing both the new\"\n",
+    "    \" context and your own knowledge, update or repeat the existing answer.\\n\"\n",
     ")\n",
     "refine_template = PromptTemplate(refine_template_str)"
    ]
@@ -82,11 +77,15 @@
     "from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n",
     "from llama_index.llms import OpenAI\n",
     "\n",
-    "service_context = ServiceContext.from_defaults(llm=OpenAI(model=\"text-davinci-003\"))\n",
+    "service_context = ServiceContext.from_defaults(\n",
+    "    llm=OpenAI(model=\"text-davinci-003\")\n",
+    ")\n",
     "\n",
     "documents = SimpleDirectoryReader(\"../../data/paul_graham/\").load_data()\n",
     "\n",
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/customization/streaming/chat_engine_condense_question_stream_response.ipynb b/docs/examples/customization/streaming/chat_engine_condense_question_stream_response.ipynb
index 2f7c8722ff4a4f5811c61e58aefb763733387ce9..ee09f29811097376c17a76bd83bbb158c80cdcf5 100644
--- a/docs/examples/customization/streaming/chat_engine_condense_question_stream_response.ipynb
+++ b/docs/examples/customization/streaming/chat_engine_condense_question_stream_response.ipynb
@@ -99,7 +99,9 @@
     }
    ],
    "source": [
-    "chat_engine = index.as_chat_engine(chat_mode=\"condense_question\", streaming=True)\n",
+    "chat_engine = index.as_chat_engine(\n",
+    "    chat_mode=\"condense_question\", streaming=True\n",
+    ")\n",
     "response_stream = chat_engine.chat(\"What did Paul Graham do after YC?\")"
    ]
   },
diff --git a/docs/examples/data_connectors/ChromaDemo.ipynb b/docs/examples/data_connectors/ChromaDemo.ipynb
index 2494289d60892fa62da9fa2a6d883d8bbee0b74e..eb4996e015bd7ee9bf98f0d061077284a3b633fb 100644
--- a/docs/examples/data_connectors/ChromaDemo.ipynb
+++ b/docs/examples/data_connectors/ChromaDemo.ipynb
@@ -73,7 +73,9 @@
     "# NOTE: Required args are collection_name, query_vector.\n",
     "# See the Python client: https://github.com/chroma-core/chroma\n",
     "# for more details.\n",
-    "documents = reader.load_data(collection_name=\"demo\", query_vector=query_vector, limit=5)"
+    "documents = reader.load_data(\n",
+    "    collection_name=\"demo\", query_vector=query_vector, limit=5\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/data_connectors/GithubRepositoryReaderDemo.ipynb b/docs/examples/data_connectors/GithubRepositoryReaderDemo.ipynb
index 3caf7808991a390cea93bdab17165ffa43f80aeb..283148ee629cca883fc3c4bdef0fe9530b6f5ee5 100644
--- a/docs/examples/data_connectors/GithubRepositoryReaderDemo.ipynb
+++ b/docs/examples/data_connectors/GithubRepositoryReaderDemo.ipynb
@@ -78,7 +78,8 @@
     "#     time.sleep(.25)\n",
     "query_engine = index.as_query_engine()\n",
     "response = query_engine.query(\n",
-    "    \"What is the difference between VectorStoreIndex and SummaryIndex?\", verbose=True\n",
+    "    \"What is the difference between VectorStoreIndex and SummaryIndex?\",\n",
+    "    verbose=True,\n",
     ")"
    ]
   },
diff --git a/docs/examples/data_connectors/MboxReaderDemo.ipynb b/docs/examples/data_connectors/MboxReaderDemo.ipynb
index a7233367750ad27f3cbabe2a83b5eb49f64777e3..886283d47cf37e2dcb96d930d2f74b4ba4357181 100644
--- a/docs/examples/data_connectors/MboxReaderDemo.ipynb
+++ b/docs/examples/data_connectors/MboxReaderDemo.ipynb
@@ -43,7 +43,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "index = VectorStoreIndex.from_documents(documents)  # Initialize index with documents"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents\n",
+    ")  # Initialize index with documents"
    ]
   },
   {
diff --git a/docs/examples/data_connectors/MyScaleReaderDemo.ipynb b/docs/examples/data_connectors/MyScaleReaderDemo.ipynb
index b13e5ec0a51982106b08b63dfda3ad20cd7d68e2..6d2f3647e8a31ced0ab1ce137fab596321ef8cab 100644
--- a/docs/examples/data_connectors/MyScaleReaderDemo.ipynb
+++ b/docs/examples/data_connectors/MyScaleReaderDemo.ipynb
@@ -91,7 +91,9 @@
    ],
    "source": [
     "reader.load_data(\n",
-    "    [random.random() for _ in range(1536)], where_str=\"extra_info._dummy=0\", limit=3\n",
+    "    [random.random() for _ in range(1536)],\n",
+    "    where_str=\"extra_info._dummy=0\",\n",
+    "    limit=3,\n",
     ")"
    ]
   }
diff --git a/docs/examples/data_connectors/ObsidianReaderDemo.ipynb b/docs/examples/data_connectors/ObsidianReaderDemo.ipynb
index 93bad86bbbac937bd8221e76a940799d5aa166e6..45c5fb8668bbab69913cf391b65b2a2729928d9d 100644
--- a/docs/examples/data_connectors/ObsidianReaderDemo.ipynb
+++ b/docs/examples/data_connectors/ObsidianReaderDemo.ipynb
@@ -56,7 +56,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "index = VectorStoreIndex.from_documents(documents)  # Initialize index with documents"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents\n",
+    ")  # Initialize index with documents"
    ]
   },
   {
diff --git a/docs/examples/data_connectors/QdrantDemo.ipynb b/docs/examples/data_connectors/QdrantDemo.ipynb
index e0c2f3859fc971701682ecebf9371f89400fb55a..566876bc5a6bf245ab2db68b829abd9d11071545 100644
--- a/docs/examples/data_connectors/QdrantDemo.ipynb
+++ b/docs/examples/data_connectors/QdrantDemo.ipynb
@@ -66,7 +66,9 @@
     "# NOTE: Required args are collection_name, query_vector.\n",
     "# See the Python client: https://github.com/qdrant/qdrant_client\n",
     "# for more details.\n",
-    "documents = reader.load_data(collection_name=\"demo\", query_vector=query_vector, limit=5)"
+    "documents = reader.load_data(\n",
+    "    collection_name=\"demo\", query_vector=query_vector, limit=5\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/data_connectors/SlackDemo.ipynb b/docs/examples/data_connectors/SlackDemo.ipynb
index 0946c74d3efc717792cf8443818f442a08ca6249..21877130f8787bfc3670ddb327ed41f81ab392ba 100644
--- a/docs/examples/data_connectors/SlackDemo.ipynb
+++ b/docs/examples/data_connectors/SlackDemo.ipynb
@@ -44,7 +44,9 @@
    "source": [
     "slack_token = os.getenv(\"SLACK_BOT_TOKEN\")\n",
     "channel_ids = [\"<channel_id>\"]\n",
-    "documents = SlackReader(slack_token=slack_token).load_data(channel_ids=channel_ids)"
+    "documents = SlackReader(slack_token=slack_token).load_data(\n",
+    "    channel_ids=channel_ids\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/data_connectors/WeaviateDemo.ipynb b/docs/examples/data_connectors/WeaviateDemo.ipynb
index 54c8706bec883d21a8dcc576b69642144b42ccc2..ec5849f95b169ac3360e27221584b180cd118bcc 100644
--- a/docs/examples/data_connectors/WeaviateDemo.ipynb
+++ b/docs/examples/data_connectors/WeaviateDemo.ipynb
@@ -50,7 +50,8 @@
     "\n",
     "# initialize reader\n",
     "reader = WeaviateReader(\n",
-    "    \"https://<cluster-id>.semi.network/\", auth_client_secret=resource_owner_config\n",
+    "    \"https://<cluster-id>.semi.network/\",\n",
+    "    auth_client_secret=resource_owner_config,\n",
     ")"
    ]
   },
diff --git a/docs/examples/data_connectors/WebPageDemo.ipynb b/docs/examples/data_connectors/WebPageDemo.ipynb
index 3cddc7d1718d5bbcac1aaf2440516bebe9491d6e..a42a0f17e5303fffe95f42d4b6601d171e0e87f2 100644
--- a/docs/examples/data_connectors/WebPageDemo.ipynb
+++ b/docs/examples/data_connectors/WebPageDemo.ipynb
@@ -133,7 +133,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "documents = TrafilaturaWebReader().load_data([\"http://paulgraham.com/worked.html\"])"
+    "documents = TrafilaturaWebReader().load_data(\n",
+    "    [\"http://paulgraham.com/worked.html\"]\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/data_connectors/deplot/DeplotReader.ipynb b/docs/examples/data_connectors/deplot/DeplotReader.ipynb
index 2ed90fdc72796498507fae7108a86b6d4a9427d3..866af8a843702a7951319b06d38c52049feb1b63 100644
--- a/docs/examples/data_connectors/deplot/DeplotReader.ipynb
+++ b/docs/examples/data_connectors/deplot/DeplotReader.ipynb
@@ -98,7 +98,8 @@
    "source": [
     "summary_index = SummaryIndex.from_documents(documents)\n",
     "response = summary_index.as_query_engine().query(\n",
-    "    \"What is the difference between the shares of Greenland and the share of Mauritania?\"\n",
+    "    \"What is the difference between the shares of Greenland and the share of\"\n",
+    "    \" Mauritania?\"\n",
     ")"
    ]
   },
diff --git a/docs/examples/data_connectors/html_tag_reader.ipynb b/docs/examples/data_connectors/html_tag_reader.ipynb
index 0c9ff0f06cc582bc14b1654602ca52a4dfe82a1f..5596ecfdb63a641ded58934467689535556c8a78 100644
--- a/docs/examples/data_connectors/html_tag_reader.ipynb
+++ b/docs/examples/data_connectors/html_tag_reader.ipynb
@@ -67,7 +67,9 @@
     "from llama_index.readers import HTMLTagReader\n",
     "\n",
     "reader = HTMLTagReader(tag=\"section\", ignore_no_id=True)\n",
-    "docs = reader.load_data(\"data/docs.ray.io/en/master/ray-overview/installation.html\")"
+    "docs = reader.load_data(\n",
+    "    \"data/docs.ray.io/en/master/ray-overview/installation.html\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/data_connectors/simple_directory_reader.ipynb b/docs/examples/data_connectors/simple_directory_reader.ipynb
index 5eb1846388cc1bdfee1cb8cccc34272bb9e3db2d..babe3d99ed0f783c7fa126eee43c89874d0d9eeb 100644
--- a/docs/examples/data_connectors/simple_directory_reader.ipynb
+++ b/docs/examples/data_connectors/simple_directory_reader.ipynb
@@ -125,7 +125,9 @@
     "required_exts = [\".md\"]\n",
     "\n",
     "reader = SimpleDirectoryReader(\n",
-    "    input_dir=\"../../end_to_end_tutorials\", required_exts=required_exts, recursive=True\n",
+    "    input_dir=\"../../end_to_end_tutorials\",\n",
+    "    required_exts=required_exts,\n",
+    "    recursive=True,\n",
     ")"
    ]
   },
diff --git a/docs/examples/discover_llamaindex/document_management/Discord_Thread_Management.ipynb b/docs/examples/discover_llamaindex/document_management/Discord_Thread_Management.ipynb
index 6a0d023fdb39b2bf3e4fd91183eaae5fd5ba9e5e..a657ed5106ba6114f1332c475f48b2561313c2e2 100644
--- a/docs/examples/discover_llamaindex/document_management/Discord_Thread_Management.ipynb
+++ b/docs/examples/discover_llamaindex/document_management/Discord_Thread_Management.ipynb
@@ -290,7 +290,9 @@
     "# load it again to confirm it worked\n",
     "from llama_index import StorageContext, load_index_from_storage\n",
     "\n",
-    "index = load_index_from_storage(StorageContext.from_defaults(persist_dir=\"./storage\"))\n",
+    "index = load_index_from_storage(\n",
+    "    StorageContext.from_defaults(persist_dir=\"./storage\")\n",
+    ")\n",
     "\n",
     "print(\"Double check ref_docs ingested: \", len(index.ref_doc_info))"
    ]
@@ -458,7 +460,8 @@
    "source": [
     "# now, refresh!\n",
     "refreshed_docs = index.refresh(\n",
-    "    new_documents, update_kwargs={\"delete_kwargs\": {\"delete_from_docstore\": True}}\n",
+    "    new_documents,\n",
+    "    update_kwargs={\"delete_kwargs\": {\"delete_from_docstore\": True}},\n",
     ")"
    ]
   },
diff --git a/docs/examples/docstore/DocstoreDemo.ipynb b/docs/examples/docstore/DocstoreDemo.ipynb
index 985fc789f54f703e04a69dbf7d61a8cbf29b32c4..f1e90a80457a4bba6991ccf2798504b5feb27fdd 100644
--- a/docs/examples/docstore/DocstoreDemo.ipynb
+++ b/docs/examples/docstore/DocstoreDemo.ipynb
@@ -126,7 +126,9 @@
     "storage_context = StorageContext.from_defaults(docstore=docstore)\n",
     "summary_index = SummaryIndex(nodes, storage_context=storage_context)\n",
     "vector_index = VectorStoreIndex(nodes, storage_context=storage_context)\n",
-    "keyword_table_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context)"
+    "keyword_table_index = SimpleKeywordTableIndex(\n",
+    "    nodes, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -177,7 +179,9 @@
    ],
    "source": [
     "llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
-    "service_context_chatgpt = ServiceContext.from_defaults(llm=llm, chunk_size=1024)"
+    "service_context_chatgpt = ServiceContext.from_defaults(\n",
+    "    llm=llm, chunk_size=1024\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/docstore/DynamoDBDocstoreDemo.ipynb b/docs/examples/docstore/DynamoDBDocstoreDemo.ipynb
index e8ba4fb9a2214b89d789ea49e1783b3334fd8397..d09212d00d2317805a099e8ee785f7451a5a2d2c 100644
--- a/docs/examples/docstore/DynamoDBDocstoreDemo.ipynb
+++ b/docs/examples/docstore/DynamoDBDocstoreDemo.ipynb
@@ -111,8 +111,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.storage.docstore.dynamodb_docstore import DynamoDBDocumentStore\n",
-    "from llama_index.storage.index_store.dynamodb_index_store import DynamoDBIndexStore\n",
+    "from llama_index.storage.docstore.dynamodb_docstore import (\n",
+    "    DynamoDBDocumentStore,\n",
+    ")\n",
+    "from llama_index.storage.index_store.dynamodb_index_store import (\n",
+    "    DynamoDBIndexStore,\n",
+    ")\n",
     "from llama_index.vector_stores.dynamodb import DynamoDBVectorStore"
    ]
   },
@@ -181,7 +185,9 @@
    "outputs": [],
    "source": [
     "# https://gpt-index.readthedocs.io/en/latest/api_reference/indices/table.html\n",
-    "keyword_table_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context)"
+    "keyword_table_index = SimpleKeywordTableIndex(\n",
+    "    nodes, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -275,7 +281,9 @@
    "outputs": [],
    "source": [
     "chatgpt = OpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
-    "service_context_chatgpt = ServiceContext.from_defaults(llm=chatgpt, chunk_size=1024)"
+    "service_context_chatgpt = ServiceContext.from_defaults(\n",
+    "    llm=chatgpt, chunk_size=1024\n",
+    ")"
    ]
   },
   {
@@ -328,7 +336,9 @@
    "outputs": [],
    "source": [
     "query_engine = keyword_table_index.as_query_engine()\n",
-    "keyword_response = query_engine.query(\"What did the author do after his time at YC?\")"
+    "keyword_response = query_engine.query(\n",
+    "    \"What did the author do after his time at YC?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/docstore/FirestoreDemo.ipynb b/docs/examples/docstore/FirestoreDemo.ipynb
index 48a6a5d72a5ca87ec97eaf7b1fa62ce9af70f009..7b6949da32971b192a59dbf551ba8a61c01329b5 100644
--- a/docs/examples/docstore/FirestoreDemo.ipynb
+++ b/docs/examples/docstore/FirestoreDemo.ipynb
@@ -91,8 +91,12 @@
    "outputs": [],
    "source": [
     "from llama_index.storage.kvstore.firestore_kvstore import FirestoreKVStore\n",
-    "from llama_index.storage.docstore.firestore_docstore import FirestoreDocumentStore\n",
-    "from llama_index.storage.index_store.firestore_indexstore import FirestoreIndexStore"
+    "from llama_index.storage.docstore.firestore_docstore import (\n",
+    "    FirestoreDocumentStore,\n",
+    ")\n",
+    "from llama_index.storage.index_store.firestore_indexstore import (\n",
+    "    FirestoreIndexStore,\n",
+    ")"
    ]
   },
   {
@@ -151,7 +155,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "keyword_table_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context)"
+    "keyword_table_index = SimpleKeywordTableIndex(\n",
+    "    nodes, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -236,7 +242,9 @@
    "outputs": [],
    "source": [
     "chatgpt = OpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
-    "service_context_chatgpt = ServiceContext.from_defaults(llm=chatgpt, chunk_size=1024)"
+    "service_context_chatgpt = ServiceContext.from_defaults(\n",
+    "    llm=chatgpt, chunk_size=1024\n",
+    ")"
    ]
   },
   {
@@ -284,7 +292,9 @@
    "outputs": [],
    "source": [
     "query_engine = keyword_table_index.as_query_engine()\n",
-    "keyword_response = query_engine.query(\"What did the author do after his time at YC?\")"
+    "keyword_response = query_engine.query(\n",
+    "    \"What did the author do after his time at YC?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/docstore/MongoDocstoreDemo.ipynb b/docs/examples/docstore/MongoDocstoreDemo.ipynb
index b058ea1d86ce88bdd5e4bdf1cb485d4fb5a9c903..0270794613fd30fc13807733c7f875fe049d743c 100644
--- a/docs/examples/docstore/MongoDocstoreDemo.ipynb
+++ b/docs/examples/docstore/MongoDocstoreDemo.ipynb
@@ -178,7 +178,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "keyword_table_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context)"
+    "keyword_table_index = SimpleKeywordTableIndex(\n",
+    "    nodes, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -270,7 +272,9 @@
    "outputs": [],
    "source": [
     "chatgpt = OpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
-    "service_context_chatgpt = ServiceContext.from_defaults(llm=chatgpt, chunk_size=1024)"
+    "service_context_chatgpt = ServiceContext.from_defaults(\n",
+    "    llm=chatgpt, chunk_size=1024\n",
+    ")"
    ]
   },
   {
@@ -323,7 +327,9 @@
    "outputs": [],
    "source": [
     "query_engine = keyword_table_index.as_query_engine()\n",
-    "keyword_response = query_engine.query(\"What did the author do after his time at YC?\")"
+    "keyword_response = query_engine.query(\n",
+    "    \"What did the author do after his time at YC?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/docstore/RedisDocstoreIndexStoreDemo.ipynb b/docs/examples/docstore/RedisDocstoreIndexStoreDemo.ipynb
index a0054ee1938ba073edf98ce53f3e747d98c9e9b9..10891b173489537651f18854fd34988008943a9f 100644
--- a/docs/examples/docstore/RedisDocstoreIndexStoreDemo.ipynb
+++ b/docs/examples/docstore/RedisDocstoreIndexStoreDemo.ipynb
@@ -274,7 +274,9 @@
     }
    ],
    "source": [
-    "keyword_table_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context)"
+    "keyword_table_index = SimpleKeywordTableIndex(\n",
+    "    nodes, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -394,7 +396,9 @@
    "outputs": [],
    "source": [
     "chatgpt = OpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
-    "service_context_chatgpt = ServiceContext.from_defaults(llm=chatgpt, chunk_size=1024)"
+    "service_context_chatgpt = ServiceContext.from_defaults(\n",
+    "    llm=chatgpt, chunk_size=1024\n",
+    ")"
    ]
   },
   {
@@ -516,7 +520,9 @@
    ],
    "source": [
     "query_engine = keyword_table_index.as_query_engine()\n",
-    "keyword_response = query_engine.query(\"What did the author do after his time at YC?\")"
+    "keyword_response = query_engine.query(\n",
+    "    \"What did the author do after his time at YC?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/embeddings/custom_embeddings.ipynb b/docs/examples/embeddings/custom_embeddings.ipynb
index 86c607cae70c63297b3338a68e181d1e0dec399d..8128154d6479108267e80d48558aa56ccdc003bf 100644
--- a/docs/examples/embeddings/custom_embeddings.ipynb
+++ b/docs/examples/embeddings/custom_embeddings.ipynb
@@ -88,7 +88,9 @@
     "        return embeddings[0]\n",
     "\n",
     "    def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:\n",
-    "        embeddings = self._model.encode([[self._instruction, text] for text in texts])\n",
+    "        embeddings = self._model.encode(\n",
+    "            [[self._instruction, text] for text in texts]\n",
+    "        )\n",
     "        return embeddings"
    ]
   },
@@ -138,7 +140,9 @@
     ")\n",
     "\n",
     "# if running for the first time, will download model weights first!\n",
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/embeddings/gradient.ipynb b/docs/examples/embeddings/gradient.ipynb
index a048cb955db6c56c65e733d387e79b21e4f7c196..f051c8de9b88fd2fce2f1416e38c15ab8445a838 100644
--- a/docs/examples/embeddings/gradient.ipynb
+++ b/docs/examples/embeddings/gradient.ipynb
@@ -129,7 +129,9 @@
    "source": [
     "from llama_index import VectorStoreIndex\n",
     "\n",
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)\n",
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")\n",
     "query_engine = index.as_query_engine()"
    ]
   },
@@ -140,7 +142,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "response = query_engine.query(\"What did the author do after his time at Y Combinator?\")\n",
+    "response = query_engine.query(\n",
+    "    \"What did the author do after his time at Y Combinator?\"\n",
+    ")\n",
     "print(response)"
    ]
   }
diff --git a/docs/examples/embeddings/huggingface.ipynb b/docs/examples/embeddings/huggingface.ipynb
index f5170b954633db015a80b568f1ddec28d6884287..fd12545f438e8ed5076488228a5c1b2a6d151e31 100644
--- a/docs/examples/embeddings/huggingface.ipynb
+++ b/docs/examples/embeddings/huggingface.ipynb
@@ -191,7 +191,9 @@
    "source": [
     "from llama_index.embeddings import OptimumEmbedding\n",
     "\n",
-    "OptimumEmbedding.create_and_save_optimum_model(\"BAAI/bge-small-en-v1.5\", \"./bge_onnx\")"
+    "OptimumEmbedding.create_and_save_optimum_model(\n",
+    "    \"BAAI/bge-small-en-v1.5\", \"./bge_onnx\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/evaluation/QuestionGeneration.ipynb b/docs/examples/evaluation/QuestionGeneration.ipynb
index 7bc5d9dce71308bd297a2c84e1ee73fdce5a45d0..4fa4ff8eb6c7227eaa6ae5bde3ba23f1c7a70afd 100644
--- a/docs/examples/evaluation/QuestionGeneration.ipynb
+++ b/docs/examples/evaluation/QuestionGeneration.ipynb
@@ -225,7 +225,9 @@
     "        {\n",
     "            \"Query\": query,\n",
     "            \"Response\": str(response),\n",
-    "            \"Source\": response.source_nodes[0].node.get_content()[:1000] + \"...\",\n",
+    "            \"Source\": (\n",
+    "                response.source_nodes[0].node.get_content()[:1000] + \"...\"\n",
+    "            ),\n",
     "            \"Evaluation Result\": eval_result,\n",
     "        },\n",
     "        index=[0],\n",
diff --git a/docs/examples/evaluation/RetryQuery.ipynb b/docs/examples/evaluation/RetryQuery.ipynb
index 5296f7cd15ecf6abf10134035d069de77203d756..9895796d1e17f1e1b64249356f964463cd2b8ab8 100644
--- a/docs/examples/evaluation/RetryQuery.ipynb
+++ b/docs/examples/evaluation/RetryQuery.ipynb
@@ -145,7 +145,9 @@
     "from llama_index.evaluation import RelevancyEvaluator\n",
     "\n",
     "query_response_evaluator = RelevancyEvaluator()\n",
-    "retry_query_engine = RetryQueryEngine(base_query_engine, query_response_evaluator)\n",
+    "retry_query_engine = RetryQueryEngine(\n",
+    "    base_query_engine, query_response_evaluator\n",
+    ")\n",
     "retry_response = retry_query_engine.query(query)\n",
     "print(retry_response)"
    ]
@@ -207,7 +209,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.evaluation.guideline import GuidelineEvaluator, DEFAULT_GUIDELINES\n",
+    "from llama_index.evaluation.guideline import (\n",
+    "    GuidelineEvaluator,\n",
+    "    DEFAULT_GUIDELINES,\n",
+    ")\n",
     "from llama_index.response.schema import Response\n",
     "from llama_index.indices.query.query_transform.feedback_transform import (\n",
     "    FeedbackQueryTransformation,\n",
@@ -218,7 +223,8 @@
     "\n",
     "# Guideline eval\n",
     "guideline_eval = GuidelineEvaluator(\n",
-    "    guidelines=DEFAULT_GUIDELINES + \"\\nThe response should not be overly long.\\n\"\n",
+    "    guidelines=DEFAULT_GUIDELINES\n",
+    "    + \"\\nThe response should not be overly long.\\n\"\n",
     "    \"The response should try to summarize where possible.\\n\"\n",
     ")  # just for example"
    ]
@@ -250,7 +256,9 @@
     }
    ],
    "source": [
-    "typed_response = response if isinstance(response, Response) else response.get_response()\n",
+    "typed_response = (\n",
+    "    response if isinstance(response, Response) else response.get_response()\n",
+    ")\n",
     "eval = guideline_eval.evaluate_response(query, typed_response)\n",
     "print(f\"Guideline eval evaluation result: {eval.feedback}\")\n",
     "\n",
diff --git a/docs/examples/evaluation/correctness_eval.ipynb b/docs/examples/evaluation/correctness_eval.ipynb
index 4d5b6129d6ad848630febd89ccad5ca4a51551f4..4ae63e643997c9423595d53587fabfc7e65b43ba 100644
--- a/docs/examples/evaluation/correctness_eval.ipynb
+++ b/docs/examples/evaluation/correctness_eval.ipynb
@@ -47,7 +47,8 @@
    "outputs": [],
    "source": [
     "query = (\n",
-    "    \"Can you explain the theory of relativity proposed by Albert Einstein in detail?\"\n",
+    "    \"Can you explain the theory of relativity proposed by Albert Einstein in\"\n",
+    "    \" detail?\"\n",
     ")\n",
     "\n",
     "reference = \"\"\"\n",
diff --git a/docs/examples/evaluation/faithfulness_eval.ipynb b/docs/examples/evaluation/faithfulness_eval.ipynb
index 8954b6c996a08d6111f96516ae595ef14cb2a7ea..e168dd89830fcb2468ebd113474159630ed40cf5 100644
--- a/docs/examples/evaluation/faithfulness_eval.ipynb
+++ b/docs/examples/evaluation/faithfulness_eval.ipynb
@@ -270,7 +270,9 @@
     "    total_correct = 0\n",
     "    for r in results:\n",
     "        # evaluate with gpt 4\n",
-    "        eval_result = 1 if evaluator_gpt4.evaluate_response(response=r).passing else 0\n",
+    "        eval_result = (\n",
+    "            1 if evaluator_gpt4.evaluate_response(response=r).passing else 0\n",
+    "        )\n",
     "        total_correct += eval_result\n",
     "\n",
     "    return total_correct, len(results)"
diff --git a/docs/examples/evaluation/guideline_eval.ipynb b/docs/examples/evaluation/guideline_eval.ipynb
index a59ab7159586d6a50d8ff868d18e7dd641925334..5f853f81d8c5ec862b963343e4ea2b2a770f3ff5 100644
--- a/docs/examples/evaluation/guideline_eval.ipynb
+++ b/docs/examples/evaluation/guideline_eval.ipynb
@@ -43,7 +43,10 @@
     "GUIDELINES = [\n",
     "    \"The response should fully answer the query.\",\n",
     "    \"The response should avoid being vague or ambiguous.\",\n",
-    "    \"The response should be specific and use statistics or numbers when possible.\",\n",
+    "    (\n",
+    "        \"The response should be specific and use statistics or numbers when\"\n",
+    "        \" possible.\"\n",
+    "    ),\n",
     "]"
    ]
   },
@@ -72,11 +75,27 @@
     "sample_data = {\n",
     "    \"query\": \"Tell me about global warming.\",\n",
     "    \"contexts\": [\n",
-    "        \"Global warming refers to the long-term increase in Earth's average surface temperature due to human activities such as the burning of fossil fuels and deforestation.\",\n",
-    "        \"It is a major environmental issue with consequences such as rising sea levels, extreme weather events, and disruptions to ecosystems.\",\n",
-    "        \"Efforts to combat global warming include reducing carbon emissions, transitioning to renewable energy sources, and promoting sustainable practices.\",\n",
+    "        (\n",
+    "            \"Global warming refers to the long-term increase in Earth's\"\n",
+    "            \" average surface temperature due to human activities such as the\"\n",
+    "            \" burning of fossil fuels and deforestation.\"\n",
+    "        ),\n",
+    "        (\n",
+    "            \"It is a major environmental issue with consequences such as\"\n",
+    "            \" rising sea levels, extreme weather events, and disruptions to\"\n",
+    "            \" ecosystems.\"\n",
+    "        ),\n",
+    "        (\n",
+    "            \"Efforts to combat global warming include reducing carbon\"\n",
+    "            \" emissions, transitioning to renewable energy sources, and\"\n",
+    "            \" promoting sustainable practices.\"\n",
+    "        ),\n",
     "    ],\n",
-    "    \"response\": \"Global warming is a critical environmental issue caused by human activities that lead to a rise in Earth's temperature. It has various adverse effects on the planet.\",\n",
+    "    \"response\": (\n",
+    "        \"Global warming is a critical environmental issue caused by human\"\n",
+    "        \" activities that lead to a rise in Earth's temperature. It has\"\n",
+    "        \" various adverse effects on the planet.\"\n",
+    "    ),\n",
     "}"
    ]
   },
diff --git a/docs/examples/evaluation/pairwise_eval.ipynb b/docs/examples/evaluation/pairwise_eval.ipynb
index 43859c97b41b73a9c2743e47e59bacf31492ed85..dee9dcd9959f45a14a334345a0e0c43f73345498 100644
--- a/docs/examples/evaluation/pairwise_eval.ipynb
+++ b/docs/examples/evaluation/pairwise_eval.ipynb
@@ -88,7 +88,9 @@
     "gpt4 = OpenAI(temperature=0, model=\"gpt-4\")\n",
     "service_context_gpt4 = ServiceContext.from_defaults(llm=gpt4)\n",
     "\n",
-    "evaluator_gpt4 = PairwiseComparisonEvaluator(service_context=service_context_gpt4)"
+    "evaluator_gpt4 = PairwiseComparisonEvaluator(\n",
+    "    service_context=service_context_gpt4\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/evaluation/relevancy_eval.ipynb b/docs/examples/evaluation/relevancy_eval.ipynb
index 4e1ab024b43f826eba43f2bdd73123cbef0df1df..e44df8e7847630a37a448d8b9df53229d42fbc17 100644
--- a/docs/examples/evaluation/relevancy_eval.ipynb
+++ b/docs/examples/evaluation/relevancy_eval.ipynb
@@ -142,7 +142,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "query_str = \"What battles took place in New York City in the American Revolution?\"\n",
+    "query_str = (\n",
+    "    \"What battles took place in New York City in the American Revolution?\"\n",
+    ")\n",
     "query_engine = vector_index.as_query_engine()\n",
     "response_vector = query_engine.query(query_str)\n",
     "eval_result = evaluator_gpt4.evaluate_response(\n",
@@ -391,7 +393,9 @@
    "source": [
     "# NOTE: you can set response_mode=\"no_text\" to get just the sources\n",
     "query_str = \"What are the airports in New York City?\"\n",
-    "query_engine = vector_index.as_query_engine(similarity_top_k=3, response_mode=\"no_text\")\n",
+    "query_engine = vector_index.as_query_engine(\n",
+    "    similarity_top_k=3, response_mode=\"no_text\"\n",
+    ")\n",
     "response_vector = query_engine.query(query_str)\n",
     "eval_source_result_full = [\n",
     "    evaluator_gpt4.evaluate(\n",
@@ -499,7 +503,9 @@
    "source": [
     "# NOTE: you can set response_mode=\"no_text\" to get just the sources\n",
     "query_str = \"Who is the mayor of New York City?\"\n",
-    "query_engine = vector_index.as_query_engine(similarity_top_k=3, response_mode=\"no_text\")\n",
+    "query_engine = vector_index.as_query_engine(\n",
+    "    similarity_top_k=3, response_mode=\"no_text\"\n",
+    ")\n",
     "eval_source_result_full = [\n",
     "    evaluator_gpt4.evaluate(\n",
     "        query=query_str,\n",
diff --git a/docs/examples/evaluation/retrieval/retriever_eval.ipynb b/docs/examples/evaluation/retrieval/retriever_eval.ipynb
index 400683f440e53481a42b7b4f4c0611b6d67ba831..6ea1e45c5d3d917370a29e9040df9981aa1c6e0a 100644
--- a/docs/examples/evaluation/retrieval/retriever_eval.ipynb
+++ b/docs/examples/evaluation/retrieval/retriever_eval.ipynb
@@ -210,7 +210,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "qa_dataset = generate_question_context_pairs(nodes, llm=llm, num_questions_per_chunk=2)"
+    "qa_dataset = generate_question_context_pairs(\n",
+    "    nodes, llm=llm, num_questions_per_chunk=2\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/finetuning/cross_encoder_finetuning/cross_encoder_finetuning.ipynb b/docs/examples/finetuning/cross_encoder_finetuning/cross_encoder_finetuning.ipynb
index 3e6f12d2fb2d3a9d46864756da6265946561dbbf..90109ceb69aee8fd30f99ddf6d536ab0f2c8c8f9 100644
--- a/docs/examples/finetuning/cross_encoder_finetuning/cross_encoder_finetuning.ipynb
+++ b/docs/examples/finetuning/cross_encoder_finetuning/cross_encoder_finetuning.ipynb
@@ -502,7 +502,10 @@
     "    documents = [Document(text=row[\"paper\"])]\n",
     "    query_list = row[\"questions\"]\n",
     "    local_eval_dataset = generate_ce_fine_tuning_dataset(\n",
-    "        documents=documents, questions_list=query_list, max_chunk_length=256, top_k=5\n",
+    "        documents=documents,\n",
+    "        questions_list=query_list,\n",
+    "        max_chunk_length=256,\n",
+    "        top_k=5,\n",
     "    )\n",
     "    relevant_query_list = []\n",
     "    relevant_context_list = []\n",
@@ -683,7 +686,9 @@
    ],
    "source": [
     "# Push model to HuggingFace Hub\n",
-    "finetuning_engine.push_to_hub(repo_id=\"bpHigh/Cross-Encoder-LLamaIndex-Demo-v2\")"
+    "finetuning_engine.push_to_hub(\n",
+    "    repo_id=\"bpHigh/Cross-Encoder-LLamaIndex-Demo-v2\"\n",
+    ")"
    ]
   },
   {
@@ -1062,7 +1067,9 @@
     "        similarity_top_k=3, response_mode=\"no_text\"\n",
     "    )\n",
     "    retriever_with_base_reranker = vector_index.as_query_engine(\n",
-    "        similarity_top_k=8, response_mode=\"no_text\", node_postprocessors=[rerank_base]\n",
+    "        similarity_top_k=8,\n",
+    "        response_mode=\"no_text\",\n",
+    "        node_postprocessors=[rerank_base],\n",
     "    )\n",
     "    retriever_with_finetuned_reranker = vector_index.as_query_engine(\n",
     "        similarity_top_k=8,\n",
@@ -1089,10 +1096,12 @@
     "            if context in node.node.text or node.node.text in context:\n",
     "                base_reranker_hits += 1\n",
     "\n",
-    "        response_with_finetuned_reranker = retriever_with_finetuned_reranker.query(\n",
-    "            query\n",
+    "        response_with_finetuned_reranker = (\n",
+    "            retriever_with_finetuned_reranker.query(query)\n",
+    "        )\n",
+    "        with_finetuned_reranker_nodes = (\n",
+    "            response_with_finetuned_reranker.source_nodes\n",
     "        )\n",
-    "        with_finetuned_reranker_nodes = response_with_finetuned_reranker.source_nodes\n",
     "\n",
     "        for node in with_finetuned_reranker_nodes:\n",
     "            if context in node.node.text or node.node.text in context:\n",
@@ -1510,11 +1519,15 @@
     "            pass\n",
     "\n",
     "    if number_of_accepted_queries > 0:\n",
-    "        avg_pairwise_local_score = pairwise_local_score / number_of_accepted_queries\n",
+    "        avg_pairwise_local_score = (\n",
+    "            pairwise_local_score / number_of_accepted_queries\n",
+    "        )\n",
     "        pairwise_scores_list.append(avg_pairwise_local_score)\n",
     "\n",
     "\n",
-    "overal_pairwise_average_score = sum(pairwise_scores_list) / len(pairwise_scores_list)\n",
+    "overal_pairwise_average_score = sum(pairwise_scores_list) / len(\n",
+    "    pairwise_scores_list\n",
+    ")\n",
     "\n",
     "df_responses = pd.DataFrame(no_reranker_dict_list)\n",
     "df_responses.to_csv(\"No_Reranker_Responses.csv\")"
@@ -1767,10 +1780,14 @@
     "            pass\n",
     "\n",
     "    if number_of_accepted_queries > 0:\n",
-    "        avg_pairwise_local_score = pairwise_local_score / number_of_accepted_queries\n",
+    "        avg_pairwise_local_score = (\n",
+    "            pairwise_local_score / number_of_accepted_queries\n",
+    "        )\n",
     "        pairwise_scores_list.append(avg_pairwise_local_score)\n",
     "\n",
-    "overal_pairwise_average_score = sum(pairwise_scores_list) / len(pairwise_scores_list)\n",
+    "overal_pairwise_average_score = sum(pairwise_scores_list) / len(\n",
+    "    pairwise_scores_list\n",
+    ")\n",
     "\n",
     "df_responses = pd.DataFrame(base_reranker_dict_list)\n",
     "df_responses.to_csv(\"Base_Reranker_Responses.csv\")"
@@ -1953,10 +1970,14 @@
     "            pass\n",
     "\n",
     "    if number_of_accepted_queries > 0:\n",
-    "        avg_pairwise_local_score = pairwise_local_score / number_of_accepted_queries\n",
+    "        avg_pairwise_local_score = (\n",
+    "            pairwise_local_score / number_of_accepted_queries\n",
+    "        )\n",
     "        pairwise_scores_list.append(avg_pairwise_local_score)\n",
     "\n",
-    "overal_pairwise_average_score = sum(pairwise_scores_list) / len(pairwise_scores_list)\n",
+    "overal_pairwise_average_score = sum(pairwise_scores_list) / len(\n",
+    "    pairwise_scores_list\n",
+    ")\n",
     "df_responses = pd.DataFrame(finetuned_reranker_dict_list)\n",
     "df_responses.to_csv(\"Finetuned_Reranker_Responses.csv\")"
    ]
diff --git a/docs/examples/finetuning/embeddings/eval_utils.py b/docs/examples/finetuning/embeddings/eval_utils.py
index c685395d17bc0b0e81ee706faaa6f6b941aee102..774c4ee6ebc2acdf746e48ade29063dab51cbbf0 100644
--- a/docs/examples/finetuning/embeddings/eval_utils.py
+++ b/docs/examples/finetuning/embeddings/eval_utils.py
@@ -16,7 +16,9 @@ def evaluate(
 
     service_context = ServiceContext.from_defaults(embed_model=embed_model)
     nodes = [TextNode(id_=id_, text=text) for id_, text in corpus.items()]
-    index = VectorStoreIndex(nodes, service_context=service_context, show_progress=True)
+    index = VectorStoreIndex(
+        nodes, service_context=service_context, show_progress=True
+    )
     retriever = index.as_retriever(similarity_top_k=top_k)
 
     eval_results = []
@@ -57,5 +59,7 @@ def display_results(names, results_arr):
         hit_rates.append(hit_rate)
         mrrs.append(mrr)
 
-    final_df = pd.DataFrame({"retrievers": names, "hit_rate": hit_rates, "mrr": mrrs})
+    final_df = pd.DataFrame(
+        {"retrievers": names, "hit_rate": hit_rates, "mrr": mrrs}
+    )
     display(final_df)
diff --git a/docs/examples/finetuning/embeddings/finetune_embedding.ipynb b/docs/examples/finetuning/embeddings/finetune_embedding.ipynb
index c0e51d40919a8ab434b0ff2f3124f4289671df2e..232f49f20e765c9805a4c0ed16eb043e0a948a8a 100644
--- a/docs/examples/finetuning/embeddings/finetune_embedding.ipynb
+++ b/docs/examples/finetuning/embeddings/finetune_embedding.ipynb
@@ -491,7 +491,9 @@
     "\n",
     "    service_context = ServiceContext.from_defaults(embed_model=embed_model)\n",
     "    nodes = [TextNode(id_=id_, text=text) for id_, text in corpus.items()]\n",
-    "    index = VectorStoreIndex(nodes, service_context=service_context, show_progress=True)\n",
+    "    index = VectorStoreIndex(\n",
+    "        nodes, service_context=service_context, show_progress=True\n",
+    "    )\n",
     "    retriever = index.as_retriever(similarity_top_k=top_k)\n",
     "\n",
     "    eval_results = []\n",
@@ -542,7 +544,9 @@
     "    queries = dataset.queries\n",
     "    relevant_docs = dataset.relevant_docs\n",
     "\n",
-    "    evaluator = InformationRetrievalEvaluator(queries, corpus, relevant_docs, name=name)\n",
+    "    evaluator = InformationRetrievalEvaluator(\n",
+    "        queries, corpus, relevant_docs, name=name\n",
+    "    )\n",
     "    model = SentenceTransformer(model_id)\n",
     "    output_path = \"results/\"\n",
     "    Path(output_path).mkdir(exist_ok=True, parents=True)\n",
@@ -1326,7 +1330,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "df_st_bge = pd.read_csv(\"results/Information-Retrieval_evaluation_bge_results.csv\")\n",
+    "df_st_bge = pd.read_csv(\n",
+    "    \"results/Information-Retrieval_evaluation_bge_results.csv\"\n",
+    ")\n",
     "df_st_finetuned = pd.read_csv(\n",
     "    \"results/Information-Retrieval_evaluation_finetuned_results.csv\"\n",
     ")"
diff --git a/docs/examples/finetuning/embeddings/finetune_embedding_adapter.ipynb b/docs/examples/finetuning/embeddings/finetune_embedding_adapter.ipynb
index 5b0b35ccb26b8dfd9c7ae8fd52d69076cc10af86..bfe4d5d01817804f1a106827bcd2225718c67b2b 100644
--- a/docs/examples/finetuning/embeddings/finetune_embedding_adapter.ipynb
+++ b/docs/examples/finetuning/embeddings/finetune_embedding_adapter.ipynb
@@ -805,7 +805,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "embed_model_2layer = finetune_engine.get_finetuned_model(adapter_cls=TwoLayerNN)"
+    "embed_model_2layer = finetune_engine.get_finetuned_model(\n",
+    "    adapter_cls=TwoLayerNN\n",
+    ")"
    ]
   },
   {
@@ -1248,7 +1250,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "embed_model_custom = finetune_engine.get_finetuned_model(adapter_cls=CustomAdapter)"
+    "embed_model_custom = finetune_engine.get_finetuned_model(\n",
+    "    adapter_cls=CustomAdapter\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/finetuning/knowledge/finetune_knowledge.ipynb b/docs/examples/finetuning/knowledge/finetune_knowledge.ipynb
index 11a4e0854fab1a79571b372dc4f0a48f7c07a1b3..b1502c4468d13c85d8ffe8aacac0d9c4d4437d63 100644
--- a/docs/examples/finetuning/knowledge/finetune_knowledge.ipynb
+++ b/docs/examples/finetuning/knowledge/finetune_knowledge.ipynb
@@ -98,7 +98,9 @@
     "from llama_index import Document\n",
     "\n",
     "doc_text = \"\\n\\n\".join([d.get_content() for d in docs0])\n",
-    "metadata = {\"paper_title\": \"Llama 2: Open Foundation and Fine-Tuned Chat Models\"}\n",
+    "metadata = {\n",
+    "    \"paper_title\": \"Llama 2: Open Foundation and Fine-Tuned Chat Models\"\n",
+    "}\n",
     "docs = [Document(text=doc_text, metadata=metadata)]"
    ]
   },
@@ -128,7 +130,8 @@
     "    callback_manager=callback_manager,\n",
     ")\n",
     "gpt_4_context = ServiceContext.from_defaults(\n",
-    "    llm=OpenAI(model=\"gpt-4-0613\", temperature=0.3), callback_manager=callback_manager\n",
+    "    llm=OpenAI(model=\"gpt-4-0613\", temperature=0.3),\n",
+    "    callback_manager=callback_manager,\n",
     ")"
    ]
   },
@@ -178,24 +181,20 @@
     "\n",
     "num_questions_per_chunk = 10\n",
     "question_gen_query = (\n",
-    "    \"You are a Teacher/ Professor. Your task is to setup \"\n",
-    "    \"a quiz/examination. Using the provided context, \"\n",
-    "    f\"formulate {num_questions_per_chunk} that captures an important fact from the \"\n",
-    "    \"context. \\n\"\n",
-    "    \"You MUST obey the following criteria:\\n\"\n",
-    "    \"- Restrict the question to the context information provided.\\n\"\n",
-    "    \"- Do NOT create a question that cannot be answered from the context.\\n\"\n",
-    "    \"- Phrase the question so that it does NOT refer to specific context. \"\n",
-    "    'For instance, do NOT put phrases like \"given provided context\" or \"in this work\" in the question, '\n",
-    "    \"because if the question is asked elsewhere it wouldn't be provided specific context. Replace these terms \"\n",
-    "    \"with specific details.\\n\"\n",
-    "    \"BAD questions:\\n\"\n",
-    "    \"What did the author do in his childhood\\n\"\n",
-    "    \"What were the main findings in this report\\n\\n\"\n",
-    "    \"GOOD questions:\\n\"\n",
-    "    \"What did Barack Obama do in his childhood\\n\"\n",
-    "    \"What were the main findings in the original Transformers paper by Vaswani et al.\\n\\n\"\n",
-    "    \"Generate the questions below:\\n\"\n",
+    "    \"You are a Teacher/ Professor. Your task is to setup a quiz/examination.\"\n",
+    "    f\" Using the provided context, formulate {num_questions_per_chunk} that\"\n",
+    "    \" captures an important fact from the context. \\nYou MUST obey the\"\n",
+    "    \" following criteria:\\n- Restrict the question to the context information\"\n",
+    "    \" provided.\\n- Do NOT create a question that cannot be answered from the\"\n",
+    "    \" context.\\n- Phrase the question so that it does NOT refer to specific\"\n",
+    "    ' context. For instance, do NOT put phrases like \"given provided context\"'\n",
+    "    ' or \"in this work\" in the question, because if the question is asked'\n",
+    "    \" elsewhere it wouldn't be provided specific context. Replace these\"\n",
+    "    \" terms with specific details.\\nBAD questions:\\nWhat did the author do in\"\n",
+    "    \" his childhood\\nWhat were the main findings in this report\\n\\nGOOD\"\n",
+    "    \" questions:\\nWhat did Barack Obama do in his childhood\\nWhat were the\"\n",
+    "    \" main findings in the original Transformers paper by Vaswani et\"\n",
+    "    \" al.\\n\\nGenerate the questions below:\\n\"\n",
     ")\n",
     "\n",
     "# go through each node one at a time -\n",
@@ -254,15 +253,13 @@
    "outputs": [],
    "source": [
     "query_eval_tmpl = PromptTemplate(\n",
-    "    \"Your task is to evaluate the following: If the response for the query isn't able to answer the question provided.\\n\"\n",
-    "    \"If query isn't able to answer the question, answer NO.\\n\"\n",
-    "    \"Otherwise answer YES.\\n\"\n",
-    "    \"To elaborate, you might get an answer like the following: 'The context does not contain the answer to this question.'\"\n",
-    "    \"Please return NO in that case. \"\n",
-    "    \"You be given the query and response. Return YES or NO as the answer.\\n\"\n",
-    "    \"Query: \\n {query_str}\\n\"\n",
-    "    \"Response: \\n {response_str}\\n\"\n",
-    "    \"Answer: \"\n",
+    "    \"Your task is to evaluate the following: If the response for the query\"\n",
+    "    \" isn't able to answer the question provided.\\nIf query isn't able to\"\n",
+    "    \" answer the question, answer NO.\\nOtherwise answer YES.\\nTo elaborate,\"\n",
+    "    \" you might get an answer like the following: 'The context does not\"\n",
+    "    \" contain the answer to this question.'Please return NO in that case. You\"\n",
+    "    \" be given the query and response. Return YES or NO as the answer.\\nQuery:\"\n",
+    "    \" \\n {query_str}\\nResponse: \\n {response_str}\\nAnswer: \"\n",
     ")\n",
     "\n",
     "eval_llm = OpenAI(model=\"gpt-4-0613\")"
@@ -328,7 +325,9 @@
     "import random\n",
     "\n",
     "\n",
-    "def split_train_val(path: str, out_train_path: str, out_val_path: str, train_split=0.7):\n",
+    "def split_train_val(\n",
+    "    path: str, out_train_path: str, out_val_path: str, train_split=0.7\n",
+    "):\n",
     "    with open(path, \"r\") as fp:\n",
     "        lines = fp.readlines()\n",
     "\n",
@@ -354,7 +353,9 @@
    "outputs": [],
    "source": [
     "split_train_val(\n",
-    "    \"data/qa_pairs_2.jsonl\", \"data/qa_pairs_train.jsonl\", \"data/qa_pairs_val.jsonl\"\n",
+    "    \"data/qa_pairs_2.jsonl\",\n",
+    "    \"data/qa_pairs_train.jsonl\",\n",
+    "    \"data/qa_pairs_val.jsonl\",\n",
     ")"
    ]
   },
@@ -382,7 +383,10 @@
     "# TODO: try with different system prompts\n",
     "system_prompt = {\n",
     "    \"role\": \"system\",\n",
-    "    \"content\": \"You are a helpful assistant helping to answer questions about the Llama 2 paper.\",\n",
+    "    \"content\": (\n",
+    "        \"You are a helpful assistant helping to answer questions about the\"\n",
+    "        \" Llama 2 paper.\"\n",
+    "    ),\n",
     "}\n",
     "for line in fp:\n",
     "    qa_pair = json.loads(line)\n",
@@ -628,7 +632,10 @@
     "    msgs = [\n",
     "        ChatMessage(\n",
     "            role=\"system\",\n",
-    "            content=\"You are a helpful assistant helping to answer questions about the Llama 2 paper.\",\n",
+    "            content=(\n",
+    "                \"You are a helpful assistant helping to answer questions about\"\n",
+    "                \" the Llama 2 paper.\"\n",
+    "            ),\n",
     "        ),\n",
     "        ChatMessage(role=\"user\", content=d[\"query\"]),\n",
     "    ]\n",
@@ -827,10 +834,14 @@
     "        base_response = str(query_model(base_model, eval_dict))\n",
     "\n",
     "        # try evaluations\n",
-    "        ft_rag_eval = eval_match_gt(eval_dict[\"query\"], gt_response, ft_rag_response)\n",
+    "        ft_rag_eval = eval_match_gt(\n",
+    "            eval_dict[\"query\"], gt_response, ft_rag_response\n",
+    "        )\n",
     "        ft_eval = eval_match_gt(eval_dict[\"query\"], gt_response, ft_response)\n",
     "        rag_eval = eval_match_gt(eval_dict[\"query\"], gt_response, rag_response)\n",
-    "        base_eval = eval_match_gt(eval_dict[\"query\"], gt_response, base_response)\n",
+    "        base_eval = eval_match_gt(\n",
+    "            eval_dict[\"query\"], gt_response, base_response\n",
+    "        )\n",
     "\n",
     "        response_dict = {\n",
     "            \"query\": eval_dict[\"query\"],\n",
diff --git a/docs/examples/finetuning/knowledge/finetune_retrieval_aug.ipynb b/docs/examples/finetuning/knowledge/finetune_retrieval_aug.ipynb
index aa44a139c0bbea0cf3728b0a3e24b8e4fb5ac0cc..4dea6b9a357250c2f573661e3003c2d3476a5244 100644
--- a/docs/examples/finetuning/knowledge/finetune_retrieval_aug.ipynb
+++ b/docs/examples/finetuning/knowledge/finetune_retrieval_aug.ipynb
@@ -98,7 +98,9 @@
     "from llama_index import Document\n",
     "\n",
     "doc_text = \"\\n\\n\".join([d.get_content() for d in docs0])\n",
-    "metadata = {\"paper_title\": \"Llama 2: Open Foundation and Fine-Tuned Chat Models\"}\n",
+    "metadata = {\n",
+    "    \"paper_title\": \"Llama 2: Open Foundation and Fine-Tuned Chat Models\"\n",
+    "}\n",
     "docs = [Document(text=doc_text, metadata=metadata)]"
    ]
   },
@@ -128,7 +130,8 @@
     "    callback_manager=callback_manager,\n",
     ")\n",
     "gpt_4_context = ServiceContext.from_defaults(\n",
-    "    llm=OpenAI(model=\"gpt-4-0613\", temperature=0.3), callback_manager=callback_manager\n",
+    "    llm=OpenAI(model=\"gpt-4-0613\", temperature=0.3),\n",
+    "    callback_manager=callback_manager,\n",
     ")"
    ]
   },
@@ -502,7 +505,10 @@
     "    # TODO: try with different system prompts\n",
     "    system_prompt = {\n",
     "        \"role\": \"system\",\n",
-    "        \"content\": \"You are a helpful assistant helping to answer questions about the Llama 2 paper.\",\n",
+    "        \"content\": (\n",
+    "            \"You are a helpful assistant helping to answer questions about the\"\n",
+    "            \" Llama 2 paper.\"\n",
+    "        ),\n",
     "    }\n",
     "    train_qr_pairs = dataset.qr_pairs\n",
     "    for line in train_qr_pairs:\n",
@@ -651,7 +657,10 @@
     "ft_context = ServiceContext.from_defaults(\n",
     "    llm=ft_model,\n",
     "    callback_manager=callback_manager,\n",
-    "    system_prompt=\"You are a helpful assistant helping to answer questions about the Llama 2 paper.\",\n",
+    "    system_prompt=(\n",
+    "        \"You are a helpful assistant helping to answer questions about the\"\n",
+    "        \" Llama 2 paper.\"\n",
+    "    ),\n",
     ")\n",
     "# fine-tuned RAG system\n",
     "ft_query_engine = vector_index.as_query_engine(\n",
@@ -675,7 +684,8 @@
    ],
    "source": [
     "response = ft_query_engine.query(\n",
-    "    \"How is the margin component added in the loss of the reward model in Llama 2?\"\n",
+    "    \"How is the margin component added in the loss of the reward model in\"\n",
+    "    \" Llama 2?\"\n",
     ")\n",
     "print(str(response))"
    ]
@@ -697,7 +707,8 @@
    "source": [
     "base_query_engine = vector_index.as_query_engine(similarity_top_k=1)\n",
     "base_response = base_query_engine.query(\n",
-    "    \"How is the margin component added in the loss of the reward model in Llama 2?\"\n",
+    "    \"How is the margin component added in the loss of the reward model in\"\n",
+    "    \" Llama 2?\"\n",
     ")\n",
     "print(str(base_response))"
    ]
@@ -794,7 +805,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "base_pred_responses = get_responses(eval_qs, base_query_engine, show_progress=True)"
+    "base_pred_responses = get_responses(\n",
+    "    eval_qs, base_query_engine, show_progress=True\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/finetuning/openai_fine_tuning.ipynb b/docs/examples/finetuning/openai_fine_tuning.ipynb
index cde993f60c7768aa61e823026f99b571887ffbe6..d80b57bab76621beab11ed055dba87301198fafa 100644
--- a/docs/examples/finetuning/openai_fine_tuning.ipynb
+++ b/docs/examples/finetuning/openai_fine_tuning.ipynb
@@ -266,7 +266,9 @@
     "    llm=OpenAI(model=\"gpt-3.5-turbo\", temperature=0.3), context_window=2048\n",
     ")\n",
     "\n",
-    "index = VectorStoreIndex.from_documents(documents, service_context=gpt_35_context)\n",
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=gpt_35_context\n",
+    ")\n",
     "\n",
     "query_engine = index.as_query_engine(similarity_top_k=2)"
    ]
@@ -394,7 +396,9 @@
    "source": [
     "from llama_index import VectorStoreIndex\n",
     "\n",
-    "index = VectorStoreIndex.from_documents(documents, service_context=gpt_4_context)\n",
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=gpt_4_context\n",
+    ")\n",
     "\n",
     "query_engine = index.as_query_engine(similarity_top_k=2)"
    ]
diff --git a/docs/examples/finetuning/openai_fine_tuning_functions.ipynb b/docs/examples/finetuning/openai_fine_tuning_functions.ipynb
index 692c5999585e800863a33091365ce54b8229b35e..382c85929c6815e71da781903a00186b54922283 100644
--- a/docs/examples/finetuning/openai_fine_tuning_functions.ipynb
+++ b/docs/examples/finetuning/openai_fine_tuning_functions.ipynb
@@ -408,11 +408,16 @@
     "class Citation(BaseModel):\n",
     "    \"\"\"Citation class.\"\"\"\n",
     "\n",
-    "    author: str = Field(..., description=\"Inferred first author (usually last name\")\n",
+    "    author: str = Field(\n",
+    "        ..., description=\"Inferred first author (usually last name\"\n",
+    "    )\n",
     "    year: int = Field(..., description=\"Inferred year\")\n",
     "    desc: str = Field(\n",
     "        ...,\n",
-    "        description=\"Inferred description from the text of the work that the author is cited for\",\n",
+    "        description=(\n",
+    "            \"Inferred description from the text of the work that the author is\"\n",
+    "            \" cited for\"\n",
+    "        ),\n",
     "    )\n",
     "\n",
     "\n",
@@ -425,7 +430,10 @@
     "\n",
     "    citations: List[Citation] = Field(\n",
     "        ...,\n",
-    "        description=\"List of author citations (organized by author, year, and description).\",\n",
+    "        description=(\n",
+    "            \"List of author citations (organized by author, year, and\"\n",
+    "            \" description).\"\n",
+    "        ),\n",
     "    )"
    ]
   },
@@ -465,7 +473,9 @@
    "outputs": [],
    "source": [
     "doc_text = \"\\n\\n\".join([d.get_content() for d in docs0])\n",
-    "metadata = {\"paper_title\": \"Llama 2: Open Foundation and Fine-Tuned Chat Models\"}\n",
+    "metadata = {\n",
+    "    \"paper_title\": \"Llama 2: Open Foundation and Fine-Tuned Chat Models\"\n",
+    "}\n",
     "docs = [Document(text=doc_text, metadata=metadata)]"
    ]
   },
@@ -605,7 +615,9 @@
    "outputs": [],
    "source": [
     "gpt4_index = VectorStoreIndex(nodes, service_context=gpt_4_context)\n",
-    "gpt4_query_engine = gpt4_index.as_query_engine(output_cls=Response, similarity_top_k=1)"
+    "gpt4_query_engine = gpt4_index.as_query_engine(\n",
+    "    output_cls=Response, similarity_top_k=1\n",
+    ")"
    ]
   },
   {
@@ -784,7 +796,9 @@
     "from llama_index import VectorStoreIndex\n",
     "\n",
     "vector_index = VectorStoreIndex(nodes, service_context=ft_service_context)\n",
-    "query_engine = vector_index.as_query_engine(output_cls=Response, similarity_top_k=1)"
+    "query_engine = vector_index.as_query_engine(\n",
+    "    output_cls=Response, similarity_top_k=1\n",
+    ")"
    ]
   },
   {
@@ -795,7 +809,9 @@
    "source": [
     "# setup baseline as well\n",
     "base_index = VectorStoreIndex(nodes, service_context=gpt_35_context)\n",
-    "base_query_engine = base_index.as_query_engine(output_cls=Response, similarity_top_k=1)"
+    "base_query_engine = base_index.as_query_engine(\n",
+    "    output_cls=Response, similarity_top_k=1\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/finetuning/react_agent/react_agent_finetune.ipynb b/docs/examples/finetuning/react_agent/react_agent_finetune.ipynb
index 6a3b5b36380573be70c3d9111af9ea4cd3112832..7db43e7d972da6e69543bc2f5ea8b4801c7cdf80 100644
--- a/docs/examples/finetuning/react_agent/react_agent_finetune.ipynb
+++ b/docs/examples/finetuning/react_agent/react_agent_finetune.ipynb
@@ -78,13 +78,19 @@
    "outputs": [],
    "source": [
     "try:\n",
-    "    storage_context = StorageContext.from_defaults(persist_dir=\"./storage/march\")\n",
+    "    storage_context = StorageContext.from_defaults(\n",
+    "        persist_dir=\"./storage/march\"\n",
+    "    )\n",
     "    march_index = load_index_from_storage(storage_context)\n",
     "\n",
-    "    storage_context = StorageContext.from_defaults(persist_dir=\"./storage/june\")\n",
+    "    storage_context = StorageContext.from_defaults(\n",
+    "        persist_dir=\"./storage/june\"\n",
+    "    )\n",
     "    june_index = load_index_from_storage(storage_context)\n",
     "\n",
-    "    storage_context = StorageContext.from_defaults(persist_dir=\"./storage/sept\")\n",
+    "    storage_context = StorageContext.from_defaults(\n",
+    "        persist_dir=\"./storage/sept\"\n",
+    "    )\n",
     "    sept_index = load_index_from_storage(storage_context)\n",
     "\n",
     "    index_loaded = True\n",
@@ -159,17 +165,26 @@
     "query_tool_sept = QueryEngineTool.from_defaults(\n",
     "    query_engine=sept_engine,\n",
     "    name=\"sept_2022\",\n",
-    "    description=f\"Provides information about Uber quarterly financials ending September 2022\",\n",
+    "    description=(\n",
+    "        f\"Provides information about Uber quarterly financials ending\"\n",
+    "        f\" September 2022\"\n",
+    "    ),\n",
     ")\n",
     "query_tool_june = QueryEngineTool.from_defaults(\n",
     "    query_engine=june_engine,\n",
     "    name=\"june_2022\",\n",
-    "    description=f\"Provides information about Uber quarterly financials ending June 2022\",\n",
+    "    description=(\n",
+    "        f\"Provides information about Uber quarterly financials ending June\"\n",
+    "        f\" 2022\"\n",
+    "    ),\n",
     ")\n",
     "query_tool_march = QueryEngineTool.from_defaults(\n",
     "    query_engine=march_engine,\n",
     "    name=\"march_2022\",\n",
-    "    description=f\"Provides information about Uber quarterly financials ending March 2022\",\n",
+    "    description=(\n",
+    "        f\"Provides information about Uber quarterly financials ending March\"\n",
+    "        f\" 2022\"\n",
+    "    ),\n",
     ")\n",
     "\n",
     "query_engine_tools = [query_tool_march, query_tool_june, query_tool_sept]"
@@ -218,7 +233,9 @@
    "outputs": [],
    "source": [
     "# gpt-3.5 generally gives the right response here\n",
-    "response = base_agent.chat(\"Analyze Uber revenue growth over the last few quarters\")\n",
+    "response = base_agent.chat(\n",
+    "    \"Analyze Uber revenue growth over the last few quarters\"\n",
+    ")\n",
     "print(str(response))"
    ]
   },
@@ -270,7 +287,8 @@
    "source": [
     "# gpt-3.5 doesn't give the right response - it doesn't first search for the quarter with the highest revenue growth\n",
     "response = base_agent.chat(\n",
-    "    \"Can you tell me about the risk factors in the quarter with the highest revenue growth?\"\n",
+    "    \"Can you tell me about the risk factors in the quarter with the highest\"\n",
+    "    \" revenue growth?\"\n",
     ")\n",
     "print(str(response))"
    ]
@@ -303,10 +321,10 @@
    "outputs": [],
    "source": [
     "base_question_gen_query = (\n",
-    "    \"You are a Teacher/ Professor. Your task is to setup \"\n",
-    "    \"a quiz/examination. Using the provided context from the Uber March 10Q filing, formulate \"\n",
-    "    \"a single question that captures an important fact from the context. \"\n",
-    "    \"context. Restrict the question to the context information provided.\"\n",
+    "    \"You are a Teacher/ Professor. Your task is to setup a quiz/examination.\"\n",
+    "    \" Using the provided context from the Uber March 10Q filing, formulate a\"\n",
+    "    \" single question that captures an important fact from the context.\"\n",
+    "    \" context. Restrict the question to the context information provided.\"\n",
     ")\n",
     "\n",
     "dataset_generator = DatasetGenerator.from_documents(\n",
@@ -416,7 +434,9 @@
     "        new_questions.append(question)\n",
     "        response = llm.complete(\n",
     "            prompt_tmpl.format(\n",
-    "                num_vary=num_vary, base_question=question, valid_10qs=VALID_10Q_STR\n",
+    "                num_vary=num_vary,\n",
+    "                base_question=question,\n",
+    "                valid_10qs=VALID_10Q_STR,\n",
     "            )\n",
     "        )\n",
     "        # parse into newlines\n",
@@ -588,7 +608,10 @@
    "source": [
     "llm = OpenAI(model=\"gpt-4-0613\")\n",
     "gpt4_agent = ReActAgent.from_tools(\n",
-    "    query_engine_tools, llm=llm, callback_manager=callback_manager, verbose=True\n",
+    "    query_engine_tools,\n",
+    "    llm=llm,\n",
+    "    callback_manager=callback_manager,\n",
+    "    verbose=True,\n",
     ")"
    ]
   },
@@ -1501,7 +1524,10 @@
    "outputs": [],
    "source": [
     "ft_agent = ReActAgent.from_tools(\n",
-    "    query_engine_tools, llm=ft_llm, callback_manager=callback_manager, verbose=True\n",
+    "    query_engine_tools,\n",
+    "    llm=ft_llm,\n",
+    "    callback_manager=callback_manager,\n",
+    "    verbose=True,\n",
     ")"
    ]
   },
@@ -1623,7 +1649,10 @@
    ],
    "source": [
     "# try the original question that failed\n",
-    "test_q = \"Can you tell me about the risk factors in the quarter with the highest revenue growth?\"\n",
+    "test_q = (\n",
+    "    \"Can you tell me about the risk factors in the quarter with the highest\"\n",
+    "    \" revenue growth?\"\n",
+    ")\n",
     "base_response = base_agent.query(test_q)\n",
     "print(str(base_response))"
    ]
diff --git a/docs/examples/index_structs/doc_summary/DocSummary.ipynb b/docs/examples/index_structs/doc_summary/DocSummary.ipynb
index a9e8fa5ce0ef0c4d0c80dc34da6cfb4721d9b9b1..617e1fd10f16164f702e761bec4dd81b156aca7b 100644
--- a/docs/examples/index_structs/doc_summary/DocSummary.ipynb
+++ b/docs/examples/index_structs/doc_summary/DocSummary.ipynb
@@ -140,7 +140,9 @@
     "# Load all wiki documents\n",
     "city_docs = []\n",
     "for wiki_title in wiki_titles:\n",
-    "    docs = SimpleDirectoryReader(input_files=[f\"data/{wiki_title}.txt\"]).load_data()\n",
+    "    docs = SimpleDirectoryReader(\n",
+    "        input_files=[f\"data/{wiki_title}.txt\"]\n",
+    "    ).load_data()\n",
     "    docs[0].doc_id = wiki_title\n",
     "    city_docs.extend(docs)"
    ]
@@ -367,7 +369,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.document_summary import DocumentSummaryIndexLLMRetriever"
+    "from llama_index.indices.document_summary import (\n",
+    "    DocumentSummaryIndexLLMRetriever,\n",
+    ")"
    ]
   },
   {
@@ -494,7 +498,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.document_summary import DocumentSummaryIndexEmbeddingRetriever"
+    "from llama_index.indices.document_summary import (\n",
+    "    DocumentSummaryIndexEmbeddingRetriever,\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/index_structs/knowledge_graph/FalkorDBGraphDemo.ipynb b/docs/examples/index_structs/knowledge_graph/FalkorDBGraphDemo.ipynb
index 6036bee8cf09df8ea5f6c5b1b5f0f98a93c5ad05..103a95a7e5d45d34d21e343bcf14cbabec8b44f4 100644
--- a/docs/examples/index_structs/knowledge_graph/FalkorDBGraphDemo.ipynb
+++ b/docs/examples/index_structs/knowledge_graph/FalkorDBGraphDemo.ipynb
@@ -79,7 +79,9 @@
    "source": [
     "from llama_index.graph_stores import FalkorDBGraphStore\n",
     "\n",
-    "graph_store = FalkorDBGraphStore(\"redis://localhost:6379\", decode_responses=True)"
+    "graph_store = FalkorDBGraphStore(\n",
+    "    \"redis://localhost:6379\", decode_responses=True\n",
+    ")"
    ]
   },
   {
@@ -171,7 +173,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "query_engine = index.as_query_engine(include_text=False, response_mode=\"tree_summarize\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    include_text=False, response_mode=\"tree_summarize\"\n",
+    ")\n",
     "response = query_engine.query(\n",
     "    \"Tell me more about Interleaf\",\n",
     ")"
@@ -216,7 +220,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "query_engine = index.as_query_engine(include_text=True, response_mode=\"tree_summarize\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    include_text=True, response_mode=\"tree_summarize\"\n",
+    ")\n",
     "response = query_engine.query(\n",
     "    \"Tell me more about Interleaf\",\n",
     ")"
diff --git a/docs/examples/index_structs/knowledge_graph/KnowledgeGraphDemo.ipynb b/docs/examples/index_structs/knowledge_graph/KnowledgeGraphDemo.ipynb
index 7318314e553b84ba54edc1b1c1bf8ba8cd406131..e3932abbeb5c6ab94bb0d6421bd243f5d5a01172 100644
--- a/docs/examples/index_structs/knowledge_graph/KnowledgeGraphDemo.ipynb
+++ b/docs/examples/index_structs/knowledge_graph/KnowledgeGraphDemo.ipynb
@@ -182,7 +182,9 @@
     }
    ],
    "source": [
-    "query_engine = index.as_query_engine(include_text=False, response_mode=\"tree_summarize\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    include_text=False, response_mode=\"tree_summarize\"\n",
+    ")\n",
     "response = query_engine.query(\n",
     "    \"Tell me more about Interleaf\",\n",
     ")"
@@ -234,7 +236,9 @@
     }
    ],
    "source": [
-    "query_engine = index.as_query_engine(include_text=True, response_mode=\"tree_summarize\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    include_text=True, response_mode=\"tree_summarize\"\n",
+    ")\n",
     "response = query_engine.query(\n",
     "    \"Tell me more about what the author worked on at Interleaf\",\n",
     ")"
@@ -523,7 +527,9 @@
     }
    ],
    "source": [
-    "query_engine = index.as_query_engine(include_text=False, response_mode=\"tree_summarize\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    include_text=False, response_mode=\"tree_summarize\"\n",
+    ")\n",
     "response = query_engine.query(\n",
     "    \"Tell me more about Interleaf\",\n",
     ")"
diff --git a/docs/examples/index_structs/knowledge_graph/KnowledgeGraphIndex_vs_VectorStoreIndex_vs_CustomIndex_combined.ipynb b/docs/examples/index_structs/knowledge_graph/KnowledgeGraphIndex_vs_VectorStoreIndex_vs_CustomIndex_combined.ipynb
index 41f3cc5d09b4d13631104f80db00ba8f119e667d..0d0d09401b6cac6e5f95e49e127c3933d3e93c90 100644
--- a/docs/examples/index_structs/knowledge_graph/KnowledgeGraphIndex_vs_VectorStoreIndex_vs_CustomIndex_combined.ipynb
+++ b/docs/examples/index_structs/knowledge_graph/KnowledgeGraphIndex_vs_VectorStoreIndex_vs_CustomIndex_combined.ipynb
@@ -215,7 +215,9 @@
     "os.environ[\"NEBULA_PASSWORD\"] = \"nebula\"\n",
     "os.environ[\n",
     "    \"NEBULA_ADDRESS\"\n",
-    "] = \"127.0.0.1:9669\"  # assumed we have NebulaGraph 3.5.0 or newer installed locally\n",
+    "] = (  # assumed we have NebulaGraph 3.5.0 or newer installed locally\n",
+    "    \"127.0.0.1:9669\"\n",
+    ")\n",
     "\n",
     "# Assume that the graph has already been created\n",
     "# Create a NebulaGraph cluster with:\n",
@@ -366,7 +368,11 @@
     "from llama_index.schema import NodeWithScore\n",
     "\n",
     "# Retrievers\n",
-    "from llama_index.retrievers import BaseRetriever, VectorIndexRetriever, KGTableRetriever\n",
+    "from llama_index.retrievers import (\n",
+    "    BaseRetriever,\n",
+    "    VectorIndexRetriever,\n",
+    "    KGTableRetriever,\n",
+    ")\n",
     "\n",
     "from typing import List\n",
     "\n",
diff --git a/docs/examples/index_structs/knowledge_graph/KuzuGraphDemo.ipynb b/docs/examples/index_structs/knowledge_graph/KuzuGraphDemo.ipynb
index a3e20594728da1389fe3a248a7b01ddbdda4f2c2..721455d6992fcb6003a809458803b664d45fbdbc 100644
--- a/docs/examples/index_structs/knowledge_graph/KuzuGraphDemo.ipynb
+++ b/docs/examples/index_structs/knowledge_graph/KuzuGraphDemo.ipynb
@@ -229,7 +229,9 @@
     }
    ],
    "source": [
-    "query_engine = index.as_query_engine(include_text=False, response_mode=\"tree_summarize\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    include_text=False, response_mode=\"tree_summarize\"\n",
+    ")\n",
     "response = query_engine.query(\n",
     "    \"Tell me more about Interleaf\",\n",
     ")"
@@ -290,7 +292,9 @@
     }
    ],
    "source": [
-    "query_engine = index.as_query_engine(include_text=True, response_mode=\"tree_summarize\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    include_text=True, response_mode=\"tree_summarize\"\n",
+    ")\n",
     "response = query_engine.query(\n",
     "    \"Tell me more about Interleaf\",\n",
     ")"
@@ -634,7 +638,9 @@
     }
    ],
    "source": [
-    "query_engine = index.as_query_engine(include_text=False, response_mode=\"tree_summarize\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    include_text=False, response_mode=\"tree_summarize\"\n",
+    ")\n",
     "response = query_engine.query(\n",
     "    \"Tell me more about Interleaf\",\n",
     ")"
diff --git a/docs/examples/index_structs/knowledge_graph/NebulaGraphKGIndexDemo.ipynb b/docs/examples/index_structs/knowledge_graph/NebulaGraphKGIndexDemo.ipynb
index 9aeac78f3b6b8e800497e6d7653c680b69db409a..e16537a0ac611532aec889748f7f3951903199e2 100644
--- a/docs/examples/index_structs/knowledge_graph/NebulaGraphKGIndexDemo.ipynb
+++ b/docs/examples/index_structs/knowledge_graph/NebulaGraphKGIndexDemo.ipynb
@@ -189,7 +189,9 @@
     "] = \"<password>\"  # replace with your password, by default it is \"nebula\"\n",
     "os.environ[\n",
     "    \"NEBULA_ADDRESS\"\n",
-    "] = \"127.0.0.1:9669\"  # assumed we have NebulaGraph 3.5.0 or newer installed locally\n",
+    "] = (  # assumed we have NebulaGraph 3.5.0 or newer installed locally\n",
+    "    \"127.0.0.1:9669\"\n",
+    ")\n",
     "\n",
     "# Assume that the graph has already been created\n",
     "# Create a NebulaGraph cluster with:\n",
@@ -1055,7 +1057,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "query_engine = index.as_query_engine(include_text=False, response_mode=\"tree_summarize\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    include_text=False, response_mode=\"tree_summarize\"\n",
+    ")\n",
     "\n",
     "response = query_engine.query(\"Tell me more about Interleaf\")"
    ]
diff --git a/docs/examples/index_structs/knowledge_graph/Neo4jKGIndexDemo.ipynb b/docs/examples/index_structs/knowledge_graph/Neo4jKGIndexDemo.ipynb
index 3278d815f9b5b2ba791e9a2475d250dd3e3c8bde..9cc37e0b8ab1bee02de7384c56caf5686d0b0a0f 100644
--- a/docs/examples/index_structs/knowledge_graph/Neo4jKGIndexDemo.ipynb
+++ b/docs/examples/index_structs/knowledge_graph/Neo4jKGIndexDemo.ipynb
@@ -257,7 +257,9 @@
     }
    ],
    "source": [
-    "query_engine = index.as_query_engine(include_text=False, response_mode=\"tree_summarize\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    include_text=False, response_mode=\"tree_summarize\"\n",
+    ")\n",
     "\n",
     "response = query_engine.query(\"Tell me more about Interleaf\")"
    ]
@@ -316,7 +318,9 @@
     }
    ],
    "source": [
-    "query_engine = index.as_query_engine(include_text=True, response_mode=\"tree_summarize\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    include_text=True, response_mode=\"tree_summarize\"\n",
+    ")\n",
     "response = query_engine.query(\n",
     "    \"Tell me more about what the author worked on at Interleaf\"\n",
     ")"
@@ -550,7 +554,9 @@
     }
    ],
    "source": [
-    "query_engine = index.as_query_engine(include_text=False, response_mode=\"tree_summarize\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    include_text=False, response_mode=\"tree_summarize\"\n",
+    ")\n",
     "\n",
     "response = query_engine.query(\"Tell me more about Interleaf\")"
    ]
diff --git a/docs/examples/index_structs/knowledge_graph/knowledge_graph2.ipynb b/docs/examples/index_structs/knowledge_graph/knowledge_graph2.ipynb
index ab9a70c89aafb20c6ce55d5a5e8115b933e4cf01..93db7f3eb6d1e9ce7604de72469c9905a4308a62 100644
--- a/docs/examples/index_structs/knowledge_graph/knowledge_graph2.ipynb
+++ b/docs/examples/index_structs/knowledge_graph/knowledge_graph2.ipynb
@@ -169,9 +169,9 @@
     "def extract_triplets(input_text):\n",
     "    text = triplet_extractor.tokenizer.batch_decode(\n",
     "        [\n",
-    "            triplet_extractor(input_text, return_tensors=True, return_text=False)[0][\n",
-    "                \"generated_token_ids\"\n",
-    "            ]\n",
+    "            triplet_extractor(\n",
+    "                input_text, return_tensors=True, return_text=False\n",
+    "            )[0][\"generated_token_ids\"]\n",
     "        ]\n",
     "    )[0]\n",
     "\n",
@@ -180,18 +180,25 @@
     "    text = text.strip()\n",
     "    current = \"x\"\n",
     "    for token in (\n",
-    "        text.replace(\"<s>\", \"\").replace(\"<pad>\", \"\").replace(\"</s>\", \"\").split()\n",
+    "        text.replace(\"<s>\", \"\")\n",
+    "        .replace(\"<pad>\", \"\")\n",
+    "        .replace(\"</s>\", \"\")\n",
+    "        .split()\n",
     "    ):\n",
     "        if token == \"<triplet>\":\n",
     "            current = \"t\"\n",
     "            if relation != \"\":\n",
-    "                triplets.append((subject.strip(), relation.strip(), object_.strip()))\n",
+    "                triplets.append(\n",
+    "                    (subject.strip(), relation.strip(), object_.strip())\n",
+    "                )\n",
     "                relation = \"\"\n",
     "            subject = \"\"\n",
     "        elif token == \"<subj>\":\n",
     "            current = \"s\"\n",
     "            if relation != \"\":\n",
-    "                triplets.append((subject.strip(), relation.strip(), object_.strip()))\n",
+    "                triplets.append(\n",
+    "                    (subject.strip(), relation.strip(), object_.strip())\n",
+    "                )\n",
     "            object_ = \"\"\n",
     "        elif token == \"<obj>\":\n",
     "            current = \"o\"\n",
diff --git a/docs/examples/index_structs/struct_indices/SQLIndexDemo.ipynb b/docs/examples/index_structs/struct_indices/SQLIndexDemo.ipynb
index f891ae8d39ac6a0b90e678682e8c57d0e480cc2a..b9affae528f02711c933681aaa2aed786f66dd51 100644
--- a/docs/examples/index_structs/struct_indices/SQLIndexDemo.ipynb
+++ b/docs/examples/index_structs/struct_indices/SQLIndexDemo.ipynb
@@ -173,7 +173,11 @@
     "rows = [\n",
     "    {\"city_name\": \"Toronto\", \"population\": 2930000, \"country\": \"Canada\"},\n",
     "    {\"city_name\": \"Tokyo\", \"population\": 13960000, \"country\": \"Japan\"},\n",
-    "    {\"city_name\": \"Chicago\", \"population\": 2679000, \"country\": \"United States\"},\n",
+    "    {\n",
+    "        \"city_name\": \"Chicago\",\n",
+    "        \"population\": 2679000,\n",
+    "        \"country\": \"United States\",\n",
+    "    },\n",
     "    {\"city_name\": \"Seoul\", \"population\": 9776000, \"country\": \"South Korea\"},\n",
     "]\n",
     "for row in rows:\n",
@@ -342,8 +346,14 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.struct_store.sql_query import SQLTableRetrieverQueryEngine\n",
-    "from llama_index.objects import SQLTableNodeMapping, ObjectIndex, SQLTableSchema\n",
+    "from llama_index.indices.struct_store.sql_query import (\n",
+    "    SQLTableRetrieverQueryEngine,\n",
+    ")\n",
+    "from llama_index.objects import (\n",
+    "    SQLTableNodeMapping,\n",
+    "    ObjectIndex,\n",
+    "    SQLTableSchema,\n",
+    ")\n",
     "from llama_index import VectorStoreIndex\n",
     "\n",
     "# set Logging to DEBUG for more detailed outputs\n",
@@ -435,9 +445,9 @@
    "source": [
     "# manually set context text\n",
     "city_stats_text = (\n",
-    "    \"This table gives information regarding the population and country of a given city.\\n\"\n",
-    "    \"The user will query with codewords, where 'foo' corresponds to population and 'bar'\"\n",
-    "    \"corresponds to city.\"\n",
+    "    \"This table gives information regarding the population and country of a\"\n",
+    "    \" given city.\\nThe user will query with codewords, where 'foo' corresponds\"\n",
+    "    \" to population and 'bar'corresponds to city.\"\n",
     ")\n",
     "\n",
     "table_node_mapping = SQLTableNodeMapping(sql_database)\n",
diff --git a/docs/examples/index_structs/struct_indices/duckdb_sql_query.ipynb b/docs/examples/index_structs/struct_indices/duckdb_sql_query.ipynb
index e8b29d52fd42192bad8a1f033c94bc06421c0aeb..efff5815acbb93d623beb2d693e46f9ab6e04726 100644
--- a/docs/examples/index_structs/struct_indices/duckdb_sql_query.ipynb
+++ b/docs/examples/index_structs/struct_indices/duckdb_sql_query.ipynb
@@ -44,7 +44,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index import SQLDatabase, SimpleDirectoryReader, WikipediaReader, Document\n",
+    "from llama_index import (\n",
+    "    SQLDatabase,\n",
+    "    SimpleDirectoryReader,\n",
+    "    WikipediaReader,\n",
+    "    Document,\n",
+    ")\n",
     "from llama_index.indices.struct_store import (\n",
     "    NLSQLTableQueryEngine,\n",
     "    SQLTableRetrieverQueryEngine,\n",
@@ -178,7 +183,11 @@
     "rows = [\n",
     "    {\"city_name\": \"Toronto\", \"population\": 2930000, \"country\": \"Canada\"},\n",
     "    {\"city_name\": \"Tokyo\", \"population\": 13960000, \"country\": \"Japan\"},\n",
-    "    {\"city_name\": \"Chicago\", \"population\": 2679000, \"country\": \"United States\"},\n",
+    "    {\n",
+    "        \"city_name\": \"Chicago\",\n",
+    "        \"population\": 2679000,\n",
+    "        \"country\": \"United States\",\n",
+    "    },\n",
     "    {\"city_name\": \"Seoul\", \"population\": 9776000, \"country\": \"South Korea\"},\n",
     "]\n",
     "for row in rows:\n",
@@ -430,7 +439,11 @@
     "rows = [\n",
     "    {\"city_name\": \"Toronto\", \"population\": 2930000, \"country\": \"Canada\"},\n",
     "    {\"city_name\": \"Tokyo\", \"population\": 13960000, \"country\": \"Japan\"},\n",
-    "    {\"city_name\": \"Chicago\", \"population\": 2679000, \"country\": \"United States\"},\n",
+    "    {\n",
+    "        \"city_name\": \"Chicago\",\n",
+    "        \"population\": 2679000,\n",
+    "        \"country\": \"United States\",\n",
+    "    },\n",
     "    {\"city_name\": \"Seoul\", \"population\": 9776000, \"country\": \"South Korea\"},\n",
     "]\n",
     "for row in rows:\n",
@@ -466,7 +479,11 @@
    "outputs": [],
    "source": [
     "from llama_index.indices.struct_store import SQLTableRetrieverQueryEngine\n",
-    "from llama_index.objects import SQLTableNodeMapping, ObjectIndex, SQLTableSchema\n",
+    "from llama_index.objects import (\n",
+    "    SQLTableNodeMapping,\n",
+    "    ObjectIndex,\n",
+    "    SQLTableSchema,\n",
+    ")\n",
     "from llama_index import VectorStoreIndex"
    ]
   },
diff --git a/docs/examples/llm/anthropic.ipynb b/docs/examples/llm/anthropic.ipynb
index bdc530709b19349f1b5197946b1349cc00cacba1..6b81ed4799bd2052b713ca9649442c51e51ef251 100644
--- a/docs/examples/llm/anthropic.ipynb
+++ b/docs/examples/llm/anthropic.ipynb
@@ -83,7 +83,9 @@
     "from llama_index.llms import ChatMessage, Anthropic\n",
     "\n",
     "messages = [\n",
-    "    ChatMessage(role=\"system\", content=\"You are a pirate with a colorful personality\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a pirate with a colorful personality\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"Tell me a story\"),\n",
     "]\n",
     "resp = Anthropic().chat(messages)"
@@ -192,7 +194,9 @@
     "\n",
     "llm = Anthropic()\n",
     "messages = [\n",
-    "    ChatMessage(role=\"system\", content=\"You are a pirate with a colorful personality\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a pirate with a colorful personality\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"Tell me a story\"),\n",
     "]\n",
     "resp = llm.stream_chat(messages)"
diff --git a/docs/examples/llm/azure_openai.ipynb b/docs/examples/llm/azure_openai.ipynb
index f99a693302f76ba7fa8e878de2ca9ad2568e122c..455b022581f98d47267b6f94654002868b407ad9 100644
--- a/docs/examples/llm/azure_openai.ipynb
+++ b/docs/examples/llm/azure_openai.ipynb
@@ -149,7 +149,9 @@
     "import os\n",
     "\n",
     "os.environ[\"OPENAI_API_KEY\"] = \"<your-api-key>\"\n",
-    "os.environ[\"OPENAI_API_BASE\"] = \"https://<your-resource-name>.openai.azure.com/\"\n",
+    "os.environ[\n",
+    "    \"OPENAI_API_BASE\"\n",
+    "] = \"https://<your-resource-name>.openai.azure.com/\"\n",
     "os.environ[\"OPENAI_API_TYPE\"] = \"azure\"\n",
     "os.environ[\"OPENAI_API_VERSION\"] = \"2023-03-15-preview\""
    ]
@@ -196,7 +198,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "llm = AzureOpenAI(engine=\"simon-llm\", model=\"gpt-35-turbo-16k\", temperature=0.0)"
+    "llm = AzureOpenAI(\n",
+    "    engine=\"simon-llm\", model=\"gpt-35-turbo-16k\", temperature=0.0\n",
+    ")"
    ]
   },
   {
@@ -298,7 +302,9 @@
     "from llama_index.llms import ChatMessage\n",
     "\n",
     "messages = [\n",
-    "    ChatMessage(role=\"system\", content=\"You are a pirate with colorful personality.\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a pirate with colorful personality.\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"Hello\"),\n",
     "]\n",
     "\n",
diff --git a/docs/examples/llm/clarifai.ipynb b/docs/examples/llm/clarifai.ipynb
index 95df8d2ae46ee40d18504639b0241c91d94e6bb3..ca07e2874ec8d7b45899b05f0cee39a93a366f37 100644
--- a/docs/examples/llm/clarifai.ipynb
+++ b/docs/examples/llm/clarifai.ipynb
@@ -78,7 +78,9 @@
     "    user_id=\"clarifai\",\n",
     "    app_id=\"ml\",\n",
     "    model_name=\"llama2-7b-alternative-4k\",\n",
-    "    model_url=\"https://clarifai.com/clarifai/ml/models/llama2-7b-alternative-4k\",\n",
+    "    model_url=(\n",
+    "        \"https://clarifai.com/clarifai/ml/models/llama2-7b-alternative-4k\"\n",
+    "    ),\n",
     ")"
    ]
   },
@@ -107,7 +109,9 @@
    "source": [
     "# Method:2 using model_name, app_id & user_id parameters\n",
     "llm_model = Clarifai(\n",
-    "    model_name=params[\"model_name\"], app_id=params[\"app_id\"], user_id=params[\"user_id\"]\n",
+    "    model_name=params[\"model_name\"],\n",
+    "    app_id=params[\"app_id\"],\n",
+    "    user_id=params[\"user_id\"],\n",
     ")"
    ]
   },
@@ -124,7 +128,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "llm_reponse = llm_model.complete(prompt=\"write a 10 line rhyming poem about science\")"
+    "llm_reponse = llm_model.complete(\n",
+    "    prompt=\"write a 10 line rhyming poem about science\"\n",
+    ")"
    ]
   },
   {
@@ -167,7 +173,9 @@
    "source": [
     "from llama_index.llms import ChatMessage\n",
     "\n",
-    "messages = [ChatMessage(role=\"user\", content=\"write about climate change in 50 lines\")]\n",
+    "messages = [\n",
+    "    ChatMessage(role=\"user\", content=\"write about climate change in 50 lines\")\n",
+    "]\n",
     "Response = llm_model.chat(messages)"
    ]
   },
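The Clarifai hunks reflow two alternative ways of addressing the same hosted model. Side by side (import path assumed), the two constructions are:

    from llama_index.llms import Clarifai  # assumed import path

    # Method 1: a full model URL
    llm_model = Clarifai(
        model_url=(
            "https://clarifai.com/clarifai/ml/models/llama2-7b-alternative-4k"
        )
    )

    # Method 2: user_id, app_id, and model_name resolve to the same model
    llm_model = Clarifai(
        user_id="clarifai",
        app_id="ml",
        model_name="llama2-7b-alternative-4k",
    )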
diff --git a/docs/examples/llm/cohere.ipynb b/docs/examples/llm/cohere.ipynb
index 4e7ffa76edc024130f19adb63da7b916f61fff39..d00fef1c2e7426924748ffedebcb7077525c0261 100644
--- a/docs/examples/llm/cohere.ipynb
+++ b/docs/examples/llm/cohere.ipynb
@@ -82,7 +82,9 @@
     "\n",
     "messages = [\n",
     "    ChatMessage(role=\"user\", content=\"hello there\"),\n",
-    "    ChatMessage(role=\"assistant\", content=\"Arrrr, matey! How can I help ye today?\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"assistant\", content=\"Arrrr, matey! How can I help ye today?\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"What is your name\"),\n",
     "]\n",
     "\n",
@@ -179,7 +181,9 @@
     "llm = Cohere(api_key=api_key)\n",
     "messages = [\n",
     "    ChatMessage(role=\"user\", content=\"hello there\"),\n",
-    "    ChatMessage(role=\"assistant\", content=\"Arrrr, matey! How can I help ye today?\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"assistant\", content=\"Arrrr, matey! How can I help ye today?\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"What is your name\"),\n",
     "]\n",
     "resp = llm.stream_chat(\n",
diff --git a/docs/examples/llm/gradient_base_model.ipynb b/docs/examples/llm/gradient_base_model.ipynb
index 8c1b26a808533c16d3622a55dae8bb41c2120da9..2497feae67795e2e21baf90eaf3042a8ad2a6fda 100644
--- a/docs/examples/llm/gradient_base_model.ipynb
+++ b/docs/examples/llm/gradient_base_model.ipynb
@@ -140,7 +140,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)\n",
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")\n",
     "query_engine = index.as_query_engine()"
    ]
   },
@@ -151,7 +153,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "response = query_engine.query(\"What did the author do after his time at Y Combinator?\")\n",
+    "response = query_engine.query(\n",
+    "    \"What did the author do after his time at Y Combinator?\"\n",
+    ")\n",
     "print(response)"
    ]
   }
diff --git a/docs/examples/llm/gradient_model_adapter.ipynb b/docs/examples/llm/gradient_model_adapter.ipynb
index ee0496752aa8a33c5b8f5c8aec090e8f0d5bb6a2..908fbbe6d7eb9c2f45ee23bcf732d2b9d4363359 100644
--- a/docs/examples/llm/gradient_model_adapter.ipynb
+++ b/docs/examples/llm/gradient_model_adapter.ipynb
@@ -140,7 +140,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)\n",
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")\n",
     "query_engine = index.as_query_engine()"
    ]
   },
@@ -151,7 +153,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "response = query_engine.query(\"What did the author do after his time at Y Combinator?\")\n",
+    "response = query_engine.query(\n",
+    "    \"What did the author do after his time at Y Combinator?\"\n",
+    ")\n",
     "print(response)"
    ]
   }
diff --git a/docs/examples/llm/litellm.ipynb b/docs/examples/llm/litellm.ipynb
index c3f031af12067c9186f3c7c5fb406f9996533e65..d6aa070593df5d2009dc32ab1fbb1a8c543b1d6e 100755
--- a/docs/examples/llm/litellm.ipynb
+++ b/docs/examples/llm/litellm.ipynb
@@ -56,7 +56,9 @@
     "from llama_index.llms import ChatMessage, LiteLLM\n",
     "\n",
     "messages = [\n",
-    "    ChatMessage(role=\"system\", content=\"You are a pirate with a colorful personality\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a pirate with a colorful personality\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"Tell me a story\"),\n",
     "]\n",
     "resp = LiteLLM(\"gpt-3.5-turbo\").chat(messages)"
@@ -166,7 +168,9 @@
     "from llama_index.llms import LiteLLM\n",
     "\n",
     "messages = [\n",
-    "    ChatMessage(role=\"system\", content=\"You are a pirate with a colorful personality\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a pirate with a colorful personality\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"Tell me a story\"),\n",
     "]\n",
     "\n",
diff --git a/docs/examples/llm/llama_2.ipynb b/docs/examples/llm/llama_2.ipynb
index c41faabfa21f99570fd5e45e9ad342eb2cce6690..610ddd048ec33a77227655bafe53837eb9d97de9 100644
--- a/docs/examples/llm/llama_2.ipynb
+++ b/docs/examples/llm/llama_2.ipynb
@@ -140,7 +140,9 @@
     "from llama_index.llms import ChatMessage\n",
     "\n",
     "messages = [\n",
-    "    ChatMessage(role=\"system\", content=\"You are a pirate with a colorful personality\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a pirate with a colorful personality\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"What is your name\"),\n",
     "]\n",
     "resp = llm.chat(messages)"
@@ -243,7 +245,9 @@
     "from llama_index.llms import ChatMessage\n",
     "\n",
     "messages = [\n",
-    "    ChatMessage(role=\"system\", content=\"You are a pirate with a colorful personality\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a pirate with a colorful personality\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"What is your name\"),\n",
     "]\n",
     "resp = llm.stream_chat(messages)"
diff --git a/docs/examples/llm/llama_2_llama_cpp.ipynb b/docs/examples/llm/llama_2_llama_cpp.ipynb
index adcadf298b597376ddfdf93201f345f5af0bd7aa..18ad32abf3ce3056a03704fbb825ef6cb61b1f5b 100644
--- a/docs/examples/llm/llama_2_llama_cpp.ipynb
+++ b/docs/examples/llm/llama_2_llama_cpp.ipynb
@@ -40,7 +40,10 @@
     "    ServiceContext,\n",
     ")\n",
     "from llama_index.llms import LlamaCPP\n",
-    "from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt"
+    "from llama_index.llms.llama_utils import (\n",
+    "    messages_to_prompt,\n",
+    "    completion_to_prompt,\n",
+    ")"
    ]
   },
   {
@@ -370,7 +373,9 @@
    "outputs": [],
    "source": [
     "# create vector store index\n",
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/llm/llama_2_rap_battle.ipynb b/docs/examples/llm/llama_2_rap_battle.ipynb
index 711079ec668cdbfde1b357a5992ba8730469468f..3921fe038b2575c6856c5b3e811a9cbfdf200b21 100644
--- a/docs/examples/llm/llama_2_rap_battle.ipynb
+++ b/docs/examples/llm/llama_2_rap_battle.ipynb
@@ -64,14 +64,18 @@
     "    llm=llm_70b,\n",
     "    memory=ChatMemoryBuffer.from_defaults(llm=llm_70b),\n",
     "    prefix_messages=[\n",
-    "        ChatMessage(role=\"system\", content=\"You are a rapper with an ENTJ personality\")\n",
+    "        ChatMessage(\n",
+    "            role=\"system\", content=\"You are a rapper with an ENTJ personality\"\n",
+    "        )\n",
     "    ],\n",
     ")\n",
     "bot_13b = SimpleChatEngine(\n",
     "    llm=llm_13b,\n",
     "    memory=ChatMemoryBuffer.from_defaults(llm=llm_13b),\n",
     "    prefix_messages=[\n",
-    "        ChatMessage(role=\"system\", content=\"You are a rapper with an INFP personality\")\n",
+    "        ChatMessage(\n",
+    "            role=\"system\", content=\"You are a rapper with an INFP personality\"\n",
+    "        )\n",
     "    ],\n",
     ")"
    ]
diff --git a/docs/examples/llm/llama_api.ipynb b/docs/examples/llm/llama_api.ipynb
index eb87e3d988edd2a90839f2d1b59f267afdb12d6c..e5058fc4e24d306d3bee430927a3de5b43ba0afc 100644
--- a/docs/examples/llm/llama_api.ipynb
+++ b/docs/examples/llm/llama_api.ipynb
@@ -124,7 +124,9 @@
     "from llama_index.llms import ChatMessage\n",
     "\n",
     "messages = [\n",
-    "    ChatMessage(role=\"system\", content=\"You are a pirate with a colorful personality\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a pirate with a colorful personality\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"What is your name\"),\n",
     "]\n",
     "resp = llm.chat(messages)"
diff --git a/docs/examples/llm/monsterapi.ipynb b/docs/examples/llm/monsterapi.ipynb
index 65b985c1f5ca316cd68695d6d8c1895745877412..d567595438c551eb943654df1f8840c7a0dd41cf 100644
--- a/docs/examples/llm/monsterapi.ipynb
+++ b/docs/examples/llm/monsterapi.ipynb
@@ -159,7 +159,10 @@
     "history_message = ChatMessage(\n",
     "    **{\n",
     "        \"role\": \"user\",\n",
-    "        \"content\": \"When asked 'who are you?' respond as 'I am qblocks llm model' everytime.\",\n",
+    "        \"content\": (\n",
+    "            \"When asked 'who are you?' respond as 'I am qblocks llm model'\"\n",
+    "            \" everytime.\"\n",
+    "        ),\n",
     "    }\n",
     ")\n",
     "current_message = ChatMessage(**{\"role\": \"user\", \"content\": \"Who are you?\"})\n",
@@ -271,7 +274,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)\n",
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")\n",
     "query_engine = index.as_query_engine()"
    ]
   },
diff --git a/docs/examples/llm/ollama.ipynb b/docs/examples/llm/ollama.ipynb
index a795b0e437cc402bb886a161c9fa065cc487e633..d41bef205fda1770dc91d4b390f00db32b687c2f 100644
--- a/docs/examples/llm/ollama.ipynb
+++ b/docs/examples/llm/ollama.ipynb
@@ -115,7 +115,9 @@
     "from llama_index.llms import ChatMessage\n",
     "\n",
     "messages = [\n",
-    "    ChatMessage(role=\"system\", content=\"You are a pirate with a colorful personality\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a pirate with a colorful personality\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"What is your name\"),\n",
     "]\n",
     "resp = llm.chat(messages)"
@@ -216,7 +218,9 @@
     "from llama_index.llms import ChatMessage\n",
     "\n",
     "messages = [\n",
-    "    ChatMessage(role=\"system\", content=\"You are a pirate with a colorful personality\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a pirate with a colorful personality\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"What is your name\"),\n",
     "]\n",
     "resp = llm.stream_chat(messages)"
diff --git a/docs/examples/llm/openai.ipynb b/docs/examples/llm/openai.ipynb
index 195edc15938208cb0ea05fde63bc7f12edb18bdc..f4ba1088140738f932aed8c2adeadda03bb8d6ba 100644
--- a/docs/examples/llm/openai.ipynb
+++ b/docs/examples/llm/openai.ipynb
@@ -72,7 +72,9 @@
     "from llama_index.llms import ChatMessage, OpenAI\n",
     "\n",
     "messages = [\n",
-    "    ChatMessage(role=\"system\", content=\"You are a pirate with a colorful personality\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a pirate with a colorful personality\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"What is your name\"),\n",
     "]\n",
     "resp = OpenAI().chat(messages)"
@@ -163,7 +165,9 @@
     "\n",
     "llm = OpenAI(stream=True)\n",
     "messages = [\n",
-    "    ChatMessage(role=\"system\", content=\"You are a pirate with a colorful personality\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a pirate with a colorful personality\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"What is your name\"),\n",
     "]\n",
     "resp = llm.stream_chat(messages)"
@@ -246,7 +250,9 @@
    "outputs": [],
    "source": [
     "messages = [\n",
-    "    ChatMessage(role=\"system\", content=\"You are a pirate with a colorful personality\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a pirate with a colorful personality\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"What is your name\"),\n",
     "]\n",
     "resp = llm.chat(messages)"
diff --git a/docs/examples/llm/palm.ipynb b/docs/examples/llm/palm.ipynb
index 2b6252258a381a00fe266f16bfc193558271ff84..df438c8928460f8876e785484f0b450d69a2c18c 100644
--- a/docs/examples/llm/palm.ipynb
+++ b/docs/examples/llm/palm.ipynb
@@ -95,7 +95,9 @@
    ],
    "source": [
     "models = [\n",
-    "    m for m in palm.list_models() if \"generateText\" in m.supported_generation_methods\n",
+    "    m\n",
+    "    for m in palm.list_models()\n",
+    "    if \"generateText\" in m.supported_generation_methods\n",
     "]\n",
     "model = models[0].name\n",
     "print(model)"
diff --git a/docs/examples/llm/predibase.ipynb b/docs/examples/llm/predibase.ipynb
index b598f96dc29d62c90c1722a0c49d2de756380e88..b4a6aacba9f4ce1f0cf737e7b47545cbcd76d9bc 100644
--- a/docs/examples/llm/predibase.ipynb
+++ b/docs/examples/llm/predibase.ipynb
@@ -58,7 +58,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "llm = PredibaseLLM(model_name=\"llama-2-13b\", temperature=0.3, max_new_tokens=512)\n",
+    "llm = PredibaseLLM(\n",
+    "    model_name=\"llama-2-13b\", temperature=0.3, max_new_tokens=512\n",
+    ")\n",
     "# You can query any HuggingFace or fine-tuned LLM that's hosted on Predibase"
    ]
   },
@@ -125,7 +127,10 @@
    "outputs": [],
    "source": [
     "llm = PredibaseLLM(\n",
-    "    model_name=\"llama-2-13b\", temperature=0.3, max_new_tokens=400, context_window=1024\n",
+    "    model_name=\"llama-2-13b\",\n",
+    "    temperature=0.3,\n",
+    "    max_new_tokens=400,\n",
+    "    context_window=1024,\n",
     ")\n",
     "service_context = ServiceContext.from_defaults(\n",
     "    chunk_size=1024, llm=llm, embed_model=\"local:BAAI/bge-small-en-v1.5\"\n",
@@ -147,7 +152,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)\n",
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")\n",
     "query_engine = index.as_query_engine()\n",
     "response = query_engine.query(\"What did the author do growing up?\")"
    ]
diff --git a/docs/examples/llm/rungpt.ipynb b/docs/examples/llm/rungpt.ipynb
index c9ba34106788e1f55255ec0d5682cba5d81a78bb..e78721946b174b062d5f0b7be3b4bdf5a41dd3ba 100644
--- a/docs/examples/llm/rungpt.ipynb
+++ b/docs/examples/llm/rungpt.ipynb
@@ -101,9 +101,12 @@
     "        role=MessageRole.USER,\n",
     "        content=\"Now, I want you to do some math for me.\",\n",
     "    ),\n",
-    "    ChatMessage(role=MessageRole.ASSISTANT, content=\"Sure, I would like to help you.\"),\n",
     "    ChatMessage(\n",
-    "        role=MessageRole.USER, content=\"How many points determine a straight line?\"\n",
+    "        role=MessageRole.ASSISTANT, content=\"Sure, I would like to help you.\"\n",
+    "    ),\n",
+    "    ChatMessage(\n",
+    "        role=MessageRole.USER,\n",
+    "        content=\"How many points determine a straight line?\",\n",
     "    ),\n",
     "]\n",
     "llm = RunGptLLM()\n",
@@ -165,9 +168,12 @@
     "        role=MessageRole.USER,\n",
     "        content=\"Now, I want you to do some math for me.\",\n",
     "    ),\n",
-    "    ChatMessage(role=MessageRole.ASSISTANT, content=\"Sure, I would like to help you.\"),\n",
     "    ChatMessage(\n",
-    "        role=MessageRole.USER, content=\"How many points determine a straight line?\"\n",
+    "        role=MessageRole.ASSISTANT, content=\"Sure, I would like to help you.\"\n",
+    "    ),\n",
+    "    ChatMessage(\n",
+    "        role=MessageRole.USER,\n",
+    "        content=\"How many points determine a straight line?\",\n",
     "    ),\n",
     "]\n",
     "response = RunGptLLM().stream_chat(messages=messages)"
diff --git a/docs/examples/llm/vicuna.ipynb b/docs/examples/llm/vicuna.ipynb
index d0dd98152cbe389f2cc59cf474117b29e1b88b53..2cfb4d30f60104186dc9181f0e3ca436197fd5c6 100644
--- a/docs/examples/llm/vicuna.ipynb
+++ b/docs/examples/llm/vicuna.ipynb
@@ -137,7 +137,9 @@
     "from llama_index.llms import ChatMessage\n",
     "\n",
     "messages = [\n",
-    "    ChatMessage(role=\"system\", content=\"You are a pirate with a colorful personality\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a pirate with a colorful personality\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"What is your name\"),\n",
     "]\n",
     "resp = llm.chat(messages)"
@@ -230,7 +232,9 @@
     "from llama_index.llms import ChatMessage\n",
     "\n",
     "messages = [\n",
-    "    ChatMessage(role=\"system\", content=\"You are a pirate with a colorful personality\"),\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a pirate with a colorful personality\"\n",
+    "    ),\n",
     "    ChatMessage(role=\"user\", content=\"What is your name\"),\n",
     "]\n",
     "resp = llm.stream_chat(messages)"
diff --git a/docs/examples/low_level/evaluation.ipynb b/docs/examples/low_level/evaluation.ipynb
index 80642383e646b6ba4bd859c26eda7c4ae16e414d..91dd55d4c973ccc40cf4bcb3b0f0b0e859dfaefe 100644
--- a/docs/examples/low_level/evaluation.ipynb
+++ b/docs/examples/low_level/evaluation.ipynb
@@ -202,7 +202,9 @@
     "    \"\"\"Generate answers for questions given context.\"\"\"\n",
     "    answers = []\n",
     "    for question in questions:\n",
-    "        fmt_qa_prompt = QA_PROMPT.format(context_str=context, query_str=question)\n",
+    "        fmt_qa_prompt = QA_PROMPT.format(\n",
+    "            context_str=context, query_str=question\n",
+    "        )\n",
     "        response_obj = llm.complete(fmt_qa_prompt)\n",
     "        answers.append(str(response_obj))\n",
     "    return answers"
@@ -264,9 +266,12 @@
     "        raw_output = chat_response.message.content\n",
     "        result_list = str(raw_output).strip().split(\"\\n\")\n",
     "        cleaned_questions = [\n",
-    "            re.sub(r\"^\\d+[\\).\\s]\", \"\", question).strip() for question in result_list\n",
+    "            re.sub(r\"^\\d+[\\).\\s]\", \"\", question).strip()\n",
+    "            for question in result_list\n",
     "        ]\n",
-    "        answers = generate_answers_for_questions(cleaned_questions, context_str, llm)\n",
+    "        answers = generate_answers_for_questions(\n",
+    "            cleaned_questions, context_str, llm\n",
+    "        )\n",
     "        cur_qa_pairs = list(zip(cleaned_questions, answers))\n",
     "        qa_pairs.extend(cur_qa_pairs)\n",
     "    return qa_pairs"
@@ -542,8 +547,14 @@
     "# query_str = \"What is the range of parameters for the large language models (LLMs) developed in this work?\"\n",
     "# reference_answer = \"The range of parameters for the large language models (LLMs) developed in this work is from 7 billion to 70 billion.\"\n",
     "\n",
-    "query_str = \"What is the specific name given to the fine-tuned LLMs optimized for dialogue use cases?\"\n",
-    "reference_answer = \"The specific name given to the fine-tuned LLMs optimized for dialogue use cases is Llama 2-Chat.\""
+    "query_str = (\n",
+    "    \"What is the specific name given to the fine-tuned LLMs optimized for\"\n",
+    "    \" dialogue use cases?\"\n",
+    ")\n",
+    "reference_answer = (\n",
+    "    \"The specific name given to the fine-tuned LLMs optimized for dialogue use\"\n",
+    "    \" cases is Llama 2-Chat.\"\n",
+    ")"
    ]
   },
   {
@@ -801,7 +812,11 @@
     "        response = query_engine.query(question)\n",
     "        generated_answer = str(response)\n",
     "        correctness_results = run_correctness_eval(\n",
-    "            query_str, reference_answer, generated_answer, llm=llm, threshold=4.0\n",
+    "            query_str,\n",
+    "            reference_answer,\n",
+    "            generated_answer,\n",
+    "            llm=llm,\n",
+    "            threshold=4.0,\n",
     "        )\n",
     "        faithfulness_results = run_faithfulness_eval(\n",
     "            generated_answer,\n",
diff --git a/docs/examples/low_level/ingestion.ipynb b/docs/examples/low_level/ingestion.ipynb
index 89b2cda7eeeb8a7f5ba1600554cee594f9d6d8e9..172621a23281fecc146daf0db7e4cf668c319300 100644
--- a/docs/examples/low_level/ingestion.ipynb
+++ b/docs/examples/low_level/ingestion.ipynb
@@ -85,7 +85,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "dotenv_path = \"env\"  # Google Colabs will not let you open a .env, but you can set\n",
+    "dotenv_path = (  # Google Colabs will not let you open a .env, but you can set\n",
+    "    \"env\"\n",
+    ")\n",
     "with open(dotenv_path, \"w\") as f:\n",
     "    f.write('PINECONE_API_KEY=\"<your api key>\"\\n')\n",
     "    f.write('PINECONE_ENVIRONMENT=\"gcp-starter\"\\n')\n",
@@ -185,7 +187,9 @@
    "outputs": [],
    "source": [
     "# dimensions are for text-embedding-ada-002\n",
-    "pinecone.create_index(index_name, dimension=1536, metric=\"euclidean\", pod_type=\"p1\")"
+    "pinecone.create_index(\n",
+    "    index_name, dimension=1536, metric=\"euclidean\", pod_type=\"p1\"\n",
+    ")"
    ]
   },
   {
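The create_index call wrapped here deserves a gloss: the dimension must equal the width of the embedding vectors you intend to upsert, and these notebooks embed with OpenAI's text-embedding-ada-002, which emits 1536-dimensional vectors. A sketch, assuming pinecone.init(...) has already run as in the notebook's setup cells:

    import pinecone  # client assumed initialized earlier via pinecone.init(...)

    # 1536 matches text-embedding-ada-002's output width
    pinecone.create_index(
        "quickstart", dimension=1536, metric="euclidean", pod_type="p1"
    )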
diff --git a/docs/examples/low_level/oss_ingestion_retrieval.ipynb b/docs/examples/low_level/oss_ingestion_retrieval.ipynb
index 4bb5da2752c54804c0f3e6920aee32a24cdca31a..1bf5b5f937cf1f39617cd4ad9c7f30648afbc68d 100644
--- a/docs/examples/low_level/oss_ingestion_retrieval.ipynb
+++ b/docs/examples/low_level/oss_ingestion_retrieval.ipynb
@@ -133,7 +133,9 @@
    "source": [
     "from llama_index import ServiceContext\n",
     "\n",
-    "service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)"
+    "service_context = ServiceContext.from_defaults(\n",
+    "    llm=llm, embed_model=embed_model\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/low_level/response_synthesis.ipynb b/docs/examples/low_level/response_synthesis.ipynb
index 72abd3b585718e6c88e5f4b0bfabeebe90eee139..2064a72b147f36ba20dd880bfa6284f3fc7dd8b8 100644
--- a/docs/examples/low_level/response_synthesis.ipynb
+++ b/docs/examples/low_level/response_synthesis.ipynb
@@ -112,7 +112,9 @@
    "outputs": [],
    "source": [
     "# dimensions are for text-embedding-ada-002\n",
-    "pinecone.create_index(\"quickstart\", dimension=1536, metric=\"euclidean\", pod_type=\"p1\")"
+    "pinecone.create_index(\n",
+    "    \"quickstart\", dimension=1536, metric=\"euclidean\", pod_type=\"p1\"\n",
+    ")"
    ]
   },
   {
@@ -202,7 +204,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "query_str = \"Can you tell me about results from RLHF using both model-based and human-based evaluation?\""
+    "query_str = (\n",
+    "    \"Can you tell me about results from RLHF using both model-based and\"\n",
+    "    \" human-based evaluation?\"\n",
+    ")"
    ]
   },
   {
@@ -285,7 +290,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "query_str = \"Can you tell me about results from RLHF using both model-based and human-based evaluation?\""
+    "query_str = (\n",
+    "    \"Can you tell me about results from RLHF using both model-based and\"\n",
+    "    \" human-based evaluation?\"\n",
+    ")"
    ]
   },
   {
@@ -307,7 +315,9 @@
    "source": [
     "def generate_response(retrieved_nodes, query_str, qa_prompt, llm):\n",
     "    context_str = \"\\n\\n\".join([r.get_content() for r in retrieved_nodes])\n",
-    "    fmt_qa_prompt = qa_prompt.format(context_str=context_str, query_str=query_str)\n",
+    "    fmt_qa_prompt = qa_prompt.format(\n",
+    "        context_str=context_str, query_str=query_str\n",
+    "    )\n",
     "    response = llm.complete(fmt_qa_prompt)\n",
     "    return str(response), fmt_qa_prompt"
    ]
@@ -319,7 +329,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "response, fmt_qa_prompt = generate_response(retrieved_nodes, query_str, qa_prompt, llm)"
+    "response, fmt_qa_prompt = generate_response(\n",
+    "    retrieved_nodes, query_str, qa_prompt, llm\n",
+    ")"
    ]
   },
   {
@@ -487,7 +499,9 @@
     }
    ],
    "source": [
-    "response, fmt_qa_prompt = generate_response(retrieved_nodes, query_str, qa_prompt, llm)\n",
+    "response, fmt_qa_prompt = generate_response(\n",
+    "    retrieved_nodes, query_str, qa_prompt, llm\n",
+    ")\n",
     "print(f\"Response (k=5): {response}\")"
    ]
   },
@@ -536,7 +550,9 @@
     "from llama_index.response.notebook_utils import display_source_node\n",
     "\n",
     "\n",
-    "def generate_response_cr(retrieved_nodes, query_str, qa_prompt, refine_prompt, llm):\n",
+    "def generate_response_cr(\n",
+    "    retrieved_nodes, query_str, qa_prompt, refine_prompt, llm\n",
+    "):\n",
     "    \"\"\"Generate a response using create and refine strategy.\n",
     "\n",
     "    The first node uses the 'QA' prompt.\n",
@@ -550,7 +566,9 @@
     "        display_source_node(node, source_length=2000)\n",
     "        context_str = node.get_content()\n",
     "        if idx == 0:\n",
-    "            fmt_prompt = qa_prompt.format(context_str=context_str, query_str=query_str)\n",
+    "            fmt_prompt = qa_prompt.format(\n",
+    "                context_str=context_str, query_str=query_str\n",
+    "            )\n",
     "        else:\n",
     "            fmt_prompt = refine_prompt.format(\n",
     "                context_str=context_str,\n",
@@ -649,7 +667,9 @@
     "    for idx in range(0, len(texts), num_children):\n",
     "        text_batch = texts[idx : idx + num_children]\n",
     "        context_str = \"\\n\\n\".join([t for t in text_batch])\n",
-    "        fmt_qa_prompt = qa_prompt.format(context_str=context_str, query_str=query_str)\n",
+    "        fmt_qa_prompt = qa_prompt.format(\n",
+    "            context_str=context_str, query_str=query_str\n",
+    "        )\n",
     "        combined_response = llm.complete(fmt_qa_prompt)\n",
     "        new_texts.append(str(combined_response))\n",
     "        cur_prompt_list.append(fmt_qa_prompt)\n",
@@ -662,7 +682,9 @@
     "        )\n",
     "\n",
     "\n",
-    "def generate_response_hs(retrieved_nodes, query_str, qa_prompt, llm, num_children=10):\n",
+    "def generate_response_hs(\n",
+    "    retrieved_nodes, query_str, qa_prompt, llm, num_children=10\n",
+    "):\n",
     "    \"\"\"Generate a response using hierarchical summarization strategy.\n",
     "\n",
     "    Combine num_children nodes hierarchically until we get one root node.\n",
@@ -672,7 +694,9 @@
     "    node_responses = []\n",
     "    for node in retrieved_nodes:\n",
     "        context_str = node.get_content()\n",
-    "        fmt_qa_prompt = qa_prompt.format(context_str=context_str, query_str=query_str)\n",
+    "        fmt_qa_prompt = qa_prompt.format(\n",
+    "            context_str=context_str, query_str=query_str\n",
+    "        )\n",
     "        node_response = llm.complete(fmt_qa_prompt)\n",
     "        node_responses.append(node_response)\n",
     "        fmt_prompts.append(fmt_qa_prompt)\n",
@@ -696,7 +720,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "response, fmt_prompts = generate_response_hs(retrieved_nodes, query_str, qa_prompt, llm)"
+    "response, fmt_prompts = generate_response_hs(\n",
+    "    retrieved_nodes, query_str, qa_prompt, llm\n",
+    ")"
    ]
   },
   {
@@ -774,7 +800,9 @@
     "    for idx in range(0, len(texts), num_children):\n",
     "        text_batch = texts[idx : idx + num_children]\n",
     "        context_str = \"\\n\\n\".join([t for t in text_batch])\n",
-    "        fmt_qa_prompt = qa_prompt.format(context_str=context_str, query_str=query_str)\n",
+    "        fmt_qa_prompt = qa_prompt.format(\n",
+    "            context_str=context_str, query_str=query_str\n",
+    "        )\n",
     "        fmt_prompts.append(fmt_qa_prompt)\n",
     "        cur_prompt_list.append(fmt_qa_prompt)\n",
     "\n",
@@ -802,7 +830,9 @@
     "    node_responses = []\n",
     "    for node in retrieved_nodes:\n",
     "        context_str = node.get_content()\n",
-    "        fmt_qa_prompt = qa_prompt.format(context_str=context_str, query_str=query_str)\n",
+    "        fmt_qa_prompt = qa_prompt.format(\n",
+    "            context_str=context_str, query_str=query_str\n",
+    "        )\n",
     "        fmt_prompts.append(fmt_qa_prompt)\n",
     "\n",
     "    tasks = [llm.acomplete(p) for p in fmt_prompts]\n",
diff --git a/docs/examples/low_level/retrieval.ipynb b/docs/examples/low_level/retrieval.ipynb
index a6877427b173bfc8a891ad7bf57c1b4d0052f5e7..09a624aa2aca0c1a0801d42eb92c5cfa0569a6ae 100644
--- a/docs/examples/low_level/retrieval.ipynb
+++ b/docs/examples/low_level/retrieval.ipynb
@@ -58,7 +58,9 @@
    "outputs": [],
    "source": [
     "# dimensions are for text-embedding-ada-002\n",
-    "pinecone.create_index(\"quickstart\", dimension=1536, metric=\"euclidean\", pod_type=\"p1\")"
+    "pinecone.create_index(\n",
+    "    \"quickstart\", dimension=1536, metric=\"euclidean\", pod_type=\"p1\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/low_level/router.ipynb b/docs/examples/low_level/router.ipynb
index 3094438353e5dc5b8a215d6edf0ede0891fac372..e02a2c51caeb2998fd0c2e9eedbb06d6ddc24381 100644
--- a/docs/examples/low_level/router.ipynb
+++ b/docs/examples/low_level/router.ipynb
@@ -49,7 +49,9 @@
     "\n",
     "\n",
     "def get_choice_str(choices):\n",
-    "    choices_str = \"\\n\\n\".join([f\"{idx+1}. {c}\" for idx, c in enumerate(choices)])\n",
+    "    choices_str = \"\\n\\n\".join(\n",
+    "        [f\"{idx+1}. {c}\" for idx, c in enumerate(choices)]\n",
+    "    )\n",
     "    return choices_str\n",
     "\n",
     "\n",
@@ -64,15 +66,12 @@
    "outputs": [],
    "source": [
     "router_prompt0 = PromptTemplate(\n",
-    "    \"Some choices are given below. It is provided in a numbered \"\n",
-    "    \"list (1 to {num_choices}), \"\n",
-    "    \"where each item in the list corresponds to a summary.\\n\"\n",
-    "    \"---------------------\\n\"\n",
-    "    \"{context_list}\"\n",
-    "    \"\\n---------------------\\n\"\n",
-    "    \"Using only the choices above and not prior knowledge, return the top choices \"\n",
-    "    \"(no more than {max_outputs}, but only select what is needed) that \"\n",
-    "    \"are most relevant to the question: '{query_str}'\\n\"\n",
+    "    \"Some choices are given below. It is provided in a numbered list (1 to\"\n",
+    "    \" {num_choices}), where each item in the list corresponds to a\"\n",
+    "    \" summary.\\n---------------------\\n{context_list}\\n---------------------\\nUsing\"\n",
+    "    \" only the choices above and not prior knowledge, return the top choices\"\n",
+    "    \" (no more than {max_outputs}, but only select what is needed) that are\"\n",
+    "    \" most relevant to the question: '{query_str}'\\n\"\n",
     ")"
    ]
   },
@@ -180,7 +179,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "query_str = \"Can you tell me more about the amount of Vitamin C in apples and oranges.\"\n",
+    "query_str = (\n",
+    "    \"Can you tell me more about the amount of Vitamin C in apples and oranges.\"\n",
+    ")\n",
     "fmt_prompt = get_formatted_prompt(query_str)\n",
     "response = llm.complete(fmt_prompt)"
    ]
@@ -450,7 +451,9 @@
     "from typing import List\n",
     "\n",
     "\n",
-    "def route_query(query_str: str, choices: List[str], output_parser: RouterOutputParser):\n",
+    "def route_query(\n",
+    "    query_str: str, choices: List[str], output_parser: RouterOutputParser\n",
+    "):\n",
     "    choices_str\n",
     "\n",
     "    fmt_base_prompt = router_prompt0.format(\n",
@@ -500,7 +503,7 @@
     "\n",
     "\n",
     "class Answer(BaseModel):\n",
-    "    \"Represents a single choice with a reason.\" \"\"\n",
+    "    \"Represents a single choice with a reason.\"\n",
     "    choice: int\n",
     "    reason: str\n",
     "\n",
@@ -723,7 +726,9 @@
     "vector_index = VectorStoreIndex.from_documents(\n",
     "    documents, service_context=service_context\n",
     ")\n",
-    "summary_index = SummaryIndex.from_documents(documents, service_context=service_context)"
+    "summary_index = SummaryIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")"
    ]
   },
   {
@@ -806,7 +811,9 @@
     "        else:\n",
     "            # if multiple choices are picked, we can pick a summarizer\n",
     "            response_strs = [str(r) for r in responses]\n",
-    "            result_response = self.summarizer.get_response(query_str, response_strs)\n",
+    "            result_response = self.summarizer.get_response(\n",
+    "                query_str, response_strs\n",
+    "            )\n",
     "            return result_response"
    ]
   },
@@ -818,7 +825,10 @@
    "outputs": [],
    "source": [
     "choices = [\n",
-    "    \"Useful for answering questions about specific sections of the Llama 2 paper\",\n",
+    "    (\n",
+    "        \"Useful for answering questions about specific sections of the Llama 2\"\n",
+    "        \" paper\"\n",
+    "    ),\n",
     "    \"Useful for questions that ask for a summary of the whole paper\",\n",
     "]\n",
     "\n",
diff --git a/docs/examples/low_level/vector_store.ipynb b/docs/examples/low_level/vector_store.ipynb
index 78c92e3a3a06d4ee21e524567069a1c56076722b..b25093c35f921369edbee86bd71e204087218509 100644
--- a/docs/examples/low_level/vector_store.ipynb
+++ b/docs/examples/low_level/vector_store.ipynb
@@ -677,7 +677,9 @@
     }
    ],
    "source": [
-    "query_obj = VectorStoreQuery(query_embedding=query_embedding, similarity_top_k=2)\n",
+    "query_obj = VectorStoreQuery(\n",
+    "    query_embedding=query_embedding, similarity_top_k=2\n",
+    ")\n",
     "\n",
     "query_result = vector_store.query(query_obj)\n",
     "for similarity, node in zip(query_result.similarities, query_result.nodes):\n",
diff --git a/docs/examples/metadata_extraction/EntityExtractionClimate.ipynb b/docs/examples/metadata_extraction/EntityExtractionClimate.ipynb
index 731e8b66838d15bc049eacc25a1e792b28227e45..135e2a033f87ebc2b0fd4ddcdfae02696a82600a 100644
--- a/docs/examples/metadata_extraction/EntityExtractionClimate.ipynb
+++ b/docs/examples/metadata_extraction/EntityExtractionClimate.ipynb
@@ -74,7 +74,9 @@
     "\n",
     "metadata_extractor = MetadataExtractor(extractors=[entity_extractor])\n",
     "\n",
-    "node_parser = SimpleNodeParser.from_defaults(metadata_extractor=metadata_extractor)"
+    "node_parser = SimpleNodeParser.from_defaults(\n",
+    "    metadata_extractor=metadata_extractor\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/metadata_extraction/MarvinMetadataExtractorDemo.ipynb b/docs/examples/metadata_extraction/MarvinMetadataExtractorDemo.ipynb
index 454db4bb6c56d05d83277895097683c924c6b3be..1517306f864d43e1de77e71fd5b45d9ece1b2573 100644
--- a/docs/examples/metadata_extraction/MarvinMetadataExtractorDemo.ipynb
+++ b/docs/examples/metadata_extraction/MarvinMetadataExtractorDemo.ipynb
@@ -90,7 +90,9 @@
     "@ai_model\n",
     "class SportsSupplement(BaseModel):\n",
     "    name: str = Field(..., description=\"The name of the sports supplement\")\n",
-    "    description: str = Field(..., description=\"A description of the sports supplement\")\n",
+    "    description: str = Field(\n",
+    "        ..., description=\"A description of the sports supplement\"\n",
+    "    )\n",
     "    pros_cons: str = Field(\n",
     "        ..., description=\"The pros and cons of the sports supplement\"\n",
     "    )"
@@ -110,7 +112,9 @@
     "# construct text splitter to split texts into chunks for processing\n",
     "# this takes a while to process, you can increase processing time by using larger chunk_size\n",
     "# file size is a factor too of course\n",
-    "text_splitter = TokenTextSplitter(separator=\" \", chunk_size=512, chunk_overlap=128)\n",
+    "text_splitter = TokenTextSplitter(\n",
+    "    separator=\" \", chunk_size=512, chunk_overlap=128\n",
+    ")\n",
     "\n",
     "# set the global service context object, avoiding passing service_context when building the index\n",
     "from llama_index import set_global_service_context\n",
diff --git a/docs/examples/metadata_extraction/MetadataExtractionSEC.ipynb b/docs/examples/metadata_extraction/MetadataExtractionSEC.ipynb
index 248a26e38591f0ed40739c59727d80f3af5fb4d9..08bafbe67e970e5930690c7bafaee220ffa8e609 100644
--- a/docs/examples/metadata_extraction/MetadataExtractionSEC.ipynb
+++ b/docs/examples/metadata_extraction/MetadataExtractionSEC.ipynb
@@ -86,16 +86,20 @@
     ")\n",
     "from llama_index.text_splitter import TokenTextSplitter\n",
     "\n",
-    "text_splitter = TokenTextSplitter(separator=\" \", chunk_size=512, chunk_overlap=128)\n",
+    "text_splitter = TokenTextSplitter(\n",
+    "    separator=\" \", chunk_size=512, chunk_overlap=128\n",
+    ")\n",
     "\n",
     "\n",
     "class CustomExtractor(MetadataFeatureExtractor):\n",
     "    def extract(self, nodes):\n",
     "        metadata_list = [\n",
     "            {\n",
-    "                \"custom\": node.metadata[\"document_title\"]\n",
-    "                + \"\\n\"\n",
-    "                + node.metadata[\"excerpt_keywords\"]\n",
+    "                \"custom\": (\n",
+    "                    node.metadata[\"document_title\"]\n",
+    "                    + \"\\n\"\n",
+    "                    + node.metadata[\"excerpt_keywords\"]\n",
+    "                )\n",
     "            }\n",
     "            for node in nodes\n",
     "        ]\n",
@@ -206,7 +210,9 @@
    "outputs": [],
    "source": [
     "# Note the uninformative document file name, which may be a common scenario in a production setting\n",
-    "lyft_docs = SimpleDirectoryReader(input_files=[\"data/10k-vFinal.pdf\"]).load_data()\n",
+    "lyft_docs = SimpleDirectoryReader(\n",
+    "    input_files=[\"data/10k-vFinal.pdf\"]\n",
+    ").load_data()\n",
     "lyft_front_pages = lyft_docs[0:3]\n",
     "lyft_content = lyft_docs[68:73]\n",
     "lyft_docs = lyft_front_pages + lyft_content"
@@ -265,7 +271,9 @@
     "from llama_index.question_gen.llm_generators import LLMQuestionGenerator\n",
     "from llama_index.question_gen.prompts import DEFAULT_SUB_QUESTION_PROMPT_TMPL\n",
     "\n",
-    "service_context = ServiceContext.from_defaults(llm=llm, node_parser=node_parser)\n",
+    "service_context = ServiceContext.from_defaults(\n",
+    "    llm=llm, node_parser=node_parser\n",
+    ")\n",
     "question_gen = LLMQuestionGenerator.from_defaults(\n",
     "    service_context=service_context,\n",
     "    prompt_template_str=\"\"\"\n",
@@ -317,9 +325,14 @@
     "nodes_no_metadata = deepcopy(uber_nodes) + deepcopy(lyft_nodes)\n",
     "for node in nodes_no_metadata:\n",
     "    node.metadata = {\n",
-    "        k: node.metadata[k] for k in node.metadata if k in [\"page_label\", \"file_name\"]\n",
+    "        k: node.metadata[k]\n",
+    "        for k in node.metadata\n",
+    "        if k in [\"page_label\", \"file_name\"]\n",
     "    }\n",
-    "print(\"LLM sees:\\n\", (nodes_no_metadata)[9].get_content(metadata_mode=MetadataMode.LLM))"
+    "print(\n",
+    "    \"LLM sees:\\n\",\n",
+    "    (nodes_no_metadata)[9].get_content(metadata_mode=MetadataMode.LLM),\n",
+    ")"
    ]
   },
   {
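The SEC-notebook hunk reindents a small custom extractor; the whole class, with the base-class import path assumed from the notebook's other cells, reads:

    from llama_index.node_parser.extractors import (  # assumed import path
        MetadataFeatureExtractor,
    )


    class CustomExtractor(MetadataFeatureExtractor):
        def extract(self, nodes):
            # fold two previously extracted fields into one "custom" entry
            return [
                {
                    "custom": (
                        node.metadata["document_title"]
                        + "\n"
                        + node.metadata["excerpt_keywords"]
                    )
                }
                for node in nodes
            ]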
diff --git a/docs/examples/metadata_extraction/MetadataExtraction_LLMSurvey.ipynb b/docs/examples/metadata_extraction/MetadataExtraction_LLMSurvey.ipynb
index 05cd39fb431f3d15fdc865968d9324176d8a64c4..aada9117ac6686220d6e6d5f27e901aa9f5b7216 100644
--- a/docs/examples/metadata_extraction/MetadataExtraction_LLMSurvey.ipynb
+++ b/docs/examples/metadata_extraction/MetadataExtraction_LLMSurvey.ipynb
@@ -119,7 +119,9 @@
     ")\n",
     "from llama_index.text_splitter import TokenTextSplitter\n",
     "\n",
-    "text_splitter = TokenTextSplitter(separator=\" \", chunk_size=256, chunk_overlap=128)\n",
+    "text_splitter = TokenTextSplitter(\n",
+    "    separator=\" \", chunk_size=256, chunk_overlap=128\n",
+    ")\n",
     "\n",
     "\n",
     "metadata_extractor_1 = MetadataExtractor(\n",
@@ -486,7 +488,10 @@
    "outputs": [],
    "source": [
     "from llama_index import VectorStoreIndex\n",
-    "from llama_index.response.notebook_utils import display_source_node, display_response"
+    "from llama_index.response.notebook_utils import (\n",
+    "    display_source_node,\n",
+    "    display_response,\n",
+    ")"
    ]
   },
   {
@@ -565,7 +570,10 @@
    ],
    "source": [
     "# query_str = \"In the original RAG paper, can you describe the two main approaches for generation and compare them?\"\n",
-    "query_str = \"Can you describe metrics for evaluating text generation quality, compare them, and tell me about their downsides\"\n",
+    "query_str = (\n",
+    "    \"Can you describe metrics for evaluating text generation quality, compare\"\n",
+    "    \" them, and tell me about their downsides\"\n",
+    ")\n",
     "\n",
     "response0 = query_engine0.query(query_str)\n",
     "response1 = query_engine1.query(query_str)\n",
@@ -670,7 +678,10 @@
    ],
    "source": [
     "# query_str = \"What are some reproducibility issues with the ROUGE metric? Give some details related to benchmarks and also describe other ROUGE issues. \"\n",
-    "query_str = \"Can you give a high-level overview of BERTScore/MoverScore + formulas if available?\"\n",
+    "query_str = (\n",
+    "    \"Can you give a high-level overview of BERTScore/MoverScore + formulas if\"\n",
+    "    \" available?\"\n",
+    ")\n",
     "\n",
     "response0 = query_engine0.query(query_str)\n",
     "response1 = query_engine1.query(query_str)\n",
diff --git a/docs/examples/metadata_extraction/PydanticExtractor.ipynb b/docs/examples/metadata_extraction/PydanticExtractor.ipynb
index 1595460040b746519e0568a235bb60236e93dedd..19314e7dd91994eefbc15cb6252bdad5da7664b9 100644
--- a/docs/examples/metadata_extraction/PydanticExtractor.ipynb
+++ b/docs/examples/metadata_extraction/PydanticExtractor.ipynb
@@ -83,11 +83,17 @@
     "class NodeMetadata(BaseModel):\n",
     "    \"\"\"Node metadata.\"\"\"\n",
     "\n",
-    "    entities: List[str] = Field(..., description=\"Unique entities in this text chunk.\")\n",
-    "    summary: str = Field(..., description=\"A concise summary of this text chunk.\")\n",
+    "    entities: List[str] = Field(\n",
+    "        ..., description=\"Unique entities in this text chunk.\"\n",
+    "    )\n",
+    "    summary: str = Field(\n",
+    "        ..., description=\"A concise summary of this text chunk.\"\n",
+    "    )\n",
     "    contains_number: bool = Field(\n",
     "        ...,\n",
-    "        description=\"Whether the text chunk contains any numbers (ints, floats, etc.)\",\n",
+    "        description=(\n",
+    "            \"Whether the text chunk contains any numbers (ints, floats, etc.)\"\n",
+    "        ),\n",
     "    )"
    ]
   },
diff --git a/docs/examples/node_postprocessor/CohereRerank.ipynb b/docs/examples/node_postprocessor/CohereRerank.ipynb
index 26c854f0c9cb9525ca17b519e4486b04b607d162..3465612fd05a87f1d6bb8cb2921d02b29482bc53 100644
--- a/docs/examples/node_postprocessor/CohereRerank.ipynb
+++ b/docs/examples/node_postprocessor/CohereRerank.ipynb
@@ -22,7 +22,11 @@
     }
    ],
    "source": [
-    "from llama_index import VectorStoreIndex, SimpleDirectoryReader, pprint_response"
+    "from llama_index import (\n",
+    "    VectorStoreIndex,\n",
+    "    SimpleDirectoryReader,\n",
+    "    pprint_response,\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/node_postprocessor/LLMReranker-Gatsby.ipynb b/docs/examples/node_postprocessor/LLMReranker-Gatsby.ipynb
index 9360f3d3e64ae44724da1788398e70229e8d1e79..34d74da07293250e0de45b23c2c719f20488ed5b 100644
--- a/docs/examples/node_postprocessor/LLMReranker-Gatsby.ipynb
+++ b/docs/examples/node_postprocessor/LLMReranker-Gatsby.ipynb
@@ -117,7 +117,9 @@
     }
    ],
    "source": [
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")"
    ]
   },
   {
@@ -168,9 +170,13 @@
     "    if with_reranker:\n",
     "        # configure reranker\n",
     "        reranker = LLMRerank(\n",
-    "            choice_batch_size=5, top_n=reranker_top_n, service_context=service_context\n",
+    "            choice_batch_size=5,\n",
+    "            top_n=reranker_top_n,\n",
+    "            service_context=service_context,\n",
+    "        )\n",
+    "        retrieved_nodes = reranker.postprocess_nodes(\n",
+    "            retrieved_nodes, query_bundle\n",
     "        )\n",
-    "        retrieved_nodes = reranker.postprocess_nodes(retrieved_nodes, query_bundle)\n",
     "\n",
     "    return retrieved_nodes\n",
     "\n",
@@ -207,7 +213,9 @@
    ],
    "source": [
     "new_nodes = get_retrieved_nodes(\n",
-    "    \"Who was driving the car that hit Myrtle?\", vector_top_k=3, with_reranker=False\n",
+    "    \"Who was driving the car that hit Myrtle?\",\n",
+    "    vector_top_k=3,\n",
+    "    with_reranker=False,\n",
     ")"
    ]
   },
@@ -595,7 +603,9 @@
    "outputs": [],
    "source": [
     "query_engine = index.as_query_engine(\n",
-    "    similarity_top_k=10, node_postprocessors=[reranker], response_mode=\"tree_summarize\"\n",
+    "    similarity_top_k=10,\n",
+    "    node_postprocessors=[reranker],\n",
+    "    response_mode=\"tree_summarize\",\n",
     ")\n",
     "response = query_engine.query(\n",
     "    \"What did the author do during his time at Y Combinator?\",\n",
@@ -609,7 +619,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "query_engine = index.as_query_engine(similarity_top_k=3, response_mode=\"tree_summarize\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    similarity_top_k=3, response_mode=\"tree_summarize\"\n",
+    ")\n",
     "response = query_engine.query(\n",
     "    \"What did the author do during his time at Y Combinator?\",\n",
     ")"
diff --git a/docs/examples/node_postprocessor/LLMReranker-Lyft-10k.ipynb b/docs/examples/node_postprocessor/LLMReranker-Lyft-10k.ipynb
index 93c53e1e8c90002b577d97ecc7ff5c1462ac7367..62559756180dadd8e02082c97842aaa7c0148e15 100644
--- a/docs/examples/node_postprocessor/LLMReranker-Lyft-10k.ipynb
+++ b/docs/examples/node_postprocessor/LLMReranker-Lyft-10k.ipynb
@@ -109,7 +109,9 @@
     }
    ],
    "source": [
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")"
    ]
   },
   {
@@ -161,9 +163,13 @@
     "    if with_reranker:\n",
     "        # configure reranker\n",
     "        reranker = LLMRerank(\n",
-    "            choice_batch_size=5, top_n=reranker_top_n, service_context=service_context\n",
+    "            choice_batch_size=5,\n",
+    "            top_n=reranker_top_n,\n",
+    "            service_context=service_context,\n",
+    "        )\n",
+    "        retrieved_nodes = reranker.postprocess_nodes(\n",
+    "            retrieved_nodes, query_bundle\n",
     "        )\n",
-    "        retrieved_nodes = reranker.postprocess_nodes(retrieved_nodes, query_bundle)\n",
     "\n",
     "    return retrieved_nodes\n",
     "\n",
diff --git a/docs/examples/node_postprocessor/LongContextReorder.ipynb b/docs/examples/node_postprocessor/LongContextReorder.ipynb
index d7e549ae5aa0bee287d0c0e9014189b3f273b470..dc56326e1291ae15569923de21fa22cc736cbc86 100644
--- a/docs/examples/node_postprocessor/LongContextReorder.ipynb
+++ b/docs/examples/node_postprocessor/LongContextReorder.ipynb
@@ -50,7 +50,9 @@
     "from llama_index.llms import OpenAI\n",
     "\n",
     "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\", temperature=0.1)\n",
-    "ctx = ServiceContext.from_defaults(llm=llm, embed_model=\"local:BAAI/bge-base-en-v1.5\")"
+    "ctx = ServiceContext.from_defaults(\n",
+    "    llm=llm, embed_model=\"local:BAAI/bge-base-en-v1.5\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/node_postprocessor/MetadataReplacementDemo.ipynb b/docs/examples/node_postprocessor/MetadataReplacementDemo.ipynb
index 29f3ba319a476e17c77d1fff159c0ad0f40675f6..dc053c1875c92e732ee77bf38d0f3e63a82b6880 100644
--- a/docs/examples/node_postprocessor/MetadataReplacementDemo.ipynb
+++ b/docs/examples/node_postprocessor/MetadataReplacementDemo.ipynb
@@ -228,7 +228,9 @@
     "        MetadataReplacementPostProcessor(target_metadata_key=\"window\")\n",
     "    ],\n",
     ")\n",
-    "window_response = query_engine.query(\"What are the concerns surrounding the AMOC?\")\n",
+    "window_response = query_engine.query(\n",
+    "    \"What are the concerns surrounding the AMOC?\"\n",
+    ")\n",
     "print(window_response)"
    ]
   },
@@ -313,7 +315,9 @@
    ],
    "source": [
     "query_engine = base_index.as_query_engine(similarity_top_k=2)\n",
-    "vector_response = query_engine.query(\"What are the concerns surrounding the AMOC?\")\n",
+    "vector_response = query_engine.query(\n",
+    "    \"What are the concerns surrounding the AMOC?\"\n",
+    ")\n",
     "print(vector_response)"
    ]
   },
@@ -339,7 +343,9 @@
    ],
    "source": [
     "query_engine = base_index.as_query_engine(similarity_top_k=5)\n",
-    "vector_response = query_engine.query(\"What are the concerns surrounding the AMOC?\")\n",
+    "vector_response = query_engine.query(\n",
+    "    \"What are the concerns surrounding the AMOC?\"\n",
+    ")\n",
     "print(vector_response)"
    ]
   },
@@ -695,7 +701,9 @@
     "base_pred_responses = get_responses(\n",
     "    eval_qs[:max_samples], base_query_engine, show_progress=True\n",
     ")\n",
-    "pred_responses = get_responses(eval_qs[:max_samples], query_engine, show_progress=True)\n",
+    "pred_responses = get_responses(\n",
+    "    eval_qs[:max_samples], query_engine, show_progress=True\n",
+    ")\n",
     "\n",
     "pred_response_strs = [str(p) for p in pred_responses]\n",
     "base_pred_response_strs = [str(p) for p in base_pred_responses]"
diff --git a/docs/examples/node_postprocessor/PII.ipynb b/docs/examples/node_postprocessor/PII.ipynb
index 446d0a7a556488a30e6ce19411b65e5e9938da36..f32aa7ef08b698f65c385dae85a184ad8c6bb8a4 100644
--- a/docs/examples/node_postprocessor/PII.ipynb
+++ b/docs/examples/node_postprocessor/PII.ipynb
@@ -295,7 +295,9 @@
     }
    ],
    "source": [
-    "response = index.as_query_engine().query(\"What address was the statement mailed to?\")\n",
+    "response = index.as_query_engine().query(\n",
+    "    \"What address was the statement mailed to?\"\n",
+    ")\n",
     "print(str(response))"
    ]
   }
diff --git a/docs/examples/node_postprocessor/PrevNextPostprocessorDemo.ipynb b/docs/examples/node_postprocessor/PrevNextPostprocessorDemo.ipynb
index a6ac2948093d68f557495ec96239a0b02dd4e8d3..043cb3e733e08c7a6dcdb3060ab9e8e371107c4e 100644
--- a/docs/examples/node_postprocessor/PrevNextPostprocessorDemo.ipynb
+++ b/docs/examples/node_postprocessor/PrevNextPostprocessorDemo.ipynb
@@ -141,7 +141,9 @@
    "outputs": [],
    "source": [
     "# Try querying index without node postprocessor\n",
-    "query_engine = index.as_query_engine(similarity_top_k=1, response_mode=\"tree_summarize\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    similarity_top_k=1, response_mode=\"tree_summarize\"\n",
+    ")\n",
     "response = query_engine.query(\n",
     "    \"What did the author do after handing off Y Combinator to Sam Altman?\",\n",
     ")"
@@ -174,7 +176,9 @@
    "outputs": [],
    "source": [
     "# Try querying index without node postprocessor and higher top-k\n",
-    "query_engine = index.as_query_engine(similarity_top_k=3, response_mode=\"tree_summarize\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    similarity_top_k=3, response_mode=\"tree_summarize\"\n",
+    ")\n",
     "response = query_engine.query(\n",
     "    \"What did the author do after handing off Y Combinator to Sam Altman?\",\n",
     ")"
@@ -215,7 +219,10 @@
    "outputs": [],
    "source": [
     "node_postprocessor = AutoPrevNextNodePostprocessor(\n",
-    "    docstore=docstore, num_nodes=3, service_context=service_context, verbose=True\n",
+    "    docstore=docstore,\n",
+    "    num_nodes=3,\n",
+    "    service_context=service_context,\n",
+    "    verbose=True,\n",
     ")"
    ]
   },
diff --git a/docs/examples/node_postprocessor/RecencyPostprocessorDemo.ipynb b/docs/examples/node_postprocessor/RecencyPostprocessorDemo.ipynb
index 729b99ac16e1308c152d86eec504c10f32406e2c..8b13fe6df585a1104cff2f7e137ff28725de84b4 100644
--- a/docs/examples/node_postprocessor/RecencyPostprocessorDemo.ipynb
+++ b/docs/examples/node_postprocessor/RecencyPostprocessorDemo.ipynb
@@ -162,7 +162,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "node_postprocessor_emb = EmbeddingRecencyPostprocessor(service_context=service_context)"
+    "node_postprocessor_emb = EmbeddingRecencyPostprocessor(\n",
+    "    service_context=service_context\n",
+    ")"
    ]
   },
   {
@@ -195,7 +197,8 @@
     "    similarity_top_k=3,\n",
     ")\n",
     "response = query_engine.query(\n",
-    "    \"How much did the author raise in seed funding from Idelle's husband (Julian) for Viaweb?\",\n",
+    "    \"How much did the author raise in seed funding from Idelle's husband\"\n",
+    "    \" (Julian) for Viaweb?\",\n",
     ")"
    ]
   },
@@ -212,7 +215,8 @@
     "    similarity_top_k=3, node_postprocessors=[node_postprocessor]\n",
     ")\n",
     "response = query_engine.query(\n",
-    "    \"How much did the author raise in seed funding from Idelle's husband (Julian) for Viaweb?\",\n",
+    "    \"How much did the author raise in seed funding from Idelle's husband\"\n",
+    "    \" (Julian) for Viaweb?\",\n",
     ")"
    ]
   },
@@ -238,7 +242,8 @@
     "    similarity_top_k=3, node_postprocessors=[node_postprocessor_emb]\n",
     ")\n",
     "response = query_engine.query(\n",
-    "    \"How much did the author raise in seed funding from Idelle's husband (Julian) for Viaweb?\",\n",
+    "    \"How much did the author raise in seed funding from Idelle's husband\"\n",
+    "    \" (Julian) for Viaweb?\",\n",
     ")"
    ]
   },
@@ -270,7 +275,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "query_str = \"How much did the author raise in seed funding from Idelle's husband (Julian) for Viaweb?\""
+    "query_str = (\n",
+    "    \"How much did the author raise in seed funding from Idelle's husband\"\n",
+    "    \" (Julian) for Viaweb?\"\n",
+    ")"
    ]
   },
   {
@@ -289,7 +297,9 @@
     }
    ],
    "source": [
-    "query_engine = index.as_query_engine(similarity_top_k=3, response_mode=\"no_text\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    similarity_top_k=3, response_mode=\"no_text\"\n",
+    ")\n",
     "init_response = query_engine.query(\n",
     "    query_str,\n",
     ")\n",
@@ -315,7 +325,9 @@
    ],
    "source": [
     "summary_index = SummaryIndex(resp_nodes)\n",
-    "query_engine = summary_index.as_query_engine(node_postprocessors=[node_postprocessor])\n",
+    "query_engine = summary_index.as_query_engine(\n",
+    "    node_postprocessors=[node_postprocessor]\n",
+    ")\n",
     "response = query_engine.query(query_str)"
    ]
   }
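The tail of the recency demo assembles a two-stage query: retrieve nodes without synthesis, then answer over just those nodes with the recency postprocessor applied. As one sketch (index, query_str, and node_postprocessor come from earlier cells; the source_nodes accessor is an assumption):

    from llama_index import SummaryIndex

    # stage 1: retrieve matching nodes only, no answer synthesis
    query_engine = index.as_query_engine(
        similarity_top_k=3, response_mode="no_text"
    )
    init_response = query_engine.query(query_str)
    resp_nodes = [n.node for n in init_response.source_nodes]  # assumed accessor

    # stage 2: synthesize over the recency-filtered nodes
    summary_index = SummaryIndex(resp_nodes)
    query_engine = summary_index.as_query_engine(
        node_postprocessors=[node_postprocessor]
    )
    response = query_engine.query(query_str)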
diff --git a/docs/examples/node_postprocessor/SentenceTransformerRerank.ipynb b/docs/examples/node_postprocessor/SentenceTransformerRerank.ipynb
index cc5a7fcdd355ab0cbce322d44a00427af675f91a..d08ae94a78f5dbb39a08b5e217997aa00ab6a806 100644
--- a/docs/examples/node_postprocessor/SentenceTransformerRerank.ipynb
+++ b/docs/examples/node_postprocessor/SentenceTransformerRerank.ipynb
@@ -111,7 +111,9 @@
     }
    ],
    "source": [
-    "query_engine = index.as_query_engine(similarity_top_k=10, node_postprocessors=[rerank])\n",
+    "query_engine = index.as_query_engine(\n",
+    "    similarity_top_k=10, node_postprocessors=[rerank]\n",
+    ")\n",
     "\n",
     "now = time()\n",
     "response = query_engine.query(\n",
diff --git a/docs/examples/node_postprocessor/TimeWeightedPostprocessorDemo.ipynb b/docs/examples/node_postprocessor/TimeWeightedPostprocessorDemo.ipynb
index 0d764eb7eb9fc147d75f663c0217fc6990f7b478..4cdf065559452a353e937c11e62719952e0b9b30 100644
--- a/docs/examples/node_postprocessor/TimeWeightedPostprocessorDemo.ipynb
+++ b/docs/examples/node_postprocessor/TimeWeightedPostprocessorDemo.ipynb
@@ -171,7 +171,8 @@
     "    similarity_top_k=3,\n",
     ")\n",
     "response = query_engine.query(\n",
-    "    \"How much did the author raise in seed funding from Idelle's husband (Julian) for Viaweb?\",\n",
+    "    \"How much did the author raise in seed funding from Idelle's husband\"\n",
+    "    \" (Julian) for Viaweb?\",\n",
     ")"
    ]
   },
@@ -211,7 +212,8 @@
     "    similarity_top_k=3, node_postprocessors=[node_postprocessor]\n",
     ")\n",
     "response = query_engine.query(\n",
-    "    \"How much did the author raise in seed funding from Idelle's husband (Julian) for Viaweb?\",\n",
+    "    \"How much did the author raise in seed funding from Idelle's husband\"\n",
+    "    \" (Julian) for Viaweb?\",\n",
     ")"
    ]
   },
@@ -266,7 +268,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "query_str = \"How much did the author raise in seed funding from Idelle's husband (Julian) for Viaweb?\""
+    "query_str = (\n",
+    "    \"How much did the author raise in seed funding from Idelle's husband\"\n",
+    "    \" (Julian) for Viaweb?\"\n",
+    ")"
    ]
   },
   {
@@ -276,7 +281,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "query_engine = index.as_query_engine(similarity_top_k=3, response_mode=\"no_text\")\n",
+    "query_engine = index.as_query_engine(\n",
+    "    similarity_top_k=3, response_mode=\"no_text\"\n",
+    ")\n",
     "init_response = query_engine.query(\n",
     "    query_str,\n",
     ")\n",
diff --git a/docs/examples/output_parsing/LangchainOutputParserDemo.ipynb b/docs/examples/output_parsing/LangchainOutputParserDemo.ipynb
index 12225bfca35ef833c922cd33182e4d42cbfeca0a..dd679c232a4452a37ce15ee85bbbb6aafa6398e8 100644
--- a/docs/examples/output_parsing/LangchainOutputParserDemo.ipynb
+++ b/docs/examples/output_parsing/LangchainOutputParserDemo.ipynb
@@ -128,10 +128,13 @@
     "response_schemas = [\n",
     "    ResponseSchema(\n",
     "        name=\"Education\",\n",
-    "        description=\"Describes the author's educational experience/background.\",\n",
+    "        description=(\n",
+    "            \"Describes the author's educational experience/background.\"\n",
+    "        ),\n",
     "    ),\n",
     "    ResponseSchema(\n",
-    "        name=\"Work\", description=\"Describes the author's work experience/background.\"\n",
+    "        name=\"Work\",\n",
+    "        description=\"Describes the author's work experience/background.\",\n",
     "    ),\n",
     "]"
    ]
@@ -143,7 +146,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "lc_output_parser = StructuredOutputParser.from_response_schemas(response_schemas)\n",
+    "lc_output_parser = StructuredOutputParser.from_response_schemas(\n",
+    "    response_schemas\n",
+    ")\n",
     "output_parser = LangchainOutputParser(lc_output_parser)"
    ]
   },
diff --git a/docs/examples/output_parsing/df_program.ipynb b/docs/examples/output_parsing/df_program.ipynb
index 1fa63f94dbc982f09f8e8698e5bca30b54a365c9..c1c3d5ebababf5e460ef353aa601902f6f66fa68 100644
--- a/docs/examples/output_parsing/df_program.ipynb
+++ b/docs/examples/output_parsing/df_program.ipynb
@@ -67,8 +67,9 @@
     "    output_cls=DataFrame,\n",
     "    llm=OpenAI(temperature=0, model=\"gpt-4-0613\"),\n",
     "    prompt_template_str=(\n",
-    "        \"Please extract the following query into a structured data according to: {input_str}.\"\n",
-    "        \"Please extract both the set of column names and a set of rows.\"\n",
+    "        \"Please extract the following query into a structured data according\"\n",
+    "        \" to: {input_str}.Please extract both the set of column names and a\"\n",
+    "        \" set of rows.\"\n",
     "    ),\n",
     "    verbose=True,\n",
     ")"
@@ -157,9 +158,10 @@
     "    output_cls=DataFrameRowsOnly,\n",
     "    llm=OpenAI(temperature=0, model=\"gpt-4-0613\"),\n",
     "    prompt_template_str=(\n",
-    "        \"Please extract the following text into a structured data: {input_str}. \"\n",
-    "        \"The column names are the following: ['Name', 'Age', 'City', 'Favorite Sport']. \"\n",
-    "        \"Do not specify additional parameters that are not in the function schema. \"\n",
+    "        \"Please extract the following text into a structured data:\"\n",
+    "        \" {input_str}. The column names are the following: ['Name', 'Age',\"\n",
+    "        \" 'City', 'Favorite Sport']. Do not specify additional parameters that\"\n",
+    "        \" are not in the function schema. \"\n",
     "    ),\n",
     "    verbose=True,\n",
     ")"
@@ -234,7 +236,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.program import OpenAIPydanticProgram, DFFullProgram, DFRowsProgram\n",
+    "from llama_index.program import (\n",
+    "    OpenAIPydanticProgram,\n",
+    "    DFFullProgram,\n",
+    "    DFRowsProgram,\n",
+    ")\n",
     "import pandas as pd\n",
     "\n",
     "# initialize empty df\n",
diff --git a/docs/examples/output_parsing/directory.py b/docs/examples/output_parsing/directory.py
index 81adf4c453671ba73e9a4d8b8be08d25d122dec6..3f7995fda6b68a8d397f13275ffe150bd6750f7e 100644
--- a/docs/examples/output_parsing/directory.py
+++ b/docs/examples/output_parsing/directory.py
@@ -26,11 +26,17 @@ class Node(BaseModel):
     name: str = Field(..., description="Name of the folder")
     children: List["Node"] = Field(
         default_factory=list,
-        description="List of children nodes, only applicable for folders, files cannot have children",
+        description=(
+            "List of children nodes, only applicable for folders, files cannot"
+            " have children"
+        ),
     )
     node_type: NodeType = Field(
         default=NodeType.FILE,
-        description="Either a file or folder, use the name to determine which it could be",
+        description=(
+            "Either a file or folder, use the name to determine which it"
+            " could be"
+        ),
     )
 
 
diff --git a/docs/examples/output_parsing/evaporate_program.ipynb b/docs/examples/output_parsing/evaporate_program.ipynb
index 5055457580579756f16e3c0482f95cae1502f488..5adf24d4b26c5c03ad59d90615528ba985f0cbad 100644
--- a/docs/examples/output_parsing/evaporate_program.ipynb
+++ b/docs/examples/output_parsing/evaporate_program.ipynb
@@ -509,7 +509,8 @@
     "from llama_index.program.predefined import MultiValueEvaporateProgram\n",
     "\n",
     "program = MultiValueEvaporateProgram.from_defaults(\n",
-    "    fields_to_extract=[\"countries\", \"medal_count\"], service_context=service_context\n",
+    "    fields_to_extract=[\"countries\", \"medal_count\"],\n",
+    "    service_context=service_context,\n",
     ")"
    ]
   },
diff --git a/docs/examples/output_parsing/guidance_pydantic_program.ipynb b/docs/examples/output_parsing/guidance_pydantic_program.ipynb
index 02051c2da06e636750ca30994c4a1f661971010e..67f297999329285956fd3ea130b8d687c587ef4f 100644
--- a/docs/examples/output_parsing/guidance_pydantic_program.ipynb
+++ b/docs/examples/output_parsing/guidance_pydantic_program.ipynb
@@ -77,7 +77,10 @@
    "source": [
     "program = GuidancePydanticProgram(\n",
     "    output_cls=Album,\n",
-    "    prompt_template_str=\"Generate an example album, with an artist and a list of songs. Using the movie {{movie_name}} as inspiration\",\n",
+    "    prompt_template_str=(\n",
+    "        \"Generate an example album, with an artist and a list of songs. Using\"\n",
+    "        \" the movie {{movie_name}} as inspiration\"\n",
+    "    ),\n",
     "    guidance_llm=OpenAI(\"text-davinci-003\"),\n",
     "    verbose=True,\n",
     ")"
diff --git a/docs/examples/output_parsing/guidance_sub_question.ipynb b/docs/examples/output_parsing/guidance_sub_question.ipynb
index fc501bbf8df7305cce82809a839b5823e8676531..427114d6e75eb64e8ef6195ecfd6e18abb864cba 100644
--- a/docs/examples/output_parsing/guidance_sub_question.ipynb
+++ b/docs/examples/output_parsing/guidance_sub_question.ipynb
@@ -53,7 +53,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.question_gen.guidance_generator import GuidanceQuestionGenerator\n",
+    "from llama_index.question_gen.guidance_generator import (\n",
+    "    GuidanceQuestionGenerator,\n",
+    ")\n",
     "from guidance.llms import OpenAI as GuidanceOpenAI"
    ]
   },
@@ -191,8 +193,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "lyft_docs = SimpleDirectoryReader(input_files=[\"../data/10k/lyft_2021.pdf\"]).load_data()\n",
-    "uber_docs = SimpleDirectoryReader(input_files=[\"../data/10k/uber_2021.pdf\"]).load_data()"
+    "lyft_docs = SimpleDirectoryReader(\n",
+    "    input_files=[\"../data/10k/lyft_2021.pdf\"]\n",
+    ").load_data()\n",
+    "uber_docs = SimpleDirectoryReader(\n",
+    "    input_files=[\"../data/10k/uber_2021.pdf\"]\n",
+    ").load_data()"
    ]
   },
   {
@@ -238,14 +244,18 @@
     "        query_engine=lyft_engine,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"lyft_10k\",\n",
-    "            description=\"Provides information about Lyft financials for year 2021\",\n",
+    "            description=(\n",
+    "                \"Provides information about Lyft financials for year 2021\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "    QueryEngineTool(\n",
     "        query_engine=uber_engine,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"uber_10k\",\n",
-    "            description=\"Provides information about Uber financials for year 2021\",\n",
+    "            description=(\n",
+    "                \"Provides information about Uber financials for year 2021\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "]\n",
@@ -286,7 +296,8 @@
    ],
    "source": [
     "response = s_engine.query(\n",
-    "    \"Compare and contrast the customer segments and geographies that grew the fastest\"\n",
+    "    \"Compare and contrast the customer segments and geographies that grew the\"\n",
+    "    \" fastest\"\n",
     ")"
    ]
   },
diff --git a/docs/examples/output_parsing/openai_sub_question.ipynb b/docs/examples/output_parsing/openai_sub_question.ipynb
index d70ad66f3d835c7acc27143fda20e9628c53c59d..dd241b283a3f46fb27ddb308ca957270bfbe8fda 100644
--- a/docs/examples/output_parsing/openai_sub_question.ipynb
+++ b/docs/examples/output_parsing/openai_sub_question.ipynb
@@ -92,27 +92,45 @@
     "tools = [\n",
     "    ToolMetadata(\n",
     "        name=\"march_22\",\n",
-    "        description=\"Provides information about Uber quarterly financials ending March 2022\",\n",
+    "        description=(\n",
+    "            \"Provides information about Uber quarterly financials ending March\"\n",
+    "            \" 2022\"\n",
+    "        ),\n",
     "    ),\n",
     "    ToolMetadata(\n",
     "        name=\"june_22\",\n",
-    "        description=\"Provides information about Uber quarterly financials ending June 2022\",\n",
+    "        description=(\n",
+    "            \"Provides information about Uber quarterly financials ending June\"\n",
+    "            \" 2022\"\n",
+    "        ),\n",
     "    ),\n",
     "    ToolMetadata(\n",
     "        name=\"sept_22\",\n",
-    "        description=\"Provides information about Uber quarterly financials ending September 2022\",\n",
+    "        description=(\n",
+    "            \"Provides information about Uber quarterly financials ending\"\n",
+    "            \" September 2022\"\n",
+    "        ),\n",
     "    ),\n",
     "    ToolMetadata(\n",
     "        name=\"sept_21\",\n",
-    "        description=\"Provides information about Uber quarterly financials ending September 2022\",\n",
+    "        description=(\n",
+    "            \"Provides information about Uber quarterly financials ending\"\n",
+    "            \" September 2022\"\n",
+    "        ),\n",
     "    ),\n",
     "    ToolMetadata(\n",
     "        name=\"june_21\",\n",
-    "        description=\"Provides information about Uber quarterly financials ending June 2022\",\n",
+    "        description=(\n",
+    "            \"Provides information about Uber quarterly financials ending June\"\n",
+    "            \" 2022\"\n",
+    "        ),\n",
     "    ),\n",
     "    ToolMetadata(\n",
     "        name=\"march_21\",\n",
-    "        description=\"Provides information about Uber quarterly financials ending March 2022\",\n",
+    "        description=(\n",
+    "            \"Provides information about Uber quarterly financials ending March\"\n",
+    "            \" 2022\"\n",
+    "        ),\n",
     "    ),\n",
     "]"
    ]
@@ -127,7 +145,8 @@
     "sub_questions = question_gen.generate(\n",
     "    tools=tools,\n",
     "    query=QueryBundle(\n",
-    "        \"Compare the fastest growing sectors for Uber in the first two quarters of 2022\"\n",
+    "        \"Compare the fastest growing sectors for Uber in the first two\"\n",
+    "        \" quarters of 2022\"\n",
     "    ),\n",
     ")"
    ]
diff --git a/docs/examples/query_engine/CustomRetrievers.ipynb b/docs/examples/query_engine/CustomRetrievers.ipynb
index 9ccf401c975e2a32f71bda6c96d69a26e796cb6b..8b8811671b1d29126fe3b0d482e4abcacc9d292d 100644
--- a/docs/examples/query_engine/CustomRetrievers.ipynb
+++ b/docs/examples/query_engine/CustomRetrievers.ipynb
@@ -306,7 +306,9 @@
     }
    ],
    "source": [
-    "response = custom_query_engine.query(\"What did the author do during his time at YC?\")"
+    "response = custom_query_engine.query(\n",
+    "    \"What did the author do during his time at YC?\"\n",
+    ")"
    ]
   },
   {
@@ -358,7 +360,9 @@
    "source": [
     "# hybrid search can allow us to not retrieve nodes that are irrelevant\n",
     "# Yale is never mentioned in the essay\n",
-    "response = custom_query_engine.query(\"What did the author do during his time at Yale?\")"
+    "response = custom_query_engine.query(\n",
+    "    \"What did the author do during his time at Yale?\"\n",
+    ")"
    ]
   },
   {
@@ -413,7 +417,9 @@
    ],
    "source": [
     "# in contrast, vector search will return an answer\n",
-    "response = vector_query_engine.query(\"What did the author do during his time at Yale?\")"
+    "response = vector_query_engine.query(\n",
+    "    \"What did the author do during his time at Yale?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/query_engine/JointQASummary.ipynb b/docs/examples/query_engine/JointQASummary.ipynb
index fb28a72ab51ddc8bf0110c0d68279198b0e4c682..2ed50c5a4f181697bc23726a1ace6d53365ce698 100644
--- a/docs/examples/query_engine/JointQASummary.ipynb
+++ b/docs/examples/query_engine/JointQASummary.ipynb
@@ -42,7 +42,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.composability.joint_qa_summary import QASummaryQueryEngineBuilder\n",
+    "from llama_index.composability.joint_qa_summary import (\n",
+    "    QASummaryQueryEngineBuilder,\n",
+    ")\n",
     "from llama_index import SimpleDirectoryReader, ServiceContext, LLMPredictor\n",
     "from llama_index.response.notebook_utils import display_response\n",
     "from llama_index.llms import OpenAI"
@@ -79,7 +81,9 @@
     "service_context_gpt4 = ServiceContext.from_defaults(llm=gpt4, chunk_size=1024)\n",
     "\n",
     "chatgpt = OpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
-    "service_context_chatgpt = ServiceContext.from_defaults(llm=chatgpt, chunk_size=1024)"
+    "service_context_chatgpt = ServiceContext.from_defaults(\n",
+    "    llm=chatgpt, chunk_size=1024\n",
+    ")"
    ]
   },
   {
@@ -105,7 +109,9 @@
    ],
    "source": [
     "# NOTE: can also specify an existing docstore, service context, summary text, qa_text, etc.\n",
-    "query_engine_builder = QASummaryQueryEngineBuilder(service_context=service_context_gpt4)\n",
+    "query_engine_builder = QASummaryQueryEngineBuilder(\n",
+    "    service_context=service_context_gpt4\n",
+    ")\n",
     "query_engine = query_engine_builder.build_from_documents(documents)"
    ]
   },
diff --git a/docs/examples/query_engine/RetrieverRouterQueryEngine.ipynb b/docs/examples/query_engine/RetrieverRouterQueryEngine.ipynb
index af3523186ef463b3eb114536f12ade07a45d2a87..2495194024c59d132fd1826100c08f68355fe533 100644
--- a/docs/examples/query_engine/RetrieverRouterQueryEngine.ipynb
+++ b/docs/examples/query_engine/RetrieverRouterQueryEngine.ipynb
@@ -180,7 +180,10 @@
     ")\n",
     "vector_tool = QueryEngineTool.from_defaults(\n",
     "    query_engine=vector_query_engine,\n",
-    "    description=\"Useful for retrieving specific snippets from the author's life, like his time in college, his time in YC, or more.\",\n",
+    "    description=(\n",
+    "        \"Useful for retrieving specific snippets from the author's life, like\"\n",
+    "        \" his time in college, his time in YC, or more.\"\n",
+    "    ),\n",
     ")"
    ]
   },
@@ -361,7 +364,9 @@
     }
    ],
    "source": [
-    "response = query_engine.query(\"What did Paul Graham do during his time in college?\")"
+    "response = query_engine.query(\n",
+    "    \"What did Paul Graham do during his time in college?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/query_engine/RouterQueryEngine.ipynb b/docs/examples/query_engine/RouterQueryEngine.ipynb
index f28061c23b1d20ba2ad40adbf3c67816f2539569..89c74cb36d9f068dd6836626348930b28fa7127d 100644
--- a/docs/examples/query_engine/RouterQueryEngine.ipynb
+++ b/docs/examples/query_engine/RouterQueryEngine.ipynb
@@ -161,12 +161,18 @@
     "\n",
     "list_tool = QueryEngineTool.from_defaults(\n",
     "    query_engine=list_query_engine,\n",
-    "    description=\"Useful for summarization questions related to Paul Graham eassy on What I Worked On.\",\n",
+    "    description=(\n",
+    "        \"Useful for summarization questions related to Paul Graham eassy on\"\n",
+    "        \" What I Worked On.\"\n",
+    "    ),\n",
     ")\n",
     "\n",
     "vector_tool = QueryEngineTool.from_defaults(\n",
     "    query_engine=vector_query_engine,\n",
-    "    description=\"Useful for retrieving specific context from Paul Graham essay on What I Worked On.\",\n",
+    "    description=(\n",
+    "        \"Useful for retrieving specific context from Paul Graham essay on What\"\n",
+    "        \" I Worked On.\"\n",
+    "    ),\n",
     ")"
    ]
   },
@@ -196,7 +202,10 @@
    "outputs": [],
    "source": [
     "from llama_index.query_engine.router_query_engine import RouterQueryEngine\n",
-    "from llama_index.selectors.llm_selectors import LLMSingleSelector, LLMMultiSelector\n",
+    "from llama_index.selectors.llm_selectors import (\n",
+    "    LLMSingleSelector,\n",
+    "    LLMMultiSelector,\n",
+    ")\n",
     "from llama_index.selectors.pydantic_selectors import (\n",
     "    PydanticMultiSelector,\n",
     "    PydanticSingleSelector,\n",
@@ -366,7 +375,10 @@
     "\n",
     "keyword_tool = QueryEngineTool.from_defaults(\n",
     "    query_engine=vector_query_engine,\n",
-    "    description=\"Useful for retrieving specific context using keywords from Paul Graham essay on What I Worked On.\",\n",
+    "    description=(\n",
+    "        \"Useful for retrieving specific context using keywords from Paul\"\n",
+    "        \" Graham essay on What I Worked On.\"\n",
+    "    ),\n",
     ")"
    ]
   },
@@ -427,7 +439,8 @@
    "source": [
     "# This query could use either a keyword or vector query engine, so it will combine responses from both\n",
     "query_engine.query(\n",
-    "    \"What were noteable events and people from the authors time at Interleaf and YC?\"\n",
+    "    \"What were noteable events and people from the authors time at Interleaf\"\n",
+    "    \" and YC?\"\n",
     ")"
    ]
   }
diff --git a/docs/examples/query_engine/SQLAutoVectorQueryEngine.ipynb b/docs/examples/query_engine/SQLAutoVectorQueryEngine.ipynb
index d5c5010452ac5caacc2327f8aafa52a396578be5..13898369800389c8919eceec1c4dad6c2286af19 100644
--- a/docs/examples/query_engine/SQLAutoVectorQueryEngine.ipynb
+++ b/docs/examples/query_engine/SQLAutoVectorQueryEngine.ipynb
@@ -426,7 +426,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.query_engine import SQLAutoVectorQueryEngine, RetrieverQueryEngine\n",
+    "from llama_index.query_engine import (\n",
+    "    SQLAutoVectorQueryEngine,\n",
+    "    RetrieverQueryEngine,\n",
+    ")\n",
     "from llama_index.tools.query_engine import QueryEngineTool\n",
     "from llama_index.indices.vector_store import VectorIndexAutoRetriever"
    ]
@@ -437,15 +440,21 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever\n",
+    "from llama_index.indices.vector_store.retrievers import (\n",
+    "    VectorIndexAutoRetriever,\n",
+    ")\n",
     "from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n",
-    "from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine\n",
+    "from llama_index.query_engine.retriever_query_engine import (\n",
+    "    RetrieverQueryEngine,\n",
+    ")\n",
     "\n",
     "\n",
     "vector_store_info = VectorStoreInfo(\n",
     "    content_info=\"articles about different cities\",\n",
     "    metadata_info=[\n",
-    "        MetadataInfo(name=\"title\", type=\"str\", description=\"The name of the city\"),\n",
+    "        MetadataInfo(\n",
+    "            name=\"title\", type=\"str\", description=\"The name of the city\"\n",
+    "        ),\n",
     "    ],\n",
     ")\n",
     "vector_auto_retriever = VectorIndexAutoRetriever(\n",
@@ -466,13 +475,16 @@
     "sql_tool = QueryEngineTool.from_defaults(\n",
     "    query_engine=sql_query_engine,\n",
     "    description=(\n",
-    "        \"Useful for translating a natural language query into a SQL query over a table containing: \"\n",
-    "        \"city_stats, containing the population/country of each city\"\n",
+    "        \"Useful for translating a natural language query into a SQL query over\"\n",
+    "        \" a table containing: city_stats, containing the population/country of\"\n",
+    "        \" each city\"\n",
     "    ),\n",
     ")\n",
     "vector_tool = QueryEngineTool.from_defaults(\n",
     "    query_engine=retriever_query_engine,\n",
-    "    description=f\"Useful for answering semantic questions about different cities\",\n",
+    "    description=(\n",
+    "        f\"Useful for answering semantic questions about different cities\"\n",
+    "    ),\n",
     ")"
    ]
   },
@@ -531,7 +543,8 @@
    ],
    "source": [
     "response = query_engine.query(\n",
-    "    \"Tell me about the arts and culture of the city with the highest population\"\n",
+    "    \"Tell me about the arts and culture of the city with the highest\"\n",
+    "    \" population\"\n",
     ")"
    ]
   },
@@ -642,7 +655,9 @@
     }
    ],
    "source": [
-    "response = query_engine.query(\"Can you give me the country corresponding to each city?\")"
+    "response = query_engine.query(\n",
+    "    \"Can you give me the country corresponding to each city?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/query_engine/SQLJoinQueryEngine.ipynb b/docs/examples/query_engine/SQLJoinQueryEngine.ipynb
index fa98c5472d7aadd9b21f1e789e6082116bad1ac0..8a7c80f6d3bc1288c81fef212b8cb905775e079c 100644
--- a/docs/examples/query_engine/SQLJoinQueryEngine.ipynb
+++ b/docs/examples/query_engine/SQLJoinQueryEngine.ipynb
@@ -405,7 +405,9 @@
     "    query_engine_tools.append(query_engine_tool)\n",
     "\n",
     "\n",
-    "s_engine = SubQuestionQueryEngine.from_defaults(query_engine_tools=query_engine_tools)\n",
+    "s_engine = SubQuestionQueryEngine.from_defaults(\n",
+    "    query_engine_tools=query_engine_tools\n",
+    ")\n",
     "\n",
     "# from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever\n",
     "# from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n",
@@ -437,13 +439,16 @@
     "sql_tool = QueryEngineTool.from_defaults(\n",
     "    query_engine=sql_query_engine,\n",
     "    description=(\n",
-    "        \"Useful for translating a natural language query into a SQL query over a table containing: \"\n",
-    "        \"city_stats, containing the population/country of each city\"\n",
+    "        \"Useful for translating a natural language query into a SQL query over\"\n",
+    "        \" a table containing: city_stats, containing the population/country of\"\n",
+    "        \" each city\"\n",
     "    ),\n",
     ")\n",
     "s_engine_tool = QueryEngineTool.from_defaults(\n",
     "    query_engine=s_engine,\n",
-    "    description=f\"Useful for answering semantic questions about different cities\",\n",
+    "    description=(\n",
+    "        f\"Useful for answering semantic questions about different cities\"\n",
+    "    ),\n",
     ")"
    ]
   },
@@ -517,7 +522,8 @@
    ],
    "source": [
     "response = query_engine.query(\n",
-    "    \"Tell me about the arts and culture of the city with the highest population\"\n",
+    "    \"Tell me about the arts and culture of the city with the highest\"\n",
+    "    \" population\"\n",
     ")"
    ]
   },
diff --git a/docs/examples/query_engine/SQLRouterQueryEngine.ipynb b/docs/examples/query_engine/SQLRouterQueryEngine.ipynb
index 866545cbab1cd8ad330b2c4f7ad8a733f332e002..a6ac67ae7fab6c182058b21fc8513e1eef919d99 100644
--- a/docs/examples/query_engine/SQLRouterQueryEngine.ipynb
+++ b/docs/examples/query_engine/SQLRouterQueryEngine.ipynb
@@ -372,8 +372,9 @@
     "sql_tool = QueryEngineTool.from_defaults(\n",
     "    query_engine=sql_query_engine,\n",
     "    description=(\n",
-    "        \"Useful for translating a natural language query into a SQL query over a table containing: \"\n",
-    "        \"city_stats, containing the population/country of each city\"\n",
+    "        \"Useful for translating a natural language query into a SQL query over\"\n",
+    "        \" a table containing: city_stats, containing the population/country of\"\n",
+    "        \" each city\"\n",
     "    ),\n",
     ")\n",
     "vector_tools = []\n",
diff --git a/docs/examples/query_engine/citation_query_engine.ipynb b/docs/examples/query_engine/citation_query_engine.ipynb
index 5d667bb5a49481aa66c8088da823c4f558fb6d15..04d1ed3cd1ac4c8555834fe40bc90a1df44f2f7c 100644
--- a/docs/examples/query_engine/citation_query_engine.ipynb
+++ b/docs/examples/query_engine/citation_query_engine.ipynb
@@ -73,7 +73,9 @@
    "source": [
     "if not os.path.exists(\"./citation\"):\n",
     "    documents = SimpleDirectoryReader(\"../data/paul_graham\").load_data()\n",
-    "    index = VectorStoreIndex.from_documents(documents, service_context=service_context)\n",
+    "    index = VectorStoreIndex.from_documents(\n",
+    "        documents, service_context=service_context\n",
+    "    )\n",
     "    index.storage_context.persist(persist_dir=\"./citation\")\n",
     "else:\n",
     "    index = load_index_from_storage(\n",
diff --git a/docs/examples/query_engine/custom_query_engine.ipynb b/docs/examples/query_engine/custom_query_engine.ipynb
index c40e2cfdcfeae319676e1f46ff5f0fb570fdc359..9e5929cb05ca7ecd5d5613720a5b62175ea17822 100644
--- a/docs/examples/query_engine/custom_query_engine.ipynb
+++ b/docs/examples/query_engine/custom_query_engine.ipynb
@@ -82,7 +82,10 @@
    "source": [
     "from llama_index.query_engine import CustomQueryEngine\n",
     "from llama_index.retrievers import BaseRetriever\n",
-    "from llama_index.response_synthesizers import get_response_synthesizer, BaseSynthesizer"
+    "from llama_index.response_synthesizers import (\n",
+    "    get_response_synthesizer,\n",
+    "    BaseSynthesizer,\n",
+    ")"
    ]
   },
   {
@@ -189,7 +192,9 @@
    "outputs": [],
    "source": [
     "synthesizer = get_response_synthesizer(response_mode=\"compact\")\n",
-    "query_engine = RAGQueryEngine(retriever=retriever, response_synthesizer=synthesizer)"
+    "query_engine = RAGQueryEngine(\n",
+    "    retriever=retriever, response_synthesizer=synthesizer\n",
+    ")"
    ]
   },
   {
@@ -248,7 +253,10 @@
     "llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
     "\n",
     "query_engine = RAGStringQueryEngine(\n",
-    "    retriever=retriever, response_synthesizer=synthesizer, llm=llm, qa_prompt=qa_prompt\n",
+    "    retriever=retriever,\n",
+    "    response_synthesizer=synthesizer,\n",
+    "    llm=llm,\n",
+    "    qa_prompt=qa_prompt,\n",
     ")"
    ]
   },
diff --git a/docs/examples/query_engine/ensemble_query_engine.ipynb b/docs/examples/query_engine/ensemble_query_engine.ipynb
index 3db9eaf3ca6619391ab61f29ba956dda9046be13..506cd173ca96a4469346985c2afada476599f662 100644
--- a/docs/examples/query_engine/ensemble_query_engine.ipynb
+++ b/docs/examples/query_engine/ensemble_query_engine.ipynb
@@ -211,7 +211,9 @@
     ")\n",
     "QA_PROMPT = PromptTemplate(QA_PROMPT_TMPL)\n",
     "\n",
-    "keyword_query_engine = keyword_index.as_query_engine(text_qa_template=QA_PROMPT)\n",
+    "keyword_query_engine = keyword_index.as_query_engine(\n",
+    "    text_qa_template=QA_PROMPT\n",
+    ")\n",
     "vector_query_engine = vector_index.as_query_engine(text_qa_template=QA_PROMPT)"
    ]
   },
@@ -322,7 +324,10 @@
    "outputs": [],
    "source": [
     "from llama_index.query_engine.router_query_engine import RouterQueryEngine\n",
-    "from llama_index.selectors.llm_selectors import LLMSingleSelector, LLMMultiSelector\n",
+    "from llama_index.selectors.llm_selectors import (\n",
+    "    LLMSingleSelector,\n",
+    "    LLMMultiSelector,\n",
+    ")\n",
     "from llama_index.selectors.pydantic_selectors import (\n",
     "    PydanticMultiSelector,\n",
     "    PydanticSingleSelector,\n",
@@ -330,16 +335,13 @@
     "from llama_index.response_synthesizers import TreeSummarize\n",
     "\n",
     "TREE_SUMMARIZE_PROMPT_TMPL = (\n",
-    "    \"Context information from multiple sources is below. Each source may or may not have \\n\"\n",
-    "    \"a relevance score attached to it.\\n\"\n",
-    "    \"---------------------\\n\"\n",
-    "    \"{context_str}\\n\"\n",
-    "    \"---------------------\\n\"\n",
-    "    \"Given the information from multiple sources and their associated relevance scores (if provided) and not prior knowledge, \"\n",
-    "    \"answer the question. If the answer is not in the context, inform \"\n",
-    "    \"the user that you can't answer the question.\\n\"\n",
-    "    \"Question: {query_str}\\n\"\n",
-    "    \"Answer: \"\n",
+    "    \"Context information from multiple sources is below. Each source may or\"\n",
+    "    \" may not have \\na relevance score attached to\"\n",
+    "    \" it.\\n---------------------\\n{context_str}\\n---------------------\\nGiven\"\n",
+    "    \" the information from multiple sources and their associated relevance\"\n",
+    "    \" scores (if provided) and not prior knowledge, answer the question. If\"\n",
+    "    \" the answer is not in the context, inform the user that you can't answer\"\n",
+    "    \" the question.\\nQuestion: {query_str}\\nAnswer: \"\n",
     ")\n",
     "\n",
     "tree_summarize = TreeSummarize(\n",
diff --git a/docs/examples/query_engine/flare_query_engine.ipynb b/docs/examples/query_engine/flare_query_engine.ipynb
index 09a41945add7eb185ce72d15d67031043928a1b3..db6da023ef12fe4c0a83cf354752d7a3edeaec99 100644
--- a/docs/examples/query_engine/flare_query_engine.ipynb
+++ b/docs/examples/query_engine/flare_query_engine.ipynb
@@ -61,7 +61,9 @@
    "outputs": [],
    "source": [
     "documents = SimpleDirectoryReader(\"../data/paul_graham\").load_data()\n",
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/query_engine/json_query_engine.ipynb b/docs/examples/query_engine/json_query_engine.ipynb
index aa5abf59384c41667d10f07d884af076759dee01..080ddd4212a56fb66872d7f3d212f4775785505b 100644
--- a/docs/examples/query_engine/json_query_engine.ipynb
+++ b/docs/examples/query_engine/json_query_engine.ipynb
@@ -102,7 +102,11 @@
     "# Test on some sample data\n",
     "json_value = {\n",
     "    \"blogPosts\": [\n",
-    "        {\"id\": 1, \"title\": \"First blog post\", \"content\": \"This is my first blog post\"},\n",
+    "        {\n",
+    "            \"id\": 1,\n",
+    "            \"title\": \"First blog post\",\n",
+    "            \"content\": \"This is my first blog post\",\n",
+    "        },\n",
     "        {\n",
     "            \"id\": 2,\n",
     "            \"title\": \"Second blog post\",\n",
@@ -110,7 +114,12 @@
     "        },\n",
     "    ],\n",
     "    \"comments\": [\n",
-    "        {\"id\": 1, \"content\": \"Nice post!\", \"username\": \"jerry\", \"blogPostId\": 1},\n",
+    "        {\n",
+    "            \"id\": 1,\n",
+    "            \"content\": \"Nice post!\",\n",
+    "            \"username\": \"jerry\",\n",
+    "            \"blogPostId\": 1,\n",
+    "        },\n",
     "        {\n",
     "            \"id\": 2,\n",
     "            \"content\": \"Interesting thoughts\",\n",
@@ -169,11 +178,16 @@
     "                        \"type\": \"string\",\n",
     "                    },\n",
     "                    \"username\": {\n",
-    "                        \"description\": \"Username of the commenter (lowercased)\",\n",
+    "                        \"description\": (\n",
+    "                            \"Username of the commenter (lowercased)\"\n",
+    "                        ),\n",
     "                        \"type\": \"string\",\n",
     "                    },\n",
     "                    \"blogPostId\": {\n",
-    "                        \"description\": \"Identifier for the blog post to which the comment belongs\",\n",
+    "                        \"description\": (\n",
+    "                            \"Identifier for the blog post to which the comment\"\n",
+    "                            \" belongs\"\n",
+    "                        ),\n",
     "                        \"type\": \"integer\",\n",
     "                    },\n",
     "                },\n",
@@ -199,7 +213,9 @@
     "llm = OpenAI(model=\"gpt-4\")\n",
     "service_context = ServiceContext.from_defaults(llm=llm)\n",
     "nl_query_engine = JSONQueryEngine(\n",
-    "    json_value=json_value, json_schema=json_schema, service_context=service_context\n",
+    "    json_value=json_value,\n",
+    "    json_schema=json_schema,\n",
+    "    service_context=service_context,\n",
     ")\n",
     "raw_query_engine = JSONQueryEngine(\n",
     "    json_value=json_value,\n",
@@ -256,7 +272,9 @@
     }
    ],
    "source": [
-    "display(Markdown(f\"<h1>Natural language Response</h1><br><b>{nl_response}</b>\"))\n",
+    "display(\n",
+    "    Markdown(f\"<h1>Natural language Response</h1><br><b>{nl_response}</b>\")\n",
+    ")\n",
     "display(Markdown(f\"<h1>Raw JSON Response</h1><br><b>{raw_response}</b>\"))"
    ]
   },
diff --git a/docs/examples/query_engine/knowledge_graph_query_engine.ipynb b/docs/examples/query_engine/knowledge_graph_query_engine.ipynb
index d765c87a57783776430d9e7e4a9c706cb23ebf36..bbc2fb8038417de14694838b598783edc00441a6 100644
--- a/docs/examples/query_engine/knowledge_graph_query_engine.ipynb
+++ b/docs/examples/query_engine/knowledge_graph_query_engine.ipynb
@@ -663,7 +663,9 @@
     "    \"Tell me about Peter Quill?\",\n",
     ")\n",
     "\n",
-    "graph_query = graph_query.replace(\"WHERE\", \"\\n  WHERE\").replace(\"RETURN\", \"\\nRETURN\")\n",
+    "graph_query = graph_query.replace(\"WHERE\", \"\\n  WHERE\").replace(\n",
+    "    \"RETURN\", \"\\nRETURN\"\n",
+    ")\n",
     "\n",
     "display(\n",
     "    Markdown(\n",
diff --git a/docs/examples/query_engine/pandas_query_engine.ipynb b/docs/examples/query_engine/pandas_query_engine.ipynb
index dcb01774ab98c0125d8c89f2e21af7ff352f9f1e..40f51c020f17fef4214be6c566b907a9483588e4 100644
--- a/docs/examples/query_engine/pandas_query_engine.ipynb
+++ b/docs/examples/query_engine/pandas_query_engine.ipynb
@@ -48,7 +48,10 @@
    "source": [
     "# Test on some sample data\n",
     "df = pd.DataFrame(\n",
-    "    {\"city\": [\"Toronto\", \"Tokyo\", \"Berlin\"], \"population\": [2930000, 13960000, 3645000]}\n",
+    "    {\n",
+    "        \"city\": [\"Toronto\", \"Tokyo\", \"Berlin\"],\n",
+    "        \"population\": [2930000, 13960000, 3645000],\n",
+    "    }\n",
     ")"
    ]
   },
diff --git a/docs/examples/query_engine/pdf_tables/recursive_retriever.ipynb b/docs/examples/query_engine/pdf_tables/recursive_retriever.ipynb
index 20e608099cfe8add914e3542393845fa0051330e..f02f8b33ca8c6d5aa2cf8479c34b09c29d5e2b9d 100644
--- a/docs/examples/query_engine/pdf_tables/recursive_retriever.ipynb
+++ b/docs/examples/query_engine/pdf_tables/recursive_retriever.ipynb
@@ -606,7 +606,9 @@
     }
    ],
    "source": [
-    "response = df_query_engines[1].query(\"How many billionaires were there in 2009?\")\n",
+    "response = df_query_engines[1].query(\n",
+    "    \"How many billionaires were there in 2009?\"\n",
+    ")\n",
     "print(str(response))"
    ]
   },
@@ -654,8 +656,14 @@
    "source": [
     "# define index nodes\n",
     "summaries = [\n",
-    "    \"This node provides information about the world's richest billionaires in 2023\",\n",
-    "    \"This node provides information on the number of billionaires and their combined net worth from 2000 to 2023.\",\n",
+    "    (\n",
+    "        \"This node provides information about the world's richest billionaires\"\n",
+    "        \" in 2023\"\n",
+    "    ),\n",
+    "    (\n",
+    "        \"This node provides information on the number of billionaires and\"\n",
+    "        \" their combined net worth from 2000 to 2023.\"\n",
+    "    ),\n",
     "]\n",
     "\n",
     "df_nodes = [\n",
@@ -851,7 +859,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "response = vector_query_engine0.query(\"How many billionaires were there in 2009?\")"
+    "response = vector_query_engine0.query(\n",
+    "    \"How many billionaires were there in 2009?\"\n",
+    ")"
    ]
   },
   {
@@ -899,7 +909,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "response = query_engine.query(\"Which billionaires are excluded from this list?\")"
+    "response = query_engine.query(\n",
+    "    \"Which billionaires are excluded from this list?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/query_engine/pgvector_sql_query_engine.ipynb b/docs/examples/query_engine/pgvector_sql_query_engine.ipynb
index 4bb18f6a7cb64db6fed4e549acfc7b79b63a5f85..f8a47776c721ae33caed03b31621a93202686e54 100644
--- a/docs/examples/query_engine/pgvector_sql_query_engine.ipynb
+++ b/docs/examples/query_engine/pgvector_sql_query_engine.ipynb
@@ -339,7 +339,9 @@
     "sql_database = SQLDatabase(engine, include_tables=[\"sec_text_chunk\"])\n",
     "\n",
     "llm = OpenAI(model=\"gpt-4\")\n",
-    "service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)\n",
+    "service_context = ServiceContext.from_defaults(\n",
+    "    llm=llm, embed_model=embed_model\n",
+    ")\n",
     "\n",
     "table_desc = \"\"\"\\\n",
     "This table represents text chunks from an SEC filing. Each row contains the following columns:\n",
diff --git a/docs/examples/query_engine/pydantic_query_engine.ipynb b/docs/examples/query_engine/pydantic_query_engine.ipynb
index 1238647449ac76a4bdefa6d51ab2dbcd37771eeb..d555086a9b2206997f10e3f10d4bd9d603251f63 100644
--- a/docs/examples/query_engine/pydantic_query_engine.ipynb
+++ b/docs/examples/query_engine/pydantic_query_engine.ipynb
@@ -97,7 +97,9 @@
     "llm = OpenAI(model=\"gpt-3.5-turbo\", temperature=0.1)\n",
     "service_context = ServiceContext.from_defaults(llm=llm)\n",
     "\n",
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")"
    ]
   },
   {
@@ -106,7 +108,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "query_engine = index.as_query_engine(output_cls=Biography, response_mode=\"compact\")"
+    "query_engine = index.as_query_engine(\n",
+    "    output_cls=Biography, response_mode=\"compact\"\n",
+    ")"
    ]
   },
   {
@@ -189,7 +193,9 @@
     "llm = Anthropic(model=\"claude-instant-1.2\", temperature=0.1)\n",
     "service_context = ServiceContext.from_defaults(llm=llm)\n",
     "\n",
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")"
    ]
   },
   {
@@ -289,7 +295,9 @@
     "llm = OpenAI(model=\"gpt-3.5-turbo\", temperature=0.1)\n",
     "service_context = ServiceContext.from_defaults(llm=llm)\n",
     "\n",
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")"
    ]
   },
   {
@@ -298,7 +306,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "query_engine = index.as_query_engine(output_cls=Company, response_mode=\"accumulate\")"
+    "query_engine = index.as_query_engine(\n",
+    "    output_cls=Company, response_mode=\"accumulate\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/query_engine/recursive_retriever_agents.ipynb b/docs/examples/query_engine/recursive_retriever_agents.ipynb
index 5bde49d595f47684f332ff972dda019670289db9..046220da04311f98034f75ddc1ecc2feedcba6e0 100644
--- a/docs/examples/query_engine/recursive_retriever_agents.ipynb
+++ b/docs/examples/query_engine/recursive_retriever_agents.ipynb
@@ -168,14 +168,19 @@
     "            query_engine=vector_query_engine,\n",
     "            metadata=ToolMetadata(\n",
     "                name=\"vector_tool\",\n",
-    "                description=f\"Useful for summarization questions related to {wiki_title}\",\n",
+    "                description=(\n",
+    "                    \"Useful for summarization questions related to\"\n",
+    "                    f\" {wiki_title}\"\n",
+    "                ),\n",
     "            ),\n",
     "        ),\n",
     "        QueryEngineTool(\n",
     "            query_engine=list_query_engine,\n",
     "            metadata=ToolMetadata(\n",
     "                name=\"summary_tool\",\n",
-    "                description=f\"Useful for retrieving specific context from {wiki_title}\",\n",
+    "                description=(\n",
+    "                    f\"Useful for retrieving specific context from {wiki_title}\"\n",
+    "                ),\n",
     "            ),\n",
     "        ),\n",
     "    ]\n",
@@ -215,9 +220,10 @@
     "for wiki_title in wiki_titles:\n",
     "    # define index node that links to these agents\n",
     "    wiki_summary = (\n",
-    "        f\"This content contains Wikipedia articles about {wiki_title}. \"\n",
-    "        f\"Use this index if you need to lookup specific facts about {wiki_title}.\\n\"\n",
-    "        \"Do not use this index if you want to analyze multiple cities.\"\n",
+    "        f\"This content contains Wikipedia articles about {wiki_title}. Use\"\n",
+    "        \" this index if you need to lookup specific facts about\"\n",
+    "        f\" {wiki_title}.\\nDo not use this index if you want to analyze\"\n",
+    "        \" multiple cities.\"\n",
     "    )\n",
     "    node = IndexNode(text=wiki_summary, index_id=wiki_title)\n",
     "    nodes.append(node)"
diff --git a/docs/examples/query_engine/sec_tables/tesla_10q_table.ipynb b/docs/examples/query_engine/sec_tables/tesla_10q_table.ipynb
index 5f485e1d5c74e1f32a46ea7e37cfdf0d5f3756f7..1349c693151aef6f15c211a69eb8696f97fa08f8 100644
--- a/docs/examples/query_engine/sec_tables/tesla_10q_table.ipynb
+++ b/docs/examples/query_engine/sec_tables/tesla_10q_table.ipynb
@@ -217,10 +217,14 @@
     }
    ],
    "source": [
-    "example_index_node = [b for b in base_nodes_2021 if isinstance(b, IndexNode)][20]\n",
+    "example_index_node = [b for b in base_nodes_2021 if isinstance(b, IndexNode)][\n",
+    "    20\n",
+    "]\n",
     "\n",
     "# Index Node\n",
-    "print(f\"\\n--------\\n{example_index_node.get_content(metadata_mode='all')}\\n--------\\n\")\n",
+    "print(\n",
+    "    f\"\\n--------\\n{example_index_node.get_content(metadata_mode='all')}\\n--------\\n\"\n",
+    ")\n",
     "# Index Node ID\n",
     "print(f\"\\n--------\\nIndex ID: {example_index_node.index_id}\\n--------\\n\")\n",
     "# Referenceed Table\n",
@@ -497,7 +501,9 @@
     "        if nodes_save_path is not None:\n",
     "            pickle.dump(raw_nodes, open(nodes_save_path, \"wb\"))\n",
     "\n",
-    "    base_nodes, node_mappings = node_parser.get_base_nodes_and_mappings(raw_nodes)\n",
+    "    base_nodes, node_mappings = node_parser.get_base_nodes_and_mappings(\n",
+    "        raw_nodes\n",
+    "    )\n",
     "\n",
     "    ### Construct Retrievers\n",
     "    # construct top-level vector index + query engine\n",
@@ -595,14 +601,18 @@
     "        query_engine=query_engine_2021,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"tesla_2021_10k\",\n",
-    "            description=\"Provides information about Tesla financials for year 2021\",\n",
+    "            description=(\n",
+    "                \"Provides information about Tesla financials for year 2021\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "    QueryEngineTool(\n",
     "        query_engine=query_engine_2020,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"tesla_2020_10k\",\n",
-    "            description=\"Provides information about Tesla financials for year 2020\",\n",
+    "            description=(\n",
+    "                \"Provides information about Tesla financials for year 2020\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "]\n",
@@ -732,23 +742,31 @@
    "outputs": [],
    "source": [
     "vector_index_2021 = VectorStoreIndex(nodes_2021)\n",
-    "vector_query_engine_2021 = vector_index_2021.as_query_engine(similarity_top_k=2)\n",
+    "vector_query_engine_2021 = vector_index_2021.as_query_engine(\n",
+    "    similarity_top_k=2\n",
+    ")\n",
     "vector_index_2020 = VectorStoreIndex(nodes_2020)\n",
-    "vector_query_engine_2020 = vector_index_2020.as_query_engine(similarity_top_k=2)\n",
+    "vector_query_engine_2020 = vector_index_2020.as_query_engine(\n",
+    "    similarity_top_k=2\n",
+    ")\n",
     "# setup base query engine as tool\n",
     "query_engine_tools = [\n",
     "    QueryEngineTool(\n",
     "        query_engine=vector_query_engine_2021,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"tesla_2021_10k\",\n",
-    "            description=\"Provides information about Tesla financials for year 2021\",\n",
+    "            description=(\n",
+    "                \"Provides information about Tesla financials for year 2021\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "    QueryEngineTool(\n",
     "        query_engine=vector_query_engine_2020,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"tesla_2020_10k\",\n",
-    "            description=\"Provides information about Tesla financials for year 2020\",\n",
+    "            description=(\n",
+    "                \"Provides information about Tesla financials for year 2020\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "]\n",
diff --git a/docs/examples/query_engine/sub_question_query_engine.ipynb b/docs/examples/query_engine/sub_question_query_engine.ipynb
index 67e06432aa3668ccfa1b6b15feea45bf943a2d78..bbcb2b0adaf6add1f46803b10fe363e5b49b0117 100644
--- a/docs/examples/query_engine/sub_question_query_engine.ipynb
+++ b/docs/examples/query_engine/sub_question_query_engine.ipynb
@@ -64,7 +64,9 @@
     "# captured by the SUB_QUESTION callback event type\n",
     "llama_debug = LlamaDebugHandler(print_trace_on_end=True)\n",
     "callback_manager = CallbackManager([llama_debug])\n",
-    "service_context = ServiceContext.from_defaults(callback_manager=callback_manager)"
+    "service_context = ServiceContext.from_defaults(\n",
+    "    callback_manager=callback_manager\n",
+    ")"
    ]
   },
   {
@@ -118,7 +120,8 @@
     "    QueryEngineTool(\n",
     "        query_engine=vector_query_engine,\n",
     "        metadata=ToolMetadata(\n",
-    "            name=\"pg_essay\", description=\"Paul Graham essay on What I Worked On\"\n",
+    "            name=\"pg_essay\",\n",
+    "            description=\"Paul Graham essay on What I Worked On\",\n",
     "        ),\n",
     "    ),\n",
     "]\n",
diff --git a/docs/examples/query_transformations/HyDEQueryTransformDemo.ipynb b/docs/examples/query_transformations/HyDEQueryTransformDemo.ipynb
index 21f1500e6129fa1b889fbfc568fdf2203c6014aa..3b9533faa84d05ba6c0b4b4a03c1ccccadeb124f 100644
--- a/docs/examples/query_transformations/HyDEQueryTransformDemo.ipynb
+++ b/docs/examples/query_transformations/HyDEQueryTransformDemo.ipynb
@@ -32,7 +32,9 @@
     "\n",
     "from llama_index import VectorStoreIndex, SimpleDirectoryReader\n",
     "from llama_index.indices.query.query_transform import HyDEQueryTransform\n",
-    "from llama_index.query_engine.transform_query_engine import TransformQueryEngine\n",
+    "from llama_index.query_engine.transform_query_engine import (\n",
+    "    TransformQueryEngine,\n",
+    ")\n",
     "from IPython.display import Markdown, display"
    ]
   },
diff --git a/docs/examples/query_transformations/SimpleIndexDemo-multistep.ipynb b/docs/examples/query_transformations/SimpleIndexDemo-multistep.ipynb
index 19036865320e7f64c34191678489072517bae59b..3c0f98128b85761e61d92b58ccbb38c02ce021d1 100644
--- a/docs/examples/query_transformations/SimpleIndexDemo-multistep.ipynb
+++ b/docs/examples/query_transformations/SimpleIndexDemo-multistep.ipynb
@@ -97,7 +97,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.query.query_transform.base import StepDecomposeQueryTransform\n",
+    "from llama_index.indices.query.query_transform.base import (\n",
+    "    StepDecomposeQueryTransform,\n",
+    ")\n",
     "from llama_index import LLMPredictor\n",
     "\n",
     "# gpt-4\n",
@@ -129,7 +131,9 @@
    "outputs": [],
    "source": [
     "# set Logging to DEBUG for more detailed outputs\n",
-    "from llama_index.query_engine.multistep_query_engine import MultiStepQueryEngine\n",
+    "from llama_index.query_engine.multistep_query_engine import (\n",
+    "    MultiStepQueryEngine,\n",
+    ")\n",
     "\n",
     "query_engine = index.as_query_engine(service_context=service_context_gpt4)\n",
     "query_engine = MultiStepQueryEngine(\n",
@@ -138,7 +142,8 @@
     "    index_summary=index_summary,\n",
     ")\n",
     "response_gpt4 = query_engine.query(\n",
-    "    \"Who was in the first batch of the accelerator program the author started?\",\n",
+    "    \"Who was in the first batch of the accelerator program the author\"\n",
+    "    \" started?\",\n",
     ")"
    ]
   },
diff --git a/docs/examples/retrievers/auto_merging_retriever.ipynb b/docs/examples/retrievers/auto_merging_retriever.ipynb
index 31c7b5be405c2f8b13ccc50dece15dbf52b364b3..f9da3355babae1097622eb832b1acdefa2a5bd75 100644
--- a/docs/examples/retrievers/auto_merging_retriever.ipynb
+++ b/docs/examples/retrievers/auto_merging_retriever.ipynb
@@ -253,7 +253,9 @@
     "# define storage context (will include vector store by default too)\n",
     "storage_context = StorageContext.from_defaults(docstore=docstore)\n",
     "\n",
-    "service_context = ServiceContext.from_defaults(llm=OpenAI(model=\"gpt-3.5-turbo\"))"
+    "service_context = ServiceContext.from_defaults(\n",
+    "    llm=OpenAI(model=\"gpt-3.5-turbo\")\n",
+    ")"
    ]
   },
   {
@@ -267,7 +269,9 @@
     "from llama_index import VectorStoreIndex\n",
     "\n",
     "base_index = VectorStoreIndex(\n",
-    "    leaf_nodes, storage_context=storage_context, service_context=service_context\n",
+    "    leaf_nodes,\n",
+    "    storage_context=storage_context,\n",
+    "    service_context=service_context,\n",
     ")"
    ]
   },
@@ -320,7 +324,10 @@
    "source": [
     "# query_str = \"What were some lessons learned from red-teaming?\"\n",
     "# query_str = \"Can you tell me about the key concepts for safety finetuning\"\n",
-    "query_str = \"What could be the potential outcomes of adjusting the amount of safety data used in the RLHF stage?\"\n",
+    "query_str = (\n",
+    "    \"What could be the potential outcomes of adjusting the amount of safety\"\n",
+    "    \" data used in the RLHF stage?\"\n",
+    ")\n",
     "\n",
     "nodes = retriever.retrieve(query_str)\n",
     "base_nodes = base_retriever.retrieve(query_str)"
@@ -776,7 +783,9 @@
    "outputs": [],
    "source": [
     "# optional\n",
-    "eval_dataset = QueryResponseDataset.from_json(\"data/llama2_eval_qr_dataset.json\")"
+    "eval_dataset = QueryResponseDataset.from_json(\n",
+    "    \"data/llama2_eval_qr_dataset.json\"\n",
+    ")"
    ]
   },
   {
@@ -877,7 +886,9 @@
     }
    ],
    "source": [
-    "base_pred_responses = get_responses(eval_qs, base_query_engine, show_progress=True)"
+    "base_pred_responses = get_responses(\n",
+    "    eval_qs, base_query_engine, show_progress=True\n",
+    ")"
    ]
   },
   {
@@ -1041,9 +1052,13 @@
    "outputs": [],
    "source": [
     "pairwise_eval_results = await batch_runner.aevaluate_response_strs(\n",
-    "    eval_qs, response_strs=pred_response_strs, reference=base_pred_response_strs\n",
+    "    eval_qs,\n",
+    "    response_strs=pred_response_strs,\n",
+    "    reference=base_pred_response_strs,\n",
     ")\n",
-    "pairwise_score = np.array([r.score for r in pairwise_eval_results[\"pairwise\"]]).mean()"
+    "pairwise_score = np.array(\n",
+    "    [r.score for r in pairwise_eval_results[\"pairwise\"]]\n",
+    ").mean()"
    ]
   },
   {
diff --git a/docs/examples/retrievers/auto_vs_recursive_retriever.ipynb b/docs/examples/retrievers/auto_vs_recursive_retriever.ipynb
index 73d8d2dd50d4879b966e8edb54ec62efd226cbe3..8d9e62c167e343399406e8db2c7217b691b5d019 100644
--- a/docs/examples/retrievers/auto_vs_recursive_retriever.ipynb
+++ b/docs/examples/retrievers/auto_vs_recursive_retriever.ipynb
@@ -116,7 +116,9 @@
     "# Load all wiki documents\n",
     "docs_dict = {}\n",
     "for wiki_title in wiki_titles:\n",
-    "    doc = SimpleDirectoryReader(input_files=[f\"data/{wiki_title}.txt\"]).load_data()[0]\n",
+    "    doc = SimpleDirectoryReader(\n",
+    "        input_files=[f\"data/{wiki_title}.txt\"]\n",
+    "    ).load_data()[0]\n",
     "\n",
     "    doc.metadata.update(wiki_metadatas[wiki_title])\n",
     "    docs_dict[wiki_title] = doc"
@@ -216,7 +218,9 @@
     "from llama_index.storage.storage_context import StorageContext\n",
     "\n",
     "# If you want to load the index later, be sure to give it a name!\n",
-    "vector_store = WeaviateVectorStore(weaviate_client=client, index_name=\"LlamaIndex\")\n",
+    "vector_store = WeaviateVectorStore(\n",
+    "    weaviate_client=client, index_name=\"LlamaIndex\"\n",
+    ")\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
     "\n",
     "# NOTE: you may also choose to define a index_name manually.\n",
@@ -404,7 +408,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever\n",
+    "from llama_index.indices.vector_store.retrievers import (\n",
+    "    VectorIndexAutoRetriever,\n",
+    ")\n",
     "from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n",
     "\n",
     "\n",
@@ -414,12 +420,18 @@
     "        MetadataInfo(\n",
     "            name=\"category\",\n",
     "            type=\"str\",\n",
-    "            description=\"Category of the celebrity, one of [Sports, Entertainment, Business, Music]\",\n",
+    "            description=(\n",
+    "                \"Category of the celebrity, one of [Sports, Entertainment,\"\n",
+    "                \" Business, Music]\"\n",
+    "            ),\n",
     "        ),\n",
     "        MetadataInfo(\n",
     "            name=\"country\",\n",
     "            type=\"str\",\n",
-    "            description=\"Country of the celebrity, one of [United States, Barbados, Portugal]\",\n",
+    "            description=(\n",
+    "                \"Country of the celebrity, one of [United States, Barbados,\"\n",
+    "                \" Portugal]\"\n",
+    "            ),\n",
     "        ),\n",
     "    ],\n",
     ")\n",
@@ -935,7 +947,8 @@
    ],
    "source": [
     "nodes = retriever.retrieve(\n",
-    "    \"Tell me about the childhood of a popular sports celebrity in the United States\"\n",
+    "    \"Tell me about the childhood of a popular sports celebrity in the United\"\n",
+    "    \" States\"\n",
     ")\n",
     "for node in nodes:\n",
     "    print(node.node.get_content())"
@@ -971,7 +984,8 @@
    ],
    "source": [
     "nodes = retriever.retrieve(\n",
-    "    \"Tell me about the college life of a billionaire who started at company at the age of 16\"\n",
+    "    \"Tell me about the college life of a billionaire who started at company at\"\n",
+    "    \" the age of 16\"\n",
     ")\n",
     "for node in nodes:\n",
     "    print(node.node.get_content())"
@@ -1083,8 +1097,12 @@
     "            [docs_dict[wiki_title]], service_context=service_context\n",
     "        )\n",
     "\n",
-    "        summarizer = summary_index.as_query_engine(response_mode=\"tree_summarize\")\n",
-    "        response = await summarizer.aquery(f\"Give me a summary of {wiki_title}\")\n",
+    "        summarizer = summary_index.as_query_engine(\n",
+    "            response_mode=\"tree_summarize\"\n",
+    "        )\n",
+    "        response = await summarizer.aquery(\n",
+    "            f\"Give me a summary of {wiki_title}\"\n",
+    "        )\n",
     "\n",
     "        wiki_summary = response.response\n",
     "        Path(\"summaries\").mkdir(exist_ok=True)\n",
@@ -1180,7 +1198,9 @@
    ],
    "source": [
     "# ?\n",
-    "nodes = recursive_retriever.retrieve(\"Tell me about a celebrity from the United States\")\n",
+    "nodes = recursive_retriever.retrieve(\n",
+    "    \"Tell me about a celebrity from the United States\"\n",
+    ")\n",
     "for node in nodes:\n",
     "    print(node.node.get_content())"
    ]
@@ -1223,7 +1243,8 @@
    ],
    "source": [
     "nodes = recursive_retriever.retrieve(\n",
-    "    \"Tell me about the childhood of a billionaire who started at company at the age of 16\"\n",
+    "    \"Tell me about the childhood of a billionaire who started at company at\"\n",
+    "    \" the age of 16\"\n",
     ")\n",
     "for node in nodes:\n",
     "    print(node.node.get_content())"
diff --git a/docs/examples/retrievers/bm25_retriever.ipynb b/docs/examples/retrievers/bm25_retriever.ipynb
index 26f2381ece9c2cd39e4116267f892284c2397fb0..60ae7c5293ead6ef82f20fd7bfb18c0a1f8ac52e 100644
--- a/docs/examples/retrievers/bm25_retriever.ipynb
+++ b/docs/examples/retrievers/bm25_retriever.ipynb
@@ -70,7 +70,9 @@
     "    VectorStoreIndex,\n",
     ")\n",
     "from llama_index.retrievers import BM25Retriever\n",
-    "from llama_index.indices.vector_store.retrievers.retriever import VectorIndexRetriever\n",
+    "from llama_index.indices.vector_store.retrievers.retriever import (\n",
+    "    VectorIndexRetriever,\n",
+    ")\n",
     "from llama_index.llms import OpenAI"
    ]
   },
@@ -594,10 +596,14 @@
    "source": [
     "from llama_index import QueryBundle\n",
     "\n",
-    "nodes = hybrid_retriever.retrieve(\"What is the impact of climate change on the ocean?\")\n",
+    "nodes = hybrid_retriever.retrieve(\n",
+    "    \"What is the impact of climate change on the ocean?\"\n",
+    ")\n",
     "reranked_nodes = reranker.postprocess_nodes(\n",
     "    nodes,\n",
-    "    query_bundle=QueryBundle(\"What is the impact of climate change on the ocean?\"),\n",
+    "    query_bundle=QueryBundle(\n",
+    "        \"What is the impact of climate change on the ocean?\"\n",
+    "    ),\n",
     ")\n",
     "\n",
     "print(\"Initial retrieval: \", len(nodes), \" nodes\")\n",
@@ -703,7 +709,9 @@
     "    service_context=service_context,\n",
     ")\n",
     "\n",
-    "response = query_engine.query(\"What is the impact of climate change on the ocean?\")"
+    "response = query_engine.query(\n",
+    "    \"What is the impact of climate change on the ocean?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/retrievers/ensemble_retrieval.ipynb b/docs/examples/retrievers/ensemble_retrieval.ipynb
index 40f65c9f8ac9859e5e155d4ed10df0fafbbf9c06..31e67cc9f362edc303d95d05c2191a1d8dad8d6e 100644
--- a/docs/examples/retrievers/ensemble_retrieval.ipynb
+++ b/docs/examples/retrievers/ensemble_retrieval.ipynb
@@ -189,7 +189,9 @@
     "query_engines = []\n",
     "for chunk_size in chunk_sizes:\n",
     "    print(f\"Chunk Size: {chunk_size}\")\n",
-    "    service_context = ServiceContext.from_defaults(chunk_size=chunk_size, llm=llm)\n",
+    "    service_context = ServiceContext.from_defaults(\n",
+    "        chunk_size=chunk_size, llm=llm\n",
+    "    )\n",
     "    service_contexts.append(service_context)\n",
     "    nodes = service_context.node_parser.get_nodes_from_documents(docs)\n",
     "\n",
@@ -243,7 +245,10 @@
     "for chunk_size, vector_index in zip(chunk_sizes, vector_indices):\n",
     "    node_id = f\"chunk_{chunk_size}\"\n",
     "    node = IndexNode(\n",
-    "        text=f\"Retrieves relevant context from the Llama 2 paper (chunk size {chunk_size})\",\n",
+    "        text=(\n",
+    "            \"Retrieves relevant context from the Llama 2 paper (chunk size\"\n",
+    "            f\" {chunk_size})\"\n",
+    "        ),\n",
     "        index_id=node_id,\n",
     "    )\n",
     "    retriever_nodes.append(node)\n",
@@ -368,7 +373,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "response = query_engine.query(\"Tell me about the main aspects of safety fine-tuning\")"
+    "response = query_engine.query(\n",
+    "    \"Tell me about the main aspects of safety fine-tuning\"\n",
+    ")"
    ]
   },
   {
@@ -573,7 +580,9 @@
    "outputs": [],
    "source": [
     "# optional\n",
-    "eval_dataset = QueryResponseDataset.from_json(\"data/llama2_eval_qr_dataset.json\")"
+    "eval_dataset = QueryResponseDataset.from_json(\n",
+    "    \"data/llama2_eval_qr_dataset.json\"\n",
+    ")"
    ]
   },
   {
@@ -618,7 +627,9 @@
     "evaluator_r = RelevancyEvaluator(service_context=eval_service_context)\n",
     "evaluator_f = FaithfulnessEvaluator(service_context=eval_service_context)\n",
     "\n",
-    "pairwise_evaluator = PairwiseComparisonEvaluator(service_context=eval_service_context)"
+    "pairwise_evaluator = PairwiseComparisonEvaluator(\n",
+    "    service_context=eval_service_context\n",
+    ")"
    ]
   },
   {
@@ -664,7 +675,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "pred_responses = get_responses(eval_qs[:max_samples], query_engine, show_progress=True)"
+    "pred_responses = get_responses(\n",
+    "    eval_qs[:max_samples], query_engine, show_progress=True\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/retrievers/reciprocal_rerank_fusion.ipynb b/docs/examples/retrievers/reciprocal_rerank_fusion.ipynb
index e0fc16e49c569c8e865ccdb83d48cc45a6735d46..c53b79518e7182a0138c84a259b8a0d303ee632d 100644
--- a/docs/examples/retrievers/reciprocal_rerank_fusion.ipynb
+++ b/docs/examples/retrievers/reciprocal_rerank_fusion.ipynb
@@ -61,7 +61,9 @@
     "\n",
     "service_context = ServiceContext.from_defaults(chunk_size=256)\n",
     "\n",
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")"
    ]
   },
   {
@@ -166,7 +168,9 @@
     }
    ],
    "source": [
-    "nodes_with_scores = retriever.retrieve(\"What happened at Interleafe and Viaweb?\")"
+    "nodes_with_scores = retriever.retrieve(\n",
+    "    \"What happened at Interleafe and Viaweb?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/retrievers/recurisve_retriever_nodes_braintrust.ipynb b/docs/examples/retrievers/recurisve_retriever_nodes_braintrust.ipynb
index 3011db6fa603542ad96b8fb200e671a1c1c64593..09a3f2bf6eff7e60be8996c322fb1e3cc742e4a7 100644
--- a/docs/examples/retrievers/recurisve_retriever_nodes_braintrust.ipynb
+++ b/docs/examples/retrievers/recurisve_retriever_nodes_braintrust.ipynb
@@ -157,7 +157,9 @@
     "\n",
     "embed_model = resolve_embed_model(\"local:BAAI/bge-small-en\")\n",
     "llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
-    "service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)"
+    "service_context = ServiceContext.from_defaults(\n",
+    "    llm=llm, embed_model=embed_model\n",
+    ")"
    ]
   },
   {
@@ -285,7 +287,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vector_index_chunk = VectorStoreIndex(all_nodes, service_context=service_context)"
+    "vector_index_chunk = VectorStoreIndex(\n",
+    "    all_nodes, service_context=service_context\n",
+    ")"
    ]
   },
   {
@@ -461,9 +465,12 @@
     "all_nodes = copy.deepcopy(base_nodes)\n",
     "for idx, d in enumerate(metadata_dicts):\n",
     "    inode_q = IndexNode(\n",
-    "        text=d[\"questions_this_excerpt_can_answer\"], index_id=base_nodes[idx].node_id\n",
+    "        text=d[\"questions_this_excerpt_can_answer\"],\n",
+    "        index_id=base_nodes[idx].node_id,\n",
+    "    )\n",
+    "    inode_s = IndexNode(\n",
+    "        text=d[\"section_summary\"], index_id=base_nodes[idx].node_id\n",
     "    )\n",
-    "    inode_s = IndexNode(text=d[\"section_summary\"], index_id=base_nodes[idx].node_id)\n",
     "    all_nodes.extend([inode_q, inode_s])"
    ]
   },
@@ -491,7 +498,9 @@
     "llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
     "service_context = ServiceContext.from_defaults(llm=llm)\n",
     "\n",
-    "vector_index_metadata = VectorStoreIndex(all_nodes, service_context=service_context)"
+    "vector_index_metadata = VectorStoreIndex(\n",
+    "    all_nodes, service_context=service_context\n",
+    ")"
    ]
   },
   {
@@ -501,7 +510,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vector_retriever_metadata = vector_index_metadata.as_retriever(similarity_top_k=2)"
+    "vector_retriever_metadata = vector_index_metadata.as_retriever(\n",
+    "    similarity_top_k=2\n",
+    ")"
    ]
   },
   {
@@ -629,7 +640,9 @@
    "outputs": [],
    "source": [
     "# optional\n",
-    "eval_dataset = EmbeddingQAFinetuneDataset.from_json(\"data/llama2_eval_dataset.json\")"
+    "eval_dataset = EmbeddingQAFinetuneDataset.from_json(\n",
+    "    \"data/llama2_eval_dataset.json\"\n",
+    ")"
    ]
   },
   {
@@ -674,7 +687,9 @@
     "        hit_rates.append(hit_rate)\n",
     "        mrrs.append(mrr)\n",
     "\n",
-    "    final_df = pd.DataFrame({\"retrievers\": names, \"hit_rate\": hit_rates, \"mrr\": mrrs})\n",
+    "    final_df = pd.DataFrame(\n",
+    "        {\"retrievers\": names, \"hit_rate\": hit_rates, \"mrr\": mrrs}\n",
+    "    )\n",
     "    display(final_df)"
    ]
   },
@@ -755,7 +770,9 @@
    "source": [
     "# Evaluate the metadata retriever\n",
     "\n",
-    "vector_retriever_metadata = vector_index_metadata.as_retriever(similarity_top_k=10)\n",
+    "vector_retriever_metadata = vector_index_metadata.as_retriever(\n",
+    "    similarity_top_k=10\n",
+    ")\n",
     "retriever_metadata = RecursiveRetriever(\n",
     "    \"vector\",\n",
     "    retriever_dict={\"vector\": vector_retriever_metadata},\n",
diff --git a/docs/examples/retrievers/recursive_retriever_nodes.ipynb b/docs/examples/retrievers/recursive_retriever_nodes.ipynb
index f12c094701c66e85447b947067881a98da9b7e0b..45a588991e9f2d3e0adde0472fc30949ac0c3829 100644
--- a/docs/examples/retrievers/recursive_retriever_nodes.ipynb
+++ b/docs/examples/retrievers/recursive_retriever_nodes.ipynb
@@ -144,7 +144,9 @@
     "\n",
     "embed_model = resolve_embed_model(\"local:BAAI/bge-small-en\")\n",
     "llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
-    "service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)"
+    "service_context = ServiceContext.from_defaults(\n",
+    "    llm=llm, embed_model=embed_model\n",
+    ")"
    ]
   },
   {
@@ -271,7 +273,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vector_index_chunk = VectorStoreIndex(all_nodes, service_context=service_context)"
+    "vector_index_chunk = VectorStoreIndex(\n",
+    "    all_nodes, service_context=service_context\n",
+    ")"
    ]
   },
   {
@@ -447,9 +451,12 @@
     "all_nodes = copy.deepcopy(base_nodes)\n",
     "for idx, d in enumerate(metadata_dicts):\n",
     "    inode_q = IndexNode(\n",
-    "        text=d[\"questions_this_excerpt_can_answer\"], index_id=base_nodes[idx].node_id\n",
+    "        text=d[\"questions_this_excerpt_can_answer\"],\n",
+    "        index_id=base_nodes[idx].node_id,\n",
+    "    )\n",
+    "    inode_s = IndexNode(\n",
+    "        text=d[\"section_summary\"], index_id=base_nodes[idx].node_id\n",
     "    )\n",
-    "    inode_s = IndexNode(text=d[\"section_summary\"], index_id=base_nodes[idx].node_id)\n",
     "    all_nodes.extend([inode_q, inode_s])"
    ]
   },
@@ -477,7 +484,9 @@
     "llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
     "service_context = ServiceContext.from_defaults(llm=llm)\n",
     "\n",
-    "vector_index_metadata = VectorStoreIndex(all_nodes, service_context=service_context)"
+    "vector_index_metadata = VectorStoreIndex(\n",
+    "    all_nodes, service_context=service_context\n",
+    ")"
    ]
   },
   {
@@ -487,7 +496,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vector_retriever_metadata = vector_index_metadata.as_retriever(similarity_top_k=2)"
+    "vector_retriever_metadata = vector_index_metadata.as_retriever(\n",
+    "    similarity_top_k=2\n",
+    ")"
    ]
   },
   {
@@ -612,7 +623,9 @@
    "outputs": [],
    "source": [
     "# optional\n",
-    "eval_dataset = EmbeddingQAFinetuneDataset.from_json(\"data/llama2_eval_dataset.json\")"
+    "eval_dataset = EmbeddingQAFinetuneDataset.from_json(\n",
+    "    \"data/llama2_eval_dataset.json\"\n",
+    ")"
    ]
   },
   {
@@ -658,7 +671,9 @@
     "        hit_rates.append(hit_rate)\n",
     "        mrrs.append(mrr)\n",
     "\n",
-    "    final_df = pd.DataFrame({\"retrievers\": names, \"hit_rate\": hit_rates, \"mrr\": mrrs})\n",
+    "    final_df = pd.DataFrame(\n",
+    "        {\"retrievers\": names, \"hit_rate\": hit_rates, \"mrr\": mrrs}\n",
+    "    )\n",
     "    display(final_df)"
    ]
   },
@@ -669,7 +684,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vector_retriever_chunk = vector_index_chunk.as_retriever(similarity_top_k=top_k)\n",
+    "vector_retriever_chunk = vector_index_chunk.as_retriever(\n",
+    "    similarity_top_k=top_k\n",
+    ")\n",
     "retriever_chunk = RecursiveRetriever(\n",
     "    \"vector\",\n",
     "    retriever_dict={\"vector\": vector_retriever_chunk},\n",
@@ -692,7 +709,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vector_retriever_metadata = vector_index_metadata.as_retriever(similarity_top_k=top_k)\n",
+    "vector_retriever_metadata = vector_index_metadata.as_retriever(\n",
+    "    similarity_top_k=top_k\n",
+    ")\n",
     "retriever_metadata = RecursiveRetriever(\n",
     "    \"vector\",\n",
     "    retriever_dict={\"vector\": vector_retriever_metadata},\n",
diff --git a/docs/examples/retrievers/router_retriever.ipynb b/docs/examples/retrievers/router_retriever.ipynb
index bfb8df0eee0a4d73c5d9dbf21ee5b472cc9cab60..21ec9ec86c0c19042a26d546d1355cd8cd6e2732 100644
--- a/docs/examples/retrievers/router_retriever.ipynb
+++ b/docs/examples/retrievers/router_retriever.ipynb
@@ -153,15 +153,24 @@
     "\n",
     "list_tool = RetrieverTool.from_defaults(\n",
     "    retriever=list_retriever,\n",
-    "    description=\"Will retrieve all context from Paul Graham's essay on What I Worked On. Don't use if the question only requires more specific context.\",\n",
+    "    description=(\n",
+    "        \"Will retrieve all context from Paul Graham's essay on What I Worked\"\n",
+    "        \" On. Don't use if the question only requires more specific context.\"\n",
+    "    ),\n",
     ")\n",
     "vector_tool = RetrieverTool.from_defaults(\n",
     "    retriever=vector_retriever,\n",
-    "    description=\"Useful for retrieving specific context from Paul Graham essay on What I Worked On.\",\n",
+    "    description=(\n",
+    "        \"Useful for retrieving specific context from Paul Graham essay on What\"\n",
+    "        \" I Worked On.\"\n",
+    "    ),\n",
     ")\n",
     "keyword_tool = RetrieverTool.from_defaults(\n",
     "    retriever=keyword_retriever,\n",
-    "    description=\"Useful for retrieving specific context from Paul Graham essay on What I Worked On (using entities mentioned in query)\",\n",
+    "    description=(\n",
+    "        \"Useful for retrieving specific context from Paul Graham essay on What\"\n",
+    "        \" I Worked On (using entities mentioned in query)\"\n",
+    "    ),\n",
     ")"
    ]
   },
@@ -188,7 +197,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.selectors.llm_selectors import LLMSingleSelector, LLMMultiSelector\n",
+    "from llama_index.selectors.llm_selectors import (\n",
+    "    LLMSingleSelector,\n",
+    "    LLMMultiSelector,\n",
+    ")\n",
     "from llama_index.selectors.pydantic_selectors import (\n",
     "    PydanticMultiSelector,\n",
     "    PydanticSingleSelector,\n",
diff --git a/docs/examples/usecases/10k_sub_question.ipynb b/docs/examples/usecases/10k_sub_question.ipynb
index 507731a7b7bbcec5d64003724977eadae17ea90e..18e390d4bb40f567179b84a2763cbee16aa17fe7 100644
--- a/docs/examples/usecases/10k_sub_question.ipynb
+++ b/docs/examples/usecases/10k_sub_question.ipynb
@@ -81,8 +81,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "lyft_docs = SimpleDirectoryReader(input_files=[\"../data/10k/lyft_2021.pdf\"]).load_data()\n",
-    "uber_docs = SimpleDirectoryReader(input_files=[\"../data/10k/uber_2021.pdf\"]).load_data()"
+    "lyft_docs = SimpleDirectoryReader(\n",
+    "    input_files=[\"../data/10k/lyft_2021.pdf\"]\n",
+    ").load_data()\n",
+    "uber_docs = SimpleDirectoryReader(\n",
+    "    input_files=[\"../data/10k/uber_2021.pdf\"]\n",
+    ").load_data()"
    ]
   },
   {
@@ -155,19 +159,25 @@
     "        query_engine=lyft_engine,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"lyft_10k\",\n",
-    "            description=\"Provides information about Lyft financials for year 2021\",\n",
+    "            description=(\n",
+    "                \"Provides information about Lyft financials for year 2021\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "    QueryEngineTool(\n",
     "        query_engine=uber_engine,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"uber_10k\",\n",
-    "            description=\"Provides information about Uber financials for year 2021\",\n",
+    "            description=(\n",
+    "                \"Provides information about Uber financials for year 2021\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "]\n",
     "\n",
-    "s_engine = SubQuestionQueryEngine.from_defaults(query_engine_tools=query_engine_tools)"
+    "s_engine = SubQuestionQueryEngine.from_defaults(\n",
+    "    query_engine_tools=query_engine_tools\n",
+    ")"
    ]
   },
   {
@@ -209,7 +219,8 @@
    ],
    "source": [
     "response = s_engine.query(\n",
-    "    \"Compare and contrast the customer segments and geographies that grew the fastest\"\n",
+    "    \"Compare and contrast the customer segments and geographies that grew the\"\n",
+    "    \" fastest\"\n",
     ")"
    ]
   },
@@ -260,7 +271,9 @@
     }
    ],
    "source": [
-    "response = s_engine.query(\"Compare revenue growth of Uber and Lyft from 2020 to 2021\")"
+    "response = s_engine.query(\n",
+    "    \"Compare revenue growth of Uber and Lyft from 2020 to 2021\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/usecases/10q_sub_question.ipynb b/docs/examples/usecases/10q_sub_question.ipynb
index f8fcf71e11c250ba20b64d5ea2ae5e18e413e90c..19b9b14f0e0f6419452b15dba875cd863cf5995d 100644
--- a/docs/examples/usecases/10q_sub_question.ipynb
+++ b/docs/examples/usecases/10q_sub_question.ipynb
@@ -138,21 +138,30 @@
     "        query_engine=sept_engine,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"sept_22\",\n",
-    "            description=\"Provides information about Uber quarterly financials ending September 2022\",\n",
+    "            description=(\n",
+    "                \"Provides information about Uber quarterly financials ending\"\n",
+    "                \" September 2022\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "    QueryEngineTool(\n",
     "        query_engine=june_engine,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"june_22\",\n",
-    "            description=\"Provides information about Uber quarterly financials ending June 2022\",\n",
+    "            description=(\n",
+    "                \"Provides information about Uber quarterly financials ending\"\n",
+    "                \" June 2022\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "    QueryEngineTool(\n",
     "        query_engine=march_engine,\n",
     "        metadata=ToolMetadata(\n",
     "            name=\"march_22\",\n",
-    "            description=\"Provides information about Uber quarterly financials ending March 2022\",\n",
+    "            description=(\n",
+    "                \"Provides information about Uber quarterly financials ending\"\n",
+    "                \" March 2022\"\n",
+    "            ),\n",
     "        ),\n",
     "    ),\n",
     "]"
@@ -165,7 +174,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "s_engine = SubQuestionQueryEngine.from_defaults(query_engine_tools=query_engine_tools)"
+    "s_engine = SubQuestionQueryEngine.from_defaults(\n",
+    "    query_engine_tools=query_engine_tools\n",
+    ")"
    ]
   },
   {
@@ -250,7 +261,9 @@
     }
    ],
    "source": [
-    "response = s_engine.query(\"Analyze change in macro environment over the 3 quarters\")"
+    "response = s_engine.query(\n",
+    "    \"Analyze change in macro environment over the 3 quarters\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/usecases/City_Analysis-Decompose-KeywordTable.ipynb b/docs/examples/usecases/City_Analysis-Decompose-KeywordTable.ipynb
index fdabce3002f06cb2a0f55f431fcb98c7f464104b..df03306c6f6bdaf934f95865eed21e088773f1c1 100644
--- a/docs/examples/usecases/City_Analysis-Decompose-KeywordTable.ipynb
+++ b/docs/examples/usecases/City_Analysis-Decompose-KeywordTable.ipynb
@@ -403,7 +403,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.query.query_transform.base import DecomposeQueryTransform\n",
+    "from llama_index.indices.query.query_transform.base import (\n",
+    "    DecomposeQueryTransform,\n",
+    ")\n",
     "\n",
     "decompose_transform = DecomposeQueryTransform(verbose=True)"
    ]
@@ -435,7 +437,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.query_engine.transform_query_engine import TransformQueryEngine\n",
+    "from llama_index.query_engine.transform_query_engine import (\n",
+    "    TransformQueryEngine,\n",
+    ")\n",
     "\n",
     "custom_query_engines = {}\n",
     "for index in city_indices.values():\n",
@@ -482,7 +486,9 @@
    "source": [
     "# with query decomposition in subindices\n",
     "query_engine = graph.as_query_engine(custom_query_engines=custom_query_engines)\n",
-    "query_str = \"Compare and contrast the demographics in Seattle, Houston, and Toronto. \""
+    "query_str = (\n",
+    "    \"Compare and contrast the demographics in Seattle, Houston, and Toronto. \"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/AwadbDemo.ipynb b/docs/examples/vector_stores/AwadbDemo.ipynb
index bcf9b6b439af90777252e964e9e073d4ada8613f..5bd8e439f3b3814fb39aaf09a0a8d76ba334df63 100644
--- a/docs/examples/vector_stores/AwadbDemo.ipynb
+++ b/docs/examples/vector_stores/AwadbDemo.ipynb
@@ -154,7 +154,9 @@
    "source": [
     "# set Logging to DEBUG for more detailed outputs\n",
     "query_engine = index.as_query_engine()\n",
-    "response = query_engine.query(\"What did the author do after his time at Y Combinator?\")"
+    "response = query_engine.query(\n",
+    "    \"What did the author do after his time at Y Combinator?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/BagelAutoRetriever.ipynb b/docs/examples/vector_stores/BagelAutoRetriever.ipynb
index 5b716eaa1882e8bb8420ee4120df1be8708339f1..a34c9c41b050da84e8d9c9f8443f506d2b6541e2 100644
--- a/docs/examples/vector_stores/BagelAutoRetriever.ipynb
+++ b/docs/examples/vector_stores/BagelAutoRetriever.ipynb
@@ -57,7 +57,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "server_settings = Settings(bagel_api_impl=\"rest\", bagel_server_host=\"api.bageldb.ai\")\n",
+    "server_settings = Settings(\n",
+    "    bagel_api_impl=\"rest\", bagel_server_host=\"api.bageldb.ai\"\n",
+    ")\n",
     "\n",
     "client = bagel.Client(server_settings)\n",
     "\n",
@@ -86,35 +88,56 @@
     "\n",
     "nodes = [\n",
     "    TextNode(\n",
-    "        text=\"Michael Jordan is a retired professional basketball player, widely regarded as one of the greatest basketball players of all time.\",\n",
+    "        text=(\n",
+    "            \"Michael Jordan is a retired professional basketball player,\"\n",
+    "            \" widely regarded as one of the greatest basketball players of all\"\n",
+    "            \" time.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Sports\",\n",
     "            \"country\": \"United States\",\n",
     "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Angelina Jolie is an American actress, filmmaker, and humanitarian. She has received numerous awards for her acting and is known for her philanthropic work.\",\n",
+    "        text=(\n",
+    "            \"Angelina Jolie is an American actress, filmmaker, and\"\n",
+    "            \" humanitarian. She has received numerous awards for her acting\"\n",
+    "            \" and is known for her philanthropic work.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Entertainment\",\n",
     "            \"country\": \"United States\",\n",
     "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Elon Musk is a business magnate, industrial designer, and engineer. He is the founder, CEO, and lead designer of SpaceX, Tesla, Inc., Neuralink, and The Boring Company.\",\n",
+    "        text=(\n",
+    "            \"Elon Musk is a business magnate, industrial designer, and\"\n",
+    "            \" engineer. He is the founder, CEO, and lead designer of SpaceX,\"\n",
+    "            \" Tesla, Inc., Neuralink, and The Boring Company.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Business\",\n",
     "            \"country\": \"United States\",\n",
     "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Rihanna is a Barbadian singer, actress, and businesswoman. She has achieved significant success in the music industry and is known for her versatile musical style.\",\n",
+    "        text=(\n",
+    "            \"Rihanna is a Barbadian singer, actress, and businesswoman. She\"\n",
+    "            \" has achieved significant success in the music industry and is\"\n",
+    "            \" known for her versatile musical style.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Music\",\n",
     "            \"country\": \"Barbados\",\n",
     "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Cristiano Ronaldo is a Portuguese professional footballer who is considered one of the greatest football players of all time. He has won numerous awards and set multiple records during his career.\",\n",
+    "        text=(\n",
+    "            \"Cristiano Ronaldo is a Portuguese professional footballer who is\"\n",
+    "            \" considered one of the greatest football players of all time. He\"\n",
+    "            \" has won numerous awards and set multiple records during his\"\n",
+    "            \" career.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Sports\",\n",
     "            \"country\": \"Portugal\",\n",
@@ -151,7 +174,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever\n",
+    "from llama_index.indices.vector_store.retrievers import (\n",
+    "    VectorIndexAutoRetriever,\n",
+    ")\n",
     "from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n",
     "\n",
     "\n",
@@ -161,16 +186,24 @@
     "        MetadataInfo(\n",
     "            name=\"category\",\n",
     "            type=\"str\",\n",
-    "            description=\"Category of the celebrity, one of [Sports, Entertainment, Business, Music]\",\n",
+    "            description=(\n",
+    "                \"Category of the celebrity, one of [Sports, Entertainment,\"\n",
+    "                \" Business, Music]\"\n",
+    "            ),\n",
     "        ),\n",
     "        MetadataInfo(\n",
     "            name=\"country\",\n",
     "            type=\"str\",\n",
-    "            description=\"Country of the celebrity, one of [United States, Barbados, Portugal]\",\n",
+    "            description=(\n",
+    "                \"Country of the celebrity, one of [United States, Barbados,\"\n",
+    "                \" Portugal]\"\n",
+    "            ),\n",
     "        ),\n",
     "    ],\n",
     ")\n",
-    "retriever = VectorIndexAutoRetriever(index, vector_store_info=vector_store_info)"
+    "retriever = VectorIndexAutoRetriever(\n",
+    "    index, vector_store_info=vector_store_info\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/BagelIndexDemo.ipynb b/docs/examples/vector_stores/BagelIndexDemo.ipynb
index 32d32ec071ae4dd6c60a55db071b5aa7af55dfed..bbd1f9a1c459d7cc39c45f3db6391ba9ecab424a 100644
--- a/docs/examples/vector_stores/BagelIndexDemo.ipynb
+++ b/docs/examples/vector_stores/BagelIndexDemo.ipynb
@@ -102,7 +102,9 @@
    "outputs": [],
    "source": [
     "# create server settings\n",
-    "server_settings = Settings(bagel_api_impl=\"rest\", bagel_server_host=\"api.bageldb.ai\")\n",
+    "server_settings = Settings(\n",
+    "    bagel_api_impl=\"rest\", bagel_server_host=\"api.bageldb.ai\"\n",
+    ")\n",
     "\n",
     "# create client\n",
     "client = bagel.Client(server_settings)\n",
@@ -285,7 +287,16 @@
     "            {\"uri\": \"img7.png\", \"style\": \"style1\"},\n",
     "            {\"uri\": \"img8.png\", \"style\": \"style1\"},\n",
     "        ],\n",
-    "        documents=[\"doc1\", \"doc2\", \"doc3\", \"doc4\", \"doc5\", \"doc6\", \"doc7\", \"doc8\"],\n",
+    "        documents=[\n",
+    "            \"doc1\",\n",
+    "            \"doc2\",\n",
+    "            \"doc3\",\n",
+    "            \"doc4\",\n",
+    "            \"doc5\",\n",
+    "            \"doc6\",\n",
+    "            \"doc7\",\n",
+    "            \"doc8\",\n",
+    "        ],\n",
     "        ids=[\"id1\", \"id2\", \"id3\", \"id4\", \"id5\", \"id6\", \"id7\", \"id8\"],\n",
     "    )\n",
     "\n",
diff --git a/docs/examples/vector_stores/CassandraIndexDemo.ipynb b/docs/examples/vector_stores/CassandraIndexDemo.ipynb
index 00de6a6d170096a901f45923da5b70833d56fb00..5c7f221e1d5849cd26ae7a0248d958f9a5588c4a 100644
--- a/docs/examples/vector_stores/CassandraIndexDemo.ipynb
+++ b/docs/examples/vector_stores/CassandraIndexDemo.ipynb
@@ -98,9 +98,13 @@
     "keyspace_name = input(\"\\nKeyspace name? \")\n",
     "\n",
     "if database_mode == \"A\":\n",
-    "    ASTRA_DB_APPLICATION_TOKEN = getpass.getpass('\\nAstra DB Token (\"AstraCS:...\") ')\n",
+    "    ASTRA_DB_APPLICATION_TOKEN = getpass.getpass(\n",
+    "        '\\nAstra DB Token (\"AstraCS:...\") '\n",
+    "    )\n",
     "    #\n",
-    "    ASTRA_DB_SECURE_BUNDLE_PATH = input(\"Full path to your Secure Connect Bundle? \")\n",
+    "    ASTRA_DB_SECURE_BUNDLE_PATH = input(\n",
+    "        \"Full path to your Secure Connect Bundle? \"\n",
+    "    )\n",
     "elif database_mode == \"C\":\n",
     "    CASSANDRA_CONTACT_POINTS = input(\n",
     "        \"Contact points? (comma-separated, empty for localhost) \"\n",
@@ -120,7 +124,11 @@
     "if database_mode == \"C\":\n",
     "    if CASSANDRA_CONTACT_POINTS:\n",
     "        cluster = Cluster(\n",
-    "            [cp.strip() for cp in CASSANDRA_CONTACT_POINTS.split(\",\") if cp.strip()]\n",
+    "            [\n",
+    "                cp.strip()\n",
+    "                for cp in CASSANDRA_CONTACT_POINTS.split(\",\")\n",
+    "                if cp.strip()\n",
+    "            ]\n",
     "        )\n",
     "    else:\n",
     "        cluster = Cluster()\n",
@@ -214,7 +222,8 @@
     "print(f\"First document, id: {documents[0].doc_id}\")\n",
     "print(f\"First document, hash: {documents[0].hash}\")\n",
     "print(\n",
-    "    f\"First document, text ({len(documents[0].text)} characters):\\n{'='*20}\\n{documents[0].text[:360]} ...\"\n",
+    "    \"First document, text\"\n",
+    "    f\" ({len(documents[0].text)} characters):\\n{'='*20}\\n{documents[0].text[:360]} ...\"\n",
     ")"
    ]
   },
@@ -260,7 +269,9 @@
    "source": [
     "storage_context = StorageContext.from_defaults(vector_store=cassandra_store)\n",
     "\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -364,11 +375,15 @@
     ")\n",
     "\n",
     "# Create index (from preexisting stored vectors)\n",
-    "new_index_instance = VectorStoreIndex.from_vector_store(vector_store=new_store_instance)\n",
+    "new_index_instance = VectorStoreIndex.from_vector_store(\n",
+    "    vector_store=new_store_instance\n",
+    ")\n",
     "\n",
     "# now you can do querying, etc:\n",
     "query_engine = index.as_query_engine(similarity_top_k=5)\n",
-    "response = query_engine.query(\"What did the author study prior to working on AI?\")"
+    "response = query_engine.query(\n",
+    "    \"What did the author study prior to working on AI?\"\n",
+    ")"
    ]
   },
   {
@@ -620,7 +635,9 @@
     "        filters=[ExactMatchFilter(key=\"source_type\", value=\"essay\")]\n",
     "    )\n",
     ")\n",
-    "md_response = md_query_engine.query(\"How long it took the author to write his thesis?\")\n",
+    "md_response = md_query_engine.query(\n",
+    "    \"How long it took the author to write his thesis?\"\n",
+    ")\n",
     "print(md_response.response)"
    ]
   },
diff --git a/docs/examples/vector_stores/CognitiveSearchIndexDemo.ipynb b/docs/examples/vector_stores/CognitiveSearchIndexDemo.ipynb
index e01381312b254c7e77604f0c554f1766191521f0..ece9dcf847319bde4bdd1941d0f2b00b2d44a270 100644
--- a/docs/examples/vector_stores/CognitiveSearchIndexDemo.ipynb
+++ b/docs/examples/vector_stores/CognitiveSearchIndexDemo.ipynb
@@ -511,7 +511,9 @@
     "from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n",
     "\n",
     "\n",
-    "filters = MetadataFilters(filters=[ExactMatchFilter(key=\"theme\", value=\"Mafia\")])\n",
+    "filters = MetadataFilters(\n",
+    "    filters=[ExactMatchFilter(key=\"theme\", value=\"Mafia\")]\n",
+    ")\n",
     "\n",
     "retriever = index.as_retriever(filters=filters)\n",
     "retriever.retrieve(\"What is inception about?\")"
diff --git a/docs/examples/vector_stores/DeepLakeIndexDemo.ipynb b/docs/examples/vector_stores/DeepLakeIndexDemo.ipynb
index d94bbbc506beb555205acefdc311d018e8b64174..826eecbd433018b0742572c69fe71cb70c0d88d4 100644
--- a/docs/examples/vector_stores/DeepLakeIndexDemo.ipynb
+++ b/docs/examples/vector_stores/DeepLakeIndexDemo.ipynb
@@ -74,7 +74,12 @@
    "source": [
     "# load documents\n",
     "documents = SimpleDirectoryReader(\"../paul_graham_essay/data\").load_data()\n",
-    "print(\"Document ID:\", documents[0].doc_id, \"Document Hash:\", documents[0].doc_hash)"
+    "print(\n",
+    "    \"Document ID:\",\n",
+    "    documents[0].doc_id,\n",
+    "    \"Document Hash:\",\n",
+    "    documents[0].doc_hash,\n",
+    ")"
    ]
   },
   {
@@ -152,7 +157,9 @@
     "# Create an index over the documnts\n",
     "vector_store = DeepLakeVectorStore(dataset_path=dataset_path, overwrite=True)\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/DocArrayHnswIndexDemo.ipynb b/docs/examples/vector_stores/DocArrayHnswIndexDemo.ipynb
index f23dbd1583b5e2681a6139fb9ab00fc213e5275d..17955a5172ceaa645e6c3209db44ae8daf9b430f 100644
--- a/docs/examples/vector_stores/DocArrayHnswIndexDemo.ipynb
+++ b/docs/examples/vector_stores/DocArrayHnswIndexDemo.ipynb
@@ -69,7 +69,12 @@
    "source": [
     "# load documents\n",
     "documents = SimpleDirectoryReader(\"../data/paul_graham\").load_data()\n",
-    "print(\"Document ID:\", documents[0].doc_id, \"Document Hash:\", documents[0].doc_hash)"
+    "print(\n",
+    "    \"Document ID:\",\n",
+    "    documents[0].doc_id,\n",
+    "    \"Document Hash:\",\n",
+    "    documents[0].doc_hash,\n",
+    ")"
    ]
   },
   {
@@ -93,7 +98,9 @@
     "\n",
     "vector_store = DocArrayHnswVectorStore(work_dir=\"hnsw_index\")\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = GPTVectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = GPTVectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -237,7 +244,9 @@
     "from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n",
     "\n",
     "\n",
-    "filters = MetadataFilters(filters=[ExactMatchFilter(key=\"theme\", value=\"Mafia\")])\n",
+    "filters = MetadataFilters(\n",
+    "    filters=[ExactMatchFilter(key=\"theme\", value=\"Mafia\")]\n",
+    ")\n",
     "\n",
     "retriever = index.as_retriever(filters=filters)\n",
     "retriever.retrieve(\"What is inception about?\")"
diff --git a/docs/examples/vector_stores/DocArrayInMemoryIndexDemo.ipynb b/docs/examples/vector_stores/DocArrayInMemoryIndexDemo.ipynb
index f937313a8995fcf3a14c67bab2b49657371e658e..2e9813f0fd74183c475798205c9ec18c81de5526 100644
--- a/docs/examples/vector_stores/DocArrayInMemoryIndexDemo.ipynb
+++ b/docs/examples/vector_stores/DocArrayInMemoryIndexDemo.ipynb
@@ -70,7 +70,12 @@
    "source": [
     "# load documents\n",
     "documents = SimpleDirectoryReader(\"../data/paul_graham\").load_data()\n",
-    "print(\"Document ID:\", documents[0].doc_id, \"Document Hash:\", documents[0].doc_hash)"
+    "print(\n",
+    "    \"Document ID:\",\n",
+    "    documents[0].doc_id,\n",
+    "    \"Document Hash:\",\n",
+    "    documents[0].doc_hash,\n",
+    ")"
    ]
   },
   {
@@ -94,7 +99,9 @@
     "\n",
     "vector_store = DocArrayInMemoryVectorStore()\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = GPTVectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = GPTVectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -239,7 +246,9 @@
     "from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n",
     "\n",
     "\n",
-    "filters = MetadataFilters(filters=[ExactMatchFilter(key=\"theme\", value=\"Mafia\")])\n",
+    "filters = MetadataFilters(\n",
+    "    filters=[ExactMatchFilter(key=\"theme\", value=\"Mafia\")]\n",
+    ")\n",
     "\n",
     "retriever = index.as_retriever(filters=filters)\n",
     "retriever.retrieve(\"What is inception about?\")"
diff --git a/docs/examples/vector_stores/ElasticsearchIndexDemo.ipynb b/docs/examples/vector_stores/ElasticsearchIndexDemo.ipynb
index e052dcdc900caf14aeccc9dcbe1cfa599b39b7c8..9b0b63903acdc02845693e0c313972d1313d2185 100644
--- a/docs/examples/vector_stores/ElasticsearchIndexDemo.ipynb
+++ b/docs/examples/vector_stores/ElasticsearchIndexDemo.ipynb
@@ -197,7 +197,9 @@
     "    index_name=\"paul_graham\",\n",
     ")\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -324,7 +326,9 @@
     "# Metadata filter\n",
     "from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n",
     "\n",
-    "filters = MetadataFilters(filters=[ExactMatchFilter(key=\"theme\", value=\"Mafia\")])\n",
+    "filters = MetadataFilters(\n",
+    "    filters=[ExactMatchFilter(key=\"theme\", value=\"Mafia\")]\n",
+    ")\n",
     "\n",
     "retriever = index.as_retriever(filters=filters)\n",
     "\n",
diff --git a/docs/examples/vector_stores/EpsillaIndexDemo.ipynb b/docs/examples/vector_stores/EpsillaIndexDemo.ipynb
index 748eb293dbe75a1a47c54f1d6c6248a4e418a280..1638c167a893fc60e54afd1b90fee9c233e4a15e 100644
--- a/docs/examples/vector_stores/EpsillaIndexDemo.ipynb
+++ b/docs/examples/vector_stores/EpsillaIndexDemo.ipynb
@@ -139,7 +139,9 @@
     "vector_store = EpsillaVectorStore(client=client, db_path=\"/tmp/llamastore\")\n",
     "\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/FaissIndexDemo.ipynb b/docs/examples/vector_stores/FaissIndexDemo.ipynb
index b058f021690e696d439b32363596181b5f4a50be..cd6116d28b21c0a2402ed7aa58f2b8ad3c4991cd 100644
--- a/docs/examples/vector_stores/FaissIndexDemo.ipynb
+++ b/docs/examples/vector_stores/FaissIndexDemo.ipynb
@@ -92,7 +92,9 @@
    "source": [
     "vector_store = FaissVectorStore(faiss_index=faiss_index)\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -161,7 +163,9 @@
    "source": [
     "# set Logging to DEBUG for more detailed outputs\n",
     "query_engine = index.as_query_engine()\n",
-    "response = query_engine.query(\"What did the author do after his time at Y Combinator?\")"
+    "response = query_engine.query(\n",
+    "    \"What did the author do after his time at Y Combinator?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/LanceDBIndexDemo.ipynb b/docs/examples/vector_stores/LanceDBIndexDemo.ipynb
index 543dc2c23e992443a62a43a7a42ac44151c6e299..3c7e65407b94067d4907ba2c42fa85f6f1f44bce 100644
--- a/docs/examples/vector_stores/LanceDBIndexDemo.ipynb
+++ b/docs/examples/vector_stores/LanceDBIndexDemo.ipynb
@@ -108,7 +108,9 @@
    "source": [
     "vector_store = LanceDBVectorStore(uri=\"/tmp/lancedb\")\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -200,7 +202,8 @@
     "del index\n",
     "\n",
     "index = VectorStoreIndex.from_documents(\n",
-    "    [Document(text=\"The sky is purple in Portland, Maine\")], uri=\"/tmp/new_dataset\"\n",
+    "    [Document(text=\"The sky is purple in Portland, Maine\")],\n",
+    "    uri=\"/tmp/new_dataset\",\n",
     ")"
    ]
   },
diff --git a/docs/examples/vector_stores/MetalIndexDemo.ipynb b/docs/examples/vector_stores/MetalIndexDemo.ipynb
index 41de4181cfb4feba76ab94e17f43da4d6657f719..90fb5d53e6fe69fc65b5e7d91b58e1145bbf5c59 100644
--- a/docs/examples/vector_stores/MetalIndexDemo.ipynb
+++ b/docs/examples/vector_stores/MetalIndexDemo.ipynb
@@ -84,7 +84,9 @@
     "    index_id=index_id,\n",
     ")\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/MilvusIndexDemo.ipynb b/docs/examples/vector_stores/MilvusIndexDemo.ipynb
index 5f2530af4761e4430d7cbea6940a34ab2cd92e56..24f6ad32d707e75eb45f471b0a744d051ec6ed69 100644
--- a/docs/examples/vector_stores/MilvusIndexDemo.ipynb
+++ b/docs/examples/vector_stores/MilvusIndexDemo.ipynb
@@ -125,7 +125,9 @@
     "\n",
     "vector_store = MilvusVectorStore(dim=1536, overwrite=True)\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -213,7 +215,8 @@
     "vector_store = MilvusVectorStore(dim=1536, overwrite=True)\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
     "index = VectorStoreIndex.from_documents(\n",
-    "    [Document(text=\"The number that is being searched for is ten.\")], storage_context\n",
+    "    [Document(text=\"The number that is being searched for is ten.\")],\n",
+    "    storage_context,\n",
     ")\n",
     "query_engine = index.as_query_engine()\n",
     "res = query_engine.query(\"Who is the author?\")\n",
@@ -248,7 +251,9 @@
     "\n",
     "vector_store = MilvusVectorStore(overwrite=False)\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\n",
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")\n",
     "query_engine = index.as_query_engine()\n",
     "res = query_engine.query(\"What is the number?\")\n",
     "print(\"Res:\", res)"
diff --git a/docs/examples/vector_stores/MongoDBAtlasVectorSearch.ipynb b/docs/examples/vector_stores/MongoDBAtlasVectorSearch.ipynb
index 75e8d039fda2c85f03e0146fb0e2d8365cc75c5d..b082b1bb7e3142f8bf2e3d0a2f46aa56db977b3f 100644
--- a/docs/examples/vector_stores/MongoDBAtlasVectorSearch.ipynb
+++ b/docs/examples/vector_stores/MongoDBAtlasVectorSearch.ipynb
@@ -29,12 +29,18 @@
    "outputs": [],
    "source": [
     "# mongo_uri = os.environ[\"MONGO_URI\"]\n",
-    "mongo_uri = \"mongodb+srv://<username>:<password>@<host>?retryWrites=true&w=majority\"\n",
+    "mongo_uri = (\n",
+    "    \"mongodb+srv://<username>:<password>@<host>?retryWrites=true&w=majority\"\n",
+    ")\n",
     "mongodb_client = pymongo.MongoClient(mongo_uri)\n",
     "store = MongoDBAtlasVectorSearch(mongodb_client)\n",
     "storage_context = StorageContext.from_defaults(vector_store=store)\n",
-    "uber_docs = SimpleDirectoryReader(input_files=[\"../data/10k/uber_2021.pdf\"]).load_data()\n",
-    "index = VectorStoreIndex.from_documents(uber_docs, storage_context=storage_context)"
+    "uber_docs = SimpleDirectoryReader(\n",
+    "    input_files=[\"../data/10k/uber_2021.pdf\"]\n",
+    ").load_data()\n",
+    "index = VectorStoreIndex.from_documents(\n",
+    "    uber_docs, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -83,7 +89,9 @@
     "\n",
     "print(store._collection.count_documents({}))\n",
     "# Get a ref_doc_id\n",
-    "typed_response = response if isinstance(response, Response) else response.get_response()\n",
+    "typed_response = (\n",
+    "    response if isinstance(response, Response) else response.get_response()\n",
+    ")\n",
     "ref_doc_id = typed_response.source_nodes[0].node.ref_doc_id\n",
     "print(store._collection.count_documents({\"metadata.ref_doc_id\": ref_doc_id}))\n",
     "# Test store delete\n",
diff --git a/docs/examples/vector_stores/MyScaleIndexDemo.ipynb b/docs/examples/vector_stores/MyScaleIndexDemo.ipynb
index cc9055f30949c015868fad08d0368484bc4cc9e5..1be21c27519ba8bcac6935a946debb0c92a2a272 100644
--- a/docs/examples/vector_stores/MyScaleIndexDemo.ipynb
+++ b/docs/examples/vector_stores/MyScaleIndexDemo.ipynb
@@ -140,7 +140,9 @@
     "    document.metadata = {\"user_id\": \"123\", \"favorite_color\": \"blue\"}\n",
     "vector_store = MyScaleVectorStore(myscale_client=client)\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/Neo4jVectorDemo.ipynb b/docs/examples/vector_stores/Neo4jVectorDemo.ipynb
index 16689a1551c6899c6bfb41003753ca7b8377ff71..2b9e72091ecc72f1b83dedb641ca0636c4ae78fa 100644
--- a/docs/examples/vector_stores/Neo4jVectorDemo.ipynb
+++ b/docs/examples/vector_stores/Neo4jVectorDemo.ipynb
@@ -87,7 +87,9 @@
     "from llama_index.storage.storage_context import StorageContext\n",
     "\n",
     "storage_context = StorageContext.from_defaults(vector_store=neo4j_vector)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/OpensearchDemo.ipynb b/docs/examples/vector_stores/OpensearchDemo.ipynb
index ceb9bf1fdedc26a2395d297a5d2627fe6775b114..afcd08b63ba727c335ff6d962f34e0ae2d0dda1a 100644
--- a/docs/examples/vector_stores/OpensearchDemo.ipynb
+++ b/docs/examples/vector_stores/OpensearchDemo.ipynb
@@ -42,7 +42,10 @@
    "source": [
     "from os import getenv\n",
     "from llama_index import SimpleDirectoryReader\n",
-    "from llama_index.vector_stores import OpensearchVectorStore, OpensearchVectorClient\n",
+    "from llama_index.vector_stores import (\n",
+    "    OpensearchVectorStore,\n",
+    "    OpensearchVectorClient,\n",
+    ")\n",
     "from llama_index import VectorStoreIndex, StorageContext\n",
     "\n",
     "# http endpoint for your cluster (opensearch required for vector index usage)\n",
@@ -179,7 +182,9 @@
     "footnote_query_engine = index.as_query_engine(\n",
     "    filters=MetadataFilters(\n",
     "        filters=[\n",
-    "            ExactMatchFilter(key=\"term\", value='{\"metadata.is_footnote\": \"true\"}'),\n",
+    "            ExactMatchFilter(\n",
+    "                key=\"term\", value='{\"metadata.is_footnote\": \"true\"}'\n",
+    "            ),\n",
     "            ExactMatchFilter(\n",
     "                key=\"query_string\",\n",
     "                value='{\"query\": \"content: space AND content: lisp\"}',\n",
@@ -188,7 +193,9 @@
     "    )\n",
     ")\n",
     "\n",
-    "res = footnote_query_engine.query(\"What did the author about space aliens and lisp?\")\n",
+    "res = footnote_query_engine.query(\n",
+    "    \"What did the author about space aliens and lisp?\"\n",
+    ")\n",
     "res.response"
    ]
   },
diff --git a/docs/examples/vector_stores/PineconeIndexDemo-0.6.0.ipynb b/docs/examples/vector_stores/PineconeIndexDemo-0.6.0.ipynb
index 7a40fd3e908661bfb8e606036febbd00ded961b7..d7fea429434b0cf9168251b2d081ec6ecd60afe4 100644
--- a/docs/examples/vector_stores/PineconeIndexDemo-0.6.0.ipynb
+++ b/docs/examples/vector_stores/PineconeIndexDemo-0.6.0.ipynb
@@ -316,7 +316,9 @@
    "outputs": [],
    "source": [
     "from llama_index.indices.composability import ComposableGraph\n",
-    "from llama_index.indices.keyword_table.simple_base import SimpleKeywordTableIndex"
+    "from llama_index.indices.keyword_table.simple_base import (\n",
+    "    SimpleKeywordTableIndex,\n",
+    ")"
    ]
   },
   {
@@ -352,8 +354,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.query.query_transform.base import DecomposeQueryTransform\n",
-    "from llama_index.query_engine.transform_query_engine import TransformQueryEngine\n",
+    "from llama_index.indices.query.query_transform.base import (\n",
+    "    DecomposeQueryTransform,\n",
+    ")\n",
+    "from llama_index.query_engine.transform_query_engine import (\n",
+    "    TransformQueryEngine,\n",
+    ")\n",
     "\n",
     "decompose_transform = DecomposeQueryTransform(verbose=True)\n",
     "\n",
@@ -566,7 +572,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.postprocessor.node import AutoPrevNextNodePostprocessor\n",
+    "from llama_index.indices.postprocessor.node import (\n",
+    "    AutoPrevNextNodePostprocessor,\n",
+    ")\n",
     "\n",
     "# define postprocessor\n",
     "node_postprocessor = AutoPrevNextNodePostprocessor(\n",
diff --git a/docs/examples/vector_stores/PineconeIndexDemo-Hybrid.ipynb b/docs/examples/vector_stores/PineconeIndexDemo-Hybrid.ipynb
index 7eb9956d20070aa8ab87dcaa17f4cd4b0ba74e42..b4ca14e631378806cb8cdf50534ee110fb320888 100644
--- a/docs/examples/vector_stores/PineconeIndexDemo-Hybrid.ipynb
+++ b/docs/examples/vector_stores/PineconeIndexDemo-Hybrid.ipynb
@@ -71,7 +71,9 @@
    "source": [
     "# dimensions are for text-embedding-ada-002\n",
     "# NOTE: needs dotproduct for hybrid search\n",
-    "pinecone.create_index(\"quickstart\", dimension=1536, metric=\"dotproduct\", pod_type=\"p1\")"
+    "pinecone.create_index(\n",
+    "    \"quickstart\", dimension=1536, metric=\"dotproduct\", pod_type=\"p1\"\n",
+    ")"
    ]
   },
   {
@@ -132,7 +134,9 @@
     "    add_sparse_vector=True,\n",
     ")\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/PineconeIndexDemo.ipynb b/docs/examples/vector_stores/PineconeIndexDemo.ipynb
index c23f7c72af0577a7cc4ee5bdb81e784a89ab65b8..1f0d4b08a5af6fb4091096f6cd6fd4a7ad98506c 100644
--- a/docs/examples/vector_stores/PineconeIndexDemo.ipynb
+++ b/docs/examples/vector_stores/PineconeIndexDemo.ipynb
@@ -71,7 +71,9 @@
    "outputs": [],
    "source": [
     "# dimensions are for text-embedding-ada-002\n",
-    "pinecone.create_index(\"quickstart\", dimension=1536, metric=\"euclidean\", pod_type=\"p1\")"
+    "pinecone.create_index(\n",
+    "    \"quickstart\", dimension=1536, metric=\"euclidean\", pod_type=\"p1\"\n",
+    ")"
    ]
   },
   {
@@ -154,7 +156,9 @@
     "\n",
     "vector_store = PineconeVectorStore(pinecone_index=pinecone_index)\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/QdrantIndexDemo.ipynb b/docs/examples/vector_stores/QdrantIndexDemo.ipynb
index 4a2e596f67fc85c1c66e7b08a5a7636d322867eb..e79b06c0bbc8435e8d1af982de8224d27a63714a 100644
--- a/docs/examples/vector_stores/QdrantIndexDemo.ipynb
+++ b/docs/examples/vector_stores/QdrantIndexDemo.ipynb
@@ -184,7 +184,9 @@
    "source": [
     "# set Logging to DEBUG for more detailed outputs\n",
     "query_engine = index.as_query_engine()\n",
-    "response = query_engine.query(\"What did the author do after his time at Viaweb?\")"
+    "response = query_engine.query(\n",
+    "    \"What did the author do after his time at Viaweb?\"\n",
+    ")"
    ]
   },
   {
@@ -246,7 +248,7 @@
     "    # on 'memory' location and requires\n",
     "    # Qdrant to be deployed somewhere.\n",
     "    url=\"http://localhost:6334\",\n",
-    "    prefer_grpc=True\n",
+    "    prefer_grpc=True,\n",
     "    # set API KEY for Qdrant Cloud\n",
     "    # api_key=\"<qdrant-api-key>\",\n",
     ")"
@@ -323,7 +325,9 @@
    "source": [
     "# set Logging to DEBUG for more detailed outputs\n",
     "query_engine = index.as_query_engine(use_async=True)\n",
-    "response = await query_engine.aquery(\"What did the author do after his time at Viaweb?\")"
+    "response = await query_engine.aquery(\n",
+    "    \"What did the author do after his time at Viaweb?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/RedisIndexDemo.ipynb b/docs/examples/vector_stores/RedisIndexDemo.ipynb
index 91b73a3bf56ea143315752fe88b1600f21329ec8..fc80edd59d71863cd8f1135e4f5b6e68dac4b8d1 100644
--- a/docs/examples/vector_stores/RedisIndexDemo.ipynb
+++ b/docs/examples/vector_stores/RedisIndexDemo.ipynb
@@ -114,7 +114,12 @@
    "source": [
     "# load documents\n",
     "documents = SimpleDirectoryReader(\"../data/paul_graham\").load_data()\n",
-    "print(\"Document ID:\", documents[0].doc_id, \"Document Hash:\", documents[0].doc_hash)"
+    "print(\n",
+    "    \"Document ID:\",\n",
+    "    documents[0].doc_id,\n",
+    "    \"Document Hash:\",\n",
+    "    documents[0].doc_hash,\n",
+    ")"
    ]
   },
   {
@@ -227,7 +232,9 @@
     "    overwrite=True,\n",
     ")\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -400,7 +407,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vector_store.persist(persist_path=\"\")  # persist_path means nothing for RedisVectorStore"
+    "vector_store.persist(\n",
+    "    persist_path=\"\"\n",
+    ")  # persist_path means nothing for RedisVectorStore"
    ]
   },
   {
@@ -668,7 +677,9 @@
     "    document.metadata = {\"user_id\": \"12345\", \"favorite_color\": \"blue\"}\n",
     "\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\n",
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")\n",
     "\n",
     "# load documents\n",
     "print(\n",
diff --git a/docs/examples/vector_stores/RocksetIndexDemo.ipynb b/docs/examples/vector_stores/RocksetIndexDemo.ipynb
index b0412bcce90eb2af17f4c20a703da74ec32aac5d..0b15b6e89e103d46e0f1dbc6c48131d15a5c9f0e 100644
--- a/docs/examples/vector_stores/RocksetIndexDemo.ipynb
+++ b/docs/examples/vector_stores/RocksetIndexDemo.ipynb
@@ -56,7 +56,9 @@
    "source": [
     "from llama_index import SimpleDirectoryReader\n",
     "\n",
-    "docs = SimpleDirectoryReader(input_files=[\"{path to}/consitution.txt\"]).load_data()"
+    "docs = SimpleDirectoryReader(\n",
+    "    input_files=[\"{path to}/consitution.txt\"]\n",
+    ").load_data()"
    ]
   },
   {
@@ -206,7 +208,9 @@
    "source": [
     "from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n",
     "\n",
-    "filters = MetadataFilters(filters=[ExactMatchFilter(key=\"type\", value=\"fruit\")])"
+    "filters = MetadataFilters(\n",
+    "    filters=[ExactMatchFilter(key=\"type\", value=\"fruit\")]\n",
+    ")"
    ]
   },
   {
@@ -269,7 +273,7 @@
     "\n",
     "vector_store = RocksetVectorStore.with_new_collection(\n",
     "    collection=\"llamaindex_demo\",  # name of new collection\n",
-    "    dimensions=1536  # specifies length of vectors in ingest tranformation (optional)\n",
+    "    dimensions=1536,  # specifies length of vectors in ingest tranformation (optional)\n",
     "    # other RocksetVectorStore args\n",
     ")\n",
     "\n",
diff --git a/docs/examples/vector_stores/SimpleIndexDemo.ipynb b/docs/examples/vector_stores/SimpleIndexDemo.ipynb
index 1a17377ad7fb898f7017f642a379748dfd6198d3..c84d3c273bb5882952e889463e5c523ebe867dc5 100644
--- a/docs/examples/vector_stores/SimpleIndexDemo.ipynb
+++ b/docs/examples/vector_stores/SimpleIndexDemo.ipynb
@@ -460,7 +460,9 @@
    "source": [
     "from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n",
     "\n",
-    "filters = MetadataFilters(filters=[ExactMatchFilter(key=\"tag\", value=\"target\")])\n",
+    "filters = MetadataFilters(\n",
+    "    filters=[ExactMatchFilter(key=\"tag\", value=\"target\")]\n",
+    ")\n",
     "\n",
     "retriever = index.as_retriever(\n",
     "    similarity_top_k=20,\n",
diff --git a/docs/examples/vector_stores/SimpleIndexDemoLlama-Local.ipynb b/docs/examples/vector_stores/SimpleIndexDemoLlama-Local.ipynb
index a544834e2192a59166bb4161e9cfb6109556ba8f..0113c673975451fb9da525b17c802bf6b9b77a9c 100644
--- a/docs/examples/vector_stores/SimpleIndexDemoLlama-Local.ipynb
+++ b/docs/examples/vector_stores/SimpleIndexDemoLlama-Local.ipynb
@@ -246,7 +246,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index import VectorStoreIndex, ServiceContext, set_global_service_context\n",
+    "from llama_index import (\n",
+    "    VectorStoreIndex,\n",
+    "    ServiceContext,\n",
+    "    set_global_service_context,\n",
+    ")\n",
     "\n",
     "service_context = ServiceContext.from_defaults(\n",
     "    llm=llm, embed_model=\"local:BAAI/bge-small-en\"\n",
diff --git a/docs/examples/vector_stores/SimpleIndexDemoLlama2.ipynb b/docs/examples/vector_stores/SimpleIndexDemoLlama2.ipynb
index 51be90954913dabc5a56cda95e345dd985fb0e5a..5cb1b76e46539862c150f548583f348fd4b15b17 100644
--- a/docs/examples/vector_stores/SimpleIndexDemoLlama2.ipynb
+++ b/docs/examples/vector_stores/SimpleIndexDemoLlama2.ipynb
@@ -97,7 +97,10 @@
    "source": [
     "from llama_index.llms import Replicate\n",
     "from llama_index import ServiceContext, set_global_service_context\n",
-    "from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt\n",
+    "from llama_index.llms.llama_utils import (\n",
+    "    messages_to_prompt,\n",
+    "    completion_to_prompt,\n",
+    ")\n",
     "\n",
     "# The replicate endpoint\n",
     "LLAMA_13B_V2_CHAT = \"a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5\"\n",
diff --git a/docs/examples/vector_stores/SimpleIndexDemoMMR.ipynb b/docs/examples/vector_stores/SimpleIndexDemoMMR.ipynb
index f2473cbfa4ae1bed7c0e00a94bde8a8b5a2c896d..d6cb7a737e58f4368ad8e4be4b299498bb274c24 100644
--- a/docs/examples/vector_stores/SimpleIndexDemoMMR.ipynb
+++ b/docs/examples/vector_stores/SimpleIndexDemoMMR.ipynb
@@ -77,7 +77,9 @@
     "    vector_store_query_mode=\"mmr\", vector_store_kwargs={\"mmr_threshold\": 0.2}\n",
     ")\n",
     "\n",
-    "response = query_engine_with_threshold.query(\"What did the author do growing up?\")\n",
+    "response = query_engine_with_threshold.query(\n",
+    "    \"What did the author do growing up?\"\n",
+    ")\n",
     "print(response)"
    ]
   },
@@ -107,7 +109,9 @@
    "source": [
     "index1 = VectorStoreIndex.from_documents(documents)\n",
     "query_engine_no_mrr = index1.as_query_engine()\n",
-    "response_no_mmr = query_engine_no_mrr.query(\"What did the author do growing up?\")\n",
+    "response_no_mmr = query_engine_no_mrr.query(\n",
+    "    \"What did the author do growing up?\"\n",
+    ")\n",
     "\n",
     "index2 = VectorStoreIndex.from_documents(documents)\n",
     "query_engine_with_high_threshold = index2.as_query_engine(\n",
@@ -125,7 +129,10 @@
     "    \"What did the author do growing up?\"\n",
     ")\n",
     "\n",
-    "print(\"Scores without MMR \", [node.score for node in response_no_mmr.source_nodes])\n",
+    "print(\n",
+    "    \"Scores without MMR \",\n",
+    "    [node.score for node in response_no_mmr.source_nodes],\n",
+    ")\n",
     "print(\n",
     "    \"Scores with MMR and a threshold of 0.8 \",\n",
     "    [node.score for node in response_high_threshold.source_nodes],\n",
@@ -176,7 +183,9 @@
    "source": [
     "# llama_index/docs/examples/data/paul_graham\n",
     "documents = SimpleDirectoryReader(\"../data/paul_graham/\").load_data()\n",
-    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, service_context=service_context\n",
+    ")"
    ]
   },
   {
@@ -190,7 +199,9 @@
     "    similarity_top_k=3,\n",
     "    vector_store_kwargs={\"mmr_threshold\": 0.1},\n",
     ")\n",
-    "nodes = retriever.retrieve(\"What did the author do during his time in Y Combinator?\")"
+    "nodes = retriever.retrieve(\n",
+    "    \"What did the author do during his time in Y Combinator?\"\n",
+    ")"
    ]
   },
   {
@@ -253,7 +264,9 @@
     "    similarity_top_k=3,\n",
     "    vector_store_kwargs={\"mmr_threshold\": 0.5},\n",
     ")\n",
-    "nodes = retriever.retrieve(\"What did the author do during his time in Y Combinator?\")"
+    "nodes = retriever.retrieve(\n",
+    "    \"What did the author do during his time in Y Combinator?\"\n",
+    ")"
    ]
   },
   {
@@ -320,7 +333,9 @@
     "    similarity_top_k=3,\n",
     "    vector_store_kwargs={\"mmr_threshold\": 0.8},\n",
     ")\n",
-    "nodes = retriever.retrieve(\"What did the author do during his time in Y Combinator?\")"
+    "nodes = retriever.retrieve(\n",
+    "    \"What did the author do during his time in Y Combinator?\"\n",
+    ")"
    ]
   },
   {
@@ -383,7 +398,9 @@
     "    similarity_top_k=3,\n",
     "    vector_store_kwargs={\"mmr_threshold\": 1.0},\n",
     ")\n",
-    "nodes = retriever.retrieve(\"What did the author do during his time in Y Combinator?\")"
+    "nodes = retriever.retrieve(\n",
+    "    \"What did the author do during his time in Y Combinator?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/SimpleIndexOnS3.ipynb b/docs/examples/vector_stores/SimpleIndexOnS3.ipynb
index fea8350d3351b7007e83997ccc71360a9197fde6..9aa093080a2c5eeb427dc861388f7e3c6be01e8f 100644
--- a/docs/examples/vector_stores/SimpleIndexOnS3.ipynb
+++ b/docs/examples/vector_stores/SimpleIndexOnS3.ipynb
@@ -176,7 +176,9 @@
    "outputs": [],
    "source": [
     "# load index from s3\n",
-    "sc = StorageContext.from_defaults(persist_dir=\"llama-index/storage_demo\", fs=s3)"
+    "sc = StorageContext.from_defaults(\n",
+    "    persist_dir=\"llama-index/storage_demo\", fs=s3\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/SupabaseVectorIndexDemo.ipynb b/docs/examples/vector_stores/SupabaseVectorIndexDemo.ipynb
index f2b44e149b42501fd7ec39e024f4c718e8871ff3..3c987c753bffaf0a1220a4535b563711712685f3 100644
--- a/docs/examples/vector_stores/SupabaseVectorIndexDemo.ipynb
+++ b/docs/examples/vector_stores/SupabaseVectorIndexDemo.ipynb
@@ -79,7 +79,12 @@
    ],
    "source": [
     "documents = SimpleDirectoryReader(\"../data/paul_graham/\").load_data()\n",
-    "print(\"Document ID:\", documents[0].doc_id, \"Document Hash:\", documents[0].doc_hash)"
+    "print(\n",
+    "    \"Document ID:\",\n",
+    "    documents[0].doc_id,\n",
+    "    \"Document Hash:\",\n",
+    "    documents[0].doc_hash,\n",
+    ")"
    ]
   },
   {
@@ -103,11 +108,15 @@
    "outputs": [],
    "source": [
     "vector_store = SupabaseVectorStore(\n",
-    "    postgres_connection_string=\"postgresql://<user>:<password>@<host>:<port>/<db_name>\",\n",
+    "    postgres_connection_string=(\n",
+    "        \"postgresql://<user>:<password>@<host>:<port>/<db_name>\"\n",
+    "    ),\n",
     "    collection_name=\"base_demo\",\n",
     ")\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -239,7 +248,9 @@
    "outputs": [],
    "source": [
     "vector_store = SupabaseVectorStore(\n",
-    "    postgres_connection_string=\"postgresql://<user>:<password>@<host>:<port>/<db_name>\",\n",
+    "    postgres_connection_string=(\n",
+    "        \"postgresql://<user>:<password>@<host>:<port>/<db_name>\"\n",
+    "    ),\n",
     "    collection_name=\"metadata_filters_demo\",\n",
     ")\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
@@ -264,7 +275,9 @@
    "source": [
     "from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n",
     "\n",
-    "filters = MetadataFilters(filters=[ExactMatchFilter(key=\"theme\", value=\"Mafia\")])"
+    "filters = MetadataFilters(\n",
+    "    filters=[ExactMatchFilter(key=\"theme\", value=\"Mafia\")]\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/TairIndexDemo.ipynb b/docs/examples/vector_stores/TairIndexDemo.ipynb
index 500c3e46d1b8725e35734d198642155f352a593b..540826314861d298d5ea655694c84b889a1d8ac0 100644
--- a/docs/examples/vector_stores/TairIndexDemo.ipynb
+++ b/docs/examples/vector_stores/TairIndexDemo.ipynb
@@ -86,7 +86,12 @@
    "source": [
     "# load documents\n",
     "documents = SimpleDirectoryReader(\"../data/paul_graham\").load_data()\n",
-    "print(\"Document ID:\", documents[0].doc_id, \"Document Hash:\", documents[0].doc_hash)"
+    "print(\n",
+    "    \"Document ID:\",\n",
+    "    documents[0].doc_id,\n",
+    "    \"Document Hash:\",\n",
+    "    documents[0].doc_hash,\n",
+    ")"
    ]
   },
   {
@@ -108,15 +113,15 @@
    "source": [
     "from llama_index.storage.storage_context import StorageContext\n",
     "\n",
-    "tair_url = (\n",
-    "    \"redis://{username}:{password}@r-bp****************.redis.rds.aliyuncs.com:{port}\"\n",
-    ")\n",
+    "tair_url = \"redis://{username}:{password}@r-bp****************.redis.rds.aliyuncs.com:{port}\"\n",
     "\n",
     "vector_store = TairVectorStore(\n",
     "    tair_url=tair_url, index_name=\"pg_essays\", overwrite=True\n",
     ")\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = GPTVectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = GPTVectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/Timescalevector.ipynb b/docs/examples/vector_stores/Timescalevector.ipynb
index c26fe4b073ebb5b109919a5d341278be635a7f74..6d35ca855d384fb66cf3bf98e5be6a23cd4e6330 100644
--- a/docs/examples/vector_stores/Timescalevector.ipynb
+++ b/docs/examples/vector_stores/Timescalevector.ipynb
@@ -192,7 +192,9 @@
     "\n",
     "# Create a new VectorStoreIndex using the TimescaleVectorStore\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -592,9 +594,13 @@
     "    month = month_dict[components[1]]\n",
     "    year = components[4]\n",
     "    time = components[3]\n",
-    "    timezone_offset_minutes = int(components[5])  # Convert the offset to minutes\n",
+    "    timezone_offset_minutes = int(\n",
+    "        components[5]\n",
+    "    )  # Convert the offset to minutes\n",
     "    timezone_hours = timezone_offset_minutes // 60  # Calculate the hours\n",
-    "    timezone_minutes = timezone_offset_minutes % 60  # Calculate the remaining minutes\n",
+    "    timezone_minutes = (\n",
+    "        timezone_offset_minutes % 60\n",
+    "    )  # Calculate the remaining minutes\n",
     "    # Create a formatted string for the timestamptz in PostgreSQL format\n",
     "    timestamp_tz_str = (\n",
     "        f\"{year}-{month}-{day} {time}+{timezone_hours:02}{timezone_minutes:02}\"\n",
@@ -813,8 +819,12 @@
    "outputs": [],
    "source": [
     "# Time filter variables for query\n",
-    "start_dt = datetime(2023, 8, 1, 22, 10, 35)  # Start date = 1 August 2023, 22:10:35\n",
-    "end_dt = datetime(2023, 8, 30, 22, 10, 35)  # End date = 30 August 2023, 22:10:35\n",
+    "start_dt = datetime(\n",
+    "    2023, 8, 1, 22, 10, 35\n",
+    ")  # Start date = 1 August 2023, 22:10:35\n",
+    "end_dt = datetime(\n",
+    "    2023, 8, 30, 22, 10, 35\n",
+    ")  # End date = 30 August 2023, 22:10:35\n",
     "td = timedelta(days=7)  # Time delta = 7 days"
    ]
   },
@@ -1077,7 +1087,9 @@
     ")\n",
     "\n",
     "# return most similar vectors to query from end date and a time delta earlier\n",
-    "query_result = ts_vector_store.query(vector_store_query, end_date=end_dt, time_delta=td)\n",
+    "query_result = ts_vector_store.query(\n",
+    "    vector_store_query, end_date=end_dt, time_delta=td\n",
+    ")\n",
     "\n",
     "for node in query_result.nodes:\n",
     "    print(\"-\" * 80)\n",
@@ -1171,7 +1183,8 @@
     "\n",
     "# query_str = \"What's new with TimescaleDB? List 3 new features\"\n",
     "query_str = (\n",
-    "    \"What's new with TimescaleDB functions? When were these changes made and by whom?\"\n",
+    "    \"What's new with TimescaleDB functions? When were these changes made and\"\n",
+    "    \" by whom?\"\n",
     ")\n",
     "response = query_engine.query(query_str)\n",
     "print(str(response))"
diff --git a/docs/examples/vector_stores/TypesenseDemo.ipynb b/docs/examples/vector_stores/TypesenseDemo.ipynb
index 40c4f8268ba86108d12a330d780d4fb0b1e37cc1..eef092313528b39815d7fa96c8d8d9f51bfbf7ca 100644
--- a/docs/examples/vector_stores/TypesenseDemo.ipynb
+++ b/docs/examples/vector_stores/TypesenseDemo.ipynb
@@ -66,9 +66,13 @@
     "    }\n",
     ")\n",
     "typesense_vector_store = TypesenseVectorStore(typesense_client)\n",
-    "storage_context = StorageContext.from_defaults(vector_store=typesense_vector_store)\n",
+    "storage_context = StorageContext.from_defaults(\n",
+    "    vector_store=typesense_vector_store\n",
+    ")\n",
     "\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/WeaviateIndexDemo-Hybrid.ipynb b/docs/examples/vector_stores/WeaviateIndexDemo-Hybrid.ipynb
index 0f64be908966fdca5997147b10cf601a84d95133..1158921baa4a1691bc566ba74e34be2831a3bab0 100644
--- a/docs/examples/vector_stores/WeaviateIndexDemo-Hybrid.ipynb
+++ b/docs/examples/vector_stores/WeaviateIndexDemo-Hybrid.ipynb
@@ -119,7 +119,9 @@
     "\n",
     "vector_store = WeaviateVectorStore(weaviate_client=client)\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\n",
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")\n",
     "\n",
     "# NOTE: you may also choose to define a index_name manually.\n",
     "# index_name = \"test_prefix\"\n",
diff --git a/docs/examples/vector_stores/WeaviateIndexDemo.ipynb b/docs/examples/vector_stores/WeaviateIndexDemo.ipynb
index 53c161e7c9f2d66f7d168a0e3cc66209680a9238..edfc4c709413b525d6f84fff963f7836d8e8fa88 100644
--- a/docs/examples/vector_stores/WeaviateIndexDemo.ipynb
+++ b/docs/examples/vector_stores/WeaviateIndexDemo.ipynb
@@ -130,9 +130,13 @@
     "from llama_index.storage.storage_context import StorageContext\n",
     "\n",
     "# If you want to load the index later, be sure to give it a name!\n",
-    "vector_store = WeaviateVectorStore(weaviate_client=client, index_name=\"LlamaIndex\")\n",
+    "vector_store = WeaviateVectorStore(\n",
+    "    weaviate_client=client, index_name=\"LlamaIndex\"\n",
+    ")\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\n",
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")\n",
     "\n",
     "# NOTE: you may also choose to define a index_name manually.\n",
     "# index_name = \"test_prefix\"\n",
@@ -222,7 +226,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vector_store = WeaviateVectorStore(weaviate_client=client, index_name=\"LlamaIndex\")\n",
+    "vector_store = WeaviateVectorStore(\n",
+    "    weaviate_client=client, index_name=\"LlamaIndex\"\n",
+    ")\n",
     "\n",
     "loaded_index = VectorStoreIndex.from_vector_store(vector_store)"
    ]
@@ -326,7 +332,9 @@
    "source": [
     "from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n",
     "\n",
-    "filters = MetadataFilters(filters=[ExactMatchFilter(key=\"filename\", value=\"README.md\")])\n",
+    "filters = MetadataFilters(\n",
+    "    filters=[ExactMatchFilter(key=\"filename\", value=\"README.md\")]\n",
+    ")\n",
     "query_engine = loaded_index.as_query_engine(filters=filters)\n",
     "response = query_engine.query(\"What is the name of the file?\")\n",
     "display(Markdown(f\"<b>{response}</b>\"))"
diff --git a/docs/examples/vector_stores/ZepIndexDemo.ipynb b/docs/examples/vector_stores/ZepIndexDemo.ipynb
index a5c017176f209bbf7aaaab9a81c5dff151c7d8ac..02efd012d0d279460e8f810c37a4424de7a8b45b 100644
--- a/docs/examples/vector_stores/ZepIndexDemo.ipynb
+++ b/docs/examples/vector_stores/ZepIndexDemo.ipynb
@@ -134,12 +134,16 @@
     "collection_name = f\"graham{uuid4().hex}\"\n",
     "\n",
     "vector_store = ZepVectorStore(\n",
-    "    api_url=zep_api_url, collection_name=collection_name, embedding_dimensions=1536\n",
+    "    api_url=zep_api_url,\n",
+    "    collection_name=collection_name,\n",
+    "    embedding_dimensions=1536,\n",
     ")\n",
     "\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
     "\n",
-    "index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+    "index = VectorStoreIndex.from_documents(\n",
+    "    documents, storage_context=storage_context\n",
+    ")"
    ]
   },
   {
@@ -236,7 +240,9 @@
     "collection_name = f\"movies{uuid4().hex}\"\n",
     "\n",
     "vector_store = ZepVectorStore(\n",
-    "    api_url=zep_api_url, collection_name=collection_name, embedding_dimensions=1536\n",
+    "    api_url=zep_api_url,\n",
+    "    collection_name=collection_name,\n",
+    "    embedding_dimensions=1536,\n",
     ")\n",
     "\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
@@ -252,7 +258,9 @@
    "source": [
     "from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n",
     "\n",
-    "filters = MetadataFilters(filters=[ExactMatchFilter(key=\"theme\", value=\"Mafia\")])"
+    "filters = MetadataFilters(\n",
+    "    filters=[ExactMatchFilter(key=\"theme\", value=\"Mafia\")]\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/chroma_auto_retriever.ipynb b/docs/examples/vector_stores/chroma_auto_retriever.ipynb
index ffe1a7ba2d9eec210e59c5e996e2061a22416619..f2004d63d944ba19605e4157fbc9c3a8a3da6acf 100644
--- a/docs/examples/vector_stores/chroma_auto_retriever.ipynb
+++ b/docs/examples/vector_stores/chroma_auto_retriever.ipynb
@@ -121,35 +121,56 @@
     "\n",
     "nodes = [\n",
     "    TextNode(\n",
-    "        text=\"Michael Jordan is a retired professional basketball player, widely regarded as one of the greatest basketball players of all time.\",\n",
+    "        text=(\n",
+    "            \"Michael Jordan is a retired professional basketball player,\"\n",
+    "            \" widely regarded as one of the greatest basketball players of all\"\n",
+    "            \" time.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Sports\",\n",
     "            \"country\": \"United States\",\n",
     "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Angelina Jolie is an American actress, filmmaker, and humanitarian. She has received numerous awards for her acting and is known for her philanthropic work.\",\n",
+    "        text=(\n",
+    "            \"Angelina Jolie is an American actress, filmmaker, and\"\n",
+    "            \" humanitarian. She has received numerous awards for her acting\"\n",
+    "            \" and is known for her philanthropic work.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Entertainment\",\n",
     "            \"country\": \"United States\",\n",
     "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Elon Musk is a business magnate, industrial designer, and engineer. He is the founder, CEO, and lead designer of SpaceX, Tesla, Inc., Neuralink, and The Boring Company.\",\n",
+    "        text=(\n",
+    "            \"Elon Musk is a business magnate, industrial designer, and\"\n",
+    "            \" engineer. He is the founder, CEO, and lead designer of SpaceX,\"\n",
+    "            \" Tesla, Inc., Neuralink, and The Boring Company.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Business\",\n",
     "            \"country\": \"United States\",\n",
     "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Rihanna is a Barbadian singer, actress, and businesswoman. She has achieved significant success in the music industry and is known for her versatile musical style.\",\n",
+    "        text=(\n",
+    "            \"Rihanna is a Barbadian singer, actress, and businesswoman. She\"\n",
+    "            \" has achieved significant success in the music industry and is\"\n",
+    "            \" known for her versatile musical style.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Music\",\n",
     "            \"country\": \"Barbados\",\n",
     "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Cristiano Ronaldo is a Portuguese professional footballer who is considered one of the greatest football players of all time. He has won numerous awards and set multiple records during his career.\",\n",
+    "        text=(\n",
+    "            \"Cristiano Ronaldo is a Portuguese professional footballer who is\"\n",
+    "            \" considered one of the greatest football players of all time. He\"\n",
+    "            \" has won numerous awards and set multiple records during his\"\n",
+    "            \" career.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Sports\",\n",
     "            \"country\": \"Portugal\",\n",
@@ -208,7 +229,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever\n",
+    "from llama_index.indices.vector_store.retrievers import (\n",
+    "    VectorIndexAutoRetriever,\n",
+    ")\n",
     "from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n",
     "\n",
     "\n",
@@ -218,16 +241,24 @@
     "        MetadataInfo(\n",
     "            name=\"category\",\n",
     "            type=\"str\",\n",
-    "            description=\"Category of the celebrity, one of [Sports, Entertainment, Business, Music]\",\n",
+    "            description=(\n",
+    "                \"Category of the celebrity, one of [Sports, Entertainment,\"\n",
+    "                \" Business, Music]\"\n",
+    "            ),\n",
     "        ),\n",
     "        MetadataInfo(\n",
     "            name=\"country\",\n",
     "            type=\"str\",\n",
-    "            description=\"Country of the celebrity, one of [United States, Barbados, Portugal]\",\n",
+    "            description=(\n",
+    "                \"Country of the celebrity, one of [United States, Barbados,\"\n",
+    "                \" Portugal]\"\n",
+    "            ),\n",
     "        ),\n",
     "    ],\n",
     ")\n",
-    "retriever = VectorIndexAutoRetriever(index, vector_store_info=vector_store_info)"
+    "retriever = VectorIndexAutoRetriever(\n",
+    "    index, vector_store_info=vector_store_info\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/chroma_metadata_filter.ipynb b/docs/examples/vector_stores/chroma_metadata_filter.ipynb
index 650adab1c7ccb5df0a27b037449613ef3a128900..8b2b5329b7c973090d24d3162572cab1837039f4 100644
--- a/docs/examples/vector_stores/chroma_metadata_filter.ipynb
+++ b/docs/examples/vector_stores/chroma_metadata_filter.ipynb
@@ -180,7 +180,9 @@
     "from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n",
     "\n",
     "\n",
-    "filters = MetadataFilters(filters=[ExactMatchFilter(key=\"theme\", value=\"Mafia\")])\n",
+    "filters = MetadataFilters(\n",
+    "    filters=[ExactMatchFilter(key=\"theme\", value=\"Mafia\")]\n",
+    ")\n",
     "\n",
     "retriever = index.as_retriever(filters=filters)\n",
     "retriever.retrieve(\"What is inception about?\")"
diff --git a/docs/examples/vector_stores/elasticsearch_auto_retriever.ipynb b/docs/examples/vector_stores/elasticsearch_auto_retriever.ipynb
index e675277fa7ebf98ccd579280703adede01e1c7a7..a5410dbf4b3bce19cbf5839ea5ff4497089a41fd 100644
--- a/docs/examples/vector_stores/elasticsearch_auto_retriever.ipynb
+++ b/docs/examples/vector_stores/elasticsearch_auto_retriever.ipynb
@@ -91,19 +91,35 @@
     "\n",
     "nodes = [\n",
     "    TextNode(\n",
-    "        text=\"A bunch of scientists bring back dinosaurs and mayhem breaks loose\",\n",
+    "        text=(\n",
+    "            \"A bunch of scientists bring back dinosaurs and mayhem breaks\"\n",
+    "            \" loose\"\n",
+    "        ),\n",
     "        metadata={\"year\": 1993, \"rating\": 7.7, \"genre\": \"science fiction\"},\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...\",\n",
-    "        metadata={\"year\": 2010, \"director\": \"Christopher Nolan\", \"rating\": 8.2},\n",
+    "        text=(\n",
+    "            \"Leo DiCaprio gets lost in a dream within a dream within a dream\"\n",
+    "            \" within a ...\"\n",
+    "        ),\n",
+    "        metadata={\n",
+    "            \"year\": 2010,\n",
+    "            \"director\": \"Christopher Nolan\",\n",
+    "            \"rating\": 8.2,\n",
+    "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea\",\n",
+    "        text=(\n",
+    "            \"A psychologist / detective gets lost in a series of dreams within\"\n",
+    "            \" dreams within dreams and Inception reused the idea\"\n",
+    "        ),\n",
     "        metadata={\"year\": 2006, \"director\": \"Satoshi Kon\", \"rating\": 8.6},\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"A bunch of normal-sized women are supremely wholesome and some men pine after them\",\n",
+    "        text=(\n",
+    "            \"A bunch of normal-sized women are supremely wholesome and some\"\n",
+    "            \" men pine after them\"\n",
+    "        ),\n",
     "        metadata={\"year\": 2019, \"director\": \"Greta Gerwig\", \"rating\": 8.3},\n",
     "    ),\n",
     "    TextNode(\n",
@@ -165,7 +181,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever\n",
+    "from llama_index.indices.vector_store.retrievers import (\n",
+    "    VectorIndexAutoRetriever,\n",
+    ")\n",
     "from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n",
     "\n",
     "\n",
@@ -188,11 +206,15 @@
     "            type=\"string\",\n",
     "        ),\n",
     "        MetadataInfo(\n",
-    "            name=\"rating\", description=\"A 1-10 rating for the movie\", type=\"float\"\n",
+    "            name=\"rating\",\n",
+    "            description=\"A 1-10 rating for the movie\",\n",
+    "            type=\"float\",\n",
     "        ),\n",
     "    ],\n",
     ")\n",
-    "retriever = VectorIndexAutoRetriever(index, vector_store_info=vector_store_info)"
+    "retriever = VectorIndexAutoRetriever(\n",
+    "    index, vector_store_info=vector_store_info\n",
+    ")"
    ]
   },
   {
@@ -212,7 +234,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "retriever.retrieve(\"What are 2 movies by Christopher Nolan were made before 2020?\")"
+    "retriever.retrieve(\n",
+    "    \"What are 2 movies by Christopher Nolan were made before 2020?\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/existing_data/pinecone_existing_data.ipynb b/docs/examples/vector_stores/existing_data/pinecone_existing_data.ipynb
index 98161b7c9a737f5ae5f34362af6220012fd3dd7c..9b1ee5dfff2f24e9a53a33d2ec0bf3dfc584e4a4 100644
--- a/docs/examples/vector_stores/existing_data/pinecone_existing_data.ipynb
+++ b/docs/examples/vector_stores/existing_data/pinecone_existing_data.ipynb
@@ -134,25 +134,36 @@
     "    {\n",
     "        \"title\": \"To Kill a Mockingbird\",\n",
     "        \"author\": \"Harper Lee\",\n",
-    "        \"content\": \"To Kill a Mockingbird is a novel by Harper Lee published in 1960...\",\n",
+    "        \"content\": (\n",
+    "            \"To Kill a Mockingbird is a novel by Harper Lee published in\"\n",
+    "            \" 1960...\"\n",
+    "        ),\n",
     "        \"year\": 1960,\n",
     "    },\n",
     "    {\n",
     "        \"title\": \"1984\",\n",
     "        \"author\": \"George Orwell\",\n",
-    "        \"content\": \"1984 is a dystopian novel by George Orwell published in 1949...\",\n",
+    "        \"content\": (\n",
+    "            \"1984 is a dystopian novel by George Orwell published in 1949...\"\n",
+    "        ),\n",
     "        \"year\": 1949,\n",
     "    },\n",
     "    {\n",
     "        \"title\": \"The Great Gatsby\",\n",
     "        \"author\": \"F. Scott Fitzgerald\",\n",
-    "        \"content\": \"The Great Gatsby is a novel by F. Scott Fitzgerald published in 1925...\",\n",
+    "        \"content\": (\n",
+    "            \"The Great Gatsby is a novel by F. Scott Fitzgerald published in\"\n",
+    "            \" 1925...\"\n",
+    "        ),\n",
     "        \"year\": 1925,\n",
     "    },\n",
     "    {\n",
     "        \"title\": \"Pride and Prejudice\",\n",
     "        \"author\": \"Jane Austen\",\n",
-    "        \"content\": \"Pride and Prejudice is a novel by Jane Austen published in 1813...\",\n",
+    "        \"content\": (\n",
+    "            \"Pride and Prejudice is a novel by Jane Austen published in\"\n",
+    "            \" 1813...\"\n",
+    "        ),\n",
     "        \"year\": 1813,\n",
     "    },\n",
     "]"
@@ -202,7 +213,9 @@
     "entries = []\n",
     "for book in books:\n",
     "    vector = embed_model.get_text_embedding(book[\"content\"])\n",
-    "    entries.append({\"id\": str(uuid.uuid4()), \"values\": vector, \"metadata\": book})\n",
+    "    entries.append(\n",
+    "        {\"id\": str(uuid.uuid4()), \"values\": vector, \"metadata\": book}\n",
+    "    )\n",
     "pinecone_index.upsert(entries)"
    ]
   },
@@ -243,7 +256,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vector_store = PineconeVectorStore(pinecone_index=pinecone_index, text_key=\"content\")"
+    "vector_store = PineconeVectorStore(\n",
+    "    pinecone_index=pinecone_index, text_key=\"content\"\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/existing_data/weaviate_existing_data.ipynb b/docs/examples/vector_stores/existing_data/weaviate_existing_data.ipynb
index 32d61de77ddb841d8624860675ce1a85bdb512d5..b9c2800180c61c089e7db63ec9a0cc2116b66947 100644
--- a/docs/examples/vector_stores/existing_data/weaviate_existing_data.ipynb
+++ b/docs/examples/vector_stores/existing_data/weaviate_existing_data.ipynb
@@ -107,25 +107,36 @@
     "    {\n",
     "        \"title\": \"To Kill a Mockingbird\",\n",
     "        \"author\": \"Harper Lee\",\n",
-    "        \"content\": \"To Kill a Mockingbird is a novel by Harper Lee published in 1960...\",\n",
+    "        \"content\": (\n",
+    "            \"To Kill a Mockingbird is a novel by Harper Lee published in\"\n",
+    "            \" 1960...\"\n",
+    "        ),\n",
     "        \"year\": 1960,\n",
     "    },\n",
     "    {\n",
     "        \"title\": \"1984\",\n",
     "        \"author\": \"George Orwell\",\n",
-    "        \"content\": \"1984 is a dystopian novel by George Orwell published in 1949...\",\n",
+    "        \"content\": (\n",
+    "            \"1984 is a dystopian novel by George Orwell published in 1949...\"\n",
+    "        ),\n",
     "        \"year\": 1949,\n",
     "    },\n",
     "    {\n",
     "        \"title\": \"The Great Gatsby\",\n",
     "        \"author\": \"F. Scott Fitzgerald\",\n",
-    "        \"content\": \"The Great Gatsby is a novel by F. Scott Fitzgerald published in 1925...\",\n",
+    "        \"content\": (\n",
+    "            \"The Great Gatsby is a novel by F. Scott Fitzgerald published in\"\n",
+    "            \" 1925...\"\n",
+    "        ),\n",
     "        \"year\": 1925,\n",
     "    },\n",
     "    {\n",
     "        \"title\": \"Pride and Prejudice\",\n",
     "        \"author\": \"Jane Austen\",\n",
-    "        \"content\": \"Pride and Prejudice is a novel by Jane Austen published in 1813...\",\n",
+    "        \"content\": (\n",
+    "            \"Pride and Prejudice is a novel by Jane Austen published in\"\n",
+    "            \" 1813...\"\n",
+    "        ),\n",
     "        \"year\": 1813,\n",
     "    },\n",
     "]"
@@ -163,7 +174,9 @@
     "with client.batch as batch:\n",
     "    for book in books:\n",
     "        vector = embed_model.get_text_embedding(book[\"content\"])\n",
-    "        batch.add_data_object(data_object=book, class_name=\"Book\", vector=vector)"
+    "        batch.add_data_object(\n",
+    "            data_object=book, class_name=\"Book\", vector=vector\n",
+    "        )"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/pinecone_auto_retriever.ipynb b/docs/examples/vector_stores/pinecone_auto_retriever.ipynb
index 0423ce5fcd33cb80aa4d27954ae3442ff4f0381b..c7e1ad56635d3bc04f52757cb9a8df99c36a2359 100644
--- a/docs/examples/vector_stores/pinecone_auto_retriever.ipynb
+++ b/docs/examples/vector_stores/pinecone_auto_retriever.ipynb
@@ -104,35 +104,56 @@
     "\n",
     "nodes = [\n",
     "    TextNode(\n",
-    "        text=\"Michael Jordan is a retired professional basketball player, widely regarded as one of the greatest basketball players of all time.\",\n",
+    "        text=(\n",
+    "            \"Michael Jordan is a retired professional basketball player,\"\n",
+    "            \" widely regarded as one of the greatest basketball players of all\"\n",
+    "            \" time.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Sports\",\n",
     "            \"country\": \"United States\",\n",
     "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Angelina Jolie is an American actress, filmmaker, and humanitarian. She has received numerous awards for her acting and is known for her philanthropic work.\",\n",
+    "        text=(\n",
+    "            \"Angelina Jolie is an American actress, filmmaker, and\"\n",
+    "            \" humanitarian. She has received numerous awards for her acting\"\n",
+    "            \" and is known for her philanthropic work.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Entertainment\",\n",
     "            \"country\": \"United States\",\n",
     "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Elon Musk is a business magnate, industrial designer, and engineer. He is the founder, CEO, and lead designer of SpaceX, Tesla, Inc., Neuralink, and The Boring Company.\",\n",
+    "        text=(\n",
+    "            \"Elon Musk is a business magnate, industrial designer, and\"\n",
+    "            \" engineer. He is the founder, CEO, and lead designer of SpaceX,\"\n",
+    "            \" Tesla, Inc., Neuralink, and The Boring Company.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Business\",\n",
     "            \"country\": \"United States\",\n",
     "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Rihanna is a Barbadian singer, actress, and businesswoman. She has achieved significant success in the music industry and is known for her versatile musical style.\",\n",
+    "        text=(\n",
+    "            \"Rihanna is a Barbadian singer, actress, and businesswoman. She\"\n",
+    "            \" has achieved significant success in the music industry and is\"\n",
+    "            \" known for her versatile musical style.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Music\",\n",
     "            \"country\": \"Barbados\",\n",
     "        },\n",
     "    ),\n",
     "    TextNode(\n",
-    "        text=\"Cristiano Ronaldo is a Portuguese professional footballer who is considered one of the greatest football players of all time. He has won numerous awards and set multiple records during his career.\",\n",
+    "        text=(\n",
+    "            \"Cristiano Ronaldo is a Portuguese professional footballer who is\"\n",
+    "            \" considered one of the greatest football players of all time. He\"\n",
+    "            \" has won numerous awards and set multiple records during his\"\n",
+    "            \" career.\"\n",
+    "        ),\n",
     "        metadata={\n",
     "            \"category\": \"Sports\",\n",
     "            \"country\": \"Portugal\",\n",
@@ -148,7 +169,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vector_store = PineconeVectorStore(pinecone_index=pinecone_index, namespace=\"test\")\n",
+    "vector_store = PineconeVectorStore(\n",
+    "    pinecone_index=pinecone_index, namespace=\"test\"\n",
+    ")\n",
     "storage_context = StorageContext.from_defaults(vector_store=vector_store)"
    ]
   },
@@ -182,7 +205,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever\n",
+    "from llama_index.indices.vector_store.retrievers import (\n",
+    "    VectorIndexAutoRetriever,\n",
+    ")\n",
     "from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n",
     "\n",
     "\n",
@@ -192,16 +217,24 @@
     "        MetadataInfo(\n",
     "            name=\"category\",\n",
     "            type=\"str\",\n",
-    "            description=\"Category of the celebrity, one of [Sports, Entertainment, Business, Music]\",\n",
+    "            description=(\n",
+    "                \"Category of the celebrity, one of [Sports, Entertainment,\"\n",
+    "                \" Business, Music]\"\n",
+    "            ),\n",
     "        ),\n",
     "        MetadataInfo(\n",
     "            name=\"country\",\n",
     "            type=\"str\",\n",
-    "            description=\"Country of the celebrity, one of [United States, Barbados, Portugal]\",\n",
+    "            description=(\n",
+    "                \"Country of the celebrity, one of [United States, Barbados,\"\n",
+    "                \" Portugal]\"\n",
+    "            ),\n",
     "        ),\n",
     "    ],\n",
     ")\n",
-    "retriever = VectorIndexAutoRetriever(index, vector_store_info=vector_store_info)"
+    "retriever = VectorIndexAutoRetriever(\n",
+    "    index, vector_store_info=vector_store_info\n",
+    ")"
    ]
   },
   {
diff --git a/docs/examples/vector_stores/pinecone_metadata_filter.ipynb b/docs/examples/vector_stores/pinecone_metadata_filter.ipynb
index f715a7cc82534744942558a17991a81c8b373206..cade6540dd478a20feb6288ca2dbd3351d634a75 100644
--- a/docs/examples/vector_stores/pinecone_metadata_filter.ipynb
+++ b/docs/examples/vector_stores/pinecone_metadata_filter.ipynb
@@ -154,7 +154,9 @@
    "source": [
     "from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n",
     "\n",
-    "filters = MetadataFilters(filters=[ExactMatchFilter(key=\"theme\", value=\"Mafia\")])"
+    "filters = MetadataFilters(\n",
+    "    filters=[ExactMatchFilter(key=\"theme\", value=\"Mafia\")]\n",
+    ")"
    ]
   },
   {
@@ -193,7 +195,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "retriever = index.as_retriever(vector_store_kwargs={\"filter\": {\"theme\": \"Mafia\"}})\n",
+    "retriever = index.as_retriever(\n",
+    "    vector_store_kwargs={\"filter\": {\"theme\": \"Mafia\"}}\n",
+    ")\n",
     "retriever.retrieve(\"What is inception about?\")"
    ]
   }
diff --git a/docs/examples/vector_stores/postgres.ipynb b/docs/examples/vector_stores/postgres.ipynb
index a143241fec6a0b6bb1e31f7cfa43a9ecfaf6a2c2..d470ade5227ef30d9ee8ea8b7aad63440bd7de6d 100644
--- a/docs/examples/vector_stores/postgres.ipynb
+++ b/docs/examples/vector_stores/postgres.ipynb
@@ -358,7 +358,9 @@
     "    text_search_config=\"english\",\n",
     ")\n",
     "\n",
-    "storage_context = StorageContext.from_defaults(vector_store=hybrid_vector_store)\n",
+    "storage_context = StorageContext.from_defaults(\n",
+    "    vector_store=hybrid_vector_store\n",
+    ")\n",
     "hybrid_index = VectorStoreIndex.from_documents(\n",
     "    documents, storage_context=storage_context\n",
     ")"