diff --git a/.vscode/settings.json b/.vscode/settings.json
index 598a079bcd4d20f990296e3c88becc5af13bc09a..5e831969672a1cdeac971c7b25a84f6fc107658d 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,10 +1,9 @@
 {
-  "python.formatting.provider": "none",
-  "editor.formatOnSave": true,
-  "editor.codeActionsOnSave": {
-    "source.organizeImports": true
-  },
   "[python]": {
+    "editor.formatOnSave": true,
+    "editor.codeActionsOnSave": {
+      "source.fixAll": true
+    },
     "editor.defaultFormatter": "ms-python.black-formatter"
   },
   "python.testing.pytestArgs": ["tests"],
diff --git a/docs/conf.py b/docs/conf.py
index 7f0de33d8f93d6f5b1dcb9b3a3d12e108222047e..174e71ccce77949645300900003eae9c8e6a5525 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -13,8 +13,6 @@
 import os
 import sys
 
-import sphinx_rtd_theme
-
 sys.path.insert(0, os.path.abspath("../"))
 
 with open("../llama_index/VERSION") as f:
diff --git a/docs/examples/index_structs/knowledge_graph/KnowledgeGraphIndex_vs_VectorStoreIndex_vs_CustomIndex_combined.ipynb b/docs/examples/index_structs/knowledge_graph/KnowledgeGraphIndex_vs_VectorStoreIndex_vs_CustomIndex_combined.ipynb
index 0d0d09401b6cac6e5f95e49e127c3933d3e93c90..e43670db8cd6c172e739f624f99576d9f960cc52 100644
--- a/docs/examples/index_structs/knowledge_graph/KnowledgeGraphIndex_vs_VectorStoreIndex_vs_CustomIndex_combined.ipynb
+++ b/docs/examples/index_structs/knowledge_graph/KnowledgeGraphIndex_vs_VectorStoreIndex_vs_CustomIndex_combined.ipynb
@@ -215,9 +215,7 @@
     "os.environ[\"NEBULA_PASSWORD\"] = \"nebula\"\n",
     "os.environ[\n",
     "    \"NEBULA_ADDRESS\"\n",
-    "] = (  # assumed we have NebulaGraph 3.5.0 or newer installed locally\n",
-    "    \"127.0.0.1:9669\"\n",
-    ")\n",
+    "] = \"127.0.0.1:9669\"  # assumed we have NebulaGraph 3.5.0 or newer installed locally\n",
     "\n",
     "# Assume that the graph has already been created\n",
     "# Create a NebulaGraph cluster with:\n",
diff --git a/docs/examples/index_structs/knowledge_graph/NebulaGraphKGIndexDemo.ipynb b/docs/examples/index_structs/knowledge_graph/NebulaGraphKGIndexDemo.ipynb
index e16537a0ac611532aec889748f7f3951903199e2..4441284317600f3002fa36845afe1964dbe23589 100644
--- a/docs/examples/index_structs/knowledge_graph/NebulaGraphKGIndexDemo.ipynb
+++ b/docs/examples/index_structs/knowledge_graph/NebulaGraphKGIndexDemo.ipynb
@@ -189,9 +189,7 @@
     "] = \"<password>\"  # replace with your password, by default it is \"nebula\"\n",
     "os.environ[\n",
     "    \"NEBULA_ADDRESS\"\n",
-    "] = (  # assumed we have NebulaGraph 3.5.0 or newer installed locally\n",
-    "    \"127.0.0.1:9669\"\n",
-    ")\n",
+    "] = \"127.0.0.1:9669\"  # assumed we have NebulaGraph 3.5.0 or newer installed locally\n",
     "\n",
     "# Assume that the graph has already been created\n",
     "# Create a NebulaGraph cluster with:\n",
diff --git a/docs/examples/low_level/ingestion.ipynb b/docs/examples/low_level/ingestion.ipynb
index 2d6387d8338604dd4ffd030eae3787a1d83a93e0..d35129d0598ab80222dbd5f93fb047e608a5faaa 100644
--- a/docs/examples/low_level/ingestion.ipynb
+++ b/docs/examples/low_level/ingestion.ipynb
@@ -111,8 +111,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "dotenv_path = (  # Google Colabs will not let you open a .env, but you can set\n",
-    "    \"env\"\n",
+    "dotenv_path = (\n",
+    "    \"env\"  # Google Colabs will not let you open a .env, but you can set\n",
     ")\n",
     "with open(dotenv_path, \"w\") as f:\n",
     "    f.write('PINECONE_API_KEY=\"<your api key>\"\\n')\n",
diff --git a/llama_index/agent/react/base.py b/llama_index/agent/react/base.py
index edb1a189e8a4f8ca3685df8e78ebf6949ff25a15..fbffcbc3f40819d6e08757121bdd320198a878a4 100644
--- a/llama_index/agent/react/base.py
+++ b/llama_index/agent/react/base.py
@@ -15,7 +15,6 @@ from typing import (
 )
 
 from aiostream import stream as async_stream
-from aiostream.core import Stream
 
 from llama_index.agent.react.formatter import ReActChatFormatter
 from llama_index.agent.react.output_parser import ReActOutputParser
diff --git a/llama_index/agent/types.py b/llama_index/agent/types.py
index 7aa73e74ac1a67047999b162e9c300a7260df2bf..8cd296b896c986fea1a74d6e165ae0dbec8ff9ad 100644
--- a/llama_index/agent/types.py
+++ b/llama_index/agent/types.py
@@ -6,7 +6,7 @@ from llama_index.chat_engine.types import BaseChatEngine, StreamingAgentChatResp
 from llama_index.indices.query.base import BaseQueryEngine
 from llama_index.indices.query.schema import QueryBundle
 from llama_index.llms.base import ChatMessage
-from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType, PromptMixinType
 from llama_index.response.schema import RESPONSE_TYPE, Response
 
 
diff --git a/llama_index/callbacks/finetuning_handler.py b/llama_index/callbacks/finetuning_handler.py
index 97927cb437641b3ee65ad17786bd24b2d393e114..811c7be8189a20b4c2ff3de5175ffc9c72b4aa21 100644
--- a/llama_index/callbacks/finetuning_handler.py
+++ b/llama_index/callbacks/finetuning_handler.py
@@ -1,8 +1,6 @@
 import json
 from abc import abstractmethod
-from typing import Any, Dict, List, Optional, Type
-
-from pydantic import BaseModel
+from typing import Any, Dict, List, Optional
 
 from llama_index.callbacks.base import BaseCallbackHandler
 from llama_index.callbacks.schema import CBEventType, EventPayload
diff --git a/llama_index/composability/base.py b/llama_index/composability/base.py
index 7012aa02415d275193a4b40ea7f7e62143885d88..1eb6b0cfc012b1ca324bbdfbdc68969a195fa976 100644
--- a/llama_index/composability/base.py
+++ b/llama_index/composability/base.py
@@ -1,4 +1,4 @@
 """Composable graph."""
 
 # TODO: remove this file, only keep for backwards compatibility
-from llama_index.indices.composability.graph import ComposableGraph
+from llama_index.indices.composability.graph import ComposableGraph  # noqa
diff --git a/llama_index/embeddings/text_embeddings_inference.py b/llama_index/embeddings/text_embeddings_inference.py
index 62162ee98daf3336fc40cbb5ced96023e7cf0227..e7140a2c4a7718836d2a8f474db46c6dcd402ed2 100644
--- a/llama_index/embeddings/text_embeddings_inference.py
+++ b/llama_index/embeddings/text_embeddings_inference.py
@@ -39,7 +39,7 @@ class TextEmbeddingsInference(BaseEmbedding):
         callback_manager: Optional[CallbackManager] = None,
     ):
         try:
-            import httpx
+            import httpx  # noqa
         except ImportError:
             raise ImportError(
                 "TextEmbeddingsInterface requires httpx to be installed.\n"
diff --git a/llama_index/evaluation/base.py b/llama_index/evaluation/base.py
index 037f017adbe11a79480093d1dc6756ef0e01491a..bd44cc7ffe809ecef35ba7e3066e33d4c66e8c5c 100644
--- a/llama_index/evaluation/base.py
+++ b/llama_index/evaluation/base.py
@@ -1,10 +1,10 @@
 """Evaluator."""
 import asyncio
-from abc import ABC, abstractmethod
+from abc import abstractmethod
 from typing import Any, Optional, Sequence
 
 from llama_index.bridge.pydantic import BaseModel, Field
-from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
+from llama_index.prompts.mixin import PromptMixin, PromptMixinType
 from llama_index.response.schema import Response
 
 
diff --git a/llama_index/evaluation/correctness.py b/llama_index/evaluation/correctness.py
index 5386af64430d350c27a5adba1facd1f51e3fdacd..8257d63208f65814c736ad11e77bd4666bca0843 100644
--- a/llama_index/evaluation/correctness.py
+++ b/llama_index/evaluation/correctness.py
@@ -10,7 +10,7 @@ from llama_index.prompts import (
     MessageRole,
     PromptTemplate,
 )
-from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 
 DEFAULT_SYSTEM_TEMPLATE = """
 You are an expert evaluation system for a question answering chatbot.
diff --git a/llama_index/evaluation/dataset_generation.py b/llama_index/evaluation/dataset_generation.py
index 65514fad01ffba0f706fca01d5d8b17a48b78d90..87bad050e499b559a234f21ce8c853af17042359 100644
--- a/llama_index/evaluation/dataset_generation.py
+++ b/llama_index/evaluation/dataset_generation.py
@@ -5,7 +5,7 @@ import asyncio
 import json
 import re
 import uuid
-from typing import Dict, List, Optional, Tuple
+from typing import Dict, List, Tuple
 
 from pydantic import BaseModel, Field
 
diff --git a/llama_index/evaluation/faithfulness.py b/llama_index/evaluation/faithfulness.py
index e972f855c4be77cc0f87602f74baad7a5de57d13..46cd3f36764790e0e0cf54737fe4f5bc08406182 100644
--- a/llama_index/evaluation/faithfulness.py
+++ b/llama_index/evaluation/faithfulness.py
@@ -1,13 +1,13 @@
 """Faithfulness evaluation."""
 from __future__ import annotations
 
-from typing import Any, Optional, Sequence, Union
+from typing import Any, Sequence
 
 from llama_index import ServiceContext
 from llama_index.evaluation.base import BaseEvaluator, EvaluationResult
 from llama_index.indices import SummaryIndex
 from llama_index.prompts import BasePromptTemplate, PromptTemplate
-from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 from llama_index.schema import Document
 
 DEFAULT_EVAL_TEMPLATE = PromptTemplate(
diff --git a/llama_index/evaluation/guideline.py b/llama_index/evaluation/guideline.py
index 95ef50a2892f39dcfe9158513629b497007ae495..debb1d43705d7e9a6909e84831025a1e3eec023a 100644
--- a/llama_index/evaluation/guideline.py
+++ b/llama_index/evaluation/guideline.py
@@ -7,7 +7,7 @@ from llama_index.bridge.pydantic import BaseModel, Field
 from llama_index.evaluation.base import BaseEvaluator, EvaluationResult
 from llama_index.output_parsers import PydanticOutputParser
 from llama_index.prompts import BasePromptTemplate, PromptTemplate
-from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 
 logger = logging.getLogger(__name__)
 
diff --git a/llama_index/evaluation/pairwise.py b/llama_index/evaluation/pairwise.py
index d11e2d5af60f6a58c06af272a1455ac69196635f..4223f88b7ab472e57fb3eb766c3ecc128c442d72 100644
--- a/llama_index/evaluation/pairwise.py
+++ b/llama_index/evaluation/pairwise.py
@@ -11,7 +11,7 @@ from llama_index.prompts import (
     MessageRole,
     PromptTemplate,
 )
-from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 
 DEFAULT_SYSTEM_TEMPLATE = """
 You are an expert evaluation system for a question answering chatbot.
diff --git a/llama_index/evaluation/relevancy.py b/llama_index/evaluation/relevancy.py
index 4ba37a75a4a301895007515ec0c3a1c4aa6211ce..0ee2f1e42edd9c0af4335c5b389fc134b3d06eb2 100644
--- a/llama_index/evaluation/relevancy.py
+++ b/llama_index/evaluation/relevancy.py
@@ -1,13 +1,13 @@
 """Relevancy evaluation."""
 from __future__ import annotations
 
-from typing import Any, Optional, Sequence, Union
+from typing import Any, Sequence
 
 from llama_index import ServiceContext
 from llama_index.evaluation.base import BaseEvaluator, EvaluationResult
 from llama_index.indices import SummaryIndex
 from llama_index.prompts import BasePromptTemplate, PromptTemplate
-from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 from llama_index.schema import Document
 
 DEFAULT_EVAL_TEMPLATE = PromptTemplate(
diff --git a/llama_index/evaluation/semantic_similarity.py b/llama_index/evaluation/semantic_similarity.py
index e2ad6c70d9ba7e45e2e78d7dcbb29be9bac19b93..1e2d7efdd4be8a69be776efc70f9de9cc06a1f6f 100644
--- a/llama_index/evaluation/semantic_similarity.py
+++ b/llama_index/evaluation/semantic_similarity.py
@@ -3,7 +3,7 @@ from typing import Any, Callable, Optional, Sequence
 from llama_index.embeddings.base import SimilarityMode, similarity
 from llama_index.evaluation.base import BaseEvaluator, EvaluationResult
 from llama_index.indices.service_context import ServiceContext
-from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 
 
 class SemanticSimilarityEvaluator(BaseEvaluator):
diff --git a/llama_index/finetuning/cross_encoders/cross_encoder.py b/llama_index/finetuning/cross_encoders/cross_encoder.py
index 2ad4b21385bd1b21b2e3e0a7bcf7ce0130813c18..2bf783c45935c5277617898d81dc64fbfe98da9e 100644
--- a/llama_index/finetuning/cross_encoders/cross_encoder.py
+++ b/llama_index/finetuning/cross_encoders/cross_encoder.py
@@ -108,7 +108,7 @@ class CrossEncoderFinetuneEngine(BaseCrossEncoderFinetuningEngine):
                 self.model.model.push_to_hub(repo_id=repo_id)
                 self.model.tokenizer.push_to_hub(repo_id=repo_id)
 
-            except ValueError as e:
+            except ValueError:
                 raise ValueError(
                     "HuggingFace CLI/Hub login not "
                     "completed provide token to login using"
diff --git a/llama_index/graph_stores/nebulagraph.py b/llama_index/graph_stores/nebulagraph.py
index 3ea8e41774afd916f54a6fd0f19533e3402e8099..4084556509c080158389bcdacc4ae4649f2e5095 100644
--- a/llama_index/graph_stores/nebulagraph.py
+++ b/llama_index/graph_stores/nebulagraph.py
@@ -130,7 +130,7 @@ class NebulaGraphStore(GraphStore):
             **kwargs: Keyword arguments.
         """
         try:
-            import nebula3
+            import nebula3  # noqa
         except ImportError:
             raise ImportError(
                 "Please install NebulaGraph Python client first: "
diff --git a/llama_index/indices/base_retriever.py b/llama_index/indices/base_retriever.py
index 819c389bf457087c5219c4c469029e550e4cdeac..770d76790de5479489f011d3091dd0494c8adfc7 100644
--- a/llama_index/indices/base_retriever.py
+++ b/llama_index/indices/base_retriever.py
@@ -1,4 +1,4 @@
-from abc import ABC, abstractmethod
+from abc import abstractmethod
 from typing import List, Optional
 
 from llama_index.indices.query.schema import QueryBundle, QueryType
diff --git a/llama_index/indices/managed/vectara/retriever.py b/llama_index/indices/managed/vectara/retriever.py
index 5fb08c263718ee5d8fd4701517edc755ffd2b839..58f3a54e32c8e176dc5eeee49ad702dafbfe8427 100644
--- a/llama_index/indices/managed/vectara/retriever.py
+++ b/llama_index/indices/managed/vectara/retriever.py
@@ -4,7 +4,7 @@ An index that that is built on top of Vectara.
 
 import json
 import logging
-from typing import Any, List, Optional
+from typing import Any, List
 
 from llama_index.constants import DEFAULT_SIMILARITY_TOP_K
 from llama_index.indices.base_retriever import BaseRetriever
@@ -69,7 +69,6 @@ class VectaraRetriever(BaseRetriever):
         Args:
             query: Query Bundle
         """
-        similarity_top_k = self._similarity_top_k
         corpus_key = {
             "customer_id": self._index._vectara_customer_id,
             "corpus_id": self._index._vectara_corpus_id,
diff --git a/llama_index/indices/postprocessor/llm_rerank.py b/llama_index/indices/postprocessor/llm_rerank.py
index 27ecef1da5992c410e5f164740fb0ade1453977c..432ededb00fedca430c85bd60ffd1732718a191e 100644
--- a/llama_index/indices/postprocessor/llm_rerank.py
+++ b/llama_index/indices/postprocessor/llm_rerank.py
@@ -11,7 +11,7 @@ from llama_index.indices.utils import (
 )
 from llama_index.prompts import BasePromptTemplate
 from llama_index.prompts.default_prompts import DEFAULT_CHOICE_SELECT_PROMPT
-from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 from llama_index.schema import NodeWithScore
 
 
diff --git a/llama_index/indices/postprocessor/longllmlingua.py b/llama_index/indices/postprocessor/longllmlingua.py
index 1e0391d7c093c2229a70885dfd748c404f27e10b..034de82eb4402ed72b928e1f22d4be6a47ca32a2 100644
--- a/llama_index/indices/postprocessor/longllmlingua.py
+++ b/llama_index/indices/postprocessor/longllmlingua.py
@@ -1,12 +1,9 @@
 """Optimization related classes and functions."""
 import logging
-from typing import Any, Callable, Dict, List, Optional
+from typing import Any, Dict, List, Optional
 
 from llama_index.bridge.pydantic import Field, PrivateAttr
-from llama_index.embeddings.base import BaseEmbedding
-from llama_index.embeddings.openai import OpenAIEmbedding
 from llama_index.indices.postprocessor.types import BaseNodePostprocessor
-from llama_index.indices.query.embedding_utils import get_top_k_embeddings
 from llama_index.indices.query.schema import QueryBundle
 from llama_index.schema import MetadataMode, NodeWithScore, TextNode
 
diff --git a/llama_index/indices/postprocessor/types.py b/llama_index/indices/postprocessor/types.py
index c254a675253ffe58bdd8f1331578b31b59b1c251..a0e68a2954e0ee2c466e8a8803d1fb802f94fd3b 100644
--- a/llama_index/indices/postprocessor/types.py
+++ b/llama_index/indices/postprocessor/types.py
@@ -4,7 +4,7 @@ from typing import List, Optional
 from llama_index.bridge.pydantic import Field
 from llama_index.callbacks import CallbackManager
 from llama_index.indices.query.schema import QueryBundle
-from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType, PromptMixinType
 from llama_index.schema import BaseComponent, NodeWithScore
 
 
diff --git a/llama_index/indices/query/base.py b/llama_index/indices/query/base.py
index ddfb1d76e4262e4b2e3da01623c0b06529391e7b..8e7596db9eed8b27b1e33913b09610b15d14c756 100644
--- a/llama_index/indices/query/base.py
+++ b/llama_index/indices/query/base.py
@@ -1,12 +1,11 @@
 """Base query engine."""
 
 import logging
-from abc import ABC, abstractmethod
+from abc import abstractmethod
 from typing import Any, Dict, List, Optional, Sequence
 
 from llama_index.callbacks.base import CallbackManager
 from llama_index.indices.query.schema import QueryBundle, QueryType
-from llama_index.prompts.base import BasePromptTemplate
 from llama_index.prompts.mixin import PromptDictType, PromptMixin
 from llama_index.response.schema import RESPONSE_TYPE
 from llama_index.schema import NodeWithScore
diff --git a/llama_index/indices/query/query_transform/base.py b/llama_index/indices/query/query_transform/base.py
index 5bcd8d0233996ace8711720b69ff650be58daa9a..f488cc664bfb5033855937c427a95d0271aab783 100644
--- a/llama_index/indices/query/query_transform/base.py
+++ b/llama_index/indices/query/query_transform/base.py
@@ -2,7 +2,7 @@
 
 import dataclasses
 from abc import abstractmethod
-from typing import Any, Dict, Optional, cast
+from typing import Dict, Optional, cast
 
 from llama_index.indices.query.query_transform.prompts import (
     DEFAULT_DECOMPOSE_QUERY_TRANSFORM_PROMPT,
diff --git a/llama_index/indices/struct_store/json_query.py b/llama_index/indices/struct_store/json_query.py
index d09faf68b03824e987aebe374e45bc5a315c0fdf..a749db43f152d047429825280eabcaca71fe0357 100644
--- a/llama_index/indices/struct_store/json_query.py
+++ b/llama_index/indices/struct_store/json_query.py
@@ -7,7 +7,7 @@ from llama_index.indices.query.schema import QueryBundle
 from llama_index.indices.service_context import ServiceContext
 from llama_index.prompts import BasePromptTemplate, PromptTemplate
 from llama_index.prompts.default_prompts import DEFAULT_JSON_PATH_PROMPT
-from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType, PromptMixinType
 from llama_index.prompts.prompt_type import PromptType
 from llama_index.response.schema import Response
 from llama_index.utils import print_text
diff --git a/llama_index/llm_predictor/vellum/predictor.py b/llama_index/llm_predictor/vellum/predictor.py
index 1d18448e15e7988536825dd99023285a04fa4a03..f3ebf2063153bdbb4c4b1bd69c7a905dd2e08f86 100644
--- a/llama_index/llm_predictor/vellum/predictor.py
+++ b/llama_index/llm_predictor/vellum/predictor.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from typing import Any, Optional, Tuple, cast
+from typing import Any, Tuple, cast
 
 from llama_index.bridge.pydantic import PrivateAttr
 from llama_index.callbacks import CallbackManager
diff --git a/llama_index/llm_predictor/vellum/types.py b/llama_index/llm_predictor/vellum/types.py
index 3655aec7b80af1191e4bcb60ee5ad752289e57a0..806900a5b7d3d847b355df0c45091609e52e5aa9 100644
--- a/llama_index/llm_predictor/vellum/types.py
+++ b/llama_index/llm_predictor/vellum/types.py
@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 from dataclasses import dataclass
-from typing import Optional
 
 
 @dataclass(frozen=True, eq=True)
diff --git a/llama_index/llms/ai21.py b/llama_index/llms/ai21.py
index bc77c164c6c53273a64a72b8205869fb16ea3d4a..305aadbcaf11c2642a48947180ea203feb65c7c6 100644
--- a/llama_index/llms/ai21.py
+++ b/llama_index/llms/ai21.py
@@ -44,7 +44,7 @@ class AI21(CustomLLM):
     ) -> None:
         """Initialize params."""
         try:
-            import ai21 as _
+            import ai21 as _  # noqa
         except ImportError as e:
             raise ImportError(
                 "You must install the `ai21` package to use AI21."
diff --git a/llama_index/llms/bedrock.py b/llama_index/llms/bedrock.py
index e143cb9dbcd275d87b68ed39e9097b2542b7cc08..76983977d4b8e6f4054c73771847111c649e7961 100644
--- a/llama_index/llms/bedrock.py
+++ b/llama_index/llms/bedrock.py
@@ -1,5 +1,4 @@
 import json
-import warnings
 from typing import Any, Dict, Optional, Sequence
 
 from llama_index.bridge.pydantic import Field, PrivateAttr
@@ -27,7 +26,6 @@ from llama_index.llms.bedrock_utils import (
     get_text_from_response,
     stream_completion_to_chat_decorator,
 )
-from llama_index.llms.custom import CustomLLM
 
 
 class Bedrock(LLM):
diff --git a/llama_index/llms/bedrock_utils.py b/llama_index/llms/bedrock_utils.py
index 7e1c00afd673df482fd0ca7421b66998c5315e05..a560d509fac464a821e69446b4fc71e807c14bfd 100644
--- a/llama_index/llms/bedrock_utils.py
+++ b/llama_index/llms/bedrock_utils.py
@@ -1,4 +1,3 @@
-import json
 import logging
 from typing import Any, Callable, Sequence
 
@@ -17,8 +16,6 @@ from llama_index.llms.base import (
     CompletionResponse,
     CompletionResponseGen,
     MessageRole,
-    llm_chat_callback,
-    llm_completion_callback,
 )
 from llama_index.llms.generic_utils import (
     completion_response_to_chat_response,
@@ -98,7 +95,7 @@ def _create_retry_decorator(client: Any, max_retries: int) -> Callable[[Any], An
     # Wait 2^x * 1 second between each retry starting with
     # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
     try:
-        import boto3
+        import boto3  # noqa
     except ImportError as e:
         raise ImportError(
             "You must install the `boto3` package to use Bedrock."
diff --git a/llama_index/node_parser/unstructured_element.py b/llama_index/node_parser/unstructured_element.py
index ac826de31fc1c422e8758accc13bcf36633412a9..f2b940e9776234cf70e08b1549bf6e79868f1b24 100644
--- a/llama_index/node_parser/unstructured_element.py
+++ b/llama_index/node_parser/unstructured_element.py
@@ -121,7 +121,7 @@ def extract_table_summaries(
         try:
             response = query_engine.query(summary_query_str)
             element.table_output = cast(PydanticResponse, response).response
-        except ValidationError as e:
+        except ValidationError:
             # There was a pydantic validation error, so we will run with text completion
             # fill in the summary and leave other fields blank
             query_engine = index.as_query_engine()
@@ -227,8 +227,8 @@ class UnstructuredElementNodeParser(NodeParser):
     ) -> None:
         """Initialize."""
         try:
-            import lxml
-            import unstructured
+            import lxml  # noqa
+            import unstructured  # noqa
         except ImportError:
             raise ImportError(
                 "You must install the `unstructured` and `lxml` package to use this node parser."
diff --git a/llama_index/playground/base.py b/llama_index/playground/base.py
index 6ee2d3c2fa81dbec5f060e223495bfc869b58980..89fa2259c3cd82de797ea3522a7eef344e33996a 100644
--- a/llama_index/playground/base.py
+++ b/llama_index/playground/base.py
@@ -2,7 +2,7 @@
 from __future__ import annotations
 
 import time
-from typing import Any, Dict, List, Optional, Type, Union
+from typing import Any, Dict, List, Type
 
 import pandas as pd
 
diff --git a/llama_index/program/predefined/evaporate/extractor.py b/llama_index/program/predefined/evaporate/extractor.py
index 38bca869115365b6951eda77cf3f2b182ea6a669..bb3c3c15d282e00d1b6a7b379ff9f0d8e4a89807 100644
--- a/llama_index/program/predefined/evaporate/extractor.py
+++ b/llama_index/program/predefined/evaporate/extractor.py
@@ -231,7 +231,7 @@ class EvaporateExtractor:
                 with time_limit(1):
                     exec(fn_str, globals())
                     exec(f"result = get_{function_field}_field(node_text)", globals())
-            except TimeoutException as e:
+            except TimeoutException:
                 raise
             results.append(result)  # type: ignore[name-defined]
         return results
diff --git a/llama_index/program/utils.py b/llama_index/program/utils.py
index ff3431110c3d06e7d5407d1221578917762b9ed0..dc58d9891cc6be69ffade57500627cf455b6f644 100644
--- a/llama_index/program/utils.py
+++ b/llama_index/program/utils.py
@@ -1,6 +1,5 @@
 """Program utils."""
 
-from enum import Enum
 from typing import Any, List, Type
 
 from llama_index.bridge.pydantic import BaseModel, Field, create_model
diff --git a/llama_index/prompts/mixin.py b/llama_index/prompts/mixin.py
index 430fa458367d5cfe99e50ae15b46d0ec3951da79..ac60b9ea557dd95777fce9acc5ca1359ad0c20e1 100644
--- a/llama_index/prompts/mixin.py
+++ b/llama_index/prompts/mixin.py
@@ -3,7 +3,7 @@
 from abc import ABC, abstractmethod
 from collections import defaultdict
 from copy import deepcopy
-from typing import Dict, List, Optional, Tuple, Union
+from typing import Dict, Union
 
 from llama_index.prompts.base import BasePromptTemplate
 
diff --git a/llama_index/query_engine/__init__.py b/llama_index/query_engine/__init__.py
index d5a2ae707cf04daf2534a8a0b783eceec60e883a..b4d695b409ed5ff958341c8f73626fadb034c179 100644
--- a/llama_index/query_engine/__init__.py
+++ b/llama_index/query_engine/__init__.py
@@ -57,5 +57,6 @@ __all__ = [
     "CustomQueryEngine",
     # SQL
     "SQLTableRetrieverQueryEngine",
-    "NLSQLTableQueryEngine" "PGVectorSQLQueryEngine",
+    "NLSQLTableQueryEngine",
+    "PGVectorSQLQueryEngine",
 ]
diff --git a/llama_index/query_engine/flare/answer_inserter.py b/llama_index/query_engine/flare/answer_inserter.py
index 8bed81bbae02fda5675de91fcde8a6c0ed459715..9a2c7893eaf16b84f1fbc57458448714fff407ee 100644
--- a/llama_index/query_engine/flare/answer_inserter.py
+++ b/llama_index/query_engine/flare/answer_inserter.py
@@ -1,6 +1,6 @@
 """Answer inserter."""
 
-from abc import ABC, abstractmethod
+from abc import abstractmethod
 from typing import Any, Dict, List, Optional
 
 from llama_index.indices.service_context import ServiceContext
diff --git a/llama_index/query_engine/flare/base.py b/llama_index/query_engine/flare/base.py
index ba034578bd543fe80dfc8fdffa6c713735d620e9..169ee9f6cb787d788e231611338e85dd48386f0f 100644
--- a/llama_index/query_engine/flare/base.py
+++ b/llama_index/query_engine/flare/base.py
@@ -11,7 +11,7 @@ from llama_index.indices.query.base import BaseQueryEngine
 from llama_index.indices.query.schema import QueryBundle
 from llama_index.indices.service_context import ServiceContext
 from llama_index.prompts.base import BasePromptTemplate, PromptTemplate
-from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType, PromptMixinType
 from llama_index.query_engine.flare.answer_inserter import (
     BaseLookaheadAnswerInserter,
     LLMLookaheadAnswerInserter,
diff --git a/llama_index/query_engine/multistep_query_engine.py b/llama_index/query_engine/multistep_query_engine.py
index b4c73067dbb151cfe912751562138aa6f5d272c2..f106c8f3a654d4e1ad3096d5ac375eea42d10b69 100644
--- a/llama_index/query_engine/multistep_query_engine.py
+++ b/llama_index/query_engine/multistep_query_engine.py
@@ -4,7 +4,7 @@ from llama_index.callbacks.schema import CBEventType, EventPayload
 from llama_index.indices.query.base import BaseQueryEngine
 from llama_index.indices.query.query_transform.base import StepDecomposeQueryTransform
 from llama_index.indices.query.schema import QueryBundle
-from llama_index.prompts.mixin import PromptDictType, PromptMixinType
+from llama_index.prompts.mixin import PromptMixinType
 from llama_index.response.schema import RESPONSE_TYPE
 from llama_index.response_synthesizers import BaseSynthesizer, get_response_synthesizer
 from llama_index.schema import NodeWithScore, TextNode
diff --git a/llama_index/query_engine/pandas_query_engine.py b/llama_index/query_engine/pandas_query_engine.py
index 161cf1f3344239364fecead4b5232986f3535ec2..dd2f2c69e36c00f675e8c0a2e74439af6ca7b06c 100644
--- a/llama_index/query_engine/pandas_query_engine.py
+++ b/llama_index/query_engine/pandas_query_engine.py
@@ -75,7 +75,7 @@ def default_output_processor(
             pd.reset_option("display.max_colwidth")
             return output_str
 
-        except Exception as e:
+        except Exception:
             raise
     except Exception as e:
         err_string = (
diff --git a/llama_index/query_engine/retriever_query_engine.py b/llama_index/query_engine/retriever_query_engine.py
index cba9dd7edfa4d303df648683f6f4390a087477e6..3ab4561f2befd8566e2cb2be36d18c848bf2d2be 100644
--- a/llama_index/query_engine/retriever_query_engine.py
+++ b/llama_index/query_engine/retriever_query_engine.py
@@ -1,4 +1,4 @@
-from typing import Any, List, Optional, Sequence, Tuple
+from typing import Any, List, Optional, Sequence
 
 from llama_index.bridge.pydantic import BaseModel
 from llama_index.callbacks.base import CallbackManager
diff --git a/llama_index/query_engine/sub_question_query_engine.py b/llama_index/query_engine/sub_question_query_engine.py
index 20370fb981c164e5a01b643fbcfdb842d734b46b..ef8adf932147484e92761923820807b46aca1950 100644
--- a/llama_index/query_engine/sub_question_query_engine.py
+++ b/llama_index/query_engine/sub_question_query_engine.py
@@ -9,7 +9,7 @@ from llama_index.callbacks.schema import CBEventType, EventPayload
 from llama_index.indices.query.base import BaseQueryEngine
 from llama_index.indices.query.schema import QueryBundle
 from llama_index.indices.service_context import ServiceContext
-from llama_index.prompts.mixin import PromptDictType, PromptMixinType
+from llama_index.prompts.mixin import PromptMixinType
 from llama_index.question_gen.llm_generators import LLMQuestionGenerator
 from llama_index.question_gen.openai_generator import OpenAIQuestionGenerator
 from llama_index.question_gen.types import BaseQuestionGenerator, SubQuestion
diff --git a/llama_index/query_engine/transform_query_engine.py b/llama_index/query_engine/transform_query_engine.py
index 15870e5e8738e99516e4d8d22ab70b634c220cda..fffdadf1453b22f2266d26e5f3b07fe81da1da5f 100644
--- a/llama_index/query_engine/transform_query_engine.py
+++ b/llama_index/query_engine/transform_query_engine.py
@@ -4,7 +4,7 @@ from llama_index.callbacks.base import CallbackManager
 from llama_index.indices.query.base import BaseQueryEngine
 from llama_index.indices.query.query_transform.base import BaseQueryTransform
 from llama_index.indices.query.schema import QueryBundle
-from llama_index.prompts.mixin import PromptDictType, PromptMixinType
+from llama_index.prompts.mixin import PromptMixinType
 from llama_index.response.schema import RESPONSE_TYPE
 from llama_index.schema import NodeWithScore
 
diff --git a/llama_index/question_gen/guidance_generator.py b/llama_index/question_gen/guidance_generator.py
index 4f1cc4f67937558f9607a1659f32e51b3e558625..d78732bd4abcab81897077f5b4fdcdb3044252e8 100644
--- a/llama_index/question_gen/guidance_generator.py
+++ b/llama_index/question_gen/guidance_generator.py
@@ -3,7 +3,7 @@ from typing import TYPE_CHECKING, List, Optional, Sequence, cast
 from llama_index.indices.query.schema import QueryBundle
 from llama_index.program.guidance_program import GuidancePydanticProgram
 from llama_index.prompts.guidance_utils import convert_to_handlebars
-from llama_index.prompts.mixin import PromptDictType, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 from llama_index.question_gen.prompts import (
     DEFAULT_SUB_QUESTION_PROMPT_TMPL,
     build_tools_text,
diff --git a/llama_index/question_gen/llm_generators.py b/llama_index/question_gen/llm_generators.py
index 22cb8bbfb70806318768c736df73c47fb1fb4f2c..63a7501edb16cc3877942cd27f31e2aafa92267a 100644
--- a/llama_index/question_gen/llm_generators.py
+++ b/llama_index/question_gen/llm_generators.py
@@ -5,7 +5,7 @@ from llama_index.indices.service_context import ServiceContext
 from llama_index.llm_predictor.base import BaseLLMPredictor
 from llama_index.output_parsers.base import StructuredOutput
 from llama_index.prompts.base import BasePromptTemplate, PromptTemplate
-from llama_index.prompts.mixin import PromptDictType, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 from llama_index.prompts.prompt_type import PromptType
 from llama_index.question_gen.output_parser import SubQuestionOutputParser
 from llama_index.question_gen.prompts import (
diff --git a/llama_index/question_gen/openai_generator.py b/llama_index/question_gen/openai_generator.py
index 6515a913691449b0d82cd18c9ab5976c073728c0..3349d103563b95c210d3513d48466ceafeaec9a8 100644
--- a/llama_index/question_gen/openai_generator.py
+++ b/llama_index/question_gen/openai_generator.py
@@ -4,7 +4,7 @@ from llama_index.indices.query.schema import QueryBundle
 from llama_index.llms.base import LLM
 from llama_index.llms.openai import OpenAI
 from llama_index.program.openai_program import OpenAIPydanticProgram
-from llama_index.prompts.mixin import PromptDictType, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 from llama_index.question_gen.prompts import build_tools_text
 from llama_index.question_gen.types import (
     BaseQuestionGenerator,
diff --git a/llama_index/question_gen/types.py b/llama_index/question_gen/types.py
index 608fffafb988cc1ddd7d00ec1def8ba3538981f2..0cf3cc7fd8335f5e07972b6556b2b18ada4eafa0 100644
--- a/llama_index/question_gen/types.py
+++ b/llama_index/question_gen/types.py
@@ -1,4 +1,4 @@
-from abc import ABC, abstractmethod
+from abc import abstractmethod
 from typing import List, Sequence
 
 from llama_index.bridge.pydantic import BaseModel
diff --git a/llama_index/readers/deeplake.py b/llama_index/readers/deeplake.py
index 00c00f43a66e41c483d45359da3a97cbc2747931..54a039712898ace99a060c7cfc1c6a2822e4a636 100644
--- a/llama_index/readers/deeplake.py
+++ b/llama_index/readers/deeplake.py
@@ -67,7 +67,7 @@ class DeepLakeReader(BaseReader):
             "`deeplake` package not found, please run `pip install deeplake`"
         )
         try:
-            import deeplake
+            import deeplake  # noqa
         except ImportError:
             raise ImportError(import_err_msg)
         self.token = token
diff --git a/llama_index/readers/discord_reader.py b/llama_index/readers/discord_reader.py
index 0b400d2b38924b76306d5e514265a31814a2d583..1a9353bd53ecd491e1d34aaa5f275891442f8dcb 100644
--- a/llama_index/readers/discord_reader.py
+++ b/llama_index/readers/discord_reader.py
@@ -101,7 +101,7 @@ class DiscordReader(BasePydanticReader):
     def __init__(self, discord_token: Optional[str] = None) -> None:
         """Initialize with parameters."""
         try:
-            import discord
+            import discord  # noqa
         except ImportError:
             raise ImportError(
                 "`discord.py` package not found, please run `pip install discord.py`"
diff --git a/llama_index/readers/faiss.py b/llama_index/readers/faiss.py
index 3be122c77c8f34a9feca66622241736ca1e5f978..79e7ac1205f895498ba0a21ca417b6a2bd22fe72 100644
--- a/llama_index/readers/faiss.py
+++ b/llama_index/readers/faiss.py
@@ -30,7 +30,7 @@ class FaissReader(BaseReader):
             https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
         """
         try:
-            import faiss
+            import faiss  # noqa
         except ImportError:
             raise ImportError(import_err_msg)
 
diff --git a/llama_index/readers/file/image_caption_reader.py b/llama_index/readers/file/image_caption_reader.py
index 7ab94a0b0e79500ca6499ae4dc9e9ebaff876f94..ce2f4c814499a30ede65fb8af86ba4900b0230e3 100644
--- a/llama_index/readers/file/image_caption_reader.py
+++ b/llama_index/readers/file/image_caption_reader.py
@@ -23,9 +23,9 @@ class ImageCaptionReader(BaseReader):
         if parser_config is None:
             """Init parser."""
             try:
-                import sentencepiece
+                import sentencepiece  # noqa
                 import torch
-                from PIL import Image
+                from PIL import Image  # noqa
                 from transformers import BlipForConditionalGeneration, BlipProcessor
             except ImportError:
                 raise ImportError(
diff --git a/llama_index/readers/file/image_reader.py b/llama_index/readers/file/image_reader.py
index e584d87a603c93acc6ee2d6a505c12fb41185fb0..15e4c3d3d18f958e93f2ac31db3cda74f0fd31b0 100644
--- a/llama_index/readers/file/image_reader.py
+++ b/llama_index/readers/file/image_reader.py
@@ -29,9 +29,9 @@ class ImageReader(BaseReader):
         """Init parser."""
         if parser_config is None and parse_text:
             try:
-                import sentencepiece
-                import torch
-                from PIL import Image
+                import sentencepiece  # noqa
+                import torch  # noqa
+                from PIL import Image  # noqa
                 from transformers import DonutProcessor, VisionEncoderDecoderModel
             except ImportError:
                 raise ImportError(
diff --git a/llama_index/readers/file/image_vision_llm_reader.py b/llama_index/readers/file/image_vision_llm_reader.py
index e5e729e53a7ac358f6a905043430bbc43d059761..bb772c5cc99b25d021fc54629cc5338907814306 100644
--- a/llama_index/readers/file/image_vision_llm_reader.py
+++ b/llama_index/readers/file/image_vision_llm_reader.py
@@ -22,9 +22,9 @@ class ImageVisionLLMReader(BaseReader):
         """Init params."""
         if parser_config is None:
             try:
-                import sentencepiece
+                import sentencepiece  # noqa
                 import torch
-                from PIL import Image
+                from PIL import Image  # noqa
                 from transformers import Blip2ForConditionalGeneration, Blip2Processor
             except ImportError:
                 raise ImportError(
diff --git a/llama_index/readers/file/mbox_reader.py b/llama_index/readers/file/mbox_reader.py
index dfc84fb50a12e674b88966898ccd72a2d34ab485..5e495a51f493dfc82c755ba24fd333b2f7c317a1 100644
--- a/llama_index/readers/file/mbox_reader.py
+++ b/llama_index/readers/file/mbox_reader.py
@@ -39,7 +39,7 @@ class MboxReader(BaseReader):
     ) -> None:
         """Init params."""
         try:
-            from bs4 import BeautifulSoup
+            from bs4 import BeautifulSoup  # noqa
         except ImportError:
             raise ImportError(
                 "`beautifulsoup4` package not found: `pip install beautifulsoup4`"
diff --git a/llama_index/readers/file/slides_reader.py b/llama_index/readers/file/slides_reader.py
index 19849833ff29adf2c8756e6d6fec06a4ba286b4e..709845bd5f7ca31f9240cc41aaa6f2fa8d6daa63 100644
--- a/llama_index/readers/file/slides_reader.py
+++ b/llama_index/readers/file/slides_reader.py
@@ -23,9 +23,9 @@ class PptxReader(BaseReader):
     def __init__(self) -> None:
         """Init parser."""
         try:
-            import torch
-            from PIL import Image
-            from pptx import Presentation
+            import torch  # noqa
+            from PIL import Image  # noqa
+            from pptx import Presentation  # noqa
             from transformers import (
                 AutoTokenizer,
                 VisionEncoderDecoderModel,
diff --git a/llama_index/readers/google_readers/gdocs.py b/llama_index/readers/google_readers/gdocs.py
index 502dac0d9d434c5154a570e645e7d6dcf0d50dfb..c96daf75694fb7ff5b6b5d19d9814cdaaa6775e4 100644
--- a/llama_index/readers/google_readers/gdocs.py
+++ b/llama_index/readers/google_readers/gdocs.py
@@ -38,9 +38,9 @@ class GoogleDocsReader(BasePydanticReader):
     def __init__(self) -> None:
         """Initialize with parameters."""
         try:
-            import google
-            import google_auth_oauthlib
-            import googleapiclient
+            import google  # noqa
+            import google_auth_oauthlib  # noqa
+            import googleapiclient  # noqa
         except ImportError:
             raise ImportError(
                 "`google_auth_oauthlib`, `googleapiclient` and `google` "
diff --git a/llama_index/readers/google_readers/gsheets.py b/llama_index/readers/google_readers/gsheets.py
index 74a3f0d4cd17030a621b51a31779801f79807312..a02af79a426fb06841d5b68ac0455ad2447ce298 100644
--- a/llama_index/readers/google_readers/gsheets.py
+++ b/llama_index/readers/google_readers/gsheets.py
@@ -38,9 +38,9 @@ class GoogleSheetsReader(BasePydanticReader):
     def __init__(self) -> None:
         """Initialize with parameters."""
         try:
-            import google
-            import google_auth_oauthlib
-            import googleapiclient
+            import google  # noqa
+            import google_auth_oauthlib  # noqa
+            import googleapiclient  # noqa
         except ImportError:
             raise ImportError(
                 "`google_auth_oauthlib`, `googleapiclient` and `google` "
diff --git a/llama_index/readers/metal.py b/llama_index/readers/metal.py
index 2e4740d6f19c5345fdad088036f54cb3a856a996..29ea228a61b119466e9294b18ccab30f3af92f9a 100644
--- a/llama_index/readers/metal.py
+++ b/llama_index/readers/metal.py
@@ -18,7 +18,7 @@ class MetalReader(BaseReader):
             "`metal_sdk` package not found, please run `pip install metal_sdk`"
         )
         try:
-            import metal_sdk
+            import metal_sdk  # noqa
         except ImportError:
             raise ImportError(import_err_msg)
         from metal_sdk.metal import Metal
diff --git a/llama_index/readers/milvus.py b/llama_index/readers/milvus.py
index 6b0b982f81dfb9f74e5e6bce24e65ce49dbde1ad..1a0489a05ce70be0704329f3261c5980ac21036f 100644
--- a/llama_index/readers/milvus.py
+++ b/llama_index/readers/milvus.py
@@ -23,7 +23,7 @@ class MilvusReader(BaseReader):
             "`pymilvus` package not found, please run `pip install pymilvus`"
         )
         try:
-            import pymilvus
+            import pymilvus  # noqa
         except ImportError:
             raise ImportError(import_err_msg)
 
@@ -50,7 +50,7 @@ class MilvusReader(BaseReader):
         }
         try:
             self._create_connection_alias()
-        except MilvusException as e:
+        except MilvusException:
             raise
 
     def load_data(
@@ -75,13 +75,13 @@ class MilvusReader(BaseReader):
 
         try:
             self.collection = Collection(collection_name, using=self.alias)
-        except MilvusException as e:
+        except MilvusException:
             raise
 
         assert self.collection is not None
         try:
             self.collection.load()
-        except MilvusException as e:
+        except MilvusException:
             raise
         if search_params is None:
             search_params = self._create_search_params()
diff --git a/llama_index/readers/schema/base.py b/llama_index/readers/schema/base.py
index 648918bd175a262c2f6c3c22021a399a2fa6821c..f209eccb3ccfe66947fefe90aa164af0131ee860 100644
--- a/llama_index/readers/schema/base.py
+++ b/llama_index/readers/schema/base.py
@@ -1,2 +1,2 @@
 # TODO: remove this file, only keep for backwards compatibility
-from llama_index.schema import Document, ImageDocument
+from llama_index.schema import Document, ImageDocument  # noqa
diff --git a/llama_index/readers/steamship/file_reader.py b/llama_index/readers/steamship/file_reader.py
index 20688b63a6aae70efdcb63ad344450d9e98d4c53..9eab467312aac834ddd7ba76c4a197958ad252b7 100644
--- a/llama_index/readers/steamship/file_reader.py
+++ b/llama_index/readers/steamship/file_reader.py
@@ -21,7 +21,7 @@ class SteamshipFileReader(BaseReader):
     def __init__(self, api_key: Optional[str] = None) -> None:
         """Initialize the Reader."""
         try:
-            import steamship
+            import steamship  # noqa
 
             self.api_key = api_key
         except ImportError:
diff --git a/llama_index/readers/weaviate/reader.py b/llama_index/readers/weaviate/reader.py
index fe3df76158131817a3bc92a68cfeb0399554e7b5..4b9d46f2019a152f10db4d439552942861049ead 100644
--- a/llama_index/readers/weaviate/reader.py
+++ b/llama_index/readers/weaviate/reader.py
@@ -26,9 +26,9 @@ class WeaviateReader(BaseReader):
     ) -> None:
         """Initialize with parameters."""
         try:
-            import weaviate
+            import weaviate  # noqa
             from weaviate import Client
-            from weaviate.auth import AuthCredentials
+            from weaviate.auth import AuthCredentials  # noqa
         except ImportError:
             raise ImportError(
                 "`weaviate` package not found, please run `pip install weaviate-client`"
diff --git a/llama_index/readers/web.py b/llama_index/readers/web.py
index 6bab9db9053da74dfb4371c439807620362c4773..da31874f7fa35c7e3f9f733f9881a0277f3928a9 100644
--- a/llama_index/readers/web.py
+++ b/llama_index/readers/web.py
@@ -36,7 +36,7 @@ class SimpleWebPageReader(BasePydanticReader):
     ) -> None:
         """Initialize with parameters."""
         try:
-            import html2text
+            import html2text  # noqa
         except ImportError:
             raise ImportError(
                 "`html2text` package not found, please run `pip install html2text`"
@@ -95,7 +95,7 @@ class TrafilaturaWebReader(BasePydanticReader):
             error_on_missing (bool): Throw an error when data cannot be parsed
         """
         try:
-            import trafilatura
+            import trafilatura  # noqa
         except ImportError:
             raise ImportError(
                 "`trafilatura` package not found, please run `pip install trafilatura`"
@@ -174,10 +174,10 @@ class BeautifulSoupWebReader(BasePydanticReader):
     ) -> None:
         """Initialize with parameters."""
         try:
-            from urllib.parse import urlparse
+            from urllib.parse import urlparse  # noqa
 
-            import requests
-            from bs4 import BeautifulSoup
+            import requests  # noqa
+            from bs4 import BeautifulSoup  # noqa
         except ImportError:
             raise ImportError(
                 "`bs4`, `requests`, and `urllib` must be installed to scrape websites."
@@ -253,7 +253,7 @@ class RssReader(BasePydanticReader):
 
         """
         try:
-            import feedparser
+            import feedparser  # noqa
         except ImportError:
             raise ImportError(
                 "`feedparser` package not found, please run `pip install feedparser`"
@@ -261,7 +261,7 @@ class RssReader(BasePydanticReader):
 
         if html_to_text:
             try:
-                import html2text
+                import html2text  # noqa
             except ImportError:
                 raise ImportError(
                     "`html2text` package not found, please run `pip install html2text`"
diff --git a/llama_index/readers/wikipedia.py b/llama_index/readers/wikipedia.py
index da5eb3c3710729776d7fad232dacf35804177ed1..0e1e3c21e1fb6a2c46734e882f3d8aa11677f789 100644
--- a/llama_index/readers/wikipedia.py
+++ b/llama_index/readers/wikipedia.py
@@ -17,7 +17,7 @@ class WikipediaReader(BasePydanticReader):
     def __init__(self) -> None:
         """Initialize with parameters."""
         try:
-            import wikipedia
+            import wikipedia  # noqa
         except ImportError:
             raise ImportError(
                 "`wikipedia` package not found, please run `pip install wikipedia`"
diff --git a/llama_index/response_synthesizers/accumulate.py b/llama_index/response_synthesizers/accumulate.py
index 465a499f830f987c85ac20fa6b498349992c212f..d0a6a1e21ceaf6ef49aba551d0b36a184ce54607 100644
--- a/llama_index/response_synthesizers/accumulate.py
+++ b/llama_index/response_synthesizers/accumulate.py
@@ -1,11 +1,11 @@
 import asyncio
-from typing import Any, List, Optional, Sequence, Tuple
+from typing import Any, List, Optional, Sequence
 
 from llama_index.async_utils import run_async_tasks
 from llama_index.indices.service_context import ServiceContext
 from llama_index.prompts import BasePromptTemplate
 from llama_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
-from llama_index.prompts.mixin import PromptDictType, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 from llama_index.response_synthesizers.base import BaseSynthesizer
 from llama_index.types import RESPONSE_TEXT_TYPE
 
diff --git a/llama_index/response_synthesizers/base.py b/llama_index/response_synthesizers/base.py
index 0fd6c5ba46e7623dbe2890a2aa1be8bc3e6787d9..4b72edb67371e8bb05717193492e4413a65600a5 100644
--- a/llama_index/response_synthesizers/base.py
+++ b/llama_index/response_synthesizers/base.py
@@ -8,15 +8,14 @@ Will support different modes, from 1) stuffing chunks into prompt,
 
 """
 import logging
-from abc import ABC, abstractmethod
+from abc import abstractmethod
 from typing import Any, Dict, Generator, List, Optional, Sequence, Union
 
 from llama_index.bridge.pydantic import BaseModel
 from llama_index.callbacks.schema import CBEventType, EventPayload
 from llama_index.indices.query.schema import QueryBundle
 from llama_index.indices.service_context import ServiceContext
-from llama_index.prompts.base import BasePromptTemplate
-from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
+from llama_index.prompts.mixin import PromptMixin
 from llama_index.response.schema import (
     RESPONSE_TYPE,
     PydanticResponse,
diff --git a/llama_index/response_synthesizers/generation.py b/llama_index/response_synthesizers/generation.py
index 068fd2603a333910724df480194bcb5893397f69..5ea5f96aed6b20b6eb4d15b116a1bc7c9750297e 100644
--- a/llama_index/response_synthesizers/generation.py
+++ b/llama_index/response_synthesizers/generation.py
@@ -1,9 +1,9 @@
-from typing import Any, Optional, Sequence, Tuple
+from typing import Any, Optional, Sequence
 
 from llama_index.indices.service_context import ServiceContext
 from llama_index.prompts import BasePromptTemplate
 from llama_index.prompts.default_prompts import DEFAULT_SIMPLE_INPUT_PROMPT
-from llama_index.prompts.mixin import PromptDictType, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 from llama_index.response_synthesizers.base import BaseSynthesizer
 from llama_index.types import RESPONSE_TEXT_TYPE
 
diff --git a/llama_index/response_synthesizers/refine.py b/llama_index/response_synthesizers/refine.py
index 189344e75d39fa985ad2e4d74729ed1b071c68cb..06ab3e051ea57231251366b78949c4c1c410b30f 100644
--- a/llama_index/response_synthesizers/refine.py
+++ b/llama_index/response_synthesizers/refine.py
@@ -1,5 +1,5 @@
 import logging
-from typing import Any, Callable, Generator, Optional, Sequence, Tuple, Type, cast
+from typing import Any, Callable, Generator, Optional, Sequence, Type, cast
 
 from llama_index.bridge.pydantic import BaseModel, Field, ValidationError
 from llama_index.indices.service_context import ServiceContext
@@ -10,7 +10,7 @@ from llama_index.prompts.default_prompt_selectors import (
     DEFAULT_REFINE_PROMPT_SEL,
     DEFAULT_TEXT_QA_PROMPT_SEL,
 )
-from llama_index.prompts.mixin import PromptDictType, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 from llama_index.response.utils import get_response_text
 from llama_index.response_synthesizers.base import BaseSynthesizer
 from llama_index.types import RESPONSE_TEXT_TYPE, BasePydanticProgram
diff --git a/llama_index/response_synthesizers/simple_summarize.py b/llama_index/response_synthesizers/simple_summarize.py
index 2c7e459d9a39460162b938c74172ae2f0b1e519d..8ab4dd1bc47798d20e3525c42093d7ff9f8a5d2b 100644
--- a/llama_index/response_synthesizers/simple_summarize.py
+++ b/llama_index/response_synthesizers/simple_summarize.py
@@ -3,7 +3,7 @@ from typing import Any, Generator, Optional, Sequence, cast
 from llama_index.indices.service_context import ServiceContext
 from llama_index.prompts import BasePromptTemplate
 from llama_index.prompts.default_prompt_selectors import DEFAULT_TEXT_QA_PROMPT_SEL
-from llama_index.prompts.mixin import PromptDictType, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 from llama_index.response_synthesizers.base import BaseSynthesizer
 from llama_index.types import RESPONSE_TEXT_TYPE
 
diff --git a/llama_index/response_synthesizers/tree_summarize.py b/llama_index/response_synthesizers/tree_summarize.py
index 93da77cb817dfa71b82fd836ccee2e7c718d77a3..77d9a15908344485a607ebb3db380c07eb0fe3a3 100644
--- a/llama_index/response_synthesizers/tree_summarize.py
+++ b/llama_index/response_synthesizers/tree_summarize.py
@@ -1,5 +1,5 @@
 import asyncio
-from typing import Any, List, Optional, Sequence, Tuple
+from typing import Any, List, Optional, Sequence
 
 from llama_index.async_utils import run_async_tasks
 from llama_index.indices.service_context import ServiceContext
@@ -7,7 +7,7 @@ from llama_index.prompts import BasePromptTemplate
 from llama_index.prompts.default_prompt_selectors import (
     DEFAULT_TREE_SUMMARIZE_PROMPT_SEL,
 )
-from llama_index.prompts.mixin import PromptDictType, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 from llama_index.response_synthesizers.base import BaseSynthesizer
 from llama_index.types import RESPONSE_TEXT_TYPE, BaseModel
 
diff --git a/llama_index/selectors/embedding_selectors.py b/llama_index/selectors/embedding_selectors.py
index 1fcc571af847a84490c05621a83565f01906a24f..9e18f86d96f6aa98fc4dffc9301a622e41ea1e50 100644
--- a/llama_index/selectors/embedding_selectors.py
+++ b/llama_index/selectors/embedding_selectors.py
@@ -1,18 +1,16 @@
-from typing import Any, Dict, List, Optional, Sequence, cast
+from typing import Any, Dict, Optional, Sequence
 
 from llama_index.embeddings.base import BaseEmbedding
 from llama_index.embeddings.utils import resolve_embed_model
 from llama_index.indices.query.embedding_utils import get_top_k_embeddings
 from llama_index.indices.query.schema import QueryBundle
-from llama_index.prompts.mixin import PromptDictType, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 from llama_index.selectors.types import (
     BaseSelector,
-    MultiSelection,
     SelectorResult,
     SingleSelection,
 )
 from llama_index.tools.types import ToolMetadata
-from llama_index.types import BaseOutputParser
 
 
 class EmbeddingSingleSelector(BaseSelector):
diff --git a/llama_index/selectors/llm_selectors.py b/llama_index/selectors/llm_selectors.py
index 6d04f33b145d10f0361cd36ccbdbab16341870a5..1a56d0ccb50fe660cd6b3822f7214d36dfe24652 100644
--- a/llama_index/selectors/llm_selectors.py
+++ b/llama_index/selectors/llm_selectors.py
@@ -5,7 +5,7 @@ from llama_index.indices.service_context import ServiceContext
 from llama_index.llm_predictor.base import BaseLLMPredictor
 from llama_index.output_parsers.base import StructuredOutput
 from llama_index.output_parsers.selection import Answer, SelectionOutputParser
-from llama_index.prompts.mixin import PromptDictType, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 from llama_index.prompts.prompt_type import PromptType
 from llama_index.selectors.prompts import (
     DEFAULT_MULTI_SELECT_PROMPT_TMPL,
diff --git a/llama_index/selectors/pydantic_selectors.py b/llama_index/selectors/pydantic_selectors.py
index 75f9a73c5ca97c0b0c9aef03940a48eb35f33270..b50224088f31e18e2862aa9590623ed32d53e2f1 100644
--- a/llama_index/selectors/pydantic_selectors.py
+++ b/llama_index/selectors/pydantic_selectors.py
@@ -3,7 +3,7 @@ from typing import Any, Dict, Optional, Sequence
 from llama_index.indices.query.schema import QueryBundle
 from llama_index.llms.openai import OpenAI
 from llama_index.program.openai_program import OpenAIPydanticProgram
-from llama_index.prompts.mixin import PromptDictType, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 from llama_index.selectors.llm_selectors import _build_choices_text
 from llama_index.selectors.prompts import (
     DEFAULT_MULTI_PYD_SELECT_PROMPT_TMPL,
diff --git a/llama_index/selectors/types.py b/llama_index/selectors/types.py
index 269b066a2a501a91ac81f476783da53f0e5a4bff..8ccf7f4607081c9bf7c90f4a325f28d3b702d207 100644
--- a/llama_index/selectors/types.py
+++ b/llama_index/selectors/types.py
@@ -1,9 +1,9 @@
-from abc import ABC, abstractmethod
+from abc import abstractmethod
 from typing import List, Sequence, Union
 
 from llama_index.bridge.pydantic import BaseModel
 from llama_index.indices.query.schema import QueryBundle, QueryType
-from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
+from llama_index.prompts.mixin import PromptMixin, PromptMixinType
 from llama_index.tools.types import ToolMetadata
 
 MetadataType = Union[str, ToolMetadata]
diff --git a/llama_index/storage/docstore/dynamodb_docstore.py b/llama_index/storage/docstore/dynamodb_docstore.py
index f83239b2f21487044f24ba87524688516265de42..38ae0c82c83678e8eb9a4f52ced3fb032699eb09 100644
--- a/llama_index/storage/docstore/dynamodb_docstore.py
+++ b/llama_index/storage/docstore/dynamodb_docstore.py
@@ -1,7 +1,5 @@
 from __future__ import annotations
 
-from typing import Optional
-
 from llama_index.storage.docstore.keyval_docstore import KVDocumentStore
 from llama_index.storage.kvstore.dynamodb_kvstore import DynamoDBKVStore
 
diff --git a/llama_index/storage/index_store/dynamodb_index_store.py b/llama_index/storage/index_store/dynamodb_index_store.py
index 8f8078a66d5d2cc268b365844d4b57b158a6594c..494c9ac48ba5a67904439f86d347c5c58580fd3c 100644
--- a/llama_index/storage/index_store/dynamodb_index_store.py
+++ b/llama_index/storage/index_store/dynamodb_index_store.py
@@ -1,7 +1,5 @@
 from __future__ import annotations
 
-from typing import Optional
-
 from llama_index.storage.index_store.keyval_index_store import KVIndexStore
 from llama_index.storage.kvstore.dynamodb_kvstore import DynamoDBKVStore
 
diff --git a/llama_index/storage/kvstore/dynamodb_kvstore.py b/llama_index/storage/kvstore/dynamodb_kvstore.py
index 1baa5aa902338c0b9e44b965d7e1d3173baddfdd..d60193f5bd30b1df3fb2918d16fd28e15268005c 100644
--- a/llama_index/storage/kvstore/dynamodb_kvstore.py
+++ b/llama_index/storage/kvstore/dynamodb_kvstore.py
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import os
 from decimal import Decimal
-from typing import Any, Dict, List, Optional, Set, Tuple
+from typing import Any, Dict, List, Set, Tuple
 
 from llama_index.storage.kvstore.types import DEFAULT_COLLECTION, BaseKVStore
 
diff --git a/llama_index/text_splitter/code_splitter.py b/llama_index/text_splitter/code_splitter.py
index 54a87341bd54d46f741b77478be5c06bdea5a9fc..cc610cc9eb4530aafef7199b7cc391e196929da1 100644
--- a/llama_index/text_splitter/code_splitter.py
+++ b/llama_index/text_splitter/code_splitter.py
@@ -94,7 +94,7 @@ class CodeSplitter(TextSplitter):
 
             try:
                 parser = tree_sitter_languages.get_parser(self.language)
-            except Exception as e:
+            except Exception:
                 print(
                     f"Could not get parser for language {self.language}. Check "
                     "https://github.com/grantjenks/py-tree-sitter-languages#license "
diff --git a/llama_index/tools/query_engine.py b/llama_index/tools/query_engine.py
index 18654d048b58ebca7cc973e4a9517c74fb1c0db4..154bea7473cc9d1d7141d3ebf11f718998e66e74 100644
--- a/llama_index/tools/query_engine.py
+++ b/llama_index/tools/query_engine.py
@@ -1,4 +1,4 @@
-from typing import Any, Optional, cast
+from typing import Any, Optional
 
 from llama_index.indices.query.base import BaseQueryEngine
 from llama_index.langchain_helpers.agents.tools import IndexToolConfig, LlamaIndexTool
diff --git a/llama_index/vector_stores/chroma.py b/llama_index/vector_stores/chroma.py
index 5ba2bd8783b2bb27634ff6181554a283bbd7f12b..6db328f432e8d005419d111694bebd6418e54062 100644
--- a/llama_index/vector_stores/chroma.py
+++ b/llama_index/vector_stores/chroma.py
@@ -87,7 +87,7 @@ class ChromaVectorStore(BasePydanticVectorStore):
     ) -> None:
         """Init params."""
         try:
-            import chromadb
+            import chromadb  # noqa
         except ImportError:
             raise ImportError(import_err_msg)
         from chromadb.api.models.Collection import Collection
diff --git a/llama_index/vector_stores/cogsearch.py b/llama_index/vector_stores/cogsearch.py
index 51e99fb327354bb6aff92883677f2d3aed1e5a46..cf075875cdf76412cea3680c0145fbbecafc5de2 100644
--- a/llama_index/vector_stores/cogsearch.py
+++ b/llama_index/vector_stores/cogsearch.py
@@ -287,7 +287,7 @@ class CognitiveSearchVectorStore(VectorStore):
         )
 
         try:
-            import azure.search.documents
+            import azure.search.documents  # noqa
             from azure.search.documents import SearchClient
             from azure.search.documents.indexes import SearchIndexClient
         except ImportError:
diff --git a/llama_index/vector_stores/docarray/hnsw.py b/llama_index/vector_stores/docarray/hnsw.py
index 7bedb55bd215541563e7166c9f918cbbf8dfd71f..d8d8712f8bd3b2da8e870b21c9f9528a6491c53c 100644
--- a/llama_index/vector_stores/docarray/hnsw.py
+++ b/llama_index/vector_stores/docarray/hnsw.py
@@ -48,7 +48,7 @@ class DocArrayHnswVectorStore(DocArrayVectorStore):
                 `pip install docarray[hnswlib]`
         """
         try:
-            import docarray
+            import docarray  # noqa
         except ImportError:
             raise ImportError(import_err_msg)
 
diff --git a/llama_index/vector_stores/docarray/in_memory.py b/llama_index/vector_stores/docarray/in_memory.py
index 7e944fe359844a5019c49cf2869753a89e56edb7..4ca8d379b813b1f65212afed00591b6cc1a7c36e 100644
--- a/llama_index/vector_stores/docarray/in_memory.py
+++ b/llama_index/vector_stores/docarray/in_memory.py
@@ -30,7 +30,7 @@ class DocArrayInMemoryVectorStore(DocArrayVectorStore):
                 `pip install docarray`
         """
         try:
-            import docarray
+            import docarray  # noqa
         except ImportError:
             raise ImportError(import_err_msg)
 
diff --git a/llama_index/vector_stores/dynamodb.py b/llama_index/vector_stores/dynamodb.py
index bd89f72447f4bc70f1ab3d0773600618ca2aa6ff..87e6e5c18c8730dd21350ea66460ec945f5522d9 100644
--- a/llama_index/vector_stores/dynamodb.py
+++ b/llama_index/vector_stores/dynamodb.py
@@ -2,7 +2,7 @@
 from __future__ import annotations
 
 from logging import getLogger
-from typing import Any, Dict, List, Optional, cast
+from typing import Any, Dict, List, cast
 
 from llama_index.indices.query.embedding_utils import (
     get_top_k_embeddings,
diff --git a/llama_index/vector_stores/elasticsearch.py b/llama_index/vector_stores/elasticsearch.py
index cba936152be64f52852cd784037ecf28100bd22d..98e4bf54ee39c779a218307f136d2d11e55485e7 100644
--- a/llama_index/vector_stores/elasticsearch.py
+++ b/llama_index/vector_stores/elasticsearch.py
@@ -412,7 +412,7 @@ class ElasticsearchStore(VectorStore):
                 logger.warning(f"Could not find text {ref_doc_id} to delete")
             else:
                 logger.debug(f"Deleted text {ref_doc_id} from index")
-        except Exception as e:
+        except Exception:
             logger.error(f"Error deleting text: {ref_doc_id}")
             raise
 
diff --git a/llama_index/vector_stores/metal.py b/llama_index/vector_stores/metal.py
index bf8dce693ae98d83ac48816099511874091ba2ef..7196ef123e9430c21060035ddd4c9053d31ec9b4 100644
--- a/llama_index/vector_stores/metal.py
+++ b/llama_index/vector_stores/metal.py
@@ -39,7 +39,7 @@ class MetalVectorStore(VectorStore):
             "`metal_sdk` package not found, please run `pip install metal_sdk`"
         )
         try:
-            import metal_sdk
+            import metal_sdk  # noqa
         except ImportError:
             raise ImportError(import_err_msg)
         from metal_sdk.metal import Metal
diff --git a/llama_index/vector_stores/milvus.py b/llama_index/vector_stores/milvus.py
index 740877afc4d3c5366c6b3000472c960691cf9745..d051eb708e074dcf86cac9223ed274daa5d3c1a0 100644
--- a/llama_index/vector_stores/milvus.py
+++ b/llama_index/vector_stores/milvus.py
@@ -99,7 +99,7 @@ class MilvusVectorStore(VectorStore):
             "`pymilvus` package not found, please run `pip install pymilvus`"
         )
         try:
-            import pymilvus
+            import pymilvus  # noqa
         except ImportError:
             raise ImportError(import_err_msg)
 
diff --git a/llama_index/vector_stores/postgres.py b/llama_index/vector_stores/postgres.py
index 3c23946d1d2be9ec6d0e0d128bbac5da9a0c4c48..0621d5c24b8452d4ba4303cb54db6fd5b1a82891 100644
--- a/llama_index/vector_stores/postgres.py
+++ b/llama_index/vector_stores/postgres.py
@@ -132,11 +132,11 @@ class PGVectorStore(BasePydanticVectorStore):
         debug: bool = False,
     ) -> None:
         try:
-            import asyncpg
-            import pgvector
-            import psycopg2
+            import asyncpg  # noqa
+            import pgvector  # noqa
+            import psycopg2  # noqa
             import sqlalchemy
-            import sqlalchemy.ext.asyncio
+            import sqlalchemy.ext.asyncio  # noqa
         except ImportError:
             raise ImportError(
                 "`sqlalchemy[asyncio]`, `pgvector`, `psycopg2-binary` and `asyncpg` "
@@ -521,8 +521,6 @@ class PGVectorStore(BasePydanticVectorStore):
     async def aquery(
         self, query: VectorStoreQuery, **kwargs: Any
     ) -> VectorStoreQueryResult:
-        import sqlalchemy
-
         self._initialize()
         if query.mode == VectorStoreQueryMode.HYBRID:
             results = await self._async_hybrid_query(query)
diff --git a/llama_index/vector_stores/qdrant.py b/llama_index/vector_stores/qdrant.py
index 5ee0132911252ff0e7d6b119e5197b85e974d11e..471f610680bc79a357086d93d21b980d723f551a 100644
--- a/llama_index/vector_stores/qdrant.py
+++ b/llama_index/vector_stores/qdrant.py
@@ -374,7 +374,7 @@ class QdrantVectorStore(BasePydanticVectorStore):
         Args:
             query (VectorStoreQuery): query
         """
-        from qdrant_client.http.models import Filter, Payload
+        from qdrant_client.http.models import Filter
 
         query_embedding = cast(List[float], query.query_embedding)
 
diff --git a/llama_index/vector_stores/rocksetdb.py b/llama_index/vector_stores/rocksetdb.py
index c323d64ac866ae21d50ee2634cb83af92748fdac..04c05edd1a792a0ab8794a3883c1322572fc4aba 100644
--- a/llama_index/vector_stores/rocksetdb.py
+++ b/llama_index/vector_stores/rocksetdb.py
@@ -4,7 +4,7 @@ from enum import Enum
 from os import getenv
 from time import sleep
 from types import ModuleType
-from typing import Any, List, Optional, Type, TypeVar
+from typing import Any, List, Type, TypeVar
 
 from llama_index.schema import BaseNode
 from llama_index.vector_stores.types import (
diff --git a/llama_index/vector_stores/tair.py b/llama_index/vector_stores/tair.py
index 7eb1501253361f0a37db1794ac556717c1dd7479..01cbffc525f4ea68c34a6b98455de826cd742396 100644
--- a/llama_index/vector_stores/tair.py
+++ b/llama_index/vector_stores/tair.py
@@ -89,7 +89,7 @@ class TairVectorStore(VectorStore):
 
         """
         try:
-            from tair import Tair, tairvector
+            from tair import Tair, tairvector  # noqa
         except ImportError:
             raise ValueError(
                 "Could not import tair-py python package. "
diff --git a/llama_index/vector_stores/tencentvectordb.py b/llama_index/vector_stores/tencentvectordb.py
index 4b7c1ced0c2cd6624908774934aef1f7ed5f624e..a42cb72f11cf9ac25cffaf01f93b29ed3ffffa1f 100644
--- a/llama_index/vector_stores/tencentvectordb.py
+++ b/llama_index/vector_stores/tencentvectordb.py
@@ -8,7 +8,6 @@ from typing import Any, Dict, List, Optional
 
 from llama_index.schema import BaseNode, NodeRelationship, RelatedNodeInfo, TextNode
 from llama_index.vector_stores.types import (
-    MetadataFilters,
     VectorStore,
     VectorStoreQuery,
     VectorStoreQueryResult,
@@ -52,7 +51,7 @@ NOT_SUPPORT_METRIC_TYPE_ERROR = (
 
 def _try_import() -> None:
     try:
-        import tcvectordb
+        import tcvectordb  # noqa
     except ImportError:
         raise ImportError(
             "`tcvectordb` package not found, please run `pip install tcvectordb`"
diff --git a/llama_index/vector_stores/timescalevector.py b/llama_index/vector_stores/timescalevector.py
index ec90fdfc5fc05e1b137b1d2e10347e1bf894ee40..7c077afc696155eaad817a26bcbe582e7dabf24c 100644
--- a/llama_index/vector_stores/timescalevector.py
+++ b/llama_index/vector_stores/timescalevector.py
@@ -34,7 +34,7 @@ class TimescaleVectorStore(VectorStore):
         time_partition_interval: Optional[timedelta] = None,
     ) -> None:
         try:
-            from timescale_vector import client
+            from timescale_vector import client  # noqa
         except ImportError:
             raise ImportError("`timescale-vector` package should be pre installed")
 
diff --git a/llama_index/vector_stores/weaviate.py b/llama_index/vector_stores/weaviate.py
index 8f43b1641e12b61a82619d96cce71d9fc00066cf..803b550d6c85ae5a73bebe70f0da89bc05fb0974 100644
--- a/llama_index/vector_stores/weaviate.py
+++ b/llama_index/vector_stores/weaviate.py
@@ -91,7 +91,7 @@ class WeaviateVectorStore(BasePydanticVectorStore):
     ) -> None:
         """Initialize params."""
         try:
-            import weaviate
+            import weaviate  # noqa
             from weaviate import Client
         except ImportError:
             raise ImportError(import_err_msg)
@@ -136,8 +136,8 @@ class WeaviateVectorStore(BasePydanticVectorStore):
     ) -> "WeaviateVectorStore":
         """Create WeaviateVectorStore from config."""
         try:
-            import weaviate
-            from weaviate import AuthApiKey, Client
+            import weaviate  # noqa
+            from weaviate import AuthApiKey, Client  # noqa
         except ImportError:
             raise ImportError(import_err_msg)
 
diff --git a/llama_index/vector_stores/weaviate_utils.py b/llama_index/vector_stores/weaviate_utils.py
index ec77f8d0b82c000c0c07e3398ff3344647acad35..5e1d263bfbea3f26250ccbec5adfb94e36ac2e60 100644
--- a/llama_index/vector_stores/weaviate_utils.py
+++ b/llama_index/vector_stores/weaviate_utils.py
@@ -47,7 +47,7 @@ NODE_SCHEMA: List[Dict] = [
 def validate_client(client: Any) -> None:
     """Validate client and import weaviate library."""
     try:
-        import weaviate
+        import weaviate  # noqa
         from weaviate import Client
 
         client = cast(Client, client)
diff --git a/pyproject.toml b/pyproject.toml
index dffd43a2bc2f0b16d057810d8acb7783ab5484da..5c72833ecfe28869a82286f2a9aaef052db8732d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -167,6 +167,7 @@ select = [
     "D4",
     "E7",
     "EXE004",
+    "F401",
     "F504",
     "F541",
     "F632",
diff --git a/tests/evaluation/test_base.py b/tests/evaluation/test_base.py
index 884e1551c45c0060d087f2737d1010dd8a9fe4f8..d4ce1d3f8b17d6c614f2e85e4af30343af3053f4 100644
--- a/tests/evaluation/test_base.py
+++ b/tests/evaluation/test_base.py
@@ -2,7 +2,7 @@ from typing import Any, Optional, Sequence
 
 from llama_index.evaluation import BaseEvaluator
 from llama_index.evaluation.base import EvaluationResult
-from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
+from llama_index.prompts.mixin import PromptDictType
 from llama_index.response.schema import NodeWithScore, Response
 from llama_index.schema import TextNode
 
@@ -30,7 +30,7 @@ class MockEvaluator(BaseEvaluator):
         query: Optional[str] = None,
         response: Optional[str] = None,
         contexts: Optional[Sequence[str]] = None,
-        **kwargs: Any
+        **kwargs: Any,
     ) -> EvaluationResult:
         return EvaluationResult(
             query=query,
diff --git a/tests/finetuning/test_base.py b/tests/finetuning/test_base.py
index 2363de3cc860dbaaeb078693ff858dec11c28b31..4c494c82025ed3a7d96cd70f7e750a0abe58076c 100644
--- a/tests/finetuning/test_base.py
+++ b/tests/finetuning/test_base.py
@@ -7,22 +7,16 @@ import pytest
 def test_torch_imports() -> None:
     """Test that torch is an optional dependency."""
     # importing fine-tuning modules should be ok
-    from llama_index.finetuning import (
-        EmbeddingAdapterFinetuneEngine,
-        OpenAIFinetuneEngine,
-        SentenceTransformersFinetuneEngine,
-    )
+    from llama_index.finetuning import EmbeddingAdapterFinetuneEngine  # noqa
+    from llama_index.finetuning import OpenAIFinetuneEngine  # noqa
+    from llama_index.finetuning import SentenceTransformersFinetuneEngine  # noqa
 
     # if torch isn't installed, then these should fail
     if pkgutil.find_loader("torch") is None:
         with pytest.raises(ModuleNotFoundError):
             from llama_index.embeddings.adapter_utils import LinearLayer
-            from llama_index.finetuning.embeddings.adapter_utils import (
-                train_model,
-            )
+            from llama_index.finetuning.embeddings.adapter_utils import train_model
     else:
         # else, importing these should be ok
-        from llama_index.embeddings.adapter_utils import LinearLayer
-        from llama_index.finetuning.embeddings.adapter_utils import (
-            train_model,
-        )
+        from llama_index.embeddings.adapter_utils import LinearLayer  # noqa
+        from llama_index.finetuning.embeddings.adapter_utils import train_model  # noqa
diff --git a/tests/output_parsers/test_selection.py b/tests/output_parsers/test_selection.py
index 748e360e18a2ff03e7d1985c490298e69acff075..7480f3f21d5600c49e2b0582fbd1097c42466293 100644
--- a/tests/output_parsers/test_selection.py
+++ b/tests/output_parsers/test_selection.py
@@ -1,5 +1,3 @@
-import json.decoder
-
 import pytest
 from llama_index.output_parsers.base import StructuredOutput
 from llama_index.output_parsers.selection import SelectionOutputParser
diff --git a/tests/prompts/test_mixin.py b/tests/prompts/test_mixin.py
index 4bc6cfec0035ef776f1d4e8e5f0e6a00f36a0f92..717ae6982de4d32acc5dc68d086a0bb4440fef26 100644
--- a/tests/prompts/test_mixin.py
+++ b/tests/prompts/test_mixin.py
@@ -1,8 +1,7 @@
 """Test prompt mixin."""
 
 
-import pytest
-from llama_index.prompts.base import BasePromptTemplate, PromptTemplate
+from llama_index.prompts.base import PromptTemplate
 from llama_index.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
 
 
diff --git a/tests/vector_stores/test_cassandra.py b/tests/vector_stores/test_cassandra.py
index 2a63c1185b3f99fbd45b131b1c20d7d2d25463da..23f24b5397da9d7bdbba69ddb4c0d591e2798ae9 100644
--- a/tests/vector_stores/test_cassandra.py
+++ b/tests/vector_stores/test_cassandra.py
@@ -8,7 +8,7 @@ from llama_index.vector_stores.cassandra import CassandraVectorStore
 from llama_index.vector_stores.types import VectorStoreQuery, VectorStoreQueryMode
 
 try:
-    import cassio
+    import cassio  # noqa
 
     has_cassio = True
 except ImportError:
@@ -20,7 +20,7 @@ class TestCassandraVectorStore(unittest.TestCase):
     def test_cassandra_create_and_crud(self) -> None:
         mock_db_session = MagicMock()
         try:
-            import cassio
+            import cassio  # noqa
         except ModuleNotFoundError:
             # mock `cassio` if not installed
             mock_cassio = MagicMock()
@@ -55,7 +55,7 @@ class TestCassandraVectorStore(unittest.TestCase):
     def test_cassandra_queries(self) -> None:
         mock_db_session = MagicMock()
         try:
-            import cassio
+            import cassio  # noqa
         except ModuleNotFoundError:
             # mock `cassio` if not installed
             mock_cassio = MagicMock()
diff --git a/tests/vector_stores/test_postgres.py b/tests/vector_stores/test_postgres.py
index 6bdb0b46d3aab4c1a49fa14bcacf418ef1bccc0f..fcb0cf4f8a137654155f27d1588845bb51377f61 100644
--- a/tests/vector_stores/test_postgres.py
+++ b/tests/vector_stores/test_postgres.py
@@ -33,11 +33,11 @@ TEST_SCHEMA_NAME = "test"
 TEST_EMBED_DIM = 2
 
 try:
-    import asyncpg
-    import pgvector
+    import asyncpg  # noqa
+    import pgvector  # noqa
     import psycopg2
     import sqlalchemy
-    import sqlalchemy.ext.asyncio
+    import sqlalchemy.ext.asyncio  # noqa
 
     # connection check
     conn__ = psycopg2.connect(**PARAMS)  # type: ignore
diff --git a/tests/vector_stores/test_tencentvectordb.py b/tests/vector_stores/test_tencentvectordb.py
index f004f276802c89c47ecc6d080dfb3392cd1537d3..b28eeb077a9cd3f82acd203459c0106e73ff98d6 100644
--- a/tests/vector_stores/test_tencentvectordb.py
+++ b/tests/vector_stores/test_tencentvectordb.py
@@ -4,7 +4,7 @@ from typing import List
 import pytest
 
 try:
-    import tcvectordb
+    import tcvectordb  # noqa: F401
 
     tcvectordb_init = True
 except ImportError: