diff --git a/Makefile b/Makefile
index f53ef67ae9160332a59ca30ba5ff6167a386aa02..4800823abf8d79589d7159a5e05b9686a0ec7686 100644
--- a/Makefile
+++ b/Makefile
@@ -1,18 +1,17 @@
-.PHONY: format lint
-
 GIT_ROOT ?= $(shell git rev-parse --show-toplevel)
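+# Self-documenting help: grep collects every `target: ## description` line in
+# this Makefile, then awk splits on the `## ` marker and prints each target
+# name (yellow, padded to 30 columns) next to its description.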
+help: ## Show all Makefile targets
+	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}'
 
-format:
+.PHONY: help format lint test watch-docs
+format: ## Run code formatter: black
 	black .
-
-lint:
+lint: ## Run linters: mypy, black, ruff
 	mypy .
 	black . --check
 	ruff check .
-
-test:
+test: ## Run tests
 	pytest tests
-
-# Docs
 watch-docs: ## Build and watch documentation
 	sphinx-autobuild docs/ docs/_build/html --open-browser --watch $(GIT_ROOT)/llama_index/
diff --git a/docs/examples/agent/openai_agent_query_cookbook.ipynb b/docs/examples/agent/openai_agent_query_cookbook.ipynb
index 6e4c5c7caffdb54bd3c47ea3a6eb8a328c492b61..f403f5be971b4dedebdb63c69577b62093a867ac 100644
--- a/docs/examples/agent/openai_agent_query_cookbook.ipynb
+++ b/docs/examples/agent/openai_agent_query_cookbook.ipynb
@@ -254,6 +254,8 @@
     "    ],\n",
     ")\n",
     "\n",
+    "\n",
     "# define pydantic model for auto-retrieval function\n",
     "class AutoRetrieveModel(BaseModel):\n",
     "    query: str = Field(..., description=\"natural language query string\")\n",
diff --git a/docs/examples/evaluation/TestNYC-Evaluation-Query.ipynb b/docs/examples/evaluation/TestNYC-Evaluation-Query.ipynb
index 7afd23bda9e882d6e512f6e09e19f2622f15c5cc..df3dbe87d8e6c23a3cff7641d3a8cc1bf0dbed0b 100644
--- a/docs/examples/evaluation/TestNYC-Evaluation-Query.ipynb
+++ b/docs/examples/evaluation/TestNYC-Evaluation-Query.ipynb
@@ -517,11 +517,11 @@
    "source": [
     "from typing import List\n",
     "\n",
+    "\n",
     "# define jupyter display function\n",
     "def display_eval_sources(\n",
     "    query: str, response: Response, eval_result: List[str]\n",
     ") -> None:\n",
-    "\n",
     "    sources = [s.node.get_text() for s in response.source_nodes]\n",
     "    eval_df = pd.DataFrame(\n",
     "        {\n",
diff --git a/examples/async/AsyncQueryDemo.ipynb b/examples/async/AsyncQueryDemo.ipynb
index 347b40f66964ff58b89e6d916101357aeb637e8c..b4d5081b47eb22b1f819e445d8564e3050570a69 100644
--- a/examples/async/AsyncQueryDemo.ipynb
+++ b/examples/async/AsyncQueryDemo.ipynb
@@ -278,6 +278,8 @@
     "    response_mode=\"tree_summarize\",\n",
     ")\n",
     "\n",
+    "\n",
     "# run each query in parallel\n",
     "async def async_query(query_engine, questions):\n",
     "    tasks = [query_engine.aquery(q) for q in questions]\n",
diff --git a/llama_index/llms/base.py b/llama_index/llms/base.py
index 06db4d11819524d01d4b3da6679d77f29fc915b6..7234b1c6d2f5e4877b7fcde45c3ac708bacc8cea 100644
--- a/llama_index/llms/base.py
+++ b/llama_index/llms/base.py
@@ -43,6 +43,7 @@ class ChatResponse(BaseModel):
 ChatResponseGen = Generator[ChatResponse, None, None]
 ChatResponseAsyncGen = AsyncGenerator[ChatResponse, None]
 
+
 # ===== Generic Model Output - Completion =====
 class CompletionResponse(BaseModel):
     """Completion response."""
diff --git a/tests/llm_predictor/test_base.py b/tests/llm_predictor/test_base.py
index 3d3bd2d88f57ac3739c95290767926fd86c8c5bc..01ed7dca006d80669155e3aa8d0f62588284b903 100644
--- a/tests/llm_predictor/test_base.py
+++ b/tests/llm_predictor/test_base.py
@@ -8,7 +8,7 @@ from llama_index.types import BaseOutputParser
 from llama_index.prompts.prompts import Prompt, SimpleInputPrompt
 
 try:
-
+    import gptcache  # noqa: F401 (needed so the ImportError fallback below can actually trigger)
     gptcache_installed = True
 except ImportError:
     gptcache_installed = False