From 9a48208b843af2ce4317a4925249da81062d00f2 Mon Sep 17 00:00:00 2001
From: Jithin James <jamesjithin97@gmail.com>
Date: Thu, 13 Jul 2023 01:42:25 +0530
Subject: [PATCH] chore: add a help message to the Makefile (#6861)

* add a help target to the Makefile

* fix formatting with black
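
With the targets above, `make help` should print each documented
target and its description, roughly as below (assuming GNU grep and
awk; the awk printf colors target names yellow via the ANSI escape,
which is not shown here, and the %-30s field width sets the spacing):

    help                           Show all Makefile targets
    format                         Run code formatter: black
    lint                           Run linters: mypy, black, ruff
    test                           Run tests
    watch-docs                     Build and watch documentation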
---
 Makefile                                          | 15 ++++++---------
 .../agent/openai_agent_query_cookbook.ipynb       |  1 +
 .../evaluation/TestNYC-Evaluation-Query.ipynb     |  2 +-
 examples/async/AsyncQueryDemo.ipynb               |  1 +
 llama_index/llms/base.py                          |  1 +
 tests/llm_predictor/test_base.py                  |  1 -
 6 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/Makefile b/Makefile
index f53ef67ae9..4800823abf 100644
--- a/Makefile
+++ b/Makefile
@@ -1,18 +1,15 @@
-.PHONY: format lint
-
 GIT_ROOT ?= $(shell git rev-parse --show-toplevel)
+help: ## Show all Makefile targets
+	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}'
 
-format:
+.PHONY: format lint
+format: ## Run code formatter: black
 	black .
-
-lint:
+lint: ## Run linters: mypy, black, ruff
 	mypy .
 	black . --check
 	ruff check .
-
-test:
+test: ## Run tests
 	pytest tests
-
-# Docs
 watch-docs: ## Build and watch documentation
 	sphinx-autobuild docs/ docs/_build/html --open-browser --watch $(GIT_ROOT)/llama_index/
diff --git a/docs/examples/agent/openai_agent_query_cookbook.ipynb b/docs/examples/agent/openai_agent_query_cookbook.ipynb
index 6e4c5c7caf..f403f5be97 100644
--- a/docs/examples/agent/openai_agent_query_cookbook.ipynb
+++ b/docs/examples/agent/openai_agent_query_cookbook.ipynb
@@ -254,6 +254,7 @@
     "    ],\n",
     ")\n",
     "\n",
+    "\n",
     "# define pydantic model for auto-retrieval function\n",
     "class AutoRetrieveModel(BaseModel):\n",
     "    query: str = Field(..., description=\"natural language query string\")\n",
diff --git a/docs/examples/evaluation/TestNYC-Evaluation-Query.ipynb b/docs/examples/evaluation/TestNYC-Evaluation-Query.ipynb
index 7afd23bda9..df3dbe87d8 100644
--- a/docs/examples/evaluation/TestNYC-Evaluation-Query.ipynb
+++ b/docs/examples/evaluation/TestNYC-Evaluation-Query.ipynb
@@ -517,11 +517,11 @@
    "source": [
     "from typing import List\n",
     "\n",
+    "\n",
     "# define jupyter display function\n",
     "def display_eval_sources(\n",
     "    query: str, response: Response, eval_result: List[str]\n",
     ") -> None:\n",
-    "\n",
     "    sources = [s.node.get_text() for s in response.source_nodes]\n",
     "    eval_df = pd.DataFrame(\n",
     "        {\n",
diff --git a/examples/async/AsyncQueryDemo.ipynb b/examples/async/AsyncQueryDemo.ipynb
index 347b40f669..b4d5081b47 100644
--- a/examples/async/AsyncQueryDemo.ipynb
+++ b/examples/async/AsyncQueryDemo.ipynb
@@ -278,6 +278,7 @@
     "    response_mode=\"tree_summarize\",\n",
     ")\n",
     "\n",
+    "\n",
     "# run each query in parallel\n",
     "async def async_query(query_engine, questions):\n",
     "    tasks = [query_engine.aquery(q) for q in questions]\n",
diff --git a/llama_index/llms/base.py b/llama_index/llms/base.py
index 06db4d1181..7234b1c6d2 100644
--- a/llama_index/llms/base.py
+++ b/llama_index/llms/base.py
@@ -43,6 +43,7 @@ class ChatResponse(BaseModel):
 ChatResponseGen = Generator[ChatResponse, None, None]
 ChatResponseAsyncGen = AsyncGenerator[ChatResponse, None]
 
+
 # ===== Generic Model Output - Completion =====
 class CompletionResponse(BaseModel):
     """Completion response."""
diff --git a/tests/llm_predictor/test_base.py b/tests/llm_predictor/test_base.py
index 3d3bd2d88f..01ed7dca00 100644
--- a/tests/llm_predictor/test_base.py
+++ b/tests/llm_predictor/test_base.py
@@ -8,7 +8,6 @@ from llama_index.types import BaseOutputParser
 from llama_index.prompts.prompts import Prompt, SimpleInputPrompt
 
 try:
-
     gptcache_installed = True
 except ImportError:
     gptcache_installed = False
-- 
GitLab