diff --git a/.github/workflows/lint_on_push_or_pull.yml b/.github/workflows/lint_on_push_or_pull.yml
index 488f96d6c67c43be1e706042580a4408d4fa8177..29f3d4b968638d65fe84427e5f9c8ad3bafddeff 100644
--- a/.github/workflows/lint_on_push_or_pull.yml
+++ b/.github/workflows/lint_on_push_or_pull.yml
@@ -30,3 +30,13 @@ jobs:
 
       - name: Run Prettier
         run: pnpm run format
+
+      - name: Run Python format check
+        uses: chartboost/ruff-action@v1
+        with:
+          args: "format --check"
+
+      - name: Run Python lint
+        uses: chartboost/ruff-action@v1
+        with:
+          args: "check"
diff --git a/templates/components/engines/python/agent/tools/__init__.py b/templates/components/engines/python/agent/tools/__init__.py
index 111bee5f7c0d7a6dbc9deb4437da9329e0ad0661..f24d988dbdb91ad6d8527c9d95ca18d3604d9d5d 100644
--- a/templates/components/engines/python/agent/tools/__init__.py
+++ b/templates/components/engines/python/agent/tools/__init__.py
@@ -1,8 +1,6 @@
 import os
 import yaml
-import json
 import importlib
-from cachetools import cached, LRUCache
 from llama_index.core.tools.tool_spec.base import BaseToolSpec
 from llama_index.core.tools.function_tool import FunctionTool
 
@@ -13,7 +11,6 @@ class ToolType:
 
 
 class ToolFactory:
-
     TOOL_SOURCE_PACKAGE_MAP = {
         ToolType.LLAMAHUB: "llama_index.tools",
         ToolType.LOCAL: "app.engine.tools",
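
For context, TOOL_SOURCE_PACKAGE_MAP drives a dynamic-import lookup: the factory resolves a parent package from the tool's source and imports the tool module from it with importlib. A minimal sketch of that pattern (the string keys and the load_tool_class helper are illustrative, not the template's actual API):

    import importlib

    TOOL_SOURCE_PACKAGE_MAP = {
        "llamahub": "llama_index.tools",  # assumed value of ToolType.LLAMAHUB
        "local": "app.engine.tools",      # assumed value of ToolType.LOCAL
    }

    def load_tool_class(source: str, module_name: str, class_name: str):
        # Resolve the parent package for this source, import the tool module,
        # and return the named class from it.
        package = TOOL_SOURCE_PACKAGE_MAP[source]
        module = importlib.import_module(f"{package}.{module_name}")
        return getattr(module, class_name)
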
diff --git a/templates/components/engines/python/agent/tools/interpreter.py b/templates/components/engines/python/agent/tools/interpreter.py
index 1d2c02c366bc197b502f3df46722961424710c69..8e701c58f2bf438a58aae5d50964c1b78918e8a9 100644
--- a/templates/components/engines/python/agent/tools/interpreter.py
+++ b/templates/components/engines/python/agent/tools/interpreter.py
@@ -3,7 +3,7 @@ import logging
 import base64
 import uuid
 from pydantic import BaseModel
-from typing import List, Tuple, Dict, Optional
+from typing import List, Dict, Optional
 from llama_index.core.tools import FunctionTool
 from e2b_code_interpreter import CodeInterpreter
 from e2b_code_interpreter.models import Logs
@@ -26,7 +26,6 @@ class E2BToolOutput(BaseModel):
 
 
 class E2BCodeInterpreter:
-
     output_dir = "output/tool"
 
     def __init__(self, api_key: str = None):
diff --git a/templates/components/loaders/python/db.py b/templates/components/loaders/python/db.py
index d5c9ffde6364b13b9e5c97096912ba4b96d0ab3b..b6e3d8f0ea5ea7d73c53d7c67dca8c5a0502a7e5 100644
--- a/templates/components/loaders/python/db.py
+++ b/templates/components/loaders/python/db.py
@@ -1,8 +1,6 @@
-import os
 import logging
 from typing import List
-from pydantic import BaseModel, validator
-from llama_index.core.indices.vector_store import VectorStoreIndex
+from pydantic import BaseModel
 
 logger = logging.getLogger(__name__)
 
diff --git a/templates/components/loaders/python/web.py b/templates/components/loaders/python/web.py
index 563e51b5d04be991d900ce08a180f3cbb77c8037..a9bf281fe95d560bc6cb02861fa0e2f9bd98de21 100644
--- a/templates/components/loaders/python/web.py
+++ b/templates/components/loaders/python/web.py
@@ -1,5 +1,3 @@
-import os
-import json
 from pydantic import BaseModel, Field
 
 
diff --git a/templates/components/settings/python/llmhub.py b/templates/components/settings/python/llmhub.py
index 69e0e324971a0ea68c001e15e88320124483a80a..2c46b252e502d4252dcc80714a1dd05c7b478017 100644
--- a/templates/components/settings/python/llmhub.py
+++ b/templates/components/settings/python/llmhub.py
@@ -6,11 +6,13 @@ import os
 DEFAULT_MODEL = "gpt-3.5-turbo"
 DEFAULT_EMBEDDING_MODEL = "text-embedding-3-large"
 
+
 class TSIEmbedding(OpenAIEmbedding):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
         self._query_engine = self._text_engine = self.model_name
 
+
 def llm_config_from_env() -> Dict:
     from llama_index.core.constants import DEFAULT_TEMPERATURE
 
@@ -32,7 +34,7 @@ def llm_config_from_env() -> Dict:
 
 def embedding_config_from_env() -> Dict:
     from llama_index.core.constants import DEFAULT_EMBEDDING_DIM
-    
+
     model = os.getenv("EMBEDDING_MODEL", DEFAULT_EMBEDDING_MODEL)
     dimension = os.getenv("EMBEDDING_DIM", DEFAULT_EMBEDDING_DIM)
     api_key = os.getenv("T_SYSTEMS_LLMHUB_API_KEY")
@@ -46,6 +48,7 @@ def embedding_config_from_env() -> Dict:
     }
     return config
 
+
 def init_llmhub():
     from llama_index.llms.openai_like import OpenAILike
 
@@ -58,4 +61,4 @@ def init_llmhub():
         is_chat_model=True,
         is_function_calling_model=False,
         context_window=4096,
-    )
\ No newline at end of file
+    )
diff --git a/templates/components/vectordbs/python/llamacloud/generate.py b/templates/components/vectordbs/python/llamacloud/generate.py
index 7ddfb7e8f09d69b7aea837437233c5e3a6b14f3c..41a0966737c2f6079cc9e1b7f263149a70517a8b 100644
--- a/templates/components/vectordbs/python/llamacloud/generate.py
+++ b/templates/components/vectordbs/python/llamacloud/generate.py
@@ -1,3 +1,4 @@
+# flake8: noqa: E402
 from dotenv import load_dotenv
 
 from app.engine.index import get_index
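
The # flake8: noqa: E402 directive added to these entrypoints suppresses "module level import not at top of file" for the whole file. That is deliberate: load_dotenv() has to run before the application modules are imported, because those modules read environment variables at import time. A minimal sketch of the pattern:

    # flake8: noqa: E402
    from dotenv import load_dotenv

    load_dotenv()  # must run first so .env values are visible to the imports below

    import os

    # Modules imported after load_dotenv() can rely on .env settings, e.g.:
    model = os.getenv("EMBEDDING_MODEL", "text-embedding-3-large")
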
diff --git a/templates/components/vectordbs/python/none/generate.py b/templates/components/vectordbs/python/none/generate.py
index c9e08f9461a6fd696be723975895fce9cac4821c..eed63fec5ba489055385f3552c7647e735ef53db 100644
--- a/templates/components/vectordbs/python/none/generate.py
+++ b/templates/components/vectordbs/python/none/generate.py
@@ -1,3 +1,4 @@
+# flake8: noqa: E402
 from dotenv import load_dotenv
 
 load_dotenv()
diff --git a/templates/components/vectordbs/python/weaviate/vectordb.py b/templates/components/vectordbs/python/weaviate/vectordb.py
index 1005ead288401149c876840f2b23b36276213fd4..a1fc82b74d188e0990de5601f6fab365dbd4a21f 100644
--- a/templates/components/vectordbs/python/weaviate/vectordb.py
+++ b/templates/components/vectordbs/python/weaviate/vectordb.py
@@ -17,9 +17,11 @@ def _create_weaviate_client():
     client = weaviate.connect_to_weaviate_cloud(cluster_url, auth_credentials)
     return client
 
+
 # Global variable to store the Weaviate client
 client = None
 
+
 def get_vector_store():
     global client
     if client is None:
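
Beyond the blank-line fixes, this file's shape is worth noting: the module-level client is a lazy singleton, created on the first get_vector_store() call and reused afterwards. Reduced to its core (a sketch, with the vector-store construction elided):

    client = None  # global Weaviate client, created on first use

    def get_client():
        global client
        if client is None:
            client = _create_weaviate_client()  # defined earlier in the module
        return client
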
diff --git a/templates/types/extractor/fastapi/main.py b/templates/types/extractor/fastapi/main.py
index 768c4203a7faf901726eebb0a2e1fd2f97487824..646a98c75447b752d43156e7fd34ac33f45a266e 100644
--- a/templates/types/extractor/fastapi/main.py
+++ b/templates/types/extractor/fastapi/main.py
@@ -1,3 +1,4 @@
+# flake8: noqa: E402
 from dotenv import load_dotenv
 
 load_dotenv()
diff --git a/templates/types/multiagent/fastapi/app/utils.py b/templates/types/multiagent/fastapi/app/utils.py
index 4ad0c2647dfcbe60d2d09acc5370f8bd0188f56a..ac43ccbb36843974c610fc937b5b23b2f25d47da 100644
--- a/templates/types/multiagent/fastapi/app/utils.py
+++ b/templates/types/multiagent/fastapi/app/utils.py
@@ -5,4 +5,4 @@ def load_from_env(var: str, throw_error: bool = True) -> str:
     res = os.getenv(var)
     if res is None and throw_error:
         raise ValueError(f"Missing environment variable: {var}")
-    return res
\ No newline at end of file
+    return res
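
For reference, load_from_env has two modes, which the restored trailing newline does not change (the variable names below are illustrative):

    url = load_from_env("LLM_BASE_URL")                    # raises ValueError if unset
    token = load_from_env("API_TOKEN", throw_error=False)  # returns None if unset
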
diff --git a/templates/types/multiagent/fastapi/main.py b/templates/types/multiagent/fastapi/main.py
index 93fb6c47454337cf33b3b67cc50c54ba5f42e54e..03dc98a64676851605c6a58eee3252636bf3503e 100644
--- a/templates/types/multiagent/fastapi/main.py
+++ b/templates/types/multiagent/fastapi/main.py
@@ -1,3 +1,4 @@
+# flake8: noqa: E402
 from dotenv import load_dotenv
 from app.settings import init_settings
 
diff --git a/templates/types/streaming/fastapi/app/api/routers/models.py b/templates/types/streaming/fastapi/app/api/routers/models.py
index 3d790adb9659be0f0538e67f561cbd051465b36a..15f69bc34dd493fcbbda0c3ca4b722c2818b77db 100644
--- a/templates/types/streaming/fastapi/app/api/routers/models.py
+++ b/templates/types/streaming/fastapi/app/api/routers/models.py
@@ -1,6 +1,6 @@
 import logging
 import os
-from typing import Any, Dict, List, Literal, Optional, Set
+from typing import Any, Dict, List, Literal, Optional
 
 from llama_index.core.llms import ChatMessage, MessageRole
 from llama_index.core.schema import NodeWithScore
diff --git a/templates/types/streaming/fastapi/app/api/routers/upload.py b/templates/types/streaming/fastapi/app/api/routers/upload.py
index dcb3a08dc8a5323d0b48174db6ed50e8cebda76a..e1ebbebdb7f86bac52d298b8378b395f032c5cc3 100644
--- a/templates/types/streaming/fastapi/app/api/routers/upload.py
+++ b/templates/types/streaming/fastapi/app/api/routers/upload.py
@@ -21,7 +21,9 @@ class FileUploadRequest(BaseModel):
 def upload_file(request: FileUploadRequest) -> List[str]:
     try:
         logger.info("Processing file")
-        return PrivateFileService.process_file(request.filename, request.base64, request.params)
+        return PrivateFileService.process_file(
+            request.filename, request.base64, request.params
+        )
     except Exception as e:
         logger.error(f"Error processing file: {e}", exc_info=True)
         raise HTTPException(status_code=500, detail="Error processing file")
diff --git a/templates/types/streaming/fastapi/app/api/services/file.py b/templates/types/streaming/fastapi/app/api/services/file.py
index 20356113f609f3795a83a2351e536d8a294b697e..36113f9d8d73bb430a924f87b31bfa943373c350 100644
--- a/templates/types/streaming/fastapi/app/api/services/file.py
+++ b/templates/types/streaming/fastapi/app/api/services/file.py
@@ -3,8 +3,7 @@ import mimetypes
 import os
 from io import BytesIO
 from pathlib import Path
-import time
-from typing import Any, Dict, List, Tuple
+from typing import Any, List, Tuple
 from uuid import uuid4
 
 
@@ -14,7 +13,6 @@ from llama_index.core.ingestion import IngestionPipeline
 from llama_index.core.readers.file.base import (
     _try_loading_included_file_formats as get_file_loaders_map,
 )
-from llama_index.core.readers.file.base import default_file_metadata_func
 from llama_index.core.schema import Document
 from llama_index.indices.managed.llama_cloud.base import LlamaCloudIndex
 from llama_index.readers.file import FlatReader
diff --git a/templates/types/streaming/fastapi/app/api/services/suggestion.py b/templates/types/streaming/fastapi/app/api/services/suggestion.py
index ea563b17e8f901352932d7306b154ab4d0bf8521..f881962ef158c95742aa6070661e6a749d50c771 100644
--- a/templates/types/streaming/fastapi/app/api/services/suggestion.py
+++ b/templates/types/streaming/fastapi/app/api/services/suggestion.py
@@ -25,7 +25,6 @@ class NextQuestions(BaseModel):
 
 
 class NextQuestionSuggestion:
-
     @staticmethod
     async def suggest_next_questions(
         messages: List[Message],
diff --git a/templates/types/streaming/fastapi/app/engine/generate.py b/templates/types/streaming/fastapi/app/engine/generate.py
index 44a8e8e018f4e32ba7a1a70a7a54d27e617f591f..1bca2e2883156db872686d800e4e9cb505182300 100644
--- a/templates/types/streaming/fastapi/app/engine/generate.py
+++ b/templates/types/streaming/fastapi/app/engine/generate.py
@@ -1,3 +1,4 @@
+# flake8: noqa: E402
 from dotenv import load_dotenv
 
 load_dotenv()
@@ -21,7 +22,6 @@ STORAGE_DIR = os.getenv("STORAGE_DIR", "storage")
 
 
 def get_doc_store():
-
     # If the storage directory is there, load the document store from it.
     # If not, set up an in-memory document store since we can't load from a directory that doesn't exist.
     if os.path.exists(STORAGE_DIR):
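
The comment in get_doc_store() describes a persist-or-in-memory branch. Assuming the SimpleDocumentStore shipped with llama_index (a sketch; the template's concrete store class is not shown in this diff), the branch looks like:

    import os
    from llama_index.core.storage.docstore import SimpleDocumentStore

    STORAGE_DIR = os.getenv("STORAGE_DIR", "storage")

    if os.path.exists(STORAGE_DIR):
        # A persisted store exists on disk: load it.
        doc_store = SimpleDocumentStore.from_persist_dir(STORAGE_DIR)
    else:
        # Nothing persisted yet: fall back to an in-memory store.
        doc_store = SimpleDocumentStore()
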
diff --git a/templates/types/streaming/fastapi/main.py b/templates/types/streaming/fastapi/main.py
index c9b287a813e7cf0c605e0114cff7fb8ffdf8bb9f..b0be152a3f8a3f3fe5422d1f88cdec7218324e7a 100644
--- a/templates/types/streaming/fastapi/main.py
+++ b/templates/types/streaming/fastapi/main.py
@@ -1,3 +1,4 @@
+# flake8: noqa: E402
 from dotenv import load_dotenv
 
 load_dotenv()