diff --git a/.changeset/plenty-pumpkins-fold.md b/.changeset/plenty-pumpkins-fold.md
new file mode 100644
index 0000000000000000000000000000000000000000..3d18a1a832f9bda6d6bcd140f2a4e7a1bc0dbdd6
--- /dev/null
+++ b/.changeset/plenty-pumpkins-fold.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Add support for local models via Hugging Face
diff --git a/helpers/env-variables.ts b/helpers/env-variables.ts
index f1c23de27aae431644556bb0ea964602244ae40c..07ae88e02a08f6edc0dc9aca1c2d0fb76511ef12 100644
--- a/helpers/env-variables.ts
+++ b/helpers/env-variables.ts
@@ -336,6 +336,20 @@ const getModelEnvs = (modelConfig: ModelConfig): EnvVar[] => {
           },
         ]
       : []),
+    ...(modelConfig.provider === "huggingface"
+      ? [
+          {
+            name: "EMBEDDING_BACKEND",
+            description:
+              "The backend to use for the Sentence Transformers embedding model, either 'torch', 'onnx', or 'openvino'. Defaults to 'onnx'.",
+          },
+          {
+            name: "EMBEDDING_TRUST_REMOTE_CODE",
+            description:
+              "Whether to trust remote code for the embedding model, required for some models with custom code.",
+          },
+        ]
+      : []),
     ...(modelConfig.provider === "t-systems"
       ? [
           {
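
For reference, the `.env` that create-llama generates for this provider would then carry the two new variables alongside the existing model settings. A hypothetical example (values are illustrative defaults; `MODEL_PROVIDER`, `MODEL`, and `EMBEDDING_MODEL` are pre-existing create-llama variables, not part of this change):

```
MODEL_PROVIDER=huggingface
MODEL=HuggingFaceH4/zephyr-7b-alpha
EMBEDDING_MODEL=BAAI/bge-small-en-v1.5
EMBEDDING_BACKEND=onnx
EMBEDDING_TRUST_REMOTE_CODE=false
```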
diff --git a/helpers/providers/huggingface.ts b/helpers/providers/huggingface.ts
new file mode 100644
index 0000000000000000000000000000000000000000..1a3a4a069bfe1945d86bbd3410a7e681f3061574
--- /dev/null
+++ b/helpers/providers/huggingface.ts
@@ -0,0 +1,61 @@
+import prompts from "prompts";
+import { ModelConfigParams } from ".";
+import { questionHandlers, toChoice } from "../../questions/utils";
+
+const MODELS = ["HuggingFaceH4/zephyr-7b-alpha"];
+type ModelData = {
+  dimensions: number;
+};
+const EMBEDDING_MODELS: Record<string, ModelData> = {
+  "BAAI/bge-small-en-v1.5": { dimensions: 384 },
+};
+
+const DEFAULT_MODEL = MODELS[0];
+const DEFAULT_EMBEDDING_MODEL = Object.keys(EMBEDDING_MODELS)[0];
+const DEFAULT_DIMENSIONS = Object.values(EMBEDDING_MODELS)[0].dimensions;
+
+type HuggingfaceQuestionsParams = {
+  askModels: boolean;
+};
+
+export async function askHuggingfaceQuestions({
+  askModels,
+}: HuggingfaceQuestionsParams): Promise<ModelConfigParams> {
+  const config: ModelConfigParams = {
+    model: DEFAULT_MODEL,
+    embeddingModel: DEFAULT_EMBEDDING_MODEL,
+    dimensions: DEFAULT_DIMENSIONS,
+    isConfigured(): boolean {
+      return true;
+    },
+  };
+
+  if (askModels) {
+    const { model } = await prompts(
+      {
+        type: "select",
+        name: "model",
+        message: "Which Hugging Face model would you like to use?",
+        choices: MODELS.map(toChoice),
+        initial: 0,
+      },
+      questionHandlers,
+    );
+    config.model = model;
+
+    const { embeddingModel } = await prompts(
+      {
+        type: "select",
+        name: "embeddingModel",
+        message: "Which embedding model would you like to use?",
+        choices: Object.keys(EMBEDDING_MODELS).map(toChoice),
+        initial: 0,
+      },
+      questionHandlers,
+    );
+    config.embeddingModel = embeddingModel;
+    config.dimensions = EMBEDDING_MODELS[embeddingModel].dimensions;
+  }
+
+  return config;
+}
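
The `dimensions` value recorded in `EMBEDDING_MODELS` is what downstream vector stores use to size their index, so it must match what the model actually emits. A quick sanity check (not part of the patch), assuming `llama-index-embeddings-huggingface` is installed:

```python
# Verify that BAAI/bge-small-en-v1.5 really produces 384-dimensional
# vectors, matching the dimensions recorded in EMBEDDING_MODELS above.
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
vector = embed_model.get_text_embedding("hello world")
assert len(vector) == 384
```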
diff --git a/helpers/providers/index.ts b/helpers/providers/index.ts
index 06977f6fc048c1ba177200eee919008a1855ae1b..a75302981c5a66ef2cf1f56b844646474b266efe 100644
--- a/helpers/providers/index.ts
+++ b/helpers/providers/index.ts
@@ -5,6 +5,7 @@ import { askAnthropicQuestions } from "./anthropic";
 import { askAzureQuestions } from "./azure";
 import { askGeminiQuestions } from "./gemini";
 import { askGroqQuestions } from "./groq";
+import { askHuggingfaceQuestions } from "./huggingface";
 import { askLLMHubQuestions } from "./llmhub";
 import { askMistralQuestions } from "./mistral";
 import { askOllamaQuestions } from "./ollama";
@@ -39,6 +40,7 @@ export async function askModelConfig({
 
     if (framework === "fastapi") {
       choices.push({ title: "T-Systems", value: "t-systems" });
+      choices.push({ title: "Hugging Face", value: "huggingface" });
     }
     const { provider } = await prompts(
       {
@@ -76,6 +78,9 @@ export async function askModelConfig({
     case "t-systems":
       modelConfig = await askLLMHubQuestions({ askModels });
       break;
+    case "huggingface":
+      modelConfig = await askHuggingfaceQuestions({ askModels });
+      break;
     default:
       modelConfig = await askOpenAIQuestions({
         openAiKey,
diff --git a/helpers/python.ts b/helpers/python.ts
index 6305739a21804703baee60358b03e720ecaeebda..9dd686d803f468fa7d69f473f9164dbb405859d7 100644
--- a/helpers/python.ts
+++ b/helpers/python.ts
@@ -234,6 +234,21 @@ const getAdditionalDependencies = (
         version: "0.2.4",
       });
       break;
+    case "huggingface":
+      dependencies.push({
+        name: "llama-index-llms-huggingface",
+        version: "^0.3.5",
+      });
+      dependencies.push({
+        name: "llama-index-embeddings-huggingface",
+        version: "^0.3.1",
+      });
+      dependencies.push({
+        name: "optimum",
+        version: "^1.23.3",
+        extras: ["onnxruntime"],
+      });
+      break;
     case "t-systems":
       dependencies.push({
         name: "llama-index-agent-openai",
diff --git a/helpers/types.ts b/helpers/types.ts
index cef8ce3b81688269a50ce8127231014aef00b3ad..bcaf5b062af5bf8731f6a0e02c8747c4b1235972 100644
--- a/helpers/types.ts
+++ b/helpers/types.ts
@@ -9,6 +9,7 @@ export type ModelProvider =
   | "gemini"
   | "mistral"
   | "azure-openai"
+  | "huggingface"
   | "t-systems";
 export type ModelConfig = {
   provider: ModelProvider;
diff --git a/templates/components/settings/python/settings.py b/templates/components/settings/python/settings.py
index 681974cedff9528334505947de9afd4421a0c796..bc7270bd81e8fe78d6897a2ea8f4f8d059f40b47 100644
--- a/templates/components/settings/python/settings.py
+++ b/templates/components/settings/python/settings.py
@@ -21,6 +21,8 @@ def init_settings():
             init_mistral()
         case "azure-openai":
             init_azure_openai()
+        case "huggingface":
+            init_huggingface()
         case "t-systems":
             from .llmhub import init_llmhub
 
@@ -138,6 +140,42 @@ def init_fastembed():
     )
 
 
+def init_huggingface_embedding():
+    try:
+        from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+    except ImportError:
+        raise ImportError(
+            "Hugging Face support is not installed. Please install it with `poetry add llama-index-embeddings-huggingface`"
+        )
+
+    embedding_model = os.getenv("EMBEDDING_MODEL", "all-MiniLM-L6-v2")
+    backend = os.getenv("EMBEDDING_BACKEND", "onnx")  # "torch", "onnx", or "openvino"
+    trust_remote_code = (
+        os.getenv("EMBEDDING_TRUST_REMOTE_CODE", "false").lower() == "true"
+    )
+
+    Settings.embed_model = HuggingFaceEmbedding(
+        model_name=embedding_model,
+        trust_remote_code=trust_remote_code,
+        backend=backend,
+    )
+
+
+def init_huggingface():
+    try:
+        from llama_index.llms.huggingface import HuggingFaceLLM
+    except ImportError:
+        raise ImportError(
+            "Hugging Face support is not installed. Please install it with `poetry add llama-index-llms-huggingface` and `poetry add llama-index-embeddings-huggingface`"
+        )
+
+    Settings.llm = HuggingFaceLLM(
+        model_name=os.getenv("MODEL"),
+        tokenizer_name=os.getenv("MODEL"),
+    )
+    init_huggingface_embedding()
+
+
 def init_groq():
     try:
         from llama_index.llms.groq import Groq
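
Taken together, `init_huggingface()` with the defaults wired up by `askHuggingfaceQuestions()` is equivalent to the following explicit configuration; a sketch for orientation only, since the real code reads everything from the environment:

```python
from llama_index.core import Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface import HuggingFaceLLM

# Equivalent of init_huggingface() with MODEL=HuggingFaceH4/zephyr-7b-alpha
# and the default EMBEDDING_* values substituted in. Both models run locally,
# so no API key is involved.
Settings.llm = HuggingFaceLLM(
    model_name="HuggingFaceH4/zephyr-7b-alpha",
    tokenizer_name="HuggingFaceH4/zephyr-7b-alpha",
)
Settings.embed_model = HuggingFaceEmbedding(
    model_name="BAAI/bge-small-en-v1.5",
    backend="onnx",
    trust_remote_code=False,
)
```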