diff --git a/README.md b/README.md
index f77cdf2b894c0520e0d9c58418796956c02c9a72..68653bddc5a45fc9672a80eb9750516acaaf89a6 100644
--- a/README.md
+++ b/README.md
@@ -82,6 +82,7 @@ Some cool features of AnythingLLM
 - [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
 - [LM Studio (all)](https://lmstudio.ai)
 - [LocalAi (all)](https://localai.io/)
+- [Ollama (all)](https://ollama.ai/)
 
 **Supported Vector Databases:**
 
diff --git a/docker/.env.example b/docker/.env.example
index 16413ad3c74e6d05d7013becd598c60b3eaed666..ba33bd5c0433028bef8c4f00114042a357ef56ab 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -79,6 +79,11 @@ GID='1000'
 # EMBEDDING_MODEL_PREF='text-embedding-ada-002'
 # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The max chunk size in chars a string to embed can be
 
+# EMBEDDING_ENGINE='ollama'
+# EMBEDDING_BASE_PATH='http://127.0.0.1:11434'
+# EMBEDDING_MODEL_PREF='nomic-embed-text:latest'
+# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
+
 ###########################################
 ######## Vector Database Selection ########
 ###########################################
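The endpoint this config points at can be smoke-tested before booting AnythingLLM. Below is a minimal sketch, assuming Ollama is running at the configured base path and `nomic-embed-text` has already been pulled; the request and response shape mirrors what the new `OllamaEmbedder` (further down) sends and parses:

```js
// Sanity-check the Ollama embeddings endpoint referenced by the config above.
// Assumes Ollama is serving on 127.0.0.1:11434 and the model is pulled.
async function checkOllamaEmbeddings() {
  const res = await fetch("http://127.0.0.1:11434/api/embeddings", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      model: "nomic-embed-text:latest",
      prompt: "Hello, world!",
    }),
  });
  const { embedding } = await res.json(); // Ollama returns { embedding: number[] }
  console.log(`Got a ${embedding.length}-dimension embedding vector.`);
}

checkOllamaEmbeddings();
```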
diff --git a/frontend/src/components/EmbeddingSelection/OllamaOptions/index.jsx b/frontend/src/components/EmbeddingSelection/OllamaOptions/index.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..dff697f888642ed018d0654d6aa3f1feaed33722
--- /dev/null
+++ b/frontend/src/components/EmbeddingSelection/OllamaOptions/index.jsx
@@ -0,0 +1,120 @@
+import React, { useEffect, useState } from "react";
+import System from "@/models/system";
+
+export default function OllamaEmbeddingOptions({ settings }) {
+  const [basePathValue, setBasePathValue] = useState(
+    settings?.EmbeddingBasePath
+  );
+  const [basePath, setBasePath] = useState(settings?.EmbeddingBasePath);
+
+  return (
+    <div className="w-full flex flex-col gap-y-4">
+      <div className="w-full flex items-center gap-4">
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-4">
+            Ollama Base URL
+          </label>
+          <input
+            type="url"
+            name="EmbeddingBasePath"
+            className="bg-zinc-900 text-white placeholder-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+            placeholder="http://127.0.0.1:11434"
+            defaultValue={settings?.EmbeddingBasePath}
+            onChange={(e) => setBasePathValue(e.target.value)}
+            onBlur={() => setBasePath(basePathValue)}
+            required={true}
+            autoComplete="off"
+            spellCheck={false}
+          />
+        </div>
+        <OllamaEmbeddingModelSelection settings={settings} basePath={basePath} />
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-4">
+            Max embedding chunk length
+          </label>
+          <input
+            type="number"
+            name="EmbeddingModelMaxChunkLength"
+            className="bg-zinc-900 text-white placeholder-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+            placeholder="8192"
+            min={1}
+            onScroll={(e) => e.target.blur()}
+            defaultValue={settings?.EmbeddingModelMaxChunkLength}
+            required={false}
+            autoComplete="off"
+          />
+        </div>
+      </div>
+    </div>
+  );
+}
+
+function OllamaEmbeddingModelSelection({ settings, basePath = null }) {
+  const [customModels, setCustomModels] = useState([]);
+  const [loading, setLoading] = useState(true);
+
+  useEffect(() => {
+    async function findCustomModels() {
+      if (!basePath) {
+        setCustomModels([]);
+        setLoading(false);
+        return;
+      }
+      setLoading(true);
+      const { models } = await System.customModels("ollama", null, basePath);
+      setCustomModels(models || []);
+      setLoading(false);
+    }
+    findCustomModels();
+  }, [basePath]);
+
+  if (loading || customModels.length == 0) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Embedding Model Selection
+        </label>
+        <select
+          name="EmbeddingModelPref"
+          disabled={true}
+          className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            {!!basePath
+              ? "-- loading available models --"
+              : "-- waiting for URL --"}
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-4">
+        Embedding Model Selection
+      </label>
+      <select
+        name="EmbeddingModelPref"
+        required={true}
+        className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {customModels.length > 0 && (
+          <optgroup label="Your loaded models">
+            {customModels.map((model) => {
+              return (
+                <option
+                  key={model.id}
+                  value={model.id}
+                  selected={settings.EmbeddingModelPref === model.id}
+                >
+                  {model.id}
+                </option>
+              );
+            })}
+          </optgroup>
+        )}
+      </select>
+    </div>
+  );
+}
diff --git a/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx b/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
index 0629fb9021f29aa14ac704c6a84a8d7179584490..2e400ad63266c7c4a21715fd0cd4f4689b6c2914 100644
--- a/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
@@ -7,12 +7,14 @@ import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
 import OpenAiLogo from "@/media/llmprovider/openai.png";
 import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
 import LocalAiLogo from "@/media/llmprovider/localai.png";
+import OllamaLogo from "@/media/llmprovider/ollama.png";
 import PreLoader from "@/components/Preloader";
 import ChangeWarningModal from "@/components/ChangeWarning";
 import OpenAiOptions from "@/components/EmbeddingSelection/OpenAiOptions";
 import AzureAiOptions from "@/components/EmbeddingSelection/AzureAiOptions";
 import LocalAiOptions from "@/components/EmbeddingSelection/LocalAiOptions";
 import NativeEmbeddingOptions from "@/components/EmbeddingSelection/NativeEmbeddingOptions";
+import OllamaEmbeddingOptions from "@/components/EmbeddingSelection/OllamaOptions";
 import EmbedderItem from "@/components/EmbeddingSelection/EmbedderItem";
 import { MagnifyingGlass } from "@phosphor-icons/react";
 import { useModal } from "@/hooks/useModal";
@@ -108,6 +110,13 @@ export default function GeneralEmbeddingPreference() {
       options: <LocalAiOptions settings={settings} />,
       description: "Run embedding models locally on your own machine.",
     },
+    {
+      name: "Ollama",
+      value: "ollama",
+      logo: OllamaLogo,
+      options: <OllamaEmbeddingOptions settings={settings} />,
+      description: "Run embedding models locally on your own machine.",
+    },
   ];
 
   useEffect(() => {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index 51dc73004e70073bf18161e1bc2f3f01ebbe554c..5beec3c1764f4dffffd64d4041329c4af634f865 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -221,6 +221,13 @@ const EMBEDDING_ENGINE_PRIVACY = {
     ],
     logo: LocalAiLogo,
   },
+  ollama: {
+    name: "Ollama",
+    description: [
+      "Your document text is embedded privately on the server running Ollama",
+    ],
+    logo: OllamaLogo,
+  },
 };
 
 export default function DataHandling({ setHeader, setForwardBtn, setBackBtn }) {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/EmbeddingPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/EmbeddingPreference/index.jsx
index fa17eebdf7f7eeb735470d1673986ef2e0ce0cc6..1932309e43f514a38b5062fb59ba090c3ee4b588 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/EmbeddingPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/EmbeddingPreference/index.jsx
@@ -4,10 +4,12 @@ import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
 import OpenAiLogo from "@/media/llmprovider/openai.png";
 import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
 import LocalAiLogo from "@/media/llmprovider/localai.png";
+import OllamaLogo from "@/media/llmprovider/ollama.png";
 import NativeEmbeddingOptions from "@/components/EmbeddingSelection/NativeEmbeddingOptions";
 import OpenAiOptions from "@/components/EmbeddingSelection/OpenAiOptions";
 import AzureAiOptions from "@/components/EmbeddingSelection/AzureAiOptions";
 import LocalAiOptions from "@/components/EmbeddingSelection/LocalAiOptions";
+import OllamaEmbeddingOptions from "@/components/EmbeddingSelection/OllamaOptions";
 import EmbedderItem from "@/components/EmbeddingSelection/EmbedderItem";
 import System from "@/models/system";
 import paths from "@/utils/paths";
@@ -70,6 +72,13 @@ export default function EmbeddingPreference({
       options: <LocalAiOptions settings={settings} />,
       description: "Run embedding models locally on your own machine.",
     },
+    {
+      name: "Ollama",
+      value: "ollama",
+      logo: OllamaLogo,
+      options: <OllamaEmbeddingOptions settings={settings} />,
+      description: "Run embedding models locally on your own machine.",
+    },
   ];
 
   function handleForward() {
diff --git a/server/.env.example b/server/.env.example
index bed943925a0df036e3076a05f129de0510ee0527..0ca826e89584ceb7c01739eee709ad53f0883f36 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -76,6 +76,11 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
 # EMBEDDING_MODEL_PREF='text-embedding-ada-002'
 # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The max chunk size in chars a string to embed can be
 
+# EMBEDDING_ENGINE='ollama'
+# EMBEDDING_BASE_PATH='http://127.0.0.1:11434'
+# EMBEDDING_MODEL_PREF='nomic-embed-text:latest'
+# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
+
 ###########################################
 ######## Vector Database Selection ########
 ###########################################
diff --git a/server/utils/EmbeddingEngines/ollama/index.js b/server/utils/EmbeddingEngines/ollama/index.js
new file mode 100644
index 0000000000000000000000000000000000000000..1f77c36e8e50959c821e4ef49014dfca46978ed8
--- /dev/null
+++ b/server/utils/EmbeddingEngines/ollama/index.js
@@ -0,0 +1,90 @@
+const { maximumChunkLength } = require("../../helpers");
+
+class OllamaEmbedder {
+  constructor() {
+    if (!process.env.EMBEDDING_BASE_PATH)
+      throw new Error("No embedding base path was set.");
+    if (!process.env.EMBEDDING_MODEL_PREF)
+      throw new Error("No embedding model was set.");
+
+    this.basePath = `${process.env.EMBEDDING_BASE_PATH}/api/embeddings`;
+    this.model = process.env.EMBEDDING_MODEL_PREF;
+    // Limit of how many strings we can process in a single pass to stay within resource or network limits
+    this.maxConcurrentChunks = 1;
+    this.embeddingMaxChunkLength = maximumChunkLength();
+  }
+
+  log(text, ...args) {
+    console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
+  }
+
+  async embedTextInput(textInput) {
+    const result = await this.embedChunks([textInput]);
+    return result?.[0] || [];
+  }
+
+  async embedChunks(textChunks = []) {
+    const embeddingRequests = [];
+    this.log(
+      `Embedding ${textChunks.length} chunks of text with ${this.model}.`
+    );
+
+    for (const chunk of textChunks) {
+      embeddingRequests.push(
+        new Promise((resolve) => {
+          fetch(this.basePath, {
+            method: "POST",
+            headers: { "Content-Type": "application/json" },
+            body: JSON.stringify({
+              model: this.model,
+              prompt: chunk,
+            }),
+          })
+            .then((res) => res.json())
+            .then(({ embedding }) => {
+              resolve({ data: embedding, error: null });
+              return;
+            })
+            .catch((error) => {
+              resolve({ data: [], error: error.message });
+              return;
+            });
+        })
+      );
+    }
+
+    const { data = [], error = null } = await Promise.all(
+      embeddingRequests
+    ).then((results) => {
+      // If any errors were returned from Ollama, abort the entire sequence
+      // because the embeddings will be incomplete.
+
+      const errors = results
+        .filter((res) => !!res.error)
+        .map((res) => res.error)
+        .flat();
+      if (errors.length > 0) {
+        // `errors` holds plain message strings resolved in the catch above,
+        // so we can de-duplicate them directly.
+        const uniqueErrors = new Set(errors);
+
+        return {
+          data: [],
+          error: Array.from(uniqueErrors).join(", "),
+        };
+      }
+
+      return {
+        data: results.map((res) => res?.data || []),
+        error: null,
+      };
+    });
+
+    if (!!error) throw new Error(`Ollama failed to embed: ${error}`);
+    return data.length > 0 ? data : null;
+  }
+}
+
+module.exports = {
+  OllamaEmbedder,
+};
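Taken together, the new embedder exposes the same two-method surface as the other engines (`embedTextInput` for a single string, `embedChunks` for a batch). A minimal usage sketch, assuming the env vars from the `.env.example` additions above are set:

```js
// Usage sketch for the new embedder. The env vars below mirror the
// .env.example additions and must be set before construction.
process.env.EMBEDDING_BASE_PATH = "http://127.0.0.1:11434";
process.env.EMBEDDING_MODEL_PREF = "nomic-embed-text:latest";

const { OllamaEmbedder } = require("./server/utils/EmbeddingEngines/ollama");
const embedder = new OllamaEmbedder();

(async () => {
  // Single string -> single vector.
  const vector = await embedder.embedTextInput("What is AnythingLLM?");
  console.log(vector.length);

  // Batch of chunks -> array of vectors; throws if any request fails so
  // partial embeddings are never stored.
  const vectors = await embedder.embedChunks(["chunk one", "chunk two"]);
  console.log(vectors.length); // 2
})();
```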
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 8bda716aa6b76ead9270e9396e7fb53bb0c4434d..a31a3e4f99b4f423157719822cc4335a8e9c6790 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -92,6 +92,9 @@ function getEmbeddingEngineSelection() {
     case "localai":
       const { LocalAiEmbedder } = require("../EmbeddingEngines/localAi");
       return new LocalAiEmbedder();
+    case "ollama":
+      const { OllamaEmbedder } = require("../EmbeddingEngines/ollama");
+      return new OllamaEmbedder();
     case "native":
       const { NativeEmbedder } = require("../EmbeddingEngines/native");
       console.log("\x1b[34m[INFO]\x1b[0m Using Native Embedder");
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 247e3ba486380c917bc024532329555a43af3665..1ca9368204f23645409e7b7e4e5c49e6a98b2db9 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -135,7 +135,7 @@ const KEY_MAPPING = {
   },
   EmbeddingBasePath: {
     envKey: "EMBEDDING_BASE_PATH",
-    checks: [isNotEmpty, validLLMExternalBasePath, validDockerizedUrl],
+    checks: [isNotEmpty, validDockerizedUrl],
   },
   EmbeddingModelPref: {
     envKey: "EMBEDDING_MODEL_PREF",
@@ -355,7 +355,7 @@ function validAnthropicModel(input = "") {
 }
 
 function supportedEmbeddingModel(input = "") {
-  const supported = ["openai", "azure", "localai", "native"];
+  const supported = ["openai", "azure", "localai", "native", "ollama"];
   return supported.includes(input)
     ? null
     : `Invalid Embedding model type. Must be one of ${supported.join(", ")}.`;
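The validator's contract is unchanged: `null` means the engine name is accepted, anything else is the error string surfaced to the caller. With this change:

```js
supportedEmbeddingModel("ollama"); // -> null (now accepted)
supportedEmbeddingModel("cohere"); // -> "Invalid Embedding model type. Must be one of openai, azure, localai, native, ollama."
```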