From f490c354560d9d64a0de9b0cf65ac2334a62d630 Mon Sep 17 00:00:00 2001
From: Timothy Carambat <rambat1010@gmail.com>
Date: Wed, 7 Feb 2024 16:23:17 -0800
Subject: [PATCH] Recover from fatal Ollama crash caused by LangChain library (#693)

Resolve a fatal, unrecoverable crash when Ollama cannot be reached or
fails mid-request. The LangChain .invoke() calls in sendChat and
getChatCompletion now rethrow a descriptive error instead of letting the
rejection escape, and the streaming handler wraps its read loop in
try/catch so a failure is written back to the client as a closing error
chunk rather than killing the server.
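
A minimal sketch of the non-streaming recovery, assuming the model and
StringOutputParser setup already present in this file (langchainMessages
stands in for the converted message array; the snippet is illustrative,
not the exact patched code):

    // Convert a LangChain/Ollama transport failure into a normal Error the
    // caller can handle, instead of an unhandled promise rejection that
    // takes down the server.
    const textResponse = await model
      .pipe(new StringOutputParser())
      .invoke(langchainMessages)
      .catch((e) => {
        throw new Error(
          `Ollama::sendChat failed to communicate with Ollama. ${e.message}`
        );
      });

    // Guard against a null/undefined response as well as an empty one.
    if (!textResponse || !textResponse.length)
      throw new Error(`Ollama::sendChat text response was empty.`);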
---
 server/utils/AiProviders/ollama/index.js | 77 ++++++++++++++++--------
 1 file changed, 51 insertions(+), 26 deletions(-)

diff --git a/server/utils/AiProviders/ollama/index.js b/server/utils/AiProviders/ollama/index.js
index 9a16d245a..efd4649f1 100644
--- a/server/utils/AiProviders/ollama/index.js
+++ b/server/utils/AiProviders/ollama/index.js
@@ -119,9 +119,14 @@ class OllamaAILLM {
     });
     const textResponse = await model
       .pipe(new StringOutputParser())
-      .invoke(this.#convertToLangchainPrototypes(messages));
+      .invoke(this.#convertToLangchainPrototypes(messages))
+      .catch((e) => {
+        throw new Error(
+          `Ollama::sendChat failed to communicate with Ollama. ${e.message}`
+        );
+      });
 
-    if (!textResponse.length)
+    if (!textResponse || !textResponse.length)
       throw new Error(`Ollama::sendChat text response was empty.`);
 
     return textResponse;
@@ -150,9 +155,14 @@ class OllamaAILLM {
     const model = this.#ollamaClient({ temperature });
     const textResponse = await model
       .pipe(new StringOutputParser())
-      .invoke(this.#convertToLangchainPrototypes(messages));
+      .invoke(this.#convertToLangchainPrototypes(messages))
+      .catch((e) => {
+        throw new Error(
+          `Ollama::getChatCompletion failed to communicate with Ollama. ${e.message}`
+        );
+      });
 
-    if (!textResponse.length)
+    if (!textResponse || !textResponse.length)
       throw new Error(`Ollama::getChatCompletion text response was empty.`);
 
     return textResponse;
@@ -170,34 +180,49 @@ class OllamaAILLM {
     const { uuid = uuidv4(), sources = [] } = responseProps;
 
     return new Promise(async (resolve) => {
-      let fullText = "";
-      for await (const chunk of stream) {
-        if (chunk === undefined)
-          throw new Error(
-            "Stream returned undefined chunk. Aborting reply - check model provider logs."
-          );
-
-        const content = chunk.hasOwnProperty("content") ? chunk.content : chunk;
-        fullText += content;
+      try {
+        let fullText = "";
+        for await (const chunk of stream) {
+          if (chunk === undefined)
+            throw new Error(
+              "Stream returned undefined chunk. Aborting reply - check model provider logs."
+            );
+
+          const content = chunk.hasOwnProperty("content")
+            ? chunk.content
+            : chunk;
+          fullText += content;
+          writeResponseChunk(response, {
+            uuid,
+            sources: [],
+            type: "textResponseChunk",
+            textResponse: content,
+            close: false,
+            error: false,
+          });
+        }
+
         writeResponseChunk(response, {
           uuid,
-          sources: [],
+          sources,
           type: "textResponseChunk",
-          textResponse: content,
-          close: false,
+          textResponse: "",
+          close: true,
           error: false,
         });
+        resolve(fullText);
+      } catch (error) {
+        writeResponseChunk(response, {
+          uuid,
+          sources: [],
+          type: "textResponseChunk",
+          textResponse: "",
+          close: true,
+          error: `Ollama:streaming - could not stream chat. ${
+            error?.cause ?? error.message
+          }`,
+        });
       }
-
-      writeResponseChunk(response, {
-        uuid,
-        sources,
-        type: "textResponseChunk",
-        textResponse: "",
-        close: true,
-        error: false,
-      });
-      resolve(fullText);
     });
   }
 
-- 
GitLab