From 923b013567e84b7d9789055ca5501e4c686ab141 Mon Sep 17 00:00:00 2001
From: tolgaidev <164843802+tolgaidev@users.noreply.github.com>
Date: Tue, 21 May 2024 02:15:03 +0300
Subject: [PATCH] address document truncation issue by avoiding in-place
 mutation of the docs list

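This replaces the in-place truncation loop with a list comprehension, so
truncation builds a new list bound to the local name and the caller's docs
list is no longer mutated as a side effect. A minimal sketch of the intended
behaviour (illustrative only, not part of the patch; it assumes truncate is
a keyword argument of the encoder call, as suggested by the surrounding
code, and that OPENAI_API_KEY is available in the environment):

    from semantic_router.encoders.openai import OpenAIEncoder

    encoder = OpenAIEncoder()              # assumed to pick up OPENAI_API_KEY from the env
    long_text = "lorem ipsum " * 5000      # assumed long enough to exceed the token limit
    docs = [long_text]
    original = list(docs)
    encoder(docs, truncate=True)           # embeddings are computed on truncated copies
    assert docs == original                # the caller's list is left untouched

Before this change the loop wrote self._truncate(docs[i]) back into docs[i],
so the assertion above would fail: the caller's documents were silently
shortened.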
---
 semantic_router/encoders/openai.py                    | 3 +--
 tests/integration/encoders/test_openai_integration.py | 4 +---
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/semantic_router/encoders/openai.py b/semantic_router/encoders/openai.py
index 24cac037..86f46e8c 100644
--- a/semantic_router/encoders/openai.py
+++ b/semantic_router/encoders/openai.py
@@ -79,8 +79,7 @@ class OpenAIEncoder(BaseEncoder):
 
         if truncate:
             # check if any document exceeds token limit and truncate if so
-            for i in range(len(docs)):
-                docs[i] = self._truncate(docs[i])
+            docs = [self._truncate(doc) for doc in docs]
 
         # Exponential backoff
         for j in range(1, 7):
diff --git a/tests/integration/encoders/test_openai_integration.py b/tests/integration/encoders/test_openai_integration.py
index 73852bb3..3298071b 100644
--- a/tests/integration/encoders/test_openai_integration.py
+++ b/tests/integration/encoders/test_openai_integration.py
@@ -1,7 +1,5 @@
 import pytest
-
-
-from semantic_router.encoders import OpenAIEncoder
+from semantic_router.encoders.openai import OpenAIEncoder
 
 with open("tests/integration/57640.4032.txt", "r") as fp:
     long_doc = fp.read()
-- 
GitLab