From b0b5889405a0e7ae36111ff074e9da574d9dabc4 Mon Sep 17 00:00:00 2001
From: tolgadevAI <164843802+tolgadevAI@users.noreply.github.com>
Date: Wed, 17 Jul 2024 10:08:54 +0300
Subject: [PATCH] Use async client for OpenAI LLM calls

---
 semantic_router/layer.py       | 1 +
 semantic_router/llms/openai.py | 3 +--
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/semantic_router/layer.py b/semantic_router/layer.py
index b5c9475a..ac55db98 100644
--- a/semantic_router/layer.py
+++ b/semantic_router/layer.py
@@ -303,6 +303,7 @@ class RouteLayer:
                     )
                     self.llm = OpenAILLM()
                     route.llm = self.llm
+                    return await route.llm.acall(text)  # type: ignore
                 else:
                     route.llm = self.llm
             return route(text)
diff --git a/semantic_router/llms/openai.py b/semantic_router/llms/openai.py
index f7818201..24a38ee3 100644
--- a/semantic_router/llms/openai.py
+++ b/semantic_router/llms/openai.py
@@ -32,7 +32,6 @@ class OpenAILLM(BaseLLM):
         openai_api_key: Optional[str] = None,
         temperature: float = 0.01,
         max_tokens: int = 200,
-        use_async=False,
     ):
         if name is None:
             name = EncoderDefault.OPENAI.value["language_model"]
@@ -123,7 +122,7 @@ class OpenAILLM(BaseLLM):
                 function_schemas if function_schemas is not None else NOT_GIVEN
             )
 
-            completion = await self.async_client.chat.completions.create(  # type: ignore
+            completion = await self.async_client.chat.completions.create(
                 model=self.name,
                 messages=[m.to_openai() for m in messages],
                 temperature=self.temperature,
-- 
GitLab