diff --git a/arguments_classes/open_api_language_model_arguments.py b/arguments_classes/open_api_language_model_arguments.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6ab1d2aeccbb138c1fa3b0571f9fa116de19c90
--- /dev/null
+++ b/arguments_classes/open_api_language_model_arguments.py
@@ -0,0 +1,74 @@
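+"""Argument dataclass for the OpenAI-compatible API language model handler
+(OpenApiModelHandler in LLM/openai_api_language_model.py).
+
+Hypothetical invocation, assuming the pipeline exposes these fields as CLI flags:
+
+    python s2s_pipeline.py --llm open_api \
+        --open_api_model_name deepseek-chat \
+        --open_api_base_url https://api.deepseek.com \
+        --open_api_api_key $DEEPSEEK_API_KEY
+"""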
+from dataclasses import dataclass, field
+from typing import Optional
+
+
+@dataclass
+class OpenApiLanguageModelHandlerArguments:
+    open_api_model_name: str = field(
+        # default="HuggingFaceTB/SmolLM-360M-Instruct",
+        default="deepseek-chat",
+        metadata={
+            "help": "The pretrained language model to use. Default is 'microsoft/Phi-3-mini-4k-instruct'."
+        },
+    )
+    open_api_user_role: str = field(
+        default="user",
+        metadata={
+            "help": "Role assigned to the user in the chat context. Default is 'user'."
+        },
+    )
+    open_api_init_chat_role: str = field(
+        default="system",
+        metadata={
+            "help": "Initial role for setting up the chat context. Default is 'system'."
+        },
+    )
+    open_api_init_chat_prompt: str = field(
+        # default="You are a helpful and friendly AI assistant. You are polite, respectful, and aim to provide concise responses of less than 20 words.",
+        default="You are a helpful and friendly AI assistant. You are polite, respectful, and aim to provide concise responses of less than 20 words.",
+        metadata={
+            "help": "The initial chat prompt to establish context for the language model. Default is 'You are a helpful AI assistant.'"
+        },
+    )
+
+    open_api_chat_size: int = field(
+        default=2,
+        metadata={
+            "help": "Number of interactions assitant-user to keep for the chat. None for no limitations."
+        },
+    )
+    open_api_api_key: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": "Is a unique code used to authenticate and authorize access to an API.Default is None"
+        },
+    )
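+    # Assumption: the handler forwards api_key/base_url directly to the OpenAI
+    # client, so any OpenAI-compatible endpoint can be used (e.g. DeepSeek at
+    # https://api.deepseek.com for the default "deepseek-chat" model).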
+    open_api_base_url: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": "Is the root URL for all endpoints of an API, serving as the starting point for constructing API request.Default is Non"
+        },
+    )
+    open_api_stream: bool = field(
+        default=False,
+        metadata={
+            "help": "The stream parameter typically indicates whether data should be transmitted in a continuous flow rather"
+                    " than in a single, complete response, often used for handling large or real-time data.Default is False"
+        },
+    )
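+    # Sketch of how the handler is assumed to consume these fields, based on
+    # the standard OpenAI client API:
+    #   client = OpenAI(api_key=open_api_api_key, base_url=open_api_base_url)
+    #   response = client.chat.completions.create(
+    #       model=open_api_model_name, messages=chat, stream=open_api_stream)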
diff --git a/requirements.txt b/requirements.txt
index fba30cd7f5e716797d29b3dd5890fd1a610d06a2..fd6542f57bd69bcc8447270f93ccacec183570a0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,3 +7,4 @@ ChatTTS>=0.1.1
 funasr>=1.1.6
 modelscope>=1.17.1
 deepfilternet>=0.5.6
+openai>=1.40.1
diff --git a/requirements_mac.txt b/requirements_mac.txt
index 4a1c5cbb4a101ce611a2b81e4d52b73259782a0c..a146c3bef85b55a22ea63eada723421d532a4679 100644
--- a/requirements_mac.txt
+++ b/requirements_mac.txt
@@ -9,4 +9,4 @@ ChatTTS>=0.1.1
 funasr>=1.1.6
 modelscope>=1.17.1
 deepfilternet>=0.5.6
-
+openai>=1.40.1
diff --git a/s2s_pipeline.py b/s2s_pipeline.py
index c0a7c82620e908147743133c5d523f5c4b905e69..1da202e200c825ad8473f1e115d41cd5f8f686ff 100644
--- a/s2s_pipeline.py
+++ b/s2s_pipeline.py
@@ -323,6 +323,7 @@ def get_llm_handler(
             queue_out=lm_response_queue,
             setup_kwargs=vars(open_api_language_model_handler_kwargs),
         )
+
     elif module_kwargs.llm == "mlx-lm":
         from LLM.mlx_language_model import MLXLanguageModelHandler
         return MLXLanguageModelHandler(
@@ -331,6 +332,7 @@ def get_llm_handler(
             queue_out=lm_response_queue,
             setup_kwargs=vars(mlx_language_model_handler_kwargs),
         )
+
     else:
         raise ValueError("The LLM should be either transformers or mlx-lm")