diff --git a/apps/simple/llmStream.ts b/apps/simple/llmStream.ts
index 7cdbc9f7fc8b327939b2a41ced762eb02180d6be..2c2d75a0e9e1d9cc8d59b6a8aefd817cbb5182b8 100644
--- a/apps/simple/llmStream.ts
+++ b/apps/simple/llmStream.ts
@@ -1,4 +1,4 @@
-import {ChatMessage, OpenAI, Anthropic, SimpleChatEngine} from "llamaindex";
+import { Anthropic, ChatMessage, SimpleChatEngine } from "llamaindex";
 import { stdin as input, stdout as output } from "node:process";
 import readline from "node:readline/promises";
 
@@ -35,7 +35,7 @@ Where is Istanbul?
     //Case 2: .chat(query, undefined, false) => Response object
     //Case 3: .chat(query, undefined) => Response object
     const chatStream = await chatEngine.chat(query, undefined, true);
-    for await (const part of chatStream){
+    for await (const part of chatStream) {
       process.stdout.write(part);
       // accumulated_result += part;
     }
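
For reference, a minimal sketch (not part of the patch) of the three call shapes the Case comments in llmStream.ts describe, assuming SimpleChatEngine accepts an `llm` in its init object, that `ANTHROPIC_API_KEY` is set, and that the query string is purely illustrative:

```ts
import { Anthropic, SimpleChatEngine } from "llamaindex";

async function main() {
  const chatEngine = new SimpleChatEngine({ llm: new Anthropic() });

  // Case 1: streaming = true => async generator of string chunks.
  const chatStream = await chatEngine.chat("Where is Istanbul?", undefined, true);
  for await (const part of chatStream) {
    process.stdout.write(part);
  }

  // Cases 2 and 3: streaming = false (or omitted) => a single Response object,
  // assuming Response exposes its text via toString().
  const response = await chatEngine.chat("Where is Istanbul?", undefined, false);
  console.log(response.toString());
}

main().catch(console.error);
```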
diff --git a/packages/core/src/llm/LLM.ts b/packages/core/src/llm/LLM.ts
index ac21a6737e9745b278e98dbacba2da27275f90a4..b7febec10cce1b13ef5d5925a35ebc3496b4f21f 100644
--- a/packages/core/src/llm/LLM.ts
+++ b/packages/core/src/llm/LLM.ts
@@ -1,10 +1,10 @@
 import OpenAILLM, { ClientOptions as OpenAIClientOptions } from "openai";
 import {
+  AnthropicStreamToken,
   CallbackManager,
   Event,
   EventType,
   OpenAIStreamToken,
-  AnthropicStreamToken,
   StreamCallbackResponse,
 } from "../callbacks/CallbackManager";
 
@@ -607,7 +607,6 @@ export class Anthropic implements LLM {
     parentEvent?: Event | undefined,
     streaming?: T,
   ): Promise<R> {
-
     //Streaming
     if (streaming) {
       if (!this.hasStreaming) {
@@ -631,17 +630,20 @@ export class Anthropic implements LLM {
     } as R;
   }
 
-  protected async *streamChat(messages: ChatMessage[], parentEvent?: Event | undefined): AsyncGenerator<string, void, unknown>
-  {
+  protected async *streamChat(
+    messages: ChatMessage[],
+    parentEvent?: Event | undefined,
+  ): AsyncGenerator<string, void, unknown> {
     // AsyncIterable<AnthropicStreamToken>
-    const stream:AsyncIterable<AnthropicStreamToken>  = await this.session.anthropic.completions.create({
-      model: this.model,
-      prompt: this.mapMessagesToPrompt(messages),
-      max_tokens_to_sample: this.maxTokens ?? 100000,
-      temperature: this.temperature,
-      top_p: this.topP,
-      streaming: true
-    })
+    const stream: AsyncIterable<AnthropicStreamToken> =
+      await this.session.anthropic.completions.create({
+        model: this.model,
+        prompt: this.mapMessagesToPrompt(messages),
+        max_tokens_to_sample: this.maxTokens ?? 100000,
+        temperature: this.temperature,
+        top_p: this.topP,
+        stream: true,
+      });
 
     var idx_counter: number = 0;
     for await (const part of stream) {
@@ -654,11 +656,10 @@ export class Anthropic implements LLM {
       idx_counter++;
       yield part.choices[0].delta.content ? part.choices[0].delta.content : "";
 
-    return;
+    }
+    return;
   }
 
-}
-
   async complete<
     T extends boolean | undefined = undefined,
     R = T extends true ? AsyncGenerator<string, void, unknown> : ChatResponse,
@@ -670,7 +671,10 @@ export class Anthropic implements LLM {
     return this.chat([{ content: prompt, role: "user" }], parentEvent) as R;
   }
 
-  protected stream_complete(prompt: string, parentEvent?: Event | undefined): AsyncGenerator<string, void, unknown>{
+  protected stream_complete(
+    prompt: string,
+    parentEvent?: Event | undefined,
+  ): AsyncGenerator<string, void, unknown> {
     return this.streamChat([{ content: prompt, role: "user" }], parentEvent);
   }
 }
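
For context on what the new streamChat iterates over, here is a standalone sketch (not part of the patch) that streams the Anthropic text-completions endpoint directly with `@anthropic-ai/sdk`, the client reached through `this.session.anthropic` above. The model name and prompt are illustrative; note that the raw SDK takes `stream: true` and surfaces each text delta on `part.completion`:

```ts
import Anthropic from "@anthropic-ai/sdk";

async function demo() {
  const anthropic = new Anthropic(); // reads ANTHROPIC_API_KEY from the environment

  const stream = await anthropic.completions.create({
    model: "claude-2", // illustrative model name
    prompt: `${Anthropic.HUMAN_PROMPT} Where is Istanbul?${Anthropic.AI_PROMPT}`,
    max_tokens_to_sample: 256,
    temperature: 0.1,
    stream: true,
  });

  for await (const part of stream) {
    // Each chunk is a Completion; its text delta lives on `part.completion`.
    process.stdout.write(part.completion);
  }
}

demo().catch(console.error);
```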