From 086a65141d8a3b1ceae488b4dd5bc87b49312cda Mon Sep 17 00:00:00 2001
From: Marcus Schiesser <mail@marcusschiesser.de>
Date: Tue, 10 Dec 2024 12:02:57 +0700
Subject: [PATCH] Add adapter to use Vercel model providers (#1558)

Co-authored-by: Alex Yang <himself65@outlook.com>
---
 .changeset/hip-windows-tease.md               |   5 +
 .../docs/llamaindex/integration/vercel.mdx    |  35 +++-
 examples/vercel/README.md                     |  10 +
 examples/vercel/llamacloud.ts                 |   1 +
 examples/vercel/llm.ts                        |  45 +++++
 examples/vercel/vector-store.ts               |   1 +
 packages/providers/vercel/src/index.ts        |   1 +
 packages/providers/vercel/src/llm.ts          | 183 ++++++++++++++++++
 packages/providers/vercel/src/tool.ts         |  33 ++--
 9 files changed, 294 insertions(+), 20 deletions(-)
 create mode 100644 .changeset/hip-windows-tease.md
 create mode 100644 examples/vercel/llm.ts
 create mode 100644 packages/providers/vercel/src/llm.ts

diff --git a/.changeset/hip-windows-tease.md b/.changeset/hip-windows-tease.md
new file mode 100644
index 000000000..cbf432352
--- /dev/null
+++ b/.changeset/hip-windows-tease.md
@@ -0,0 +1,5 @@
+---
+"@llamaindex/vercel": patch
+---
+
+Add VercelLLM (adapter to use any model provider from Vercel AI in LlamaIndex)
diff --git a/apps/next/src/content/docs/llamaindex/integration/vercel.mdx b/apps/next/src/content/docs/llamaindex/integration/vercel.mdx
index 48cdc2fc9..7eb946247 100644
--- a/apps/next/src/content/docs/llamaindex/integration/vercel.mdx
+++ b/apps/next/src/content/docs/llamaindex/integration/vercel.mdx
@@ -3,7 +3,9 @@ title: Vercel
 description: Integrate LlamaIndex with Vercel's AI SDK
 ---
 
-LlamaIndex provides integration with Vercel's AI SDK, allowing you to create powerful search and retrieval applications. Below are examples of how to use LlamaIndex with `streamText` from the Vercel AI SDK.
+LlamaIndex provides integration with Vercel's AI SDK, allowing you to create powerful search and retrieval applications. You can:
+- Use any of Vercel AI's [model providers](https://sdk.vercel.ai/docs/foundations/providers-and-models) as LLMs in LlamaIndex
+- Use indexes (e.g., `VectorStoreIndex` or `LlamaCloudIndex`) from LlamaIndexTS in your Vercel AI applications
 
 ## Setup
 
@@ -13,7 +15,22 @@ First, install the required dependencies:
 npm install @llamaindex/vercel ai
 ```
 
-## Using Local Vector Store
+## Using Vercel AI's Model Providers
+
+Using the `VercelLLM` adapter, you can use any of Vercel AI's [model providers](https://sdk.vercel.ai/docs/foundations/providers-and-models) as an LLM in LlamaIndex. Here's an example using OpenAI's GPT-4o model:
+
+```typescript
+import { openai } from "@ai-sdk/openai";
+import { VercelLLM } from "@llamaindex/vercel";
+
+const llm = new VercelLLM({ model: openai("gpt-4o") });
+const result = await llm.complete({
+  prompt: "What is the capital of France?",
+  stream: false, // Set to true if you want streaming responses
+});
+console.log(result.text);
+```
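+
+For streaming responses, call `chat()` with `stream: true` and iterate over the returned chunks (see `examples/vercel/llm.ts` for a complete example):
+
+```typescript
+const stream = await llm.chat({
+  messages: [{ content: "What is the capital of France?", role: "user" }],
+  stream: true,
+});
+for await (const chunk of stream) {
+  process.stdout.write(chunk.delta);
+}
+```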
+
+## Using Indexes
+
+### Using VectorStoreIndex
 
 Here's how to create a simple vector store index and query it using Vercel's AI SDK:
 
@@ -29,22 +46,25 @@ const index = await VectorStoreIndex.fromDocuments([document]);
 
 // Create a query tool
 const queryTool = llamaindex({
+  model: openai("gpt-4"),
   index,
   description: "Search through the documents", // optional
 });
 
 // Use the tool with Vercel's AI SDK
 streamText({
-  tools: { queryTool },
-  prompt: "Your question here",
   model: openai("gpt-4"),
+  prompt: "Your question here",
+  tools: { queryTool },
   onFinish({ response }) {
     console.log("Response:", response.messages); // log the response
   },
 }).toDataStream();
 ```
 
-## Using LlamaCloud
+> Note: The Vercel AI model passed to the `llamaindex` function is used by the response synthesizer to generate the response for the tool call.
+
+### Using LlamaCloud
 
 For production deployments, you can use LlamaCloud to store and manage your documents:
 
@@ -61,15 +81,16 @@ const index = await LlamaCloudIndex.fromDocuments({
 
 // Use it the same way as VectorStoreIndex
 const queryTool = llamaindex({
+  model: openai("gpt-4"),
   index,
   description: "Search through the documents",
 });
 
 // Use the tool with Vercel's AI SDK
 streamText({
-  tools: { queryTool },
-  prompt: "Your question here",
   model: openai("gpt-4"),
+  prompt: "Your question here",
+  tools: { queryTool },
 }).toDataStream();
 ```
 
diff --git a/examples/vercel/README.md b/examples/vercel/README.md
index edb1f11f2..c76430ec2 100644
--- a/examples/vercel/README.md
+++ b/examples/vercel/README.md
@@ -14,6 +14,16 @@ npm i
 
 Make sure to run the examples from the parent folder called `examples`. The following examples are available:
 
+### Vercel LLM Example
+
+Run the Vercel LLM example with:
+
+```bash
+npx tsx vercel/llm.ts
+```
+
+This example demonstrates the `VercelLLM` adapter with Vercel's OpenAI model provider, covering completion, streaming chat, and an `LLMAgent` with the `WikipediaTool`.
+
 ### Vector Store Example
 
 Run the local vector store example with:
diff --git a/examples/vercel/llamacloud.ts b/examples/vercel/llamacloud.ts
index 2fac09863..7d01dd577 100644
--- a/examples/vercel/llamacloud.ts
+++ b/examples/vercel/llamacloud.ts
@@ -22,6 +22,7 @@ async function main() {
     prompt: "Cost of moving cat from Russia to UK?",
     tools: {
       queryTool: llamaindex({
+        model: openai("gpt-4o"),
         index,
         description:
           "get information from your knowledge base to answer questions.", // optional description
diff --git a/examples/vercel/llm.ts b/examples/vercel/llm.ts
new file mode 100644
index 000000000..641e8ab7d
--- /dev/null
+++ b/examples/vercel/llm.ts
@@ -0,0 +1,45 @@
+import { openai } from "@ai-sdk/openai";
+import { VercelLLM } from "@llamaindex/vercel";
+import { LLMAgent, WikipediaTool } from "llamaindex";
+
+async function main() {
+  // Create an instance of VercelLLM with the OpenAI model
+  const vercelLLM = new VercelLLM({ model: openai("gpt-4o") });
+
+  console.log("\n=== Test 1: Using complete() for single response ===");
+  const result = await vercelLLM.complete({
+    prompt: "What is the capital of France?",
+    stream: false, // Set to true if you want streaming responses
+  });
+  console.log(result.text);
+
+  console.log("\n=== Test 2: Using chat() for streaming response ===");
+  const stream = await vercelLLM.chat({
+    messages: [
+      { content: "You want to talk in rhymes.", role: "system" },
+      {
+        content:
+          "How much wood would a woodchuck chuck if a woodchuck could chuck wood?",
+        role: "user",
+      },
+    ],
+    stream: true,
+  });
+  for await (const chunk of stream) {
+    process.stdout.write(chunk.delta);
+  }
+
+  console.log("\n=== Test 3: Using LLMAgent with WikipediaTool ===");
+  const agent = new LLMAgent({
+    llm: vercelLLM,
+    tools: [new WikipediaTool()],
+  });
+
+  const { message } = await agent.chat({
+    message: "What's the history of New York from Wikipedia in 3 sentences?",
+  });
+
+  console.log(message);
+}
+
+main().catch(console.error);
diff --git a/examples/vercel/vector-store.ts b/examples/vercel/vector-store.ts
index c61291ba3..d484739c6 100644
--- a/examples/vercel/vector-store.ts
+++ b/examples/vercel/vector-store.ts
@@ -18,6 +18,7 @@ async function main() {
     prompt: "Cost of moving cat from Russia to UK?",
     tools: {
       queryTool: llamaindex({
+        model: openai("gpt-4o"),
         index,
         description:
           "get information from your knowledge base to answer questions.", // optional description
diff --git a/packages/providers/vercel/src/index.ts b/packages/providers/vercel/src/index.ts
index 548473883..c0347b5c2 100644
--- a/packages/providers/vercel/src/index.ts
+++ b/packages/providers/vercel/src/index.ts
@@ -1 +1,2 @@
+export { VercelLLM } from "./llm";
 export { llamaindex } from "./tool";
diff --git a/packages/providers/vercel/src/llm.ts b/packages/providers/vercel/src/llm.ts
new file mode 100644
index 000000000..fbf899a37
--- /dev/null
+++ b/packages/providers/vercel/src/llm.ts
@@ -0,0 +1,183 @@
+import { wrapEventCaller, wrapLLMEvent } from "@llamaindex/core/decorator";
+import {
+  ToolCallLLM,
+  type ChatMessage,
+  type ChatResponse,
+  type ChatResponseChunk,
+  type LLMChatParamsNonStreaming,
+  type LLMChatParamsStreaming,
+  type LLMMetadata,
+  type ToolCallLLMMessageOptions,
+} from "@llamaindex/core/llms";
+import { extractText } from "@llamaindex/core/utils";
+import {
+  generateText,
+  streamText,
+  type CoreAssistantMessage,
+  type CoreMessage,
+  type CoreSystemMessage,
+  type CoreToolMessage,
+  type CoreUserMessage,
+  type ImagePart,
+  type LanguageModelV1,
+  type TextPart,
+} from "ai";
+
+export type VercelAdditionalChatOptions = ToolCallLLMMessageOptions;
+
+export class VercelLLM extends ToolCallLLM<VercelAdditionalChatOptions> {
+  supportToolCall: boolean = true;
+  private model: LanguageModelV1;
+
+  constructor({ model }: { model: LanguageModelV1 }) {
+    super();
+    this.model = model;
+  }
+
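+  // Hard-coded defaults for the metadata reported to LlamaIndex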
+  get metadata(): LLMMetadata {
+    return {
+      model: this.model.modelId,
+      temperature: 1,
+      topP: 1,
+      contextWindow: 128000,
+      tokenizer: undefined,
+    };
+  }
+
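+  // Convert LlamaIndex chat messages (including tool calls and tool results) into Vercel AI SDK CoreMessages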
+  private toVercelMessages(
+    messages: ChatMessage<ToolCallLLMMessageOptions>[],
+  ): CoreMessage[] {
+    return messages.map((message) => {
+      const options = message.options ?? {};
+
+      if ("toolResult" in options) {
+        return {
+          role: "tool",
+          content: [
+            {
+              type: "tool-result",
+              toolCallId: options.toolResult.id,
+              toolName: "", // XXX: the tool result doesn't carry the tool name
+              isError: options.toolResult.isError,
+              result: options.toolResult.result,
+            },
+          ],
+        } satisfies CoreToolMessage;
+      } else if ("toolCall" in options) {
+        return {
+          role: "assistant",
+          content: options.toolCall.map((toolCall) => ({
+            type: "tool-call",
+            toolName: toolCall.name,
+            toolCallId: toolCall.id,
+            args: toolCall.input,
+          })),
+        } satisfies CoreAssistantMessage;
+      }
+
+      if (message.role === "system" || message.role === "assistant") {
+        return {
+          role: message.role,
+          content: extractText(message.content),
+        } satisfies CoreSystemMessage | CoreAssistantMessage;
+      }
+
+      if (message.role === "user") {
+        return {
+          role: message.role,
+          content:
+            typeof message.content === "string"
+              ? message.content
+              : message.content.map((contentDetail) => {
+                  if (contentDetail.type === "image_url") {
+                    return {
+                      type: "image",
+                      image: new URL(contentDetail.image_url.url),
+                    } satisfies ImagePart;
+                  }
+                  return {
+                    type: "text",
+                    text: contentDetail.text,
+                  } satisfies TextPart;
+                }),
+        } satisfies CoreUserMessage;
+      }
+
+      throw new Error(`Cannot convert message ${JSON.stringify(message)}`);
+    });
+  }
+
+  chat(
+    params: LLMChatParamsStreaming<
+      VercelAdditionalChatOptions,
+      ToolCallLLMMessageOptions
+    >,
+  ): Promise<AsyncIterable<ChatResponseChunk<ToolCallLLMMessageOptions>>>;
+  chat(
+    params: LLMChatParamsNonStreaming<
+      VercelAdditionalChatOptions,
+      ToolCallLLMMessageOptions
+    >,
+  ): Promise<ChatResponse<ToolCallLLMMessageOptions>>;
+  @wrapEventCaller
+  @wrapLLMEvent
+  async chat(
+    params:
+      | LLMChatParamsNonStreaming<
+          VercelAdditionalChatOptions,
+          ToolCallLLMMessageOptions
+        >
+      | LLMChatParamsStreaming<
+          VercelAdditionalChatOptions,
+          ToolCallLLMMessageOptions
+        >,
+  ): Promise<
+    | ChatResponse<ToolCallLLMMessageOptions>
+    | AsyncIterable<ChatResponseChunk<ToolCallLLMMessageOptions>>
+  > {
+    const { messages, stream } = params;
+
+    // Streaming
+    if (stream) {
+      const result = streamText({
+        model: this.model,
+        messages: this.toVercelMessages(messages),
+      });
+      return result.fullStream.pipeThrough(
+        new TransformStream({
+          async transform(message, controller): Promise<void> {
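+            // Forward only text deltas as ChatResponseChunks; other stream part types are ignored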
+            switch (message.type) {
+              case "text-delta":
+                controller.enqueue({ raw: message, delta: message.textDelta });
+            }
+          },
+        }),
+      );
+    }
+
+    // Non-streaming
+    const result = await generateText({
+      model: this.model,
+      messages: this.toVercelMessages(messages),
+    });
+
+    return {
+      raw: result,
+      message: {
+        content: result.text,
+        role: "assistant",
+        options: result.toolCalls?.length
+          ? {
+              toolCall: result.toolCalls.map(
+                ({ toolCallId, toolName, args }) => ({
+                  id: toolCallId,
+                  name: toolName,
+                  input: args,
+                }),
+              ),
+            }
+          : {},
+      },
+    };
+  }
+}
diff --git a/packages/providers/vercel/src/tool.ts b/packages/providers/vercel/src/tool.ts
index a0720d94f..4abe6101e 100644
--- a/packages/providers/vercel/src/tool.ts
+++ b/packages/providers/vercel/src/tool.ts
@@ -1,29 +1,36 @@
+import { Settings } from "@llamaindex/core/global";
 import type { BaseQueryEngine } from "@llamaindex/core/query-engine";
-import { type CoreTool, tool } from "ai";
+import { type CoreTool, type LanguageModelV1, tool } from "ai";
 import { z } from "zod";
+import { VercelLLM } from "./llm";
 
 interface DatasourceIndex {
   asQueryEngine: () => BaseQueryEngine;
 }
 
 export function llamaindex({
+  model,
   index,
   description,
 }: {
+  model: LanguageModelV1;
   index: DatasourceIndex;
   description?: string;
 }): CoreTool {
-  const queryEngine = index.asQueryEngine();
-  return tool({
-    description: description ?? "Get information about your documents.",
-    parameters: z.object({
-      query: z
-        .string()
-        .describe("The query to get information about your documents."),
-    }),
-    execute: async ({ query }) => {
-      const result = await queryEngine?.query({ query });
-      return result?.message.content ?? "No result found in documents.";
-    },
+  const llm = new VercelLLM({ model });
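+  // Set the Vercel model as the active LLM while creating the tool, so the query engine's response synthesizer uses it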
+  return Settings.withLLM<CoreTool>(llm, () => {
+    const queryEngine = index.asQueryEngine();
+    return tool({
+      description: description ?? "Get information about your documents.",
+      parameters: z.object({
+        query: z
+          .string()
+          .describe("The query to get information about your documents."),
+      }),
+      execute: async ({ query }) => {
+        const result = await queryEngine?.query({ query });
+        return result?.message.content ?? "No result found in documents.";
+      },
+    });
   });
 }
-- 
GitLab