diff --git a/.changeset/popular-jokes-push.md b/.changeset/popular-jokes-push.md
new file mode 100644
index 0000000000000000000000000000000000000000..6e25f7222f699bec918eb92deb2b01517e79942c
--- /dev/null
+++ b/.changeset/popular-jokes-push.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Add multi-agent task selector for TS template
diff --git a/templates/components/multiagent/python/app/agents/planner.py b/templates/components/multiagent/python/app/agents/planner.py
index ce9ba01ee7b85e9285058bc68af2014a88e7cb29..8bb4fd721eec549639546f3c27e99051ef484927 100644
--- a/templates/components/multiagent/python/app/agents/planner.py
+++ b/templates/components/multiagent/python/app/agents/planner.py
@@ -27,7 +27,7 @@ from llama_index.core.workflow import (
 
 INITIAL_PLANNER_PROMPT = """\
 Think step-by-step. Given a conversation, set of tools and a user request. Your responsibility is to create a plan to complete the task.
-The plan must adapt with the user request and the conversation. It's fine to just start with needed tasks first and asking user for the next step approval.
+The plan must adapt with the user request and the conversation.
 The tools available are:
 {tools_str}
 
diff --git a/templates/components/multiagent/python/app/examples/workflow.py b/templates/components/multiagent/python/app/examples/workflow.py
index 9fffb45001e37aaef4dabe36f838a2235a3d950a..8a3432d9467f6efeab7014b819416b4f93c714a1 100644
--- a/templates/components/multiagent/python/app/examples/workflow.py
+++ b/templates/components/multiagent/python/app/examples/workflow.py
@@ -1,12 +1,12 @@
 from textwrap import dedent
 from typing import AsyncGenerator, List, Optional
 
-from llama_index.core.settings import Settings
-from llama_index.core.prompts import PromptTemplate
 from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
 from app.examples.publisher import create_publisher
 from app.examples.researcher import create_researcher
 from llama_index.core.chat_engine.types import ChatMessage
+from llama_index.core.prompts import PromptTemplate
+from llama_index.core.settings import Settings
 from llama_index.core.workflow import (
     Context,
     Event,
@@ -33,7 +33,6 @@ def create_workflow(chat_history: Optional[List[ChatMessage]] = None):
             You are given the task of writing a blog post based on research content provided by the researcher agent. Do not invent any information yourself.
             It's important to read the entire conversation history to write the blog post accurately.
             If you receive a review from the reviewer, update the post according to the feedback and return the new post content.
-            If the user requests an update with new information but no research content is provided, you must respond with: "I don't have any research content to write about."
             If the content is not valid (e.g., broken link, broken image, etc.), do not use it.
             It's normal for the task to include some ambiguity, so you must define the user's initial request to write the post correctly.
             If you update the post based on the reviewer's feedback, first explain what changes you made to the post, then provide the new post content. Do not include the reviewer's comments.
@@ -128,10 +127,20 @@ class BlogPostWorkflow(Workflow):
         self, input: str, chat_history: List[ChatMessage]
     ) -> str:
         prompt_template = PromptTemplate(
-            "Given the following chat history and new task, decide whether to publish based on existing information.\n"
-            "Chat history:\n{chat_history}\n"
-            "New task: {input}\n"
-            "Decision (respond with either 'not_publish' or 'publish'):"
+            dedent("""
+                You are an expert in decision-making, helping people write and publish blog posts.
+                If the user is asking for a file or to publish content, respond with 'publish'.
+                If the user requests to write or update a blog post, respond with 'not_publish'.
+
+                Here is the chat history:
+                {chat_history}
+
+                The current user request is:
+                {input}
+
+                Given the chat history and the new user request, decide whether to publish based on existing information.
+                Decision (respond with either 'not_publish' or 'publish'):
+            """)
         )
         chat_history_str = "\n".join(
             [f"{msg.role}: {msg.content}" for msg in chat_history]
@@ -171,7 +180,10 @@ class BlogPostWorkflow(Workflow):
         if ev.is_good or too_many_attempts:
             # too many attempts or the blog post is good - stream final response if requested
             result = await self.run_agent(
-                ctx, writer, ev.input, streaming=ctx.data["streaming"]
+                ctx,
+                writer,
+                f"Based on the reviewer's feedback, refine the post and return only the final version of the post. Here's the current version: {ev.input}",
+                streaming=ctx.data["streaming"],
             )
             return StopEvent(result=result)
         result: AgentRunResult = await self.run_agent(ctx, writer, ev.input)
diff --git a/templates/components/multiagent/typescript/workflow/agents.ts b/templates/components/multiagent/typescript/workflow/agents.ts
index b62bd360ad03b9960261ce538f72065b2bef9dd3..6af2bf94abe77c2459ab225c97f598c19b3aa169 100644
--- a/templates/components/multiagent/typescript/workflow/agents.ts
+++ b/templates/components/multiagent/typescript/workflow/agents.ts
@@ -44,7 +44,6 @@ export const createWriter = (chatHistory: ChatMessage[]) => {
 You are given the task of writing a blog post based on research content provided by the researcher agent. Do not invent any information yourself.
 It's important to read the entire conversation history to write the blog post accurately.
 If you receive a review from the reviewer, update the post according to the feedback and return the new post content.
-If the user requests an update with new information but no research content is provided, you must respond with: "I don't have any research content to write about."
 If the content is not valid (e.g., broken link, broken image, etc.), do not use it.
 It's normal for the task to include some ambiguity, so you must define the user's initial request to write the post correctly.
 If you update the post based on the reviewer's feedback, first explain what changes you made to the post, then provide the new post content. Do not include the reviewer's comments.
diff --git a/templates/components/multiagent/typescript/workflow/factory.ts b/templates/components/multiagent/typescript/workflow/factory.ts
index 01161303bd509b8dcd70c3d9071fa043d221287d..2aef2c252a27ffc8ede6e923e892c90e7517fb83 100644
--- a/templates/components/multiagent/typescript/workflow/factory.ts
+++ b/templates/components/multiagent/typescript/workflow/factory.ts
@@ -5,7 +5,7 @@ import {
   Workflow,
   WorkflowEvent,
 } from "@llamaindex/core/workflow";
-import { ChatMessage, ChatResponseChunk } from "llamaindex";
+import { ChatMessage, ChatResponseChunk, Settings } from "llamaindex";
 import {
   createPublisher,
   createResearcher,
@@ -82,9 +82,44 @@ export const createWorkflow = (chatHistory: ChatMessage[]) => {
 
   const start = async (context: Context, ev: StartEvent) => {
     context.set("task", ev.data.input);
-    return new ResearchEvent({
-      input: `Research for this task: ${ev.data.input}`,
-    });
+
+    const chatHistoryStr = chatHistoryWithAgentMessages
+      .map((msg) => `${msg.role}: ${msg.content}`)
+      .join("\n");
+
+    // Decision-making process
+    const decision = await decideWorkflow(ev.data.input, chatHistoryStr);
+
+    if (decision !== "publish") {
+      return new ResearchEvent({
+        input: `Research for this task: ${ev.data.input}`,
+      });
+    } else {
+      return new PublishEvent({
+        input: `Publish content based on the chat history\n${chatHistoryStr}\n\n and task: ${ev.data.input}`,
+      });
+    }
+  };
+
+  const decideWorkflow = async (task: string, chatHistoryStr: string) => {
+    const llm = Settings.llm;
+
+    const prompt = `You are an expert in decision-making, helping people write and publish blog posts.
+If the user is asking for a file or to publish content, respond with 'publish'.
+If the user requests to write or update a blog post, respond with 'not_publish'.
+
+Here is the chat history:
+${chatHistoryStr}
+
+The current user request is:
+${task}
+
+Given the chat history and the new user request, decide whether to publish based on existing information.
+Decision (respond with either 'not_publish' or 'publish'):`;
+
+    const output = await llm.complete({ prompt: prompt });
+    const decision = output.text.trim().toLowerCase();
+    return decision === "publish" ? "publish" : "research";
   };
 
   const research = async (context: Context, ev: ResearchEvent) => {
@@ -100,6 +135,8 @@ export const createWorkflow = (chatHistory: ChatMessage[]) => {
   };
 
   const write = async (context: Context, ev: WriteEvent) => {
+    const writer = createWriter(chatHistoryWithAgentMessages);
+
     context.set("attempts", context.get("attempts", 0) + 1);
     const tooManyAttempts = context.get("attempts") > MAX_ATTEMPTS;
     if (tooManyAttempts) {
@@ -112,12 +149,15 @@ export const createWorkflow = (chatHistory: ChatMessage[]) => {
     }
 
     if (ev.data.isGood || tooManyAttempts) {
-      return new PublishEvent({
-        input: "Please help me to publish the blog post.",
+      // the blog post is good or too many attempts
+      // stream the final content
+      const result = await runAgent(context, writer, {
+        message: `Based on the reviewer's feedback, refine the post and return only the final version of the post. Here's the current version: ${ev.data.input}`,
+        streaming: true,
       });
+      return result as unknown as StopEvent<AsyncGenerator<ChatResponseChunk>>;
     }
 
-    const writer = createWriter(chatHistoryWithAgentMessages);
     const writeRes = await runAgent(context, writer, {
       message: ev.data.input,
     });
@@ -177,9 +217,11 @@ export const createWorkflow = (chatHistory: ChatMessage[]) => {
   };
 
   const workflow = new Workflow({ timeout: TIMEOUT, validate: true });
-  workflow.addStep(StartEvent, start, { outputs: ResearchEvent });
+  workflow.addStep(StartEvent, start, {
+    outputs: [ResearchEvent, PublishEvent],
+  });
   workflow.addStep(ResearchEvent, research, { outputs: WriteEvent });
-  workflow.addStep(WriteEvent, write, { outputs: [ReviewEvent, PublishEvent] });
+  workflow.addStep(WriteEvent, write, { outputs: [ReviewEvent, StopEvent] });
   workflow.addStep(ReviewEvent, review, { outputs: WriteEvent });
   workflow.addStep(PublishEvent, publish, { outputs: StopEvent });
 
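
Note on the pattern: the new decideWorkflow helper in factory.ts reduces task selection to a single LLM completion whose reply is normalized into a closed set of route tokens. Below is a minimal, self-contained sketch of that pattern. The LLMLike interface and stubLLM object are hypothetical stand-ins for llamaindex's Settings.llm, not part of the library's API.

// Binary task selector, after the pattern of decideWorkflow in factory.ts.
// LLMLike and stubLLM are hypothetical stand-ins for Settings.llm.
interface LLMLike {
  complete(args: { prompt: string }): Promise<{ text: string }>;
}

// Stub that always answers 'publish'; swap in a real LLM in practice.
const stubLLM: LLMLike = {
  complete: async () => ({ text: " Publish " }),
};

type Route = "publish" | "research";

const decide = async (
  llm: LLMLike,
  task: string,
  chatHistoryStr: string,
): Promise<Route> => {
  const prompt = `If the user is asking for a file or to publish content, respond with 'publish'.
If the user requests to write or update a blog post, respond with 'not_publish'.

Here is the chat history:
${chatHistoryStr}

The current user request is:
${task}

Decision (respond with either 'not_publish' or 'publish'):`;
  const output = await llm.complete({ prompt });
  // Normalize and fail closed: anything other than an exact 'publish'
  // routes to research, so a malformed reply can never skip research.
  return output.text.trim().toLowerCase() === "publish" ? "publish" : "research";
};

// Usage: the stub's ' Publish ' normalizes to 'publish' and takes the publish branch.
decide(stubLLM, "Publish my post as a PDF", "user: write a post about pandas")
  .then((route) => console.log(route)); // -> "publish"

Falling back to "research" on any reply other than an exact "publish" fails closed: a malformed completion can never skip the research step, which mirrors the decision !== "publish" guard in the patched start step.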
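
The write step's terminal branch now streams the refined post directly as the workflow result rather than emitting a PublishEvent (hence the StopEvent in its outputs). A sketch of that shape using plain async generators; fakeAgentStream and writeStep are illustrative stand-ins, not the template's runAgent API.

// Sketch of the write step's new terminal branch: when the draft is accepted
// (or attempts are exhausted), the agent's streaming output becomes the
// workflow result instead of a PublishEvent. fakeAgentStream and writeStep
// are illustrative stand-ins, not the template's runAgent API.
async function* fakeAgentStream(message: string): AsyncGenerator<{ delta: string }> {
  for (const word of `Final post for: ${message}`.split(" ")) {
    yield { delta: `${word} ` };
  }
}

async function writeStep(isGood: boolean, tooManyAttempts: boolean, draft: string) {
  if (isGood || tooManyAttempts) {
    // Terminal path: hand the stream straight back to the caller.
    return fakeAgentStream(
      `Based on the reviewer's feedback, refine the post. Current version: ${draft}`,
    );
  }
  return null; // in the real workflow this branch emits another ReviewEvent
}

// Usage: consume the final stream chunk by chunk.
(async () => {
  const stream = await writeStep(true, false, "draft v3");
  if (stream) {
    for await (const chunk of stream) process.stdout.write(chunk.delta);
  }
})();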