diff --git a/.changeset/moody-mangos-punch.md b/.changeset/moody-mangos-punch.md
new file mode 100644
index 0000000000000000000000000000000000000000..b54e1527a424cb601c9cfa60a5b1c9030146721c
--- /dev/null
+++ b/.changeset/moody-mangos-punch.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Show streaming errors in Python, optimize system prompts for tool usage, and set the weather tool as the default for the Agentic RAG use case
diff --git a/helpers/env-variables.ts b/helpers/env-variables.ts
index 37fc2d5378fef0b713c8ede881ff9b558e356077..5e430b2b109a9f273eddae0289b92a84314178c7 100644
--- a/helpers/env-variables.ts
+++ b/helpers/env-variables.ts
@@ -13,6 +13,12 @@ import {
 
 import { TSYSTEMS_LLMHUB_API_URL } from "./providers/llmhub";
 
+const DEFAULT_SYSTEM_PROMPT =
+  "You are a helpful assistant who helps users with their questions.";
+
+const DATA_SOURCES_PROMPT =
+  "You have access to a knowledge base including the facts that you should start with to find the answer for the user question. Use the query engine tool to retrieve the facts from the knowledge base.";
+
 export type EnvVar = {
   name?: string;
   description?: string;
@@ -449,9 +455,6 @@ const getSystemPromptEnv = (
   dataSources?: TemplateDataSource[],
   template?: TemplateType,
 ): EnvVar[] => {
-  const defaultSystemPrompt =
-    "You are a helpful assistant who helps users with their questions.";
-
   const systemPromptEnv: EnvVar[] = [];
   // build tool system prompt by merging all tool system prompts
   // multiagent template doesn't need system prompt
@@ -466,9 +469,12 @@ const getSystemPromptEnv = (
       }
     });
 
-    const systemPrompt = toolSystemPrompt
-      ? `\"${toolSystemPrompt}\"`
-      : defaultSystemPrompt;
+    const systemPrompt =
+      "'" +
+      DEFAULT_SYSTEM_PROMPT +
+      (dataSources?.length ? `\n${DATA_SOURCES_PROMPT}` : "") +
+      (toolSystemPrompt ? `\n${toolSystemPrompt}` : "") +
+      "'";
 
     systemPromptEnv.push({
       name: "SYSTEM_PROMPT",
diff --git a/helpers/tools.ts b/helpers/tools.ts
index 5d3ab929f25715c53eaffb690968e0ad9c835cd8..c3a34390c82a7194d2eca8dcb616d9e8bb4a824c 100644
--- a/helpers/tools.ts
+++ b/helpers/tools.ts
@@ -71,8 +71,7 @@ export const supportedTools: Tool[] = [
       {
         name: TOOL_SYSTEM_PROMPT_ENV_VAR,
         description: "System prompt for DuckDuckGo search tool.",
-        value: `You are a DuckDuckGo search agent. 
-You can use the duckduckgo search tool to get information from the web to answer user questions.
+        value: `You have access to the duckduckgo search tool. Use it to get information from the web to answer user questions.
 For better results, you can optionally specify the region parameter to get results from a specific region.`,
       },
     ],
@@ -88,13 +87,6 @@ For better results, you can specify the region parameter to get results from a s
     ],
     supportedFrameworks: ["fastapi", "express", "nextjs"],
     type: ToolType.LLAMAHUB,
-    envVars: [
-      {
-        name: TOOL_SYSTEM_PROMPT_ENV_VAR,
-        description: "System prompt for wiki tool.",
-        value: `You are a Wikipedia agent. You help users to get information from Wikipedia.`,
-      },
-    ],
   },
   {
     display: "Weather",
@@ -102,13 +94,6 @@ For better results, you can specify the region parameter to get results from a s
     dependencies: [],
     supportedFrameworks: ["fastapi", "express", "nextjs"],
     type: ToolType.LOCAL,
-    envVars: [
-      {
-        name: TOOL_SYSTEM_PROMPT_ENV_VAR,
-        description: "System prompt for weather tool.",
-        value: `You are a weather forecast agent. You help users to get the weather forecast for a given location.`,
-      },
-    ],
   },
   {
     display: "Document generator",
@@ -211,14 +196,6 @@ For better results, you can specify the region parameter to get results from a s
     },
     supportedFrameworks: ["fastapi", "express", "nextjs"],
     type: ToolType.LOCAL,
-    envVars: [
-      {
-        name: TOOL_SYSTEM_PROMPT_ENV_VAR,
-        description: "System prompt for openapi action tool.",
-        value:
-          "You are an OpenAPI action agent. You help users to make requests to the provided OpenAPI schema.",
-      },
-    ],
   },
   {
     display: "Image Generator",
@@ -231,11 +208,6 @@ For better results, you can specify the region parameter to get results from a s
         description:
           "STABILITY_API_KEY key is required to run image generator. Get it here: https://platform.stability.ai/account/keys",
       },
-      {
-        name: TOOL_SYSTEM_PROMPT_ENV_VAR,
-        description: "System prompt for image generator tool.",
-        value: `You are an image generator agent. You help users to generate images using the Stability API.`,
-      },
     ],
   },
   {
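Note: every block deleted above assigned a per-tool persona ("You are a … agent"), which conflicts with composing one shared system prompt; only prompts carrying genuinely tool-specific instructions, like DuckDuckGo's region hint, are kept. The surviving `TOOL_SYSTEM_PROMPT_ENV_VAR` values are appended to the shared prompt in `getSystemPromptEnv`; the merge loop body is elided in this diff, so the following is an assumed sketch of its shape:

```ts
// Assumed sketch of the merge in helpers/env-variables.ts (loop body not shown in the diff):
let toolSystemPrompt = "";
tools?.forEach((tool) => {
  const prompt = tool.envVars?.find(
    (v) => v.name === TOOL_SYSTEM_PROMPT_ENV_VAR,
  )?.value;
  if (prompt) toolSystemPrompt += toolSystemPrompt ? `\n${prompt}` : prompt;
});
```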
diff --git a/questions/simple.ts b/questions/simple.ts
index 8571021fc5ff2bf535f32e0ebbfd97e3c3ca8a8c..e7892acdd4e0cd33df7677f4314d30ea77e09339 100644
--- a/questions/simple.ts
+++ b/questions/simple.ts
@@ -131,7 +131,7 @@ const convertAnswers = async (
   > = {
     rag: {
       template: "streaming",
-      tools: getTools(["wikipedia.WikipediaToolSpec"]),
+      tools: getTools(["weather"]),
       frontend: true,
       dataSources: [EXAMPLE_FILE],
     },
diff --git a/templates/components/engines/typescript/agent/tools/interpreter.ts b/templates/components/engines/typescript/agent/tools/interpreter.ts
index 44cc7cbc4d3ea3ea18ec28b461b7caa0ce35b08f..37b645882078defd9e821c1df18708c50e16e621 100644
--- a/templates/components/engines/typescript/agent/tools/interpreter.ts
+++ b/templates/components/engines/typescript/agent/tools/interpreter.ts
@@ -116,7 +116,9 @@ export class InterpreterTool implements BaseTool<InterpreterParameter> {
           const fileName = path.basename(filePath);
           const localFilePath = path.join(this.uploadedFilesDir, fileName);
           const content = fs.readFileSync(localFilePath);
-          await this.codeInterpreter?.files.write(filePath, content);
+
+          const arrayBuffer = new Uint8Array(content).buffer;
+          await this.codeInterpreter?.files.write(filePath, arrayBuffer);
         }
       } catch (error) {
         console.error("Got error when uploading files to sandbox", error);
diff --git a/templates/components/multiagent/python/app/api/routers/vercel_response.py b/templates/components/multiagent/python/app/api/routers/vercel_response.py
index 4298553243168d806ae52bb6a3ef7662eca3925f..0fd1d0172f1a893c1eef22356d8b89e2e333faea 100644
--- a/templates/components/multiagent/python/app/api/routers/vercel_response.py
+++ b/templates/components/multiagent/python/app/api/routers/vercel_response.py
@@ -19,6 +19,7 @@ class VercelStreamResponse(StreamingResponse):
 
     TEXT_PREFIX = "0:"
     DATA_PREFIX = "8:"
+    ERROR_PREFIX = "3:"
 
     def __init__(self, request: Request, chat_data: ChatData, *args, **kwargs):
         self.request = request
@@ -41,13 +42,16 @@ class VercelStreamResponse(StreamingResponse):
 
                     yield output
         except asyncio.CancelledError:
-            logger.info("Stopping workflow")
-            await event_handler.cancel_run()
+            logger.warning("Workflow has been cancelled!")
         except Exception as e:
             logger.error(
                 f"Unexpected error in content_generator: {str(e)}", exc_info=True
             )
+            yield self.convert_error(
+                "An unexpected error occurred while processing your request, preventing the creation of a final answer. Please try again."
+            )
         finally:
+            await event_handler.cancel_run()
             logger.info("The stream has been stopped!")
 
     def _create_stream(
@@ -107,6 +111,11 @@ class VercelStreamResponse(StreamingResponse):
         data_str = json.dumps(data)
         return f"{cls.DATA_PREFIX}[{data_str}]\n"
 
+    @classmethod
+    def convert_error(cls, error: str):
+        error_str = json.dumps(error)
+        return f"{cls.ERROR_PREFIX}{error_str}\n"
+
     @staticmethod
     async def _generate_next_questions(chat_history: List[Message], response: str):
         questions = await NextQuestionSuggestion.suggest_next_questions(
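For context on the prefixes: these are part markers from the Vercel AI SDK's data stream protocol, where each frame is a prefix followed by a JSON payload and a newline. `0:` carries text deltas, and the new `3:` carries an error string that the SDK surfaces to the client's `onError` handler, so a failed workflow run now ends with an in-band error frame instead of the stream simply going silent:

```ts
// Sketch of frames on the wire (payloads abbreviated/illustrative):
const textFrame = '0:"Hello"\n'; // TEXT_PREFIX: a text delta
const dataFrame = '8:[{"type":"sources"}]\n'; // DATA_PREFIX: array payload, shape illustrative
const errorFrame = '3:"An unexpected error occurred..."\n'; // ERROR_PREFIX: user-facing message
```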
diff --git a/templates/types/streaming/express/README-template.md b/templates/types/streaming/express/README-template.md
index 8ec0466435359107b3b8d7d8f1a11fab1563f931..4b9ece10e7ad0c4643f03149a6cad96cf7d96cad 100644
--- a/templates/types/streaming/express/README-template.md
+++ b/templates/types/streaming/express/README-template.md
@@ -8,7 +8,7 @@ First, install the dependencies:
 npm install
 ```
 
-Second, generate the embeddings of the documents in the `./data` directory (if this folder exists - otherwise, skip this step):
+Second, generate the embeddings of the documents in the `./data` directory:
 
 ```
 npm run generate
diff --git a/templates/types/streaming/fastapi/README-template.md b/templates/types/streaming/fastapi/README-template.md
index 00a522e7efd4a98f33df63793e95d39db7097a98..ff92270775ea5fa38fb727ae8c2e11a3c08360ca 100644
--- a/templates/types/streaming/fastapi/README-template.md
+++ b/templates/types/streaming/fastapi/README-template.md
@@ -15,7 +15,7 @@ Then check the parameters that have been pre-configured in the `.env` file in th
 
 If you are using any tools or data sources, you can update their config files in the `config` folder.
 
-Second, generate the embeddings of the documents in the `./data` directory (if this folder exists - otherwise, skip this step):
+Second, generate the embeddings of the documents in the `./data` directory:
 
 ```
 poetry run generate
diff --git a/templates/types/streaming/fastapi/app/api/routers/vercel_response.py b/templates/types/streaming/fastapi/app/api/routers/vercel_response.py
index 1155f6ba7a84570caa68c1ceb6611f34a9ef9ed3..0d41d893e8f007210303d9553194b2578be94299 100644
--- a/templates/types/streaming/fastapi/app/api/routers/vercel_response.py
+++ b/templates/types/streaming/fastapi/app/api/routers/vercel_response.py
@@ -22,6 +22,7 @@ class VercelStreamResponse(StreamingResponse):
 
     TEXT_PREFIX = "0:"
     DATA_PREFIX = "8:"
+    ERROR_PREFIX = "3:"
 
     def __init__(
         self,
@@ -53,17 +54,26 @@ class VercelStreamResponse(StreamingResponse):
         # Merge the chat response generator and the event generator
         combine = stream.merge(chat_response_generator, event_generator)
         is_stream_started = False
-        async with combine.stream() as streamer:
-            async for output in streamer:
-                if not is_stream_started:
-                    is_stream_started = True
-                    # Stream a blank message to start displaying the response in the UI
-                    yield cls.convert_text("")
-
-                yield output
-
-                if await request.is_disconnected():
-                    break
+        try:
+            async with combine.stream() as streamer:
+                async for output in streamer:
+                    if await request.is_disconnected():
+                        break
+
+                    if not is_stream_started:
+                        is_stream_started = True
+                        # Stream a blank message to start displaying the response in the UI
+                        yield cls.convert_text("")
+
+                    yield output
+        except Exception:
+            logger.exception("Error in stream response")
+            yield cls.convert_error(
+                "An unexpected error occurred while processing your request, preventing the creation of a final answer. Please try again."
+            )
+        finally:
+            # Ensure event handler is marked as done even if connection breaks
+            event_handler.is_done = True
 
     @classmethod
     async def _event_generator(cls, event_handler: EventCallbackHandler):
@@ -131,6 +141,11 @@ class VercelStreamResponse(StreamingResponse):
         data_str = json.dumps(data)
         return f"{cls.DATA_PREFIX}[{data_str}]\n"
 
+    @classmethod
+    def convert_error(cls, error: str):
+        error_str = json.dumps(error)
+        return f"{cls.ERROR_PREFIX}{error_str}\n"
+
     @staticmethod
     def _process_response_nodes(
         source_nodes: List[NodeWithScore],
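Same protocol note as for the multiagent variant: once streaming has started, the HTTP status is already 200, so failures must be reported in-band. The `try/except` turns any mid-stream exception into a `3:` frame, and the `finally` guarantees `event_handler.is_done` is set so the event generator terminates even when the client disconnects. A minimal consumer sketch (not the AI SDK's actual parser) that routes frames by prefix:

```ts
// Minimal sketch of a client for the stream above (not the AI SDK's parser).
async function consumeChatStream(res: Response): Promise<string> {
  const reader = res.body!.pipeThrough(new TextDecoderStream()).getReader();
  let buffered = "";
  let text = "";
  for (;;) {
    const { value, done } = await reader.read();
    if (done) break;
    buffered += value;
    let nl: number;
    while ((nl = buffered.indexOf("\n")) >= 0) {
      const frame = buffered.slice(0, nl);
      buffered = buffered.slice(nl + 1);
      if (frame.startsWith("0:")) text += JSON.parse(frame.slice(2)); // text delta
      else if (frame.startsWith("3:")) throw new Error(JSON.parse(frame.slice(2))); // error part
      // 8: data frames would be dispatched here as well
    }
  }
  return text;
}
```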
diff --git a/templates/types/streaming/nextjs/README-template.md b/templates/types/streaming/nextjs/README-template.md
index d2eb1eb850d1e94c0cdab296ad14b29f8f874431..78a3928132b2f4e76799bd603c24befbff653307 100644
--- a/templates/types/streaming/nextjs/README-template.md
+++ b/templates/types/streaming/nextjs/README-template.md
@@ -8,7 +8,7 @@ First, install the dependencies:
 npm install
 ```
 
-Second, generate the embeddings of the documents in the `./data` directory (if this folder exists - otherwise, skip this step):
+Second, generate the embeddings of the documents in the `./data` directory:
 
 ```
 npm run generate
diff --git a/templates/types/streaming/nextjs/app/api/sandbox/route.ts b/templates/types/streaming/nextjs/app/api/sandbox/route.ts
index 6bbd15177dbe33f8a0eb0f50cdbf767f6545b207..f524d550760428ea90367410342ec41940e41e9f 100644
--- a/templates/types/streaming/nextjs/app/api/sandbox/route.ts
+++ b/templates/types/streaming/nextjs/app/api/sandbox/route.ts
@@ -92,7 +92,8 @@ export async function POST(req: Request) {
       const localFilePath = path.join("output", "uploaded", fileName);
       const fileContent = await fs.readFile(localFilePath);
 
-      await sbx.files.write(sandboxFilePath, fileContent);
+      const arrayBuffer = new Uint8Array(fileContent).buffer;
+      await sbx.files.write(sandboxFilePath, arrayBuffer);
       console.log(`Copied file to ${sandboxFilePath} in ${sbx.sandboxID}`);
     });
   }
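Same `Buffer` to `ArrayBuffer` consideration as in interpreter.ts above: copying through a fresh `Uint8Array` guarantees the resulting `ArrayBuffer` contains exactly the file's bytes.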
diff --git a/templates/types/streaming/nextjs/app/components/chat-section.tsx b/templates/types/streaming/nextjs/app/components/chat-section.tsx
index 46b0054279490b1f488789e30c54107ad33a5d59..4a0fe9deb232a8a48ef58d4c366a3482eae5a108 100644
--- a/templates/types/streaming/nextjs/app/components/chat-section.tsx
+++ b/templates/types/streaming/nextjs/app/components/chat-section.tsx
@@ -15,7 +15,13 @@ export default function ChatSection() {
     api: `${backend}/api/chat`,
     onError: (error: unknown) => {
       if (!(error instanceof Error)) throw error;
-      alert(JSON.parse(error.message).detail);
+      let errorMessage: string;
+      try {
+        errorMessage = JSON.parse(error.message).detail;
+      } catch (e) {
+        errorMessage = error.message;
+      }
+      alert(errorMessage);
     },
   });
   return (
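With the backend now emitting `3:` error parts, `onError` can receive two shapes: the `{"detail": ...}` JSON body FastAPI sends with non-200 responses, and a plain string from an in-band error frame, on which `JSON.parse` throws. The fallback handles both; a quick illustration (error shapes assumed):

```ts
// Assumed examples of the two error shapes the handler may receive:
const shapes = [
  '{"detail":"Rate limit exceeded"}', // non-200 JSON body -> show .detail
  "An unexpected error occurred...", // in-band 3: frame -> show as-is
];
for (const message of shapes) {
  let errorMessage: string;
  try {
    errorMessage = JSON.parse(message).detail;
  } catch {
    errorMessage = message;
  }
  console.log(errorMessage); // "Rate limit exceeded", then the raw string
}
```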