diff --git a/.changeset/strong-schools-hope.md b/.changeset/strong-schools-hope.md
new file mode 100644
index 0000000000000000000000000000000000000000..05f52139963f4cdc29e6945c7b7d5b0135fb7239
--- /dev/null
+++ b/.changeset/strong-schools-hope.md
@@ -0,0 +1,5 @@
+---
+"@llamaindex/community": patch
+---
+
+feat: add Meta Llama 3.2 support via Bedrock, including vision, tool calls, and cross-region inference support
diff --git a/packages/community/README.md b/packages/community/README.md
index 7ffd2fdf24e3417a73aec1c1d26f4d0dd49dbec3..2457143dc032d43f54efaab6626bf817a4709ed5 100644
--- a/packages/community/README.md
+++ b/packages/community/README.md
@@ -5,9 +5,11 @@
 ## Current Features:
 
 - Bedrock support for the Anthropic Claude Models [usage](https://ts.llamaindex.ai/modules/llms/available_llms/bedrock)
-- Bedrock support for the Meta LLama 2, 3 and 3.1 Models [usage](https://ts.llamaindex.ai/modules/llms/available_llms/bedrock)
-- Meta LLama3.1 405b tool call support
+- Bedrock support for the Meta Llama 2, 3, 3.1 and 3.2 Models [usage](https://ts.llamaindex.ai/modules/llms/available_llms/bedrock)
+- Meta Llama 3.1 405B and Llama 3.2 tool call support
+- Meta Llama 3.2 11B and 90B vision support
 - Bedrock support for querying Knowledge Base
+- Bedrock support for cross-region inference ([supported Regions and models](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html))
 
 ## LICENSE
 
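Taken together, the new README bullets compose roughly like the following minimal sketch (the Region value and the truncated data URL are placeholders, and the call is assumed to run in an async context):

```ts
import { Bedrock, BEDROCK_MODELS } from "@llamaindex/community";

async function main() {
  // Llama 3.2 11B accepts image input; images are passed as base64 data URLs
  // so the Meta provider can extract the payload (see meta/utils.ts below).
  const llm = new Bedrock({
    model: BEDROCK_MODELS.META_LLAMA3_2_11B_INSTRUCT,
    region: "us-east-1", // assumption: a Region where the model is enabled
  });

  const response = await llm.chat({
    messages: [
      {
        role: "user",
        content: [
          { type: "text", text: "What is shown in this image?" },
          // truncated placeholder; supply a real base64 data URL
          {
            type: "image_url",
            image_url: { url: "data:image/jpeg;base64,..." },
          },
        ],
      },
    ],
  });
  console.log(response.message.content);
}

main().catch(console.error);
```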
diff --git a/packages/community/src/index.ts b/packages/community/src/index.ts
index f1368f6874e3fc7c43f514b255e8023e5ec410cf..1775f6238ec1124f8bd788ab0facae26042031f4 100644
--- a/packages/community/src/index.ts
+++ b/packages/community/src/index.ts
@@ -2,5 +2,7 @@ export {
   BEDROCK_MODELS,
   BEDROCK_MODEL_MAX_TOKENS,
   Bedrock,
+  INFERENCE_BEDROCK_MODELS,
+  INFERENCE_TO_BEDROCK_MAP,
 } from "./llm/bedrock/index.js";
 export { AmazonKnowledgeBaseRetriever } from "./retrievers/bedrock.js";
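For reference, the two new exports compose like this small sketch; the profile-to-base pairing is taken directly from the map added in `bedrock/index.ts` below:

```ts
import {
  INFERENCE_BEDROCK_MODELS,
  INFERENCE_TO_BEDROCK_MAP,
} from "@llamaindex/community";

// A cross-region inference profile ID resolves to the base foundation-model
// ID that the library uses for provider and capability lookups.
const profile = INFERENCE_BEDROCK_MODELS.US_META_LLAMA_3_2_11B_INSTRUCT;
const base = INFERENCE_TO_BEDROCK_MAP[profile];
console.log(`${profile} -> ${base}`);
// us.meta.llama3-2-11b-instruct-v1:0 -> meta.llama3-2-11b-instruct-v1:0
```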
diff --git a/packages/community/src/llm/bedrock/anthropic/utils.ts b/packages/community/src/llm/bedrock/anthropic/utils.ts
index 372443694662590c016228eaadbadfe620927943..ac841e543c7a805dddf5bdb5f8e6fe9d3f7a1eb7 100644
--- a/packages/community/src/llm/bedrock/anthropic/utils.ts
+++ b/packages/community/src/llm/bedrock/anthropic/utils.ts
@@ -6,7 +6,10 @@ import type {
   MessageContentDetail,
   ToolCallLLMMessageOptions,
 } from "@llamaindex/core/llms";
-import { mapMessageContentToMessageContentDetails } from "../utils";
+import {
+  extractDataUrlComponents,
+  mapMessageContentToMessageContentDetails,
+} from "../utils";
 import type {
   AnthropicContent,
   AnthropicImageContent,
@@ -143,27 +146,6 @@ export const mapTextContent = (text: string): AnthropicTextContent => {
   return { type: "text", text };
 };
 
-export const extractDataUrlComponents = (
-  dataUrl: string,
-): {
-  mimeType: string;
-  base64: string;
-} => {
-  const parts = dataUrl.split(";base64,");
-
-  if (parts.length !== 2 || !parts[0]!.startsWith("data:")) {
-    throw new Error("Invalid data URL");
-  }
-
-  const mimeType = parts[0]!.slice(5);
-  const base64 = parts[1]!;
-
-  return {
-    mimeType,
-    base64,
-  };
-};
-
 export const mapImageContent = (imageUrl: string): AnthropicImageContent => {
   if (!imageUrl.startsWith("data:"))
     throw new Error(
diff --git a/packages/community/src/llm/bedrock/index.ts b/packages/community/src/llm/bedrock/index.ts
index 981c0c9e3dc6676fea50be2552444d10b34fe8bd..9e4bc9141a532235d422285dc3a3b4ce363a0e8e 100644
--- a/packages/community/src/llm/bedrock/index.ts
+++ b/packages/community/src/llm/bedrock/index.ts
@@ -47,35 +47,96 @@ export type BedrockChatParamsNonStreaming = LLMChatParamsNonStreaming<
 export type BedrockChatNonStreamResponse =
   ChatResponse<ToolCallLLMMessageOptions>;
 
-export enum BEDROCK_MODELS {
-  AMAZON_TITAN_TG1_LARGE = "amazon.titan-tg1-large",
-  AMAZON_TITAN_TEXT_EXPRESS_V1 = "amazon.titan-text-express-v1",
-  AI21_J2_GRANDE_INSTRUCT = "ai21.j2-grande-instruct",
-  AI21_J2_JUMBO_INSTRUCT = "ai21.j2-jumbo-instruct",
-  AI21_J2_MID = "ai21.j2-mid",
-  AI21_J2_MID_V1 = "ai21.j2-mid-v1",
-  AI21_J2_ULTRA = "ai21.j2-ultra",
-  AI21_J2_ULTRA_V1 = "ai21.j2-ultra-v1",
-  COHERE_COMMAND_TEXT_V14 = "cohere.command-text-v14",
-  ANTHROPIC_CLAUDE_INSTANT_1 = "anthropic.claude-instant-v1",
-  ANTHROPIC_CLAUDE_1 = "anthropic.claude-v1", // EOF: No longer supported
-  ANTHROPIC_CLAUDE_2 = "anthropic.claude-v2",
-  ANTHROPIC_CLAUDE_2_1 = "anthropic.claude-v2:1",
-  ANTHROPIC_CLAUDE_3_SONNET = "anthropic.claude-3-sonnet-20240229-v1:0",
-  ANTHROPIC_CLAUDE_3_HAIKU = "anthropic.claude-3-haiku-20240307-v1:0",
-  ANTHROPIC_CLAUDE_3_OPUS = "anthropic.claude-3-opus-20240229-v1:0",
-  ANTHROPIC_CLAUDE_3_5_SONNET = "anthropic.claude-3-5-sonnet-20240620-v1:0",
-  META_LLAMA2_13B_CHAT = "meta.llama2-13b-chat-v1",
-  META_LLAMA2_70B_CHAT = "meta.llama2-70b-chat-v1",
-  META_LLAMA3_8B_INSTRUCT = "meta.llama3-8b-instruct-v1:0",
-  META_LLAMA3_70B_INSTRUCT = "meta.llama3-70b-instruct-v1:0",
-  META_LLAMA3_1_8B_INSTRUCT = "meta.llama3-1-8b-instruct-v1:0",
-  META_LLAMA3_1_70B_INSTRUCT = "meta.llama3-1-70b-instruct-v1:0",
-  META_LLAMA3_1_405B_INSTRUCT = "meta.llama3-1-405b-instruct-v1:0",
-  MISTRAL_7B_INSTRUCT = "mistral.mistral-7b-instruct-v0:2",
-  MISTRAL_MIXTRAL_7B_INSTRUCT = "mistral.mixtral-8x7b-instruct-v0:1",
-  MISTRAL_MIXTRAL_LARGE_2402 = "mistral.mistral-large-2402-v1:0",
-}
+export const BEDROCK_MODELS = {
+  AMAZON_TITAN_TG1_LARGE: "amazon.titan-tg1-large",
+  AMAZON_TITAN_TEXT_EXPRESS_V1: "amazon.titan-text-express-v1",
+  AI21_J2_GRANDE_INSTRUCT: "ai21.j2-grande-instruct",
+  AI21_J2_JUMBO_INSTRUCT: "ai21.j2-jumbo-instruct",
+  AI21_J2_MID: "ai21.j2-mid",
+  AI21_J2_MID_V1: "ai21.j2-mid-v1",
+  AI21_J2_ULTRA: "ai21.j2-ultra",
+  AI21_J2_ULTRA_V1: "ai21.j2-ultra-v1",
+  COHERE_COMMAND_TEXT_V14: "cohere.command-text-v14",
+  ANTHROPIC_CLAUDE_INSTANT_1: "anthropic.claude-instant-v1",
+  ANTHROPIC_CLAUDE_1: "anthropic.claude-v1", // EOL: no longer supported
+  ANTHROPIC_CLAUDE_2: "anthropic.claude-v2",
+  ANTHROPIC_CLAUDE_2_1: "anthropic.claude-v2:1",
+  ANTHROPIC_CLAUDE_3_SONNET: "anthropic.claude-3-sonnet-20240229-v1:0",
+  ANTHROPIC_CLAUDE_3_HAIKU: "anthropic.claude-3-haiku-20240307-v1:0",
+  ANTHROPIC_CLAUDE_3_OPUS: "anthropic.claude-3-opus-20240229-v1:0",
+  ANTHROPIC_CLAUDE_3_5_SONNET: "anthropic.claude-3-5-sonnet-20240620-v1:0",
+  META_LLAMA2_13B_CHAT: "meta.llama2-13b-chat-v1",
+  META_LLAMA2_70B_CHAT: "meta.llama2-70b-chat-v1",
+  META_LLAMA3_8B_INSTRUCT: "meta.llama3-8b-instruct-v1:0",
+  META_LLAMA3_70B_INSTRUCT: "meta.llama3-70b-instruct-v1:0",
+  META_LLAMA3_1_8B_INSTRUCT: "meta.llama3-1-8b-instruct-v1:0",
+  META_LLAMA3_1_70B_INSTRUCT: "meta.llama3-1-70b-instruct-v1:0",
+  META_LLAMA3_1_405B_INSTRUCT: "meta.llama3-1-405b-instruct-v1:0",
+  META_LLAMA3_2_1B_INSTRUCT: "meta.llama3-2-1b-instruct-v1:0",
+  META_LLAMA3_2_3B_INSTRUCT: "meta.llama3-2-3b-instruct-v1:0",
+  META_LLAMA3_2_11B_INSTRUCT: "meta.llama3-2-11b-instruct-v1:0",
+  META_LLAMA3_2_90B_INSTRUCT: "meta.llama3-2-90b-instruct-v1:0",
+  MISTRAL_7B_INSTRUCT: "mistral.mistral-7b-instruct-v0:2",
+  MISTRAL_MIXTRAL_7B_INSTRUCT: "mistral.mixtral-8x7b-instruct-v0:1",
+  MISTRAL_MIXTRAL_LARGE_2402: "mistral.mistral-large-2402-v1:0",
+};
+export type BEDROCK_MODELS =
+  (typeof BEDROCK_MODELS)[keyof typeof BEDROCK_MODELS];
+
+export const INFERENCE_BEDROCK_MODELS = {
+  US_ANTHROPIC_CLAUDE_3_HAIKU: "us.anthropic.claude-3-haiku-20240307-v1:0",
+  US_ANTHROPIC_CLAUDE_3_OPUS: "us.anthropic.claude-3-opus-20240229-v1:0",
+  US_ANTHROPIC_CLAUDE_3_SONNET: "us.anthropic.claude-3-sonnet-20240229-v1:0",
+  US_ANTHROPIC_CLAUDE_3_5_SONNET:
+    "us.anthropic.claude-3-5-sonnet-20240620-v1:0",
+  US_META_LLAMA_3_2_1B_INSTRUCT: "us.meta.llama3-2-1b-instruct-v1:0",
+  US_META_LLAMA_3_2_3B_INSTRUCT: "us.meta.llama3-2-3b-instruct-v1:0",
+  US_META_LLAMA_3_2_11B_INSTRUCT: "us.meta.llama3-2-11b-instruct-v1:0",
+  US_META_LLAMA_3_2_90B_INSTRUCT: "us.meta.llama3-2-90b-instruct-v1:0",
+
+  EU_ANTHROPIC_CLAUDE_3_HAIKU: "eu.anthropic.claude-3-haiku-20240307-v1:0",
+  EU_ANTHROPIC_CLAUDE_3_SONNET: "eu.anthropic.claude-3-sonnet-20240229-v1:0",
+  EU_ANTHROPIC_CLAUDE_3_5_SONNET:
+    "eu.anthropic.claude-3-5-sonnet-20240620-v1:0",
+  EU_META_LLAMA_3_2_1B_INSTRUCT: "eu.meta.llama3-2-1b-instruct-v1:0",
+  EU_META_LLAMA_3_2_3B_INSTRUCT: "eu.meta.llama3-2-3b-instruct-v1:0",
+};
+
+export type INFERENCE_BEDROCK_MODELS =
+  (typeof INFERENCE_BEDROCK_MODELS)[keyof typeof INFERENCE_BEDROCK_MODELS];
+
+export const INFERENCE_TO_BEDROCK_MAP: Record<
+  INFERENCE_BEDROCK_MODELS,
+  BEDROCK_MODELS
+> = {
+  [INFERENCE_BEDROCK_MODELS.US_ANTHROPIC_CLAUDE_3_HAIKU]:
+    BEDROCK_MODELS.ANTHROPIC_CLAUDE_3_HAIKU,
+  [INFERENCE_BEDROCK_MODELS.US_ANTHROPIC_CLAUDE_3_OPUS]:
+    BEDROCK_MODELS.ANTHROPIC_CLAUDE_3_OPUS,
+  [INFERENCE_BEDROCK_MODELS.US_ANTHROPIC_CLAUDE_3_SONNET]:
+    BEDROCK_MODELS.ANTHROPIC_CLAUDE_3_SONNET,
+  [INFERENCE_BEDROCK_MODELS.US_ANTHROPIC_CLAUDE_3_5_SONNET]:
+    BEDROCK_MODELS.ANTHROPIC_CLAUDE_3_5_SONNET,
+  [INFERENCE_BEDROCK_MODELS.US_META_LLAMA_3_2_1B_INSTRUCT]:
+    BEDROCK_MODELS.META_LLAMA3_2_1B_INSTRUCT,
+  [INFERENCE_BEDROCK_MODELS.US_META_LLAMA_3_2_3B_INSTRUCT]:
+    BEDROCK_MODELS.META_LLAMA3_2_3B_INSTRUCT,
+  [INFERENCE_BEDROCK_MODELS.US_META_LLAMA_3_2_11B_INSTRUCT]:
+    BEDROCK_MODELS.META_LLAMA3_2_11B_INSTRUCT,
+  [INFERENCE_BEDROCK_MODELS.US_META_LLAMA_3_2_90B_INSTRUCT]:
+    BEDROCK_MODELS.META_LLAMA3_2_90B_INSTRUCT,
+
+  [INFERENCE_BEDROCK_MODELS.EU_ANTHROPIC_CLAUDE_3_HAIKU]:
+    BEDROCK_MODELS.ANTHROPIC_CLAUDE_3_HAIKU,
+  [INFERENCE_BEDROCK_MODELS.EU_ANTHROPIC_CLAUDE_3_SONNET]:
+    BEDROCK_MODELS.ANTHROPIC_CLAUDE_3_SONNET,
+  [INFERENCE_BEDROCK_MODELS.EU_ANTHROPIC_CLAUDE_3_5_SONNET]:
+    BEDROCK_MODELS.ANTHROPIC_CLAUDE_3_5_SONNET,
+  [INFERENCE_BEDROCK_MODELS.EU_META_LLAMA_3_2_1B_INSTRUCT]:
+    BEDROCK_MODELS.META_LLAMA3_2_1B_INSTRUCT,
+  [INFERENCE_BEDROCK_MODELS.EU_META_LLAMA_3_2_3B_INSTRUCT]:
+    BEDROCK_MODELS.META_LLAMA3_2_3B_INSTRUCT,
+};
 
 /*
  * Values taken from https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html#model-parameters-claude
@@ -109,6 +170,10 @@ const CHAT_ONLY_MODELS = {
   [BEDROCK_MODELS.META_LLAMA3_1_8B_INSTRUCT]: 128000,
   [BEDROCK_MODELS.META_LLAMA3_1_70B_INSTRUCT]: 128000,
   [BEDROCK_MODELS.META_LLAMA3_1_405B_INSTRUCT]: 128000,
+  [BEDROCK_MODELS.META_LLAMA3_2_1B_INSTRUCT]: 131000,
+  [BEDROCK_MODELS.META_LLAMA3_2_3B_INSTRUCT]: 131000,
+  [BEDROCK_MODELS.META_LLAMA3_2_11B_INSTRUCT]: 128000,
+  [BEDROCK_MODELS.META_LLAMA3_2_90B_INSTRUCT]: 128000,
   [BEDROCK_MODELS.MISTRAL_7B_INSTRUCT]: 32000,
   [BEDROCK_MODELS.MISTRAL_MIXTRAL_7B_INSTRUCT]: 32000,
   [BEDROCK_MODELS.MISTRAL_MIXTRAL_LARGE_2402]: 32000,
@@ -139,17 +204,25 @@ export const STREAMING_MODELS = new Set([
   BEDROCK_MODELS.META_LLAMA3_1_8B_INSTRUCT,
   BEDROCK_MODELS.META_LLAMA3_1_70B_INSTRUCT,
   BEDROCK_MODELS.META_LLAMA3_1_405B_INSTRUCT,
+  BEDROCK_MODELS.META_LLAMA3_2_1B_INSTRUCT,
+  BEDROCK_MODELS.META_LLAMA3_2_3B_INSTRUCT,
+  BEDROCK_MODELS.META_LLAMA3_2_11B_INSTRUCT,
+  BEDROCK_MODELS.META_LLAMA3_2_90B_INSTRUCT,
   BEDROCK_MODELS.MISTRAL_7B_INSTRUCT,
   BEDROCK_MODELS.MISTRAL_MIXTRAL_7B_INSTRUCT,
   BEDROCK_MODELS.MISTRAL_MIXTRAL_LARGE_2402,
 ]);
 
-export const TOOL_CALL_MODELS = [
+export const TOOL_CALL_MODELS: BEDROCK_MODELS[] = [
   BEDROCK_MODELS.ANTHROPIC_CLAUDE_3_SONNET,
   BEDROCK_MODELS.ANTHROPIC_CLAUDE_3_HAIKU,
   BEDROCK_MODELS.ANTHROPIC_CLAUDE_3_OPUS,
   BEDROCK_MODELS.ANTHROPIC_CLAUDE_3_5_SONNET,
   BEDROCK_MODELS.META_LLAMA3_1_405B_INSTRUCT,
+  BEDROCK_MODELS.META_LLAMA3_2_1B_INSTRUCT,
+  BEDROCK_MODELS.META_LLAMA3_2_3B_INSTRUCT,
+  BEDROCK_MODELS.META_LLAMA3_2_11B_INSTRUCT,
+  BEDROCK_MODELS.META_LLAMA3_2_90B_INSTRUCT,
 ];
 
 const getProvider = (model: string): Provider => {
@@ -166,7 +239,7 @@ const getProvider = (model: string): Provider => {
 };
 
 export type BedrockModelParams = {
-  model: keyof typeof BEDROCK_FOUNDATION_LLMS;
+  model: BEDROCK_MODELS | INFERENCE_BEDROCK_MODELS;
   temperature?: number;
   topP?: number;
   maxTokens?: number;
@@ -185,6 +258,10 @@ export const BEDROCK_MODEL_MAX_TOKENS: Partial<Record<BEDROCK_MODELS, number>> =
     [BEDROCK_MODELS.META_LLAMA3_1_8B_INSTRUCT]: 2048,
     [BEDROCK_MODELS.META_LLAMA3_1_70B_INSTRUCT]: 2048,
     [BEDROCK_MODELS.META_LLAMA3_1_405B_INSTRUCT]: 2048,
+    [BEDROCK_MODELS.META_LLAMA3_2_1B_INSTRUCT]: 2048,
+    [BEDROCK_MODELS.META_LLAMA3_2_3B_INSTRUCT]: 2048,
+    [BEDROCK_MODELS.META_LLAMA3_2_11B_INSTRUCT]: 2048,
+    [BEDROCK_MODELS.META_LLAMA3_2_90B_INSTRUCT]: 2048,
   };
 
 const DEFAULT_BEDROCK_PARAMS = {
@@ -193,14 +270,15 @@ const DEFAULT_BEDROCK_PARAMS = {
   maxTokens: 1024, // required by anthropic
 };
 
-export type BedrockParams = BedrockModelParams & BedrockRuntimeClientConfig;
+export type BedrockParams = BedrockRuntimeClientConfig & BedrockModelParams;
 
 /**
  * ToolCallLLM for Bedrock
  */
 export class Bedrock extends ToolCallLLM<BedrockAdditionalChatOptions> {
   private client: BedrockRuntimeClient;
-  model: keyof typeof BEDROCK_FOUNDATION_LLMS;
+  protected actualModel: BEDROCK_MODELS | INFERENCE_BEDROCK_MODELS;
+  model: BEDROCK_MODELS;
   temperature: number;
   topP: number;
   maxTokens?: number;
@@ -217,8 +295,8 @@ export class Bedrock extends ToolCallLLM<BedrockAdditionalChatOptions> {
     ...params
   }: BedrockParams) {
     super();
-
-    this.model = model;
+    this.actualModel = model;
+    this.model = INFERENCE_TO_BEDROCK_MAP[model] ?? model;
     this.provider = getProvider(this.model);
     this.maxTokens = maxTokens ?? DEFAULT_BEDROCK_PARAMS.maxTokens;
     this.temperature = temperature ?? DEFAULT_BEDROCK_PARAMS.temperature;
@@ -241,7 +319,7 @@ export class Bedrock extends ToolCallLLM<BedrockAdditionalChatOptions> {
       temperature: this.temperature,
       topP: this.topP,
       maxTokens: this.maxTokens,
-      contextWindow: BEDROCK_FOUNDATION_LLMS[this.model],
+      contextWindow: BEDROCK_FOUNDATION_LLMS[this.model] ?? 128000,
       tokenizer: undefined,
     };
   }
@@ -256,6 +334,8 @@ export class Bedrock extends ToolCallLLM<BedrockAdditionalChatOptions> {
       params.additionalChatOptions,
     );
     const command = new InvokeModelCommand(input);
+    command.input.modelId = this.actualModel;
+
     const response = await this.client.send(command);
     let options: ToolCallLLMMessageOptions = {};
     if (this.supportToolCall) {
@@ -287,6 +367,8 @@ export class Bedrock extends ToolCallLLM<BedrockAdditionalChatOptions> {
       params.additionalChatOptions,
     );
     const command = new InvokeModelWithResponseStreamCommand(input);
+    command.input.modelId = this.actualModel;
+
     const response = await this.client.send(command);
 
     if (response.body) yield* this.provider.reduceStream(response.body);
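The constructor's two-field split is the heart of the inference-profile support: `actualModel` keeps whatever ID the caller passed and is written onto each command as `modelId`, while `model` is mapped back to the base foundation-model ID and drives `getProvider()` and the context-window lookup. A hedged sketch (the Region value is an assumption):

```ts
import { Bedrock, INFERENCE_BEDROCK_MODELS } from "@llamaindex/community";

// Construct with a cross-region inference profile; Bedrock receives the
// profile ID ("eu.meta.llama3-2-3b-instruct-v1:0") as modelId, while the
// public `model` field holds the mapped base ID used internally.
const llm = new Bedrock({
  model: INFERENCE_BEDROCK_MODELS.EU_META_LLAMA_3_2_3B_INSTRUCT,
  region: "eu-central-1", // assumption: a Region served by the eu.* profile
});

console.log(llm.model); // "meta.llama3-2-3b-instruct-v1:0" (base ID)
```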
diff --git a/packages/community/src/llm/bedrock/meta/provider.ts b/packages/community/src/llm/bedrock/meta/provider.ts
index fb1875a6c0793a4a3ecafc13660831947adbb3b5..6e79a775c2b6b2d1035e7be668132f2bf2de42e9 100644
--- a/packages/community/src/llm/bedrock/meta/provider.ts
+++ b/packages/community/src/llm/bedrock/meta/provider.ts
@@ -67,21 +67,26 @@ export class MetaProvider extends Provider<MetaStreamEvent> {
     for await (const response of stream) {
       const event = this.getStreamingEventResponse(response);
       const delta = this.getTextFromStreamResponse(response);
+
       // odd quirk of llama3.1, start token is \n\n
       if (
+        !toolId &&
         !event?.generation.trim() &&
         event?.generation_token_count === 1 &&
-        event.prompt_token_count !== null
+        event?.prompt_token_count !== null
       )
         continue;
 
-      if (delta === TOKENS.TOOL_CALL) {
+      if (delta.startsWith(TOKENS.TOOL_CALL)) {
         toolId = randomUUID();
+        const parts = delta.split(TOKENS.TOOL_CALL).filter((part) => part);
+        collecting.push(...parts);
         continue;
       }
 
       let options: undefined | ToolCallLLMMessageOptions = undefined;
       if (toolId && event?.stop_reason === "stop") {
+        if (delta) collecting.push(delta);
         const tool = JSON.parse(collecting.join(""));
         options = {
           toolCall: [
@@ -110,11 +115,18 @@ export class MetaProvider extends Provider<MetaStreamEvent> {
   getRequestBody<T extends ChatMessage>(
     metadata: LLMMetadata,
     messages: T[],
-    tools?: BaseTool[],
+    tools: BaseTool[] = [],
   ): InvokeModelCommandInput | InvokeModelWithResponseStreamCommandInput {
     let prompt: string = "";
+    let images: string[] = [];
     if (metadata.model.startsWith("meta.llama3")) {
-      prompt = mapChatMessagesToMetaLlama3Messages(messages, tools);
+      const mapped = mapChatMessagesToMetaLlama3Messages({
+        messages,
+        tools,
+        model: metadata.model,
+      });
+      prompt = mapped.prompt;
+      images = mapped.images;
     } else if (metadata.model.startsWith("meta.llama2")) {
       prompt = mapChatMessagesToMetaLlama2Messages(messages);
     } else {
@@ -127,6 +139,7 @@ export class MetaProvider extends Provider<MetaStreamEvent> {
       accept: "application/json",
       body: JSON.stringify({
         prompt,
+        images: images.length ? images : undefined,
         max_gen_len: metadata.maxTokens,
         temperature: metadata.temperature,
         top_p: metadata.topP,
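The provider now forwards base64 image payloads alongside the prompt. An illustrative sketch of the InvokeModel body that `getRequestBody()` produces for a Llama 3.2 vision request (all values are placeholders, not defaults taken from the patch):

```ts
// Shape of the JSON body for a Llama 3.2 vision request; the `images` field
// is omitted entirely when the message carries no images.
const body = JSON.stringify({
  prompt:
    "<|begin_of_text|>\n<|start_header_id|>user<|end_header_id|>\nDescribe the image\n<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>",
  images: ["iVBORw0KGgo..."], // bare base64 payloads, data-URL prefix stripped
  max_gen_len: 2048,
  temperature: 0.1,
  top_p: 1,
});
```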
diff --git a/packages/community/src/llm/bedrock/meta/utils.ts b/packages/community/src/llm/bedrock/meta/utils.ts
index fd8cb1e6991adfbc6736757be5990b08c8efe40c..058111d8ad639c6e7dfa6082d3ad7a6d43bd784b 100644
--- a/packages/community/src/llm/bedrock/meta/utils.ts
+++ b/packages/community/src/llm/bedrock/meta/utils.ts
@@ -1,9 +1,12 @@
 import type {
   BaseTool,
   ChatMessage,
+  LLMMetadata,
   MessageContentTextDetail,
   ToolCallLLMMessageOptions,
 } from "@llamaindex/core/llms";
+import { extractDataUrlComponents } from "../utils";
+import { TOKENS } from "./constants";
 import type { MetaMessage } from "./types";
 
 const getToolCallInstructionString = (tool: BaseTool): string => {
@@ -24,7 +27,7 @@ const getToolCallParametersString = (tool: BaseTool): string => {
 
 // ported from https://github.com/meta-llama/llama-agentic-system/blob/main/llama_agentic_system/system_prompt.py
 // NOTE: using json instead of the above xml style tool calling works more reliability
-export const getToolsPrompt = (tools?: BaseTool[]) => {
+export const getToolsPrompt_3_1 = (tools?: BaseTool[]) => {
   if (!tools?.length) return "";
 
   const customToolParams = tools.map((tool) => {
@@ -77,6 +80,46 @@ Reminder:
   `;
 };
 
+export const getToolsPrompt_3_2 = (tools?: BaseTool[]) => {
+  if (!tools?.length) return "";
+  return `
+You are an expert in composing functions. You are given a question and a set of possible functions.
+Based on the question, you will need to make one or more function/tool calls to achieve the purpose.
+If none of the functions can be used, point it out. If the given question lacks the parameters required by the function,
+also point it out. You should only return the function call in the tool call section.
+
+If you decide to invoke any of the function(s), you MUST start your reply with the token ${TOKENS.TOOL_CALL} and put the call in the format:
+{
+  "name": function_name,
+  "parameters": parameters,
+}
+where
+
+{
+  "name": function_name,
+  "parameters": parameters, => a JSON dict with the function argument name as key and function argument value as value.
+}
+
+Here is an example,
+
+{
+  "name": "example_function_name",
+  "parameters": {"example_name": "example_value"}
+}
+
+Reminder:
+- Function calls MUST follow the specified format
+- Required parameters MUST be specified
+- Only call one function at a time
+- You SHOULD NOT include any other text in the response
+- Put the entire function call reply on one line
+
+Here is a list of functions in JSON format that you can invoke.
+
+${JSON.stringify(tools)}
+`;
+};
+
 export const mapChatRoleToMetaRole = (
   role: ChatMessage["role"],
 ): MetaMessage["role"] => {
@@ -125,16 +168,46 @@ export const mapChatMessagesToMetaMessages = <
 /**
  * Documentation at https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-3
  */
-export const mapChatMessagesToMetaLlama3Messages = <T extends ChatMessage>(
-  messages: T[],
-  tools?: BaseTool[],
-): string => {
+export const mapChatMessagesToMetaLlama3Messages = <T extends ChatMessage>({
+  messages,
+  model,
+  tools,
+}: {
+  messages: T[];
+  model: LLMMetadata["model"];
+  tools?: BaseTool[];
+}): { prompt: string; images: string[] } => {
+  const images: string[] = [];
+  const textMessages: T[] = [];
+
+  messages.forEach((message) => {
+    if (Array.isArray(message.content)) {
+      let hasText = false;
+      message.content.forEach((content) => {
+        if (content.type === "image_url") {
+          const { base64 } = extractDataUrlComponents(content.image_url.url);
+          images.push(base64);
+        } else {
+          hasText = true;
+        }
+      });
+      // push the message once, even when it carries several text parts
+      if (hasText) textMessages.push(message);
+    } else {
+      textMessages.push(message);
+    }
+  });
+
   const parts: string[] = [];
-  if (tools?.length) {
+
+  let toolsPrompt = "";
+  if (model.startsWith("meta.llama3-2")) {
+    toolsPrompt = getToolsPrompt_3_2(tools);
+  } else if (model.startsWith("meta.llama3-1")) {
+    toolsPrompt = getToolsPrompt_3_1(tools);
+  }
+  if (toolsPrompt) {
     parts.push(
       "<|begin_of_text|>",
       "<|start_header_id|>system<|end_header_id|>",
-      getToolsPrompt(tools),
+      toolsPrompt,
       "<|eot_id|>",
     );
   }
@@ -154,7 +227,9 @@ export const mapChatMessagesToMetaLlama3Messages = <T extends ChatMessage>(
     ...mapped,
     "<|start_header_id|>assistant<|end_header_id|>",
   );
-  return parts.join("\n");
+
+  const prompt = parts.join("\n");
+  return { prompt, images };
 };
 
 /**
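A hedged example of the new object-style signature (the message literal and the truncated data URL are illustrative): the `model` string selects the 3.1 vs 3.2 tool prompt, and image payloads come back separately from the prompt.

```ts
import { mapChatMessagesToMetaLlama3Messages } from "./utils";

// Builds the Llama 3 chat prompt and pulls base64 images out of the messages.
const { prompt, images } = mapChatMessagesToMetaLlama3Messages({
  model: "meta.llama3-2-11b-instruct-v1:0",
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "Describe the attached chart." },
        // truncated placeholder; supply a real base64 data URL
        { type: "image_url", image_url: { url: "data:image/png;base64,..." } },
      ],
    },
  ],
});
// `prompt` is the <|begin_of_text|>-framed string sent to the model;
// `images` holds the bare base64 payloads with the data-URL prefix stripped.
```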
diff --git a/packages/community/src/llm/bedrock/utils.ts b/packages/community/src/llm/bedrock/utils.ts
index 964651882d55d4ae6ec6a4f18fc2a8d09c130930..9e90c14dd85ccd8ee4cb21b68bf5bf94e1cc8779 100644
--- a/packages/community/src/llm/bedrock/utils.ts
+++ b/packages/community/src/llm/bedrock/utils.ts
@@ -11,3 +11,24 @@ export const mapMessageContentToMessageContentDetails = (
 
 export const toUtf8 = (input: Uint8Array): string =>
   new TextDecoder("utf-8").decode(input);
+
+export const extractDataUrlComponents = (
+  dataUrl: string,
+): {
+  mimeType: string;
+  base64: string;
+} => {
+  const parts = dataUrl.split(";base64,");
+
+  if (parts.length !== 2 || !parts[0]!.startsWith("data:")) {
+    throw new Error("Invalid data URL");
+  }
+
+  const mimeType = parts[0]!.slice(5);
+  const base64 = parts[1]!;
+
+  return {
+    mimeType,
+    base64,
+  };
+};
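A quick usage sketch of the helper in its new shared location:

```ts
import { extractDataUrlComponents } from "./utils";

// Splits a base64 data URL into its MIME type and raw payload;
// throws on anything that is not a base64 data URL.
const { mimeType, base64 } = extractDataUrlComponents(
  "data:image/png;base64,iVBORw0KGgo=",
);
console.log(mimeType); // "image/png"
console.log(base64); // "iVBORw0KGgo="
```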