diff --git a/.changeset/itchy-dots-warn.md b/.changeset/itchy-dots-warn.md
new file mode 100644
index 0000000000000000000000000000000000000000..3dfab5b749a6f9bd63563cdce2ff350ff0aa5eed
--- /dev/null
+++ b/.changeset/itchy-dots-warn.md
@@ -0,0 +1,5 @@
+---
+"@llamaindex/openai": patch
+---
+
+Add `reasoningEffort` option (mapped to the `reasoning_effort` request parameter) for OpenAI o1 and o3 reasoning models
diff --git a/packages/providers/openai/src/llm.ts b/packages/providers/openai/src/llm.ts
index 926d478d185f0e2dee37ec5a7db0586e2ac356d4..081edde960b45695327911c29942b75de93dd4aa 100644
--- a/packages/providers/openai/src/llm.ts
+++ b/packages/providers/openai/src/llm.ts
@@ -140,6 +140,12 @@ export function isFunctionCallingModel(llm: LLM): llm is OpenAI {
   return isChatModel && !isOld && !isO1;
 }
 
+export function isReasoningModel(model: ChatModel | string): boolean {
+  // The o1 and o3 model families are OpenAI's reasoning models.
+  const isReasoningFamily = model.startsWith("o1") || model.startsWith("o3");
+  return isReasoningFamily;
+}
+
 export function isTemperatureSupported(model: ChatModel | string): boolean {
   return !model.startsWith("o3");
 }
@@ -152,6 +158,7 @@ export type OpenAIAdditionalChatOptions = Omit<
   | "messages"
   | "model"
   | "temperature"
+  | "reasoning_effort"
   | "top_p"
   | "stream"
   | "tools"
@@ -166,6 +173,7 @@ export class OpenAI extends ToolCallLLM<OpenAIAdditionalChatOptions> {
     // string & {} is a hack to allow any string, but still give autocomplete
     | (string & {});
   temperature: number;
+  reasoningEffort?: "low" | "medium" | "high" | undefined;
   topP: number;
   maxTokens?: number | undefined;
   additionalChatOptions?: OpenAIAdditionalChatOptions | undefined;
@@ -197,6 +205,9 @@ export class OpenAI extends ToolCallLLM<OpenAIAdditionalChatOptions> {
     super();
     this.model = init?.model ?? "gpt-4o";
     this.temperature = init?.temperature ?? 0.1;
+    this.reasoningEffort = isReasoningModel(this.model)
+      ? init?.reasoningEffort
+      : undefined;
     this.topP = init?.topP ?? 1;
     this.maxTokens = init?.maxTokens ?? undefined;
 
@@ -354,6 +365,7 @@ export class OpenAI extends ToolCallLLM<OpenAIAdditionalChatOptions> {
     const baseRequestParams = <OpenAILLM.Chat.ChatCompletionCreateParams>{
       model: this.model,
       temperature: isTemperatureSupported(this.model) ? this.temperature : null,
+      reasoning_effort: this.reasoningEffort,
       max_tokens: this.maxTokens,
       tools: tools?.map(OpenAI.toTool),
       messages: OpenAI.toOpenAIMessage(messages),