diff --git a/.changeset/fast-mayflies-grab.md b/.changeset/fast-mayflies-grab.md
new file mode 100644
index 0000000000000000000000000000000000000000..de9387dc436f3668f0ba80c931e3b5d1f215453c
--- /dev/null
+++ b/.changeset/fast-mayflies-grab.md
@@ -0,0 +1,5 @@
+---
+"llamaindex": patch
+---
+
+Add support for the GA `gpt-4-turbo` model (OpenAI and Azure model lists) and update examples to use it
diff --git a/examples/agent/wiki.ts b/examples/agent/wiki.ts
index 65c00a3fe540e6f5f1eee257d1e0b271f71b9583..de2e1f90d1e89451552106ffd09d766b1380462b 100644
--- a/examples/agent/wiki.ts
+++ b/examples/agent/wiki.ts
@@ -1,7 +1,7 @@
 import { OpenAI, OpenAIAgent, WikipediaTool } from "llamaindex";
 
 async function main() {
-  const llm = new OpenAI({ model: "gpt-4-turbo-preview" });
+  const llm = new OpenAI({ model: "gpt-4-turbo" });
   const wikiTool = new WikipediaTool();
 
   // Create an OpenAIAgent with the Wikipedia tool
diff --git a/examples/multimodal/rag.ts b/examples/multimodal/rag.ts
index f7d945c804936e7870386b7d68fabf7107ec96c0..0213de3cc5f1cbdc072d5531c3d209dbfd72d350 100644
--- a/examples/multimodal/rag.ts
+++ b/examples/multimodal/rag.ts
@@ -13,7 +13,7 @@ Settings.chunkSize = 512;
 Settings.chunkOverlap = 20;
 
 // Update llm
-Settings.llm = new OpenAI({ model: "gpt-4-vision-preview", maxTokens: 512 });
+Settings.llm = new OpenAI({ model: "gpt-4-turbo", maxTokens: 512 });
 
 // Update callbackManager
 Settings.callbackManager = new CallbackManager({
diff --git a/examples/toolsStream.ts b/examples/toolsStream.ts
index 10e8400df776f17f13f8b2d670af885accb47bc1..f52d9049ac978527bfcddf34ab147018a228e935 100644
--- a/examples/toolsStream.ts
+++ b/examples/toolsStream.ts
@@ -1,7 +1,7 @@
 import { ChatResponseChunk, OpenAI } from "llamaindex";
 
 async function main() {
-  const llm = new OpenAI({ model: "gpt-4-turbo-preview" });
+  const llm = new OpenAI({ model: "gpt-4-turbo" });
 
   const args: Parameters<typeof llm.chat>[0] = {
     additionalChatOptions: {
diff --git a/examples/vision.ts b/examples/vision.ts
index 2cf841517ded5655a3ce89cb4cdc401c78d33cdf..6ee5d33d5b92a66eccc5a1e570efcd9317b9ab73 100644
--- a/examples/vision.ts
+++ b/examples/vision.ts
@@ -1,7 +1,7 @@
 import { OpenAI } from "llamaindex";
 
 (async () => {
-  const llm = new OpenAI({ model: "gpt-4-vision-preview", temperature: 0.1 });
+  const llm = new OpenAI({ model: "gpt-4-turbo", temperature: 0.1 });
 
   // complete api
   const response1 = await llm.complete({ prompt: "How are you?" });
diff --git a/packages/core/src/llm/azure.ts b/packages/core/src/llm/azure.ts
index 92d77228065fb85a89bae9b4e668d331a7db9ba0..81caf19a05b1ec6799d2b9890ab10a2e209e5d50 100644
--- a/packages/core/src/llm/azure.ts
+++ b/packages/core/src/llm/azure.ts
@@ -19,6 +19,10 @@ const ALL_AZURE_OPENAI_CHAT_MODELS = {
   },
   "gpt-4": { contextWindow: 8192, openAIModel: "gpt-4" },
   "gpt-4-32k": { contextWindow: 32768, openAIModel: "gpt-4-32k" },
+  "gpt-4-turbo": {
+    contextWindow: 128000,
+    openAIModel: "gpt-4-turbo",
+  },
   "gpt-4-vision-preview": {
     contextWindow: 128000,
     openAIModel: "gpt-4-vision-preview",
diff --git a/packages/core/src/llm/open_ai.ts b/packages/core/src/llm/open_ai.ts
index ffc5a176b55f32a9c98ed18f4aeaff6a283b01e9..00426c2215e5d537db802d8c1b807df74eae5267 100644
--- a/packages/core/src/llm/open_ai.ts
+++ b/packages/core/src/llm/open_ai.ts
@@ -96,6 +96,7 @@ export const GPT4_MODELS = {
   "gpt-4": { contextWindow: 8192 },
   "gpt-4-32k": { contextWindow: 32768 },
   "gpt-4-32k-0613": { contextWindow: 32768 },
+  "gpt-4-turbo": { contextWindow: 128000 },
   "gpt-4-turbo-preview": { contextWindow: 128000 },
   "gpt-4-1106-preview": { contextWindow: 128000 },
   "gpt-4-0125-preview": { contextWindow: 128000 },