diff --git a/.changeset/tame-apples-help.md b/.changeset/tame-apples-help.md
new file mode 100644
index 0000000000000000000000000000000000000000..77296fe3fa76046fd9c93424912109d87ae62ba4
--- /dev/null
+++ b/.changeset/tame-apples-help.md
@@ -0,0 +1,20 @@
+---
+"llamaindex": minor
+"@llamaindex/cloudflare-hono": patch
+"@llamaindex/examples": patch
+---
+
+Remove the deprecated `ServiceContext`.
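+
+Use the global `Settings` object instead. As a rough migration sketch based on
+the changes in this PR (model choices here mirror the updated examples):
+
+```ts
+import { Settings, SentenceSplitter } from "llamaindex";
+import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
+
+// previously: serviceContextFromDefaults({ llm, embedModel, chunkSize: 8191, chunkOverlap: 0 })
+Settings.llm = new OpenAI({ model: "gpt-4o-mini" });
+Settings.embedModel = new OpenAIEmbedding();
+Settings.nodeParser = new SentenceSplitter({ chunkSize: 8191, chunkOverlap: 0 });
+```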
diff --git a/e2e/examples/cloudflare-hono/src/index.ts b/e2e/examples/cloudflare-hono/src/index.ts
index 758e623089c328c96e3781393897541cfb7a8f08..b9ca5e969db5966617386db6b71cef48cc9c59da 100644
--- a/e2e/examples/cloudflare-hono/src/index.ts
+++ b/e2e/examples/cloudflare-hono/src/index.ts
@@ -20,9 +20,9 @@ app.post("/llm", async (c) => {
   const {
     extractText,
     QueryEngineTool,
-    serviceContextFromDefaults,
     VectorStoreIndex,
     Settings,
+    SentenceSplitter,
   } = await import("llamaindex");
 
   const { OpenAIAgent, OpenAI, OpenAIEmbedding } = await import(
@@ -31,7 +31,8 @@ app.post("/llm", async (c) => {
 
   const { PineconeVectorStore } = await import("@llamaindex/pinecone");
 
-  const llm = new OpenAI({
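+  // Configure the global Settings singleton (replaces the removed per-request ServiceContext)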
+  Settings.llm = new OpenAI({
     model: "gpt-4o-mini",
     apiKey: c.env.OPENAI_API_KEY,
   });
@@ -41,8 +41,8 @@ app.post("/llm", async (c) => {
     apiKey: c.env.OPENAI_API_KEY,
   });
 
-  const serviceContext = serviceContextFromDefaults({
-    llm,
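+  // Chunking options formerly passed to serviceContextFromDefaults now configure the node parser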
+  Settings.nodeParser = new SentenceSplitter({
     chunkSize: 8191,
     chunkOverlap: 0,
   });
@@ -51,7 +50,7 @@ app.post("/llm", async (c) => {
     namespace: "8xolsn4ulEQGdhnhP76yCzfLHdOZ",
   });
 
-  const index = await VectorStoreIndex.fromVectorStore(store, serviceContext);
+  const index = await VectorStoreIndex.fromVectorStore(store);
 
   const retriever = index.asRetriever({
     similarityTopK: 3,
diff --git a/examples/astradb/query.ts b/examples/astradb/query.ts
index 412e339a6575c2255448699ef29abca8bc2dcc2f..b339ad4f98a07f0b5e5c042d793879caf8c4b411 100644
--- a/examples/astradb/query.ts
+++ b/examples/astradb/query.ts
@@ -1,5 +1,5 @@
 import { AstraDBVectorStore } from "@llamaindex/astra";
-import { VectorStoreIndex, serviceContextFromDefaults } from "llamaindex";
+import { VectorStoreIndex } from "llamaindex";
 
 const collectionName = "movie_reviews";
 
@@ -8,8 +8,7 @@ async function main() {
     const astraVS = new AstraDBVectorStore({ contentKey: "reviewtext" });
     await astraVS.connect(collectionName);
 
-    const ctx = serviceContextFromDefaults();
-    const index = await VectorStoreIndex.fromVectorStore(astraVS, ctx);
+    const index = await VectorStoreIndex.fromVectorStore(astraVS);
 
     const retriever = await index.asRetriever({ similarityTopK: 20 });
 
diff --git a/examples/mongo.ts b/examples/mongo.ts
index 4bb8aa59418688beaf76bf52953e341219fca7a1..e9ad675b46d18391bc2f829b4a0b3ffb6be7fb31 100644
--- a/examples/mongo.ts
+++ b/examples/mongo.ts
@@ -30,13 +30,12 @@ async function main() {
 
   // Split text and create embeddings. Store them in a VectorStoreIndex
   // var storageContext = await storageContextFromDefaults({});
-  // var serviceContext = serviceContextFromDefaults({});
   // const docStore = storageContext.docStore;
 
   // for (const doc of documents) {
   //   docStore.setDocumentHash(doc.id_, doc.hash);
   // }
-  // const nodes = serviceContext.nodeParser.getNodesFromDocuments(documents);
+  // const nodes = Settings.nodeParser.getNodesFromDocuments(documents);
   // console.log(nodes);
 
   //
diff --git a/packages/llamaindex/src/ServiceContext.ts b/packages/llamaindex/src/ServiceContext.ts
deleted file mode 100644
index 9500ce44371d58e8780f9e44af4584f13b5e0eb3..0000000000000000000000000000000000000000
--- a/packages/llamaindex/src/ServiceContext.ts
+++ /dev/null
@@ -1,67 +0,0 @@
-import type { BaseEmbedding } from "@llamaindex/core/embeddings";
-import { PromptHelper } from "@llamaindex/core/indices";
-import type { LLM } from "@llamaindex/core/llms";
-import {
-  type NodeParser,
-  SentenceSplitter,
-} from "@llamaindex/core/node-parser";
-import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
-
-/**
- * The ServiceContext is a collection of components that are used in different parts of the application.
- *
- * @deprecated This will no longer supported, please use `Settings` instead.
- */
-export interface ServiceContext {
-  llm: LLM;
-  promptHelper: PromptHelper;
-  embedModel: BaseEmbedding;
-  nodeParser: NodeParser;
-  // llamaLogger: any;
-}
-
-export interface ServiceContextOptions {
-  llm?: LLM;
-  promptHelper?: PromptHelper;
-  embedModel?: BaseEmbedding;
-  nodeParser?: NodeParser;
-  // NodeParser arguments
-  chunkSize?: number;
-  chunkOverlap?: number;
-}
-
-export function serviceContextFromDefaults(options?: ServiceContextOptions) {
-  const serviceContext: ServiceContext = {
-    llm: options?.llm ?? new OpenAI(),
-    embedModel: options?.embedModel ?? new OpenAIEmbedding(),
-    nodeParser:
-      options?.nodeParser ??
-      new SentenceSplitter({
-        chunkSize: options?.chunkSize,
-        chunkOverlap: options?.chunkOverlap,
-      }),
-    promptHelper: options?.promptHelper ?? new PromptHelper(),
-  };
-
-  return serviceContext;
-}
-
-export function serviceContextFromServiceContext(
-  serviceContext: ServiceContext,
-  options: ServiceContextOptions,
-) {
-  const newServiceContext = { ...serviceContext };
-  if (options.llm) {
-    newServiceContext.llm = options.llm;
-  }
-  if (options.promptHelper) {
-    newServiceContext.promptHelper = options.promptHelper;
-  }
-  if (options.embedModel) {
-    newServiceContext.embedModel = options.embedModel;
-  }
-  if (options.nodeParser) {
-    newServiceContext.nodeParser = options.nodeParser;
-  }
-  return newServiceContext;
-}
diff --git a/packages/llamaindex/src/Settings.ts b/packages/llamaindex/src/Settings.ts
index 89676036be659a7306d8d5d1374c46232703b879..82a1098dbaba50291563e57c52812e34c40a31b2 100644
--- a/packages/llamaindex/src/Settings.ts
+++ b/packages/llamaindex/src/Settings.ts
@@ -12,7 +12,6 @@ import {
   SentenceSplitter,
 } from "@llamaindex/core/node-parser";
 import { AsyncLocalStorage } from "@llamaindex/env";
-import type { ServiceContext } from "./ServiceContext.js";
 
 export type PromptConfig = {
   llm?: string;
@@ -163,42 +162,4 @@ class GlobalSettings implements Config {
   }
 }
 
-export const llmFromSettingsOrContext = (serviceContext?: ServiceContext) => {
-  if (serviceContext?.llm) {
-    return serviceContext.llm;
-  }
-
-  return Settings.llm;
-};
-
-export const nodeParserFromSettingsOrContext = (
-  serviceContext?: ServiceContext,
-) => {
-  if (serviceContext?.nodeParser) {
-    return serviceContext.nodeParser;
-  }
-
-  return Settings.nodeParser;
-};
-
-export const embedModelFromSettingsOrContext = (
-  serviceContext?: ServiceContext,
-) => {
-  if (serviceContext?.embedModel) {
-    return serviceContext.embedModel;
-  }
-
-  return Settings.embedModel;
-};
-
-export const promptHelperFromSettingsOrContext = (
-  serviceContext?: ServiceContext,
-) => {
-  if (serviceContext?.promptHelper) {
-    return serviceContext.promptHelper;
-  }
-
-  return Settings.promptHelper;
-};
-
 export const Settings = new GlobalSettings();
diff --git a/packages/llamaindex/src/cloud/type.ts b/packages/llamaindex/src/cloud/type.ts
index cee7275cca1cee2de26d4f23be11efbbbe1d6445..804b70fdd53dbdf9b3d6cee9b8605e727a9c847d 100644
--- a/packages/llamaindex/src/cloud/type.ts
+++ b/packages/llamaindex/src/cloud/type.ts
@@ -1,5 +1,3 @@
-import type { ServiceContext } from "../ServiceContext.js";
-
 export type ClientParams = {
   apiKey?: string | undefined;
   baseUrl?: string | undefined;
@@ -9,5 +7,4 @@ export type CloudConstructorParams = {
   name: string;
   projectName: string;
   organizationId?: string | undefined;
-  serviceContext?: ServiceContext | undefined;
 } & ClientParams;
diff --git a/packages/llamaindex/src/engines/chat/CondenseQuestionChatEngine.ts b/packages/llamaindex/src/engines/chat/CondenseQuestionChatEngine.ts
index 1bf8d14d386f77d291f2fb24ef666f3fc7ba71fe..4f659b681dd83e18e5183c9c2ea5f09825b3dc38 100644
--- a/packages/llamaindex/src/engines/chat/CondenseQuestionChatEngine.ts
+++ b/packages/llamaindex/src/engines/chat/CondenseQuestionChatEngine.ts
@@ -18,8 +18,7 @@ import {
   messagesToHistory,
   streamReducer,
 } from "@llamaindex/core/utils";
-import type { ServiceContext } from "../../ServiceContext.js";
-import { llmFromSettingsOrContext } from "../../Settings.js";
+import { Settings } from "../../Settings.js";
 
 /**
  * CondenseQuestionChatEngine is used in conjunction with a Index (for example VectorStoreIndex).
@@ -44,7 +43,6 @@ export class CondenseQuestionChatEngine extends BaseChatEngine {
   constructor(init: {
     queryEngine: BaseQueryEngine;
     chatHistory: ChatMessage[];
-    serviceContext?: ServiceContext;
     condenseMessagePrompt?: CondenseQuestionPrompt;
   }) {
     super();
@@ -53,7 +51,7 @@ export class CondenseQuestionChatEngine extends BaseChatEngine {
     this.memory = new ChatMemoryBuffer({
       chatHistory: init?.chatHistory,
     });
-    this.llm = llmFromSettingsOrContext(init?.serviceContext);
+    this.llm = Settings.llm;
     this.condenseMessagePrompt =
       init?.condenseMessagePrompt ?? defaultCondenseQuestionPrompt;
   }
diff --git a/packages/llamaindex/src/engines/query/RouterQueryEngine.ts b/packages/llamaindex/src/engines/query/RouterQueryEngine.ts
index 8dbd00c3f60973e4314064df5312fed5465b6970..dc6e24a39044eb5d1ed7edee6d6f9c84fbc84d3e 100644
--- a/packages/llamaindex/src/engines/query/RouterQueryEngine.ts
+++ b/packages/llamaindex/src/engines/query/RouterQueryEngine.ts
@@ -9,10 +9,9 @@ import {
 } from "@llamaindex/core/response-synthesizers";
 import { EngineResponse, type NodeWithScore } from "@llamaindex/core/schema";
 import { extractText } from "@llamaindex/core/utils";
-import type { ServiceContext } from "../../ServiceContext.js";
-import { llmFromSettingsOrContext } from "../../Settings.js";
 import type { BaseSelector } from "../../selectors/index.js";
 import { LLMSingleSelector } from "../../selectors/index.js";
+import { Settings } from "../../Settings.js";
 
 type RouterQueryEngineTool = {
   queryEngine: BaseQueryEngine;
@@ -60,7 +59,6 @@ export class RouterQueryEngine extends BaseQueryEngine {
   constructor(init: {
     selector: BaseSelector;
     queryEngineTools: RouterQueryEngineTool[];
-    serviceContext?: ServiceContext | undefined;
     summarizer?: BaseSynthesizer | undefined;
     verbose?: boolean | undefined;
   }) {
@@ -106,20 +104,16 @@ export class RouterQueryEngine extends BaseQueryEngine {
   static fromDefaults(init: {
     queryEngineTools: RouterQueryEngineTool[];
     selector?: BaseSelector;
-    serviceContext?: ServiceContext;
     summarizer?: BaseSynthesizer;
     verbose?: boolean;
   }) {
-    const serviceContext = init.serviceContext;
-
     return new RouterQueryEngine({
       selector:
         init.selector ??
         new LLMSingleSelector({
-          llm: llmFromSettingsOrContext(serviceContext),
+          llm: Settings.llm,
         }),
       queryEngineTools: init.queryEngineTools,
-      serviceContext,
       summarizer: init.summarizer,
       verbose: init.verbose,
     });
diff --git a/packages/llamaindex/src/engines/query/SubQuestionQueryEngine.ts b/packages/llamaindex/src/engines/query/SubQuestionQueryEngine.ts
index 2c52090362f9929457c16f5b79bbdc8aa587c8bd..7b14637691032bd2e06dcf0171a4c162744ee8d4 100644
--- a/packages/llamaindex/src/engines/query/SubQuestionQueryEngine.ts
+++ b/packages/llamaindex/src/engines/query/SubQuestionQueryEngine.ts
@@ -2,7 +2,6 @@ import type { BaseSynthesizer } from "@llamaindex/core/response-synthesizers";
 import { getResponseSynthesizer } from "@llamaindex/core/response-synthesizers";
 import { TextNode, type NodeWithScore } from "@llamaindex/core/schema";
 import { LLMQuestionGenerator } from "../../QuestionGenerator.js";
-import type { ServiceContext } from "../../ServiceContext.js";
 
 import type { BaseTool, ToolMetadata } from "@llamaindex/core/llms";
 import type { PromptsRecord } from "@llamaindex/core/prompts";
@@ -93,7 +92,6 @@ export class SubQuestionQueryEngine extends BaseQueryEngine {
     queryEngineTools: BaseTool[];
     questionGen?: BaseQuestionGenerator;
     responseSynthesizer?: BaseSynthesizer;
-    serviceContext?: ServiceContext;
   }) {
     const questionGen = init.questionGen ?? new LLMQuestionGenerator();
     const responseSynthesizer =
diff --git a/packages/llamaindex/src/evaluation/Correctness.ts b/packages/llamaindex/src/evaluation/Correctness.ts
index 3bc3a5146a1f17f4e086eba5cf1800adc23253de..830cf26d2a9cda3ef2ec21d268c4eba249762401 100644
--- a/packages/llamaindex/src/evaluation/Correctness.ts
+++ b/packages/llamaindex/src/evaluation/Correctness.ts
@@ -2,8 +2,7 @@ import type { ChatMessage, LLM } from "@llamaindex/core/llms";
 import { PromptMixin } from "@llamaindex/core/prompts";
 import { MetadataMode } from "@llamaindex/core/schema";
 import { extractText } from "@llamaindex/core/utils";
-import type { ServiceContext } from "../ServiceContext.js";
-import { llmFromSettingsOrContext } from "../Settings.js";
+import { Settings } from "../Settings.js";
 import type { CorrectnessSystemPrompt } from "./prompts.js";
 import {
   defaultCorrectnessSystemPrompt,
@@ -18,7 +17,6 @@ import type {
 import { defaultEvaluationParser } from "./utils.js";
 
 type CorrectnessParams = {
-  serviceContext?: ServiceContext;
   scoreThreshold?: number;
   parserFunction?: (str: string) => [number, string];
 };
@@ -35,7 +33,7 @@ export class CorrectnessEvaluator extends PromptMixin implements BaseEvaluator {
   constructor(params?: CorrectnessParams) {
     super();
 
-    this.llm = llmFromSettingsOrContext(params?.serviceContext);
+    this.llm = Settings.llm;
     this.correctnessPrompt = defaultCorrectnessSystemPrompt;
     this.scoreThreshold = params?.scoreThreshold ?? 4.0;
     this.parserFunction = params?.parserFunction ?? defaultEvaluationParser;
diff --git a/packages/llamaindex/src/evaluation/Faithfulness.ts b/packages/llamaindex/src/evaluation/Faithfulness.ts
index 359b00209d1a82dc0a921952c0a74ffddf485408..ab6a1908f1e8939cdc5b0acaf4417b2e31b7ada6 100644
--- a/packages/llamaindex/src/evaluation/Faithfulness.ts
+++ b/packages/llamaindex/src/evaluation/Faithfulness.ts
@@ -1,7 +1,6 @@
 import { PromptMixin, type ModuleRecord } from "@llamaindex/core/prompts";
 import { Document, MetadataMode } from "@llamaindex/core/schema";
 import { extractText } from "@llamaindex/core/utils";
-import type { ServiceContext } from "../ServiceContext.js";
 import { SummaryIndex } from "../indices/summary/index.js";
 import type {
   FaithfulnessRefinePrompt,
@@ -22,19 +21,16 @@ export class FaithfulnessEvaluator
   extends PromptMixin
   implements BaseEvaluator
 {
-  private serviceContext?: ServiceContext | undefined;
   private raiseError: boolean;
   private evalTemplate: FaithfulnessTextQAPrompt;
   private refineTemplate: FaithfulnessRefinePrompt;
 
   constructor(params?: {
-    serviceContext?: ServiceContext | undefined;
     raiseError?: boolean | undefined;
     faithfulnessSystemPrompt?: FaithfulnessTextQAPrompt | undefined;
     faithFulnessRefinePrompt?: FaithfulnessRefinePrompt | undefined;
   }) {
     super();
-    this.serviceContext = params?.serviceContext;
     this.raiseError = params?.raiseError ?? false;
 
     this.evalTemplate =
@@ -92,9 +88,7 @@ export class FaithfulnessEvaluator
 
     const docs = contexts?.map((context) => new Document({ text: context }));
 
-    const index = await SummaryIndex.fromDocuments(docs, {
-      serviceContext: this.serviceContext,
-    });
+    const index = await SummaryIndex.fromDocuments(docs);
 
     const queryEngine = index.asQueryEngine();
 
diff --git a/packages/llamaindex/src/evaluation/Relevancy.ts b/packages/llamaindex/src/evaluation/Relevancy.ts
index da22741b8bd4ecaee4bddacb4097c4066c03c8ba..596380ac78e30dbc0441dd648c85702387d8f032 100644
--- a/packages/llamaindex/src/evaluation/Relevancy.ts
+++ b/packages/llamaindex/src/evaluation/Relevancy.ts
@@ -1,7 +1,6 @@
 import { PromptMixin, type ModuleRecord } from "@llamaindex/core/prompts";
 import { Document, MetadataMode } from "@llamaindex/core/schema";
 import { extractText } from "@llamaindex/core/utils";
-import type { ServiceContext } from "../ServiceContext.js";
 import { SummaryIndex } from "../indices/summary/index.js";
 import type { RelevancyEvalPrompt, RelevancyRefinePrompt } from "./prompts.js";
 import {
@@ -16,14 +15,12 @@ import type {
 } from "./types.js";
 
 type RelevancyParams = {
-  serviceContext?: ServiceContext | undefined;
   raiseError?: boolean | undefined;
   evalTemplate?: RelevancyEvalPrompt | undefined;
   refineTemplate?: RelevancyRefinePrompt | undefined;
 };
 
 export class RelevancyEvaluator extends PromptMixin implements BaseEvaluator {
-  private serviceContext?: ServiceContext | undefined;
   private raiseError: boolean;
 
   private evalTemplate: RelevancyEvalPrompt;
@@ -32,7 +29,6 @@ export class RelevancyEvaluator extends PromptMixin implements BaseEvaluator {
   constructor(params?: RelevancyParams) {
     super();
 
-    this.serviceContext = params?.serviceContext;
     this.raiseError = params?.raiseError ?? false;
     this.evalTemplate = params?.evalTemplate ?? defaultRelevancyEvalPrompt;
     this.refineTemplate =
@@ -78,9 +74,7 @@ export class RelevancyEvaluator extends PromptMixin implements BaseEvaluator {
 
     const docs = contexts?.map((context) => new Document({ text: context }));
 
-    const index = await SummaryIndex.fromDocuments(docs, {
-      serviceContext: this.serviceContext,
-    });
+    const index = await SummaryIndex.fromDocuments(docs);
 
     const queryResponse = `Question: ${extractText(query)}\nResponse: ${response}`;
 
diff --git a/packages/llamaindex/src/index.edge.ts b/packages/llamaindex/src/index.edge.ts
index a14ebbf017b76f90eb4baf5988c789871f1886f5..992e58d6d5ac13aa8498ddcc6294b10c05ba0ebd 100644
--- a/packages/llamaindex/src/index.edge.ts
+++ b/packages/llamaindex/src/index.edge.ts
@@ -82,7 +82,6 @@ export * from "./OutputParser.js";
 export * from "./postprocessors/index.js";
 export * from "./QuestionGenerator.js";
 export * from "./selectors/index.js";
-export * from "./ServiceContext.js";
 export * from "./storage/StorageContext.js";
 export * from "./tools/index.js";
 export * from "./types.js";
diff --git a/packages/llamaindex/src/indices/BaseIndex.ts b/packages/llamaindex/src/indices/BaseIndex.ts
index 9e14deb2726046c3fc975bb77c6766b9e8020d8e..2d90a7fd8f185ce8ec8b9bbaf30c4c177e7eacd7 100644
--- a/packages/llamaindex/src/indices/BaseIndex.ts
+++ b/packages/llamaindex/src/indices/BaseIndex.ts
@@ -8,13 +8,11 @@ import type { BaseRetriever } from "@llamaindex/core/retriever";
 import type { BaseNode, Document } from "@llamaindex/core/schema";
 import type { BaseDocumentStore } from "@llamaindex/core/storage/doc-store";
 import type { BaseIndexStore } from "@llamaindex/core/storage/index-store";
-import type { ServiceContext } from "../ServiceContext.js";
-import { nodeParserFromSettingsOrContext } from "../Settings.js";
 import { runTransformations } from "../ingestion/IngestionPipeline.js";
+import { Settings } from "../Settings.js";
 import type { StorageContext } from "../storage/StorageContext.js";
 
 export interface BaseIndexInit<T> {
-  serviceContext?: ServiceContext | undefined;
   storageContext: StorageContext;
   docStore: BaseDocumentStore;
   indexStore?: BaseIndexStore | undefined;
@@ -26,14 +24,12 @@ export interface BaseIndexInit<T> {
  * they can be retrieved for our queries.
  */
 export abstract class BaseIndex<T> {
-  serviceContext?: ServiceContext | undefined;
   storageContext: StorageContext;
   docStore: BaseDocumentStore;
   indexStore?: BaseIndexStore | undefined;
   indexStruct: T;
 
   constructor(init: BaseIndexInit<T>) {
-    this.serviceContext = init.serviceContext;
     this.storageContext = init.storageContext;
     this.docStore = init.docStore;
     this.indexStore = init.indexStore;
@@ -70,10 +66,7 @@ export abstract class BaseIndex<T> {
    * @param document
    */
   async insert(document: Document) {
-    const nodes = await runTransformations(
-      [document],
-      [nodeParserFromSettingsOrContext(this.serviceContext)],
-    );
+    const nodes = await runTransformations([document], [Settings.nodeParser]);
     await this.insertNodes(nodes);
     await this.docStore.setDocumentHash(document.id_, document.hash);
   }
diff --git a/packages/llamaindex/src/indices/keyword/index.ts b/packages/llamaindex/src/indices/keyword/index.ts
index ec44118fcb4009b26c177c8de776e4290b58ff83..ca79b0376126fe485bbf952534eae8b2506c6825 100644
--- a/packages/llamaindex/src/indices/keyword/index.ts
+++ b/packages/llamaindex/src/indices/keyword/index.ts
@@ -5,8 +5,6 @@ import type {
   NodeWithScore,
 } from "@llamaindex/core/schema";
 import { MetadataMode } from "@llamaindex/core/schema";
-import type { ServiceContext } from "../../ServiceContext.js";
-import { serviceContextFromDefaults } from "../../ServiceContext.js";
 import { RetrieverQueryEngine } from "../../engines/query/index.js";
 import type { StorageContext } from "../../storage/StorageContext.js";
 import { storageContextFromDefaults } from "../../storage/StorageContext.js";
@@ -34,7 +32,7 @@ import type {
 import { BaseRetriever } from "@llamaindex/core/retriever";
 import type { BaseDocumentStore } from "@llamaindex/core/storage/doc-store";
 import { extractText } from "@llamaindex/core/utils";
-import { llmFromSettingsOrContext } from "../../Settings.js";
+import { Settings } from "../../Settings.js";
 import {
   ContextChatEngine,
   type BaseChatEngine,
@@ -45,7 +43,6 @@ export interface KeywordIndexOptions {
   nodes?: BaseNode[];
   indexStruct?: KeywordTable;
   indexId?: string;
-  serviceContext?: ServiceContext;
   llm?: LLM;
   storageContext?: StorageContext;
 }
@@ -84,7 +81,7 @@ abstract class BaseKeywordTableRetriever extends BaseRetriever {
     this.index = index;
     this.indexStruct = index.indexStruct;
     this.docstore = index.docStore;
-    this.llm = llmFromSettingsOrContext(index.serviceContext);
+    this.llm = Settings.llm;
 
     this.maxKeywordsPerQuery = maxKeywordsPerQuery;
     this.numChunksPerQuery = numChunksPerQuery;
@@ -172,7 +169,6 @@ export class KeywordTableIndex extends BaseIndex<KeywordTable> {
   static async init(options: KeywordIndexOptions): Promise<KeywordTableIndex> {
     const storageContext =
       options.storageContext ?? (await storageContextFromDefaults({}));
-    const serviceContext = options.serviceContext;
     const { docStore, indexStore } = storageContext;
 
     // Setup IndexStruct from storage
@@ -219,7 +215,6 @@ export class KeywordTableIndex extends BaseIndex<KeywordTable> {
       indexStruct = await KeywordTableIndex.buildIndexFromNodes(
         options.nodes,
         storageContext.docStore,
-        serviceContext,
       );
 
       await indexStore.addIndexStruct(indexStruct);
@@ -227,7 +222,6 @@ export class KeywordTableIndex extends BaseIndex<KeywordTable> {
 
     return new KeywordTableIndex({
       storageContext,
-      serviceContext,
       docStore,
       indexStore,
       indexStruct,
@@ -268,11 +262,8 @@ export class KeywordTableIndex extends BaseIndex<KeywordTable> {
     });
   }
 
-  static async extractKeywords(
-    text: string,
-    serviceContext?: ServiceContext,
-  ): Promise<Set<string>> {
-    const llm = llmFromSettingsOrContext(serviceContext);
+  static async extractKeywords(text: string): Promise<Set<string>> {
+    const llm = Settings.llm;
 
     const response = await llm.complete({
       prompt: defaultKeywordExtractPrompt.format({
@@ -288,19 +279,16 @@ export class KeywordTableIndex extends BaseIndex<KeywordTable> {
    * @param documents
    * @param args
    * @param args.storageContext
-   * @param args.serviceContext
    * @returns
    */
   static async fromDocuments(
     documents: Document[],
     args: {
       storageContext?: StorageContext;
-      serviceContext?: ServiceContext;
     } = {},
   ): Promise<KeywordTableIndex> {
-    let { storageContext, serviceContext } = args;
+    let { storageContext } = args;
     storageContext = storageContext ?? (await storageContextFromDefaults({}));
-    serviceContext = serviceContext ?? serviceContextFromDefaults({});
     const docStore = storageContext.docStore;
 
     await docStore.addDocuments(documents, true);
@@ -308,11 +296,10 @@ export class KeywordTableIndex extends BaseIndex<KeywordTable> {
       await docStore.setDocumentHash(doc.id_, doc.hash);
     }
 
-    const nodes = serviceContext.nodeParser.getNodesFromDocuments(documents);
+    const nodes = Settings.nodeParser.getNodesFromDocuments(documents);
     const index = await KeywordTableIndex.init({
       nodes,
       storageContext,
-      serviceContext,
     });
     return index;
   }
@@ -321,20 +308,17 @@ export class KeywordTableIndex extends BaseIndex<KeywordTable> {
    * Get keywords for nodes and place them into the index.
    * @param nodes
    * @param docStore
-   * @param serviceContext
    * @returns
    */
   static async buildIndexFromNodes(
     nodes: BaseNode[],
     docStore: BaseDocumentStore,
-    serviceContext?: ServiceContext,
   ): Promise<KeywordTable> {
     const indexStruct = new KeywordTable();
     await docStore.addDocuments(nodes, true);
     for (const node of nodes) {
       const keywords = await KeywordTableIndex.extractKeywords(
         node.getContent(MetadataMode.LLM),
-        serviceContext,
       );
       indexStruct.addNode([...keywords], node.id_);
     }
@@ -345,7 +329,6 @@ export class KeywordTableIndex extends BaseIndex<KeywordTable> {
     for (const node of nodes) {
       const keywords = await KeywordTableIndex.extractKeywords(
         node.getContent(MetadataMode.LLM),
-        this.serviceContext,
       );
       this.indexStruct.addNode([...keywords], node.id_);
     }
diff --git a/packages/llamaindex/src/indices/summary/index.ts b/packages/llamaindex/src/indices/summary/index.ts
index aa31576713d8d076e3bec2f77152cad747e4fad1..f1e955266c0fa45dd65693df2543978b34358e1b 100644
--- a/packages/llamaindex/src/indices/summary/index.ts
+++ b/packages/llamaindex/src/indices/summary/index.ts
@@ -19,11 +19,7 @@ import type {
 } from "@llamaindex/core/storage/doc-store";
 import { extractText } from "@llamaindex/core/utils";
 import _ from "lodash";
-import type { ServiceContext } from "../../ServiceContext.js";
-import {
-  llmFromSettingsOrContext,
-  nodeParserFromSettingsOrContext,
-} from "../../Settings.js";
+import { Settings } from "../../Settings.js";
 import type {
   BaseChatEngine,
   ContextChatEngineOptions,
@@ -58,7 +54,6 @@ export interface SummaryIndexOptions {
   nodes?: BaseNode[] | undefined;
   indexStruct?: IndexList | undefined;
   indexId?: string | undefined;
-  serviceContext?: ServiceContext | undefined;
   storageContext?: StorageContext | undefined;
 }
 
@@ -73,7 +68,6 @@ export class SummaryIndex extends BaseIndex<IndexList> {
   static async init(options: SummaryIndexOptions): Promise<SummaryIndex> {
     const storageContext =
       options.storageContext ?? (await storageContextFromDefaults({}));
-    const serviceContext = options.serviceContext;
     const { docStore, indexStore } = storageContext;
 
     // Setup IndexStruct from storage
@@ -130,7 +124,6 @@ export class SummaryIndex extends BaseIndex<IndexList> {
 
     return new SummaryIndex({
       storageContext,
-      serviceContext,
       docStore,
       indexStore,
       indexStruct,
@@ -141,11 +134,9 @@ export class SummaryIndex extends BaseIndex<IndexList> {
     documents: Document[],
     args: {
       storageContext?: StorageContext | undefined;
-      serviceContext?: ServiceContext | undefined;
     } = {},
   ): Promise<SummaryIndex> {
     let { storageContext } = args;
-    const serviceContext = args.serviceContext;
     storageContext = storageContext ?? (await storageContextFromDefaults({}));
     const docStore = storageContext.docStore;
 
@@ -154,15 +145,11 @@ export class SummaryIndex extends BaseIndex<IndexList> {
       await docStore.setDocumentHash(doc.id_, doc.hash);
     }
 
-    const nodes =
-      nodeParserFromSettingsOrContext(serviceContext).getNodesFromDocuments(
-        documents,
-      );
+    const nodes = Settings.nodeParser.getNodesFromDocuments(documents);
 
     const index = await SummaryIndex.init({
       nodes,
       storageContext,
-      serviceContext,
     });
     return index;
   }
@@ -326,7 +313,6 @@ export class SummaryIndexLLMRetriever extends BaseRetriever {
   choiceBatchSize: number;
   formatNodeBatchFn: NodeFormatterFunction;
   parseChoiceSelectAnswerFn: ChoiceSelectParserFunction;
-  serviceContext?: ServiceContext | undefined;
 
   constructor(
     index: SummaryIndex,
@@ -334,7 +320,6 @@ export class SummaryIndexLLMRetriever extends BaseRetriever {
     choiceBatchSize: number = 10,
     formatNodeBatchFn?: NodeFormatterFunction,
     parseChoiceSelectAnswerFn?: ChoiceSelectParserFunction,
-    serviceContext?: ServiceContext,
   ) {
     super();
     this.index = index;
@@ -343,7 +328,6 @@ export class SummaryIndexLLMRetriever extends BaseRetriever {
     this.formatNodeBatchFn = formatNodeBatchFn || defaultFormatNodeBatchFn;
     this.parseChoiceSelectAnswerFn =
       parseChoiceSelectAnswerFn || defaultParseChoiceSelectAnswerFn;
-    this.serviceContext = serviceContext || index.serviceContext;
   }
 
   async _retrieve(query: QueryBundle): Promise<NodeWithScore[]> {
@@ -357,7 +341,7 @@ export class SummaryIndexLLMRetriever extends BaseRetriever {
       const fmtBatchStr = this.formatNodeBatchFn(nodesBatch);
       const input = { context: fmtBatchStr, query: extractText(query) };
 
-      const llm = llmFromSettingsOrContext(this.serviceContext);
+      const llm = Settings.llm;
 
       const rawResponse = (
         await llm.complete({
diff --git a/packages/llamaindex/src/indices/vectorStore/index.ts b/packages/llamaindex/src/indices/vectorStore/index.ts
index 6b21f7ae6eca5f5043a0a61a954d3e7f0d80a731..fc03b6e19caf8427cc0bff952b8f2fc10bd6af18 100644
--- a/packages/llamaindex/src/indices/vectorStore/index.ts
+++ b/packages/llamaindex/src/indices/vectorStore/index.ts
@@ -24,16 +24,15 @@ import {
 import type { BaseIndexStore } from "@llamaindex/core/storage/index-store";
 import { extractText } from "@llamaindex/core/utils";
 import { VectorStoreQueryMode } from "@llamaindex/core/vector-store";
-import type { ServiceContext } from "../../ServiceContext.js";
-import { nodeParserFromSettingsOrContext } from "../../Settings.js";
+import { Settings } from "../../Settings.js";
 import { RetrieverQueryEngine } from "../../engines/query/RetrieverQueryEngine.js";
 import {
   addNodesToVectorStores,
   runTransformations,
 } from "../../ingestion/IngestionPipeline.js";
 import {
-  DocStoreStrategy,
   createDocStoreStrategy,
+  DocStoreStrategy,
 } from "../../ingestion/strategies/index.js";
 import type { StorageContext } from "../../storage/StorageContext.js";
 import { storageContextFromDefaults } from "../../storage/StorageContext.js";
@@ -52,7 +51,6 @@ interface IndexStructOptions {
 }
 export interface VectorIndexOptions extends IndexStructOptions {
   nodes?: BaseNode[] | undefined;
-  serviceContext?: ServiceContext | undefined;
   storageContext?: StorageContext | undefined;
   vectorStores?: VectorStoreByType | undefined;
   logProgress?: boolean | undefined;
@@ -81,7 +79,7 @@ export class VectorStoreIndex extends BaseIndex<IndexDict> {
     super(init);
     this.indexStore = init.indexStore;
     this.vectorStores = init.vectorStores ?? init.storageContext.vectorStores;
-    this.embedModel = init.serviceContext?.embedModel;
+    this.embedModel = Settings.embedModel;
   }
 
   /**
@@ -94,7 +92,6 @@ export class VectorStoreIndex extends BaseIndex<IndexDict> {
   ): Promise<VectorStoreIndex> {
     const storageContext =
       options.storageContext ?? (await storageContextFromDefaults({}));
-    const serviceContext = options.serviceContext;
     const indexStore = storageContext.indexStore;
     const docStore = storageContext.docStore;
 
@@ -113,7 +110,6 @@ export class VectorStoreIndex extends BaseIndex<IndexDict> {
 
     const index = new this({
       storageContext,
-      serviceContext,
       docStore,
       indexStruct,
       indexStore,
@@ -214,10 +210,7 @@ export class VectorStoreIndex extends BaseIndex<IndexDict> {
     } = {},
   ): Promise<VectorStoreIndex> {
     args.storageContext =
-      args.storageContext ??
-      (await storageContextFromDefaults({
-        serviceContext: args.serviceContext,
-      }));
+      args.storageContext ?? (await storageContextFromDefaults({}));
     args.vectorStores = args.vectorStores ?? args.storageContext.vectorStores;
     args.docStoreStrategy =
       args.docStoreStrategy ??
@@ -240,7 +233,7 @@ export class VectorStoreIndex extends BaseIndex<IndexDict> {
     );
     args.nodes = await runTransformations(
       documents,
-      [nodeParserFromSettingsOrContext(args.serviceContext)],
+      [Settings.nodeParser],
       {},
       { docStoreStrategy },
     );
@@ -255,10 +248,7 @@ export class VectorStoreIndex extends BaseIndex<IndexDict> {
     }
   }
 
-  static async fromVectorStores(
-    vectorStores: VectorStoreByType,
-    serviceContext?: ServiceContext,
-  ) {
+  static async fromVectorStores(vectorStores: VectorStoreByType) {
     if (!vectorStores[ModalityType.TEXT]?.storesText) {
       throw new Error(
         "Cannot initialize from a vector store that does not store text",
@@ -272,20 +262,13 @@ export class VectorStoreIndex extends BaseIndex<IndexDict> {
     const index = await this.init({
       nodes: [],
       storageContext,
-      serviceContext,
     });
 
     return index;
   }
 
-  static async fromVectorStore(
-    vectorStore: BaseVectorStore,
-    serviceContext?: ServiceContext,
-  ) {
-    return this.fromVectorStores(
-      { [ModalityType.TEXT]: vectorStore },
-      serviceContext,
-    );
+  static async fromVectorStore(vectorStore: BaseVectorStore) {
+    return this.fromVectorStores({ [ModalityType.TEXT]: vectorStore });
   }
 
   asRetriever(
@@ -436,7 +419,6 @@ export class VectorIndexRetriever extends BaseRetriever {
   index: VectorStoreIndex;
   topK: TopKMap;
 
-  serviceContext?: ServiceContext | undefined;
   filters?: MetadataFilters | undefined;
   queryMode?: VectorStoreQueryMode | undefined;
 
@@ -444,7 +426,6 @@ export class VectorIndexRetriever extends BaseRetriever {
     super();
     this.index = options.index;
     this.queryMode = options.mode ?? VectorStoreQueryMode.DEFAULT;
-    this.serviceContext = this.index.serviceContext;
     if ("topK" in options && options.topK) {
       this.topK = options.topK;
     } else {
diff --git a/packages/llamaindex/src/selectors/utils.ts b/packages/llamaindex/src/selectors/utils.ts
index a4db900d8e13c730717b034541701a853ad724ca..9ad358c552bb4df11db711b163bb3cf50643a4f5 100644
--- a/packages/llamaindex/src/selectors/utils.ts
+++ b/packages/llamaindex/src/selectors/utils.ts
@@ -1,15 +1,13 @@
-import type { ServiceContext } from "../ServiceContext.js";
-import { llmFromSettingsOrContext } from "../Settings.js";
+import { Settings } from "../Settings.js";
 import type { BaseSelector } from "./base.js";
 import { LLMMultiSelector, LLMSingleSelector } from "./llmSelectors.js";
 
 export const getSelectorFromContext = (
-  serviceContext: ServiceContext,
   isMulti: boolean = false,
 ): BaseSelector => {
   let selector: BaseSelector | null = null;
 
-  const llm = llmFromSettingsOrContext(serviceContext);
+  const llm = Settings.llm;
 
   if (isMulti) {
     selector = new LLMMultiSelector({ llm });
diff --git a/packages/llamaindex/src/storage/StorageContext.ts b/packages/llamaindex/src/storage/StorageContext.ts
index aa0d056ce62fac576d75ec8d2fe3aa88a239842b..5a31ffe06c097f30141a22f9f40a0347019be409 100644
--- a/packages/llamaindex/src/storage/StorageContext.ts
+++ b/packages/llamaindex/src/storage/StorageContext.ts
@@ -9,7 +9,7 @@ import type {
   BaseVectorStore,
   VectorStoreByType,
 } from "@llamaindex/core/vector-store";
-import type { ServiceContext } from "../ServiceContext.js";
+import { Settings } from "../Settings.js";
 import { SimpleVectorStore } from "../vector-store/SimpleVectorStore.js";
 import { SimpleDocumentStore } from "./docStore/SimpleDocumentStore.js";
 
@@ -25,10 +25,6 @@ type BuilderParams = {
   vectorStore: BaseVectorStore;
   vectorStores: VectorStoreByType;
   persistDir: string;
-  /**
-   * @deprecated Please use `Settings` instead
-   */
-  serviceContext?: ServiceContext | undefined;
 };
 
 export async function storageContextFromDefaults({
@@ -37,7 +33,6 @@ export async function storageContextFromDefaults({
   vectorStore,
   vectorStores,
   persistDir,
-  serviceContext,
 }: Partial<BuilderParams>): Promise<StorageContext> {
   vectorStores = vectorStores ?? {};
   if (!persistDir) {
@@ -47,7 +42,7 @@ export async function storageContextFromDefaults({
       vectorStores[ModalityType.TEXT] = vectorStore ?? new SimpleVectorStore();
     }
   } else {
-    const embedModel = serviceContext?.embedModel;
+    const embedModel = Settings.embedModel;
     docStore =
       docStore ||
       (await SimpleDocumentStore.fromPersistDir(persistDir, DEFAULT_NAMESPACE));
diff --git a/packages/llamaindex/tests/MetadataExtractors.test.ts b/packages/llamaindex/tests/MetadataExtractors.test.ts
index 5e32e878376465faaf5799524f899ed76a0504bc..29bb117abbac821be314bcc2a791506e22e2b734 100644
--- a/packages/llamaindex/tests/MetadataExtractors.test.ts
+++ b/packages/llamaindex/tests/MetadataExtractors.test.ts
@@ -1,7 +1,5 @@
 import { Document } from "@llamaindex/core/schema";
 import { Settings } from "llamaindex";
-import type { ServiceContext } from "llamaindex/ServiceContext";
-import { serviceContextFromDefaults } from "llamaindex/ServiceContext";
 import { OpenAIEmbedding } from "llamaindex/embeddings/index";
 import {
   KeywordExtractor,
@@ -19,8 +17,6 @@ import {
 } from "./utility/mockOpenAI.js";
 
 describe("[MetadataExtractor]: Extractors should populate the metadata", () => {
-  let serviceContext: ServiceContext;
-
   beforeAll(async () => {
     const languageModel = new OpenAI({
       model: "gpt-3.5-turbo",
@@ -33,11 +29,6 @@ describe("[MetadataExtractor]: Extractors should populate the metadata", () => {
     const embedModel = new OpenAIEmbedding();
 
     mockEmbeddingModel(embedModel);
-
-    serviceContext = serviceContextFromDefaults({
-      llm: languageModel,
-      embedModel,
-    });
   });
 
   afterAll(() => {
@@ -52,7 +43,7 @@ describe("[MetadataExtractor]: Extractors should populate the metadata", () => {
     ]);
 
     const keywordExtractor = new KeywordExtractor({
-      llm: serviceContext.llm,
+      llm: Settings.llm,
       keywords: 5,
     });
 
@@ -71,7 +62,7 @@ describe("[MetadataExtractor]: Extractors should populate the metadata", () => {
     ]);
 
     const titleExtractor = new TitleExtractor({
-      llm: serviceContext.llm,
+      llm: Settings.llm,
       nodes: 5,
     });
 
@@ -90,7 +81,7 @@ describe("[MetadataExtractor]: Extractors should populate the metadata", () => {
     ]);
 
     const questionsAnsweredExtractor = new QuestionsAnsweredExtractor({
-      llm: serviceContext.llm,
+      llm: Settings.llm,
       questions: 5,
     });
 
@@ -109,10 +100,10 @@ describe("[MetadataExtractor]: Extractors should populate the metadata", () => {
       new Document({ text: DEFAULT_LLM_TEXT_OUTPUT }),
     ]);
 
-    const llmCompleteSpy = vi.spyOn(serviceContext.llm, "complete");
+    const llmCompleteSpy = vi.spyOn(Settings.llm, "complete");
 
     const questionsAnsweredExtractor = new QuestionsAnsweredExtractor({
-      llm: serviceContext.llm,
+      llm: Settings.llm,
       questions: 5,
       promptTemplate: `This is a custom prompt template for {context} with {numQuestions} questions`,
     });
@@ -139,7 +130,7 @@ describe("[MetadataExtractor]: Extractors should populate the metadata", () => {
     ]);
 
     const summaryExtractor = new SummaryExtractor({
-      llm: serviceContext.llm,
+      llm: Settings.llm,
     });
 
     const nodesWithKeywordMetadata = await summaryExtractor.processNodes(nodes);
@@ -156,10 +147,10 @@ describe("[MetadataExtractor]: Extractors should populate the metadata", () => {
       new Document({ text: DEFAULT_LLM_TEXT_OUTPUT }),
     ]);
 
-    const llmCompleteSpy = vi.spyOn(serviceContext.llm, "complete");
+    const llmCompleteSpy = vi.spyOn(Settings.llm, "complete");
 
     const keywordExtractor = new KeywordExtractor({
-      llm: serviceContext.llm,
+      llm: Settings.llm,
       keywords: 5,
       promptTemplate: `This is a custom prompt template for {context} with {maxKeywords} keywords`,
     });
diff --git a/packages/llamaindex/tests/Selectors.test.ts b/packages/llamaindex/tests/Selectors.test.ts
index 4d4e804209d8b6b37df483365685192d4337ae7a..9fe1ace1760e016191e658f6f5e52631c6a32639 100644
--- a/packages/llamaindex/tests/Selectors.test.ts
+++ b/packages/llamaindex/tests/Selectors.test.ts
@@ -1,15 +1,12 @@
 import { describe, expect, test } from "vitest";
 // from unittest.mock import patch
 
-import { serviceContextFromDefaults } from "llamaindex/ServiceContext";
 import { OpenAI } from "llamaindex/llm/index";
 import { LLMSingleSelector } from "llamaindex/selectors/index";
 import { mocStructuredkLlmGeneration } from "./utility/mockOpenAI.js";
 
 describe("LLMSelector", () => {
   test("should be able to output a selection with a reason", async () => {
-    const serviceContext = serviceContextFromDefaults({});
-
     const languageModel = new OpenAI({
       model: "gpt-3.5-turbo",
     });
diff --git a/packages/llamaindex/tests/indices/SummaryIndex.test.ts b/packages/llamaindex/tests/indices/SummaryIndex.test.ts
index d273e78e060e867506fdb2c567232af65a2f56a3..c2a523e95c266a502ece510da680a3349e83a997 100644
--- a/packages/llamaindex/tests/indices/SummaryIndex.test.ts
+++ b/packages/llamaindex/tests/indices/SummaryIndex.test.ts
@@ -1,41 +1,45 @@
 import {
   Document,
+  OpenAIEmbedding,
+  Settings,
   SummaryIndex,
   VectorStoreIndex,
   storageContextFromDefaults,
-  type ServiceContext,
   type StorageContext,
 } from "llamaindex";
 import { rmSync } from "node:fs";
 import { mkdtemp } from "node:fs/promises";
 import { tmpdir } from "node:os";
 import { join } from "node:path";
-import { afterAll, beforeAll, describe, expect, it } from "vitest";
+import { afterAll, beforeAll, describe, expect, it, vi } from "vitest";
+import { mockEmbeddingModel } from "../utility/mockOpenAI.js";
 
 const testDir = await mkdtemp(join(tmpdir(), "test-"));
 
-import { mockServiceContext } from "../utility/mockServiceContext.js";
-
 describe("SummaryIndex", () => {
-  let serviceContext: ServiceContext;
   let storageContext: StorageContext;
 
   beforeAll(async () => {
-    serviceContext = mockServiceContext();
     storageContext = await storageContextFromDefaults({
       persistDir: testDir,
     });
+
+    const embedModel = new OpenAIEmbedding();
+    mockEmbeddingModel(embedModel);
+    Settings.embedModel = embedModel;
+  });
+
+  afterAll(() => {
+    vi.clearAllMocks();
   });
 
   it("SummaryIndex and VectorStoreIndex must be able to share the same storage context", async () => {
     const documents = [new Document({ text: "lorem ipsem", id_: "1" })];
     const vectorIndex = await VectorStoreIndex.fromDocuments(documents, {
-      serviceContext,
       storageContext,
     });
     expect(vectorIndex).toBeDefined();
     const summaryIndex = await SummaryIndex.fromDocuments(documents, {
-      serviceContext,
       storageContext,
     });
     expect(summaryIndex).toBeDefined();
diff --git a/packages/llamaindex/tests/indices/VectorStoreIndex.test.ts b/packages/llamaindex/tests/indices/VectorStoreIndex.test.ts
index 9bca51af7e9dfaae84789d8bee9618072bfec600..61c8299a28c5b82d64d07ae9650ce172f4638c07 100644
--- a/packages/llamaindex/tests/indices/VectorStoreIndex.test.ts
+++ b/packages/llamaindex/tests/indices/VectorStoreIndex.test.ts
@@ -1,18 +1,22 @@
-import type { ServiceContext, StorageContext } from "llamaindex";
-import { Document, VectorStoreIndex } from "llamaindex";
+import type { StorageContext } from "llamaindex";
+import {
+  Document,
+  OpenAIEmbedding,
+  Settings,
+  VectorStoreIndex,
+} from "llamaindex";
 import { DocStoreStrategy } from "llamaindex/ingestion/strategies/index";
 import { mkdtemp, rm } from "node:fs/promises";
 import { tmpdir } from "node:os";
 import { join } from "node:path";
-import { afterAll, beforeAll, describe, expect, test } from "vitest";
+import { afterAll, beforeAll, describe, expect, test, vi } from "vitest";
 
 const testDir = await mkdtemp(join(tmpdir(), "test-"));
 
-import { mockServiceContext } from "../utility/mockServiceContext.js";
+import { mockEmbeddingModel } from "../utility/mockOpenAI.js";
 import { mockStorageContext } from "../utility/mockStorageContext.js";
 
 describe("VectorStoreIndex", () => {
-  let serviceContext: ServiceContext;
   let storageContext: StorageContext;
   let testStrategy: (
     strategy: DocStoreStrategy,
@@ -20,7 +24,6 @@ describe("VectorStoreIndex", () => {
   ) => Promise<Array<number>>;
 
   beforeAll(async () => {
-    serviceContext = mockServiceContext();
     storageContext = await mockStorageContext(testDir);
     testStrategy = async (
       strategy: DocStoreStrategy,
@@ -30,7 +33,6 @@ describe("VectorStoreIndex", () => {
       const entries = [];
       for (let i = 0; i < runs; i++) {
         await VectorStoreIndex.fromDocuments(documents, {
-          serviceContext,
           storageContext,
           docStoreStrategy: strategy,
         });
@@ -39,6 +41,14 @@ describe("VectorStoreIndex", () => {
       }
       return entries;
     };
+
+    const embedModel = new OpenAIEmbedding();
+    mockEmbeddingModel(embedModel);
+    Settings.embedModel = embedModel;
+  });
+
+  afterAll(() => {
+    vi.clearAllMocks();
   });
 
   test("fromDocuments stores duplicates without a doc store strategy", async () => {
diff --git a/packages/llamaindex/tests/objects/ObjectIndex.test.ts b/packages/llamaindex/tests/objects/ObjectIndex.test.ts
index a68f44c440fd13a4b5f5cbb8d1c0bac1a4cba21c..64c8ff4282c890d30c2d37f7b4a6add4ca4cc0fa 100644
--- a/packages/llamaindex/tests/objects/ObjectIndex.test.ts
+++ b/packages/llamaindex/tests/objects/ObjectIndex.test.ts
@@ -1,19 +1,23 @@
-import type { ServiceContext } from "llamaindex";
 import {
   FunctionTool,
   ObjectIndex,
+  OpenAIEmbedding,
+  Settings,
   SimpleToolNodeMapping,
   VectorStoreIndex,
 } from "llamaindex";
-import { beforeAll, describe, expect, test } from "vitest";
-
-import { mockServiceContext } from "../utility/mockServiceContext.js";
+import { afterAll, beforeAll, describe, expect, test, vi } from "vitest";
+import { mockEmbeddingModel } from "../utility/mockOpenAI.js";
 
 describe("ObjectIndex", () => {
-  let serviceContext: ServiceContext;
+  beforeAll(async () => {
+    const embedModel = new OpenAIEmbedding();
+    mockEmbeddingModel(embedModel);
+    Settings.embedModel = embedModel;
+  });
 
-  beforeAll(() => {
-    serviceContext = mockServiceContext();
+  afterAll(() => {
+    vi.clearAllMocks();
   });
 
   test("test_object_with_tools", async () => {
@@ -51,14 +55,9 @@ describe("ObjectIndex", () => {
       [tool1, tool2],
       toolMapping,
       VectorStoreIndex,
-      {
-        serviceContext,
-      },
     );
 
-    const retriever = await objectRetriever.asRetriever({
-      serviceContext,
-    });
+    const retriever = await objectRetriever.asRetriever({});
 
     expect(await retriever.retrieve("test")).toStrictEqual([tool1, tool2]);
   });
@@ -98,9 +97,6 @@ describe("ObjectIndex", () => {
       [tool1],
       toolMapping,
       VectorStoreIndex,
-      {
-        serviceContext,
-      },
     );
 
     let tools = objectRetriever.tools;
diff --git a/packages/llamaindex/tests/utility/mockServiceContext.ts b/packages/llamaindex/tests/utility/mockServiceContext.ts
deleted file mode 100644
index f6843069c472637272449be8182d04f3ad844f3d..0000000000000000000000000000000000000000
--- a/packages/llamaindex/tests/utility/mockServiceContext.ts
+++ /dev/null
@@ -1,23 +0,0 @@
-import {
-  OpenAI,
-  OpenAIEmbedding,
-  serviceContextFromDefaults,
-} from "llamaindex";
-
-import {
-  mockEmbeddingModel,
-  mockLlmGeneration,
-} from "../utility/mockOpenAI.js";
-
-export function mockServiceContext() {
-  const embeddingModel = new OpenAIEmbedding();
-  const llm = new OpenAI();
-
-  mockEmbeddingModel(embeddingModel);
-  mockLlmGeneration({ languageModel: llm });
-
-  return serviceContextFromDefaults({
-    embedModel: embeddingModel,
-    llm,
-  });
-}