diff --git a/docs/getting_started/starter_example_local.md b/docs/getting_started/starter_example_local.md
index 4785a2ac1f849a78e9685b35ed48480fd09e56f5..85dd5723bea7cbfe24144c0d80b2321fd81eefea 100644
--- a/docs/getting_started/starter_example_local.md
+++ b/docs/getting_started/starter_example_local.md
@@ -4,7 +4,13 @@
 Make sure you've followed the [custom installation](installation.md) steps first.
 ```
 
-This is our famous "5 lines of code" starter example with local LLM and embedding models. We will use `BAAI/bge-m3` as our embedding model and `Mistral-7B` served through `Ollama` as our LLM.
+This is our famous "5 lines of code" starter example with a local LLM and embedding model. We will use `BAAI/bge-small-en-v1.5` as our embedding model and `Mistral-7B`, served through `Ollama`, as our LLM.
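+
+If you haven't already, pull the Mistral model so Ollama can serve it locally (this assumes Ollama is installed and running):
+
+```bash
+ollama pull mistral
+```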
 
 ## Download data
 
@@ -33,7 +39,8 @@ from llama_index.llms.ollama import Ollama
 
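+# load documents from the local ./data directory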
 documents = SimpleDirectoryReader("data").load_data()
 
-# bge-m3 embedding model
+# bge-small-en-v1.5 embedding model
 Settings.embed_model = resolve_embed_model("local:BAAI/bge-small-en-v1.5")
 
 # ollama