diff --git a/docs/docs/examples/embeddings/ipex_llm_gpu.ipynb b/docs/docs/examples/embeddings/ipex_llm_gpu.ipynb
index c4c1afb77c97bc7a049fc3ac76635726d0758df3..fdb9c8a53064a6b00a7bcf31a9a50ea0e2421c33 100644
--- a/docs/docs/examples/embeddings/ipex_llm_gpu.ipynb
+++ b/docs/docs/examples/embeddings/ipex_llm_gpu.ipynb
@@ -17,7 +17,7 @@
     "## Install Prerequisites\n",
     "To benefit from IPEX-LLM on Intel GPUs, there are several prerequisite steps for tools installation and environment preparation.\n",
     "\n",
-    "If you are a Windows user, visit the [Install IPEX-LLM on Windows with Intel GPU Guide](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/install_windows_gpu.html), and follow [**Install Prerequisites**](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/install_windows_gpu.html#install-prerequisites) to install Visual Studio 2022, GPU driver, Conda, and IntelĀ® oneAPI Base Toolkit 2024.0.\n",
+    "If you are a Windows user, visit the [Install IPEX-LLM on Windows with Intel GPU Guide](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/install_windows_gpu.html), and follow [**Install Prerequisites**](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/install_windows_gpu.html#install-prerequisites) to update GPU driver (optional) and install Conda.\n",
     "\n",
     "If you are a Linux user, visit the [Install IPEX-LLM on Linux with Intel GPU](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/install_linux_gpu.html), and follow [**Install Prerequisites**](https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/install_linux_gpu.html#install-prerequisites) to install GPU driver, IntelĀ® oneAPI Base Toolkit 2024.0, and Conda.\n",
     "\n",
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-ipex-llm/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-ipex-llm/pyproject.toml
index acf423c66ec887c67b828fd83e0856ee82de6dd5..a4596dcc17716ef9bc7c90384e70628ec470e2b6 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-ipex-llm/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-ipex-llm/pyproject.toml
@@ -35,10 +35,10 @@ version = "0.1.1"
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
 llama-index-core = "^0.10.0"
-ipex-llm = {allow-prereleases = true, extras = ["llama-index"], version = ">=2.1.0b20240423"}
-torch = {optional = true, version = "2.1.0a0"}
-torchvision = {optional = true, version = "0.16.0a0"}
-intel_extension_for_pytorch = {optional = true, version = "2.1.10+xpu"}
+ipex-llm = {allow-prereleases = true, extras = ["llama-index"], version = ">=2.1.0b20240514"}
+torch = {optional = true, source = "ipex-xpu-src-us", version = "2.1.0a0"}
+torchvision = {optional = true, source = "ipex-xpu-src-us", version = "0.16.0a0"}
+intel_extension_for_pytorch = {optional = true, source = "ipex-xpu-src-us", version = "2.1.10+xpu"}
 bigdl-core-xe-21 = {optional = true, version = "*"}
 bigdl-core-xe-esimd-21 = {optional = true, version = "*"}
 
@@ -63,3 +63,13 @@ types-protobuf = "^4.24.0.4"
 types-redis = "4.5.5.0"
 types-requests = "2.28.11.8"  # TODO: unpin when mypy>0.991
 types-setuptools = "67.1.0.0"
+
+[[tool.poetry.source]]
+name = "ipex-xpu-src-us"
+priority = "explicit"
+url = "https://pytorch-extension.intel.com/release-whl/stable/xpu/us/"
+
+[[tool.poetry.source]]
+name = "ipex-xpu-src-cn"
+priority = "supplemental"
+url = "https://pytorch-extension.intel.com/release-whl/stable/xpu/cn/"