diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/llama_index/embeddings/nvidia/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/llama_index/embeddings/nvidia/base.py
index 7c496954b73ac4b98a7a0285681d2271427ac74e..70793e61ee94f4d53d56a3c39b9cf7733798f3da 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/llama_index/embeddings/nvidia/base.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/llama_index/embeddings/nvidia/base.py
@@ -13,7 +13,6 @@ from llama_index.core.callbacks.base import CallbackManager
 from llama_index.core.base.llms.generic_utils import get_from_param_or_env
 
 from openai import OpenAI, AsyncOpenAI
-from urllib.parse import urlparse, urlunparse
 from .utils import (
     EMBEDDING_MODEL_TABLE,
     BASE_URL,
@@ -120,14 +119,10 @@ class NVIDIAEmbedding(BaseEmbedding):
         )
 
         self._is_hosted = self.base_url in KNOWN_URLS
-        if not self._is_hosted:
-            self.base_url = self._validate_url(self.base_url)
 
         if self._is_hosted:  # hosted on API Catalog (build.nvidia.com)
             if api_key == "NO_API_KEY_PROVIDED":
                 raise ValueError("An API key is required for hosted NIM.")
-        else:  # not hosted
-            self.base_url = self._validate_url(self.base_url)
 
         self._client = OpenAI(
             api_key=api_key,
@@ -176,38 +171,6 @@ class NVIDIAEmbedding(BaseEmbedding):
         else:
             self.model = self.model or DEFAULT_MODEL
 
-    def _validate_url(self, base_url):
-        """
-        validate the base_url.
-        if the base_url is not a url, raise an error
-        if the base_url does not end in /v1, e.g. /embeddings
-        emit a warning. old documentation told users to pass in the full
-        inference url, which is incorrect and prevents model listing from working.
-        normalize base_url to end in /v1.
-        """
-        if base_url is not None:
-            parsed = urlparse(base_url)
-
-            # Ensure scheme and netloc (domain name) are present
-            if not (parsed.scheme and parsed.netloc):
-                expected_format = "Expected format is: http://host:port"
-                raise ValueError(
-                    f"Invalid base_url format. {expected_format} Got: {base_url}"
-                )
-
-            normalized_path = parsed.path.rstrip("/")
-            if not normalized_path.endswith("/v1"):
-                warnings.warn(
-                    f"{base_url} does not end in /v1, you may "
-                    "have inference and listing issues"
-                )
-                normalized_path += "/v1"
-
-                base_url = urlunparse(
-                    (parsed.scheme, parsed.netloc, normalized_path, None, None, None)
-                )
-        return base_url
-
     def _validate_model(self, model_name: str) -> None:
         """
         Validates compatibility of the hosted model with the client.
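
For reference, the `_validate_url` helper deleted above is what normalized user-supplied URLs before this change. Below is a minimal standalone sketch of that behavior for callers who relied on it; the helper name `normalize_base_url` is ours for illustration, not part of the package:

import warnings
from urllib.parse import urlparse, urlunparse

def normalize_base_url(base_url: str) -> str:
    # Mirrors the removed validation: require a scheme and a host.
    parsed = urlparse(base_url)
    if not (parsed.scheme and parsed.netloc):
        raise ValueError(
            f"Invalid base_url format. Expected format is: http://host:port Got: {base_url}"
        )
    # Warn on and normalize paths that do not end in /v1, as the old code did.
    normalized_path = parsed.path.rstrip("/")
    if not normalized_path.endswith("/v1"):
        warnings.warn(
            f"{base_url} does not end in /v1, you may have inference and listing issues"
        )
        normalized_path += "/v1"
        base_url = urlunparse(
            (parsed.scheme, parsed.netloc, normalized_path, None, None, None)
        )
    return base_url
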
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/pyproject.toml
index 54ee33e495ac8a0bc7596beb41e1259966fa3da9..25154bf81340c32e9bc502bddaa484000f1dd843 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/pyproject.toml
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-embeddings-nvidia"
 readme = "README.md"
-version = "0.3.2"
+version = "0.3.3"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/tests/test_base_url.py b/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/tests/test_base_url.py
index 56472f6f18fc9528106f9160bc79715492ba7fe3..d94c7f1721ca04ff3da6f44e37a1d55b44f8aac8 100644
--- a/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/tests/test_base_url.py
+++ b/llama-index-integrations/embeddings/llama-index-embeddings-nvidia/tests/test_base_url.py
@@ -56,6 +56,8 @@ def test_base_url_priority(public_class: type, monkeypatch) -> None:
         assert get_base_url(base_url=PARAM_URL) == PARAM_URL
 
 
+# Skipped because base_url validation was removed.
+@pytest.mark.skip(reason="base_url validation is removed")
 @pytest.mark.parametrize(
     "base_url",
     [
@@ -75,6 +77,8 @@ def test_param_base_url_negative(
     assert "Invalid base_url" in str(e.value)
 
 
+# Skipped because base_url validation was removed.
+@pytest.mark.skip(reason="base_url validation is removed")
 @pytest.mark.parametrize(
     "base_url",
     [
diff --git a/llama-index-integrations/llms/llama-index-llms-nvidia/llama_index/llms/nvidia/base.py b/llama-index-integrations/llms/llama-index-llms-nvidia/llama_index/llms/nvidia/base.py
index a16ced661a3a5848751cf66b825448ce36206df3..5bec9dfef84854a6125ed65c65d120b1d04b46b1 100644
--- a/llama-index-integrations/llms/llama-index-llms-nvidia/llama_index/llms/nvidia/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-nvidia/llama_index/llms/nvidia/base.py
@@ -11,7 +11,6 @@ from llama_index.core.base.llms.generic_utils import (
 
 from llama_index.llms.openai_like import OpenAILike
 from llama_index.core.llms.function_calling import FunctionCallingLLM
-from urllib.parse import urlparse
 from llama_index.core.base.llms.types import (
     ChatMessage,
     ChatResponse,
@@ -128,27 +127,6 @@ class NVIDIA(OpenAILike, FunctionCallingLLM):
         else:
             self.model = DEFAULT_MODEL
 
-    def _validate_url(self, base_url):
-        """
-        validate the base_url.
-        if the base_url is not a url, raise an error
-        if the base_url does not end in /v1, e.g. /completions, /chat/completions,
-        emit a warning. old documentation told users to pass in the full
-        inference url, which is incorrect and prevents model listing from working.
-        normalize base_url to end in /v1.
-        """
-        if base_url is not None:
-            base_url = base_url.rstrip("/")
-            parsed = urlparse(base_url)
-
-            # Ensure scheme and netloc (domain name) are present
-            if not (parsed.scheme and parsed.netloc):
-                expected_format = "Expected format is: http://host:port"
-                raise ValueError(
-                    f"Invalid base_url format. {expected_format} Got: {base_url}"
-                )
-        return base_url
-
     def _validate_model(self, model_name: str) -> None:
         """
         Validates compatibility of the hosted model with the client.
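
With the validators gone, the embeddings, LLM, and rerank connectors use the caller's base_url as-is, so a self-hosted endpoint should include the /v1 suffix itself. An illustrative usage sketch; the host, port, and model id are placeholders, not values from this change:

from llama_index.llms.nvidia import NVIDIA

# base_url is no longer normalized or validated, so supply the full /v1 URL.
llm = NVIDIA(base_url="http://localhost:8000/v1", model="meta/llama3-8b-instruct")
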
diff --git a/llama-index-integrations/llms/llama-index-llms-nvidia/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-nvidia/pyproject.toml
index f34069f9a6fc75eae6ac988fb96f1e26833cc62a..755e7856aed7c430befe7d9dbae91527d8d2add6 100644
--- a/llama-index-integrations/llms/llama-index-llms-nvidia/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-nvidia/pyproject.toml
@@ -30,7 +30,7 @@ license = "MIT"
 name = "llama-index-llms-nvidia"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.3.2"
+version = "0.3.3"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-nvidia-rerank/llama_index/postprocessor/nvidia_rerank/base.py b/llama-index-integrations/postprocessor/llama-index-postprocessor-nvidia-rerank/llama_index/postprocessor/nvidia_rerank/base.py
index a1938d49339345e9ab51fd39544fa364b111d305..811cecebb87d8d48260da6aae414ebd1297b8ff0 100644
--- a/llama-index-integrations/postprocessor/llama-index-postprocessor-nvidia-rerank/llama_index/postprocessor/nvidia_rerank/base.py
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-nvidia-rerank/llama_index/postprocessor/nvidia_rerank/base.py
@@ -1,6 +1,5 @@
 from typing import Any, List, Optional, Generator, Literal
 import os
-from urllib.parse import urlparse, urlunparse
 import httpx
 
 from llama_index.core.bridge.pydantic import Field, PrivateAttr, ConfigDict
@@ -110,8 +109,6 @@ class NVIDIARerank(BaseNodePostprocessor):
         if self._is_hosted:  # hosted on API Catalog (build.nvidia.com)
             if (not self._api_key) or (self._api_key == "NO_API_KEY_PROVIDED"):
                 raise ValueError("An API key is required for hosted NIM.")
-        else:  # not hosted
-            self.base_url = self._validate_url(self.base_url)
 
         self.model = model
         if not self.model:
@@ -210,65 +207,6 @@ class NVIDIARerank(BaseNodePostprocessor):
         else:
             return RANKING_MODEL_TABLE
 
-    def _validate_url(self, base_url):
-        """
-        validate the base_url.
-        if the base_url is not a url, raise an error
-        if the base_url does not end in /v1, e.g. /embeddings
-        emit a warning. old documentation told users to pass in the full
-        inference url, which is incorrect and prevents model listing from working.
-        normalize base_url to end in /v1.
-        validate the base_url.
-        if the base_url is not a url, raise an error
-        if the base_url does not end in /v1, e.g. /embeddings
-        emit a warning. old documentation told users to pass in the full
-        inference url, which is incorrect and prevents model listing from working.
-        normalize base_url to end in /v1.
-        """
-        if base_url is not None:
-            parsed = urlparse(base_url)
-
-            # Ensure scheme and netloc (domain name) are present
-            if not (parsed.scheme and parsed.netloc):
-                expected_format = "Expected format is: http://host:port"
-                raise ValueError(
-                    f"Invalid base_url format. {expected_format} Got: {base_url}"
-                )
-
-            normalized_path = parsed.path.rstrip("/")
-            if not normalized_path.endswith("/v1"):
-                warnings.warn(
-                    f"{base_url} does not end in /v1, you may "
-                    "have inference and listing issues"
-                )
-                normalized_path += "/v1"
-
-                base_url = urlunparse(
-                    (parsed.scheme, parsed.netloc, normalized_path, None, None, None)
-                )
-        if base_url is not None:
-            parsed = urlparse(base_url)
-
-            # Ensure scheme and netloc (domain name) are present
-            if not (parsed.scheme and parsed.netloc):
-                expected_format = "Expected format is: http://host:port"
-                raise ValueError(
-                    f"Invalid base_url format. {expected_format} Got: {base_url}"
-                )
-
-            normalized_path = parsed.path.rstrip("/")
-            if not normalized_path.endswith("/v1"):
-                warnings.warn(
-                    f"{base_url} does not end in /v1, you may "
-                    "have inference and listing issues"
-                )
-                normalized_path += "/v1"
-
-                base_url = urlunparse(
-                    (parsed.scheme, parsed.netloc, normalized_path, None, None, None)
-                )
-        return base_url
-
     def _validate_model(self, model_name: str) -> None:
         """
         Validates compatibility of the hosted model with the client.
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-nvidia-rerank/pyproject.toml b/llama-index-integrations/postprocessor/llama-index-postprocessor-nvidia-rerank/pyproject.toml
index a59369273279524b3dd1cc079ea89d1cb620aab6..f619135f9385c67ebe9cfec44397227abd0cd473 100644
--- a/llama-index-integrations/postprocessor/llama-index-postprocessor-nvidia-rerank/pyproject.toml
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-nvidia-rerank/pyproject.toml
@@ -30,7 +30,7 @@ license = "MIT"
 name = "llama-index-postprocessor-nvidia-rerank"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.4.2"
+version = "0.4.3"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-nvidia-rerank/tests/test_base_url.py b/llama-index-integrations/postprocessor/llama-index-postprocessor-nvidia-rerank/tests/test_base_url.py
index f2108f10bf082033239316346f152499281ac9bd..0e16bf9d931aad75c582ef474672143ef8902a11 100644
--- a/llama-index-integrations/postprocessor/llama-index-postprocessor-nvidia-rerank/tests/test_base_url.py
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-nvidia-rerank/tests/test_base_url.py
@@ -31,7 +31,8 @@ def mock_v1_local_models2(respx_mock: respx.MockRouter, base_url: str) -> None:
     )
 
 
-# Updated test for non-hosted URLs that may need normalization.
+# Skipped because base_url validation was removed.
+@pytest.mark.skip(reason="base_url validation is removed")
 @pytest.mark.parametrize(
     "base_url",
     [
@@ -98,6 +99,8 @@ def test_proxy_base_url(base_url: str, mock_v1_local_models2: None) -> None:
     assert client.base_url == base_url
 
 
+# Skipped because base_url validation was removed.
+@pytest.mark.skip(reason="base_url validation is removed")
 @pytest.mark.parametrize(
     "base_url",
     [