diff --git a/docs/docs/examples/node_postprocessor/openvino_rerank.ipynb b/docs/docs/examples/node_postprocessor/openvino_rerank.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..dfe2788946298701726a6a91460d37f02b37f4be
--- /dev/null
+++ b/docs/docs/examples/node_postprocessor/openvino_rerank.ipynb
@@ -0,0 +1,328 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/node_postprocessor/openivno_rerank.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# OpenVINO Rerank\n",
+    "\n",
+    "[OpenVINOâ„¢](https://github.com/openvinotoolkit/openvino) is an open-source toolkit for optimizing and deploying AI inference. The OpenVINOâ„¢ Runtime supports various hardware [devices](https://github.com/openvinotoolkit/openvino?tab=readme-ov-file#supported-hardware-matrix) including x86 and ARM CPUs, and Intel GPUs. It can help to boost deep learning performance in Computer Vision, Automatic Speech Recognition, Natural Language Processing and other common tasks.\n",
+    "\n",
+    "Hugging Face rerank model can be supported by OpenVINO through ``OpenVINORerank`` class."
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install llama-index-postprocessor-openvino-rerank\n",
+    "%pip install llama-index-embeddings-openvino"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!pip install llama-index"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Download Data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!mkdir -p 'data/paul_graham/'\n",
+    "!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n",
+    "\n",
+    "# load documents\n",
+    "documents = SimpleDirectoryReader(\"./data/paul_graham/\").load_data()\n",
+    "\n",
+    "# build index\n",
+    "index = VectorStoreIndex.from_documents(documents=documents)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Download Embedding, Rerank models and LLM"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index.embeddings.huggingface_openvino import OpenVINOEmbedding\n",
+    "\n",
+    "OpenVINOEmbedding.create_and_save_openvino_model(\n",
+    "    \"BAAI/bge-small-en-v1.5\", \"./embedding_ov\"\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index.postprocessor.openvino_rerank import OpenVINORerank\n",
+    "\n",
+    "OpenVINORerank.create_and_save_openvino_model(\n",
+    "    \"BAAI/bge-reranker-large\", \"./rerank_ov\"\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!optimum-cli export openvino --model HuggingFaceH4/zephyr-7b-beta --weight-format int4 llm_ov"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Download models"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index.llms.openvino import OpenVINOLLM\n",
+    "from llama_index.core import Settings\n",
+    "\n",
+    "\n",
+    "Settings.embed_model = OpenVINOEmbedding(folder_name=\"./embedding_ov\")\n",
+    "Settings.llm = OpenVINOLLM(model_name=\"./llm_ov\", tokenizer_name=\"./llm_ov\")\n",
+    "ov_rerank = OpenVINORerank(model=\"./rerank_ov\", top_n=2)"
+   ]
+  },
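+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a quick sanity check, the reranker can also be called directly on a list of `NodeWithScore` objects through `postprocess_nodes`. The two toy nodes below are made up purely for illustration; with `top_n=2`, both come back re-scored and re-ordered by the model."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index.core.schema import NodeWithScore, TextNode\n",
+    "\n",
+    "# two hand-written toy nodes, only to exercise the reranker API\n",
+    "toy_nodes = [\n",
+    "    NodeWithScore(\n",
+    "        node=TextNode(text=\"OpenVINO accelerates inference on Intel hardware.\"),\n",
+    "        score=0.5,\n",
+    "    ),\n",
+    "    NodeWithScore(\n",
+    "        node=TextNode(text=\"Paul Graham wrote essays about startups.\"), score=0.5\n",
+    "    ),\n",
+    "]\n",
+    "reranked = ov_rerank.postprocess_nodes(toy_nodes, query_str=\"What is OpenVINO?\")\n",
+    "for node in reranked:\n",
+    "    print(node.score, node.node.get_content()[:60])"
+   ]
+  },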
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Retrieve top 10 most relevant nodes, then filter with OpenVINO Rerank"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "query_engine = index.as_query_engine(\n",
+    "    similarity_top_k=10,\n",
+    "    node_postprocessors=[ov_rerank],\n",
+    ")\n",
+    "response = query_engine.query(\n",
+    "    \"What did Sam Altman do in this essay?\",\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "\n",
+      "Sam Altman was asked by the author, Paul Graham, to become the president of Y Combinator (YC), a startup accelerator. Initially, Sam declined the offer as he wanted to start a startup to make nuclear reactors. However, the author continued to persuade him, and in October 2013, Sam agreed to take over YC starting with the winter 2014 batch. The author then stepped back from running YC and focused on other activities, including painting and writing essays.\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(response)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "> Source (Doc id: ae4297fa-670c-403c-a355-6fffe7e16835): Why not organize a summer program where they'd start startups instead? We wouldn't feel guilty for being in a sense fake investors, because they would in a similar sense be fake founders. So while ...\n",
+      "\n",
+      "> Source (Doc id: c55eddb9-33f8-46bb-82a1-cb7fa0c7f5b6): This seemed strange advice, because YC was doing great. But if there was one thing rarer than Rtm offering advice, it was Rtm being wrong. So this set me thinking. It was true that on my current tr...\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(response.get_formatted_sources(length=200))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Directly retrieve top 2 most similar nodes"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "query_engine = index.as_query_engine(\n",
+    "    similarity_top_k=2,\n",
+    ")\n",
+    "response = query_engine.query(\n",
+    "    \"What did Sam Altman do in this essay?\",\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Retrieved context is irrelevant and response is hallucinated."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "\n",
+      "Sam Altman is mentioned in the essay as the person who was asked to become the president of Y Combinator. He initially declined the offer but later agreed to take over starting with the winter 2014 batch. The author also mentions that they left running Y Combinator more and more to Sam, partly so he could learn the job, and partly because they were focused on their mother, who had cancer and passed away in January 2014.\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(response)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "> Source (Doc id: c55eddb9-33f8-46bb-82a1-cb7fa0c7f5b6): This seemed strange advice, because YC was doing great. But if there was one thing rarer than Rtm offering advice, it was Rtm being wrong. So this set me thinking. It was true that on my current tr...\n",
+      "\n",
+      "> Source (Doc id: 6b2c335f-1390-4e92-9171-3ba5d24b3826): I knew that online essays would be a marginal medium at first. Socially they'd seem more like rants posted by nutjobs on their GeoCities sites than the genteel and beautifully typeset compositions ...\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(response.get_formatted_sources(length=200))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For more information refer to:\n",
+    "\n",
+    "* [OpenVINO LLM guide](https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide.html).\n",
+    "\n",
+    "* [OpenVINO Documentation](https://docs.openvino.ai/2024/home.html).\n",
+    "\n",
+    "* [OpenVINO Get Started Guide](https://www.intel.com/content/www/us/en/content-details/819067/openvino-get-started-guide.html)."
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/BUILD b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/BUILD
new file mode 100644
index 0000000000000000000000000000000000000000..0896ca890d8bffd60a44fa824f8d57fecd73ee53
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/BUILD
@@ -0,0 +1,3 @@
+poetry_requirements(
+    name="poetry",
+)
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/Makefile b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..b9eab05aa370629a4a3de75df3ff64cd53887b68
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/Makefile
@@ -0,0 +1,17 @@
+GIT_ROOT ?= $(shell git rev-parse --show-toplevel)
+
+help:	## Show all Makefile targets.
+	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}'
+
+format:	## Run code autoformatters (black).
+	pre-commit install
+	git ls-files | xargs pre-commit run black --files
+
+lint:	## Run linters: pre-commit (black, ruff, codespell) and mypy
+	pre-commit install && git ls-files | xargs pre-commit run --show-diff-on-failure --files
+
+test:	## Run tests via pytest.
+	pytest tests
+
+watch-docs:	## Build and watch documentation.
+	sphinx-autobuild docs/ docs/_build/html --open-browser --watch $(GIT_ROOT)/llama_index/
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/README.md b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..962c158bcbb4237bcf7cdfce22253cb11942d06a
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/README.md
@@ -0,0 +1 @@
+# LlamaIndex Postprocessor Integration: OpenVINO Rerank
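+
+A minimal usage sketch (the model id is just an example; any Hugging Face
+cross-encoder style rerank model, or a local OpenVINO export of one, works):
+
+```python
+# pip install llama-index-postprocessor-openvino-rerank
+from llama_index.postprocessor.openvino_rerank import OpenVINORerank
+
+# Converts the model to OpenVINO IR on first load; point `model` at a
+# directory containing openvino_model.xml/.bin to reuse a saved export.
+ov_rerank = OpenVINORerank(model="BAAI/bge-reranker-large", top_n=2)
+
+# Plug into any query engine as a node postprocessor:
+# index.as_query_engine(similarity_top_k=10, node_postprocessors=[ov_rerank])
+```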
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/llama_index/postprocessor/openvino_rerank/BUILD b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/llama_index/postprocessor/openvino_rerank/BUILD
new file mode 100644
index 0000000000000000000000000000000000000000..db46e8d6c978c67e301dd6c47bee08c1b3fd141c
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/llama_index/postprocessor/openvino_rerank/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/llama_index/postprocessor/openvino_rerank/__init__.py b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/llama_index/postprocessor/openvino_rerank/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fe09f19dcfb775a71504751ed9480c12f5ff511
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/llama_index/postprocessor/openvino_rerank/__init__.py
@@ -0,0 +1,3 @@
+from llama_index.postprocessor.openvino_rerank.base import OpenVINORerank
+
+__all__ = ["OpenVINORerank"]
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/llama_index/postprocessor/openvino_rerank/base.py b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/llama_index/postprocessor/openvino_rerank/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1ce378e5b297dcde6b38755715c8f9040643922
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/llama_index/postprocessor/openvino_rerank/base.py
@@ -0,0 +1,175 @@
+from typing import Any, List, Optional, Dict
+from pathlib import Path
+import numpy as np
+
+from llama_index.core.bridge.pydantic import Field, PrivateAttr
+from llama_index.core.callbacks import CBEventType, EventPayload
+from llama_index.core.postprocessor.types import BaseNodePostprocessor
+from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle
+from llama_index.core.utils import infer_torch_device
+
+from transformers import AutoTokenizer
+from optimum.intel.openvino import OVModelForSequenceClassification
+
+
+class OpenVINORerank(BaseNodePostprocessor):
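+    """OpenVINO-accelerated reranker for retrieved nodes.
+
+    Wraps a Hugging Face cross-encoder style rerank model through
+    ``optimum.intel.OVModelForSequenceClassification``, converting it to
+    OpenVINO IR on the fly when no exported ``openvino_model.xml``/``.bin``
+    pair is found locally or on the Hub.
+    """
+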
+    model: str = Field(description="Huggingface model id or local path.")
+    top_n: int = Field(description="Number of nodes to return sorted by score.")
+    keep_retrieval_score: bool = Field(
+        default=False,
+        description="Whether to keep the retrieval score in metadata.",
+    )
+    _model: Any = PrivateAttr()
+    _tokenizer: Any = PrivateAttr()
+
+    def __init__(
+        self,
+        top_n: int = 3,
+        model: str = "BAAI/bge-reranker-large",
+        tokenizer: str = "BAAI/bge-reranker-large",
+        device: Optional[str] = "auto",
+        model_kwargs: Dict[str, Any] = {},
+        keep_retrieval_score: Optional[bool] = False,
+    ):
+        device = infer_torch_device() if device is None else device
+
+        try:
+            from huggingface_hub import HfApi
+        except ImportError as e:
+            raise ValueError(
+                "Could not import huggingface_hub python package. "
+                "Please install it with: "
+                "`pip install -U huggingface_hub`."
+            ) from e
+
+        def require_model_export(
+            model_id: str, revision: Any = None, subfolder: Any = None
+        ) -> bool:
+            # True when no pre-converted OpenVINO IR (openvino_model.xml/.bin)
+            # can be found locally or on the Hugging Face Hub, i.e. the model
+            # still needs to be exported
+            model_dir = Path(model_id)
+            if subfolder is not None:
+                model_dir = model_dir / subfolder
+            if model_dir.is_dir():
+                return (
+                    not (model_dir / "openvino_model.xml").exists()
+                    or not (model_dir / "openvino_model.bin").exists()
+                )
+            hf_api = HfApi()
+            try:
+                model_info = hf_api.model_info(model_id, revision=revision or "main")
+                normalized_subfolder = (
+                    None if subfolder is None else Path(subfolder).as_posix()
+                )
+                model_files = [
+                    file.rfilename
+                    for file in model_info.siblings
+                    if normalized_subfolder is None
+                    or file.rfilename.startswith(normalized_subfolder)
+                ]
+                ov_model_path = (
+                    "openvino_model.xml"
+                    if subfolder is None
+                    else f"{normalized_subfolder}/openvino_model.xml"
+                )
+                return (
+                    ov_model_path not in model_files
+                    or ov_model_path.replace(".xml", ".bin") not in model_files
+                )
+            except Exception:
+                return True
+
+        if require_model_export(model):
+            # use remote model
+            self._model = OVModelForSequenceClassification.from_pretrained(
+                model, export=True, device=device, **model_kwargs
+            )
+        else:
+            # use local model
+            self._model = OVModelForSequenceClassification.from_pretrained(
+                model, device=device, **model_kwargs
+            )
+
+        self._tokenizer = AutoTokenizer.from_pretrained(tokenizer)
+        super().__init__(
+            top_n=top_n,
+            model=model,
+            keep_retrieval_score=keep_retrieval_score,
+        )
+
+    @classmethod
+    def class_name(cls) -> str:
+        return "OpenVINORerank"
+
+    @staticmethod
+    def create_and_save_openvino_model(
+        model_name_or_path: str,
+        output_path: str,
+        export_kwargs: Optional[dict] = None,
+    ) -> None:
+        export_kwargs = export_kwargs or {}
+        model = OVModelForSequenceClassification.from_pretrained(
+            model_name_or_path, export=True, compile=False, **export_kwargs
+        )
+        tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
+
+        model.save_pretrained(output_path)
+        tokenizer.save_pretrained(output_path)
+        print(
+            f"Saved OpenVINO model to {output_path}. Use it with "
+            f"`rerank = OpenVINORerank(model='{output_path}')`."
+        )
+
+    def _postprocess_nodes(
+        self,
+        nodes: List[NodeWithScore],
+        query_bundle: Optional[QueryBundle] = None,
+    ) -> List[NodeWithScore]:
+        if query_bundle is None:
+            raise ValueError("Missing query bundle in extra info.")
+        if len(nodes) == 0:
+            return []
+
+        nodes_text_list = [
+            str(node.node.get_content(metadata_mode=MetadataMode.EMBED))
+            for node in nodes
+        ]
+
+        with self.callback_manager.event(
+            CBEventType.RERANKING,
+            payload={
+                EventPayload.NODES: nodes,
+                EventPayload.MODEL_NAME: self.model,
+                EventPayload.QUERY_STR: query_bundle.query_str,
+                EventPayload.TOP_K: self.top_n,
+            },
+        ) as event:
+            query_pairs = [[query_bundle.query_str, text] for text in nodes_text_list]
+            input_tensors = self._tokenizer(
+                query_pairs, padding=True, truncation=True, return_tensors="pt"
+            )
+
+            outputs = self._model(**input_tensors, return_dict=True)
+            # single-logit models score relevance directly; two-logit models
+            # emit [not relevant, relevant], so take the second column
+            if outputs[0].shape[1] > 1:
+                scores = outputs[0][:, 1]
+            else:
+                scores = outputs[0].flatten()
+
+            # squash logits into (0, 1) with a sigmoid
+            scores = list(1 / (1 + np.exp(-scores)))
+
+            assert len(scores) == len(nodes)
+
+            for node, score in zip(nodes, scores):
+                if self.keep_retrieval_score:
+                    # keep the retrieval score in metadata
+                    node.node.metadata["retrieval_score"] = node.score
+                node.score = float(score)
+
+            # sort descending by rerank score and keep the top_n nodes
+            reranked_nodes = sorted(nodes, key=lambda x: -x.score if x.score else 0)[
+                : self.top_n
+            ]
+            event.on_end(payload={EventPayload.NODES: reranked_nodes})
+
+        return reranked_nodes
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/pyproject.toml b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..b029fe8a617d4a363e287b38350ba8d0ee4fb140
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/pyproject.toml
@@ -0,0 +1,67 @@
+[build-system]
+build-backend = "poetry.core.masonry.api"
+requires = ["poetry-core"]
+
+[tool.codespell]
+check-filenames = true
+check-hidden = true
+skip = "*.csv,*.html,*.json,*.jsonl,*.pdf,*.txt,*.ipynb"
+
+[tool.llamahub]
+contains_example = false
+import_path = "llama_index.postprocessor.openvino_rerank"
+
+[tool.llamahub.class_authors]
+OpenVINORerank = "llama-index"
+
+[tool.mypy]
+disallow_untyped_defs = true
+exclude = ["_static", "build", "examples", "notebooks", "venv"]
+ignore_missing_imports = true
+python_version = "3.9"
+
+[tool.poetry]
+authors = ["Your Name <you@example.com>"]
+description = "llama-index postprocessor openvino rerank integration"
+exclude = ["**/BUILD"]
+license = "MIT"
+name = "llama-index-postprocessor-openvino-rerank"
+readme = "README.md"
+version = "0.1.2"
+
+[tool.poetry.dependencies]
+python = ">=3.8.1,<4.0"
+llama-index-core = "^0.10.1"
+huggingface-hub = "^0.20.3"
+
+[tool.poetry.dependencies.optimum]
+extras = ["openvino"]
+version = "^1.18.0"
+
+[tool.poetry.group.dev.dependencies]
+ipython = "8.10.0"
+jupyter = "^1.0.0"
+mypy = "0.991"
+pre-commit = "3.2.0"
+pylint = "2.15.10"
+pytest = "7.2.1"
+pytest-mock = "3.11.1"
+ruff = "0.0.292"
+tree-sitter-languages = "^1.8.0"
+types-Deprecated = ">=0.1.0"
+types-PyYAML = "^6.0.12.12"
+types-protobuf = "^4.24.0.4"
+types-redis = "4.5.5.0"
+types-requests = "2.28.11.8"
+types-setuptools = "67.1.0.0"
+
+[tool.poetry.group.dev.dependencies.black]
+extras = ["jupyter"]
+version = "<=23.9.1,>=23.7.0"
+
+[tool.poetry.group.dev.dependencies.codespell]
+extras = ["toml"]
+version = ">=v2.2.6"
+
+[[tool.poetry.packages]]
+include = "llama_index/"
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/tests/BUILD b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/tests/BUILD
new file mode 100644
index 0000000000000000000000000000000000000000..dabf212d7e7162849c24a733909ac4f645d75a31
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/tests/BUILD
@@ -0,0 +1 @@
+python_tests()
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/tests/__init__.py b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/tests/test_postprocessor_openvino_rerank.py b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/tests/test_postprocessor_openvino_rerank.py
new file mode 100644
index 0000000000000000000000000000000000000000..da976516bdae51911c621c96721ecb66eb874425
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-openvino-rerank/tests/test_postprocessor_openvino_rerank.py
@@ -0,0 +1,7 @@
+from llama_index.core.postprocessor.types import BaseNodePostprocessor
+from llama_index.postprocessor.openvino_rerank import OpenVINORerank
+
+
+def test_class():
+    names_of_base_classes = [b.__name__ for b in OpenVINORerank.__mro__]
+    assert BaseNodePostprocessor.__name__ in names_of_base_classes
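+
+
+def test_class_name():
+    # class_name() is a plain classmethod (see base.py), so this runs
+    # without downloading or compiling any model
+    assert OpenVINORerank.class_name() == "OpenVINORerank"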