diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/.gitignore b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..990c18de229088f55c6c514fd0f2d49981d1b0e7
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/.gitignore
@@ -0,0 +1,153 @@
+llama_index/_static
+.DS_Store
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+bin/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+etc/
+include/
+lib/
+lib64/
+parts/
+sdist/
+share/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+.ruff_cache
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+notebooks/
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+pyvenv.cfg
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# Jetbrains
+.idea
+modules/
+*.swp
+
+# VsCode
+.vscode
+
+# pipenv
+Pipfile
+Pipfile.lock
+
+# pyright
+pyrightconfig.json
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/BUILD b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/BUILD
new file mode 100644
index 0000000000000000000000000000000000000000..0896ca890d8bffd60a44fa824f8d57fecd73ee53
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/BUILD
@@ -0,0 +1,3 @@
+poetry_requirements(
+    name="poetry",
+)
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/Makefile b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..b9eab05aa370629a4a3de75df3ff64cd53887b68
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/Makefile
@@ -0,0 +1,17 @@
+GIT_ROOT ?= $(shell git rev-parse --show-toplevel)
+
+help:	## Show all Makefile targets.
+	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}'
+
+format:	## Run code autoformatters (black).
+	pre-commit install
+	git ls-files | xargs pre-commit run black --files
+
+lint:	## Run linters: pre-commit (black, ruff, codespell) and mypy
+	pre-commit install && git ls-files | xargs pre-commit run --show-diff-on-failure --files
+
+test:	## Run tests via pytest.
+	pytest tests
+
+watch-docs:	## Build and watch documentation.
+	sphinx-autobuild docs/ docs/_build/html --open-browser --watch $(GIT_ROOT)/llama_index/
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/README.md b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7df602080b810829474642a1abf0c814b7c3a010
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/README.md
@@ -0,0 +1 @@
+# LlamaIndex Postprocessor Integration: Voyageai-Rerank
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/llama_index/postprocessor/voyageai_rerank/BUILD b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/llama_index/postprocessor/voyageai_rerank/BUILD
new file mode 100644
index 0000000000000000000000000000000000000000..db46e8d6c978c67e301dd6c47bee08c1b3fd141c
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/llama_index/postprocessor/voyageai_rerank/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/llama_index/postprocessor/voyageai_rerank/__init__.py b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/llama_index/postprocessor/voyageai_rerank/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec28fc145fd71d517e62d7d65d90c2232cf982b5
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/llama_index/postprocessor/voyageai_rerank/__init__.py
@@ -0,0 +1,3 @@
+from llama_index.postprocessor.voyageai_rerank.base import VoyageAIRerank
+
+__all__ = ["VoyageAIRerank"]
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/llama_index/postprocessor/voyageai_rerank/base.py b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/llama_index/postprocessor/voyageai_rerank/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..d802967b8c0ae885ea277562ce31fd36a584c325
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/llama_index/postprocessor/voyageai_rerank/base.py
@@ -0,0 +1,77 @@
+from typing import Any, List, Optional
+
+from llama_index.core.bridge.pydantic import Field, PrivateAttr
+from llama_index.core.callbacks import CBEventType, EventPayload
+from llama_index.core.postprocessor.types import BaseNodePostprocessor
+from llama_index.core.schema import NodeWithScore, QueryBundle
+
+
+class VoyageAIRerank(BaseNodePostprocessor):
+    model: str = Field(description="Name of the model to use.")
+    top_k: Optional[int] = Field(
+        description="The number of most relevant documents to return. If not specified, the reranking results of all documents will be returned."
+    )
+    truncation: Optional[bool] = Field(
+        description="Whether to truncate the input to satisfy the 'context length limit' on the query and the documents."
+    )
+
+    _client: Any = PrivateAttr()
+
+    def __init__(
+        self,
+        api_key: str,
+        model: str,
+        top_k: Optional[int] = None,
+        truncation: Optional[bool] = None,
+    ):
+        try:
+            from voyageai import Client
+        except ImportError:
+            raise ImportError(
+                "Cannot import voyageai package, please `pip install voyageai`."
+            )
+
+        self._client = Client(api_key=api_key)
+        super().__init__(top_k=top_k, model=model, truncation=truncation)
+
+    @classmethod
+    def class_name(cls) -> str:
+        return "VoyageAIRerank"
+
+    def _postprocess_nodes(
+        self,
+        nodes: List[NodeWithScore],
+        query_bundle: Optional[QueryBundle] = None,
+    ) -> List[NodeWithScore]:
+        if query_bundle is None:
+            raise ValueError("Missing query bundle in extra info.")
+        if len(nodes) == 0:
+            return []
+
+        with self.callback_manager.event(
+            CBEventType.RERANKING,
+            payload={
+                EventPayload.NODES: nodes,
+                EventPayload.MODEL_NAME: self.model,
+                EventPayload.QUERY_STR: query_bundle.query_str,
+                EventPayload.TOP_K: self.top_k,
+            },
+        ) as event:
+            texts = [node.node.get_content() for node in nodes]
+            results = self._client.rerank(
+                model=self.model,
+                top_k=self.top_k,
+                query=query_bundle.query_str,
+                documents=texts,
+                truncation=self.truncation,
+            )
+
+            new_nodes = []
+            for result in results.results:
+                new_node_with_score = NodeWithScore(
+                    node=nodes[result.index].node, score=result.relevance_score
+                )
+                new_nodes.append(new_node_with_score)
+            event.on_end(payload={EventPayload.NODES: new_nodes})
+
+        return new_nodes
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/pyproject.toml b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..3bbb058549d9988dfc26227bdd3a7487d95411d8
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/pyproject.toml
@@ -0,0 +1,50 @@
+[build-system]
+build-backend = "poetry.core.masonry.api"
+requires = ["poetry-core"]
+
+[tool.codespell]
+check-filenames = true
+check-hidden = true
+# Feel free to un-skip examples, and experimental, you will just need to
+# work through many typos (--write-changes and --interactive will help)
+skip = "*.csv,*.html,*.json,*.jsonl,*.pdf,*.txt,*.ipynb"
+
+[tool.mypy]
+disallow_untyped_defs = true
+# Remove venv skip when integrated with pre-commit
+exclude = ["_static", "build", "examples", "notebooks", "venv"]
+ignore_missing_imports = true
+python_version = "3.8"
+
+[tool.poetry]
+authors = ["Your Name <you@example.com>"]
+description = "llama-index postprocessor voyageai-rerank integration"
+license = "MIT"
+name = "llama-index-postprocessor-voyageai-rerank"
+packages = [{include = "llama_index/"}]
+readme = "README.md"
+version = "0.1.0"
+
+[tool.poetry.dependencies]
+python = ">=3.8.1,<4.0"
+llama-index-core = "^0.10.0"
+voyageai = "^0.2.1"
+
+[tool.poetry.group.dev.dependencies]
+black = {extras = ["jupyter"], version = "<=23.9.1,>=23.7.0"}
+codespell = {extras = ["toml"], version = ">=v2.2.6"}
+ipython = "8.10.0"
+jupyter = "^1.0.0"
+mypy = "0.991"
+pre-commit = "3.2.0"
+pylint = "2.15.10"
+pytest = "7.2.1"
+pytest-mock = "3.11.1"
+ruff = "0.0.292"
+tree-sitter-languages = "^1.8.0"
+types-Deprecated = ">=0.1.0"
+types-PyYAML = "^6.0.12.12"
+types-protobuf = "^4.24.0.4"
+types-redis = "4.5.5.0"
+types-requests = "2.28.11.8"  # TODO: unpin when mypy>0.991
+types-setuptools = "67.1.0.0"
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/tests/BUILD b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/tests/BUILD
new file mode 100644
index 0000000000000000000000000000000000000000..dabf212d7e7162849c24a733909ac4f645d75a31
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/tests/BUILD
@@ -0,0 +1 @@
+python_tests()
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/tests/__init__.py b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/tests/test_postprocessor_voyageai-rerank.py b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/tests/test_postprocessor_voyageai-rerank.py
new file mode 100644
index 0000000000000000000000000000000000000000..da9f17da9b521b4aa9baa3f9c43a2491e2e1a6d6
--- /dev/null
+++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/tests/test_postprocessor_voyageai-rerank.py
@@ -0,0 +1,7 @@
+from llama_index.core.postprocessor.types import BaseNodePostprocessor
+from llama_index.postprocessor.voyageai_rerank import VoyageAIRerank
+
+
+def test_class():
+    names_of_base_classes = [b.__name__ for b in VoyageAIRerank.__mro__]
+    assert BaseNodePostprocessor.__name__ in names_of_base_classes
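
For reviewers, a minimal usage sketch of the new postprocessor (not part of the patch): the toy documents, the example model name "rerank-lite-1", and the VOYAGE_API_KEY environment variable are illustrative assumptions, and a default embedding model/LLM is assumed to be configured for llama-index-core.

import os

from llama_index.core import Document, VectorStoreIndex
from llama_index.postprocessor.voyageai_rerank import VoyageAIRerank

# Let Voyage AI rerank the retrieved nodes before response synthesis.
reranker = VoyageAIRerank(
    api_key=os.environ["VOYAGE_API_KEY"],  # assumed location of the Voyage key
    model="rerank-lite-1",  # example model name; use whichever rerank model is enabled
    top_k=2,
)

documents = [Document(text="Voyage AI provides embedding and reranking models.")]
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(node_postprocessors=[reranker])

print(query_engine.query("What does Voyage AI provide?"))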