diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000000000000000000000000000000000000..09dc40d305813f3444d710896bd0ca4af484d6bf
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,41 @@
+name: Test
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+
+env:
+  POETRY_VERSION: "1.7.1"
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version:
+          - "3.11"
+    steps:
+      - uses: actions/checkout@v4
+      - name: Cache Poetry
+        uses: actions/cache@v3
+        with:
+          path: ~/.poetry
+          key: ${{ runner.os }}-poetry-${{ hashFiles('**/poetry.lock') }}
+          restore-keys: |
+            ${{ runner.os }}-poetry-
+      - name: Install poetry
+        run: |
+          pipx install poetry==$POETRY_VERSION
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+          cache: poetry
+      - name: Install dependencies
+        run: |
+          poetry install
+      - name: Pytest
+        run: |
+          make test
diff --git a/.gitignore b/.gitignore
index df57182d1a723b6eef2b8c4855918fb7a0907bd4..5e807c4d1ed56d35548cc859123a3cc2666acbcb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,3 +12,6 @@ venv/
 .env*.local
 .env
 mac.env
+
+# Code coverage history
+.coverage
diff --git a/.python-version b/.python-version
new file mode 100644
index 0000000000000000000000000000000000000000..2c0733315e415bfb5e5b353f9996ecd964d395b2
--- /dev/null
+++ b/.python-version
@@ -0,0 +1 @@
+3.11
diff --git a/Makefile b/Makefile
index 6fcc35a18cc71e9e22b0b8793f1789aa589a7d0c..372221c63b5f6ba50b28a1c171645b44d4e94f0e 100644
--- a/Makefile
+++ b/Makefile
@@ -9,3 +9,6 @@ lint_diff: PYTHON_FILES=$(shell git diff --name-only --diff-filter=d main | grep
 lint lint_diff:
 	poetry run black $(PYTHON_FILES) --check
 	poetry run ruff .
+
+test:
+	poetry run pytest -vv --cov=semantic_router --cov-report=term-missing --cov-fail-under=100
diff --git a/poetry.lock b/poetry.lock
index a2617c1c1dd5aaacf48d1efc23c5412f7af5f947..c748530efa9170898e70117fdb6b63fec96103cd 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
 
 [[package]]
 name = "aiohttp"
@@ -468,6 +468,73 @@ traitlets = ">=4"
 [package.extras]
 test = ["pytest"]
 
+[[package]]
+name = "coverage"
+version = "7.3.2"
+description = "Code coverage measurement for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "coverage-7.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d872145f3a3231a5f20fd48500274d7df222e291d90baa2026cc5152b7ce86bf"},
+    {file = "coverage-7.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:310b3bb9c91ea66d59c53fa4989f57d2436e08f18fb2f421a1b0b6b8cc7fffda"},
+    {file = "coverage-7.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f47d39359e2c3779c5331fc740cf4bce6d9d680a7b4b4ead97056a0ae07cb49a"},
+    {file = "coverage-7.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa72dbaf2c2068404b9870d93436e6d23addd8bbe9295f49cbca83f6e278179c"},
+    {file = "coverage-7.3.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:beaa5c1b4777f03fc63dfd2a6bd820f73f036bfb10e925fce067b00a340d0f3f"},
+    {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:dbc1b46b92186cc8074fee9d9fbb97a9dd06c6cbbef391c2f59d80eabdf0faa6"},
+    {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:315a989e861031334d7bee1f9113c8770472db2ac484e5b8c3173428360a9148"},
+    {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d1bc430677773397f64a5c88cb522ea43175ff16f8bfcc89d467d974cb2274f9"},
+    {file = "coverage-7.3.2-cp310-cp310-win32.whl", hash = "sha256:a889ae02f43aa45032afe364c8ae84ad3c54828c2faa44f3bfcafecb5c96b02f"},
+    {file = "coverage-7.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:c0ba320de3fb8c6ec16e0be17ee1d3d69adcda99406c43c0409cb5c41788a611"},
+    {file = "coverage-7.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ac8c802fa29843a72d32ec56d0ca792ad15a302b28ca6203389afe21f8fa062c"},
+    {file = "coverage-7.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:89a937174104339e3a3ffcf9f446c00e3a806c28b1841c63edb2b369310fd074"},
+    {file = "coverage-7.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e267e9e2b574a176ddb983399dec325a80dbe161f1a32715c780b5d14b5f583a"},
+    {file = "coverage-7.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2443cbda35df0d35dcfb9bf8f3c02c57c1d6111169e3c85fc1fcc05e0c9f39a3"},
+    {file = "coverage-7.3.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4175e10cc8dda0265653e8714b3174430b07c1dca8957f4966cbd6c2b1b8065a"},
+    {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf38419fb1a347aaf63481c00f0bdc86889d9fbf3f25109cf96c26b403fda1"},
+    {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5c913b556a116b8d5f6ef834038ba983834d887d82187c8f73dec21049abd65c"},
+    {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1981f785239e4e39e6444c63a98da3a1db8e971cb9ceb50a945ba6296b43f312"},
+    {file = "coverage-7.3.2-cp311-cp311-win32.whl", hash = "sha256:43668cabd5ca8258f5954f27a3aaf78757e6acf13c17604d89648ecc0cc66640"},
+    {file = "coverage-7.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10c39c0452bf6e694511c901426d6b5ac005acc0f78ff265dbe36bf81f808a2"},
+    {file = "coverage-7.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4cbae1051ab791debecc4a5dcc4a1ff45fc27b91b9aee165c8a27514dd160836"},
+    {file = "coverage-7.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12d15ab5833a997716d76f2ac1e4b4d536814fc213c85ca72756c19e5a6b3d63"},
+    {file = "coverage-7.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c7bba973ebee5e56fe9251300c00f1579652587a9f4a5ed8404b15a0471f216"},
+    {file = "coverage-7.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe494faa90ce6381770746077243231e0b83ff3f17069d748f645617cefe19d4"},
+    {file = "coverage-7.3.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6e9589bd04d0461a417562649522575d8752904d35c12907d8c9dfeba588faf"},
+    {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d51ac2a26f71da1b57f2dc81d0e108b6ab177e7d30e774db90675467c847bbdf"},
+    {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:99b89d9f76070237975b315b3d5f4d6956ae354a4c92ac2388a5695516e47c84"},
+    {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fa28e909776dc69efb6ed975a63691bc8172b64ff357e663a1bb06ff3c9b589a"},
+    {file = "coverage-7.3.2-cp312-cp312-win32.whl", hash = "sha256:289fe43bf45a575e3ab10b26d7b6f2ddb9ee2dba447499f5401cfb5ecb8196bb"},
+    {file = "coverage-7.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7dbc3ed60e8659bc59b6b304b43ff9c3ed858da2839c78b804973f613d3e92ed"},
+    {file = "coverage-7.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f94b734214ea6a36fe16e96a70d941af80ff3bfd716c141300d95ebc85339738"},
+    {file = "coverage-7.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:af3d828d2c1cbae52d34bdbb22fcd94d1ce715d95f1a012354a75e5913f1bda2"},
+    {file = "coverage-7.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:630b13e3036e13c7adc480ca42fa7afc2a5d938081d28e20903cf7fd687872e2"},
+    {file = "coverage-7.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9eacf273e885b02a0273bb3a2170f30e2d53a6d53b72dbe02d6701b5296101c"},
+    {file = "coverage-7.3.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8f17966e861ff97305e0801134e69db33b143bbfb36436efb9cfff6ec7b2fd9"},
+    {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b4275802d16882cf9c8b3d057a0839acb07ee9379fa2749eca54efbce1535b82"},
+    {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:72c0cfa5250f483181e677ebc97133ea1ab3eb68645e494775deb6a7f6f83901"},
+    {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cb536f0dcd14149425996821a168f6e269d7dcd2c273a8bff8201e79f5104e76"},
+    {file = "coverage-7.3.2-cp38-cp38-win32.whl", hash = "sha256:307adb8bd3abe389a471e649038a71b4eb13bfd6b7dd9a129fa856f5c695cf92"},
+    {file = "coverage-7.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:88ed2c30a49ea81ea3b7f172e0269c182a44c236eb394718f976239892c0a27a"},
+    {file = "coverage-7.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b631c92dfe601adf8f5ebc7fc13ced6bb6e9609b19d9a8cd59fa47c4186ad1ce"},
+    {file = "coverage-7.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d3d9df4051c4a7d13036524b66ecf7a7537d14c18a384043f30a303b146164e9"},
+    {file = "coverage-7.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f7363d3b6a1119ef05015959ca24a9afc0ea8a02c687fe7e2d557705375c01f"},
+    {file = "coverage-7.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f11cc3c967a09d3695d2a6f03fb3e6236622b93be7a4b5dc09166a861be6d25"},
+    {file = "coverage-7.3.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:149de1d2401ae4655c436a3dced6dd153f4c3309f599c3d4bd97ab172eaf02d9"},
+    {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3a4006916aa6fee7cd38db3bfc95aa9c54ebb4ffbfc47c677c8bba949ceba0a6"},
+    {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9028a3871280110d6e1aa2df1afd5ef003bab5fb1ef421d6dc748ae1c8ef2ebc"},
+    {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9f805d62aec8eb92bab5b61c0f07329275b6f41c97d80e847b03eb894f38d083"},
+    {file = "coverage-7.3.2-cp39-cp39-win32.whl", hash = "sha256:d1c88ec1a7ff4ebca0219f5b1ef863451d828cccf889c173e1253aa84b1e07ce"},
+    {file = "coverage-7.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b4767da59464bb593c07afceaddea61b154136300881844768037fd5e859353f"},
+    {file = "coverage-7.3.2-pp38.pp39.pp310-none-any.whl", hash = "sha256:ae97af89f0fbf373400970c0a21eef5aa941ffeed90aee43650b81f7d7f47637"},
+    {file = "coverage-7.3.2.tar.gz", hash = "sha256:be32ad29341b0170e795ca590e1c07e81fc061cb5b10c74ce7203491484404ef"},
+]
+
+[package.dependencies]
+tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""}
+
+[package.extras]
+toml = ["tomli"]
+
 [[package]]
 name = "debugpy"
 version = "1.8.0"
@@ -674,6 +741,17 @@ docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker
 perf = ["ipython"]
 testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"]
 
+[[package]]
+name = "iniconfig"
+version = "2.0.0"
+description = "brain-dead simple config-ini parsing"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
+    {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
+]
+
 [[package]]
 name = "ipykernel"
 version = "6.26.0"
@@ -924,6 +1002,51 @@ files = [
     {file = "nest_asyncio-1.5.8.tar.gz", hash = "sha256:25aa2ca0d2a5b5531956b9e273b45cf664cae2b145101d73b86b199978d48fdb"},
 ]
 
+[[package]]
+name = "numpy"
+version = "1.26.2"
+description = "Fundamental package for array computing in Python"
+optional = false
+python-versions = ">=3.9"
+files = [
+    {file = "numpy-1.26.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3703fc9258a4a122d17043e57b35e5ef1c5a5837c3db8be396c82e04c1cf9b0f"},
+    {file = "numpy-1.26.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cc392fdcbd21d4be6ae1bb4475a03ce3b025cd49a9be5345d76d7585aea69440"},
+    {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36340109af8da8805d8851ef1d74761b3b88e81a9bd80b290bbfed61bd2b4f75"},
+    {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc008217145b3d77abd3e4d5ef586e3bdfba8fe17940769f8aa09b99e856c00"},
+    {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ced40d4e9e18242f70dd02d739e44698df3dcb010d31f495ff00a31ef6014fe"},
+    {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b272d4cecc32c9e19911891446b72e986157e6a1809b7b56518b4f3755267523"},
+    {file = "numpy-1.26.2-cp310-cp310-win32.whl", hash = "sha256:22f8fc02fdbc829e7a8c578dd8d2e15a9074b630d4da29cda483337e300e3ee9"},
+    {file = "numpy-1.26.2-cp310-cp310-win_amd64.whl", hash = "sha256:26c9d33f8e8b846d5a65dd068c14e04018d05533b348d9eaeef6c1bd787f9919"},
+    {file = "numpy-1.26.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b96e7b9c624ef3ae2ae0e04fa9b460f6b9f17ad8b4bec6d7756510f1f6c0c841"},
+    {file = "numpy-1.26.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aa18428111fb9a591d7a9cc1b48150097ba6a7e8299fb56bdf574df650e7d1f1"},
+    {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06fa1ed84aa60ea6ef9f91ba57b5ed963c3729534e6e54055fc151fad0423f0a"},
+    {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96ca5482c3dbdd051bcd1fce8034603d6ebfc125a7bd59f55b40d8f5d246832b"},
+    {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:854ab91a2906ef29dc3925a064fcd365c7b4da743f84b123002f6139bcb3f8a7"},
+    {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f43740ab089277d403aa07567be138fc2a89d4d9892d113b76153e0e412409f8"},
+    {file = "numpy-1.26.2-cp311-cp311-win32.whl", hash = "sha256:a2bbc29fcb1771cd7b7425f98b05307776a6baf43035d3b80c4b0f29e9545186"},
+    {file = "numpy-1.26.2-cp311-cp311-win_amd64.whl", hash = "sha256:2b3fca8a5b00184828d12b073af4d0fc5fdd94b1632c2477526f6bd7842d700d"},
+    {file = "numpy-1.26.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a4cd6ed4a339c21f1d1b0fdf13426cb3b284555c27ac2f156dfdaaa7e16bfab0"},
+    {file = "numpy-1.26.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d5244aabd6ed7f312268b9247be47343a654ebea52a60f002dc70c769048e75"},
+    {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a3cdb4d9c70e6b8c0814239ead47da00934666f668426fc6e94cce869e13fd7"},
+    {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa317b2325f7aa0a9471663e6093c210cb2ae9c0ad824732b307d2c51983d5b6"},
+    {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:174a8880739c16c925799c018f3f55b8130c1f7c8e75ab0a6fa9d41cab092fd6"},
+    {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f79b231bf5c16b1f39c7f4875e1ded36abee1591e98742b05d8a0fb55d8a3eec"},
+    {file = "numpy-1.26.2-cp312-cp312-win32.whl", hash = "sha256:4a06263321dfd3598cacb252f51e521a8cb4b6df471bb12a7ee5cbab20ea9167"},
+    {file = "numpy-1.26.2-cp312-cp312-win_amd64.whl", hash = "sha256:b04f5dc6b3efdaab541f7857351aac359e6ae3c126e2edb376929bd3b7f92d7e"},
+    {file = "numpy-1.26.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4eb8df4bf8d3d90d091e0146f6c28492b0be84da3e409ebef54349f71ed271ef"},
+    {file = "numpy-1.26.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1a13860fdcd95de7cf58bd6f8bc5a5ef81c0b0625eb2c9a783948847abbef2c2"},
+    {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64308ebc366a8ed63fd0bf426b6a9468060962f1a4339ab1074c228fa6ade8e3"},
+    {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baf8aab04a2c0e859da118f0b38617e5ee65d75b83795055fb66c0d5e9e9b818"},
+    {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d73a3abcac238250091b11caef9ad12413dab01669511779bc9b29261dd50210"},
+    {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b361d369fc7e5e1714cf827b731ca32bff8d411212fccd29ad98ad622449cc36"},
+    {file = "numpy-1.26.2-cp39-cp39-win32.whl", hash = "sha256:bd3f0091e845164a20bd5a326860c840fe2af79fa12e0469a12768a3ec578d80"},
+    {file = "numpy-1.26.2-cp39-cp39-win_amd64.whl", hash = "sha256:2beef57fb031dcc0dc8fa4fe297a742027b954949cabb52a2a376c144e5e6060"},
+    {file = "numpy-1.26.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1cc3d5029a30fb5f06704ad6b23b35e11309491c999838c31f124fee32107c79"},
+    {file = "numpy-1.26.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94cc3c222bb9fb5a12e334d0479b97bb2df446fbe622b470928f5284ffca3f8d"},
+    {file = "numpy-1.26.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe6b44fb8fcdf7eda4ef4461b97b3f63c466b27ab151bec2366db8b197387841"},
+    {file = "numpy-1.26.2.tar.gz", hash = "sha256:f65738447676ab5777f11e6bbbdb8ce11b785e105f690bc45966574816b6d3ea"},
+]
+
 [[package]]
 name = "openai"
 version = "0.28.1"
@@ -1012,6 +1135,21 @@ files = [
 docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"]
 test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"]
 
+[[package]]
+name = "pluggy"
+version = "1.3.0"
+description = "plugin and hook calling mechanisms for python"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"},
+    {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"},
+]
+
+[package.extras]
+dev = ["pre-commit", "tox"]
+testing = ["pytest", "pytest-benchmark"]
+
 [[package]]
 name = "prompt-toolkit"
 version = "3.0.41"
@@ -1156,6 +1294,63 @@ files = [
 [package.extras]
 plugins = ["importlib-metadata"]
 
+[[package]]
+name = "pytest"
+version = "7.4.3"
+description = "pytest: simple powerful testing with Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"},
+    {file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
+iniconfig = "*"
+packaging = "*"
+pluggy = ">=0.12,<2.0"
+tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
+
+[package.extras]
+testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
+
+[[package]]
+name = "pytest-cov"
+version = "4.1.0"
+description = "Pytest plugin for measuring coverage."
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"},
+    {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"},
+]
+
+[package.dependencies]
+coverage = {version = ">=5.2.1", extras = ["toml"]}
+pytest = ">=4.6"
+
+[package.extras]
+testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"]
+
+[[package]]
+name = "pytest-mock"
+version = "3.12.0"
+description = "Thin-wrapper around the mock package for easier use with pytest"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "pytest-mock-3.12.0.tar.gz", hash = "sha256:31a40f038c22cad32287bb43932054451ff5583ff094bca6f675df2f8bc1a6e9"},
+    {file = "pytest_mock-3.12.0-py3-none-any.whl", hash = "sha256:0972719a7263072da3a21c7f4773069bcc7486027d7e8e1f81d98a47e701bc4f"},
+]
+
+[package.dependencies]
+pytest = ">=5.0"
+
+[package.extras]
+dev = ["pre-commit", "pytest-asyncio", "tox"]
+
 [[package]]
 name = "python-dateutil"
 version = "2.8.2"
@@ -1584,4 +1779,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.10"
-content-hash = "7955e07ea098c2e8b29421733eb5ec6c06cbbc5bf64bd88451baa1a42c71e6b2"
+content-hash = "c0b2fcab1a4add0d43415d41359db59dd15ce87f14f5227e8f0cfb956a679dcd"
diff --git a/pyproject.toml b/pyproject.toml
index cb351d3f56c518dec9a3dfc9c7c0b3426a7f61ed..ce9aa8f0c01ecff5159cc03ac288995693e13b22 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,8 @@ description = "Super fast semantic router for AI decision making"
 authors = [
     "James Briggs <james@aurelio.ai>",
     "Siraj Aizlewood <siraj@aurelio.ai>",
-    "Simonas Jakubonis <simonas@aurelio.ai>"
+    "Simonas Jakubonis <simonas@aurelio.ai>",
+    "Bogdan Buduroiu <bogdan@aurelio.ai>"
 ]
 readme = "README.md"
 
@@ -14,12 +15,16 @@ python = "^3.10"
 pydantic = "^1.8.2"
 openai = "^0.28.1"
 cohere = "^4.32"
+numpy = "^1.26.2"
 
 
 [tool.poetry.group.dev.dependencies]
 ipykernel = "^6.26.0"
 ruff = "^0.1.5"
 black = "^23.11.0"
+pytest = "^7.4.3"
+pytest-mock = "^3.12.0"
+pytest-cov = "^4.1.0"
 
 [build-system]
 requires = ["poetry-core"]
diff --git a/semantic_router/schema.py b/semantic_router/schema.py
index d13975c080b237075c6b13c952323f4c98650e77..b0de34d01da3fe17fcecab900accbbf5daba3c45 100644
--- a/semantic_router/schema.py
+++ b/semantic_router/schema.py
@@ -19,6 +19,7 @@ class Decision(BaseModel):
 class EncoderType(Enum):
     OPENAI = "openai"
     COHERE = "cohere"
+    HUGGINGFACE = "huggingface"
 
 
 @dataclass
diff --git a/tests/encoders/test_base.py b/tests/encoders/test_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2c39645d6bb6091b2d17c21cad5bee0aafe5079
--- /dev/null
+++ b/tests/encoders/test_base.py
@@ -0,0 +1,16 @@
+import pytest
+
+from semantic_router.encoders import BaseEncoder
+
+
+class TestBaseEncoder:
+    @pytest.fixture
+    def base_encoder(self):
+        return BaseEncoder(name="TestEncoder")
+
+    def test_base_encoder_initialization(self, base_encoder):
+        assert base_encoder.name == "TestEncoder", "Initialization of name failed"
+
+    def test_base_encoder_call_method_not_implemented(self, base_encoder):
+        with pytest.raises(NotImplementedError):
+            base_encoder(["some", "texts"])
diff --git a/tests/encoders/test_cohere.py b/tests/encoders/test_cohere.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f7ddf281244132c130e027fedc99e00829770de
--- /dev/null
+++ b/tests/encoders/test_cohere.py
@@ -0,0 +1,41 @@
+import pytest
+
+from semantic_router.encoders import CohereEncoder
+
+
+@pytest.fixture
+def cohere_encoder(mocker):
+    mocker.patch("cohere.Client")
+    return CohereEncoder(cohere_api_key="test_api_key")
+
+
+class TestCohereEncoder:
+    def test_initialization_with_api_key(self, cohere_encoder):
+        assert cohere_encoder.client is not None, "Client should be initialized"
+        assert (
+            cohere_encoder.name == "embed-english-v3.0"
+        ), "Default name not set correctly"
+
+    def test_initialization_without_api_key(self, mocker, monkeypatch):
+        monkeypatch.delenv("COHERE_API_KEY", raising=False)
+        mocker.patch("cohere.Client")
+        with pytest.raises(ValueError):
+            CohereEncoder()
+
+    def test_call_method(self, cohere_encoder, mocker):
+        mock_embed = mocker.MagicMock()
+        mock_embed.embeddings = [[0.1, 0.2, 0.3]]
+        cohere_encoder.client.embed.return_value = mock_embed
+
+        result = cohere_encoder(["test"])
+        assert isinstance(result, list), "Result should be a list"
+        assert all(
+            isinstance(sublist, list) for sublist in result
+        ), "Each item in result should be a list"
+        cohere_encoder.client.embed.assert_called_once()
+
+    def test_call_with_uninitialized_client(self, mocker):
+        mocker.patch("cohere.Client", return_value=None)
+        encoder = CohereEncoder(cohere_api_key="test_api_key")
+        with pytest.raises(ValueError):
+            encoder(["test"])
diff --git a/tests/encoders/test_openai.py b/tests/encoders/test_openai.py
new file mode 100644
index 0000000000000000000000000000000000000000..23883fcc3581896b765e771e7c67f3a016603a1f
--- /dev/null
+++ b/tests/encoders/test_openai.py
@@ -0,0 +1,122 @@
+import openai
+import pytest
+from openai.error import RateLimitError
+
+from semantic_router.encoders import OpenAIEncoder
+
+
+@pytest.fixture
+def openai_encoder(mocker):
+    mocker.patch("openai.Embedding.create")
+    return OpenAIEncoder(name="test-engine", openai_api_key="test_api_key")
+
+
+class TestOpenAIEncoder:
+    def test_initialization_with_api_key(self, openai_encoder):
+        assert openai.api_key == "test_api_key", "API key should be set correctly"
+        assert openai_encoder.name == "test-engine", "Engine name not set correctly"
+
+    def test_initialization_without_api_key(self, mocker, monkeypatch):
+        monkeypatch.delenv("OPENAI_API_KEY", raising=False)
+        mocker.patch("openai.Embedding.create")
+        with pytest.raises(ValueError):
+            OpenAIEncoder(name="test-engine")
+
+    def test_call_method_success(self, openai_encoder, mocker):
+        mocker.patch(
+            "openai.Embedding.create",
+            return_value={"data": [{"embedding": [0.1, 0.2, 0.3]}]},
+        )
+
+        result = openai_encoder(["test"])
+        assert isinstance(result, list), "Result should be a list"
+        assert len(result) == 1 and len(result[0]) == 3, "Result list size is incorrect"
+
+    def test_call_method_rate_limit_error__raises_value_error_after_max_retries(
+        self, openai_encoder, mocker
+    ):
+        mocker.patch("semantic_router.encoders.openai.sleep")
+        mocker.patch(
+            "openai.Embedding.create",
+            side_effect=RateLimitError(message="rate limit exceeded", http_status=429),
+        )
+
+        with pytest.raises(ValueError):
+            openai_encoder(["test"])
+
+    def test_call_method_failure(self, openai_encoder, mocker):
+        mocker.patch("openai.Embedding.create", return_value={})
+
+        with pytest.raises(ValueError):
+            openai_encoder(["test"])
+
+    def test_call_method_rate_limit_error__exponential_backoff_single_retry(
+        self, openai_encoder, mocker
+    ):
+        mock_sleep = mocker.patch("semantic_router.encoders.openai.sleep")
+        mocker.patch(
+            "openai.Embedding.create",
+            side_effect=[
+                RateLimitError("rate limit exceeded"),
+                {"data": [{"embedding": [1, 2, 3]}]},
+            ],
+        )
+
+        openai_encoder(["sample text"])
+
+        mock_sleep.assert_called_once_with(1)  # 2**0
+
+    def test_call_method_rate_limit_error__exponential_backoff_multiple_retries(
+        self, openai_encoder, mocker
+    ):
+        mock_sleep = mocker.patch("semantic_router.encoders.openai.sleep")
+        mocker.patch(
+            "openai.Embedding.create",
+            side_effect=[
+                RateLimitError("rate limit exceeded"),
+                RateLimitError("rate limit exceeded"),
+                {"data": [{"embedding": [1, 2, 3]}]},
+            ],
+        )
+
+        openai_encoder(["sample text"])
+
+        assert mock_sleep.call_count == 2
+        mock_sleep.assert_any_call(1)  # 2**0
+        mock_sleep.assert_any_call(2)  # 2**1
+
+    def test_call_method_rate_limit_error__exponential_backoff_max_retries_exceeded(
+        self, openai_encoder, mocker
+    ):
+        mock_sleep = mocker.patch("semantic_router.encoders.openai.sleep")
+        mocker.patch(
+            "openai.Embedding.create", side_effect=RateLimitError("rate limit exceeded")
+        )
+
+        with pytest.raises(ValueError):
+            openai_encoder(["sample text"])
+
+        assert mock_sleep.call_count == 5  # Assuming 5 retries
+        mock_sleep.assert_any_call(1)  # 2**0
+        mock_sleep.assert_any_call(2)  # 2**1
+        mock_sleep.assert_any_call(4)  # 2**2
+        mock_sleep.assert_any_call(8)  # 2**3
+        mock_sleep.assert_any_call(16)  # 2**4
+
+    def test_call_method_rate_limit_error__exponential_backoff_successful(
+        self, openai_encoder, mocker
+    ):
+        mock_sleep = mocker.patch("semantic_router.encoders.openai.sleep")
+        mocker.patch(
+            "openai.Embedding.create",
+            side_effect=[
+                RateLimitError("rate limit exceeded"),
+                RateLimitError("rate limit exceeded"),
+                {"data": [{"embedding": [1, 2, 3]}]},
+            ],
+        )
+
+        embeddings = openai_encoder(["sample text"])
+
+        assert mock_sleep.call_count == 2
+        assert embeddings == [[1, 2, 3]]
diff --git a/tests/test_layer.py b/tests/test_layer.py
new file mode 100644
index 0000000000000000000000000000000000000000..96e06a086469b78c03dde08ff4508c33ea548095
--- /dev/null
+++ b/tests/test_layer.py
@@ -0,0 +1,115 @@
+import pytest
+
+from semantic_router.encoders import BaseEncoder, CohereEncoder, OpenAIEncoder
+from semantic_router.layer import DecisionLayer
+from semantic_router.schema import Decision
+
+
+def mock_encoder_call(utterances):
+    # Define a mapping of utterances to return values
+    mock_responses = {
+        "Hello": [0.1, 0.2, 0.3],
+        "Hi": [0.4, 0.5, 0.6],
+        "Goodbye": [0.7, 0.8, 0.9],
+        "Bye": [1.0, 1.1, 1.2],
+        "Au revoir": [1.3, 1.4, 1.5],
+    }
+    return [mock_responses.get(u, [0, 0, 0]) for u in utterances]
+
+
+@pytest.fixture
+def base_encoder():
+    # Bare BaseEncoder with no concrete backend; used by the
+    # failover-threshold test below.
+    return BaseEncoder(name="test-encoder")
+
+
+@pytest.fixture
+def cohere_encoder(mocker):
+    mocker.patch.object(CohereEncoder, "__call__", side_effect=mock_encoder_call)
+    return CohereEncoder(name="test-cohere-encoder", cohere_api_key="test_api_key")
+
+
+@pytest.fixture
+def openai_encoder(mocker):
+    mocker.patch.object(OpenAIEncoder, "__call__", side_effect=mock_encoder_call)
+    return OpenAIEncoder(name="test-openai-encoder", openai_api_key="test_api_key")
+
+
+@pytest.fixture
+def decisions():
+    return [
+        Decision(name="Decision 1", utterances=["Hello", "Hi"]),
+        Decision(name="Decision 2", utterances=["Goodbye", "Bye", "Au revoir"]),
+    ]
+
+
+class TestDecisionLayer:
+    def test_initialization(self, openai_encoder, decisions):
+        decision_layer = DecisionLayer(encoder=openai_encoder, decisions=decisions)
+        assert decision_layer.similarity_threshold == 0.82
+        assert len(decision_layer.index) == 5
+        assert len(set(decision_layer.categories)) == 2
+
+    def test_initialization_different_encoders(self, cohere_encoder, openai_encoder):
+        decision_layer_cohere = DecisionLayer(encoder=cohere_encoder)
+        assert decision_layer_cohere.similarity_threshold == 0.3
+
+        decision_layer_openai = DecisionLayer(encoder=openai_encoder)
+        assert decision_layer_openai.similarity_threshold == 0.82
+
+    def test_add_decision(self, openai_encoder):
+        decision_layer = DecisionLayer(encoder=openai_encoder)
+        decision = Decision(name="Decision 3", utterances=["Yes", "No"])
+        decision_layer.add(decision)
+        assert len(decision_layer.index) == 2
+        assert len(set(decision_layer.categories)) == 1
+
+    def test_add_multiple_decisions(self, openai_encoder, decisions):
+        decision_layer = DecisionLayer(encoder=openai_encoder)
+        for decision in decisions:
+            decision_layer.add(decision)
+        assert len(decision_layer.index) == 5
+        assert len(set(decision_layer.categories)) == 2
+
+    def test_query_and_classification(self, openai_encoder, decisions):
+        decision_layer = DecisionLayer(encoder=openai_encoder, decisions=decisions)
+        query_result = decision_layer("Hello")
+        assert query_result in ["Decision 1", "Decision 2"]
+
+    def test_query_with_no_index(self, openai_encoder):
+        decision_layer = DecisionLayer(encoder=openai_encoder)
+        assert decision_layer("Anything") is None
+
+    def test_semantic_classify(self, openai_encoder, decisions):
+        decision_layer = DecisionLayer(encoder=openai_encoder, decisions=decisions)
+        classification, score = decision_layer._semantic_classify(
+            [
+                {"decision": "Decision 1", "score": 0.9},
+                {"decision": "Decision 2", "score": 0.1},
+            ]
+        )
+        assert classification == "Decision 1"
+        assert score == [0.9]
+
+    def test_semantic_classify_multiple_decisions(self, openai_encoder, decisions):
+        decision_layer = DecisionLayer(encoder=openai_encoder, decisions=decisions)
+        classification, score = decision_layer._semantic_classify(
+            [
+                {"decision": "Decision 1", "score": 0.9},
+                {"decision": "Decision 2", "score": 0.1},
+                {"decision": "Decision 1", "score": 0.8},
+            ]
+        )
+        assert classification == "Decision 1"
+        assert score == [0.9, 0.8]
+
+    def test_pass_threshold(self, openai_encoder):
+        decision_layer = DecisionLayer(encoder=openai_encoder)
+        assert not decision_layer._pass_threshold([], 0.5)
+        assert decision_layer._pass_threshold([0.6, 0.7], 0.5)
+
+    def test_failover_similarity_threshold(self, base_encoder):
+        decision_layer = DecisionLayer(encoder=base_encoder)
+        assert decision_layer.similarity_threshold == 0.82
+
+
+# Add more tests for edge cases and error handling as needed.
diff --git a/tests/test_schema.py b/tests/test_schema.py
new file mode 100644
index 0000000000000000000000000000000000000000..2563bf0bc21b7769f96d1119fdc29b6afab66d1a
--- /dev/null
+++ b/tests/test_schema.py
@@ -0,0 +1,61 @@
+import pytest
+
+from semantic_router.schema import (
+    CohereEncoder,
+    Decision,
+    Encoder,
+    EncoderType,
+    OpenAIEncoder,
+    SemanticSpace,
+)
+
+
+class TestEncoderDataclass:
+    def test_encoder_initialization_openai(self, mocker):
+        mocker.patch.dict("os.environ", {"OPENAI_API_KEY": "test"})
+        encoder = Encoder(type="openai", name="test-engine")
+        assert encoder.type == EncoderType.OPENAI
+        assert isinstance(encoder.model, OpenAIEncoder)
+
+    def test_encoder_initialization_cohere(self, mocker):
+        mocker.patch.dict("os.environ", {"COHERE_API_KEY": "test"})
+        encoder = Encoder(type="cohere", name="test-engine")
+        assert encoder.type == EncoderType.COHERE
+        assert isinstance(encoder.model, CohereEncoder)
+
+    def test_encoder_initialization_unsupported_type(self):
+        with pytest.raises(ValueError):
+            Encoder(type="unsupported", name="test-engine")
+
+    def test_encoder_initialization_huggingface(self):
+        with pytest.raises(NotImplementedError):
+            Encoder(type="huggingface", name="test-engine")
+
+    def test_encoder_call_method(self, mocker):
+        mocker.patch.dict("os.environ", {"OPENAI_API_KEY": "test"})
+        mocker.patch(
+            "semantic_router.encoders.openai.OpenAIEncoder.__call__",
+            return_value=[0.1, 0.2, 0.3],
+        )
+        encoder = Encoder(type="openai", name="test-engine")
+        result = encoder(["test"])
+        assert result == [0.1, 0.2, 0.3]
+
+
+class TestSemanticSpaceDataclass:
+    def test_semanticspace_initialization(self):
+        semantic_space = SemanticSpace()
+        assert semantic_space.id == ""
+        assert semantic_space.decisions == []
+
+    def test_semanticspace_add_decision(self):
+        decision = Decision(
+            name="test", utterances=["hello", "hi"], description="greeting"
+        )
+        semantic_space = SemanticSpace()
+        semantic_space.add(decision)
+
+        assert len(semantic_space.decisions) == 1
+        assert semantic_space.decisions[0].name == "test"
+        assert semantic_space.decisions[0].utterances == ["hello", "hi"]
+        assert semantic_space.decisions[0].description == "greeting"