diff --git a/.gitignore b/.gitignore
index df57182d1a723b6eef2b8c4855918fb7a0907bd4..5e807c4d1ed56d35548cc859123a3cc2666acbcb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,3 +12,6 @@ venv/
 .env*.local
 .env
 mac.env
+
+# Code coverage data
+.coverage
diff --git a/.python-version b/.python-version
new file mode 100644
index 0000000000000000000000000000000000000000..2c0733315e415bfb5e5b353f9996ecd964d395b2
--- /dev/null
+++ b/.python-version
@@ -0,0 +1 @@
+3.11
diff --git a/poetry.lock b/poetry.lock
index 1c56439f325b9690f57395b76d6ae13a7f68514b..c748530efa9170898e70117fdb6b63fec96103cd 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1540,48 +1540,6 @@ files = [
     {file = "ruff-0.1.5.tar.gz", hash = "sha256:5cbec0ef2ae1748fb194f420fb03fb2c25c3258c86129af7172ff8f198f125ab"},
 ]
 
-[[package]]
-name = "scipy"
-version = "1.11.4"
-description = "Fundamental algorithms for scientific computing in Python"
-optional = false
-python-versions = ">=3.9"
-files = [
-    {file = "scipy-1.11.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc9a714581f561af0848e6b69947fda0614915f072dfd14142ed1bfe1b806710"},
-    {file = "scipy-1.11.4-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:cf00bd2b1b0211888d4dc75656c0412213a8b25e80d73898083f402b50f47e41"},
-    {file = "scipy-1.11.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9999c008ccf00e8fbcce1236f85ade5c569d13144f77a1946bef8863e8f6eb4"},
-    {file = "scipy-1.11.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:933baf588daa8dc9a92c20a0be32f56d43faf3d1a60ab11b3f08c356430f6e56"},
-    {file = "scipy-1.11.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8fce70f39076a5aa62e92e69a7f62349f9574d8405c0a5de6ed3ef72de07f446"},
-    {file = "scipy-1.11.4-cp310-cp310-win_amd64.whl", hash = "sha256:6550466fbeec7453d7465e74d4f4b19f905642c89a7525571ee91dd7adabb5a3"},
-    {file = "scipy-1.11.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f313b39a7e94f296025e3cffc2c567618174c0b1dde173960cf23808f9fae4be"},
-    {file = "scipy-1.11.4-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1b7c3dca977f30a739e0409fb001056484661cb2541a01aba0bb0029f7b68db8"},
-    {file = "scipy-1.11.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00150c5eae7b610c32589dda259eacc7c4f1665aedf25d921907f4d08a951b1c"},
-    {file = "scipy-1.11.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:530f9ad26440e85766509dbf78edcfe13ffd0ab7fec2560ee5c36ff74d6269ff"},
-    {file = "scipy-1.11.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5e347b14fe01003d3b78e196e84bd3f48ffe4c8a7b8a1afbcb8f5505cb710993"},
-    {file = "scipy-1.11.4-cp311-cp311-win_amd64.whl", hash = "sha256:acf8ed278cc03f5aff035e69cb511741e0418681d25fbbb86ca65429c4f4d9cd"},
-    {file = "scipy-1.11.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:028eccd22e654b3ea01ee63705681ee79933652b2d8f873e7949898dda6d11b6"},
-    {file = "scipy-1.11.4-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:2c6ff6ef9cc27f9b3db93a6f8b38f97387e6e0591600369a297a50a8e96e835d"},
-    {file = "scipy-1.11.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b030c6674b9230d37c5c60ab456e2cf12f6784596d15ce8da9365e70896effc4"},
-    {file = "scipy-1.11.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad669df80528aeca5f557712102538f4f37e503f0c5b9541655016dd0932ca79"},
-    {file = "scipy-1.11.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ce7fff2e23ab2cc81ff452a9444c215c28e6305f396b2ba88343a567feec9660"},
-    {file = "scipy-1.11.4-cp312-cp312-win_amd64.whl", hash = "sha256:36750b7733d960d7994888f0d148d31ea3017ac15eef664194b4ef68d36a4a97"},
-    {file = "scipy-1.11.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6e619aba2df228a9b34718efb023966da781e89dd3d21637b27f2e54db0410d7"},
-    {file = "scipy-1.11.4-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:f3cd9e7b3c2c1ec26364856f9fbe78695fe631150f94cd1c22228456404cf1ec"},
-    {file = "scipy-1.11.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d10e45a6c50211fe256da61a11c34927c68f277e03138777bdebedd933712fea"},
-    {file = "scipy-1.11.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91af76a68eeae0064887a48e25c4e616fa519fa0d38602eda7e0f97d65d57937"},
-    {file = "scipy-1.11.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6df1468153a31cf55ed5ed39647279beb9cfb5d3f84369453b49e4b8502394fd"},
-    {file = "scipy-1.11.4-cp39-cp39-win_amd64.whl", hash = "sha256:ee410e6de8f88fd5cf6eadd73c135020bfbbbdfcd0f6162c36a7638a1ea8cc65"},
-    {file = "scipy-1.11.4.tar.gz", hash = "sha256:90a2b78e7f5733b9de748f589f09225013685f9b218275257f8a8168ededaeaa"},
-]
-
-[package.dependencies]
-numpy = ">=1.21.6,<1.28.0"
-
-[package.extras]
-dev = ["click", "cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"]
-doc = ["jupytext", "matplotlib (>2)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"]
-test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"]
-
 [[package]]
 name = "six"
 version = "1.16.0"
@@ -1821,4 +1779,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.10"
-content-hash = "d00b433ea2ff03e4ba01033022d4c7f4bf1a824e94cd8138193cc86af8780805"
+content-hash = "c0b2fcab1a4add0d43415d41359db59dd15ce87f14f5227e8f0cfb956a679dcd"
diff --git a/pyproject.toml b/pyproject.toml
index d513ba62adab280e9c6a7c68208dd651137b8972..ce9aa8f0c01ecff5159cc03ac288995693e13b22 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -16,7 +16,6 @@ pydantic = "^1.8.2"
 openai = "^0.28.1"
 cohere = "^4.32"
 numpy = "^1.26.2"
-scipy = "^1.11.4"
 
 
 [tool.poetry.group.dev.dependencies]
diff --git a/tests/encoders/test_base.py b/tests/encoders/test_base.py
index f1d4d51cf1d641fed28c514cb253c40bb5553541..d2c39645d6bb6091b2d17c21cad5bee0aafe5079 100644
--- a/tests/encoders/test_base.py
+++ b/tests/encoders/test_base.py
@@ -1,4 +1,5 @@
 import pytest
+
 from semantic_router.encoders import BaseEncoder
 
 
diff --git a/tests/encoders/test_cohere.py b/tests/encoders/test_cohere.py
index e7170a747817a3e8de4c753f88addf8aad3940b7..592aaa390848917aef41ee54ba9e16e80c021dbd 100644
--- a/tests/encoders/test_cohere.py
+++ b/tests/encoders/test_cohere.py
@@ -1,5 +1,6 @@
-import pytest
 import cohere
+import pytest
+
 from semantic_router.encoders import CohereEncoder
 
 
@@ -12,7 +13,9 @@ def cohere_encoder(mocker):
 class TestCohereEncoder:
     def test_initialization_with_api_key(self, cohere_encoder):
         assert cohere_encoder.client is not None, "Client should be initialized"
-        assert cohere_encoder.name == "embed-english-v3.0", "Default name not set correctly"
+        assert (
+            cohere_encoder.name == "embed-english-v3.0"
+        ), "Default name not set correctly"
 
     def test_initialization_without_api_key(self, mocker, monkeypatch):
         monkeypatch.delenv("COHERE_API_KEY", raising=False)
@@ -27,7 +30,9 @@ class TestCohereEncoder:
 
         result = cohere_encoder(["test"])
         assert isinstance(result, list), "Result should be a list"
-        assert all(isinstance(sublist, list) for sublist in result), "Each item in result should be a list"
+        assert all(
+            isinstance(sublist, list) for sublist in result
+        ), "Each item in result should be a list"
         cohere_encoder.client.embed.assert_called_once()
 
     def test_call_with_uninitialized_client(self, mocker):
diff --git a/tests/encoders/test_openai.py b/tests/encoders/test_openai.py
index 9dd113adb1b05ecca696a0b3680637800ee40b56..2c7c0e6deb7dba230d30712b5f8e19586542fd28 100644
--- a/tests/encoders/test_openai.py
+++ b/tests/encoders/test_openai.py
@@ -1,8 +1,9 @@
 import os
 
-import pytest
 import openai
+import pytest
 from openai.error import RateLimitError
+
 from semantic_router.encoders import OpenAIEncoder
 
 
@@ -24,16 +25,22 @@ class TestOpenAIEncoder:
             OpenAIEncoder(name="test-engine")
 
     def test_call_method_success(self, openai_encoder, mocker):
-        mocker.patch("openai.Embedding.create", return_value={"data": [{"embedding": [0.1, 0.2, 0.3]}]})
+        mocker.patch(
+            "openai.Embedding.create",
+            return_value={"data": [{"embedding": [0.1, 0.2, 0.3]}]},
+        )
 
         result = openai_encoder(["test"])
         assert isinstance(result, list), "Result should be a list"
         assert len(result) == 1 and len(result[0]) == 3, "Result list size is incorrect"
 
-    def test_call_method_rate_limit_error__raises_value_error_after_max_retries(self, openai_encoder, mocker):
+    def test_call_method_rate_limit_error__raises_value_error_after_max_retries(
+        self, openai_encoder, mocker
+    ):
         mocker.patch("semantic_router.encoders.openai.sleep")
         mocker.patch(
-            "openai.Embedding.create", side_effect=RateLimitError(message="rate limit exceeded", http_status=429)
+            "openai.Embedding.create",
+            side_effect=RateLimitError(message="rate limit exceeded", http_status=429),
         )
 
         with pytest.raises(ValueError):
@@ -45,7 +52,9 @@ class TestOpenAIEncoder:
         with pytest.raises(ValueError):
             openai_encoder(["test"])
 
-    def test_call_method_rate_limit_error__exponential_backoff_single_retry(self, openai_encoder, mocker):
+    def test_call_method_rate_limit_error__exponential_backoff_single_retry(
+        self, openai_encoder, mocker
+    ):
         mock_sleep = mocker.patch("semantic_router.encoders.openai.sleep")
         mocker.patch(
             "openai.Embedding.create",
@@ -59,7 +68,9 @@ class TestOpenAIEncoder:
 
         mock_sleep.assert_called_once_with(1)  # 2**0
 
-    def test_call_method_rate_limit_error__exponential_backoff_multiple_retries(self, openai_encoder, mocker):
+    def test_call_method_rate_limit_error__exponential_backoff_multiple_retries(
+        self, openai_encoder, mocker
+    ):
         mock_sleep = mocker.patch("semantic_router.encoders.openai.sleep")
         mocker.patch(
             "openai.Embedding.create",
@@ -76,9 +87,13 @@ class TestOpenAIEncoder:
         mock_sleep.assert_any_call(1)  # 2**0
         mock_sleep.assert_any_call(2)  # 2**1
 
-    def test_call_method_rate_limit_error__exponential_backoff_max_retries_exceeded(self, openai_encoder, mocker):
+    def test_call_method_rate_limit_error__exponential_backoff_max_retries_exceeded(
+        self, openai_encoder, mocker
+    ):
         mock_sleep = mocker.patch("semantic_router.encoders.openai.sleep")
-        mocker.patch("openai.Embedding.create", side_effect=RateLimitError("rate limit exceeded"))
+        mocker.patch(
+            "openai.Embedding.create", side_effect=RateLimitError("rate limit exceeded")
+        )
 
         with pytest.raises(ValueError):
             openai_encoder(["sample text"])
@@ -90,7 +105,9 @@ class TestOpenAIEncoder:
         mock_sleep.assert_any_call(8)  # 2**3
         mock_sleep.assert_any_call(16)  # 2**4
 
-    def test_call_method_rate_limit_error__exponential_backoff_successful(self, openai_encoder, mocker):
+    def test_call_method_rate_limit_error__exponential_backoff_successful(
+        self, openai_encoder, mocker
+    ):
         mock_sleep = mocker.patch("semantic_router.encoders.openai.sleep")
         mocker.patch(
             "openai.Embedding.create",
diff --git a/tests/test_layer.py b/tests/test_layer.py
index 63209de38acd5e6bd035ebd55c248b6be7737bff..96e06a086469b78c03dde08ff4508c33ea548095 100644
--- a/tests/test_layer.py
+++ b/tests/test_layer.py
@@ -1,8 +1,8 @@
 import pytest
 
 from semantic_router.encoders import BaseEncoder, CohereEncoder, OpenAIEncoder
-from semantic_router.schema import Decision
 from semantic_router.layer import DecisionLayer  # Replace with the actual module name
+from semantic_router.schema import Decision
 
 
 def mock_encoder_call(utterances):
diff --git a/tests/test_schema.py b/tests/test_schema.py
index 0088c358bf4043933560d14759373c312e143145..2563bf0bc21b7769f96d1119fdc29b6afab66d1a 100644
--- a/tests/test_schema.py
+++ b/tests/test_schema.py
@@ -1,5 +1,13 @@
 import pytest
-from semantic_router.schema import Decision, Encoder, EncoderType, OpenAIEncoder, CohereEncoder, SemanticSpace
+
+from semantic_router.schema import (
+    CohereEncoder,
+    Decision,
+    Encoder,
+    EncoderType,
+    OpenAIEncoder,
+    SemanticSpace,
+)
 
 
 class TestEncoderDataclass:
@@ -25,7 +33,10 @@ class TestEncoderDataclass:
 
     def test_encoder_call_method(self, mocker):
         mocker.patch.dict("os.environ", {"OPENAI_API_KEY": "test"})
-        mocker.patch("semantic_router.encoders.openai.OpenAIEncoder.__call__", return_value=[0.1, 0.2, 0.3])
+        mocker.patch(
+            "semantic_router.encoders.openai.OpenAIEncoder.__call__",
+            return_value=[0.1, 0.2, 0.3],
+        )
         encoder = Encoder(type="openai", name="test-engine")
         result = encoder(["test"])
         assert result == [0.1, 0.2, 0.3]
@@ -38,7 +49,9 @@ class TestSemanticSpaceDataclass:
         assert semantic_space.decisions == []
 
     def test_semanticspace_add_decision(self):
-        decision = Decision(name="test", utterances=["hello", "hi"], description="greeting")
+        decision = Decision(
+            name="test", utterances=["hello", "hi"], description="greeting"
+        )
         semantic_space = SemanticSpace()
         semantic_space.add(decision)