diff --git a/coverage.xml b/coverage.xml
index 3c9c2e7c7acc2890d77de9dd322f29415a4d39aa..612cac462cea738d5f3a86626f804df04a1c6141 100644
--- a/coverage.xml
+++ b/coverage.xml
@@ -1,5 +1,5 @@
 <?xml version="1.0" ?>
-<coverage version="7.3.2" timestamp="1702458856280" lines-valid="311" lines-covered="311" line-rate="1" branches-covered="0" branches-valid="0" branch-rate="0" complexity="0">
+<coverage version="7.3.2" timestamp="1702461637601" lines-valid="315" lines-covered="315" line-rate="1" branches-covered="0" branches-valid="0" branch-rate="0" complexity="0">
 	<!-- Generated by coverage.py: https://coverage.readthedocs.io/en/7.3.2 -->
 	<!-- Based on https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd -->
 	<sources>
@@ -12,10 +12,11 @@
 					<methods/>
 					<lines>
 						<line number="1" hits="1"/>
-						<line number="3" hits="1"/>
+						<line number="2" hits="1"/>
+						<line number="4" hits="1"/>
 					</lines>
 				</class>
-				<class name="layer.py" filename="layer.py" complexity="0" line-rate="1" branch-rate="0">
+				<class name="hybrid_layer.py" filename="hybrid_layer.py" complexity="0" line-rate="1" branch-rate="0">
 					<methods/>
 					<lines>
 						<line number="1" hits="1"/>
@@ -23,149 +24,157 @@
 						<line number="3" hits="1"/>
 						<line number="5" hits="1"/>
 						<line number="11" hits="1"/>
-						<line number="12" hits="1"/>
+						<line number="14" hits="1"/>
 						<line number="15" hits="1"/>
 						<line number="16" hits="1"/>
 						<line number="17" hits="1"/>
 						<line number="18" hits="1"/>
 						<line number="20" hits="1"/>
-						<line number="21" hits="1"/>
 						<line number="23" hits="1"/>
 						<line number="24" hits="1"/>
 						<line number="25" hits="1"/>
-						<line number="26" hits="1"/>
+						<line number="27" hits="1"/>
 						<line number="28" hits="1"/>
+						<line number="29" hits="1"/>
 						<line number="30" hits="1"/>
 						<line number="32" hits="1"/>
 						<line number="34" hits="1"/>
-						<line number="35" hits="1"/>
 						<line number="36" hits="1"/>
 						<line number="37" hits="1"/>
-						<line number="38" hits="1"/>
 						<line number="39" hits="1"/>
+						<line number="40" hits="1"/>
 						<line number="41" hits="1"/>
+						<line number="42" hits="1"/>
+						<line number="43" hits="1"/>
+						<line number="44" hits="1"/>
 						<line number="46" hits="1"/>
 						<line number="48" hits="1"/>
+						<line number="49" hits="1"/>
 						<line number="51" hits="1"/>
-						<line number="52" hits="1"/>
+						<line number="53" hits="1"/>
 						<line number="54" hits="1"/>
-						<line number="55" hits="1"/>
-						<line number="57" hits="1"/>
-						<line number="58" hits="1"/>
+						<line number="59" hits="1"/>
 						<line number="60" hits="1"/>
 						<line number="61" hits="1"/>
 						<line number="63" hits="1"/>
+						<line number="64" hits="1"/>
 						<line number="65" hits="1"/>
-						<line number="68" hits="1"/>
-						<line number="71" hits="1"/>
+						<line number="69" hits="1"/>
+						<line number="70" hits="1"/>
+						<line number="72" hits="1"/>
 						<line number="74" hits="1"/>
 						<line number="75" hits="1"/>
-						<line number="82" hits="1"/>
-						<line number="83" hits="1"/>
-						<line number="89" hits="1"/>
+						<line number="77" hits="1"/>
+						<line number="79" hits="1"/>
+						<line number="84" hits="1"/>
+						<line number="85" hits="1"/>
+						<line number="87" hits="1"/>
+						<line number="88" hits="1"/>
+						<line number="90" hits="1"/>
+						<line number="92" hits="1"/>
 						<line number="94" hits="1"/>
 						<line number="95" hits="1"/>
-						<line number="97" hits="1"/>
+						<line number="96" hits="1"/>
+						<line number="98" hits="1"/>
 						<line number="99" hits="1"/>
 						<line number="100" hits="1"/>
-						<line number="102" hits="1"/>
+						<line number="101" hits="1"/>
 						<line number="103" hits="1"/>
+						<line number="104" hits="1"/>
+						<line number="105" hits="1"/>
 						<line number="107" hits="1"/>
-						<line number="109" hits="1"/>
+						<line number="108" hits="1"/>
 						<line number="110" hits="1"/>
-						<line number="111" hits="1"/>
 						<line number="112" hits="1"/>
-						<line number="113" hits="1"/>
 						<line number="114" hits="1"/>
 						<line number="115" hits="1"/>
-						<line number="117" hits="1"/>
+						<line number="116" hits="1"/>
+						<line number="118" hits="1"/>
+						<line number="119" hits="1"/>
 						<line number="120" hits="1"/>
+						<line number="121" hits="1"/>
+						<line number="122" hits="1"/>
 						<line number="123" hits="1"/>
+						<line number="124" hits="1"/>
 						<line number="126" hits="1"/>
-						<line number="128" hits="1"/>
 						<line number="129" hits="1"/>
 						<line number="130" hits="1"/>
-						<line number="132" hits="1"/>
+						<line number="133" hits="1"/>
 						<line number="135" hits="1"/>
 						<line number="136" hits="1"/>
 						<line number="137" hits="1"/>
-						<line number="138" hits="1"/>
 						<line number="139" hits="1"/>
-						<line number="141" hits="1"/>
-						<line number="144" hits="1"/>
-						<line number="145" hits="1"/>
-						<line number="146" hits="1"/>
-						<line number="148" hits="1"/>
-						<line number="149" hits="1"/>
-						<line number="150" hits="1"/>
-						<line number="151" hits="1"/>
-						<line number="153" hits="1"/>
-						<line number="155" hits="1"/>
-						<line number="157" hits="1"/>
-						<line number="158" hits="1"/>
-						<line number="160" hits="1"/>
-						<line number="161" hits="1"/>
-						<line number="162" hits="1"/>
-						<line number="163" hits="1"/>
-						<line number="164" hits="1"/>
-						<line number="165" hits="1"/>
-						<line number="167" hits="1"/>
-						<line number="169" hits="1"/>
-						<line number="170" hits="1"/>
-						<line number="172" hits="1"/>
-						<line number="174" hits="1"/>
-						<line number="175" hits="1"/>
-						<line number="180" hits="1"/>
-						<line number="181" hits="1"/>
-						<line number="182" hits="1"/>
-						<line number="184" hits="1"/>
-						<line number="185" hits="1"/>
-						<line number="186" hits="1"/>
-						<line number="190" hits="1"/>
-						<line number="191" hits="1"/>
-						<line number="193" hits="1"/>
-						<line number="195" hits="1"/>
-						<line number="196" hits="1"/>
-						<line number="198" hits="1"/>
-						<line number="200" hits="1"/>
-						<line number="205" hits="1"/>
-						<line number="206" hits="1"/>
-						<line number="208" hits="1"/>
-						<line number="209" hits="1"/>
-						<line number="211" hits="1"/>
-						<line number="213" hits="1"/>
-						<line number="215" hits="1"/>
-						<line number="216" hits="1"/>
-						<line number="217" hits="1"/>
-						<line number="219" hits="1"/>
-						<line number="220" hits="1"/>
-						<line number="221" hits="1"/>
-						<line number="222" hits="1"/>
-						<line number="224" hits="1"/>
-						<line number="225" hits="1"/>
-						<line number="226" hits="1"/>
-						<line number="228" hits="1"/>
-						<line number="229" hits="1"/>
-						<line number="233" hits="1"/>
-						<line number="235" hits="1"/>
-						<line number="237" hits="1"/>
-						<line number="238" hits="1"/>
-						<line number="239" hits="1"/>
-						<line number="241" hits="1"/>
-						<line number="242" hits="1"/>
-						<line number="243" hits="1"/>
-						<line number="244" hits="1"/>
-						<line number="245" hits="1"/>
-						<line number="246" hits="1"/>
-						<line number="247" hits="1"/>
-						<line number="249" hits="1"/>
-						<line number="252" hits="1"/>
-						<line number="255" hits="1"/>
-						<line number="258" hits="1"/>
-						<line number="260" hits="1"/>
-						<line number="261" hits="1"/>
-						<line number="262" hits="1"/>
-						<line number="264" hits="1"/>
+					</lines>
+				</class>
+				<class name="layer.py" filename="layer.py" complexity="0" line-rate="1" branch-rate="0">
+					<methods/>
+					<lines>
+						<line number="1" hits="1"/>
+						<line number="3" hits="1"/>
+						<line number="8" hits="1"/>
+						<line number="9" hits="1"/>
+						<line number="12" hits="1"/>
+						<line number="13" hits="1"/>
+						<line number="14" hits="1"/>
+						<line number="15" hits="1"/>
+						<line number="17" hits="1"/>
+						<line number="18" hits="1"/>
+						<line number="20" hits="1"/>
+						<line number="21" hits="1"/>
+						<line number="22" hits="1"/>
+						<line number="23" hits="1"/>
+						<line number="25" hits="1"/>
+						<line number="27" hits="1"/>
+						<line number="29" hits="1"/>
+						<line number="31" hits="1"/>
+						<line number="32" hits="1"/>
+						<line number="33" hits="1"/>
+						<line number="34" hits="1"/>
+						<line number="35" hits="1"/>
+						<line number="36" hits="1"/>
+						<line number="38" hits="1"/>
+						<line number="40" hits="1"/>
+						<line number="42" hits="1"/>
+						<line number="45" hits="1"/>
+						<line number="46" hits="1"/>
+						<line number="48" hits="1"/>
+						<line number="49" hits="1"/>
+						<line number="51" hits="1"/>
+						<line number="52" hits="1"/>
+						<line number="54" hits="1"/>
+						<line number="55" hits="1"/>
+						<line number="57" hits="1"/>
+						<line number="59" hits="1"/>
+						<line number="62" hits="1"/>
+						<line number="65" hits="1"/>
+						<line number="66" hits="1"/>
+						<line number="67" hits="1"/>
+						<line number="74" hits="1"/>
+						<line number="75" hits="1"/>
+						<line number="81" hits="1"/>
+						<line number="86" hits="1"/>
+						<line number="87" hits="1"/>
+						<line number="89" hits="1"/>
+						<line number="91" hits="1"/>
+						<line number="92" hits="1"/>
+						<line number="94" hits="1"/>
+						<line number="95" hits="1"/>
+						<line number="97" hits="1"/>
+						<line number="99" hits="1"/>
+						<line number="100" hits="1"/>
+						<line number="101" hits="1"/>
+						<line number="102" hits="1"/>
+						<line number="103" hits="1"/>
+						<line number="104" hits="1"/>
+						<line number="105" hits="1"/>
+						<line number="107" hits="1"/>
+						<line number="110" hits="1"/>
+						<line number="111" hits="1"/>
+						<line number="114" hits="1"/>
+						<line number="116" hits="1"/>
+						<line number="117" hits="1"/>
+						<line number="118" hits="1"/>
+						<line number="120" hits="1"/>
 					</lines>
 				</class>
 				<class name="linear.py" filename="linear.py" complexity="0" line-rate="1" branch-rate="0">
diff --git a/docs/examples/hybrid-layer.ipynb b/docs/examples/hybrid-layer.ipynb
index 98fccf6972026d62e6d53a3c96492ca90287207e..8b1da5ae75f0a8a9572996b8e416a282d2c48f1b 100644
--- a/docs/examples/hybrid-layer.ipynb
+++ b/docs/examples/hybrid-layer.ipynb
@@ -11,7 +11,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The Hybrid Layer in the Semantic Router library can improve decision making performance particularly for niche use-cases that contain specific terminology, such as finance or medical. It helps us provide more importance to decision making based on the keywords contained in our utterances and user queries."
+    "The Hybrid Layer in the Semantic Router library can improve routing performance particularly for niche use-cases that contain specific terminology, such as finance or medical. It helps us provide more importance to routing decisions based on the keywords contained in our utterances and user queries."
    ]
   },
   {
@@ -34,36 +34,37 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "!pip install -qU semantic-router==0.0.5"
+    "!pip install -qU semantic-router==0.0.6"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "We start by defining a dictionary mapping decisions to example phrases that should trigger those decisions."
+    "We start by defining a dictionary mapping routes to example phrases that should trigger those routes."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "ImportError",
+     "evalue": "cannot import name 'Route' from 'semantic_router.schema' (/Users/jakit/customers/aurelio/semantic-router/.venv/lib/python3.11/site-packages/semantic_router/schema.py)",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mImportError\u001b[0m                               Traceback (most recent call last)",
+      "\u001b[1;32m/Users/jakit/customers/aurelio/semantic-router/docs/examples/hybrid-layer.ipynb Cell 7\u001b[0m line \u001b[0;36m1\n\u001b[0;32m----> <a href='vscode-notebook-cell:/Users/jakit/customers/aurelio/semantic-router/docs/examples/hybrid-layer.ipynb#X10sZmlsZQ%3D%3D?line=0'>1</a>\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39msemantic_router\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mschema\u001b[39;00m \u001b[39mimport\u001b[39;00m Route\n\u001b[1;32m      <a href='vscode-notebook-cell:/Users/jakit/customers/aurelio/semantic-router/docs/examples/hybrid-layer.ipynb#X10sZmlsZQ%3D%3D?line=2'>3</a>\u001b[0m politics \u001b[39m=\u001b[39m Route(\n\u001b[1;32m      <a href='vscode-notebook-cell:/Users/jakit/customers/aurelio/semantic-router/docs/examples/hybrid-layer.ipynb#X10sZmlsZQ%3D%3D?line=3'>4</a>\u001b[0m     name\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mpolitics\u001b[39m\u001b[39m\"\u001b[39m,\n\u001b[1;32m      <a href='vscode-notebook-cell:/Users/jakit/customers/aurelio/semantic-router/docs/examples/hybrid-layer.ipynb#X10sZmlsZQ%3D%3D?line=4'>5</a>\u001b[0m     utterances\u001b[39m=\u001b[39m[\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m     <a href='vscode-notebook-cell:/Users/jakit/customers/aurelio/semantic-router/docs/examples/hybrid-layer.ipynb#X10sZmlsZQ%3D%3D?line=11'>12</a>\u001b[0m     ],\n\u001b[1;32m     <a href='vscode-notebook-cell:/Users/jakit/customers/aurelio/semantic-router/docs/examples/hybrid-layer.ipynb#X10sZmlsZQ%3D%3D?line=12'>13</a>\u001b[0m )\n",
+      "\u001b[0;31mImportError\u001b[0m: cannot import name 'Route' from 'semantic_router.schema' (/Users/jakit/customers/aurelio/semantic-router/.venv/lib/python3.11/site-packages/semantic_router/schema.py)"
+     ]
+    }
+   ],
+   "source": [
+    "from semantic_router.schema import Route\n",
     "\n",
-    "os.environ[\"COHERE_API_KEY\"] = \"BQBiUqqjDRsYl1QKKux4JsqKdDkjyInS5T3Z3eJP\""
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from semantic_router.schema import Decision\n",
-    "\n",
-    "politics = Decision(\n",
+    "politics = Route(\n",
     "    name=\"politics\",\n",
     "    utterances=[\n",
     "        \"isn't politics the best thing ever\",\n",
@@ -89,7 +90,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "chitchat = Decision(\n",
+    "chitchat = Route(\n",
     "    name=\"chitchat\",\n",
     "    utterances=[\n",
     "        \"how's the weather today?\",\n",
@@ -100,7 +101,7 @@
     "    ],\n",
     ")\n",
     "\n",
-    "chitchat = Decision(\n",
+    "chitchat = Route(\n",
     "    name=\"chitchat\",\n",
     "    utterances=[\n",
     "        \"how's the weather today?\",\n",
@@ -111,7 +112,7 @@
     "    ],\n",
     ")\n",
     "\n",
-    "decisions = [politics, chitchat]"
+    "routes = [politics, chitchat]"
    ]
   },
   {
@@ -127,6 +128,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "import os\n",
     "from semantic_router.encoders import CohereEncoder\n",
     "from getpass import getpass\n",
     "\n",
@@ -141,7 +143,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Now we define the `DecisionLayer`. When called, the decision layer will consume text (a query) and output the category (`Decision`) it belongs to — to initialize a `DecisionLayer` we need our `encoder` model and a list of `decisions`."
+    "Now we define the `RouteLayer`. When called, the route layer will consume text (a query) and output the category (`Route`) it belongs to — to initialize a `RouteLayer` we need our `encoder` model and a list of `routes`."
    ]
   },
   {
@@ -150,9 +152,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from semantic_router.layer import HybridDecisionLayer\n",
+    "from semantic_router.hybrid_layer import HybridRouteLayer\n",
     "\n",
-    "dl = HybridDecisionLayer(encoder=encoder, decisions=decisions)"
+    "dl = HybridRouteLayer(encoder=encoder, routes=routes)"
    ]
   },
   {
@@ -197,7 +199,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.5"
+   "version": "3.11.3"
   }
  },
  "nbformat": 4,
diff --git a/semantic_router/__init__.py b/semantic_router/__init__.py
index b10cc687b0924faf516432e89f87bc1f95bb0c9e..0c445bea3ff4efd8f3aa8950e2c772277d93b20c 100644
--- a/semantic_router/__init__.py
+++ b/semantic_router/__init__.py
@@ -1,3 +1,4 @@
-from .layer import RouteLayer, HybridRouteLayer
+from .hybrid_layer import HybridRouteLayer
+from .layer import RouteLayer
 
 __all__ = ["RouteLayer", "HybridRouteLayer"]
diff --git a/semantic_router/hybrid_layer.py b/semantic_router/hybrid_layer.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e9508d6a50862cc523d80e802716f71adeee99d
--- /dev/null
+++ b/semantic_router/hybrid_layer.py
@@ -0,0 +1,139 @@
+import numpy as np
+from numpy.linalg import norm
+from tqdm.auto import tqdm
+
+from semantic_router.encoders import (
+    BaseEncoder,
+    BM25Encoder,
+    CohereEncoder,
+    OpenAIEncoder,
+)
+from semantic_router.schema import Route
+
+
+class HybridRouteLayer:
+    index = None
+    sparse_index = None
+    categories = None
+    score_threshold = 0.82
+
+    def __init__(
+        self, encoder: BaseEncoder, routes: list[Route] = [], alpha: float = 0.3
+    ):
+        self.encoder = encoder
+        self.sparse_encoder = BM25Encoder()
+        self.alpha = alpha
+        # decide on default threshold based on encoder
+        if isinstance(encoder, OpenAIEncoder):
+            self.score_threshold = 0.82
+        elif isinstance(encoder, CohereEncoder):
+            self.score_threshold = 0.3
+        else:
+            self.score_threshold = 0.82
+        # if routes list has been passed, we initialize index now
+        if routes:
+            # initialize index now
+            for route in tqdm(routes):
+                self._add_route(route=route)
+
+    def __call__(self, text: str) -> str | None:
+        results = self._query(text)
+        top_class, top_class_scores = self._semantic_classify(results)
+        passed = self._pass_threshold(top_class_scores, self.score_threshold)
+        if passed:
+            return top_class
+        else:
+            return None
+
+    def add(self, route: Route):
+        self._add_route(route=route)
+
+    def _add_route(self, route: Route):
+        # create embeddings
+        dense_embeds = np.array(self.encoder(route.utterances))  # * self.alpha
+        sparse_embeds = np.array(
+            self.sparse_encoder(route.utterances)
+        )  # * (1 - self.alpha)
+
+        # create route array
+        if self.categories is None:
+            self.categories = np.array([route.name] * len(route.utterances))
+            self.utterances = np.array(route.utterances)
+        else:
+            str_arr = np.array([route.name] * len(route.utterances))
+            self.categories = np.concatenate([self.categories, str_arr])
+            self.utterances = np.concatenate(
+                [self.utterances, np.array(route.utterances)]
+            )
+        # create utterance array (the dense index)
+        if self.index is None:
+            self.index = dense_embeds
+        else:
+            self.index = np.concatenate([self.index, dense_embeds])
+        # create sparse utterance array
+        if self.sparse_index is None:
+            self.sparse_index = sparse_embeds
+        else:
+            self.sparse_index = np.concatenate([self.sparse_index, sparse_embeds])
+
+    def _query(self, text: str, top_k: int = 5):
+        """Given some text, encodes and searches the index vector space to
+        retrieve the top_k most similar records.
+        """
+        # create dense query vector
+        xq_d = np.array(self.encoder([text]))
+        xq_d = np.squeeze(xq_d)  # Reduce to 1d array.
+        # create sparse query vector
+        xq_s = np.array(self.sparse_encoder([text]))
+        xq_s = np.squeeze(xq_s)
+        # convex scaling
+        xq_d, xq_s = self._convex_scaling(xq_d, xq_s)
+
+        if self.index is not None:
+            # calculate dense vec similarity
+            index_norm = norm(self.index, axis=1)
+            xq_d_norm = norm(xq_d.T)
+            sim_d = np.dot(self.index, xq_d.T) / (index_norm * xq_d_norm)
+            # calculate sparse vec similarity
+            sparse_norm = norm(self.sparse_index, axis=1)
+            xq_s_norm = norm(xq_s.T)
+            sim_s = np.dot(self.sparse_index, xq_s.T) / (sparse_norm * xq_s_norm)
+            total_sim = sim_d + sim_s
+            # get indices of top_k records
+            top_k = min(top_k, total_sim.shape[0])
+            idx = np.argpartition(total_sim, -top_k)[-top_k:]
+            scores = total_sim[idx]
+            # get the utterance categories (route names)
+            routes = self.categories[idx] if self.categories is not None else []
+            return [{"route": d, "score": s.item()} for d, s in zip(routes, scores)]
+        else:
+            return []
+
+    def _convex_scaling(self, dense: list[float], sparse: list[float]):
+        # scale sparse and dense vecs
+        dense = np.array(dense) * self.alpha
+        sparse = np.array(sparse) * (1 - self.alpha)
+        return dense, sparse
+
+    def _semantic_classify(self, query_results: list[dict]) -> tuple[str, list[float]]:
+        scores_by_class = {}
+        for result in query_results:
+            score = result["score"]
+            route = result["route"]
+            if route in scores_by_class:
+                scores_by_class[route].append(score)
+            else:
+                scores_by_class[route] = [score]
+
+        # Calculate total score for each class
+        total_scores = {route: sum(scores) for route, scores in scores_by_class.items()}
+        top_class = max(total_scores, key=lambda x: total_scores[x], default=None)
+
+        # Return the top class and its associated scores
+        return str(top_class), scores_by_class.get(top_class, [])
+
+    def _pass_threshold(self, scores: list[float], threshold: float) -> bool:
+        if scores:
+            return max(scores) > threshold
+        else:
+            return False
diff --git a/semantic_router/layer.py b/semantic_router/layer.py
index 591e8f08f60108adc727a309e4d22aa3415fb640..efa4862d1766e4bd677101373e71d8be95897c38 100644
--- a/semantic_router/layer.py
+++ b/semantic_router/layer.py
@@ -1,10 +1,7 @@
 import numpy as np
-from numpy.linalg import norm
-from tqdm.auto import tqdm
 
 from semantic_router.encoders import (
     BaseEncoder,
-    BM25Encoder,
     CohereEncoder,
     OpenAIEncoder,
 )
@@ -121,131 +118,3 @@ class RouteLayer:
             return max(scores) > threshold
         else:
             return False
-
-
-class HybridRouteLayer:
-    index = None
-    sparse_index = None
-    categories = None
-    score_threshold = 0.82
-
-    def __init__(
-        self, encoder: BaseEncoder, routes: list[Route] = [], alpha: float = 0.3
-    ):
-        self.encoder = encoder
-        self.sparse_encoder = BM25Encoder()
-        self.alpha = alpha
-        # decide on default threshold based on encoder
-        if isinstance(encoder, OpenAIEncoder):
-            self.score_threshold = 0.82
-        elif isinstance(encoder, CohereEncoder):
-            self.score_threshold = 0.3
-        else:
-            self.score_threshold = 0.82
-        # if routes list has been passed, we initialize index now
-        if routes:
-            # initialize index now
-            for route in tqdm(routes):
-                self._add_route(route=route)
-
-    def __call__(self, text: str) -> str | None:
-        results = self._query(text)
-        top_class, top_class_scores = self._semantic_classify(results)
-        passed = self._pass_threshold(top_class_scores, self.score_threshold)
-        if passed:
-            return top_class
-        else:
-            return None
-
-    def add(self, route: Route):
-        self._add_route(route=route)
-
-    def _add_route(self, route: Route):
-        # create embeddings
-        dense_embeds = np.array(self.encoder(route.utterances))  # * self.alpha
-        sparse_embeds = np.array(
-            self.sparse_encoder(route.utterances)
-        )  # * (1 - self.alpha)
-
-        # create route array
-        if self.categories is None:
-            self.categories = np.array([route.name] * len(route.utterances))
-            self.utterances = np.array(route.utterances)
-        else:
-            str_arr = np.array([route.name] * len(route.utterances))
-            self.categories = np.concatenate([self.categories, str_arr])
-            self.utterances = np.concatenate(
-                [self.utterances, np.array(route.utterances)]
-            )
-        # create utterance array (the dense index)
-        if self.index is None:
-            self.index = dense_embeds
-        else:
-            self.index = np.concatenate([self.index, dense_embeds])
-        # create sparse utterance array
-        if self.sparse_index is None:
-            self.sparse_index = sparse_embeds
-        else:
-            self.sparse_index = np.concatenate([self.sparse_index, sparse_embeds])
-
-    def _query(self, text: str, top_k: int = 5):
-        """Given some text, encodes and searches the index vector space to
-        retrieve the top_k most similar records.
-        """
-        # create dense query vector
-        xq_d = np.array(self.encoder([text]))
-        xq_d = np.squeeze(xq_d)  # Reduce to 1d array.
-        # create sparse query vector
-        xq_s = np.array(self.sparse_encoder([text]))
-        xq_s = np.squeeze(xq_s)
-        # convex scaling
-        xq_d, xq_s = self._convex_scaling(xq_d, xq_s)
-
-        if self.index is not None:
-            # calculate dense vec similarity
-            index_norm = norm(self.index, axis=1)
-            xq_d_norm = norm(xq_d.T)
-            sim_d = np.dot(self.index, xq_d.T) / (index_norm * xq_d_norm)
-            # calculate sparse vec similarity
-            sparse_norm = norm(self.sparse_index, axis=1)
-            xq_s_norm = norm(xq_s.T)
-            sim_s = np.dot(self.sparse_index, xq_s.T) / (sparse_norm * xq_s_norm)
-            total_sim = sim_d + sim_s
-            # get indices of top_k records
-            top_k = min(top_k, total_sim.shape[0])
-            idx = np.argpartition(total_sim, -top_k)[-top_k:]
-            scores = total_sim[idx]
-            # get the utterance categories (route names)
-            routes = self.categories[idx] if self.categories is not None else []
-            return [{"route": d, "score": s.item()} for d, s in zip(routes, scores)]
-        else:
-            return []
-
-    def _convex_scaling(self, dense: list[float], sparse: list[float]):
-        # scale sparse and dense vecs
-        dense = np.array(dense) * self.alpha
-        sparse = np.array(sparse) * (1 - self.alpha)
-        return dense, sparse
-
-    def _semantic_classify(self, query_results: list[dict]) -> tuple[str, list[float]]:
-        scores_by_class = {}
-        for result in query_results:
-            score = result["score"]
-            route = result["route"]
-            if route in scores_by_class:
-                scores_by_class[route].append(score)
-            else:
-                scores_by_class[route] = [score]
-
-        # Calculate total score for each class
-        total_scores = {route: sum(scores) for route, scores in scores_by_class.items()}
-        top_class = max(total_scores, key=lambda x: total_scores[x], default=None)
-
-        # Return the top class and its associated scores
-        return str(top_class), scores_by_class.get(top_class, [])
-
-    def _pass_threshold(self, scores: list[float], threshold: float) -> bool:
-        if scores:
-            return max(scores) > threshold
-        else:
-            return False
diff --git a/tests/unit/test_hybrid_layer.py b/tests/unit/test_hybrid_layer.py
new file mode 100644
index 0000000000000000000000000000000000000000..94720cd8d9567b19e78a016a03c2cc90b8f62d40
--- /dev/null
+++ b/tests/unit/test_hybrid_layer.py
@@ -0,0 +1,118 @@
+import pytest
+
+from semantic_router.encoders import BaseEncoder, CohereEncoder, OpenAIEncoder
+from semantic_router.hybrid_layer import HybridRouteLayer
+from semantic_router.schema import Route
+
+
+def mock_encoder_call(utterances):
+    # Define a mapping of utterances to return values
+    mock_responses = {
+        "Hello": [0.1, 0.2, 0.3],
+        "Hi": [0.4, 0.5, 0.6],
+        "Goodbye": [0.7, 0.8, 0.9],
+        "Bye": [1.0, 1.1, 1.2],
+        "Au revoir": [1.3, 1.4, 1.5],
+    }
+    return [mock_responses.get(u, [0, 0, 0]) for u in utterances]
+
+
+@pytest.fixture
+def base_encoder():
+    return BaseEncoder(name="test-encoder")
+
+
+@pytest.fixture
+def cohere_encoder(mocker):
+    mocker.patch.object(CohereEncoder, "__call__", side_effect=mock_encoder_call)
+    return CohereEncoder(name="test-cohere-encoder", cohere_api_key="test_api_key")
+
+
+@pytest.fixture
+def openai_encoder(mocker):
+    mocker.patch.object(OpenAIEncoder, "__call__", side_effect=mock_encoder_call)
+    return OpenAIEncoder(name="test-openai-encoder", openai_api_key="test_api_key")
+
+
+@pytest.fixture
+def routes():
+    return [
+        Route(name="Route 1", utterances=["Hello", "Hi"]),
+        Route(name="Route 2", utterances=["Goodbye", "Bye", "Au revoir"]),
+    ]
+
+
+class TestHybridRouteLayer:
+    def test_initialization(self, openai_encoder, routes):
+        route_layer = HybridRouteLayer(encoder=openai_encoder, routes=routes)
+        assert route_layer.index is not None and route_layer.categories is not None
+        assert route_layer.score_threshold == 0.82
+        assert len(route_layer.index) == 5
+        assert len(set(route_layer.categories)) == 2
+
+    def test_initialization_different_encoders(self, cohere_encoder, openai_encoder):
+        route_layer_cohere = HybridRouteLayer(encoder=cohere_encoder)
+        assert route_layer_cohere.score_threshold == 0.3
+
+        route_layer_openai = HybridRouteLayer(encoder=openai_encoder)
+        assert route_layer_openai.score_threshold == 0.82
+
+    def test_add_route(self, openai_encoder):
+        route_layer = HybridRouteLayer(encoder=openai_encoder)
+        route = Route(name="Route 3", utterances=["Yes", "No"])
+        route_layer.add(route)
+        assert route_layer.index is not None and route_layer.categories is not None
+        assert len(route_layer.index) == 2
+        assert len(set(route_layer.categories)) == 1
+
+    def test_add_multiple_routes(self, openai_encoder, routes):
+        route_layer = HybridRouteLayer(encoder=openai_encoder)
+        for route in routes:
+            route_layer.add(route)
+        assert route_layer.index is not None and route_layer.categories is not None
+        assert len(route_layer.index) == 5
+        assert len(set(route_layer.categories)) == 2
+
+    def test_query_and_classification(self, openai_encoder, routes):
+        route_layer = HybridRouteLayer(encoder=openai_encoder, routes=routes)
+        query_result = route_layer("Hello")
+        assert query_result in ["Route 1", "Route 2"]
+
+    def test_query_with_no_index(self, openai_encoder):
+        route_layer = HybridRouteLayer(encoder=openai_encoder)
+        assert route_layer("Anything") is None
+
+    def test_semantic_classify(self, openai_encoder, routes):
+        route_layer = HybridRouteLayer(encoder=openai_encoder, routes=routes)
+        classification, score = route_layer._semantic_classify(
+            [
+                {"route": "Route 1", "score": 0.9},
+                {"route": "Route 2", "score": 0.1},
+            ]
+        )
+        assert classification == "Route 1"
+        assert score == [0.9]
+
+    def test_semantic_classify_multiple_routes(self, openai_encoder, routes):
+        route_layer = HybridRouteLayer(encoder=openai_encoder, routes=routes)
+        classification, score = route_layer._semantic_classify(
+            [
+                {"route": "Route 1", "score": 0.9},
+                {"route": "Route 2", "score": 0.1},
+                {"route": "Route 1", "score": 0.8},
+            ]
+        )
+        assert classification == "Route 1"
+        assert score == [0.9, 0.8]
+
+    def test_pass_threshold(self, openai_encoder):
+        route_layer = HybridRouteLayer(encoder=openai_encoder)
+        assert not route_layer._pass_threshold([], 0.5)
+        assert route_layer._pass_threshold([0.6, 0.7], 0.5)
+
+    def test_failover_score_threshold(self, base_encoder):
+        route_layer = HybridRouteLayer(encoder=base_encoder)
+        assert route_layer.score_threshold == 0.82
+
+
+# Add more tests for edge cases and error handling as needed.
diff --git a/tests/unit/test_layer.py b/tests/unit/test_layer.py
index d5f698bee02065e7e5f703c8a19faec67dbdf6fc..66e0d53bb9350c77578682f9ea0742b1d3dfe0b2 100644
--- a/tests/unit/test_layer.py
+++ b/tests/unit/test_layer.py
@@ -1,12 +1,7 @@
 import pytest
 
 from semantic_router.encoders import BaseEncoder, CohereEncoder, OpenAIEncoder
-from semantic_router.layer import (
-    HybridRouteLayer,
-    RouteLayer,
-)
-
-# Replace with the actual module name
+from semantic_router.layer import RouteLayer
 from semantic_router.schema import Route
 
 
@@ -130,77 +125,4 @@ class TestRouteLayer:
         assert route_layer.score_threshold == 0.82
 
 
-class TestHybridRouteLayer:
-    def test_initialization(self, openai_encoder, routes):
-        route_layer = HybridRouteLayer(encoder=openai_encoder, routes=routes)
-        assert route_layer.index is not None and route_layer.categories is not None
-        assert route_layer.score_threshold == 0.82
-        assert len(route_layer.index) == 5
-        assert len(set(route_layer.categories)) == 2
-
-    def test_initialization_different_encoders(self, cohere_encoder, openai_encoder):
-        route_layer_cohere = HybridRouteLayer(encoder=cohere_encoder)
-        assert route_layer_cohere.score_threshold == 0.3
-
-        route_layer_openai = HybridRouteLayer(encoder=openai_encoder)
-        assert route_layer_openai.score_threshold == 0.82
-
-    def test_add_route(self, openai_encoder):
-        route_layer = HybridRouteLayer(encoder=openai_encoder)
-        route = Route(name="Route 3", utterances=["Yes", "No"])
-        route_layer.add(route)
-        assert route_layer.index is not None and route_layer.categories is not None
-        assert len(route_layer.index) == 2
-        assert len(set(route_layer.categories)) == 1
-
-    def test_add_multiple_routes(self, openai_encoder, routes):
-        route_layer = HybridRouteLayer(encoder=openai_encoder)
-        for route in routes:
-            route_layer.add(route)
-        assert route_layer.index is not None and route_layer.categories is not None
-        assert len(route_layer.index) == 5
-        assert len(set(route_layer.categories)) == 2
-
-    def test_query_and_classification(self, openai_encoder, routes):
-        route_layer = HybridRouteLayer(encoder=openai_encoder, routes=routes)
-        query_result = route_layer("Hello")
-        assert query_result in ["Route 1", "Route 2"]
-
-    def test_query_with_no_index(self, openai_encoder):
-        route_layer = HybridRouteLayer(encoder=openai_encoder)
-        assert route_layer("Anything") is None
-
-    def test_semantic_classify(self, openai_encoder, routes):
-        route_layer = HybridRouteLayer(encoder=openai_encoder, routes=routes)
-        classification, score = route_layer._semantic_classify(
-            [
-                {"route": "Route 1", "score": 0.9},
-                {"route": "Route 2", "score": 0.1},
-            ]
-        )
-        assert classification == "Route 1"
-        assert score == [0.9]
-
-    def test_semantic_classify_multiple_routes(self, openai_encoder, routes):
-        route_layer = HybridRouteLayer(encoder=openai_encoder, routes=routes)
-        classification, score = route_layer._semantic_classify(
-            [
-                {"route": "Route 1", "score": 0.9},
-                {"route": "Route 2", "score": 0.1},
-                {"route": "Route 1", "score": 0.8},
-            ]
-        )
-        assert classification == "Route 1"
-        assert score == [0.9, 0.8]
-
-    def test_pass_threshold(self, openai_encoder):
-        route_layer = HybridRouteLayer(encoder=openai_encoder)
-        assert not route_layer._pass_threshold([], 0.5)
-        assert route_layer._pass_threshold([0.6, 0.7], 0.5)
-
-    def test_failover_score_threshold(self, base_encoder):
-        route_layer = HybridRouteLayer(encoder=base_encoder)
-        assert route_layer.score_threshold == 0.82
-
-
 # Add more tests for edge cases and error handling as needed.
diff --git a/tests/unit/test_schema.py b/tests/unit/test_schema.py
index 7c954805bd0112fa7ce1e015aee716621faced12..f471755c35796d33ac9329a2ddb3a20816230cda 100644
--- a/tests/unit/test_schema.py
+++ b/tests/unit/test_schema.py
@@ -2,10 +2,10 @@ import pytest
 
 from semantic_router.schema import (
     CohereEncoder,
-    Route,
     Encoder,
     EncoderType,
     OpenAIEncoder,
+    Route,
     SemanticSpace,
 )
 
diff --git a/walkthrough.ipynb b/walkthrough.ipynb
index a4265e5a1dfc9b2dd5060d54b54add8e4cc67e6d..d31a88dc0da38dec04e019100bfc28815adf7906 100644
--- a/walkthrough.ipynb
+++ b/walkthrough.ipynb
@@ -128,7 +128,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from semantic_router.layer import RouteLayer\n",
+    "from semantic_router.router import RouteLayer\n",
     "\n",
     "dl = RouteLayer(encoder=encoder, routes=routes)"
    ]