diff --git a/README.md b/README.md
index 2db0e9b784c857b95cf8e215011c2b995edb6b7f..4ab443ce797c232d5846f4126be4fb081e034f61 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@
 [![Aurelio AI](https://pbs.twimg.com/profile_banners/1671498317455581184/1696285195/1500x500)](https://aurelio.ai)
 
 # Semantic Router
+
 <p>
 <img alt="GitHub Contributors" src="https://img.shields.io/github/contributors/aurelio-labs/semantic-router" />
 <img alt="GitHub Last Commit" src="https://img.shields.io/github/last-commit/aurelio-labs/semantic-router" />
@@ -24,7 +25,7 @@ pip install -qU semantic-router
 We begin by defining a set of `Route` objects. These are the decision paths that the semantic router can choose between. Let's define two simple routes for now, one for talk on _politics_ and another for _chitchat_:
 
 ```python
-from semantic_router.schema import Route
+from semantic_router.schemas.route import Route
 
 # we could use this as a guide for our chatbot to avoid political conversations
 politics = Route(
@@ -112,6 +113,6 @@ In this case, no decision could be made as we had no matches — so our decision
 
 ## 📚 Resources
 
-|                                                                                                                 |                            |
-| --------------------------------------------------------------------------------------------------------------- | -------------------------- |
+|                                                                                                                    |                            |
+| ------------------------------------------------------------------------------------------------------------------ | -------------------------- |
 | 🏃[Walkthrough](https://colab.research.google.com/github/aurelio-labs/semantic-router/blob/main/walkthrough.ipynb) | Quickstart Python notebook |
diff --git a/docs/examples/function_calling.ipynb b/docs/examples/function_calling.ipynb
index 5d3be2fb9072d3e25d50a6c7456500f7d72083bc..f487f77daaa9c00efe87411c0e32c06adb6f067a 100644
--- a/docs/examples/function_calling.ipynb
+++ b/docs/examples/function_calling.ipynb
@@ -1,647 +1,647 @@
 {
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Define LLMs"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 12,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# OpenAI\n",
-    "import openai\n",
-    "from semantic_router.utils.logger import logger\n",
-    "\n",
-    "\n",
-    "# Docs # https://platform.openai.com/docs/guides/function-calling\n",
-    "def llm_openai(prompt: str, model: str = \"gpt-4\") -> str:\n",
-    "    try:\n",
-    "        logger.info(f\"Calling {model} model\")\n",
-    "        response = openai.chat.completions.create(\n",
-    "            model=model,\n",
-    "            messages=[\n",
-    "                {\"role\": \"system\", \"content\": f\"{prompt}\"},\n",
-    "            ],\n",
-    "        )\n",
-    "        ai_message = response.choices[0].message.content\n",
-    "        if not ai_message:\n",
-    "            raise Exception(\"AI message is empty\", ai_message)\n",
-    "        logger.info(f\"AI message: {ai_message}\")\n",
-    "        return ai_message\n",
-    "    except Exception as e:\n",
-    "        raise Exception(\"Failed to call OpenAI API\", e)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 13,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Mistral\n",
-    "import os\n",
-    "import requests\n",
-    "\n",
-    "# Docs https://huggingface.co/docs/transformers/main_classes/text_generation\n",
-    "HF_API_TOKEN = os.getenv(\"HF_API_TOKEN\")\n",
-    "\n",
-    "\n",
-    "def llm_mistral(prompt: str) -> str:\n",
-    "    api_url = \"https://z5t4cuhg21uxfmc3.us-east-1.aws.endpoints.huggingface.cloud/\"\n",
-    "    headers = {\n",
-    "        \"Authorization\": f\"Bearer {HF_API_TOKEN}\",\n",
-    "        \"Content-Type\": \"application/json\",\n",
-    "    }\n",
-    "\n",
-    "    logger.info(\"Calling Mistral model\")\n",
-    "    response = requests.post(\n",
-    "        api_url,\n",
-    "        headers=headers,\n",
-    "        json={\n",
-    "            \"inputs\": f\"You are a helpful assistant, user query: {prompt}\",\n",
-    "            \"parameters\": {\n",
-    "                \"max_new_tokens\": 200,\n",
-    "                \"temperature\": 0.01,\n",
-    "                \"num_beams\": 5,\n",
-    "                \"num_return_sequences\": 1,\n",
-    "            },\n",
-    "        },\n",
-    "    )\n",
-    "    if response.status_code != 200:\n",
-    "        raise Exception(\"Failed to call HuggingFace API\", response.text)\n",
-    "\n",
-    "    ai_message = response.json()[0][\"generated_text\"]\n",
-    "    if not ai_message:\n",
-    "        raise Exception(\"AI message is empty\", ai_message)\n",
-    "    logger.info(f\"AI message: {ai_message}\")\n",
-    "    return ai_message"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Now we need to generate config from function schema using LLM"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 14,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import inspect\n",
-    "from typing import Any\n",
-    "\n",
-    "\n",
-    "def get_function_schema(function) -> dict[str, Any]:\n",
-    "    schema = {\n",
-    "        \"name\": function.__name__,\n",
-    "        \"description\": str(inspect.getdoc(function)),\n",
-    "        \"signature\": str(inspect.signature(function)),\n",
-    "        \"output\": str(\n",
-    "            inspect.signature(function).return_annotation,\n",
-    "        ),\n",
-    "    }\n",
-    "    return schema"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 15,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import json\n",
-    "\n",
-    "\n",
-    "def is_valid_config(route_config_str: str) -> bool:\n",
-    "    try:\n",
-    "        output_json = json.loads(route_config_str)\n",
-    "        return all(key in output_json for key in [\"name\", \"utterances\"])\n",
-    "    except json.JSONDecodeError:\n",
-    "        return False"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 16,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import json\n",
-    "\n",
-    "from semantic_router.utils.logger import logger\n",
-    "\n",
-    "\n",
-    "def generate_route(function) -> dict:\n",
-    "    logger.info(\"Generating config...\")\n",
-    "\n",
-    "    function_schema = get_function_schema(function)\n",
-    "\n",
-    "    prompt = f\"\"\"\n",
-    "    You are tasked to generate a JSON configuration based on the provided\n",
-    "    function schema. Please follow the template below:\n",
-    "\n",
-    "    {{\n",
-    "        \"name\": \"<function_name>\",\n",
-    "        \"utterances\": [\n",
-    "            \"<example_utterance_1>\",\n",
-    "            \"<example_utterance_2>\",\n",
-    "            \"<example_utterance_3>\",\n",
-    "            \"<example_utterance_4>\",\n",
-    "            \"<example_utterance_5>\"]\n",
-    "    }}\n",
-    "\n",
-    "    Only include the \"name\" and \"utterances\" keys in your answer.\n",
-    "    The \"name\" should match the function name and the \"utterances\"\n",
-    "    should comprise a list of 5 example phrases that could be used to invoke\n",
-    "    the function.\n",
-    "\n",
-    "    Input schema:\n",
-    "    {function_schema}\n",
-    "    \"\"\"\n",
-    "\n",
-    "    try:\n",
-    "        ai_message = llm_mistral(prompt)\n",
-    "\n",
-    "        # Parse the response\n",
-    "        ai_message = ai_message[ai_message.find(\"{\") :]\n",
-    "        ai_message = (\n",
-    "            ai_message.replace(\"'\", '\"')\n",
-    "            .replace('\"s', \"'s\")\n",
-    "            .strip()\n",
-    "            .rstrip(\",\")\n",
-    "            .replace(\"}\", \"}\")\n",
-    "        )\n",
-    "\n",
-    "        valid_config = is_valid_config(ai_message)\n",
-    "\n",
-    "        if not valid_config:\n",
-    "            logger.warning(f\"Mistral failed with error, falling back to OpenAI\")\n",
-    "            ai_message = llm_openai(prompt)\n",
-    "            if not is_valid_config(ai_message):\n",
-    "                raise Exception(\"Invalid config generated\")\n",
-    "    except Exception as e:\n",
-    "        logger.error(f\"Fall back to OpenAI failed with error {e}\")\n",
-    "        ai_message = llm_openai(prompt)\n",
-    "        if not is_valid_config(ai_message):\n",
-    "            raise Exception(\"Failed to generate config\")\n",
-    "\n",
-    "    try:\n",
-    "        route_config = json.loads(ai_message)\n",
-    "        logger.info(f\"Generated config: {route_config}\")\n",
-    "        return route_config\n",
-    "    except json.JSONDecodeError as json_error:\n",
-    "        logger.error(f\"JSON parsing error {json_error}\")\n",
-    "        print(f\"AI message: {ai_message}\")\n",
-    "        return {\"error\": \"Failed to generate config\"}"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Extract function parameters using `Mistral` open-source model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 17,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def validate_parameters(function, parameters):\n",
-    "    sig = inspect.signature(function)\n",
-    "    for name, param in sig.parameters.items():\n",
-    "        if name not in parameters:\n",
-    "            return False, f\"Parameter {name} missing from query\"\n",
-    "        if not isinstance(parameters[name], param.annotation):\n",
-    "            return False, f\"Parameter {name} is not of type {param.annotation}\"\n",
-    "    return True, \"Parameters are valid\""
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 18,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def extract_parameters(query: str, function) -> dict:\n",
-    "    logger.info(\"Extracting parameters...\")\n",
-    "    example_query = \"How is the weather in Hawaii right now in International units?\"\n",
-    "\n",
-    "    example_schema = {\n",
-    "        \"name\": \"get_weather\",\n",
-    "        \"description\": \"Useful to get the weather in a specific location\",\n",
-    "        \"signature\": \"(location: str, degree: str) -> str\",\n",
-    "        \"output\": \"<class 'str'>\",\n",
-    "    }\n",
-    "\n",
-    "    example_parameters = {\n",
-    "        \"location\": \"London\",\n",
-    "        \"degree\": \"Celsius\",\n",
-    "    }\n",
-    "\n",
-    "    prompt = f\"\"\"\n",
-    "    You are a helpful assistant designed to output JSON.\n",
-    "    Given the following function schema\n",
-    "    << {get_function_schema(function)} >>\n",
-    "    and query\n",
-    "    << {query} >>\n",
-    "    extract the parameters values from the query, in a valid JSON format.\n",
-    "    Example:\n",
-    "    Input:\n",
-    "    query: {example_query}\n",
-    "    schema: {example_schema}\n",
-    "\n",
-    "    Result: {example_parameters}\n",
-    "\n",
-    "    Input:\n",
-    "    query: {query}\n",
-    "    schema: {get_function_schema(function)}\n",
-    "    Result:\n",
-    "    \"\"\"\n",
-    "\n",
-    "    try:\n",
-    "        ai_message = llm_mistral(prompt)\n",
-    "        ai_message = (\n",
-    "            ai_message.replace(\"Output:\", \"\").replace(\"'\", '\"').strip().rstrip(\",\")\n",
-    "        )\n",
-    "    except Exception as e:\n",
-    "        logger.error(f\"Mistral failed with error {e}, falling back to OpenAI\")\n",
-    "        ai_message = llm_openai(prompt)\n",
-    "\n",
-    "    try:\n",
-    "        parameters = json.loads(ai_message)\n",
-    "        valid, message = validate_parameters(function, parameters)\n",
-    "\n",
-    "        if not valid:\n",
-    "            logger.warning(\n",
-    "                f\"Invalid parameters from Mistral, falling back to OpenAI: {message}\"\n",
-    "            )\n",
-    "            # Fall back to OpenAI\n",
-    "            ai_message = llm_openai(prompt)\n",
-    "            parameters = json.loads(ai_message)\n",
-    "            valid, message = validate_parameters(function, parameters)\n",
-    "            if not valid:\n",
-    "                raise ValueError(message)\n",
-    "\n",
-    "        logger.info(f\"Extracted parameters: {parameters}\")\n",
-    "        return parameters\n",
-    "    except ValueError as e:\n",
-    "        logger.error(f\"Parameter validation error: {str(e)}\")\n",
-    "        return {\"error\": \"Failed to validate parameters\"}"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Set up the routing layer"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 19,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from semantic_router.schema import Route\n",
-    "from semantic_router.encoders import CohereEncoder\n",
-    "from semantic_router.layer import RouteLayer\n",
-    "from semantic_router.utils.logger import logger\n",
-    "\n",
-    "\n",
-    "def create_router(routes: list[dict]) -> RouteLayer:\n",
-    "    logger.info(\"Creating route layer...\")\n",
-    "    encoder = CohereEncoder()\n",
-    "\n",
-    "    route_list: list[Route] = []\n",
-    "    for route in routes:\n",
-    "        if \"name\" in route and \"utterances\" in route:\n",
-    "            print(f\"Route: {route}\")\n",
-    "            route_list.append(Route(name=route[\"name\"], utterances=route[\"utterances\"]))\n",
-    "        else:\n",
-    "            logger.warning(f\"Misconfigured route: {route}\")\n",
-    "\n",
-    "    return RouteLayer(encoder=encoder, routes=route_list)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Set up calling functions"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 24,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from typing import Callable\n",
-    "from semantic_router.layer import RouteLayer\n",
-    "\n",
-    "\n",
-    "def call_function(function: Callable, parameters: dict[str, str]):\n",
-    "    try:\n",
-    "        return function(**parameters)\n",
-    "    except TypeError as e:\n",
-    "        logger.error(f\"Error calling function: {e}\")\n",
-    "\n",
-    "\n",
-    "def call_llm(query: str) -> str:\n",
-    "    try:\n",
-    "        ai_message = llm_mistral(query)\n",
-    "    except Exception as e:\n",
-    "        logger.error(f\"Mistral failed with error {e}, falling back to OpenAI\")\n",
-    "        ai_message = llm_openai(query)\n",
-    "\n",
-    "    return ai_message\n",
-    "\n",
-    "\n",
-    "def call(query: str, functions: list[Callable], router: RouteLayer):\n",
-    "    function_name = router(query)\n",
-    "    if not function_name:\n",
-    "        logger.warning(\"No function found\")\n",
-    "        return call_llm(query)\n",
-    "\n",
-    "    for function in functions:\n",
-    "        if function.__name__ == function_name:\n",
-    "            parameters = extract_parameters(query, function)\n",
-    "            print(f\"parameters: {parameters}\")\n",
-    "            return call_function(function, parameters)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Workflow"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 21,
-   "metadata": {},
-   "outputs": [
+  "cells": [
     {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "\u001b[32m2023-12-18 12:17:58 INFO semantic_router.utils.logger Generating config...\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:17:58 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
-      "\u001b[31m2023-12-18 12:18:00 ERROR semantic_router.utils.logger Fall back to OpenAI failed with error ('Failed to call HuggingFace API', '{\"error\":\"Bad Gateway\"}')\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:18:00 INFO semantic_router.utils.logger Calling gpt-4 model\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:18:05 INFO semantic_router.utils.logger AI message: {\n",
-      "    \"name\": \"get_time\",\n",
-      "    \"utterances\": [\n",
-      "        \"what is the time in new york\",\n",
-      "        \"can you tell me the time in london\",\n",
-      "        \"get me the current time in tokyo\",\n",
-      "        \"i need to know the time in sydney\",\n",
-      "        \"please tell me the current time in paris\"\n",
-      "    ]\n",
-      "}\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:18:05 INFO semantic_router.utils.logger Generated config: {'name': 'get_time', 'utterances': ['what is the time in new york', 'can you tell me the time in london', 'get me the current time in tokyo', 'i need to know the time in sydney', 'please tell me the current time in paris']}\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:18:05 INFO semantic_router.utils.logger Generating config...\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:18:05 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
-      "\u001b[31m2023-12-18 12:18:07 ERROR semantic_router.utils.logger Fall back to OpenAI failed with error ('Failed to call HuggingFace API', '{\"error\":\"Bad Gateway\"}')\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:18:07 INFO semantic_router.utils.logger Calling gpt-4 model\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:18:12 INFO semantic_router.utils.logger AI message: {\n",
-      "    \"name\": \"get_news\",\n",
-      "    \"utterances\": [\n",
-      "        \"Can I get the latest news in Canada?\",\n",
-      "        \"Show me the recent news in the US\",\n",
-      "        \"I would like to know about the sports news in England\",\n",
-      "        \"Let's check the technology news in Japan\",\n",
-      "        \"Show me the health related news in Germany\"\n",
-      "    ]\n",
-      "}\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:18:12 INFO semantic_router.utils.logger Generated config: {'name': 'get_news', 'utterances': ['Can I get the latest news in Canada?', 'Show me the recent news in the US', 'I would like to know about the sports news in England', \"Let's check the technology news in Japan\", 'Show me the health related news in Germany']}\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:18:12 INFO semantic_router.utils.logger Creating route layer...\u001b[0m\n"
-     ]
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "## Define LLMs"
+      ]
     },
     {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Route: {'name': 'get_time', 'utterances': ['what is the time in new york', 'can you tell me the time in london', 'get me the current time in tokyo', 'i need to know the time in sydney', 'please tell me the current time in paris']}\n",
-      "Route: {'name': 'get_news', 'utterances': ['Can I get the latest news in Canada?', 'Show me the recent news in the US', 'I would like to know about the sports news in England', \"Let's check the technology news in Japan\", 'Show me the health related news in Germany']}\n"
-     ]
-    }
-   ],
-   "source": [
-    "def get_time(location: str) -> str:\n",
-    "    \"\"\"Useful to get the time in a specific location\"\"\"\n",
-    "    print(f\"Calling `get_time` function with location: {location}\")\n",
-    "    return \"get_time\"\n",
-    "\n",
-    "\n",
-    "def get_news(category: str, country: str) -> str:\n",
-    "    \"\"\"Useful to get the news in a specific country\"\"\"\n",
-    "    print(\n",
-    "        f\"Calling `get_news` function with category: {category} and country: {country}\"\n",
-    "    )\n",
-    "    return \"get_news\"\n",
-    "\n",
-    "\n",
-    "# Registering functions to the router\n",
-    "route_get_time = generate_route(get_time)\n",
-    "route_get_news = generate_route(get_news)\n",
-    "\n",
-    "routes = [route_get_time, route_get_news]\n",
-    "router = create_router(routes)\n",
-    "\n",
-    "# Tools\n",
-    "tools = [get_time, get_news]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 27,
-   "metadata": {},
-   "outputs": [
+      "cell_type": "code",
+      "execution_count": 12,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# OpenAI\n",
+        "import openai\n",
+        "from semantic_router.utils.logger import logger\n",
+        "\n",
+        "\n",
+        "# Docs # https://platform.openai.com/docs/guides/function-calling\n",
+        "def llm_openai(prompt: str, model: str = \"gpt-4\") -> str:\n",
+        "    try:\n",
+        "        logger.info(f\"Calling {model} model\")\n",
+        "        response = openai.chat.completions.create(\n",
+        "            model=model,\n",
+        "            messages=[\n",
+        "                {\"role\": \"system\", \"content\": f\"{prompt}\"},\n",
+        "            ],\n",
+        "        )\n",
+        "        ai_message = response.choices[0].message.content\n",
+        "        if not ai_message:\n",
+        "            raise Exception(\"AI message is empty\", ai_message)\n",
+        "        logger.info(f\"AI message: {ai_message}\")\n",
+        "        return ai_message\n",
+        "    except Exception as e:\n",
+        "        raise Exception(\"Failed to call OpenAI API\", e)"
+      ]
+    },
     {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "\u001b[32m2023-12-18 12:20:12 INFO semantic_router.utils.logger Generating config...\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:20:12 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:20:16 INFO semantic_router.utils.logger AI message: \n",
-      "    Example output:\n",
-      "    {\n",
-      "        \"name\": \"get_time\",\n",
-      "        \"utterances\": [\n",
-      "            \"What's the time in New York?\",\n",
-      "            \"Tell me the time in Tokyo.\",\n",
-      "            \"Can you give me the time in London?\",\n",
-      "            \"What's the current time in Sydney?\",\n",
-      "            \"Can you tell me the time in Berlin?\"\n",
-      "        ]\n",
-      "    }\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:20:16 INFO semantic_router.utils.logger Generated config: {'name': 'get_time', 'utterances': [\"What's the time in New York?\", 'Tell me the time in Tokyo.', 'Can you give me the time in London?', \"What's the current time in Sydney?\", 'Can you tell me the time in Berlin?']}\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:20:16 INFO semantic_router.utils.logger Generating config...\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:20:16 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:20:20 INFO semantic_router.utils.logger AI message: \n",
-      "    Example output:\n",
-      "    {\n",
-      "        \"name\": \"get_news\",\n",
-      "        \"utterances\": [\n",
-      "            \"Tell me the latest news from the US\",\n",
-      "            \"What's happening in India today?\",\n",
-      "            \"Get me the top stories from Japan\",\n",
-      "            \"Can you give me the breaking news from Brazil?\",\n",
-      "            \"What's the latest news from Germany?\"\n",
-      "        ]\n",
-      "    }\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:20:20 INFO semantic_router.utils.logger Generated config: {'name': 'get_news', 'utterances': ['Tell me the latest news from the US', \"What's happening in India today?\", 'Get me the top stories from Japan', 'Can you give me the breaking news from Brazil?', \"What's the latest news from Germany?\"]}\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:20:20 INFO semantic_router.utils.logger Creating route layer...\u001b[0m\n"
-     ]
+      "cell_type": "code",
+      "execution_count": 13,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# Mistral\n",
+        "import os\n",
+        "import requests\n",
+        "\n",
+        "# Docs https://huggingface.co/docs/transformers/main_classes/text_generation\n",
+        "HF_API_TOKEN = os.getenv(\"HF_API_TOKEN\")\n",
+        "\n",
+        "\n",
+        "def llm_mistral(prompt: str) -> str:\n",
+        "    api_url = \"https://z5t4cuhg21uxfmc3.us-east-1.aws.endpoints.huggingface.cloud/\"\n",
+        "    headers = {\n",
+        "        \"Authorization\": f\"Bearer {HF_API_TOKEN}\",\n",
+        "        \"Content-Type\": \"application/json\",\n",
+        "    }\n",
+        "\n",
+        "    logger.info(\"Calling Mistral model\")\n",
+        "    response = requests.post(\n",
+        "        api_url,\n",
+        "        headers=headers,\n",
+        "        json={\n",
+        "            \"inputs\": f\"You are a helpful assistant, user query: {prompt}\",\n",
+        "            \"parameters\": {\n",
+        "                \"max_new_tokens\": 200,\n",
+        "                \"temperature\": 0.01,\n",
+        "                \"num_beams\": 5,\n",
+        "                \"num_return_sequences\": 1,\n",
+        "            },\n",
+        "        },\n",
+        "    )\n",
+        "    if response.status_code != 200:\n",
+        "        raise Exception(\"Failed to call HuggingFace API\", response.text)\n",
+        "\n",
+        "    ai_message = response.json()[0][\"generated_text\"]\n",
+        "    if not ai_message:\n",
+        "        raise Exception(\"AI message is empty\", ai_message)\n",
+        "    logger.info(f\"AI message: {ai_message}\")\n",
+        "    return ai_message"
+      ]
     },
     {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Route: {'name': 'get_time', 'utterances': [\"What's the time in New York?\", 'Tell me the time in Tokyo.', 'Can you give me the time in London?', \"What's the current time in Sydney?\", 'Can you tell me the time in Berlin?']}\n",
-      "Route: {'name': 'get_news', 'utterances': ['Tell me the latest news from the US', \"What's happening in India today?\", 'Get me the top stories from Japan', 'Can you give me the breaking news from Brazil?', \"What's the latest news from Germany?\"]}\n"
-     ]
-    }
-   ],
-   "source": [
-    "def get_time(location: str) -> str:\n",
-    "    \"\"\"Useful to get the time in a specific location\"\"\"\n",
-    "    print(f\"Calling `get_time` function with location: {location}\")\n",
-    "    return \"get_time\"\n",
-    "\n",
-    "\n",
-    "def get_news(category: str, country: str) -> str:\n",
-    "    \"\"\"Useful to get the news in a specific country\"\"\"\n",
-    "    print(\n",
-    "        f\"Calling `get_news` function with category: {category} and country: {country}\"\n",
-    "    )\n",
-    "    return \"get_news\"\n",
-    "\n",
-    "\n",
-    "# Registering functions to the router\n",
-    "route_get_time = generate_route(get_time)\n",
-    "route_get_news = generate_route(get_news)\n",
-    "\n",
-    "routes = [route_get_time, route_get_news]\n",
-    "router = create_router(routes)\n",
-    "\n",
-    "# Tools\n",
-    "tools = [get_time, get_news]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 26,
-   "metadata": {},
-   "outputs": [
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "### Now we need to generate config from function schema using LLM"
+      ]
+    },
     {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "\u001b[32m2023-12-18 12:20:02 INFO semantic_router.utils.logger Extracting parameters...\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:20:02 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:20:04 INFO semantic_router.utils.logger AI message: \n",
-      "    {\n",
-      "        \"location\": \"Stockholm\"\n",
-      "    }\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:20:04 INFO semantic_router.utils.logger Extracted parameters: {'location': 'Stockholm'}\u001b[0m\n"
-     ]
+      "cell_type": "code",
+      "execution_count": 14,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "import inspect\n",
+        "from typing import Any\n",
+        "\n",
+        "\n",
+        "def get_function_schema(function) -> dict[str, Any]:\n",
+        "    schema = {\n",
+        "        \"name\": function.__name__,\n",
+        "        \"description\": str(inspect.getdoc(function)),\n",
+        "        \"signature\": str(inspect.signature(function)),\n",
+        "        \"output\": str(\n",
+        "            inspect.signature(function).return_annotation,\n",
+        "        ),\n",
+        "    }\n",
+        "    return schema"
+      ]
     },
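+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "As a quick illustration (the `_example` function here is hypothetical, not part of the original notebook), this is roughly the schema that `get_function_schema` produces for a simple annotated function:\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# Hypothetical example function, used only to show the schema shape\n",
+        "def _example(location: str) -> str:\n",
+        "    \"\"\"Useful to get the time in a specific location\"\"\"\n",
+        "    return \"get_time\"\n",
+        "\n",
+        "\n",
+        "# Expected shape, matching the example schema used later in this notebook:\n",
+        "# {'name': '_example', 'description': 'Useful to get the time in a specific location',\n",
+        "#  'signature': '(location: str) -> str', 'output': \"<class 'str'>\"}\n",
+        "get_function_schema(_example)"
+      ]
+    },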
     {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "parameters: {'location': 'Stockholm'}\n",
-      "Calling `get_time` function with location: Stockholm\n"
-     ]
+      "cell_type": "code",
+      "execution_count": 15,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "import json\n",
+        "\n",
+        "\n",
+        "def is_valid_config(route_config_str: str) -> bool:\n",
+        "    try:\n",
+        "        output_json = json.loads(route_config_str)\n",
+        "        return all(key in output_json for key in [\"name\", \"utterances\"])\n",
+        "    except json.JSONDecodeError:\n",
+        "        return False"
+      ]
     },
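+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "A couple of illustrative calls (not from the original run) showing what `is_valid_config` accepts and rejects:\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# both required keys present -> True; missing key or invalid JSON -> False\n",
+        "print(is_valid_config('{\"name\": \"get_time\", \"utterances\": [\"what time is it?\"]}'))  # True\n",
+        "print(is_valid_config('{\"name\": \"get_time\"}'))  # False\n",
+        "print(is_valid_config(\"not json\"))  # False"
+      ]
+    },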
     {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "\u001b[32m2023-12-18 12:20:04 INFO semantic_router.utils.logger Extracting parameters...\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:20:04 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:20:05 INFO semantic_router.utils.logger AI message: \n",
-      "    {\n",
-      "        \"category\": \"tech\",\n",
-      "        \"country\": \"Lithuania\"\n",
-      "    }\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:20:05 INFO semantic_router.utils.logger Extracted parameters: {'category': 'tech', 'country': 'Lithuania'}\u001b[0m\n"
-     ]
+      "cell_type": "code",
+      "execution_count": 16,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "import json\n",
+        "\n",
+        "from semantic_router.utils.logger import logger\n",
+        "\n",
+        "\n",
+        "def generate_route(function) -> dict:\n",
+        "    logger.info(\"Generating config...\")\n",
+        "\n",
+        "    function_schema = get_function_schema(function)\n",
+        "\n",
+        "    prompt = f\"\"\"\n",
+        "    You are tasked to generate a JSON configuration based on the provided\n",
+        "    function schema. Please follow the template below:\n",
+        "\n",
+        "    {{\n",
+        "        \"name\": \"<function_name>\",\n",
+        "        \"utterances\": [\n",
+        "            \"<example_utterance_1>\",\n",
+        "            \"<example_utterance_2>\",\n",
+        "            \"<example_utterance_3>\",\n",
+        "            \"<example_utterance_4>\",\n",
+        "            \"<example_utterance_5>\"]\n",
+        "    }}\n",
+        "\n",
+        "    Only include the \"name\" and \"utterances\" keys in your answer.\n",
+        "    The \"name\" should match the function name and the \"utterances\"\n",
+        "    should comprise a list of 5 example phrases that could be used to invoke\n",
+        "    the function.\n",
+        "\n",
+        "    Input schema:\n",
+        "    {function_schema}\n",
+        "    \"\"\"\n",
+        "\n",
+        "    try:\n",
+        "        ai_message = llm_mistral(prompt)\n",
+        "\n",
+        "        # Parse the response\n",
+        "        ai_message = ai_message[ai_message.find(\"{\") :]\n",
+        "        ai_message = (\n",
+        "            ai_message.replace(\"'\", '\"')\n",
+        "            .replace('\"s', \"'s\")\n",
+        "            .strip()\n",
+        "            .rstrip(\",\")\n",
+        "            .replace(\"}\", \"}\")\n",
+        "        )\n",
+        "\n",
+        "        valid_config = is_valid_config(ai_message)\n",
+        "\n",
+        "        if not valid_config:\n",
+        "            logger.warning(f\"Mistral failed with error, falling back to OpenAI\")\n",
+        "            ai_message = llm_openai(prompt)\n",
+        "            if not is_valid_config(ai_message):\n",
+        "                raise Exception(\"Invalid config generated\")\n",
+        "    except Exception as e:\n",
+        "        logger.error(f\"Fall back to OpenAI failed with error {e}\")\n",
+        "        ai_message = llm_openai(prompt)\n",
+        "        if not is_valid_config(ai_message):\n",
+        "            raise Exception(\"Failed to generate config\")\n",
+        "\n",
+        "    try:\n",
+        "        route_config = json.loads(ai_message)\n",
+        "        logger.info(f\"Generated config: {route_config}\")\n",
+        "        return route_config\n",
+        "    except json.JSONDecodeError as json_error:\n",
+        "        logger.error(f\"JSON parsing error {json_error}\")\n",
+        "        print(f\"AI message: {ai_message}\")\n",
+        "        return {\"error\": \"Failed to generate config\"}"
+      ]
     },
     {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "parameters: {'category': 'tech', 'country': 'Lithuania'}\n",
-      "Calling `get_news` function with category: tech and country: Lithuania\n"
-     ]
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Extract function parameters using `Mistral` open-source model"
+      ]
     },
     {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "\u001b[33m2023-12-18 12:20:05 WARNING semantic_router.utils.logger No function found\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:20:05 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
-      "\u001b[32m2023-12-18 12:20:06 INFO semantic_router.utils.logger AI message:  How can I help you today?\u001b[0m\n"
-     ]
+      "cell_type": "code",
+      "execution_count": 17,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "def validate_parameters(function, parameters):\n",
+        "    sig = inspect.signature(function)\n",
+        "    for name, param in sig.parameters.items():\n",
+        "        if name not in parameters:\n",
+        "            return False, f\"Parameter {name} missing from query\"\n",
+        "        if not isinstance(parameters[name], param.annotation):\n",
+        "            return False, f\"Parameter {name} is not of type {param.annotation}\"\n",
+        "    return True, \"Parameters are valid\""
+      ]
     },
     {
-     "data": {
-      "text/plain": [
-       "' How can I help you today?'"
+      "cell_type": "code",
+      "execution_count": 18,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "def extract_parameters(query: str, function) -> dict:\n",
+        "    logger.info(\"Extracting parameters...\")\n",
+        "    example_query = \"How is the weather in Hawaii right now in International units?\"\n",
+        "\n",
+        "    example_schema = {\n",
+        "        \"name\": \"get_weather\",\n",
+        "        \"description\": \"Useful to get the weather in a specific location\",\n",
+        "        \"signature\": \"(location: str, degree: str) -> str\",\n",
+        "        \"output\": \"<class 'str'>\",\n",
+        "    }\n",
+        "\n",
+        "    example_parameters = {\n",
+        "        \"location\": \"London\",\n",
+        "        \"degree\": \"Celsius\",\n",
+        "    }\n",
+        "\n",
+        "    prompt = f\"\"\"\n",
+        "    You are a helpful assistant designed to output JSON.\n",
+        "    Given the following function schema\n",
+        "    << {get_function_schema(function)} >>\n",
+        "    and query\n",
+        "    << {query} >>\n",
+        "    extract the parameters values from the query, in a valid JSON format.\n",
+        "    Example:\n",
+        "    Input:\n",
+        "    query: {example_query}\n",
+        "    schema: {example_schema}\n",
+        "\n",
+        "    Result: {example_parameters}\n",
+        "\n",
+        "    Input:\n",
+        "    query: {query}\n",
+        "    schema: {get_function_schema(function)}\n",
+        "    Result:\n",
+        "    \"\"\"\n",
+        "\n",
+        "    try:\n",
+        "        ai_message = llm_mistral(prompt)\n",
+        "        ai_message = (\n",
+        "            ai_message.replace(\"Output:\", \"\").replace(\"'\", '\"').strip().rstrip(\",\")\n",
+        "        )\n",
+        "    except Exception as e:\n",
+        "        logger.error(f\"Mistral failed with error {e}, falling back to OpenAI\")\n",
+        "        ai_message = llm_openai(prompt)\n",
+        "\n",
+        "    try:\n",
+        "        parameters = json.loads(ai_message)\n",
+        "        valid, message = validate_parameters(function, parameters)\n",
+        "\n",
+        "        if not valid:\n",
+        "            logger.warning(\n",
+        "                f\"Invalid parameters from Mistral, falling back to OpenAI: {message}\"\n",
+        "            )\n",
+        "            # Fall back to OpenAI\n",
+        "            ai_message = llm_openai(prompt)\n",
+        "            parameters = json.loads(ai_message)\n",
+        "            valid, message = validate_parameters(function, parameters)\n",
+        "            if not valid:\n",
+        "                raise ValueError(message)\n",
+        "\n",
+        "        logger.info(f\"Extracted parameters: {parameters}\")\n",
+        "        return parameters\n",
+        "    except ValueError as e:\n",
+        "        logger.error(f\"Parameter validation error: {str(e)}\")\n",
+        "        return {\"error\": \"Failed to validate parameters\"}"
       ]
-     },
-     "execution_count": 26,
-     "metadata": {},
-     "output_type": "execute_result"
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Set up the routing layer"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 19,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "from semantic_router.schemas.route import Route\n",
+        "from semantic_router.encoders import CohereEncoder\n",
+        "from semantic_router.layer import RouteLayer\n",
+        "from semantic_router.utils.logger import logger\n",
+        "\n",
+        "\n",
+        "def create_router(routes: list[dict]) -> RouteLayer:\n",
+        "    logger.info(\"Creating route layer...\")\n",
+        "    encoder = CohereEncoder()\n",
+        "\n",
+        "    route_list: list[Route] = []\n",
+        "    for route in routes:\n",
+        "        if \"name\" in route and \"utterances\" in route:\n",
+        "            print(f\"Route: {route}\")\n",
+        "            route_list.append(Route(name=route[\"name\"], utterances=route[\"utterances\"]))\n",
+        "        else:\n",
+        "            logger.warning(f\"Misconfigured route: {route}\")\n",
+        "\n",
+        "    return RouteLayer(encoder=encoder, routes=route_list)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Set up calling functions"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 24,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "from typing import Callable\n",
+        "from semantic_router.layer import RouteLayer\n",
+        "\n",
+        "\n",
+        "def call_function(function: Callable, parameters: dict[str, str]):\n",
+        "    try:\n",
+        "        return function(**parameters)\n",
+        "    except TypeError as e:\n",
+        "        logger.error(f\"Error calling function: {e}\")\n",
+        "\n",
+        "\n",
+        "def call_llm(query: str) -> str:\n",
+        "    try:\n",
+        "        ai_message = llm_mistral(query)\n",
+        "    except Exception as e:\n",
+        "        logger.error(f\"Mistral failed with error {e}, falling back to OpenAI\")\n",
+        "        ai_message = llm_openai(query)\n",
+        "\n",
+        "    return ai_message\n",
+        "\n",
+        "\n",
+        "def call(query: str, functions: list[Callable], router: RouteLayer):\n",
+        "    function_name = router(query)\n",
+        "    if not function_name:\n",
+        "        logger.warning(\"No function found\")\n",
+        "        return call_llm(query)\n",
+        "\n",
+        "    for function in functions:\n",
+        "        if function.__name__ == function_name:\n",
+        "            parameters = extract_parameters(query, function)\n",
+        "            print(f\"parameters: {parameters}\")\n",
+        "            return call_function(function, parameters)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "### Workflow"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 21,
+      "metadata": {},
+      "outputs": [
+        {
+          "name": "stderr",
+          "output_type": "stream",
+          "text": [
+            "\u001b[32m2023-12-18 12:17:58 INFO semantic_router.utils.logger Generating config...\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:17:58 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
+            "\u001b[31m2023-12-18 12:18:00 ERROR semantic_router.utils.logger Fall back to OpenAI failed with error ('Failed to call HuggingFace API', '{\"error\":\"Bad Gateway\"}')\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:18:00 INFO semantic_router.utils.logger Calling gpt-4 model\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:18:05 INFO semantic_router.utils.logger AI message: {\n",
+            "    \"name\": \"get_time\",\n",
+            "    \"utterances\": [\n",
+            "        \"what is the time in new york\",\n",
+            "        \"can you tell me the time in london\",\n",
+            "        \"get me the current time in tokyo\",\n",
+            "        \"i need to know the time in sydney\",\n",
+            "        \"please tell me the current time in paris\"\n",
+            "    ]\n",
+            "}\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:18:05 INFO semantic_router.utils.logger Generated config: {'name': 'get_time', 'utterances': ['what is the time in new york', 'can you tell me the time in london', 'get me the current time in tokyo', 'i need to know the time in sydney', 'please tell me the current time in paris']}\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:18:05 INFO semantic_router.utils.logger Generating config...\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:18:05 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
+            "\u001b[31m2023-12-18 12:18:07 ERROR semantic_router.utils.logger Fall back to OpenAI failed with error ('Failed to call HuggingFace API', '{\"error\":\"Bad Gateway\"}')\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:18:07 INFO semantic_router.utils.logger Calling gpt-4 model\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:18:12 INFO semantic_router.utils.logger AI message: {\n",
+            "    \"name\": \"get_news\",\n",
+            "    \"utterances\": [\n",
+            "        \"Can I get the latest news in Canada?\",\n",
+            "        \"Show me the recent news in the US\",\n",
+            "        \"I would like to know about the sports news in England\",\n",
+            "        \"Let's check the technology news in Japan\",\n",
+            "        \"Show me the health related news in Germany\"\n",
+            "    ]\n",
+            "}\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:18:12 INFO semantic_router.utils.logger Generated config: {'name': 'get_news', 'utterances': ['Can I get the latest news in Canada?', 'Show me the recent news in the US', 'I would like to know about the sports news in England', \"Let's check the technology news in Japan\", 'Show me the health related news in Germany']}\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:18:12 INFO semantic_router.utils.logger Creating route layer...\u001b[0m\n"
+          ]
+        },
+        {
+          "name": "stdout",
+          "output_type": "stream",
+          "text": [
+            "Route: {'name': 'get_time', 'utterances': ['what is the time in new york', 'can you tell me the time in london', 'get me the current time in tokyo', 'i need to know the time in sydney', 'please tell me the current time in paris']}\n",
+            "Route: {'name': 'get_news', 'utterances': ['Can I get the latest news in Canada?', 'Show me the recent news in the US', 'I would like to know about the sports news in England', \"Let's check the technology news in Japan\", 'Show me the health related news in Germany']}\n"
+          ]
+        }
+      ],
+      "source": [
+        "def get_time(location: str) -> str:\n",
+        "    \"\"\"Useful to get the time in a specific location\"\"\"\n",
+        "    print(f\"Calling `get_time` function with location: {location}\")\n",
+        "    return \"get_time\"\n",
+        "\n",
+        "\n",
+        "def get_news(category: str, country: str) -> str:\n",
+        "    \"\"\"Useful to get the news in a specific country\"\"\"\n",
+        "    print(\n",
+        "        f\"Calling `get_news` function with category: {category} and country: {country}\"\n",
+        "    )\n",
+        "    return \"get_news\"\n",
+        "\n",
+        "\n",
+        "# Registering functions to the router\n",
+        "route_get_time = generate_route(get_time)\n",
+        "route_get_news = generate_route(get_news)\n",
+        "\n",
+        "routes = [route_get_time, route_get_news]\n",
+        "router = create_router(routes)\n",
+        "\n",
+        "# Tools\n",
+        "tools = [get_time, get_news]"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 27,
+      "metadata": {},
+      "outputs": [
+        {
+          "name": "stderr",
+          "output_type": "stream",
+          "text": [
+            "\u001b[32m2023-12-18 12:20:12 INFO semantic_router.utils.logger Generating config...\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:20:12 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:20:16 INFO semantic_router.utils.logger AI message: \n",
+            "    Example output:\n",
+            "    {\n",
+            "        \"name\": \"get_time\",\n",
+            "        \"utterances\": [\n",
+            "            \"What's the time in New York?\",\n",
+            "            \"Tell me the time in Tokyo.\",\n",
+            "            \"Can you give me the time in London?\",\n",
+            "            \"What's the current time in Sydney?\",\n",
+            "            \"Can you tell me the time in Berlin?\"\n",
+            "        ]\n",
+            "    }\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:20:16 INFO semantic_router.utils.logger Generated config: {'name': 'get_time', 'utterances': [\"What's the time in New York?\", 'Tell me the time in Tokyo.', 'Can you give me the time in London?', \"What's the current time in Sydney?\", 'Can you tell me the time in Berlin?']}\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:20:16 INFO semantic_router.utils.logger Generating config...\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:20:16 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:20:20 INFO semantic_router.utils.logger AI message: \n",
+            "    Example output:\n",
+            "    {\n",
+            "        \"name\": \"get_news\",\n",
+            "        \"utterances\": [\n",
+            "            \"Tell me the latest news from the US\",\n",
+            "            \"What's happening in India today?\",\n",
+            "            \"Get me the top stories from Japan\",\n",
+            "            \"Can you give me the breaking news from Brazil?\",\n",
+            "            \"What's the latest news from Germany?\"\n",
+            "        ]\n",
+            "    }\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:20:20 INFO semantic_router.utils.logger Generated config: {'name': 'get_news', 'utterances': ['Tell me the latest news from the US', \"What's happening in India today?\", 'Get me the top stories from Japan', 'Can you give me the breaking news from Brazil?', \"What's the latest news from Germany?\"]}\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:20:20 INFO semantic_router.utils.logger Creating route layer...\u001b[0m\n"
+          ]
+        },
+        {
+          "name": "stdout",
+          "output_type": "stream",
+          "text": [
+            "Route: {'name': 'get_time', 'utterances': [\"What's the time in New York?\", 'Tell me the time in Tokyo.', 'Can you give me the time in London?', \"What's the current time in Sydney?\", 'Can you tell me the time in Berlin?']}\n",
+            "Route: {'name': 'get_news', 'utterances': ['Tell me the latest news from the US', \"What's happening in India today?\", 'Get me the top stories from Japan', 'Can you give me the breaking news from Brazil?', \"What's the latest news from Germany?\"]}\n"
+          ]
+        }
+      ],
+      "source": [
+        "def get_time(location: str) -> str:\n",
+        "    \"\"\"Useful to get the time in a specific location\"\"\"\n",
+        "    print(f\"Calling `get_time` function with location: {location}\")\n",
+        "    return \"get_time\"\n",
+        "\n",
+        "\n",
+        "def get_news(category: str, country: str) -> str:\n",
+        "    \"\"\"Useful to get the news in a specific country\"\"\"\n",
+        "    print(\n",
+        "        f\"Calling `get_news` function with category: {category} and country: {country}\"\n",
+        "    )\n",
+        "    return \"get_news\"\n",
+        "\n",
+        "\n",
+        "# Registering functions to the router\n",
+        "route_get_time = generate_route(get_time)\n",
+        "route_get_news = generate_route(get_news)\n",
+        "\n",
+        "routes = [route_get_time, route_get_news]\n",
+        "router = create_router(routes)\n",
+        "\n",
+        "# Tools\n",
+        "tools = [get_time, get_news]"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 26,
+      "metadata": {},
+      "outputs": [
+        {
+          "name": "stderr",
+          "output_type": "stream",
+          "text": [
+            "\u001b[32m2023-12-18 12:20:02 INFO semantic_router.utils.logger Extracting parameters...\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:20:02 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:20:04 INFO semantic_router.utils.logger AI message: \n",
+            "    {\n",
+            "        \"location\": \"Stockholm\"\n",
+            "    }\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:20:04 INFO semantic_router.utils.logger Extracted parameters: {'location': 'Stockholm'}\u001b[0m\n"
+          ]
+        },
+        {
+          "name": "stdout",
+          "output_type": "stream",
+          "text": [
+            "parameters: {'location': 'Stockholm'}\n",
+            "Calling `get_time` function with location: Stockholm\n"
+          ]
+        },
+        {
+          "name": "stderr",
+          "output_type": "stream",
+          "text": [
+            "\u001b[32m2023-12-18 12:20:04 INFO semantic_router.utils.logger Extracting parameters...\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:20:04 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:20:05 INFO semantic_router.utils.logger AI message: \n",
+            "    {\n",
+            "        \"category\": \"tech\",\n",
+            "        \"country\": \"Lithuania\"\n",
+            "    }\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:20:05 INFO semantic_router.utils.logger Extracted parameters: {'category': 'tech', 'country': 'Lithuania'}\u001b[0m\n"
+          ]
+        },
+        {
+          "name": "stdout",
+          "output_type": "stream",
+          "text": [
+            "parameters: {'category': 'tech', 'country': 'Lithuania'}\n",
+            "Calling `get_news` function with category: tech and country: Lithuania\n"
+          ]
+        },
+        {
+          "name": "stderr",
+          "output_type": "stream",
+          "text": [
+            "\u001b[33m2023-12-18 12:20:05 WARNING semantic_router.utils.logger No function found\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:20:05 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
+            "\u001b[32m2023-12-18 12:20:06 INFO semantic_router.utils.logger AI message:  How can I help you today?\u001b[0m\n"
+          ]
+        },
+        {
+          "data": {
+            "text/plain": [
+              "' How can I help you today?'"
+            ]
+          },
+          "execution_count": 26,
+          "metadata": {},
+          "output_type": "execute_result"
+        }
+      ],
+      "source": [
+        "call(query=\"What is the time in Stockholm?\", functions=tools, router=router)\n",
+        "call(query=\"What is the tech news in the Lithuania?\", functions=tools, router=router)\n",
+        "call(query=\"Hi!\", functions=tools, router=router)"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": []
+    }
+  ],
+  "metadata": {
+    "kernelspec": {
+      "display_name": ".venv",
+      "language": "python",
+      "name": "python3"
+    },
+    "language_info": {
+      "codemirror_mode": {
+        "name": "ipython",
+        "version": 3
+      },
+      "file_extension": ".py",
+      "mimetype": "text/x-python",
+      "name": "python",
+      "nbconvert_exporter": "python",
+      "pygments_lexer": "ipython3",
+      "version": "3.11.5"
     }
-   ],
-   "source": [
-    "call(query=\"What is the time in Stockholm?\", functions=tools, router=router)\n",
-    "call(query=\"What is the tech news in the Lithuania?\", functions=tools, router=router)\n",
-    "call(query=\"Hi!\", functions=tools, router=router)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": ".venv",
-   "language": "python",
-   "name": "python3"
   },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.11.5"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
+  "nbformat": 4,
+  "nbformat_minor": 2
 }
diff --git a/docs/examples/hybrid-layer.ipynb b/docs/examples/hybrid-layer.ipynb
index 8b1da5ae75f0a8a9572996b8e416a282d2c48f1b..9e5eca6645418d25515fe0dd80b3af7e05909069 100644
--- a/docs/examples/hybrid-layer.ipynb
+++ b/docs/examples/hybrid-layer.ipynb
@@ -1,207 +1,199 @@
 {
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Semantic Router: Hybrid Layer"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The Hybrid Layer in the Semantic Router library can improve  making performance particularly for niche use-cases that contain specific terminology, such as finance or medical. It helps us provide more importance to  making based on the keywords contained in our utterances and user queries."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Getting Started"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "We start by installing the library:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "!pip install -qU semantic-router==0.0.6"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "We start by defining a dictionary mapping s to example phrases that should trigger those s."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "metadata": {},
-   "outputs": [
-    {
-     "ename": "ImportError",
-     "evalue": "cannot import name 'Route' from 'semantic_router.schema' (/Users/jakit/customers/aurelio/semantic-router/.venv/lib/python3.11/site-packages/semantic_router/schema.py)",
-     "output_type": "error",
-     "traceback": [
-      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
-      "\u001b[0;31mImportError\u001b[0m                               Traceback (most recent call last)",
-      "\u001b[1;32m/Users/jakit/customers/aurelio/semantic-router/docs/examples/hybrid-layer.ipynb Cell 7\u001b[0m line \u001b[0;36m1\n\u001b[0;32m----> <a href='vscode-notebook-cell:/Users/jakit/customers/aurelio/semantic-router/docs/examples/hybrid-layer.ipynb#X10sZmlsZQ%3D%3D?line=0'>1</a>\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39msemantic_router\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mschema\u001b[39;00m \u001b[39mimport\u001b[39;00m Route\n\u001b[1;32m      <a href='vscode-notebook-cell:/Users/jakit/customers/aurelio/semantic-router/docs/examples/hybrid-layer.ipynb#X10sZmlsZQ%3D%3D?line=2'>3</a>\u001b[0m politics \u001b[39m=\u001b[39m Route(\n\u001b[1;32m      <a href='vscode-notebook-cell:/Users/jakit/customers/aurelio/semantic-router/docs/examples/hybrid-layer.ipynb#X10sZmlsZQ%3D%3D?line=3'>4</a>\u001b[0m     name\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mpolitics\u001b[39m\u001b[39m\"\u001b[39m,\n\u001b[1;32m      <a href='vscode-notebook-cell:/Users/jakit/customers/aurelio/semantic-router/docs/examples/hybrid-layer.ipynb#X10sZmlsZQ%3D%3D?line=4'>5</a>\u001b[0m     utterances\u001b[39m=\u001b[39m[\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m     <a href='vscode-notebook-cell:/Users/jakit/customers/aurelio/semantic-router/docs/examples/hybrid-layer.ipynb#X10sZmlsZQ%3D%3D?line=11'>12</a>\u001b[0m     ],\n\u001b[1;32m     <a href='vscode-notebook-cell:/Users/jakit/customers/aurelio/semantic-router/docs/examples/hybrid-layer.ipynb#X10sZmlsZQ%3D%3D?line=12'>13</a>\u001b[0m )\n",
-      "\u001b[0;31mImportError\u001b[0m: cannot import name 'Route' from 'semantic_router.schema' (/Users/jakit/customers/aurelio/semantic-router/.venv/lib/python3.11/site-packages/semantic_router/schema.py)"
-     ]
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "# Semantic Router: Hybrid Layer\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "The Hybrid Layer in the Semantic Router library can improve making performance particularly for niche use-cases that contain specific terminology, such as finance or medical. It helps us provide more importance to making based on the keywords contained in our utterances and user queries.\n"
+      ]
+    },
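+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "For intuition (an illustrative sketch only, not necessarily the exact formula this library uses), hybrid scoring is often a weighted blend of a dense semantic similarity and a sparse keyword similarity:\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# illustrative only: blend a dense (semantic) score with a sparse (keyword) score\n",
+        "def hybrid_score(dense_sim: float, sparse_sim: float, alpha: float = 0.3) -> float:\n",
+        "    # alpha weights the sparse/keyword signal, 1 - alpha the dense signal\n",
+        "    return alpha * sparse_sim + (1 - alpha) * dense_sim\n",
+        "\n",
+        "hybrid_score(dense_sim=0.82, sparse_sim=0.40)"
+      ]
+    },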
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "## Getting Started\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "We start by installing the library:\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "#!pip install -qU semantic-router==0.0.6"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "We start by defining a dictionary mapping s to example phrases that should trigger those s.\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "from semantic_router.schemas.route import Route\n",
+        "\n",
+        "politics = Route(\n",
+        "    name=\"politics\",\n",
+        "    utterances=[\n",
+        "        \"isn't politics the best thing ever\",\n",
+        "        \"why don't you tell me about your political opinions\",\n",
+        "        \"don't you just love the president\",\n",
+        "        \"don't you just hate the president\",\n",
+        "        \"they're going to destroy this country!\",\n",
+        "        \"they will save the country!\",\n",
+        "    ],\n",
+        ")"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Let's define another for good measure:\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "chitchat = Route(\n",
+        "    name=\"chitchat\",\n",
+        "    utterances=[\n",
+        "        \"how's the weather today?\",\n",
+        "        \"how are things going?\",\n",
+        "        \"lovely weather today\",\n",
+        "        \"the weather is horrendous\",\n",
+        "        \"let's go to the chippy\",\n",
+        "    ],\n",
+        ")\n",
+        "\n",
+        "chitchat = Route(\n",
+        "    name=\"chitchat\",\n",
+        "    utterances=[\n",
+        "        \"how's the weather today?\",\n",
+        "        \"how are things going?\",\n",
+        "        \"lovely weather today\",\n",
+        "        \"the weather is horrendous\",\n",
+        "        \"let's go to the chippy\",\n",
+        "    ],\n",
+        ")\n",
+        "\n",
+        "routes = [politics, chitchat]"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Now we initialize our embedding model:\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "import os\n",
+        "from semantic_router.encoders import CohereEncoder, BM25Encoder, TfidfEncoder\n",
+        "from getpass import getpass\n",
+        "\n",
+        "os.environ[\"COHERE_API_KEY\"] = os.environ[\"COHERE_API_KEY\"] or getpass(\n",
+        "    \"Enter Cohere API Key: \"\n",
+        ")\n",
+        "\n",
+        "dense_encoder = CohereEncoder()\n",
+        "# sparse_encoder = BM25Encoder()\n",
+        "sparse_encoder = TfidfEncoder()"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Now we define the `RouteLayer`. When called, the route layer will consume text (a query) and output the category (`Route`) it belongs to — to initialize a `RouteLayer` we need our `encoder` model and a list of `routes`.\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "from semantic_router.hybrid_layer import HybridRouteLayer\n",
+        "\n",
+        "dl = HybridRouteLayer(\n",
+        "    dense_encoder=dense_encoder, sparse_encoder=sparse_encoder, routes=routes\n",
+        ")"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "dl(\"don't you love politics?\")"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "dl(\"how's the weather today?\")"
+      ]
+    },
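+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "As a quick sanity check (a sketch using only the calls above), we can loop over a few queries and print the route each resolves to; an off-topic query should return `None`:\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "queries = [\n",
+        "    \"don't you love politics?\",\n",
+        "    \"how's the weather today?\",\n",
+        "    \"tell me about quantum computing\",  # off-topic, expected to return None\n",
+        "]\n",
+        "\n",
+        "for query in queries:\n",
+        "    print(f\"{query!r} -> {dl(query)}\")"
+      ]
+    },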
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "---\n"
+      ]
+    }
+  ],
+  "metadata": {
+    "kernelspec": {
+      "display_name": "decision-layer",
+      "language": "python",
+      "name": "python3"
+    },
+    "language_info": {
+      "codemirror_mode": {
+        "name": "ipython",
+        "version": 3
+      },
+      "file_extension": ".py",
+      "mimetype": "text/x-python",
+      "name": "python",
+      "nbconvert_exporter": "python",
+      "pygments_lexer": "ipython3",
+      "version": "3.11.5"
     }
-   ],
-   "source": [
-    "from semantic_router.schema import Route\n",
-    "\n",
-    "politics = Route(\n",
-    "    name=\"politics\",\n",
-    "    utterances=[\n",
-    "        \"isn't politics the best thing ever\",\n",
-    "        \"why don't you tell me about your political opinions\",\n",
-    "        \"don't you just love the president\",\n",
-    "        \"don't you just hate the president\",\n",
-    "        \"they're going to destroy this country!\",\n",
-    "        \"they will save the country!\",\n",
-    "    ],\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Let's define another for good measure:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chitchat = Route(\n",
-    "    name=\"chitchat\",\n",
-    "    utterances=[\n",
-    "        \"how's the weather today?\",\n",
-    "        \"how are things going?\",\n",
-    "        \"lovely weather today\",\n",
-    "        \"the weather is horrendous\",\n",
-    "        \"let's go to the chippy\",\n",
-    "    ],\n",
-    ")\n",
-    "\n",
-    "chitchat = Route(\n",
-    "    name=\"chitchat\",\n",
-    "    utterances=[\n",
-    "        \"how's the weather today?\",\n",
-    "        \"how are things going?\",\n",
-    "        \"lovely weather today\",\n",
-    "        \"the weather is horrendous\",\n",
-    "        \"let's go to the chippy\",\n",
-    "    ],\n",
-    ")\n",
-    "\n",
-    "routes = [politics, chitchat]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now we initialize our embedding model:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "from semantic_router.encoders import CohereEncoder\n",
-    "from getpass import getpass\n",
-    "\n",
-    "os.environ[\"COHERE_API_KEY\"] = os.environ[\"COHERE_API_KEY\"] or getpass(\n",
-    "    \"Enter Cohere API Key: \"\n",
-    ")\n",
-    "\n",
-    "encoder = CohereEncoder()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now we define the `RouteLayer`. When called, the route layer will consume text (a query) and output the category (`Route`) it belongs to — to initialize a `RouteLayer` we need our `encoder` model and a list of `routes`."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from semantic_router.hybrid_layer import HybridRouteLayer\n",
-    "\n",
-    "dl = HybridRouteLayer(encoder=encoder, routes=routes)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "dl(\"don't you love politics?\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "dl(\"how's the weather today?\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "decision-layer",
-   "language": "python",
-   "name": "python3"
   },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.11.3"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
+  "nbformat": 4,
+  "nbformat_minor": 2
 }
diff --git a/semantic_router/encoders/tfidf.py b/semantic_router/encoders/tfidf.py
index d6be6da57b53bc1bbdbce48579b8ff1af4f9eafb..6fc420ebfd4f753fe48db92db25f5278f36f1c58 100644
--- a/semantic_router/encoders/tfidf.py
+++ b/semantic_router/encoders/tfidf.py
@@ -1,7 +1,7 @@
 import numpy as np
 from collections import Counter
 from semantic_router.encoders import BaseEncoder
-from semantic_router.schema import Route
+from semantic_router.schemas.route import Route
 from numpy.linalg import norm
 import string
 
diff --git a/semantic_router/hybrid_layer.py b/semantic_router/hybrid_layer.py
index 3993ca451612f6a474961c15823ca104aae9b949..2901871aff43501d685d58d6e2cd2add8bca0f17 100644
--- a/semantic_router/hybrid_layer.py
+++ b/semantic_router/hybrid_layer.py
@@ -8,7 +8,7 @@ from semantic_router.encoders import (
     OpenAIEncoder,
     TfidfEncoder,
 )
-from semantic_router.schema import Route
+from semantic_router.schemas.route import Route
 from semantic_router.utils.logger import logger
 
 
diff --git a/semantic_router/layer.py b/semantic_router/layer.py
index cb408c5c5f452b78e9750976d7c669a01028450a..af08a9c1246dfd903defeef6f17e23dc198e9213 100644
--- a/semantic_router/layer.py
+++ b/semantic_router/layer.py
@@ -6,7 +6,7 @@ from semantic_router.encoders import (
     OpenAIEncoder,
 )
 from semantic_router.linear import similarity_matrix, top_scores
-from semantic_router.schema import Route
+from semantic_router.schemas.route import Route
 from semantic_router.utils.logger import logger
 
 
diff --git a/semantic_router/schema.py b/semantic_router/schemas/encoder.py
similarity index 67%
rename from semantic_router/schema.py
rename to semantic_router/schemas/encoder.py
index 007cddcbeb2c9e464e02a6c7f6cd12d2e9769cbc..1b2ad74c4b6b4ad5636e1befe470a248165e3893 100644
--- a/semantic_router/schema.py
+++ b/semantic_router/schemas/encoder.py
@@ -1,6 +1,5 @@
 from enum import Enum
 
-from pydantic import BaseModel
 from pydantic.dataclasses import dataclass
 
 from semantic_router.encoders import (
@@ -10,12 +9,6 @@ from semantic_router.encoders import (
 )
 
 
-class Route(BaseModel):
-    name: str
-    utterances: list[str]
-    description: str | None = None
-
-
 class EncoderType(Enum):
     HUGGINGFACE = "huggingface"
     OPENAI = "openai"
@@ -40,17 +33,3 @@ class Encoder:
 
     def __call__(self, texts: list[str]) -> list[list[float]]:
         return self.model(texts)
-
-
-@dataclass
-class SemanticSpace:
-    id: str
-    routes: list[Route]
-    encoder: str = ""
-
-    def __init__(self, routes: list[Route] = []):
-        self.id = ""
-        self.routes = routes
-
-    def add(self, route: Route):
-        self.routes.append(route)
diff --git a/semantic_router/schemas/route.py b/semantic_router/schemas/route.py
new file mode 100644
index 0000000000000000000000000000000000000000..b70bc60f817da865b88a57ed8bc0d9326ae34e64
--- /dev/null
+++ b/semantic_router/schemas/route.py
@@ -0,0 +1,7 @@
+from pydantic import BaseModel
+
+
+class Route(BaseModel):
+    name: str
+    utterances: list[str]
+    description: str | None = None
diff --git a/semantic_router/schemas/semantic_space.py b/semantic_router/schemas/semantic_space.py
new file mode 100644
index 0000000000000000000000000000000000000000..92e7adafc0c8686348d41ca316956168e8ca3883
--- /dev/null
+++ b/semantic_router/schemas/semantic_space.py
@@ -0,0 +1,17 @@
+from pydantic.dataclasses import dataclass
+
+from semantic_router.schemas.route import Route
+
+
+@dataclass
+class SemanticSpace:
+    id: str
+    routes: list[Route]
+    encoder: str = ""
+
+    def __init__(self, routes: list[Route] | None = None):
+        self.id = ""
+        self.routes = routes if routes is not None else []  # avoid a shared mutable default
+
+    def add(self, route: Route):
+        self.routes.append(route)
diff --git a/tests/unit/encoders/test_tfidf.py b/tests/unit/encoders/test_tfidf.py
index 93a966391e77b8645c45d52471fba80ea38f2d2c..68e37d9e955ad93bccc93f1ea7ea591709de292c 100644
--- a/tests/unit/encoders/test_tfidf.py
+++ b/tests/unit/encoders/test_tfidf.py
@@ -1,6 +1,6 @@
 import pytest
 from semantic_router.encoders import TfidfEncoder
-from semantic_router.schema import Route
+from semantic_router.schemas.route import Route
 
 
 @pytest.fixture
diff --git a/tests/unit/test_hybrid_layer.py b/tests/unit/test_hybrid_layer.py
index ee7d8f6b484e000c1a22cff5a72907482b3385d1..2506c19943117111af57620b107b668165a8f544 100644
--- a/tests/unit/test_hybrid_layer.py
+++ b/tests/unit/test_hybrid_layer.py
@@ -8,7 +8,7 @@ from semantic_router.encoders import (
     TfidfEncoder,
 )
 from semantic_router.hybrid_layer import HybridRouteLayer
-from semantic_router.schema import Route
+from semantic_router.schemas.route import Route
 
 
 def mock_encoder_call(utterances):
diff --git a/tests/unit/test_layer.py b/tests/unit/test_layer.py
index 66e0d53bb9350c77578682f9ea0742b1d3dfe0b2..d049243f1f7fd174b13cb41d4bf90e14a8c7c331 100644
--- a/tests/unit/test_layer.py
+++ b/tests/unit/test_layer.py
@@ -2,7 +2,7 @@ import pytest
 
 from semantic_router.encoders import BaseEncoder, CohereEncoder, OpenAIEncoder
 from semantic_router.layer import RouteLayer
-from semantic_router.schema import Route
+from semantic_router.schemas.route import Route
 
 
 def mock_encoder_call(utterances):
diff --git a/tests/unit/test_schema.py b/tests/unit/test_schema.py
index f471755c35796d33ac9329a2ddb3a20816230cda..f47643c9a2c55321234873d61f39a0751e4c7a45 100644
--- a/tests/unit/test_schema.py
+++ b/tests/unit/test_schema.py
@@ -1,11 +1,17 @@
 import pytest
 
-from semantic_router.schema import (
+from semantic_router.schemas.encoder import (
     CohereEncoder,
     Encoder,
     EncoderType,
     OpenAIEncoder,
+)
+
+from semantic_router.schemas.route import (
     Route,
+)
+
+from semantic_router.schemas.semantic_space import (
     SemanticSpace,
 )
 
diff --git a/walkthrough.ipynb b/walkthrough.ipynb
index d008739c8e1c6b1ba3b2ef89ebed75e953e89b45..346b576cdb8fe517580aca8e201cbaf9d5eb4a01 100644
--- a/walkthrough.ipynb
+++ b/walkthrough.ipynb
@@ -1,206 +1,237 @@
 {
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Semantic Router Walkthrough"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The Semantic Router library can be used as a super fast route making layer on top of LLMs. That means rather than waiting on a slow agent to decide what to do, we can use the magic of semantic vector space to make routes. Cutting route making time down from seconds to milliseconds."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Getting Started"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "We start by installing the library:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "!pip install -qU semantic-router==0.0.8"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "We start by defining a dictionary mapping routes to example phrases that should trigger those routes."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from semantic_router.schema import Route\n",
-    "\n",
-    "politics = Route(\n",
-    "    name=\"politics\",\n",
-    "    utterances=[\n",
-    "        \"isn't politics the best thing ever\",\n",
-    "        \"why don't you tell me about your political opinions\",\n",
-    "        \"don't you just love the president\" \"don't you just hate the president\",\n",
-    "        \"they're going to destroy this country!\",\n",
-    "        \"they will save the country!\",\n",
-    "    ],\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Let's define another for good measure:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chitchat = Route(\n",
-    "    name=\"chitchat\",\n",
-    "    utterances=[\n",
-    "        \"how's the weather today?\",\n",
-    "        \"how are things going?\",\n",
-    "        \"lovely weather today\",\n",
-    "        \"the weather is horrendous\",\n",
-    "        \"let's go to the chippy\",\n",
-    "    ],\n",
-    ")\n",
-    "\n",
-    "routes = [politics, chitchat]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now we initialize our embedding model:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "from getpass import getpass\n",
-    "from semantic_router.encoders import CohereEncoder\n",
-    "\n",
-    "os.environ[\"COHERE_API_KEY\"] = os.getenv(\"COHERE_API_KEY\") or getpass(\n",
-    "    \"Enter Cohere API Key: \"\n",
-    ")\n",
-    "\n",
-    "encoder = CohereEncoder()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now we define the `RouteLayer`. When called, the route layer will consume text (a query) and output the category (`Route`) it belongs to — to initialize a `RouteLayer` we need our `encoder` model and a list of `routes`."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from semantic_router.layer import RouteLayer\n",
-    "\n",
-    "dl = RouteLayer(encoder=encoder, routes=routes)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now we can test it:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "dl(\"don't you love politics?\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "dl(\"how's the weather today?\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Both are classified accurately, what if we send a query that is unrelated to our existing `Route` objects?"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "dl(\"I'm interested in learning about llama 2\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "In this case, we return `None` because no matches were identified."
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "decision-layer",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.11.3"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "# Semantic Router Walkthrough\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "The Semantic Router library can be used as a super fast route making layer on top of LLMs. That means rather than waiting on a slow agent to decide what to do, we can use the magic of semantic vector space to make routes. Cutting route making time down from seconds to milliseconds.\n"
+      ]
+    },
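+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "To see the core idea before touching the library (a toy sketch, not the library's implementation), routing reduces to embedding a query and picking the route whose example utterances it is most similar to:\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "import numpy as np\n",
+        "\n",
+        "# toy \"embeddings\": in practice an encoder model produces these vectors\n",
+        "route_vectors = {\n",
+        "    \"politics\": np.array([0.9, 0.1]),\n",
+        "    \"chitchat\": np.array([0.1, 0.9]),\n",
+        "}\n",
+        "query_vec = np.array([0.8, 0.2])\n",
+        "\n",
+        "\n",
+        "def cosine(a, b):\n",
+        "    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))\n",
+        "\n",
+        "\n",
+        "# pick the route with the highest similarity to the query\n",
+        "max(route_vectors, key=lambda name: cosine(query_vec, route_vectors[name]))"
+      ]
+    },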
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "## Getting Started\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "We start by installing the library:\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 1,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "!pip install -qU semantic-router==0.0.8"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "We start by defining a dictionary mapping routes to example phrases that should trigger those routes.\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 2,
+      "metadata": {},
+      "outputs": [
+        {
+          "name": "stderr",
+          "output_type": "stream",
+          "text": [
+            "/Users/danielgriffiths/Coding_files/Aurelio_local/semantic-router/.venv/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+            "  from .autonotebook import tqdm as notebook_tqdm\n"
+          ]
+        }
+      ],
+      "source": [
+        "from semantic_router.schemas.route import Route\n",
+        "\n",
+        "politics = Route(\n",
+        "    name=\"politics\",\n",
+        "    utterances=[\n",
+        "        \"isn't politics the best thing ever\",\n",
+        "        \"why don't you tell me about your political opinions\",\n",
+        "        \"don't you just love the president\" \"don't you just hate the president\",\n",
+        "        \"they're going to destroy this country!\",\n",
+        "        \"they will save the country!\",\n",
+        "    ],\n",
+        ")"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Let's define another for good measure:\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 3,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "chitchat = Route(\n",
+        "    name=\"chitchat\",\n",
+        "    utterances=[\n",
+        "        \"how's the weather today?\",\n",
+        "        \"how are things going?\",\n",
+        "        \"lovely weather today\",\n",
+        "        \"the weather is horrendous\",\n",
+        "        \"let's go to the chippy\",\n",
+        "    ],\n",
+        ")\n",
+        "\n",
+        "routes = [politics, chitchat]"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Now we initialize our embedding model:\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 4,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "import os\n",
+        "from getpass import getpass\n",
+        "from semantic_router.encoders import CohereEncoder\n",
+        "\n",
+        "os.environ[\"COHERE_API_KEY\"] = os.getenv(\"COHERE_API_KEY\") or getpass(\n",
+        "    \"Enter Cohere API Key: \"\n",
+        ")\n",
+        "\n",
+        "encoder = CohereEncoder()"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Now we define the `RouteLayer`. When called, the route layer will consume text (a query) and output the category (`Route`) it belongs to — to initialize a `RouteLayer` we need our `encoder` model and a list of `routes`.\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 5,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "from semantic_router.layer import RouteLayer\n",
+        "\n",
+        "dl = RouteLayer(encoder=encoder, routes=routes)"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Now we can test it:\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 6,
+      "metadata": {},
+      "outputs": [
+        {
+          "data": {
+            "text/plain": [
+              "'politics'"
+            ]
+          },
+          "execution_count": 6,
+          "metadata": {},
+          "output_type": "execute_result"
+        }
+      ],
+      "source": [
+        "dl(\"don't you love politics?\")"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 7,
+      "metadata": {},
+      "outputs": [
+        {
+          "data": {
+            "text/plain": [
+              "'chitchat'"
+            ]
+          },
+          "execution_count": 7,
+          "metadata": {},
+          "output_type": "execute_result"
+        }
+      ],
+      "source": [
+        "dl(\"how's the weather today?\")"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Both are classified accurately, what if we send a query that is unrelated to our existing `Route` objects?\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 8,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "dl(\"I'm interested in learning about llama 2\")"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "In this case, we return `None` because no matches were identified.\n"
+      ]
+    }
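+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "A minimal, illustrative sketch (not part of the library API): we treat `None` as a signal to fall back to some other behaviour, such as a general-purpose LLM call:\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": [
+        "# hypothetical fallback: use the matched route if there is one, else defer elsewhere\n",
+        "def route_or_fallback(query: str) -> str:\n",
+        "    route_name = dl(query)  # the matched route name, or None\n",
+        "    if route_name is None:\n",
+        "        # no semantic match; in practice we might call a general-purpose LLM here\n",
+        "        return \"no route matched: falling back\"\n",
+        "    return f\"handled by the '{route_name}' route\"\n",
+        "\n",
+        "\n",
+        "route_or_fallback(\"I'm interested in learning about llama 2\")"
+      ]
+    }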
+  ],
+  "metadata": {
+    "kernelspec": {
+      "display_name": "decision-layer",
+      "language": "python",
+      "name": "python3"
+    },
+    "language_info": {
+      "codemirror_mode": {
+        "name": "ipython",
+        "version": 3
+      },
+      "file_extension": ".py",
+      "mimetype": "text/x-python",
+      "name": "python",
+      "nbconvert_exporter": "python",
+      "pygments_lexer": "ipython3",
+      "version": "3.11.5"
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
 }