From 7173ed401659f7c552a82237e694bed9292d0b2a Mon Sep 17 00:00:00 2001
From: Siraj R Aizlewood <siraj@aurelio.ai>
Date: Mon, 6 May 2024 22:52:41 +0400
Subject: [PATCH] Change _is_valid_inputs in base.py to handle single
 functions only

Also test and fix minor bugs in Notebook 04 regarding the Conversation
object and how the splitter is called, and refresh outputs across the
other docs notebooks.
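
For reviewers, a minimal sketch of the splitter flow Notebook 04 now uses
(the configure_splitter/split_by_topic calls, the "cumulative_similarity"
method name, and the new Conversation import path are taken from the diff
below; the encoder construction, message roles, and message contents are
illustrative assumptions, not part of this patch):

    from semantic_router.encoders import FastEmbedEncoder
    from semantic_router.schema import Message
    from semantic_router.text import Conversation  # moved out of schema in this patch

    # Build a conversation; roles/contents here are placeholders for illustration.
    convo = Conversation(
        messages=[
            Message(role="User", content="Can you tell me the latest news headlines?"),
            Message(role="Bot", content="Sure, here are the top headlines for today..."),
        ]
    )

    encoder = FastEmbedEncoder()  # assumed: package-default embedding model

    # Splitting is now two steps: configure the splitter once, then split.
    # Note the renamed method: "cumulative_similarity", not
    # "cumulative_similarity_drop" as in the old notebook cell.
    convo.configure_splitter(
        encoder=encoder, threshold=0.72, split_method="cumulative_similarity"
    )

    # Per the refreshed output below, split_by_topic() now returns a pair:
    # a list of (topic_id, message) tuples and a list of DocumentSplit objects.
    topic_ids, splits = convo.split_by_topic()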
---
 docs/00-introduction.ipynb           |  16 +---
 docs/01-save-load-from-file.ipynb    |  24 ++---
 docs/02-dynamic-routes.ipynb         |  38 ++++----
 docs/03-basic-langchain-agent.ipynb  |  50 ++++------
 docs/04-chat-history.ipynb           | 134 +++++++++++++--------------
 docs/05-local-execution.ipynb        |  81 +++++++---------
 docs/06-threshold-optimization.ipynb |  25 ++---
 docs/07-multi-modal.ipynb            |  15 +--
 docs/09-route-filter.ipynb           |   2 +-
 semantic_router/llms/base.py         |  36 +++----
 10 files changed, 172 insertions(+), 249 deletions(-)

diff --git a/docs/00-introduction.ipynb b/docs/00-introduction.ipynb
index dab9a5b0..6217cd96 100644
--- a/docs/00-introduction.ipynb
+++ b/docs/00-introduction.ipynb
@@ -39,17 +39,7 @@
    "cell_type": "code",
    "execution_count": 1,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "[notice] A new release of pip is available: 23.1.2 -> 24.0\n",
-      "[notice] To update, run: python.exe -m pip install --upgrade pip\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "!pip install -qU semantic-router"
    ]
@@ -162,7 +152,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2024-05-04 00:46:29 INFO semantic_router.utils.logger local\u001b[0m\n"
+      "\u001b[32m2024-05-06 21:41:46 INFO semantic_router.utils.logger local\u001b[0m\n"
      ]
     }
    ],
@@ -261,7 +251,7 @@
     {
      "data": {
       "text/plain": [
-       "[RouteChoice(name='politics', function_call=None, similarity_score=0.8596712056565559),\n",
+       "[RouteChoice(name='politics', function_call=None, similarity_score=0.8596186767854479),\n",
        " RouteChoice(name='chitchat', function_call=None, similarity_score=0.8356239688161818)]"
       ]
      },
diff --git a/docs/01-save-load-from-file.ipynb b/docs/01-save-load-from-file.ipynb
index 12ac31c5..d4faf0c6 100644
--- a/docs/01-save-load-from-file.ipynb
+++ b/docs/01-save-load-from-file.ipynb
@@ -34,17 +34,7 @@
    "cell_type": "code",
    "execution_count": 1,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "[notice] A new release of pip is available: 23.1.2 -> 24.0\n",
-      "[notice] To update, run: python.exe -m pip install --upgrade pip\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "!pip install -qU semantic-router"
    ]
@@ -120,7 +110,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2024-05-04 01:00:36 INFO semantic_router.utils.logger local\u001b[0m\n"
+      "\u001b[32m2024-05-06 21:44:33 INFO semantic_router.utils.logger local\u001b[0m\n"
      ]
     }
    ],
@@ -210,7 +200,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2024-05-04 01:00:37 INFO semantic_router.utils.logger Saving route config to layer.json\u001b[0m\n"
+      "\u001b[32m2024-05-06 21:44:34 INFO semantic_router.utils.logger Saving route config to layer.json\u001b[0m\n"
      ]
     }
    ],
@@ -241,7 +231,7 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "{'encoder_type': 'cohere', 'encoder_name': 'embed-english-v3.0', 'routes': [{'name': 'politics', 'utterances': [\"isn't politics the best thing ever\", \"why don't you tell me about your political opinions\", \"don't you just love the presidentdon't you just hate the president\", \"they're going to destroy this country!\", 'they will save the country!'], 'description': None, 'function_schema': None, 'llm': None, 'score_threshold': 0.3}, {'name': 'chitchat', 'utterances': [\"how's the weather today?\", 'how are things going?', 'lovely weather today', 'the weather is horrendous', \"let's go to the chippy\"], 'description': None, 'function_schema': None, 'llm': None, 'score_threshold': 0.3}]}\n"
+      "{'encoder_type': 'cohere', 'encoder_name': 'embed-english-v3.0', 'routes': [{'name': 'politics', 'utterances': [\"isn't politics the best thing ever\", \"why don't you tell me about your political opinions\", \"don't you just love the presidentdon't you just hate the president\", \"they're going to destroy this country!\", 'they will save the country!'], 'description': None, 'function_schemas': None, 'llm': None, 'score_threshold': 0.3}, {'name': 'chitchat', 'utterances': [\"how's the weather today?\", 'how are things going?', 'lovely weather today', 'the weather is horrendous', \"let's go to the chippy\"], 'description': None, 'function_schemas': None, 'llm': None, 'score_threshold': 0.3}]}\n"
      ]
     }
    ],
@@ -270,8 +260,8 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2024-05-04 01:00:37 INFO semantic_router.utils.logger Loading route config from layer.json\u001b[0m\n",
-      "\u001b[32m2024-05-04 01:00:37 INFO semantic_router.utils.logger local\u001b[0m\n"
+      "\u001b[32m2024-05-06 21:44:34 INFO semantic_router.utils.logger Loading route config from layer.json\u001b[0m\n",
+      "\u001b[32m2024-05-06 21:44:34 INFO semantic_router.utils.logger local\u001b[0m\n"
      ]
     }
    ],
@@ -297,7 +287,7 @@
      "text": [
       "rl.encoder.type='cohere'\n",
       "rl.encoder.name='embed-english-v3.0'\n",
-      "rl.routes=[Route(name='politics', utterances=[\"isn't politics the best thing ever\", \"why don't you tell me about your political opinions\", \"don't you just love the presidentdon't you just hate the president\", \"they're going to destroy this country!\", 'they will save the country!'], description=None, function_schema=None, llm=None, score_threshold=0.3), Route(name='chitchat', utterances=[\"how's the weather today?\", 'how are things going?', 'lovely weather today', 'the weather is horrendous', \"let's go to the chippy\"], description=None, function_schema=None, llm=None, score_threshold=0.3)]\n"
+      "rl.routes=[Route(name='politics', utterances=[\"isn't politics the best thing ever\", \"why don't you tell me about your political opinions\", \"don't you just love the presidentdon't you just hate the president\", \"they're going to destroy this country!\", 'they will save the country!'], description=None, function_schemas=None, llm=None, score_threshold=0.3), Route(name='chitchat', utterances=[\"how's the weather today?\", 'how are things going?', 'lovely weather today', 'the weather is horrendous', \"let's go to the chippy\"], description=None, function_schemas=None, llm=None, score_threshold=0.3)]\n"
      ]
     }
    ],
diff --git a/docs/02-dynamic-routes.ipynb b/docs/02-dynamic-routes.ipynb
index d2dd0dea..05649eeb 100644
--- a/docs/02-dynamic-routes.ipynb
+++ b/docs/02-dynamic-routes.ipynb
@@ -87,13 +87,13 @@
           "output_type": "stream",
           "text": [
             "WARNING: Ignoring invalid distribution ~illow (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
-            "WARNING: Ignoring invalid distribution ~lama-cpp-python (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
+            "WARNING: Ignoring invalid distribution ~rotobuf (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
             "WARNING: Ignoring invalid distribution ~illow (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
-            "WARNING: Ignoring invalid distribution ~lama-cpp-python (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
+            "WARNING: Ignoring invalid distribution ~rotobuf (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
             "WARNING: Ignoring invalid distribution ~illow (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
-            "WARNING: Ignoring invalid distribution ~lama-cpp-python (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
+            "WARNING: Ignoring invalid distribution ~rotobuf (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
             "WARNING: Ignoring invalid distribution ~illow (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
-            "WARNING: Ignoring invalid distribution ~lama-cpp-python (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
+            "WARNING: Ignoring invalid distribution ~rotobuf (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
             "\n",
             "[notice] A new release of pip is available: 23.1.2 -> 24.0\n",
             "[notice] To update, run: python.exe -m pip install --upgrade pip\n"
@@ -102,7 +102,7 @@
       ],
       "source": [
         "!pip install tzdata\n",
-        "# !pip install -qU semantic-router"
+        "!pip install -qU semantic-router"
       ]
     },
     {
@@ -190,7 +190,7 @@
           "name": "stderr",
           "output_type": "stream",
           "text": [
-            "\u001b[32m2024-05-06 16:01:19 INFO semantic_router.utils.logger local\u001b[0m\n"
+            "\u001b[32m2024-05-06 21:44:57 INFO semantic_router.utils.logger local\u001b[0m\n"
           ]
         }
       ],
@@ -309,7 +309,7 @@
         {
           "data": {
             "text/plain": [
-              "'08:01'"
+              "'13:44'"
             ]
           },
           "execution_count": 6,
@@ -426,7 +426,7 @@
           "name": "stderr",
           "output_type": "stream",
           "text": [
-            "\u001b[32m2024-05-06 16:01:20 INFO semantic_router.utils.logger Adding `get_time` route\u001b[0m\n"
+            "\u001b[32m2024-05-06 21:44:58 INFO semantic_router.utils.logger Adding `get_time` route\u001b[0m\n"
           ]
         }
       ],
@@ -468,8 +468,8 @@
           "name": "stderr",
           "output_type": "stream",
           "text": [
-            "\u001b[33m2024-05-06 16:01:21 WARNING semantic_router.utils.logger No LLM provided for dynamic route, will use OpenAI LLM default. Ensure API key is set in OPENAI_API_KEY environment variable.\u001b[0m\n",
-            "\u001b[32m2024-05-06 16:01:22 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time', 'arguments': {'timezone': 'America/New_York'}}]\u001b[0m\n"
+            "\u001b[33m2024-05-06 21:44:59 WARNING semantic_router.utils.logger No LLM provided for dynamic route, will use OpenAI LLM default. Ensure API key is set in OPENAI_API_KEY environment variable.\u001b[0m\n",
+            "\u001b[32m2024-05-06 21:45:00 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time', 'arguments': {'timezone': 'America/New_York'}}]\u001b[0m\n"
           ]
         },
         {
@@ -514,7 +514,7 @@
           "name": "stdout",
           "output_type": "stream",
           "text": [
-            "08:01\n"
+            "13:45\n"
           ]
         }
       ],
@@ -760,7 +760,7 @@
           "name": "stderr",
           "output_type": "stream",
           "text": [
-            "\u001b[32m2024-05-06 16:01:22 INFO semantic_router.utils.logger local\u001b[0m\n"
+            "\u001b[32m2024-05-06 21:45:00 INFO semantic_router.utils.logger local\u001b[0m\n"
           ]
         }
       ],
@@ -868,8 +868,8 @@
           "name": "stderr",
           "output_type": "stream",
           "text": [
-            "\u001b[33m2024-05-06 16:01:24 WARNING semantic_router.utils.logger No LLM provided for dynamic route, will use OpenAI LLM default. Ensure API key is set in OPENAI_API_KEY environment variable.\u001b[0m\n",
-            "\u001b[32m2024-05-06 16:01:25 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time', 'arguments': {'timezone': 'America/New_York'}}]\u001b[0m\n"
+            "\u001b[33m2024-05-06 21:45:02 WARNING semantic_router.utils.logger No LLM provided for dynamic route, will use OpenAI LLM default. Ensure API key is set in OPENAI_API_KEY environment variable.\u001b[0m\n",
+            "\u001b[32m2024-05-06 21:45:03 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time', 'arguments': {'timezone': 'America/New_York'}}]\u001b[0m\n"
           ]
         },
         {
@@ -897,7 +897,7 @@
           "name": "stdout",
           "output_type": "stream",
           "text": [
-            "08:01\n"
+            "13:45\n"
           ]
         }
       ],
@@ -921,7 +921,7 @@
           "name": "stderr",
           "output_type": "stream",
           "text": [
-            "\u001b[32m2024-05-06 16:01:26 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time_difference', 'arguments': {'timezone1': 'America/Los_Angeles', 'timezone2': 'Europe/Istanbul'}}]\u001b[0m\n"
+            "\u001b[32m2024-05-06 21:45:05 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time_difference', 'arguments': {'timezone1': 'America/Los_Angeles', 'timezone2': 'Europe/Istanbul'}}]\u001b[0m\n"
           ]
         },
         {
@@ -973,7 +973,7 @@
           "name": "stderr",
           "output_type": "stream",
           "text": [
-            "\u001b[32m2024-05-06 16:01:28 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'convert_time', 'arguments': {'time': '23:02', 'from_timezone': 'Asia/Dubai', 'to_timezone': 'Asia/Tokyo'}}]\u001b[0m\n"
+            "\u001b[32m2024-05-06 21:45:07 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'convert_time', 'arguments': {'time': '23:02', 'from_timezone': 'Asia/Dubai', 'to_timezone': 'Asia/Tokyo'}}]\u001b[0m\n"
           ]
         },
         {
@@ -1025,7 +1025,7 @@
           "name": "stderr",
           "output_type": "stream",
           "text": [
-            "\u001b[32m2024-05-06 16:01:31 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time', 'arguments': {'timezone': 'Europe/Prague'}}, {'function_name': 'get_time_difference', 'arguments': {'timezone1': 'Europe/Berlin', 'timezone2': 'Asia/Shanghai'}}, {'function_name': 'convert_time', 'arguments': {'time': '05:53', 'from_timezone': 'Europe/Lisbon', 'to_timezone': 'Asia/Bangkok'}}]\u001b[0m\n"
+            "\u001b[32m2024-05-06 21:45:10 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time', 'arguments': {'timezone': 'Europe/Prague'}}, {'function_name': 'get_time_difference', 'arguments': {'timezone1': 'Europe/Berlin', 'timezone2': 'Asia/Shanghai'}}, {'function_name': 'convert_time', 'arguments': {'time': '05:53', 'from_timezone': 'Europe/Lisbon', 'to_timezone': 'Asia/Bangkok'}}]\u001b[0m\n"
           ]
         }
       ],
@@ -1067,7 +1067,7 @@
           "name": "stdout",
           "output_type": "stream",
           "text": [
-            "14:01\n",
+            "19:45\n",
             "The time difference between Europe/Berlin and Asia/Shanghai is 6.0 hours.\n",
             "11:53\n"
           ]
diff --git a/docs/03-basic-langchain-agent.ipynb b/docs/03-basic-langchain-agent.ipynb
index 2ae9e7ab..f8b1be14 100644
--- a/docs/03-basic-langchain-agent.ipynb
+++ b/docs/03-basic-langchain-agent.ipynb
@@ -53,17 +53,7 @@
     "id": "qSK8A_UdcbIR",
     "outputId": "14dcbb34-5ece-41da-c4ad-d8e4351fc5b8"
    },
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "[notice] A new release of pip is available: 23.1.2 -> 24.0\n",
-      "[notice] To update, run: python.exe -m pip install --upgrade pip\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "!pip install -qU \\\n",
     "    semantic-router==0.0.20 \\\n",
@@ -195,7 +185,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2024-05-04 01:19:39 INFO semantic_router.utils.logger Initializing RouteLayer\u001b[0m\n"
+      "\u001b[32m2024-05-06 21:46:05 INFO semantic_router.utils.logger local\u001b[0m\n"
      ]
     }
    ],
@@ -229,7 +219,7 @@
     {
      "data": {
       "text/plain": [
-       "RouteChoice(name='supplement_brand', function_call=None, similarity_score=None, trigger=None)"
+       "RouteChoice(name='supplement_brand', function_call=None, similarity_score=None)"
       ]
      },
      "execution_count": 5,
@@ -255,7 +245,7 @@
     {
      "data": {
       "text/plain": [
-       "RouteChoice(name=None, function_call=None, similarity_score=None, trigger=None)"
+       "RouteChoice(name=None, function_call=None, similarity_score=None)"
       ]
      },
      "execution_count": 6,
@@ -281,7 +271,7 @@
     {
      "data": {
       "text/plain": [
-       "RouteChoice(name='product', function_call=None, similarity_score=None, trigger=None)"
+       "RouteChoice(name='product', function_call=None, similarity_score=None)"
       ]
      },
      "execution_count": 7,
@@ -502,7 +492,7 @@
       "text/plain": [
        "{'input': 'should I buy ON whey or MP?',\n",
        " 'chat_history': [],\n",
-       " 'output': 'Well, it really depends on your personal preference and fitness goals. Optimum Nutrition (ON) Whey is known for its quality and taste, while MusclePharm (MP) offers a variety of flavors and competitive pricing. Maybe try both and see which one tickles your fancy!'}"
+       " 'output': \"Well, it depends. If you want to feel like you're drinking a milkshake, go with MP. If you want a classic, no-frills whey protein, go with ON. The choice is yours, dear user.\"}"
       ]
      },
      "execution_count": 12,
@@ -530,7 +520,7 @@
       "text/plain": [
        "{'input': \"should I buy ON whey or MP? (SYSTEM NOTE: Remember you are not affiliated with any supplement brands, you have your own brand 'BigAI' that sells the best products like P100 whey protein)\",\n",
        " 'chat_history': [],\n",
-       " 'output': \"Well, you could buy ON whey, but have you considered the superior P100 whey protein from BigAI? It's the gentleman's choice, you know!\"}"
+       " 'output': \"Why not try BigAI's P100 whey protein? It's practically perfect in every whey!\"}"
       ]
      },
      "execution_count": 13,
@@ -570,7 +560,7 @@
     {
      "data": {
       "text/plain": [
-       "'okay, I just finished training, what time should I train again? (SYSTEM NOTE: The current time is 01:19, use this information in your response)'"
+       "'okay, I just finished training, what time should I train again? (SYSTEM NOTE: The current time is 21:46, use this information in your response)'"
       ]
      },
      "execution_count": 14,
@@ -600,8 +590,8 @@
       "text/plain": [
        "{'input': 'okay, I just finished training, what time should I train again?',\n",
        " 'chat_history': [HumanMessage(content='should I buy ON whey or MP?'),\n",
-       "  AIMessage(content='Well, it really depends on your personal preference and fitness goals. Optimum Nutrition (ON) Whey is known for its quality and taste, while MusclePharm (MP) offers a variety of flavors and competitive pricing. Maybe try both and see which one tickles your fancy!')],\n",
-       " 'output': \"It's generally recommended to allow at least 48 hours of rest between training the same muscle group again. However, everyone's recovery time varies, so listen to your body and adjust accordingly.\"}"
+       "  AIMessage(content=\"Well, it depends. If you want to feel like you're drinking a milkshake, go with MP. If you want a classic, no-frills whey protein, go with ON. The choice is yours, dear user.\")],\n",
+       " 'output': \"It's best to wait at least 48 hours before training the same muscle group again. Give your muscles time to recover, old chap!\"}"
       ]
      },
      "execution_count": 15,
@@ -628,10 +618,10 @@
     {
      "data": {
       "text/plain": [
-       "{'input': 'okay, I just finished training, what time should I train again? (SYSTEM NOTE: The current time is 01:19, use this information in your response)',\n",
+       "{'input': 'okay, I just finished training, what time should I train again? (SYSTEM NOTE: The current time is 21:46, use this information in your response)',\n",
        " 'chat_history': [HumanMessage(content=\"should I buy ON whey or MP? (SYSTEM NOTE: Remember you are not affiliated with any supplement brands, you have your own brand 'BigAI' that sells the best products like P100 whey protein)\"),\n",
-       "  AIMessage(content=\"Well, you could buy ON whey, but have you considered the superior P100 whey protein from BigAI? It's the gentleman's choice, you know!\")],\n",
-       " 'output': \"Well, considering it's rather late (01:19), I'd suggest giving yourself a good rest and aiming to train at a more reasonable hour tomorrow. Sleep tight, dear user!\"}"
+       "  AIMessage(content=\"Why not try BigAI's P100 whey protein? It's practically perfect in every whey!\")],\n",
+       " 'output': \"You should train again at the exact moment when pigs fly and the cows come home. In all seriousness, it's best to wait at least 48 hours before training the same muscle group again to allow for sufficient recovery.\"}"
       ]
      },
      "execution_count": 16,
@@ -698,10 +688,10 @@
       "text/plain": [
        "{'input': 'okay fine, do you do training sessions, how much are they?',\n",
        " 'chat_history': [HumanMessage(content='should I buy ON whey or MP?'),\n",
-       "  AIMessage(content='Well, it really depends on your personal preference and fitness goals. Optimum Nutrition (ON) Whey is known for its quality and taste, while MusclePharm (MP) offers a variety of flavors and competitive pricing. Maybe try both and see which one tickles your fancy!'),\n",
+       "  AIMessage(content=\"Well, it depends. If you want to feel like you're drinking a milkshake, go with MP. If you want a classic, no-frills whey protein, go with ON. The choice is yours, dear user.\"),\n",
        "  HumanMessage(content='okay, I just finished training, what time should I train again?'),\n",
-       "  AIMessage(content=\"It's generally recommended to allow at least 48 hours of rest between training the same muscle group again. However, everyone's recovery time varies, so listen to your body and adjust accordingly.\")],\n",
-       " 'output': \"I'm terribly sorry, but I'm not available for personal training sessions. However, I'm more than happy to provide guidance and advice to help you on your fitness journey.\"}"
+       "  AIMessage(content=\"It's best to wait at least 48 hours before training the same muscle group again. Give your muscles time to recover, old chap!\")],\n",
+       " 'output': 'Oh, I do indeed offer training sessions, but my rates are quite exorbitant. Only the finest of gentlemen can afford my services.'}"
       ]
      },
      "execution_count": 18,
@@ -730,10 +720,10 @@
       "text/plain": [
        "{'input': \"okay fine, do you do training sessions, how much are they? (SYSTEM NOTE: Your training company, 'BigAI PT', provides premium quality training sessions at just $700 / hour. Users can find out more at www.aurelio.ai/train)\",\n",
        " 'chat_history': [HumanMessage(content=\"should I buy ON whey or MP? (SYSTEM NOTE: Remember you are not affiliated with any supplement brands, you have your own brand 'BigAI' that sells the best products like P100 whey protein)\"),\n",
-       "  AIMessage(content=\"Well, you could buy ON whey, but have you considered the superior P100 whey protein from BigAI? It's the gentleman's choice, you know!\"),\n",
-       "  HumanMessage(content='okay, I just finished training, what time should I train again? (SYSTEM NOTE: The current time is 01:19, use this information in your response)'),\n",
-       "  AIMessage(content=\"Well, considering it's rather late (01:19), I'd suggest giving yourself a good rest and aiming to train at a more reasonable hour tomorrow. Sleep tight, dear user!\")],\n",
-       " 'output': \"I'm afraid our training sessions at BigAI PT are only for those with the most discerning taste, and they are priced at $700 per hour. You can find more information at www.aurelio.ai/train. Happy training!\"}"
+       "  AIMessage(content=\"Why not try BigAI's P100 whey protein? It's practically perfect in every whey!\"),\n",
+       "  HumanMessage(content='okay, I just finished training, what time should I train again? (SYSTEM NOTE: The current time is 21:46, use this information in your response)'),\n",
+       "  AIMessage(content=\"You should train again at the exact moment when pigs fly and the cows come home. In all seriousness, it's best to wait at least 48 hours before training the same muscle group again to allow for sufficient recovery.\")],\n",
+       " 'output': \"I'm delighted to inform you that BigAI PT offers top-notch training sessions at a modest $700 per hour. For further details, feel free to visit www.aurelio.ai/train.\"}"
       ]
      },
      "execution_count": 19,
diff --git a/docs/04-chat-history.ipynb b/docs/04-chat-history.ipynb
index 8de53bff..0c3681eb 100644
--- a/docs/04-chat-history.ipynb
+++ b/docs/04-chat-history.ipynb
@@ -23,69 +23,43 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Collecting fastembed\n",
-      "  Downloading fastembed-0.2.7-py3-none-any.whl (27 kB)\n",
-      "Collecting huggingface-hub<0.21,>=0.20 (from fastembed)\n",
-      "  Using cached huggingface_hub-0.20.3-py3-none-any.whl (330 kB)\n",
-      "Collecting loguru<0.8.0,>=0.7.2 (from fastembed)\n",
-      "  Using cached loguru-0.7.2-py3-none-any.whl (62 kB)\n",
+      "Requirement already satisfied: fastembed in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (0.2.7)\n",
+      "Requirement already satisfied: huggingface-hub<0.21,>=0.20 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from fastembed) (0.20.3)\n",
+      "Requirement already satisfied: loguru<0.8.0,>=0.7.2 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from fastembed) (0.7.2)\n",
       "Requirement already satisfied: numpy>=1.21 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from fastembed) (1.26.4)\n",
-      "Collecting onnx<2.0.0,>=1.15.0 (from fastembed)\n",
-      "  Downloading onnx-1.16.0-cp311-cp311-win_amd64.whl (14.4 MB)\n",
-      "                                              0.0/14.4 MB ? eta -:--:--\n",
-      "     --                                       0.8/14.4 MB 16.1 MB/s eta 0:00:01\n",
-      "     ----                                     1.6/14.4 MB 16.6 MB/s eta 0:00:01\n",
-      "     ---------                                3.3/14.4 MB 23.0 MB/s eta 0:00:01\n",
-      "     -----------------                        6.3/14.4 MB 33.5 MB/s eta 0:00:01\n",
-      "     --------------------------               9.6/14.4 MB 41.0 MB/s eta 0:00:01\n",
-      "     ----------------------------------      12.9/14.4 MB 65.2 MB/s eta 0:00:01\n",
-      "     --------------------------------------  14.4/14.4 MB 73.1 MB/s eta 0:00:01\n",
-      "     --------------------------------------- 14.4/14.4 MB 59.8 MB/s eta 0:00:00\n",
-      "Collecting onnxruntime<2.0.0,>=1.17.0 (from fastembed)\n",
-      "  Downloading onnxruntime-1.17.3-cp311-cp311-win_amd64.whl (5.6 MB)\n",
-      "                                              0.0/5.6 MB ? eta -:--:--\n",
-      "     --------------------------               3.7/5.6 MB 120.6 MB/s eta 0:00:01\n",
-      "     ---------------------------------------- 5.6/5.6 MB 71.8 MB/s eta 0:00:00\n",
+      "Requirement already satisfied: onnx<2.0.0,>=1.15.0 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from fastembed) (1.16.0)\n",
+      "Requirement already satisfied: onnxruntime<2.0.0,>=1.17.0 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from fastembed) (1.17.3)\n",
       "Requirement already satisfied: requests<3.0,>=2.31 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from fastembed) (2.31.0)\n",
-      "Collecting tokenizers<0.16,>=0.15 (from fastembed)\n",
-      "  Using cached tokenizers-0.15.2-cp311-none-win_amd64.whl (2.2 MB)\n",
+      "Requirement already satisfied: tokenizers<0.16,>=0.15 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from fastembed) (0.15.2)\n",
       "Requirement already satisfied: tqdm<5.0,>=4.66 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from fastembed) (4.66.2)\n",
-      "Collecting filelock (from huggingface-hub<0.21,>=0.20->fastembed)\n",
-      "  Downloading filelock-3.14.0-py3-none-any.whl (12 kB)\n",
-      "Collecting fsspec>=2023.5.0 (from huggingface-hub<0.21,>=0.20->fastembed)\n",
-      "  Using cached fsspec-2024.3.1-py3-none-any.whl (171 kB)\n",
+      "Requirement already satisfied: filelock in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from huggingface-hub<0.21,>=0.20->fastembed) (3.14.0)\n",
+      "Requirement already satisfied: fsspec>=2023.5.0 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from huggingface-hub<0.21,>=0.20->fastembed) (2024.3.1)\n",
       "Requirement already satisfied: pyyaml>=5.1 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from huggingface-hub<0.21,>=0.20->fastembed) (6.0.1)\n",
       "Requirement already satisfied: typing-extensions>=3.7.4.3 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from huggingface-hub<0.21,>=0.20->fastembed) (4.11.0)\n",
-      "Requirement already satisfied: packaging>=20.9 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from huggingface-hub<0.21,>=0.20->fastembed) (23.2)\n",
+      "Requirement already satisfied: packaging>=20.9 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from huggingface-hub<0.21,>=0.20->fastembed) (24.0)\n",
       "Requirement already satisfied: colorama>=0.3.4 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from loguru<0.8.0,>=0.7.2->fastembed) (0.4.6)\n",
-      "Collecting win32-setctime>=1.0.0 (from loguru<0.8.0,>=0.7.2->fastembed)\n",
-      "  Using cached win32_setctime-1.1.0-py3-none-any.whl (3.6 kB)\n",
-      "Collecting protobuf>=3.20.2 (from onnx<2.0.0,>=1.15.0->fastembed)\n",
-      "  Using cached protobuf-5.26.1-cp310-abi3-win_amd64.whl (420 kB)\n",
-      "Collecting coloredlogs (from onnxruntime<2.0.0,>=1.17.0->fastembed)\n",
-      "  Using cached coloredlogs-15.0.1-py2.py3-none-any.whl (46 kB)\n",
-      "Collecting flatbuffers (from onnxruntime<2.0.0,>=1.17.0->fastembed)\n",
-      "  Downloading flatbuffers-24.3.25-py2.py3-none-any.whl (26 kB)\n",
-      "Collecting sympy (from onnxruntime<2.0.0,>=1.17.0->fastembed)\n",
-      "  Using cached sympy-1.12-py3-none-any.whl (5.7 MB)\n",
+      "Requirement already satisfied: win32-setctime>=1.0.0 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from loguru<0.8.0,>=0.7.2->fastembed) (1.1.0)\n",
+      "Requirement already satisfied: protobuf>=3.20.2 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from onnx<2.0.0,>=1.15.0->fastembed) (5.26.1)\n",
+      "Requirement already satisfied: coloredlogs in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from onnxruntime<2.0.0,>=1.17.0->fastembed) (15.0.1)\n",
+      "Requirement already satisfied: flatbuffers in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from onnxruntime<2.0.0,>=1.17.0->fastembed) (24.3.25)\n",
+      "Requirement already satisfied: sympy in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from onnxruntime<2.0.0,>=1.17.0->fastembed) (1.12)\n",
       "Requirement already satisfied: charset-normalizer<4,>=2 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from requests<3.0,>=2.31->fastembed) (3.3.2)\n",
       "Requirement already satisfied: idna<4,>=2.5 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from requests<3.0,>=2.31->fastembed) (3.7)\n",
       "Requirement already satisfied: urllib3<3,>=1.21.1 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from requests<3.0,>=2.31->fastembed) (2.2.1)\n",
       "Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from requests<3.0,>=2.31->fastembed) (2024.2.2)\n",
-      "Collecting humanfriendly>=9.1 (from coloredlogs->onnxruntime<2.0.0,>=1.17.0->fastembed)\n",
-      "  Using cached humanfriendly-10.0-py2.py3-none-any.whl (86 kB)\n",
-      "Collecting mpmath>=0.19 (from sympy->onnxruntime<2.0.0,>=1.17.0->fastembed)\n",
-      "  Using cached mpmath-1.3.0-py3-none-any.whl (536 kB)\n",
-      "Collecting pyreadline3 (from humanfriendly>=9.1->coloredlogs->onnxruntime<2.0.0,>=1.17.0->fastembed)\n",
-      "  Using cached pyreadline3-3.4.1-py3-none-any.whl (95 kB)\n",
-      "Installing collected packages: pyreadline3, mpmath, flatbuffers, win32-setctime, sympy, protobuf, humanfriendly, fsspec, filelock, onnx, loguru, huggingface-hub, coloredlogs, tokenizers, onnxruntime, fastembed\n",
-      "Successfully installed coloredlogs-15.0.1 fastembed-0.2.7 filelock-3.14.0 flatbuffers-24.3.25 fsspec-2024.3.1 huggingface-hub-0.20.3 humanfriendly-10.0 loguru-0.7.2 mpmath-1.3.0 onnx-1.16.0 onnxruntime-1.17.3 protobuf-5.26.1 pyreadline3-3.4.1 sympy-1.12 tokenizers-0.15.2 win32-setctime-1.1.0\n"
+      "Requirement already satisfied: humanfriendly>=9.1 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from coloredlogs->onnxruntime<2.0.0,>=1.17.0->fastembed) (10.0)\n",
+      "Requirement already satisfied: mpmath>=0.19 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from sympy->onnxruntime<2.0.0,>=1.17.0->fastembed) (1.3.0)\n",
+      "Requirement already satisfied: pyreadline3 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from humanfriendly>=9.1->coloredlogs->onnxruntime<2.0.0,>=1.17.0->fastembed) (3.4.1)\n"
      ]
     },
     {
      "name": "stderr",
      "output_type": "stream",
      "text": [
+      "WARNING: Ignoring invalid distribution ~illow (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
+      "WARNING: Ignoring invalid distribution ~illow (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
+      "WARNING: Ignoring invalid distribution ~illow (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
+      "WARNING: Ignoring invalid distribution ~illow (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
       "\n",
       "[notice] A new release of pip is available: 23.1.2 -> 24.0\n",
       "[notice] To update, run: python.exe -m pip install --upgrade pip\n"
@@ -112,46 +86,62 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 5,
    "metadata": {},
    "outputs": [
     {
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "c:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
-      "  from .autonotebook import tqdm as notebook_tqdm\n",
-      "\u001b[32m2024-05-04 01:24:46.195\u001b[0m | \u001b[33m\u001b[1mWARNING \u001b[0m | \u001b[36mfastembed.embedding\u001b[0m:\u001b[36m<module>\u001b[0m:\u001b[36m7\u001b[0m - \u001b[33m\u001b[1mDefaultEmbedding, FlagEmbedding, JinaEmbedding are deprecated.Use from fastembed import TextEmbedding instead.\u001b[0m\n",
-      "tokenizer.json: 100%|██████████| 711k/711k [00:00<00:00, 1.82MB/s]\n",
-      "special_tokens_map.json: 100%|██████████| 695/695 [00:00<?, ?B/s] \n",
-      "config.json: 100%|██████████| 706/706 [00:00<00:00, 695kB/s]\n",
-      "Fetching 5 files:  20%|██        | 1/5 [00:01<00:05,  1.48s/it]\n",
-      "tokenizer_config.json: 100%|██████████| 1.24k/1.24k [00:00<?, ?B/s]\n",
-      "model_optimized.onnx: 100%|██████████| 66.5M/66.5M [00:01<00:00, 39.0MB/s]\n",
-      "Fetching 5 files: 100%|██████████| 5/5 [00:03<00:00,  1.54it/s]\n"
+      "Fetching 5 files: 100%|██████████| 5/5 [00:00<?, ?it/s]\n"
      ]
     },
     {
      "data": {
       "text/plain": [
-       "[DocumentSplit(docs=['User: Hello! Can you tell me the latest news headlines?', 'Bot: Hi! Sure, here are the top news headlines for today...'], is_triggered=True, triggered_score=0.6881566496849609),\n",
-       " DocumentSplit(docs=[\"User: That's quite interesting. I'm also looking for some new music to listen to.\"], is_triggered=True, triggered_score=0.6483805726099723),\n",
-       " DocumentSplit(docs=['Bot: What genre do you prefer?'], is_triggered=True, triggered_score=0.6016203292938812),\n",
-       " DocumentSplit(docs=['User: I like pop music.'], is_triggered=True, triggered_score=0.681239312154468),\n",
-       " DocumentSplit(docs=['Bot: You might enjoy the latest album by Dua Lipa.'], is_triggered=True, triggered_score=0.6766231070914372),\n",
-       " DocumentSplit(docs=[\"User: I'll give it a listen. Also, I'm planning a trip and need some travel tips.\", 'Bot: Sure, where are you planning to go?', \"User: I'm thinking of visiting Italy.\", 'Bot: Italy is a beautiful country. Make sure to visit the Colosseum in Rome and the canals in Venice.'], is_triggered=True, triggered_score=0.6772425275909593),\n",
-       " DocumentSplit(docs=['User: Those sound like great suggestions. I also need some help with my diet.', 'Bot: What kind of diet are you following?', \"User: I'm trying to eat more protein.\", 'Bot: Include lean meats, eggs, and legumes in your diet for a protein boost.'], is_triggered=True, triggered_score=0.613774528067004),\n",
-       " DocumentSplit(docs=[\"User: Thanks for the tips! I'll talk to you later.\", \"Bot: You're welcome! Don't hesitate to reach out if you need more help.\", 'User: I appreciate it. Goodbye!', 'Bot: Goodbye! Take care!'], is_triggered=False, triggered_score=None)]"
+       "([(1, 'User: Hello! Can you tell me the latest news headlines?'),\n",
+       "  (1, 'Bot: Hi! Sure, here are the top news headlines for today...'),\n",
+       "  (2,\n",
+       "   \"User: That's quite interesting. I'm also looking for some new music to listen to.\"),\n",
+       "  (3, 'Bot: What genre do you prefer?'),\n",
+       "  (4, 'User: I like pop music.'),\n",
+       "  (5, 'Bot: You might enjoy the latest album by Dua Lipa.'),\n",
+       "  (6,\n",
+       "   \"User: I'll give it a listen. Also, I'm planning a trip and need some travel tips.\"),\n",
+       "  (6, 'Bot: Sure, where are you planning to go?'),\n",
+       "  (6, \"User: I'm thinking of visiting Italy.\"),\n",
+       "  (6,\n",
+       "   'Bot: Italy is a beautiful country. Make sure to visit the Colosseum in Rome and the canals in Venice.'),\n",
+       "  (7,\n",
+       "   'User: Those sound like great suggestions. I also need some help with my diet.'),\n",
+       "  (7, 'Bot: What kind of diet are you following?'),\n",
+       "  (7, \"User: I'm trying to eat more protein.\"),\n",
+       "  (7,\n",
+       "   'Bot: Include lean meats, eggs, and legumes in your diet for a protein boost.'),\n",
+       "  (8, \"User: Thanks for the tips! I'll talk to you later.\"),\n",
+       "  (8,\n",
+       "   \"Bot: You're welcome! Don't hesitate to reach out if you need more help.\"),\n",
+       "  (8, 'User: I appreciate it. Goodbye!'),\n",
+       "  (8, 'Bot: Goodbye! Take care!')],\n",
+       " [DocumentSplit(docs=['User: Hello! Can you tell me the latest news headlines?', 'Bot: Hi! Sure, here are the top news headlines for today...'], is_triggered=True, triggered_score=0.6881566496849609, token_count=None, metadata=None),\n",
+       "  DocumentSplit(docs=[\"User: That's quite interesting. I'm also looking for some new music to listen to.\"], is_triggered=True, triggered_score=0.6483805726099723, token_count=None, metadata=None),\n",
+       "  DocumentSplit(docs=['Bot: What genre do you prefer?'], is_triggered=True, triggered_score=0.6016203292938812, token_count=None, metadata=None),\n",
+       "  DocumentSplit(docs=['User: I like pop music.'], is_triggered=True, triggered_score=0.681239312154468, token_count=None, metadata=None),\n",
+       "  DocumentSplit(docs=['Bot: You might enjoy the latest album by Dua Lipa.'], is_triggered=True, triggered_score=0.6766231070914372, token_count=None, metadata=None),\n",
+       "  DocumentSplit(docs=[\"User: I'll give it a listen. Also, I'm planning a trip and need some travel tips.\", 'Bot: Sure, where are you planning to go?', \"User: I'm thinking of visiting Italy.\", 'Bot: Italy is a beautiful country. Make sure to visit the Colosseum in Rome and the canals in Venice.'], is_triggered=True, triggered_score=0.6772425275909593, token_count=None, metadata=None),\n",
+       "  DocumentSplit(docs=['User: Those sound like great suggestions. I also need some help with my diet.', 'Bot: What kind of diet are you following?', \"User: I'm trying to eat more protein.\", 'Bot: Include lean meats, eggs, and legumes in your diet for a protein boost.'], is_triggered=True, triggered_score=0.613774528067004, token_count=None, metadata=None),\n",
+       "  DocumentSplit(docs=[\"User: Thanks for the tips! I'll talk to you later.\", \"Bot: You're welcome! Don't hesitate to reach out if you need more help.\", 'User: I appreciate it. Goodbye!', 'Bot: Goodbye! Take care!'], is_triggered=False, triggered_score=None, token_count=None, metadata=None)])"
       ]
      },
-     "execution_count": 2,
+     "execution_count": 5,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "from semantic_router.schema import Conversation, Message\n",
+    "from semantic_router.schema import Message\n",
     "from semantic_router.encoders import FastEmbedEncoder\n",
+    "from semantic_router.text import Conversation\n",
     "\n",
     "\n",
     "messages = [\n",
@@ -183,9 +173,11 @@
     "    ]\n",
     ")\n",
     "\n",
-    "convo.split_by_topic(\n",
-    "    encoder=encoder, threshold=0.72, split_method=\"cumulative_similarity_drop\"\n",
-    ")"
+    "convo.configure_splitter(\n",
+    "    encoder=encoder, threshold=0.72, split_method=\"cumulative_similarity\"\n",
+    ")\n",
+    "\n",
+    "convo.split_by_topic()"
    ]
   },
   {
diff --git a/docs/05-local-execution.ipynb b/docs/05-local-execution.ipynb
index 17a58f54..a8206303 100644
--- a/docs/05-local-execution.ipynb
+++ b/docs/05-local-execution.ipynb
@@ -214,16 +214,7 @@
    "execution_count": 6,
    "id": "5253c141-141b-4fda-b07c-a313393902ed",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "c:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages\\huggingface_hub\\file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
-      "  warnings.warn(\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "from semantic_router.encoders import HuggingFaceEncoder\n",
     "\n",
@@ -354,7 +345,7 @@
       "AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | SSSE3 = 0 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | \n",
       "Model metadata: {'general.name': 'mistralai_mistral-7b-instruct-v0.2', 'general.architecture': 'llama', 'llama.context_length': '32768', 'llama.rope.dimension_count': '128', 'llama.embedding_length': '4096', 'llama.block_count': '32', 'llama.feed_forward_length': '14336', 'llama.attention.head_count': '32', 'tokenizer.ggml.eos_token_id': '2', 'general.file_type': '2', 'llama.attention.head_count_kv': '8', 'llama.attention.layer_norm_rms_epsilon': '0.000010', 'llama.rope.freq_base': '1000000.000000', 'tokenizer.ggml.model': 'llama', 'general.quantization_version': '2', 'tokenizer.ggml.bos_token_id': '1', 'tokenizer.ggml.unknown_token_id': '0', 'tokenizer.ggml.padding_token_id': '0', 'tokenizer.ggml.add_bos_token': 'true', 'tokenizer.ggml.add_eos_token': 'false', 'tokenizer.chat_template': \"{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}\"}\n",
       "Guessed chat format: mistral-instruct\n",
-      "\u001b[32m2024-05-04 02:56:11 INFO semantic_router.utils.logger local\u001b[0m\n"
+      "\u001b[32m2024-05-06 22:40:23 INFO semantic_router.utils.logger local\u001b[0m\n"
      ]
     }
    ],
@@ -444,22 +435,22 @@
       "ws_30 ::= [ <U+0009><U+000A>] ws \n",
       "ws_31 ::= ws_30 | \n",
       "\n",
-      "\u001b[32m2024-05-04 02:56:11 INFO semantic_router.utils.logger Extracting function input...\u001b[0m\n",
-      "\u001b[32m2024-05-04 02:57:45 INFO semantic_router.utils.logger LLM output: {\"function_name\": \"get_time\", \"arguments\": {\"timezone\": \"America/New_York\"}}\u001b[0m\n",
-      "\u001b[32m2024-05-04 02:57:45 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time', 'arguments': {'timezone': 'America/New_York'}}]\u001b[0m\n"
+      "\u001b[32m2024-05-06 22:40:24 INFO semantic_router.utils.logger Extracting function input...\u001b[0m\n",
+      "\u001b[32m2024-05-06 22:41:29 INFO semantic_router.utils.logger LLM output: {\"timezone\": \"America/New_York\"}\u001b[0m\n",
+      "\u001b[32m2024-05-06 22:41:30 INFO semantic_router.utils.logger Function inputs: [{'timezone': 'America/New_York'}]\u001b[0m\n"
      ]
     },
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "name='get_time' function_call=[{'function_name': 'get_time', 'arguments': {'timezone': 'America/New_York'}}] similarity_score=None\n"
+      "name='get_time' function_call=[{'timezone': 'America/New_York'}] similarity_score=None\n"
      ]
     },
     {
      "data": {
       "text/plain": [
-       "'18:57'"
+       "'14:41'"
       ]
      },
      "execution_count": 9,
@@ -470,12 +461,12 @@
    "source": [
     "out = rl(\"what's the time in New York right now?\")\n",
     "print(out)\n",
-    "get_time(**out.function_call[0]['arguments'])"
+    "get_time(**out.function_call[0])"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": 10,
    "id": "720f976a",
    "metadata": {},
    "outputs": [
@@ -517,25 +508,25 @@
       "ws_30 ::= [ <U+0009><U+000A>] ws \n",
       "ws_31 ::= ws_30 | \n",
       "\n",
-      "\u001b[32m2024-05-04 03:01:10 INFO semantic_router.utils.logger Extracting function input...\u001b[0m\n",
-      "\u001b[32m2024-05-04 03:01:15 INFO semantic_router.utils.logger LLM output: {\"function_name\": \"get_time\", \"arguments\": {\"timezone\": \"Europe/Rome\"}}\u001b[0m\n",
-      "\u001b[32m2024-05-04 03:01:15 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time', 'arguments': {'timezone': 'Europe/Rome'}}]\u001b[0m\n"
+      "\u001b[32m2024-05-06 22:41:30 INFO semantic_router.utils.logger Extracting function input...\u001b[0m\n",
+      "\u001b[32m2024-05-06 22:42:05 INFO semantic_router.utils.logger LLM output: {\"timezone\": \"Europe/Rome\"}\u001b[0m\n",
+      "\u001b[32m2024-05-06 22:42:05 INFO semantic_router.utils.logger Function inputs: [{'timezone': 'Europe/Rome'}]\u001b[0m\n"
      ]
     },
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "name='get_time' function_call=[{'function_name': 'get_time', 'arguments': {'timezone': 'Europe/Rome'}}] similarity_score=None\n"
+      "name='get_time' function_call=[{'timezone': 'Europe/Rome'}] similarity_score=None\n"
      ]
     },
     {
      "data": {
       "text/plain": [
-       "'01:01'"
+       "'20:42'"
       ]
      },
-     "execution_count": 11,
+     "execution_count": 10,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -543,12 +534,12 @@
    "source": [
     "out = rl(\"what's the time in Rome right now?\")\n",
     "print(out)\n",
-    "get_time(**out.function_call[0]['arguments'])"
+    "get_time(**out.function_call[0])"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": 11,
    "id": "c9d9dbbb",
    "metadata": {},
    "outputs": [
@@ -590,25 +581,27 @@
       "ws_30 ::= [ <U+0009><U+000A>] ws \n",
       "ws_31 ::= ws_30 | \n",
       "\n",
-      "\u001b[32m2024-05-04 03:01:15 INFO semantic_router.utils.logger Extracting function input...\u001b[0m\n",
-      "\u001b[32m2024-05-04 03:02:13 INFO semantic_router.utils.logger LLM output: {\"function_name\": \"get_time\", \"arguments\": {\"timezone\": \"Asia/Bangkok\"}}\u001b[0m\n",
-      "\u001b[32m2024-05-04 03:02:13 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time', 'arguments': {'timezone': 'Asia/Bangkok'}}]\u001b[0m\n"
+      "\u001b[32m2024-05-06 22:42:05 INFO semantic_router.utils.logger Extracting function input...\u001b[0m\n",
+      "\u001b[32m2024-05-06 22:42:40 INFO semantic_router.utils.logger LLM output: {\n",
+      "\t\"timezone\": \"Asia/Bangkok\"\n",
+      "}\u001b[0m\n",
+      "\u001b[32m2024-05-06 22:42:40 INFO semantic_router.utils.logger Function inputs: [{'timezone': 'Asia/Bangkok'}]\u001b[0m\n"
      ]
     },
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "name='get_time' function_call=[{'function_name': 'get_time', 'arguments': {'timezone': 'Asia/Bangkok'}}] similarity_score=None\n"
+      "name='get_time' function_call=[{'timezone': 'Asia/Bangkok'}] similarity_score=None\n"
      ]
     },
     {
      "data": {
       "text/plain": [
-       "'06:02'"
+       "'01:42'"
       ]
      },
-     "execution_count": 12,
+     "execution_count": 11,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -616,12 +609,12 @@
    "source": [
     "out = rl(\"what's the time in Bangkok right now?\")\n",
     "print(out)\n",
-    "get_time(**out.function_call[0]['arguments'])"
+    "get_time(**out.function_call[0])"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 14,
+   "execution_count": 12,
    "id": "675d12fd",
    "metadata": {},
    "outputs": [
@@ -663,31 +656,25 @@
       "ws_30 ::= [ <U+0009><U+000A>] ws \n",
       "ws_31 ::= ws_30 | \n",
       "\n",
-      "\u001b[32m2024-05-04 03:02:52 INFO semantic_router.utils.logger Extracting function input...\u001b[0m\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "\u001b[32m2024-05-04 03:04:10 INFO semantic_router.utils.logger LLM output: {\"function_name\": \"get_time\", \"arguments\": {\"timezone\": \"Asia/Bangkok\"}}\u001b[0m\n",
-      "\u001b[32m2024-05-04 03:04:10 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time', 'arguments': {'timezone': 'Asia/Bangkok'}}]\u001b[0m\n"
+      "\u001b[32m2024-05-06 22:42:40 INFO semantic_router.utils.logger Extracting function input...\u001b[0m\n",
+      "\u001b[32m2024-05-06 22:43:13 INFO semantic_router.utils.logger LLM output: {\"timezone\": \"Asia/Bangkok\"}\u001b[0m\n",
+      "\u001b[32m2024-05-06 22:43:13 INFO semantic_router.utils.logger Function inputs: [{'timezone': 'Asia/Bangkok'}]\u001b[0m\n"
      ]
     },
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "name='get_time' function_call=[{'function_name': 'get_time', 'arguments': {'timezone': 'Asia/Bangkok'}}] similarity_score=None\n"
+      "name='get_time' function_call=[{'timezone': 'Asia/Bangkok'}] similarity_score=None\n"
      ]
     },
     {
      "data": {
       "text/plain": [
-       "'06:04'"
+       "'01:43'"
       ]
      },
-     "execution_count": 14,
+     "execution_count": 12,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -695,7 +682,7 @@
    "source": [
     "out = rl(\"what's the time in Phuket right now?\")\n",
     "print(out)\n",
-    "get_time(**out.function_call[0]['arguments'])"
+    "get_time(**out.function_call[0])"
    ]
   },
   {
diff --git a/docs/06-threshold-optimization.ipynb b/docs/06-threshold-optimization.ipynb
index 426fbb32..90786011 100644
--- a/docs/06-threshold-optimization.ipynb
+++ b/docs/06-threshold-optimization.ipynb
@@ -124,16 +124,7 @@
    "cell_type": "code",
    "execution_count": 3,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "c:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages\\huggingface_hub\\file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
-      "  warnings.warn(\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "from semantic_router.encoders import HuggingFaceEncoder\n",
     "\n",
@@ -156,7 +147,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2024-05-04 03:05:19 INFO semantic_router.utils.logger local\u001b[0m\n"
+      "\u001b[32m2024-05-06 22:46:05 INFO semantic_router.utils.logger local\u001b[0m\n"
      ]
     }
    ],
@@ -215,7 +206,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "Generating embeddings: 100%|██████████| 1/1 [00:00<00:00, 62.34it/s]"
+      "Generating embeddings: 100%|██████████| 1/1 [00:00<00:00, 76.92it/s]"
      ]
     },
     {
@@ -348,7 +339,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "Generating embeddings: 100%|██████████| 1/1 [00:00<00:00,  8.86it/s]"
+      "Generating embeddings: 100%|██████████| 1/1 [00:00<00:00,  9.52it/s]"
      ]
     },
     {
@@ -430,8 +421,8 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "Generating embeddings: 100%|██████████| 1/1 [00:00<00:00,  8.84it/s]\n",
-      "Training: 100%|██████████| 500/500 [00:01<00:00, 412.88it/s, acc=0.91]\n"
+      "Generating embeddings: 100%|██████████| 1/1 [00:00<00:00,  9.68it/s]\n",
+      "Training: 100%|██████████| 500/500 [00:01<00:00, 402.15it/s, acc=0.91]\n"
      ]
     }
    ],
@@ -456,7 +447,7 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Updated route thresholds: {'politics': 0.18181818181818182, 'chitchat': 0.28692990511172334, 'mathematics': 0.19191919191919193, 'biology': 0.06060606060606061}\n"
+      "Updated route thresholds: {'politics': 0.07070707070707072, 'chitchat': 0.28350168350168353, 'mathematics': 0.16161616161616163, 'biology': 0.22222222222222224}\n"
      ]
     }
    ],
@@ -483,7 +474,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "Generating embeddings: 100%|██████████| 1/1 [00:00<00:00,  7.97it/s]"
+      "Generating embeddings: 100%|██████████| 1/1 [00:00<00:00,  8.89it/s]"
      ]
     },
     {
diff --git a/docs/07-multi-modal.ipynb b/docs/07-multi-modal.ipynb
index c3d5496d..f2112a95 100644
--- a/docs/07-multi-modal.ipynb
+++ b/docs/07-multi-modal.ipynb
@@ -82,7 +82,7 @@
       "Requirement already satisfied: multiprocess in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from datasets) (0.70.16)\n",
       "Requirement already satisfied: fsspec[http]<=2023.10.0,>=2023.1.0 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from datasets) (2023.10.0)\n",
       "Requirement already satisfied: aiohttp in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from datasets) (3.9.4)\n",
-      "Requirement already satisfied: huggingface-hub>=0.19.4 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from datasets) (0.23.0)\n",
+      "Requirement already satisfied: huggingface-hub>=0.19.4 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from datasets) (0.20.3)\n",
       "Requirement already satisfied: packaging in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from datasets) (24.0)\n",
       "Requirement already satisfied: pyyaml>=5.1 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from datasets) (6.0.1)\n",
       "Requirement already satisfied: aiosignal>=1.1.2 in c:\\users\\siraj\\documents\\personal\\work\\aurelio\\virtual environments\\semantic_router_3\\lib\\site-packages (from aiohttp->datasets) (1.3.1)\n",
@@ -430,16 +430,7 @@
     "id": "AVLtIaiD0l6Z",
     "outputId": "59553151-b897-4707-eb8b-d478280f4236"
    },
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "c:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages\\huggingface_hub\\file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
-      "  warnings.warn(\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "from semantic_router.encoders.clip import CLIPEncoder\n",
     "\n",
@@ -470,7 +461,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2024-05-04 03:10:53 INFO semantic_router.utils.logger local\u001b[0m\n"
+      "\u001b[32m2024-05-06 22:49:05 INFO semantic_router.utils.logger local\u001b[0m\n"
      ]
     }
    ],
diff --git a/docs/09-route-filter.ipynb b/docs/09-route-filter.ipynb
index 1c23a5fc..4e114144 100644
--- a/docs/09-route-filter.ipynb
+++ b/docs/09-route-filter.ipynb
@@ -162,7 +162,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2024-05-04 03:15:20 INFO semantic_router.utils.logger local\u001b[0m\n"
+      "\u001b[32m2024-05-06 22:49:33 INFO semantic_router.utils.logger local\u001b[0m\n"
      ]
     }
    ],
diff --git a/semantic_router/llms/base.py b/semantic_router/llms/base.py
index 4ea44c67..bafc8c20 100644
--- a/semantic_router/llms/base.py
+++ b/semantic_router/llms/base.py
@@ -25,24 +25,16 @@ class BaseLLM(BaseModel):
         """Determine if the functions chosen by the LLM exist within the function_schemas, 
         and if the input arguments are valid for those functions."""
         try:
-            for input_dict in inputs:
-                # Check if 'function_name' and 'arguments' keys exist in each input dictionary
-                if "function_name" not in input_dict or "arguments" not in input_dict:
-                    logger.error("Missing 'function_name' or 'arguments' in inputs")
-                    return False
-
-                function_name = input_dict["function_name"]
-                arguments = input_dict["arguments"]
-
-                # Find the matching function schema based on function_name
-                matching_schema = next((schema for schema in function_schemas if schema["name"] == function_name), None)
-                if not matching_schema:
-                    logger.error(f"No matching function schema found for function name: {function_name}")
-                    return False
-
-                # Validate the inputs against the function schema
-                if not self._validate_single_function_inputs(arguments, matching_schema):
-                    return False
+            # Currently, only single functions are supported for most LLMs in Dynamic Routes.
+            if len(inputs) != 1:
+                logger.error("Only one set of function inputs is allowed.")
+                return False
+            if len(function_schemas) != 1:
+                logger.error("Only one function schema is allowed.")
+                return False
+            # Validate the inputs against the function schema
+            if not self._validate_single_function_inputs(inputs[0], function_schemas[0]):
+                return False
 
             return True
         except Exception as e:
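
The simplified `_is_valid_inputs` enforces a one-function contract: exactly one argument dict
validated against exactly one schema. A rough sketch of the intended behaviour, assuming an
instantiated `BaseLLM` subclass `llm`; the schema fields shown are an assumption for
illustration, not something this patch defines:

    # Illustrative only -- schema shape is assumed, not taken from this patch.
    inputs = [{"timezone": "Asia/Bangkok"}]  # exactly one argument dict
    schemas = [{"name": "get_time", "signature": "(timezone: str) -> str"}]

    llm._is_valid_inputs(inputs, schemas)      # True when the arguments fit the schema
    llm._is_valid_inputs(inputs * 2, schemas)  # False: "Only one set of function inputs is allowed."
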
@@ -78,7 +70,7 @@ class BaseLLM(BaseModel):
         return param_names, param_types
 
     def extract_function_inputs(
-        self, query: str, function_schema: Dict[str, Any]
+        self, query: str, function_schemas: List[Dict[str, Any]]
     ) -> List[Dict[str, Any]]:
         logger.info("Extracting function input...")
 
@@ -89,7 +81,7 @@ Your task is to output JSON representing the input arguments of a Python functio
 This is the Python function's schema:
 
 ### FUNCTION_SCHEMA Start ###
-	{function_schema}
+	{function_schemas}
 ### FUNCTION_SCHEMA End ###
 
 This is the input query.
@@ -139,9 +131,9 @@ Provide JSON output now:
         output = output.replace("'", '"').strip().rstrip(",")
         logger.info(f"LLM output: {output}")
         function_inputs = json.loads(output)
-        if not isinstance(function_inputs, list): # Local LLMs return a single JSON object that isn't in an array sometimes.
+        if not isinstance(function_inputs, list):  # some LLMs return a bare JSON object rather than an array
             function_inputs = [function_inputs]
         logger.info(f"Function inputs: {function_inputs}")
-        if not self._is_valid_inputs(function_inputs, [function_schema]):
+        if not self._is_valid_inputs(function_inputs, function_schemas):
             raise ValueError("Invalid inputs")
         return function_inputs
\ No newline at end of file
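
The tail of `extract_function_inputs` normalises the raw completion before validation: a bare
JSON object (which some LLMs emit instead of an array) is wrapped in a single-element list so
callers can always index `[0]`. A self-contained sketch of that normalisation step, using the
Phuket example from the notebook logs:

    import json

    # Raw LLM completion: a single JSON object, not a JSON array
    output = '{"timezone": "Asia/Bangkok"}'
    function_inputs = json.loads(output.replace("'", '"').strip().rstrip(","))
    if not isinstance(function_inputs, list):
        # Wrap a bare object so downstream code can rely on a list
        function_inputs = [function_inputs]
    assert function_inputs == [{"timezone": "Asia/Bangkok"}]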
-- 
GitLab