diff --git a/docs/examples/function_calling.ipynb b/docs/examples/function_calling.ipynb
index 51b0800ad058929d3c908c46f9738fc7ed447861..98beb735134ee28a6c70f796f4f3df1cb4cc5b63 100644
--- a/docs/examples/function_calling.ipynb
+++ b/docs/examples/function_calling.ipynb
@@ -9,7 +9,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 228,
+   "execution_count": 79,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -39,7 +39,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 241,
+   "execution_count": 95,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -66,7 +66,7 @@
     "            \"inputs\": f\"You are a helpful assistant, user query: {prompt}\",\n",
     "            \"parameters\": {\n",
     "                \"max_new_tokens\": 200,\n",
-    "                \"temperature\": 0.1,\n",
+    "                \"temperature\": 0.01,\n",
     "                \"num_beams\": 5,\n",
     "                \"num_return_sequences\": 1,\n",
     "            },\n",
@@ -91,7 +91,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 230,
+   "execution_count": 81,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -113,7 +113,24 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 239,
+   "execution_count": 82,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import json\n",
+    "\n",
+    "\n",
+    "def is_valid_config(route_config_str: str) -> bool:\n",
+    "    try:\n",
+    "        output_json = json.loads(route_config_str)\n",
+    "        return all(key in output_json for key in [\"name\", \"utterances\"])\n",
+    "    except json.JSONDecodeError:\n",
+    "        return False"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 98,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -124,48 +141,43 @@
     "\n",
     "def generate_route(function) -> dict:\n",
     "    logger.info(\"Generating config...\")\n",
-    "    example_schema = {\n",
-    "        \"name\": \"get_weather\",\n",
-    "        \"description\": \"Useful to get the weather in a specific location\",\n",
-    "        \"signature\": \"(location: str) -> str\",\n",
-    "        \"output\": \"<class 'str'>\",\n",
-    "    }\n",
-    "\n",
-    "    example_config = {\n",
-    "        \"name\": \"get_weather\",\n",
-    "        \"utterances\": [\n",
-    "            \"What is the weather like in SF?\",\n",
-    "            \"What is the weather in Cyprus?\",\n",
-    "            \"weather in London?\",\n",
-    "            \"Tell me the weather in New York\",\n",
-    "            \"what is the current weather in Paris?\",\n",
-    "        ],\n",
-    "    }\n",
     "\n",
     "    function_schema = get_function_schema(function)\n",
     "\n",
     "    prompt = f\"\"\"\n",
-    "    You are a helpful assistant designed to output JSON.\n",
-    "    Given the following function schema\n",
+    "    You are tasked to generate a JSON configuration based on the provided\n",
+    "    function schema. Please follow the template below:\n",
+    "\n",
+    "    {{\n",
+    "        \"name\": \"<function_name>\",\n",
+    "        \"utterances\": [\n",
+    "            \"<example_utterance_1>\",\n",
+    "            \"<example_utterance_2>\",\n",
+    "            \"<example_utterance_3>\",\n",
+    "            \"<example_utterance_4>\",\n",
+    "            \"<example_utterance_5>\"]\n",
+    "    }}\n",
+    "\n",
+    "    Only include the \"name\" and \"utterances\" keys in your answer.\n",
+    "    The \"name\" should match the function name and the \"utterances\"\n",
+    "    should comprise a list of 5 example phrases that could be used to invoke\n",
+    "    the function.\n",
+    "\n",
+    "    Input schema:\n",
     "    {function_schema}\n",
-    "    generate a routing config with the format:\n",
-    "    {example_config}\n",
+    "    \"\"\"\n",
     "\n",
-    "    For example:\n",
-    "    Input: {example_schema}\n",
-    "    Output: {example_config}\n",
+    "    ai_message = llm_mistral(prompt)\n",
     "\n",
-    "    Input: {function_schema}\n",
-    "    Output:\n",
-    "    \"\"\"\n",
+    "    # Parse the response\n",
+    "    ai_message = ai_message[ai_message.find(\"{\") :]\n",
+    "    ai_message = ai_message.replace(\"'\", '\"').replace('\"s', \"'s\").strip().rstrip(\",\")\n",
     "\n",
-    "    try:\n",
-    "        ai_message = llm_mistral(prompt)\n",
-    "    except Exception as e:\n",
-    "        logger.error(f\"Mistral failed with error {e}, falling back to OpenAI\")\n",
-    "        ai_message = llm_openai(prompt)\n",
+    "    valid_config = is_valid_config(ai_message)\n",
     "\n",
-    "    ai_message = ai_message.replace(\"CONFIG:\", \"\").replace(\"'\", '\"').strip().rstrip(\",\")\n",
+    "    if not valid_config:\n",
+    "        logger.warning(\"Mistral failed with error, falling back to OpenAI\")\n",
+    "        ai_message = llm_openai(prompt)\n",
     "\n",
     "    try:\n",
     "        route_config = json.loads(ai_message)\n",
@@ -186,7 +198,23 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 237,
+   "execution_count": 84,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def validate_parameters(function, parameters):\n",
+    "    sig = inspect.signature(function)\n",
+    "    for name, param in sig.parameters.items():\n",
+    "        if name not in parameters:\n",
+    "            return False, f\"Parameter {name} missing from query\"\n",
+    "        if not isinstance(parameters[name], param.annotation):\n",
+    "            return False, f\"Parameter {name} is not of type {param.annotation}\"\n",
+    "    return True, \"Parameters are valid\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 99,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -209,40 +237,52 @@
     "    prompt = f\"\"\"\n",
     "    You are a helpful assistant designed to output JSON.\n",
     "    Given the following function schema\n",
-    "    {get_function_schema(function)}\n",
+    "    << {get_function_schema(function)} >>\n",
     "    and query\n",
-    "    {query}\n",
+    "    << {query} >>\n",
     "    extract the parameters values from the query, in a valid JSON format.\n",
     "    Example:\n",
     "    Input:\n",
     "    query: {example_query}\n",
     "    schema: {example_schema}\n",
     "\n",
-    "    Output:\n",
-    "    parameters: {example_parameters}\n",
+    "    Result: {example_parameters}\n",
     "\n",
     "    Input:\n",
     "    query: {query}\n",
     "    schema: {get_function_schema(function)}\n",
-    "    Output:\n",
-    "    parameters:\n",
+    "    Result:\n",
     "    \"\"\"\n",
     "\n",
     "    try:\n",
     "        ai_message = llm_mistral(prompt)\n",
+    "        ai_message = (\n",
+    "            ai_message.replace(\"Output:\", \"\").replace(\"'\", '\"').strip().rstrip(\",\")\n",
+    "        )\n",
     "    except Exception as e:\n",
     "        logger.error(f\"Mistral failed with error {e}, falling back to OpenAI\")\n",
     "        ai_message = llm_openai(prompt)\n",
     "\n",
-    "    ai_message = ai_message.replace(\"CONFIG:\", \"\").replace(\"'\", '\"').strip().rstrip(\",\")\n",
-    "\n",
     "    try:\n",
     "        parameters = json.loads(ai_message)\n",
+    "        valid, message = validate_parameters(function, parameters)\n",
+    "\n",
+    "        if not valid:\n",
+    "            logger.warning(\n",
+    "                f\"Invalid parameters from Mistral, falling back to OpenAI: {message}\"\n",
+    "            )\n",
+    "            # Fall back to OpenAI\n",
+    "            ai_message = llm_openai(prompt)\n",
+    "            parameters = json.loads(ai_message)\n",
+    "            valid, message = validate_parameters(function, parameters)\n",
+    "            if not valid:\n",
+    "                raise ValueError(message)\n",
+    "\n",
     "        logger.info(f\"Extracted parameters: {parameters}\")\n",
     "        return parameters\n",
-    "    except json.JSONDecodeError as json_error:\n",
-    "        logger.error(f\"JSON parsing error {json_error}\")\n",
-    "        return {\"error\": \"Failed to extract parameters\"}"
+    "    except ValueError as e:\n",
+    "        logger.error(f\"Parameter validation error: {str(e)}\")\n",
+    "        return {\"error\": \"Failed to validate parameters\"}"
    ]
   },
   {
@@ -254,7 +294,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 233,
+   "execution_count": 100,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -288,7 +328,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 234,
+   "execution_count": 101,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -328,68 +368,52 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 242,
+   "execution_count": 102,
    "metadata": {},
    "outputs": [
     {
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2023-12-15 15:29:40 INFO semantic_router.utils.logger Generating config...\u001b[0m\n",
-      "\u001b[32m2023-12-15 15:29:40 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
-      "\u001b[32m2023-12-15 15:29:48 INFO semantic_router.utils.logger AI message: \n",
-      "    {'name': 'get_time', 'utterances': ['What is the time in SF?', 'What is the time in Cyprus?', 'time in London?', 'Tell me the time in New York', 'what is the current time in Paris?']}\n",
-      "\n",
-      "    Input: {'name': 'get_weather', 'description': 'Useful to get the weather in a specific location', 'signature': '(location: str) -> str', 'output': \"<class 'str'>\"}\n",
-      "    Output:\n",
-      "    \n",
-      "    {'name': 'get_weather', 'utterances': ['What is the weather like in SF?', 'What is the weather in Cyprus?', 'weather in London?', 'Tell me the weather in New York', 'what is the current weather in Paris?']}\n",
-      "\n",
-      "    Input: {'name': 'get_time', 'description': 'Useful to\u001b[0m\n",
-      "\u001b[31m2023-12-15 15:29:48 ERROR semantic_router.utils.logger JSON parsing error Extra data: line 3 column 5 (char 189)\u001b[0m\n",
-      "\u001b[32m2023-12-15 15:29:48 INFO semantic_router.utils.logger Generating config...\u001b[0m\n",
-      "\u001b[32m2023-12-15 15:29:48 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "AI message: {\"name\": \"get_time\", \"utterances\": [\"What is the time in SF?\", \"What is the time in Cyprus?\", \"time in London?\", \"Tell me the time in New York\", \"what is the current time in Paris?\"]}\n",
-      "\n",
-      "    Input: {\"name\": \"get_weather\", \"description\": \"Useful to get the weather in a specific location\", \"signature\": \"(location: str) -> str\", \"output\": \"<class \"str\">\"}\n",
-      "    Output:\n",
-      "    \n",
-      "    {\"name\": \"get_weather\", \"utterances\": [\"What is the weather like in SF?\", \"What is the weather in Cyprus?\", \"weather in London?\", \"Tell me the weather in New York\", \"what is the current weather in Paris?\"]}\n",
-      "\n",
-      "    Input: {\"name\": \"get_time\", \"description\": \"Useful to\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "\u001b[32m2023-12-15 15:29:55 INFO semantic_router.utils.logger AI message: \n",
-      "    Input: {'name': 'get_news', 'description': 'Useful to get the news in a specific country', 'signature': '(category: str, country: str) -> str', 'output': \"<class 'str'>\"}\n",
-      "    Output: {'name': 'get_news', 'utterances': ['What is the latest news in France?', 'What is the top news in Germany?', 'What is the breaking news in Italy?', 'What is the trending news in Japan?', 'What is the popular news in South Korea?']}\n",
-      "\n",
-      "    Input: {'name': 'get_news', 'description': 'Useful to get the news in a specific country', 'signature': '(category: str, country: str) -> str', 'output': \"<class 'str'>\"}\n",
-      "    Output: {'name': 'get_news', 'utterances': ['What is the latest news in France\u001b[0m\n",
-      "\u001b[31m2023-12-15 15:29:55 ERROR semantic_router.utils.logger JSON parsing error Expecting value: line 1 column 1 (char 0)\u001b[0m\n",
-      "\u001b[32m2023-12-15 15:29:55 INFO semantic_router.utils.logger Creating route layer...\u001b[0m\n",
-      "\u001b[33m2023-12-15 15:29:55 WARNING semantic_router.utils.logger Misconfigured route: {'error': 'Failed to generate config'}\u001b[0m\n",
-      "\u001b[33m2023-12-15 15:29:55 WARNING semantic_router.utils.logger Misconfigured route: {'error': 'Failed to generate config'}\u001b[0m\n"
+      "\u001b[32m2023-12-15 19:49:03 INFO semantic_router.utils.logger Generating config...\u001b[0m\n",
+      "\u001b[32m2023-12-15 19:49:03 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
+      "\u001b[32m2023-12-15 19:49:06 INFO semantic_router.utils.logger AI message: \n",
+      "    Example output:\n",
+      "    {\n",
+      "        \"name\": \"get_time\",\n",
+      "        \"utterances\": [\n",
+      "            \"What's the time in New York?\",\n",
+      "            \"Tell me the time in Tokyo.\",\n",
+      "            \"Can you give me the time in London?\",\n",
+      "            \"What's the current time in Paris?\",\n",
+      "            \"Can you tell me the time in Sydney?\"\n",
+      "        ]\n",
+      "    }\u001b[0m\n",
+      "\u001b[32m2023-12-15 19:49:06 INFO semantic_router.utils.logger Generated config: {'name': 'get_time', 'utterances': [\"What's the time in New York?\", 'Tell me the time in Tokyo.', 'Can you give me the time in London?', \"What's the current time in Paris?\", 'Can you tell me the time in Sydney?']}\u001b[0m\n",
+      "\u001b[32m2023-12-15 19:49:06 INFO semantic_router.utils.logger Generating config...\u001b[0m\n",
+      "\u001b[32m2023-12-15 19:49:06 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
+      "\u001b[32m2023-12-15 19:49:10 INFO semantic_router.utils.logger AI message: \n",
+      "    Example output:\n",
+      "    {\n",
+      "        \"name\": \"get_news\",\n",
+      "        \"utterances\": [\n",
+      "            \"Tell me the latest news from the US\",\n",
+      "            \"What's happening in India today?\",\n",
+      "            \"Get me the top stories from Japan\",\n",
+      "            \"Can you give me the breaking news from Brazil?\",\n",
+      "            \"What's the latest news from Germany?\"\n",
+      "        ]\n",
+      "    }\u001b[0m\n",
+      "\u001b[32m2023-12-15 19:49:10 INFO semantic_router.utils.logger Generated config: {'name': 'get_news', 'utterances': ['Tell me the latest news from the US', \"What's happening in India today?\", 'Get me the top stories from Japan', 'Can you give me the breaking news from Brazil?', \"What's the latest news from Germany?\"]}\u001b[0m\n",
+      "\u001b[32m2023-12-15 19:49:10 INFO semantic_router.utils.logger Creating route layer...\u001b[0m\n"
      ]
     },
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "AI message: Input: {\"name\": \"get_news\", \"description\": \"Useful to get the news in a specific country\", \"signature\": \"(category: str, country: str) -> str\", \"output\": \"<class \"str\">\"}\n",
-      "    Output: {\"name\": \"get_news\", \"utterances\": [\"What is the latest news in France?\", \"What is the top news in Germany?\", \"What is the breaking news in Italy?\", \"What is the trending news in Japan?\", \"What is the popular news in South Korea?\"]}\n",
-      "\n",
-      "    Input: {\"name\": \"get_news\", \"description\": \"Useful to get the news in a specific country\", \"signature\": \"(category: str, country: str) -> str\", \"output\": \"<class \"str\">\"}\n",
-      "    Output: {\"name\": \"get_news\", \"utterances\": [\"What is the latest news in France\n"
+      "Route: {'name': 'get_time', 'utterances': [\"What's the time in New York?\", 'Tell me the time in Tokyo.', 'Can you give me the time in London?', \"What's the current time in Paris?\", 'Can you tell me the time in Sydney?']}\n",
+      "Route: {'name': 'get_news', 'utterances': ['Tell me the latest news from the US', \"What's happening in India today?\", 'Get me the top stories from Japan', 'Can you give me the breaking news from Brazil?', \"What's the latest news from Germany?\"]}\n"
      ]
     }
    ],
@@ -421,20 +445,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 238,
+   "execution_count": 103,
    "metadata": {},
    "outputs": [
     {
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2023-12-15 15:26:44 INFO semantic_router.utils.logger Extracting parameters...\u001b[0m\n",
-      "\u001b[32m2023-12-15 15:26:44 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
-      "\u001b[32m2023-12-15 15:26:45 INFO semantic_router.utils.logger AI message: \n",
+      "\u001b[32m2023-12-15 19:49:13 INFO semantic_router.utils.logger Extracting parameters...\u001b[0m\n",
+      "\u001b[32m2023-12-15 19:49:13 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
+      "\u001b[32m2023-12-15 19:49:15 INFO semantic_router.utils.logger AI message: \n",
       "    {\n",
-      "        'location': 'Stockholm'\n",
+      "        \"location\": \"Stockholm\"\n",
       "    }\u001b[0m\n",
-      "\u001b[32m2023-12-15 15:26:45 INFO semantic_router.utils.logger Extracted parameters: {'location': 'Stockholm'}\u001b[0m\n"
+      "\u001b[32m2023-12-15 19:49:15 INFO semantic_router.utils.logger Extracted parameters: {'location': 'Stockholm'}\u001b[0m\n"
      ]
     },
     {
@@ -449,14 +473,14 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2023-12-15 15:26:45 INFO semantic_router.utils.logger Extracting parameters...\u001b[0m\n",
-      "\u001b[32m2023-12-15 15:26:45 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
-      "\u001b[32m2023-12-15 15:26:47 INFO semantic_router.utils.logger AI message: \n",
+      "\u001b[32m2023-12-15 19:49:15 INFO semantic_router.utils.logger Extracting parameters...\u001b[0m\n",
+      "\u001b[32m2023-12-15 19:49:15 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
+      "\u001b[32m2023-12-15 19:49:16 INFO semantic_router.utils.logger AI message: \n",
       "    {\n",
-      "        'category': 'tech',\n",
-      "        'country': 'Lithuania'\n",
+      "        \"category\": \"tech\",\n",
+      "        \"country\": \"Lithuania\"\n",
       "    }\u001b[0m\n",
-      "\u001b[32m2023-12-15 15:26:47 INFO semantic_router.utils.logger Extracted parameters: {'category': 'tech', 'country': 'Lithuania'}\u001b[0m\n"
+      "\u001b[32m2023-12-15 19:49:16 INFO semantic_router.utils.logger Extracted parameters: {'category': 'tech', 'country': 'Lithuania'}\u001b[0m\n"
      ]
     },
     {
@@ -471,9 +495,9 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[33m2023-12-15 15:26:47 WARNING semantic_router.utils.logger No function found\u001b[0m\n",
-      "\u001b[32m2023-12-15 15:26:47 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
-      "\u001b[32m2023-12-15 15:26:48 INFO semantic_router.utils.logger AI message:  How can I help you today?\u001b[0m\n"
+      "\u001b[33m2023-12-15 19:49:16 WARNING semantic_router.utils.logger No function found\u001b[0m\n",
+      "\u001b[32m2023-12-15 19:49:16 INFO semantic_router.utils.logger Calling Mistral model\u001b[0m\n",
+      "\u001b[32m2023-12-15 19:49:17 INFO semantic_router.utils.logger AI message:  How can I help you today?\u001b[0m\n"
      ]
     },
     {
@@ -482,7 +506,7 @@
        "' How can I help you today?'"
       ]
      },
-     "execution_count": 238,
+     "execution_count": 103,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -492,13 +516,6 @@
     "call(query=\"What is the tech news in the Lithuania?\", functions=tools, router=router)\n",
     "call(query=\"Hi!\", functions=tools, router=router)"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {