diff --git a/recipes/quickstart/agents/Agents_101/Tool_Calling_101.ipynb b/recipes/quickstart/agents/Agents_101/Tool_Calling_101.ipynb
index aef6e5584f5b0cf5d9224e7089f574c8075b1a6a..ee6d89e97b4fad89bcf9d9fc9e00425ca6926804 100644
--- a/recipes/quickstart/agents/Agents_101/Tool_Calling_101.ipynb
+++ b/recipes/quickstart/agents/Agents_101/Tool_Calling_101.ipynb
@@ -6,7 +6,11 @@
    "source": [
     "# Tool Calling 101:\n",
     "\n",
-    "Note: If you are looking for `3.2` Featherlight Model (1B and 3B) instructions, please see the respective notebook, this one covers 3.1 models\n",
+    "Note: If you are looking for `3.2` Featherlight Model (1B and 3B) instructions, please see the respective notebook, this one covers 3.1 models.\n",
+    "\n",
+    "We are briefly introduction the `3.2` models at the end. \n",
+    "\n",
+    "Note: The new vision models behave same as `3.1` models when you are talking to the models without an image\n",
     "\n",
     "This is part (1/2) in the tool calling series, this notebook will cover the basics of what tool calling is and how to perform it with `Llama 3.1 models`\n",
     "\n",
@@ -127,10 +131,6 @@
     "    \n",
     "    chat_history.append({\"role\": \"user\", \"content\": user_input})\n",
     "    \n",
-    "    #print(chat_history)\n",
-    "    \n",
-    "    #print(\"User: \", user_input)\n",
-    "    \n",
     "    response = client.chat.completions.create(model=\"llama-3.1-70b-versatile\",\n",
     "                                          messages=chat_history,\n",
     "                                          max_tokens=max_tokens,\n",
@@ -653,7 +653,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### 3.2 Models Prompt Format"
+    "### 3.2 Models Custom Tool Prompt Format"
    ]
   },
   {
@@ -735,10 +735,6 @@
     "    \n",
     "    chat_history.append({\"role\": \"user\", \"content\": user_input})\n",
     "    \n",
-    "    #print(chat_history)\n",
-    "    \n",
-    "    #print(\"User: \", user_input)\n",
-    "    \n",
     "    response = client.chat.completions.create(model=\"llama-3.2-3b-preview\",\n",
     "                                          messages=chat_history,\n",
     "                                          max_tokens=max_tokens,\n",
@@ -755,6 +751,35 @@
     "    return response.choices[0].message.content"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note: We are assuming a structure for dataset here:\n",
+    "\n",
+    "- Name\n",
+    "- Email\n",
+    "- Age \n",
+    "- Color request"
+   ]
+  },
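+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal sketch of one record under that assumed structure (the field names and values below are illustrative assumptions, not taken from the actual dataset):\n",
+    "\n",
+    "```python\n",
+    "# Hypothetical sample record matching the assumed dataset structure\n",
+    "record = {\n",
+    "    \"name\": \"Ada Lovelace\",      # Name\n",
+    "    \"email\": \"ada@example.com\",  # Email\n",
+    "    \"age\": 36,                   # Age\n",
+    "    \"color_request\": \"green\",   # Color request\n",
+    "}\n",
+    "```"
+   ]
+  },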
   {
    "cell_type": "code",
    "execution_count": 6,