diff --git a/demo_apps/RAG_Chatbot_example/RAG_Chatbot_Example.ipynb b/demo_apps/RAG_Chatbot_example/RAG_Chatbot_Example.ipynb
index d970afc4e04540037076b1fe12b81cf0f5a74cc2..ec5c1e31e312bdedeb1323a0eceb0d33954ac4fc 100644
--- a/demo_apps/RAG_Chatbot_example/RAG_Chatbot_Example.ipynb
+++ b/demo_apps/RAG_Chatbot_example/RAG_Chatbot_Example.ipynb
@@ -241,12 +241,18 @@
    ]
   },
   {
-   "cell_type": "markdown",
-   "metadata": {},
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "vscode": {
+     "languageId": "plaintext"
+    }
+   },
+   "outputs": [],
    "source": [
-    "model = meta-llama/Llama-2-7b-chat-hf  \n",
-    "volume = $PWD/data  \n",
-    "token = #Your own HF tokens  \n",
+    "model=meta-llama/Llama-2-7b-chat-hf\n",
+    "volume=$PWD/data\n",
+    "token= # paste your own HF token here\n",
     "docker run --gpus all --shm-size 1g -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.1.0 --model-id $model"
    ]
   },