diff --git a/recipes/quickstart/inference/local_inference/multi_modal_infer.py b/recipes/quickstart/inference/local_inference/multi_modal_infer.py
index bd7c118043995e79c2f0ff8f99a60bdacfae742e..5459f2ced28e422aed55d6679f3dd04827c45d0b 100644
--- a/recipes/quickstart/inference/local_inference/multi_modal_infer.py
+++ b/recipes/quickstart/inference/local_inference/multi_modal_infer.py
@@ -16,7 +16,7 @@ DEFAULT_MODEL = "meta-llama/Llama-3.2-11B-Vision-Instruct"
 
 def load_model_and_processor(model_name: str, hf_token: str, finetuning_path: str = None):
     """
-    Load the model and processor, and optionally load adapter weights if specified.
+    Load the model and processor, and optionally load adapter weights if specified.
     """
     # Load pre-trained model and processor
     model = MllamaForConditionalGeneration.from_pretrained(