From 20dd4740569b68a69650807be2ec246ac187a309 Mon Sep 17 00:00:00 2001 From: Himanshu Shukla <himanshushukla.shukla3@gmail.com> Date: Sat, 2 Nov 2024 21:51:12 +0000 Subject: [PATCH] Add working code for CLI, Gradio UI, and LoRA weight merging --- .../quickstart/inference/local_inference/multi_modal_infer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recipes/quickstart/inference/local_inference/multi_modal_infer.py b/recipes/quickstart/inference/local_inference/multi_modal_infer.py index bd7c1180..5459f2ce 100644 --- a/recipes/quickstart/inference/local_inference/multi_modal_infer.py +++ b/recipes/quickstart/inference/local_inference/multi_modal_infer.py @@ -16,7 +16,7 @@ DEFAULT_MODEL = "meta-llama/Llama-3.2-11B-Vision-Instruct" def load_model_and_processor(model_name: str, hf_token: str, finetuning_path: str = None): """ - Load the model and processor, and optionally load adapter weights if specified. + Load the model and processor, and optionally load adapter weights if specified """ # Load pre-trained model and processor model = MllamaForConditionalGeneration.from_pretrained( -- GitLab