@@ -54,6 +54,7 @@ It lets us specify the training settings for everything from `model_name` to `da
output_dir:str="PATH/to/save/PEFT/model"
freeze_layers:bool=False
num_freeze_layers:int=1
freeze_LLM_only:bool=False # Freeze self-attention layers in the language_model. Vision model, multi_modal_projector, and cross-attention will be fine-tuned