Commit 4e521426 authored by Colin Wang

Update support for new models:

1. Add `glm.py`, which adds support for the new GLM-4V model
2. Rename `idefics2.py` to `idefics.py`, which now supports IDEFICS 3 in addition to IDEFICS 2
3. Update `minicpm.py` to add support for MiniCPM-V 2.6 and fix the logic so V2.0 is handled correctly as well
parent 5e102482
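Every per-model file touched here exposes the same entry point, `generate_response(queries, model_path)`, which fills `queries[k]['response']` in place. The sketch below shows the input shape that contract assumes; the key names `question`, `figure_path`, and `response` come from the code in this commit, while the example question, file paths, and the `models.glm` import path are placeholders, not part of the repository.

# Sketch of the driver contract assumed by every generate_response below.
queries = {
    "sample_1": {
        "question": "What is the highest value shown in the chart?",  # placeholder question
        "figure_path": "figures/sample_1.png",                        # placeholder path
    },
}

from models.glm import generate_response   # hypothetical package layout
generate_response(queries, "THUDM/glm-4v-9b")
print(queries["sample_1"]["response"])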
# Adapted from https://huggingface.co/THUDM/glm-4v-9b
# This has support for the GLM-4V model
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer
from tqdm import tqdm


def generate_response(queries, model_path):
    device = "cuda"
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
        trust_remote_code=True
    ).to(device).eval()
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    for k in tqdm(queries):
        query = queries[k]['question']
        image = Image.open(queries[k]["figure_path"]).convert('RGB')
        inputs = tokenizer.apply_chat_template(
            [{"role": "user", "image": image, "content": query}],
            add_generation_prompt=True, tokenize=True, return_tensors="pt",
            return_dict=True)  # chat mode
        inputs = inputs.to(device)
        gen_kwargs = {"max_length": 2500, "do_sample": False, "top_k": 1}
        with torch.no_grad():
            outputs = model.generate(**inputs, **gen_kwargs)
            # keep only the newly generated tokens, then strip the end-of-text marker
            outputs = outputs[:, inputs['input_ids'].shape[1]:]
            res = tokenizer.decode(outputs[0]).replace(' <|endoftext|>', '')
        queries[k]['response'] = res
 # Adapted from https://huggingface.co/HuggingFaceM4/idefics2-8b
-# This has support for all the IDEFICS2 models
+# This has support for all the IDEFICS2/3 models
 from transformers.image_utils import load_image
 from transformers import AutoProcessor, AutoModelForVision2Seq
 from tqdm import tqdm
@@ -23,6 +23,6 @@ def generate_response(queries, model_path):
         inputs = processor(text=prompt, images=[image], return_tensors="pt")
         inputs = {k: v.to('cuda') for k, v in inputs.items()}
         generated_ids = model.generate(**inputs, max_new_tokens=500, do_sample=False)
-        generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
-        response = generated_texts[0].split("Assistant:")[-1].strip()
+        response = processor.batch_decode(generated_ids, skip_special_tokens=True)
+        response = response[0].split("Assistant: ")[-1]  # get the response from the assistant
         queries[k]['response'] = response
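The hunk above only shows the tail of the loop. For reference, a minimal sketch of how the prompt handed to the processor is typically built for IDEFICS2/3, following the model cards rather than code from this commit; the image path and question text are placeholders.

# Sketch only: mirrors the IDEFICS2/3 model-card usage, not code from this commit.
from transformers import AutoProcessor
from transformers.image_utils import load_image

processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b")
image = load_image("figures/example.png")  # placeholder path
messages = [{
    "role": "user",
    "content": [
        {"type": "image"},
        {"type": "text", "text": "What is the highest value in the chart?"},  # placeholder question
    ],
}]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
# The generation prompt ends with "Assistant:", which is why the decoded output
# in the hunk above is split on that marker to isolate the model's answer.
inputs = processor(text=prompt, images=[image], return_tensors="pt")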
 # Adapted from https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5
-# This has support for MiniCPM V2 and V2.5
+# This has support for MiniCPM V2, V2.5, and V2.6
 from transformers import AutoModel, AutoTokenizer
 from tqdm import tqdm
@@ -7,22 +7,50 @@ from PIL import Image
 import torch

 def generate_response(queries, model_path):
-    model = AutoModel.from_pretrained(model_path, trust_remote_code=True, torch_dtype=torch.bfloat16)
-    model = model.to(device='cuda', dtype=torch.bfloat16)
+    # sdpa attn impl for v2.6, default for 2 and 2.5
+    if "MiniCPM-V-2_6" in model_path:
+        model = AutoModel.from_pretrained(model_path, trust_remote_code=True, torch_dtype=torch.bfloat16, attn_implementation='sdpa')
+    else:
+        model = AutoModel.from_pretrained(model_path, trust_remote_code=True, torch_dtype=torch.bfloat16)
+    model = model.eval().cuda()
     tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
-    model.eval()
     for k in tqdm(queries):
         query = queries[k]['question']
         image = Image.open(queries[k]["figure_path"]).convert('RGB')
-        msgs = [{'role': 'user', 'content': query}]
-        res, context, _ = model.chat(
-            image=image,
-            msgs=msgs,
-            context=None,
-            tokenizer=tokenizer,
-            sampling=False,
-            temperature=0.0,
-            top_p=1.0,
-        )
+        if model_path.endswith("MiniCPM-V-2"):
+            msgs = [{'role': 'user', 'content': query}]
+            res, context, _ = model.chat(
+                image=image,
+                msgs=msgs,
+                context=None,
+                tokenizer=tokenizer,
+                sampling=False,
+                temperature=0.0,
+                top_p=1.0,
+            )
+        # for 2.5
+        elif model_path.endswith("MiniCPM-Llama3-V-2_5"):
+            msgs = [{'role': 'user', 'content': query}]
+            res = model.chat(
+                image=image,
+                msgs=msgs,
+                tokenizer=tokenizer,
+                sampling=False,
+                temperature=0.0,
+                top_p=1.0,
+            )
+        # for 2.6: the image is passed inside the message content rather than via image=
+        elif model_path.endswith("MiniCPM-V-2_6"):
+            msgs = [{'role': 'user', 'content': [image, query]}]
+            res = model.chat(
+                image=None,
+                msgs=msgs,
+                tokenizer=tokenizer,
+                sampling=False,
+                temperature=0.0,
+                top_p=1.0,
+            )
+        else:
+            raise NotImplementedError(f"Model path {model_path} not supported")
         queries[k]['response'] = res
@@ -38,6 +38,7 @@ def get_client_fn(model_path):
         from .gemini import get_client_model
     # gpt
     elif model_path in ['gpt-4o-2024-05-13',
+                        'gpt-4o-2024-08-06',
                         'gpt-4-turbo-2024-04-09',
                         'gpt-4o-mini-2024-07-18']:
         from .gpt import get_client_model
@@ -82,13 +83,15 @@ def get_generate_fn(model_path):
         from .gemini import generate_response
     # gpt
     elif model_name in ['gpt-4o-2024-05-13',
+                        'gpt-4o-2024-08-06',
                        'gpt-4-turbo-2024-04-09',
                        'gpt-4o-mini-2024-07-18']:
         from .gpt import generate_response
     # idefics2
     elif model_name in ['idefics2-8b',
-                        'idefics2-8b-chatty']:
-        from .idefics2 import generate_response
+                        'idefics2-8b-chatty',
+                        'Idefics3-8B-Llama3']:
+        from .idefics import generate_response
     # ixc2
     elif model_name in ['internlm-xcomposer2-4khd-7b',
                         'internlm-xcomposer2-vl-7b']:
@@ -110,8 +113,11 @@ def get_generate_fn(model_path):
         from .mgm import generate_response
     # minicpm
     elif model_name in ['MiniCPM-Llama3-V-2_5',
-                        'MiniCPM-V-2']:
+                        'MiniCPM-V-2',
+                        'MiniCPM-V-2_6']:
         from .minicpm import generate_response
+    elif model_name in ['glm-4v-9b']:
+        from .glm import generate_response
     # moai
     elif model_name in ['MoAI-7B']:
         from .moai import generate_response
@@ -143,6 +149,27 @@ def get_generate_fn(model_path):
     # internvl2pro
     elif model_name in ['InternVL2-Pro']:
         from .internvl2pro import generate_response
+    elif model_name in ['ChartLlama-13b']:
+        from .chartllama import generate_response
+    elif model_name in ['TinyChart-3B-768']:
+        from .tinychart import generate_response
+    elif model_name in ['ChartInstruct-LLama2',
+                        'ChartInstruct-FlanT5-XL']:
+        from .chartinstruct import generate_response
+    elif model_name in ['unichart-chartqa-960']:
+        from .unichart import generate_response
+    elif model_name in ['ChartAssistant']:
+        from .chartast import generate_response
+    elif model_name in ['DocOwl1.5-Omni',
+                        'DocOwl1.5-Chat',]:
+        from .docowl15 import generate_response
+    elif model_name in ['ureader-v1']:
+        from .ureader import generate_response
+    elif model_name in ['TextMonkey',
+                        'Monkey-Chat',]:
+        from .textmonkey import generate_response
+    elif model_name in ['cogagent-vqa-hf']:
+        from .cogagent import generate_response
     else:
         raise ValueError(f"Model {model_name} not supported")
     return generate_response
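A hypothetical call site for the dispatcher above; how `model_name` is derived from `model_path` is not shown in this diff, and the checkpoint location below is a placeholder.

model_path = "/checkpoints/MiniCPM-V-2_6"    # placeholder path; basename assumed to be the model name
generate_fn = get_generate_fn(model_path)    # resolves to minicpm.generate_response for this name
generate_fn(queries, model_path)             # fills queries[k]['response'] in place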