Text-only inference #71
opened by ZoeyYao27
After updating, passing the model only text, with no image, triggers the following error:
"xxxximage_utils.py", line 114, in is_batched
return is_valid_image(img[0])
IndexError: list index out of range
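For what it's worth, the traceback suggests the helper indexes the first element of the image list without checking whether the list is empty, so a text-only call (empty image list) fails before any validity check runs. Below is a rough sketch of that pattern, with is_valid_image replaced by a stand-in stub purely for illustration; it is not the real transformers helper.

from PIL import Image

def is_valid_image(img):
    # Stand-in stub for illustration only; not the real transformers helper.
    return isinstance(img, Image.Image)

def is_batched(img):
    # Mirrors the line in the traceback: img[0] is read with no emptiness guard.
    if isinstance(img, (list, tuple)):
        return is_valid_image(img[0])
    return False

is_batched([])  # IndexError: list index out of range -- the reported failure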
Here is the code to reproduce the issue:
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

torch.manual_seed(0)

model = AutoModelForCausalLM.from_pretrained(
    "openbmb/MiniCPM-Llama3-V-2_5",
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
    cache_dir="cache",
)  # sdpa or flash_attention_2, no eager
model = model.eval().cuda()
tokenizer = AutoTokenizer.from_pretrained("openbmb/MiniCPM-Llama3-V-2_5", trust_remote_code=True)

# Text-only request: no image is provided.
msgs = [{'role': 'user', 'content': ["Introduce something about Airbus A380."]}]

answer = model.chat(
    image=None,
    msgs=msgs,
    context=None,
    tokenizer=tokenizer
)
print(answer)
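As a possible stopgap (my own assumption, not verified against the repo), passing a blank placeholder image instead of None keeps the processor's image list from being empty, so the failing index never happens. Whether the model effectively ignores the blank image is also an assumption.

# Hypothetical workaround sketch, not verified: use a blank placeholder image.
from PIL import Image

placeholder = Image.new("RGB", (448, 448), color="white")
answer = model.chat(
    image=placeholder,
    msgs=msgs,
    context=None,
    tokenizer=tokenizer
)
print(answer)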