```python
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration
from PIL import Image  # request images are expected as PIL.Image objects
import torch

MODEL_ID = "unsloth/Qwen2.5-VL-7B-Instruct"

# Qwen2.5-VL is a vision-language model, so it needs the multimodal processor
# and conditional-generation class rather than AutoTokenizer/AutoModelForCausalLM.
processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID, torch_dtype=torch.float16, device_map="auto", trust_remote_code=True
)

def infer(request):
    messages = request.get("messages", [])
    images = request.get("images", [])

    # Render the chat template to a prompt string, then tokenize the prompt
    # and any images together in a single processor call.
    prompt = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = processor(
        text=[prompt], images=images or None, return_tensors="pt"
    ).to(model.device)

    outputs = model.generate(**inputs, max_new_tokens=512)
    # Slice off the prompt tokens so only the newly generated text is decoded.
    generated = outputs[:, inputs["input_ids"].shape[1]:]
    return {"text": processor.batch_decode(generated, skip_special_tokens=True)[0]}
```
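For reference, a minimal sketch of calling `infer` directly, assuming the request carries Qwen-style chat messages where each image is marked by an `{"type": "image"}` content entry and supplied as a matching PIL image. The file path and prompt text here are hypothetical, not from the original.

```python
# Minimal usage sketch. Qwen2.5-VL's chat template inserts vision tokens for
# each {"type": "image"} entry, which the processor pairs with the PIL images.
from PIL import Image

request = {
    "messages": [
        {
            "role": "user",
            "content": [
                {"type": "image"},
                {"type": "text", "text": "Describe this image in one sentence."},
            ],
        }
    ],
    "images": [Image.open("example.jpg")],  # hypothetical path
}

print(infer(request)["text"])
```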