#!/usr/bin/env python3
"""Test script for the converted model"""
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
from PIL import Image
import torch

print("Loading model...")
# Load the converted checkpoint in half precision; device_map="auto" lets
# transformers place the weights on the available GPU(s) automatically.
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "qwen25-vl-weather-7b",
    torch_dtype=torch.float16,
    device_map="auto",
)
processor = AutoProcessor.from_pretrained("qwen25-vl-weather-7b")
print("Model loaded successfully!")
print(f"Model type: {type(model).__name__}")
print(f"Device: {model.device}")
# You can now test with an image
# image = Image.open("test_image.jpg")
# ...
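
# --- Hedged example (not part of the original script) ----------------------
# A minimal sketch of a single-image inference run, assuming the checkpoint
# was fine-tuned as a chat model and a local file named "test_image.jpg"
# exists. The prompt text is a placeholder; adjust it to your task.
# image = Image.open("test_image.jpg")
# messages = [
#     {
#         "role": "user",
#         "content": [
#             {"type": "image"},
#             {"type": "text", "text": "Describe the weather shown in this image."},
#         ],
#     }
# ]
# # Build the chat-formatted prompt, then tokenize text and image together.
# prompt = processor.apply_chat_template(
#     messages, tokenize=False, add_generation_prompt=True
# )
# inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(model.device)
# with torch.no_grad():
#     output_ids = model.generate(**inputs, max_new_tokens=128)
# # Strip the prompt tokens before decoding so only the generated answer remains.
# answer = processor.batch_decode(
#     output_ids[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True
# )[0]
# print(f"Model output: {answer}")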