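# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "datasets",
#     "huggingface-hub",
#     "pillow",
#     "toolz",
#     "torch",
#     "tqdm",
#     "vllm",
# ]
# ///
# NOTE: dependency list inferred from the imports below; pin versions as needed.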
""" |
|
Classify images using Vision Language Models with vLLM. |
|
|
|
This script processes images through VLMs to classify them into user-defined categories, |
|
using vLLM's GuidedDecodingParams for structured output. |
|
|
|
Examples: |
|
# Basic classification |
|
uv run vlm-classify.py \\ |
|
username/input-dataset \\ |
|
username/output-dataset \\ |
|
--classes "document,photo,diagram,other" |
|
|
|
# With custom prompt and model |
|
uv run vlm-classify.py \\ |
|
username/input-dataset \\ |
|
username/output-dataset \\ |
|
--classes "index-card,manuscript,title-page,other" \\ |
|
--prompt "What type of historical document is this?" \\ |
|
--model Qwen/Qwen2-VL-7B-Instruct |
|
|
|
# Quick test with sample limit |
|
uv run vlm-classify.py \\ |
|
davanstrien/sloane-index-cards \\ |
|
username/test-output \\ |
|
--classes "index,content,other" \\ |
|
--max-samples 10 |
|
""" |

import argparse
import base64
import io
import logging
import os
import sys
from collections import Counter
from typing import Any, Dict, List, Optional, Union

import torch
from PIL import Image
from datasets import load_dataset
from huggingface_hub import login
from toolz import partition_all
from tqdm.auto import tqdm
from vllm import LLM, SamplingParams
from vllm.sampling_params import GuidedDecodingParams

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def image_to_data_uri(
    image: Union[Image.Image, Dict[str, Any]], max_size: Optional[int] = None
) -> str:
    """Convert an image to a base64 data URI for VLM processing.

    Args:
        image: PIL Image or dict with image bytes.
        max_size: Optional maximum dimension (width or height) to resize to.
            Preserves aspect ratio using the thumbnail method.
    """
    if isinstance(image, Image.Image):
        pil_img = image
    elif isinstance(image, dict) and "bytes" in image:
        pil_img = Image.open(io.BytesIO(image["bytes"]))
    else:
        raise ValueError(f"Unsupported image type: {type(image)}")

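    # Downscale oversized images on a copy so the dataset's original image is
    # not mutated; thumbnail() resizes in place and preserves aspect ratio.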
    if max_size and (pil_img.width > max_size or pil_img.height > max_size):
        pil_img = pil_img.copy()
        pil_img.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)

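    # JPEG supports only RGB and grayscale ("L"); convert other modes (RGBA, P, ...).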
    if pil_img.mode not in ("RGB", "L"):
        pil_img = pil_img.convert("RGB")

    buf = io.BytesIO()
    pil_img.save(buf, format="JPEG", quality=95)
    base64_str = base64.b64encode(buf.getvalue()).decode()
    return f"data:image/jpeg;base64,{base64_str}"


def create_classification_messages(
    image: Union[Image.Image, Dict[str, Any]],
    prompt: str,
    max_size: Optional[int] = None,
) -> List[Dict]:
    """Create chat messages for VLM classification."""
    image_uri = image_to_data_uri(image, max_size=max_size)

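    # OpenAI-style multimodal message: an image_url part carrying the base64
    # data URI, followed by the text prompt. vLLM's LLM.chat() accepts this format.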
    return [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_uri}},
                {"type": "text", "text": prompt},
            ],
        }
    ]


def main(
    input_dataset: str,
    output_dataset: str,
    classes: str,
    prompt: Optional[str] = None,
    image_column: str = "image",
    model: str = "Qwen/Qwen2-VL-7B-Instruct",
    batch_size: int = 8,
    max_samples: Optional[int] = None,
    max_size: Optional[int] = None,
    gpu_memory_utilization: float = 0.9,
    max_model_len: Optional[int] = None,
    tensor_parallel_size: Optional[int] = None,
    split: str = "train",
    hf_token: Optional[str] = None,
    private: bool = False,
):
    """Classify images from a dataset using a Vision Language Model."""
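
    # vLLM requires a CUDA GPU; bail out early with a helpful message.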
    if not torch.cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error("If running locally, ensure you have a CUDA-capable GPU.")
        logger.error("For cloud execution, use: hf jobs uv run --flavor a10g ...")
        sys.exit(1)

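    # Parse the comma-separated class list; these become the only outputs the
    # model is allowed to generate (see the guided decoding setup below).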
    class_list = [c.strip() for c in classes.split(",")]
    logger.info(f"Classes: {class_list}")

    if prompt is None:
        prompt = f"Classify this image into one of the following categories: {', '.join(class_list)}"
    logger.info(f"Prompt: {prompt}")

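    # Log in if a token is available (required for private datasets and for
    # pushing results to the Hub).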
    hf_token = hf_token or os.environ.get("HF_TOKEN")
    if hf_token:
        login(token=hf_token)

    logger.info(f"Loading dataset: {input_dataset}")
    dataset = load_dataset(input_dataset, split=split)

    if image_column not in dataset.column_names:
        raise ValueError(
            f"Column '{image_column}' not found. Available: {dataset.column_names}"
        )

    if max_samples:
        dataset = dataset.select(range(min(max_samples, len(dataset))))
        logger.info(f"Limited to {len(dataset)} samples")

    if max_size:
        logger.info(f"Image resizing enabled: max dimension = {max_size}px")

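    # Spread the model across all visible GPUs unless a size was given.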
    if tensor_parallel_size is None:
        tensor_parallel_size = torch.cuda.device_count()
        logger.info(f"Auto-detected {tensor_parallel_size} GPUs for tensor parallelism")

logger.info(f"Loading model: {model}") |
|
llm_kwargs = { |
|
"model": model, |
|
"gpu_memory_utilization": gpu_memory_utilization, |
|
"tensor_parallel_size": tensor_parallel_size, |
|
"trust_remote_code": True, |
|
} |
|
|
|
if max_model_len: |
|
llm_kwargs["max_model_len"] = max_model_len |
|
|
|
llm = LLM(**llm_kwargs) |
|
|
|
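    # Guided decoding constrains generation to the class labels, so every
    # output is guaranteed to be exactly one of the user-supplied classes.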
    guided_decoding_params = GuidedDecodingParams(choice=class_list)
    sampling_params = SamplingParams(
        temperature=0.1,
        max_tokens=50,
        guided_decoding=guided_decoding_params,
    )

logger.info(f"Processing {len(dataset)} images in batches of {batch_size}") |
|
|
|
all_classifications = [] |
|
|
|
|
|
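    # Classify in batches to bound peak memory while still letting vLLM batch
    # requests internally.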
    for batch_indices in tqdm(
        partition_all(batch_size, range(len(dataset))),
        total=(len(dataset) + batch_size - 1) // batch_size,
        desc="Classifying images",
    ):
        batch_indices = list(batch_indices)

        # Track results by dataset index; anything that fails stays None so
        # labels remain aligned with dataset rows.
        batch_results = {idx: None for idx in batch_indices}

        batch_images = []
        valid_batch_indices = []
        for idx in batch_indices:
            try:
                batch_images.append(dataset[idx][image_column])
                valid_batch_indices.append(idx)
            except Exception as e:
                logger.warning(f"Skipping image at index {idx}: {e}")

        if batch_images:
            try:
                batch_messages = [
                    create_classification_messages(img, prompt, max_size=max_size)
                    for img in batch_images
                ]

                outputs = llm.chat(
                    messages=batch_messages,
                    sampling_params=sampling_params,
                    use_tqdm=False,
                )

                for idx, output in zip(valid_batch_indices, outputs):
                    if output.outputs:
                        batch_results[idx] = output.outputs[0].text.strip()
                    else:
                        logger.warning(f"Empty output for image at index {idx}")
            except Exception as e:
                logger.error(f"Error processing batch: {e}")

        all_classifications.extend(batch_results[idx] for idx in batch_indices)

    # Safety net: pad with None if anything above left the list short.
    while len(all_classifications) < len(dataset):
        all_classifications.append(None)

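    # Attach the predicted labels and publish the enriched dataset.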
logger.info("Adding classifications to dataset...") |
|
dataset = dataset.add_column("label", all_classifications[:len(dataset)]) |
|
|
|
|
|
logger.info(f"Pushing to {output_dataset}...") |
|
dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN) |
|
|
|
    logger.info("Classification complete!")
    logger.info(f"Processed {len(all_classifications)} images")
    logger.info(f"Output dataset: {output_dataset}")

    # Summarize the label distribution; failed images (None) are excluded from
    # the counts (sorting a None key would raise a TypeError) but kept in the
    # percentage denominator.
    label_counts = Counter(c for c in all_classifications if c is not None)
    logger.info("Classification distribution:")
    for label, count in sorted(label_counts.items()):
        percentage = (count / len(all_classifications)) * 100 if all_classifications else 0
        logger.info(f"  {label}: {count} ({percentage:.1f}%)")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Classify images using Vision Language Models",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Basic classification
  uv run vlm-classify.py \\
      username/input-dataset \\
      username/output-dataset \\
      --classes "document,photo,diagram,other"

  # With custom prompt
  uv run vlm-classify.py \\
      username/input-dataset \\
      username/output-dataset \\
      --classes "index-card,manuscript,other" \\
      --prompt "What type of historical document is this?"

  # HF Jobs execution
  hf jobs uv run \\
      --flavor a10g \\
      https://huggingface.co/datasets/uv-scripts/vllm/raw/main/vlm-classify.py \\
      username/input-dataset \\
      username/output-dataset \\
      --classes "title-page,content,index,other"
""",
    )

    parser.add_argument(
        "input_dataset",
        help="Input dataset ID on Hugging Face Hub",
    )
    parser.add_argument(
        "output_dataset",
        help="Output dataset ID on Hugging Face Hub",
    )
    parser.add_argument(
        "--classes",
        required=True,
        help='Comma-separated list of classes (e.g., "cat,dog,other")',
    )
    parser.add_argument(
        "--prompt",
        default=None,
        help="Custom classification prompt (default: auto-generated)",
    )
    parser.add_argument(
        "--image-column",
        default="image",
        help="Column name containing images (default: image)",
    )
    parser.add_argument(
        "--model",
        default="Qwen/Qwen2-VL-7B-Instruct",
        help="Vision Language Model to use (default: Qwen/Qwen2-VL-7B-Instruct)",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=8,
        help="Batch size for inference (default: 8)",
    )
    parser.add_argument(
        "--max-samples",
        type=int,
        default=None,
        help="Maximum number of samples to process (for testing)",
    )
    parser.add_argument(
        "--max-size",
        type=int,
        default=None,
        help="Maximum image dimension in pixels. Images larger than this will be resized while preserving aspect ratio (e.g., 768, 1024)",
    )
    parser.add_argument(
        "--gpu-memory-utilization",
        type=float,
        default=0.9,
        help="GPU memory utilization (default: 0.9)",
    )
    parser.add_argument(
        "--max-model-len",
        type=int,
        default=None,
        help="Maximum model context length",
    )
    parser.add_argument(
        "--tensor-parallel-size",
        type=int,
        default=None,
        help="Number of GPUs for tensor parallelism (default: auto-detect)",
    )
    parser.add_argument(
        "--split",
        default="train",
        help="Dataset split to use (default: train)",
    )
    parser.add_argument(
        "--hf-token",
        default=None,
        help="Hugging Face API token (or set HF_TOKEN env var)",
    )
    parser.add_argument(
        "--private",
        action="store_true",
        help="Make output dataset private",
    )

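    # Show help plus an example command when invoked with no arguments; this
    # must run before parse_args(), which exits on missing positional args.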
    if len(sys.argv) == 1:
        parser.print_help()
        print("\n" + "=" * 60)
        print("Example HF Jobs command:")
        print("=" * 60)
        print("""
hf jobs uv run \\
    --flavor a10g \\
    -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \\
    https://huggingface.co/datasets/uv-scripts/vllm/raw/main/vlm-classify.py \\
    davanstrien/sloane-index-cards \\
    username/classified-cards \\
    --classes "index-card,manuscript,title-page,other" \\
    --max-size 768 \\
    --max-samples 100
""")
        sys.exit(0)

    args = parser.parse_args()

    main(
        input_dataset=args.input_dataset,
        output_dataset=args.output_dataset,
        classes=args.classes,
        prompt=args.prompt,
        image_column=args.image_column,
        model=args.model,
        batch_size=args.batch_size,
        max_samples=args.max_samples,
        max_size=args.max_size,
        gpu_memory_utilization=args.gpu_memory_utilization,
        max_model_len=args.max_model_len,
        tensor_parallel_size=args.tensor_parallel_size,
        split=args.split,
        hf_token=args.hf_token,
        private=args.private,
    )