davanstrien (HF Staff) committed
Commit f051c94 · 1 Parent(s): d450bf5

Add --max-size flag for image resizing

- Preserves aspect ratio using PIL's thumbnail method (see the sketch below)
- Significantly improves performance for high-resolution images
- Optional flag; no resizing by default
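For reference, a minimal standalone sketch of the thumbnail behaviour the flag relies on (the 4000x3000 image is synthetic, purely for illustration):

from PIL import Image

# Synthetic 4:3 high-res image standing in for a real dataset sample
img = Image.new("RGB", (4000, 3000))

# thumbnail() resizes in place, capping both dimensions at max_size while
# keeping the aspect ratio; copy() first so the caller's image is untouched
# (mirroring what the new image_to_data_uri does)
max_size = 768
resized = img.copy()
resized.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)

print(resized.size)  # (768, 576): longest side clamped, ratio preserved

Note that thumbnail() never upscales, so images already within the limit pass through unchanged.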

Files changed (1):
  1. vlm-classify.py +30 -4
vlm-classify.py CHANGED
@@ -63,8 +63,14 @@ logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
 
-def image_to_data_uri(image: Union[Image.Image, Dict[str, Any]]) -> str:
-    """Convert image to base64 data URI for VLM processing."""
+def image_to_data_uri(image: Union[Image.Image, Dict[str, Any]], max_size: Optional[int] = None) -> str:
+    """Convert image to base64 data URI for VLM processing.
+
+    Args:
+        image: PIL Image or dict with image bytes
+        max_size: Optional maximum dimension (width or height) to resize to.
+            Preserves aspect ratio using thumbnail method.
+    """
     if isinstance(image, Image.Image):
         pil_img = image
     elif isinstance(image, dict) and "bytes" in image:
@@ -72,6 +78,12 @@ def image_to_data_uri(image: Union[Image.Image, Dict[str, Any]]) -> str:
     else:
         raise ValueError(f"Unsupported image type: {type(image)}")
 
+    # Resize if max_size is specified and image exceeds it
+    if max_size and (pil_img.width > max_size or pil_img.height > max_size):
+        # Use thumbnail to preserve aspect ratio
+        pil_img = pil_img.copy()  # Don't modify original
+        pil_img.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)
+
     # Convert to RGB if necessary (handle RGBA, grayscale, etc.)
     if pil_img.mode not in ("RGB", "L"):
         pil_img = pil_img.convert("RGB")
@@ -86,9 +98,10 @@ def image_to_data_uri(image: Union[Image.Image, Dict[str, Any]]) -> str:
 def create_classification_messages(
     image: Union[Image.Image, Dict[str, Any]],
     prompt: str,
+    max_size: Optional[int] = None,
 ) -> List[Dict]:
     """Create chat messages for VLM classification."""
-    image_uri = image_to_data_uri(image)
+    image_uri = image_to_data_uri(image, max_size=max_size)
 
     return [
         {
@@ -110,6 +123,7 @@ def main(
     model: str = "Qwen/Qwen2-VL-7B-Instruct",
     batch_size: int = 8,
     max_samples: Optional[int] = None,
+    max_size: Optional[int] = None,
     gpu_memory_utilization: float = 0.9,
     max_model_len: Optional[int] = None,
     tensor_parallel_size: Optional[int] = None,
@@ -153,6 +167,10 @@ def main(
         dataset = dataset.select(range(min(max_samples, len(dataset))))
         logger.info(f"Limited to {len(dataset)} samples")
 
+    # Log resizing configuration
+    if max_size:
+        logger.info(f"Image resizing enabled: max dimension = {max_size}px")
+
     # Auto-detect tensor parallel size if not specified
     if tensor_parallel_size is None:
         tensor_parallel_size = torch.cuda.device_count()
@@ -212,7 +230,7 @@ def main(
         try:
             # Create messages for just this batch
             batch_messages = [
-                create_classification_messages(img, prompt)
+                create_classification_messages(img, prompt, max_size=max_size)
                 for img in batch_images
             ]
 
@@ -332,6 +350,12 @@ Examples:
         default=None,
         help="Maximum number of samples to process (for testing)",
     )
+    parser.add_argument(
+        "--max-size",
+        type=int,
+        default=None,
+        help="Maximum image dimension in pixels. Images larger than this will be resized while preserving aspect ratio (e.g., 768, 1024)",
+    )
     parser.add_argument(
         "--gpu-memory-utilization",
         type=float,
@@ -382,6 +406,7 @@ hf jobs uv run \\
   davanstrien/sloane-index-cards \\
   username/classified-cards \\
   --classes "index-card,manuscript,title-page,other" \\
+  --max-size 768 \\
   --max-samples 100
 """)
     sys.exit(0)
@@ -395,6 +420,7 @@ hf jobs uv run \\
         model=args.model,
         batch_size=args.batch_size,
         max_samples=args.max_samples,
+        max_size=args.max_size,
         gpu_memory_utilization=args.gpu_memory_utilization,
         max_model_len=args.max_model_len,
         tensor_parallel_size=args.tensor_parallel_size,
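Why this helps: the script ships each image to the model as a base64 data URI, so capping the longest side shrinks the request payload roughly in proportion to the pixel count. A rough standalone sketch, assuming JPEG encoding and a synthetic 4000x3000 image (the exact format and quality image_to_data_uri uses are not visible in this diff):

import base64
import io

from PIL import Image

def data_uri_len(img: Image.Image) -> int:
    # Length of a base64 JPEG data URI for the image (encoding assumed here;
    # the script's image_to_data_uri may use a different format or quality)
    buf = io.BytesIO()
    img.save(buf, format="JPEG")
    return len("data:image/jpeg;base64,") + len(base64.b64encode(buf.getvalue()))

# Synthetic high-resolution stand-in for a real dataset image
original = Image.effect_noise((4000, 3000), 64).convert("RGB")

resized = original.copy()
resized.thumbnail((768, 768), Image.Resampling.LANCZOS)  # same call the commit adds

print(f"original {original.size}: {data_uri_len(original):,} chars")
print(f"resized  {resized.size}: {data_uri_len(resized):,} chars")

At these dimensions the pixel count drops by about 27x (4000x3000 vs 768x576), which is where the performance gain on high-resolution images comes from.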