davanstrien (HF Staff) committed
Commit a7a851e · 1 Parent(s): f051c94

Refactor image_to_data_uri and main functions for improved readability and consistency

Files changed (1)
  1. vlm-classify.py +52 -46
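
For context, the image_to_data_uri helper touched in this diff converts a PIL image (or a dict of raw image bytes) into a base64-encoded JPEG data URI, optionally downsizing the image first. A minimal usage sketch, assuming Pillow is installed and the function has been copied out of vlm-classify.py (the hyphenated filename cannot be imported directly); the toy image and max_size value are illustrative:

    from PIL import Image

    # Toy RGBA image larger than max_size; exercises both the resize path and
    # the RGB-conversion path of image_to_data_uri.
    img = Image.new("RGBA", (2048, 1024))
    uri = image_to_data_uri(img, max_size=1024)  # thumbnailed to <=1024px, saved as JPEG
    # The "data:image/jpeg;base64," prefix is an assumption: the return
    # statement falls outside the hunks shown below.
    print(uri[:40])
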
vlm-classify.py CHANGED
@@ -63,9 +63,11 @@ logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
 
-def image_to_data_uri(image: Union[Image.Image, Dict[str, Any]], max_size: Optional[int] = None) -> str:
+def image_to_data_uri(
+    image: Union[Image.Image, Dict[str, Any]], max_size: Optional[int] = None
+) -> str:
     """Convert image to base64 data URI for VLM processing.
-
+
     Args:
         image: PIL Image or dict with image bytes
         max_size: Optional maximum dimension (width or height) to resize to.
@@ -77,17 +79,17 @@ def image_to_data_uri(image: Union[Image.Image, Dict[str, Any]], max_size: Optio
         pil_img = Image.open(io.BytesIO(image["bytes"]))
     else:
         raise ValueError(f"Unsupported image type: {type(image)}")
-
+
     # Resize if max_size is specified and image exceeds it
     if max_size and (pil_img.width > max_size or pil_img.height > max_size):
         # Use thumbnail to preserve aspect ratio
         pil_img = pil_img.copy()  # Don't modify original
         pil_img.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)
-
+
     # Convert to RGB if necessary (handle RGBA, grayscale, etc.)
     if pil_img.mode not in ("RGB", "L"):
         pil_img = pil_img.convert("RGB")
-
+
     # Convert to base64
     buf = io.BytesIO()
     pil_img.save(buf, format="JPEG", quality=95)
@@ -102,7 +104,7 @@ def create_classification_messages(
 ) -> List[Dict]:
     """Create chat messages for VLM classification."""
     image_uri = image_to_data_uri(image, max_size=max_size)
-
+
     return [
         {
             "role": "user",
@@ -132,50 +134,52 @@ def main(
     private: bool = False,
 ):
     """Classify images from a dataset using a Vision Language Model."""
-
+
     # Check GPU availability
     if not torch.cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
         logger.error("If running locally, ensure you have a CUDA-capable GPU.")
         logger.error("For cloud execution, use: hf jobs uv run --flavor a10g ...")
         sys.exit(1)
-
+
     # Parse classes
     class_list = [c.strip() for c in classes.split(",")]
     logger.info(f"Classes: {class_list}")
-
+
     # Create default prompt if not provided
     if prompt is None:
         prompt = f"Classify this image into one of the following categories: {', '.join(class_list)}"
     logger.info(f"Prompt template: {prompt}")
-
+
     # Login to HF if token provided
     HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
     if HF_TOKEN:
         login(token=HF_TOKEN)
-
+
     # Load dataset
     logger.info(f"Loading dataset: {input_dataset}")
     dataset = load_dataset(input_dataset, split=split)
-
+
     # Validate image column
     if image_column not in dataset.column_names:
-        raise ValueError(f"Column '{image_column}' not found. Available: {dataset.column_names}")
-
+        raise ValueError(
+            f"Column '{image_column}' not found. Available: {dataset.column_names}"
+        )
+
     # Limit samples if requested
     if max_samples:
         dataset = dataset.select(range(min(max_samples, len(dataset))))
         logger.info(f"Limited to {len(dataset)} samples")
-
+
     # Log resizing configuration
     if max_size:
         logger.info(f"Image resizing enabled: max dimension = {max_size}px")
-
+
     # Auto-detect tensor parallel size if not specified
     if tensor_parallel_size is None:
         tensor_parallel_size = torch.cuda.device_count()
         logger.info(f"Auto-detected {tensor_parallel_size} GPUs for tensor parallelism")
-
+
     # Initialize vLLM
     logger.info(f"Loading model: {model}")
     llm_kwargs = {
@@ -184,25 +188,25 @@ def main(
         "tensor_parallel_size": tensor_parallel_size,
         "trust_remote_code": True,  # Required for some VLMs
     }
-
+
     if max_model_len:
         llm_kwargs["max_model_len"] = max_model_len
-
+
     llm = LLM(**llm_kwargs)
-
+
     # Create guided decoding params for classification
     guided_decoding_params = GuidedDecodingParams(choice=class_list)
     sampling_params = SamplingParams(
         temperature=0.1,  # Low temperature for consistent classification
-        max_tokens=50,  # Classifications are short
+        max_tokens=50,  # Classifications are short
         guided_decoding=guided_decoding_params,
     )
-
+
     # Process images in batches to avoid memory issues
     logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
-
+
     all_classifications = []
-
+
     # Process in batches using lazy loading
     for batch_indices in tqdm(
         partition_all(batch_size, range(len(dataset))),
@@ -210,11 +214,11 @@
         desc="Classifying images",
     ):
         batch_indices = list(batch_indices)
-
+
         # Load only this batch's images
         batch_images = []
         valid_batch_indices = []
-
+
         for idx in batch_indices:
             try:
                 image = dataset[idx][image_column]
@@ -223,24 +227,24 @@
             except Exception as e:
                 logger.warning(f"Skipping image at index {idx}: {e}")
                 all_classifications.append(None)
-
+
         if not batch_images:
             continue
-
+
         try:
             # Create messages for just this batch
             batch_messages = [
-                create_classification_messages(img, prompt, max_size=max_size)
+                create_classification_messages(img, prompt, max_size=max_size)
                 for img in batch_images
             ]
-
+
             # Process with vLLM
             outputs = llm.chat(
                 messages=batch_messages,
                 sampling_params=sampling_params,
                 use_tqdm=False,  # Already have outer progress bar
             )
-
+
             # Extract classifications
             for output in outputs:
                 if output.outputs:
@@ -249,35 +253,37 @@
                 else:
                     all_classifications.append(None)
                     logger.warning("Empty output for an image")
-
+
         except Exception as e:
             logger.error(f"Error processing batch: {e}")
             # Add None for failed batch
             all_classifications.extend([None] * len(batch_images))
-
+
     # Ensure we have the right number of classifications
     while len(all_classifications) < len(dataset):
         all_classifications.append(None)
-
+
     # Add classifications to dataset
     logger.info("Adding classifications to dataset...")
-    dataset = dataset.add_column("label", all_classifications[:len(dataset)])
-
+    dataset = dataset.add_column("label", all_classifications[: len(dataset)])
+
     # Push to hub
     logger.info(f"Pushing to {output_dataset}...")
     dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)
-
+
     # Print summary
     logger.info("Classification complete!")
     logger.info(f"Processed {len(all_classifications)} images")
     logger.info(f"Output dataset: {output_dataset}")
-
+
     # Show distribution of classifications
     label_counts = Counter(all_classifications)
     logger.info("Classification distribution:")
     for label, count in sorted(label_counts.items()):
         if label is not None:  # Skip None values in summary
-            percentage = (count / len(all_classifications)) * 100 if all_classifications else 0
+            percentage = (
+                (count / len(all_classifications)) * 100 if all_classifications else 0
+            )
             logger.info(f"  {label}: {count} ({percentage:.1f}%)")
 
 
@@ -309,7 +315,7 @@ Examples:
     --classes "title-page,content,index,other"
 """,
     )
-
+
     parser.add_argument(
         "input_dataset",
         help="Input dataset ID on Hugging Face Hub",
@@ -389,15 +395,15 @@ Examples:
         action="store_true",
         help="Make output dataset private",
     )
-
+
     args = parser.parse_args()
-
+
     # Show example command if no arguments
     if len(sys.argv) == 1:
         parser.print_help()
-        print("\n" + "="*60)
+        print("\n" + "=" * 60)
         print("Example HF Jobs command:")
-        print("="*60)
+        print("=" * 60)
         print("""
 hf jobs uv run \\
     --flavor a10g \\
@@ -410,7 +416,7 @@ hf jobs uv run \\
     --max-samples 100
 """)
         sys.exit(0)
-
+
     main(
         input_dataset=args.input_dataset,
         output_dataset=args.output_dataset,
@@ -427,4 +433,4 @@ hf jobs uv run \\
         split=args.split,
         hf_token=args.hf_token,
         private=args.private,
-    )
+    )
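
The classification itself relies on vLLM guided decoding, visible in the sampling_params hunk above: constraining generation with GuidedDecodingParams(choice=class_list) means the model can only emit one of the allowed labels, which is what makes add_column("label", ...) safe downstream. A minimal sketch of that pattern, assuming vLLM's OpenAI-style chat API (the import path, model ID, and placeholder data URI are assumptions, not taken from this commit):

    from vllm import LLM, SamplingParams
    from vllm.sampling_params import GuidedDecodingParams

    class_list = ["cat", "dog", "other"]  # placeholder classes
    image_uri = "data:image/jpeg;base64,..."  # substitute a real URI from image_to_data_uri

    llm = LLM(model="Qwen/Qwen2-VL-2B-Instruct", trust_remote_code=True)  # placeholder VLM
    sampling_params = SamplingParams(
        temperature=0.1,  # low temperature for consistent classification
        max_tokens=50,
        guided_decoding=GuidedDecodingParams(choice=class_list),  # restrict output to class_list
    )

    # One conversation per image, OpenAI-style content parts, mirroring
    # create_classification_messages in the diff.
    conversation = [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_uri}},
                {"type": "text", "text": "Classify this image into one of the following categories: cat, dog, other"},
            ],
        }
    ]
    outputs = llm.chat(messages=[conversation], sampling_params=sampling_params, use_tqdm=False)
    print(outputs[0].outputs[0].text)  # one of "cat", "dog", "other"
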