yjernite HF Staff committed on
Commit 93cb9bb · verified · 1 Parent(s): f410eb2

Upload generate-embeddings-uv-vllm.py

Files changed (1): generate-embeddings-uv-vllm.py (+320, -0)
generate-embeddings-uv-vllm.py ADDED
@@ -0,0 +1,320 @@
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "datasets",
#     "huggingface-hub[hf_transfer]",
#     "flashinfer-python",
#     "hf-xet>=1.1.7",
#     "python-dotenv",
#     "torch",
#     "transformers",
#     "vllm",
# ]
#
# ///
"""
Generate embeddings for documents in a dataset using vLLM for efficient GPU inference.

This script loads a dataset from the Hugging Face Hub containing text documents,
optionally truncates each document to a token budget, generates embeddings with
vLLM's pooling ("embed") task, and pushes the results back to the Hub.

Example usage:
    # Local execution with auto GPU detection
    uv run generate-embeddings-uv-vllm.py \\
        username/input-dataset \\
        username/output-dataset \\
        --input-column text

    # With custom model and truncation settings
    uv run generate-embeddings-uv-vllm.py \\
        username/input-dataset \\
        username/output-dataset \\
        --model-id Qwen/Qwen3-Embedding-0.6B \\
        --input-truncation-len 4096 \\
        --max-samples 1000

    # HF Jobs execution (see script output for full command)
    hf jobs uv run --flavor a100x4 ...
"""

import argparse
import logging
import os
import sys
from datetime import datetime
from typing import Optional

from datasets import load_dataset
from dotenv import load_dotenv
from huggingface_hub import get_token, login
from torch import cuda
from tqdm.auto import tqdm
from transformers import AutoTokenizer
from vllm import LLM

# Enable HF Transfer for faster downloads
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)


def check_gpu_availability() -> int:
    """Check if CUDA is available and return the number of GPUs."""
    if not cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error(
            "Please run on a machine with an NVIDIA GPU or use HF Jobs with a GPU flavor."
        )
        sys.exit(1)

    num_gpus = cuda.device_count()
    for i in range(num_gpus):
        gpu_name = cuda.get_device_name(i)
        gpu_memory = cuda.get_device_properties(i).total_memory / 1024**3
        logger.info(f"GPU {i}: {gpu_name} with {gpu_memory:.1f} GB memory")

    return num_gpus

def main(
    src_dataset_hub_id: str,
    output_dataset_hub_id: str,
    model_id: str = "Qwen/Qwen3-Embedding-0.6B",
    input_column: str = "text",
    output_column: str = "embeddings",
    gpu_memory_utilization: float = 0.90,
    input_truncation_len: Optional[int] = None,
    tensor_parallel_size: Optional[int] = None,
    max_samples: Optional[int] = None,
    hf_token: Optional[str] = None,
):
    """
    Main embedding pipeline.

    Args:
        src_dataset_hub_id: Input dataset on Hugging Face Hub
        output_dataset_hub_id: Where to save results on Hugging Face Hub
        model_id: Hugging Face model ID for embedding generation
        input_column: Column name containing documents to embed
        output_column: Column name for generated embeddings
        gpu_memory_utilization: GPU memory utilization factor
        input_truncation_len: Maximum input length in tokens (None uses model default)
        tensor_parallel_size: Number of GPUs to use (auto-detect if None)
        max_samples: Maximum number of samples to process (None for all)
        hf_token: Hugging Face authentication token
    """
    # GPU check and configuration
    num_gpus = check_gpu_availability()
    if tensor_parallel_size is None:
        tensor_parallel_size = num_gpus
        logger.info(
            f"Auto-detected {num_gpus} GPU(s), using tensor_parallel_size={tensor_parallel_size}"
        )
    else:
        logger.info(f"Using specified tensor_parallel_size={tensor_parallel_size}")
        if tensor_parallel_size > num_gpus:
            logger.warning(
                f"Requested {tensor_parallel_size} GPUs but only {num_gpus} available"
            )

    # Authentication - try multiple methods
    load_dotenv()
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN") or get_token()

    if not HF_TOKEN:
        logger.error("No HuggingFace token found. Please provide token via:")
        logger.error("  1. --hf-token argument")
        logger.error("  2. HF_TOKEN environment variable")
        logger.error("  3. Run 'huggingface-cli login' or use login() in Python")
        sys.exit(1)

    logger.info("HuggingFace token found, authenticating...")
    login(token=HF_TOKEN)

    # Initialize vLLM; only cap max_model_len when a truncation length is given,
    # otherwise the model's default context length is used
    logger.info(f"Loading model: {model_id}")
    vllm_kwargs = {
        "model": model_id,
        "tensor_parallel_size": tensor_parallel_size,
        "gpu_memory_utilization": gpu_memory_utilization,
        "task": "embed",
    }
    if input_truncation_len is not None:
        # Leave headroom above the truncation length for special tokens
        vllm_kwargs["max_model_len"] = input_truncation_len + 128

    llm = LLM(**vllm_kwargs)
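
    # Note: with task="embed", vLLM runs the model in pooling mode, so
    # llm.embed() below returns one pooled embedding vector per input string
    # rather than generated text.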

    # Load tokenizer for token-based truncation of long documents
    logger.info("Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    # Load dataset
    logger.info(f"Loading dataset: {src_dataset_hub_id}")
    dataset = load_dataset(src_dataset_hub_id, split="train")

    # Apply max_samples if specified
    if max_samples is not None and max_samples < len(dataset):
        logger.info(f"Limiting dataset to {max_samples} samples")
        dataset = dataset.select(range(max_samples))

    total_examples = len(dataset)
    logger.info(f"Dataset loaded with {total_examples:,} examples")

    # Validate that the input column exists
    if input_column not in dataset.column_names:
        logger.error(
            f"Column '{input_column}' not found. Available columns: {dataset.column_names}"
        )
        sys.exit(1)
    logger.info(f"Using input column: '{input_column}'")

    # Prepare documents, truncating by token count if requested
    logger.info("Preparing documents...")
    all_documents = []
    has_header_columns = {"title_dl", "source_url"}.issubset(dataset.column_names)
    for example in tqdm(dataset, desc="Processing documents"):
        # Prepend a title/source header when the dataset provides those columns
        if has_header_columns:
            document = f"# {example['title_dl']}\n\nFrom: {example['source_url']}\n\n{example[input_column]}"
        else:
            document = example[input_column]
        # Tokenize the document, then truncate using token counts
        if input_truncation_len is not None:
            tokens = tokenizer.encode(document)
            if len(tokens) > input_truncation_len:
                document = tokenizer.decode(tokens[:input_truncation_len])
        all_documents.append(document)  # list of plain strings
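
    # For illustration, a prepared document (with the optional header) looks like:
    #   "# Some Title\n\nFrom: https://example.com/page\n\nDocument text..."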

    # Generate embeddings - vLLM handles batching and scheduling internally
    logger.info("vLLM will handle batching and scheduling automatically")
    outputs = llm.embed(all_documents)

    # Extract the embedding vectors from the vLLM outputs
    logger.info("Extracting generated embeddings...")
    embeddings = [o.outputs.embedding for o in outputs]
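    # Each entry is a plain list[float] whose length is the model's embedding
    # dimension (1024 for the default Qwen3-Embedding-0.6B).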

    # Add embeddings to the dataset
    logger.info("Adding embeddings to dataset...")
    dataset = dataset.add_column(output_column, embeddings)

    # Push dataset to hub
    logger.info(f"Pushing dataset to: {output_dataset_hub_id}")
    dataset.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)

    logger.info("✅ Embedding generation complete!")
    logger.info(
        f"Dataset available at: https://huggingface.co/datasets/{output_dataset_hub_id}"
    )


if __name__ == "__main__":
    if len(sys.argv) > 1:
        parser = argparse.ArgumentParser(
            description="Generate embeddings for dataset documents using vLLM",
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog="""
Examples:
  # Basic usage with default Qwen model
  uv run generate-embeddings-uv-vllm.py input-dataset output-dataset

  # With custom model and parameters
  uv run generate-embeddings-uv-vllm.py input-dataset output-dataset \\
    --model-id Qwen/Qwen3-Embedding-0.6B \\
    --input-column text \\
    --output-column embeddings

  # Force specific GPU configuration
  uv run generate-embeddings-uv-vllm.py input-dataset output-dataset \\
    --tensor-parallel-size 2 \\
    --gpu-memory-utilization 0.95

  # Using environment variable for token
  HF_TOKEN=hf_xxx uv run generate-embeddings-uv-vllm.py input-dataset output-dataset
""",
        )

        parser.add_argument(
            "src_dataset_hub_id",
            help="Input dataset on Hugging Face Hub (e.g., username/dataset-name)",
        )
        parser.add_argument(
            "output_dataset_hub_id", help="Output dataset name on Hugging Face Hub"
        )
        parser.add_argument(
            "--model-id",
            type=str,
            default="Qwen/Qwen3-Embedding-0.6B",
            help="Model to use for embedding generation (default: Qwen3-Embedding-0.6B)",
        )
        parser.add_argument(
            "--input-column",
            type=str,
            default="text",
            help="Column containing text to embed (default: text)",
        )
        parser.add_argument(
            "--output-column",
            type=str,
            default="embeddings",
            help="Column name for generated embeddings (default: embeddings)",
        )
        parser.add_argument(
            "--max-samples",
            type=int,
            help="Maximum number of samples to process (default: all)",
        )
        parser.add_argument(
            "--input-truncation-len",
            type=int,
            help="Maximum input length in tokens (default: model's default)",
        )
        parser.add_argument(
            "--tensor-parallel-size",
            type=int,
            help="Number of GPUs to use (default: auto-detect)",
        )
        parser.add_argument(
            "--gpu-memory-utilization",
            type=float,
            default=0.90,
            help="GPU memory utilization factor (default: 0.90)",
        )
        parser.add_argument(
            "--hf-token",
            type=str,
            help="Hugging Face token (can also use HF_TOKEN env var)",
        )
        args = parser.parse_args()

        main(
            src_dataset_hub_id=args.src_dataset_hub_id,
            output_dataset_hub_id=args.output_dataset_hub_id,
            model_id=args.model_id,
            input_column=args.input_column,
            output_column=args.output_column,
            gpu_memory_utilization=args.gpu_memory_utilization,
            input_truncation_len=args.input_truncation_len,
            tensor_parallel_size=args.tensor_parallel_size,
            max_samples=args.max_samples,
            hf_token=args.hf_token,
        )
    else:
        # Show HF Jobs example when run without arguments
        print("""
vLLM Embedding Generation Script
================================

This script requires arguments. For usage information:
  uv run generate-embeddings-uv-vllm.py --help

Example HF Jobs command with multi-GPU:
  # If you're logged in with huggingface-cli, the token will be auto-detected
  hf jobs uv run \\
      --flavor l4x4 \\
      https://huggingface.co/datasets/uv-scripts/vllm/raw/main/generate-embeddings-uv-vllm.py \\
      username/input-dataset \\
      username/output-dataset \\
      --input-column text \\
      --model-id Qwen/Qwen3-Embedding-0.6B \\
      --input-truncation-len 4096
""")