Added working vLLM offline-serve code.

#107

In this commit I have attached a working solution for gpt-oss-20b inference via vLLM. The original code in the cookbook (https://cookbook.openai.com/articles/gpt-oss/run-vllm) was not working; with a few modifications, it now runs.

What GPU are you using? Ampere or Ada Lovelace?

Thank you @hrithiksagar-tih! Could you instead open a PR against github.com/openai/gpt-oss, and I'll copy it back into both model cards? Thanks!

dkundel-openai changed pull request status to closed

Yes, I will do it.
Thanks!

@hrithiksagar-tih thanks for sharing your code. I tried it on an offline cluster with an A100, using vLLM version 0.10.0, but it failed:

Full error log:

```
INFO 08-18 10:57:18 [__init__.py:235] Automatically detected platform cuda.
INFO 08-18 10:57:26 [config.py:1604] Using max model len 5000
INFO 08-18 10:57:27 [config.py:2434] Chunked prefill is enabled with max_num_batched_tokens=4096.
INFO 08-18 10:57:28 [core.py:572] Waiting for init message from front-end.
INFO 08-18 10:57:28 [core.py:71] Initializing a V1 LLM engine (v0.10.0) with config: model='/scratch/usr/nimtsspi/models/gpt-oss-20b', speculative_config=None, tokenizer='/scratch/usr/nimtsspi/models/gpt-oss-20b', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=5000, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(backend='auto', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=0, served_model_name=/scratch/usr/nimtsspi/models/gpt-oss-20b, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, pooler_config=None, compilation_config={"level":3,"debug_dump_path":"","cache_dir":"","backend":"","custom_ops":[],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output","vllm.mamba_mixer2"],"use_inductor":true,"compile_sizes":[],"inductor_compile_config":{"enable_auto_functionalized_v2":false},"inductor_passes":{},"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"cudagraph_copy_inputs":false,"full_cuda_graph":false,"max_capture_size":512,"local_cache_dir":null}
INFO 08-18 10:57:30 [parallel_state.py:1102] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0
WARNING 08-18 10:57:30 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
WARNING 08-18 10:57:30 [utils.py:185] GptOssForCausalLM has no vLLM implementation, falling back to Transformers implementation. Some features may not be supported and performance may not be optimal.
INFO 08-18 10:57:30 [gpu_model_runner.py:1843] Starting to load model /scratch/usr/nimtsspi/models/gpt-oss-20b...
INFO 08-18 10:57:30 [gpu_model_runner.py:1875] Loading model from scratch...
INFO 08-18 10:57:30 [transformers.py:421] Using Transformers backend.
ERROR 08-18 10:57:31 [core.py:632] EngineCore failed to start.
ERROR 08-18 10:57:31 [core.py:632] Traceback (most recent call last):
ERROR 08-18 10:57:31 [core.py:632]   File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/engine/core.py", line 623, in run_engine_core
ERROR 08-18 10:57:31 [core.py:632]     engine_core = EngineCoreProc(*args, **kwargs)
ERROR 08-18 10:57:31 [core.py:632]                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ERROR 08-18 10:57:31 [core.py:632]   File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/engine/core.py", line 441, in __init__
ERROR 08-18 10:57:31 [core.py:632]     super().__init__(vllm_config, executor_class, log_stats,
ERROR 08-18 10:57:31 [core.py:632]   File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/engine/core.py", line 77, in __init__
ERROR 08-18 10:57:31 [core.py:632]     self.model_executor = executor_class(vllm_config)
ERROR 08-18 10:57:31 [core.py:632]                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^
ERROR 08-18 10:57:31 [core.py:632]   File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/executor/executor_base.py", line 53, in __init__
ERROR 08-18 10:57:31 [core.py:632]     self._init_executor()
ERROR 08-18 10:57:31 [core.py:632]   File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/executor/uniproc_executor.py", line 49, in _init_executor
ERROR 08-18 10:57:31 [core.py:632]     self.collective_rpc("load_model")
ERROR 08-18 10:57:31 [core.py:632]   File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/executor/uniproc_executor.py", line 58, in collective_rpc
ERROR 08-18 10:57:31 [core.py:632]     answer = run_method(self.driver_worker, method, args, kwargs)
ERROR 08-18 10:57:31 [core.py:632]              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ERROR 08-18 10:57:31 [core.py:632]   File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/utils/__init__.py", line 2985, in run_method
ERROR 08-18 10:57:31 [core.py:632]     return func(*args, **kwargs)
ERROR 08-18 10:57:31 [core.py:632]            ^^^^^^^^^^^^^^^^^^^^^
ERROR 08-18 10:57:31 [core.py:632]   File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/worker/gpu_worker.py", line 201, in load_model
ERROR 08-18 10:57:31 [core.py:632]     self.model_runner.load_model(eep_scale_up=eep_scale_up)
ERROR 08-18 10:57:31 [core.py:632]   File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/worker/gpu_model_runner.py", line 1876, in load_model
ERROR 08-18 10:57:31 [core.py:632]     self.model = model_loader.load_model(
ERROR 08-18 10:57:31 [core.py:632]                  ^^^^^^^^^^^^^^^^^^^^^^^^
ERROR 08-18 10:57:31 [core.py:632]   File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/model_executor/model_loader/base_loader.py", line 44, in load_model
ERROR 08-18 10:57:31 [core.py:632]     model = initialize_model(vllm_config=vllm_config,
ERROR 08-18 10:57:31 [core.py:632]             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ERROR 08-18 10:57:31 [core.py:632]   File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/model_executor/model_loader/utils.py", line 67, in initialize_model
ERROR 08-18 10:57:31 [core.py:632]     return model_class(vllm_config=vllm_config, prefix=prefix)
ERROR 08-18 10:57:31 [core.py:632]            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ERROR 08-18 10:57:31 [core.py:632]   File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/compilation/decorators.py", line 183, in __init__
ERROR 08-18 10:57:31 [core.py:632]     old_init(self, vllm_config=vllm_config, prefix=prefix, **kwargs)
ERROR 08-18 10:57:31 [core.py:632]   File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/model_executor/models/transformers.py", line 691, in __init__
ERROR 08-18 10:57:31 [core.py:632]     self.transformers_model = TransformersModel(vllm_config=vllm_config,
ERROR 08-18 10:57:31 [core.py:632]                               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ERROR 08-18 10:57:31 [core.py:632]   File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/model_executor/models/transformers.py", line 464, in __init__
ERROR 08-18 10:57:31 [core.py:632]     self.tensor_parallel()
ERROR 08-18 10:57:31 [core.py:632]   File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/model_executor/models/transformers.py", line 565, in tensor_parallel
ERROR 08-18 10:57:31 [core.py:632]     _tensor_parallel(self.model)
ERROR 08-18 10:57:31 [core.py:632]   File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/model_executor/models/transformers.py", line 555, in _tensor_parallel
ERROR 08-18 10:57:31 [core.py:632]     for pattern, style in tp_plan.items():
ERROR 08-18 10:57:31 [core.py:632]                           ^^^^^^^^^^^^^
ERROR 08-18 10:57:31 [core.py:632] AttributeError: 'NoneType' object has no attribute 'items'
Process EngineCore_0:
Traceback (most recent call last):
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/multiprocessing/process.py", line 314, in _bootstrap
    self.run()
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/multiprocessing/process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/engine/core.py", line 636, in run_engine_core
    raise e
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/engine/core.py", line 623, in run_engine_core
    engine_core = EngineCoreProc(*args, **kwargs)
                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/engine/core.py", line 441, in __init__
    super().__init__(vllm_config, executor_class, log_stats,
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/engine/core.py", line 77, in __init__
    self.model_executor = executor_class(vllm_config)
                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/executor/executor_base.py", line 53, in __init__
    self._init_executor()
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/executor/uniproc_executor.py", line 49, in _init_executor
    self.collective_rpc("load_model")
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/executor/uniproc_executor.py", line 58, in collective_rpc
    answer = run_method(self.driver_worker, method, args, kwargs)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/utils/__init__.py", line 2985, in run_method
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/worker/gpu_worker.py", line 201, in load_model
    self.model_runner.load_model(eep_scale_up=eep_scale_up)
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/worker/gpu_model_runner.py", line 1876, in load_model
    self.model = model_loader.load_model(
                 ^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/model_executor/model_loader/base_loader.py", line 44, in load_model
    model = initialize_model(vllm_config=vllm_config,
            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/model_executor/model_loader/utils.py", line 67, in initialize_model
    return model_class(vllm_config=vllm_config, prefix=prefix)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/compilation/decorators.py", line 183, in __init__
    old_init(self, vllm_config=vllm_config, prefix=prefix, **kwargs)
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/model_executor/models/transformers.py", line 691, in __init__
    self.transformers_model = TransformersModel(vllm_config=vllm_config,
                              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/model_executor/models/transformers.py", line 464, in __init__
    self.tensor_parallel()
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/model_executor/models/transformers.py", line 565, in tensor_parallel
    _tensor_parallel(self.model)
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/model_executor/models/transformers.py", line 555, in _tensor_parallel
    for pattern, style in tp_plan.items():
                          ^^^^^^^^^^^^^
AttributeError: 'NoneType' object has no attribute 'items'
Traceback (most recent call last):
  File "/home/nimtsspi/thesis-code/judge_evaluation/annotations/gpt-oss-20b.py", line 44, in <module>
    llm = LLM(
          ^^^^
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/entrypoints/llm.py", line 273, in __init__
    self.llm_engine = LLMEngine.from_engine_args(
                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/engine/llm_engine.py", line 497, in from_engine_args
    return engine_cls.from_vllm_config(
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/engine/llm_engine.py", line 126, in from_vllm_config
    return cls(vllm_config=vllm_config,
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/engine/llm_engine.py", line 103, in __init__
    self.engine_core = EngineCoreClient.make_client(
                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/engine/core_client.py", line 77, in make_client
    return SyncMPClient(vllm_config, executor_class, log_stats)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/engine/core_client.py", line 514, in __init__
    super().__init__(
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/engine/core_client.py", line 408, in __init__
    with launch_core_engines(vllm_config, executor_class,
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/contextlib.py", line 144, in __exit__
    next(self.gen)
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/engine/utils.py", line 697, in launch_core_engines
    wait_for_engine_startup(
  File "/home/nimtsspi/micromamba/envs/tk_ma/lib/python3.11/site-packages/vllm/v1/engine/utils.py", line 750, in wait_for_engine_startup
    raise RuntimeError("Engine core initialization failed. "
RuntimeError: Engine core initialization failed. See root cause above. Failed core proc(s): {}
```
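
For what it's worth, the warning near the top of the log looks like the root cause: this vLLM 0.10.0 build has no native `GptOssForCausalLM` implementation, so it falls back to the Transformers backend, and that fallback then crashes while applying a tensor-parallel plan that is `None`. A quick diagnostic sketch (my own, not from the cookbook; it just queries vLLM's public model registry) to check whether an installed build supports the architecture natively:

```python
# Diagnostic sketch: check whether this vLLM build ships a native
# GptOssForCausalLM implementation. If it does not, vLLM falls back to the
# Transformers backend, which is the code path that crashes above.
from vllm import ModelRegistry

print("GptOssForCausalLM" in ModelRegistry.get_supported_archs())
```

If this prints `False`, the likely fix is a newer or gpt-oss-enabled vLLM build, as described in the cookbook article linked above, rather than a change to the script itself.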

Dear @dkundel-openai,

As requested, I have opened a pull request at https://github.com/openai/gpt-oss/pull/150. Could you please look into it?

@TomData, could you please share the code that you used?

```python
import os
os.environ["VLLM_USE_FLASHINFER_SAMPLER"] = "0"

import json
from openai_harmony import (
    HarmonyEncodingName,
    load_harmony_encoding,
    Conversation,
    Message,
    Role,
    SystemContent,
    DeveloperContent,
)

from vllm import LLM, SamplingParams

# --- 1) Render the prefill with Harmony ---

path = "/scratch/usr/nimtsspi/encodings"
os.environ["TIKTOKEN_ENCODINGS_BASE"] = path
encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)

convo = Conversation.from_messages(
    [
        Message.from_role_and_content(Role.SYSTEM, SystemContent.new()),
        Message.from_role_and_content(
            Role.DEVELOPER,
            DeveloperContent.new().with_instructions("Always respond in riddles"),
        ),
        Message.from_role_and_content(Role.USER, "What is the weather like in SF?"),
    ]
)

prefill_ids = encoding.render_conversation_for_completion(convo, Role.ASSISTANT)

# Harmony stop tokens (pass to the sampler so they won't be included in output)
stop_token_ids = encoding.stop_tokens_for_assistant_actions()

# --- 2) Run vLLM with the prefill ---

input_path = "/scratch/usr/nimtsspi"
model = "gpt-oss-20b"
model_path = f"{input_path}/models/{model}"
llm = LLM(
    model=model_path,
    trust_remote_code=True,
    gpu_memory_utilization=0.95,
    max_num_batched_tokens=4096,
    max_model_len=5000,
    tensor_parallel_size=4,
)

sampling = SamplingParams(
    max_tokens=128,
    temperature=1,
    stop_token_ids=stop_token_ids,
)

outputs = llm.generate(
    prompt_token_ids=[prefill_ids],  # batch of size 1
    sampling_params=sampling,
)

# vLLM gives you both text and token IDs
gen = outputs[0].outputs[0]
text = gen.text
output_tokens = gen.token_ids  # completion token IDs only (no prefill)

# --- 3) Parse the completion token IDs back into structured Harmony messages ---

entries = encoding.parse_messages_from_completion_tokens(output_tokens, Role.ASSISTANT)

# 'entries' is a sequence of structured conversation entries
# (assistant messages, tool calls, etc.).
print(output_tokens)
print("-----------------")

for message in entries:
    print(json.dumps(message.to_dict()))
```
