# Run vLLM on Thor & Spark

1. Install uv

```bash
curl -LsSf https://astral.sh/uv/install.sh | sh
```

2. Create the environment

```bash
sudo apt install python3-dev python3.12-dev
uv venv .vllm --python 3.12
source .vllm/bin/activate
```

3. Install PyTorch

```bash
uv pip install torch==2.9.1 torchvision torchaudio triton --index-url https://download.pytorch.org/whl/cu130
```

4. Install vLLM

```bash
uv pip install https://github.com/vllm-project/vllm/releases/download/v0.15.0/vllm-0.15.0+cu130-cp38-abi3-manylinux_2_35_aarch64.whl
```

5. Export variables

```bash
export TORCH_CUDA_ARCH_LIST=12.1f  # Spark; use 11.0a for Thor
export TRITON_PTXAS_PATH=/usr/local/cuda/bin/ptxas
export PATH=/usr/local/cuda/bin:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
```

6. Clean memory

```bash
# Drop the page cache, dentries, and inodes before loading large models
sudo sysctl -w vm.drop_caches=3
```

7. Run gpt-oss-120b

```bash
mkdir -p tiktoken_encodings
wget -O tiktoken_encodings/o200k_base.tiktoken "https://openaipublic.blob.core.windows.net/encodings/o200k_base.tiktoken"
wget -O tiktoken_encodings/cl100k_base.tiktoken "https://openaipublic.blob.core.windows.net/encodings/cl100k_base.tiktoken"
export TIKTOKEN_ENCODINGS_BASE=${PWD}/tiktoken_encodings
```

```bash
# MXFP8 activations for MoE: faster, but with a higher risk of accuracy loss.
export VLLM_USE_FLASHINFER_MXFP4_MOE=1
uv run vllm serve "openai/gpt-oss-120b" \
  --async-scheduling \
  --port 8000 \
  --host 0.0.0.0 \
  --trust-remote-code \
  --swap-space 16 \
  --max-model-len 32000 \
  --tensor-parallel-size 1 \
  --max-num-seqs 1024 \
  --gpu-memory-utilization 0.7
```

8. Benchmark

```bash
vllm bench serve \
  --host 0.0.0.0 \
  --port 8000 \
  --model openai/gpt-oss-120b \
  --trust-remote-code \
  --dataset-name random \
  --random-input-len 1024 \
  --random-output-len 1024 \
  --ignore-eos \
  --max-concurrency 512 \
  --num-prompts 2560 \
  --save-result --result-filename vllm_benchmark_serving_results.json
```

9. Run Nemotron

Download the custom reasoning parser from the Hugging Face repository.

```bash
wget https://huggingface.co/nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4/resolve/main/nano_v3_reasoning_parser.py
```

```bash
VLLM_USE_FLASHINFER_MOE_FP4=1 \
VLLM_FLASHINFER_MOE_BACKEND=throughput \
vllm serve nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4 \
  --served-model-name model \
  --max-num-seqs 8 \
  --tensor-parallel-size 1 \
  --max-model-len 262144 \
  --port 8000 \
  --trust-remote-code \
  --enable-auto-tool-choice \
  --tool-call-parser qwen3_coder \
  --reasoning-parser-plugin nano_v3_reasoning_parser.py \
  --reasoning-parser nano_v3 \
  --kv-cache-dtype fp8
```
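
Once either server is up, a quick sanity check against vLLM's OpenAI-compatible API can confirm it is serving. This is a minimal sketch, not part of the original guide, assuming the default `localhost:8000` used above; the model name to pass is `openai/gpt-oss-120b` for the gpt-oss server or the `--served-model-name` alias `model` for the Nemotron server.

```bash
# List the models the server exposes
curl http://localhost:8000/v1/models

# Send a small chat completion (swap "model" for "openai/gpt-oss-120b" when testing the gpt-oss server)
curl http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "model", "messages": [{"role": "user", "content": "Hello"}], "max_tokens": 32}'
```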