[build-system]
requires = ["setuptools>=61.0", "setuptools-scm>=8.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "sglang"
dynamic = ["version"]
description = "SGLang is a fast serving framework for large language models and vision language models."
readme = "README.md"
requires-python = ">=3.10"
license = { file = "LICENSE" }
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: Apache Software License",
]
dependencies = ["aiohttp", "requests", "tqdm", "numpy", "IPython", "setproctitle"]

[project.optional-dependencies]
runtime_common = [
    "IPython",
    "aiohttp",
    "anthropic>=0.20.0",
    "blobfile==3.0.0",
    "av",
    "build",
    "compressed-tensors",
    "decord2",
    "datasets",
    "einops",
    "fastapi",
    "gguf",
    "hf_transfer",
    "huggingface_hub",
    "interegular",
    "llguidance>=0.7.11,<0.8.0",
    "modelscope",
    "msgspec",
    "ninja",
    "numpy",
    "openai-harmony==0.0.4",
    "openai==2.6.1",
    "orjson",
    "outlines==0.1.11",
    "packaging",
    "partial_json_parser",
    "pillow",
    "prometheus-client>=0.20.0",
    "psutil",
    "py-spy",
    "pybase64",
    "pydantic",
    "python-multipart",
    "pyzmq>=25.1.2",
    "requests",
    "scipy",
    "sentencepiece",
    "setproctitle",
    "soundfile==0.13.1",
    "tiktoken",
    "timm==1.0.16",
    "torchao==0.9.0",
    "tqdm",
    "transformers==4.57.1",
    "uvicorn",
    "uvloop",
    "xgrammar==0.1.27",
    "smg-grpc-proto>=0.4.1",
    "grpcio>=1.78.0",
    "grpcio-reflection>=1.78.0",
]
tracing = [
    "opentelemetry-sdk",
    "opentelemetry-api",
    "opentelemetry-exporter-otlp",
    "opentelemetry-exporter-otlp-proto-grpc",
]

# HIP (Heterogeneous-computing Interface for Portability) for AMD
# => base docker rocm/vllm-dev:20250114, not from public vllm whl
srt_hip = [
    "sglang[runtime_common]",
    "torch",
    "petit_kernel==0.0.2",
    "wave-lang==3.8.2",
]
diffusion_hip = [
    "PyYAML==6.0.1",
    "cloudpickle",
    "diffusers==0.36.0",
    "imageio==2.36.0",
    "imageio-ffmpeg==0.5.1",
    "moviepy>=2.0.0",
    "opencv-python-headless==4.10.0.84",
    "remote-pdb",
    "st_attn==0.0.7",
    "vsa==0.0.4",
    "runai_model_streamer>=0.15.5",
    "cache-dit==1.1.8",
    "addict",
    "scikit-image==0.25.2",
    "trimesh>=4.0.0",
    "xatlas",
]

# For Intel Gaudi(device : hpu) follow the installation guide
# https://docs.vllm.ai/en/latest/getting_started/gaudi-installation.html
srt_hpu = ["sglang[runtime_common]"]

# https://docs.sglang.io/platforms/mthreads_gpu.md
srt_musa = [
    "sglang[runtime_common]",
    "torch",
    "torch_musa",
    "torchada>=0.1.25",
    "mthreads-ml-py",
    "numpy<2.0",
]
diffusion_musa = [
    "PyYAML==6.0.1",
    "cloudpickle",
    "diffusers==0.36.0",
    "imageio==2.36.0",
    "imageio-ffmpeg==0.5.1",
    "moviepy>=2.0.0",
    "opencv-python-headless==4.10.0.84",
    "remote-pdb",
    "st_attn==0.0.7",
    "vsa==0.0.4",
    "runai_model_streamer>=0.15.5",
    "cache-dit==1.1.8",
    "addict",
    "scikit-image==0.25.2",
    "trimesh>=4.0.0",
    "xatlas",
]

test = [
    "accelerate",
    "expecttest",
    "gguf",
    "jsonlines",
    "matplotlib",
    "pandas",
    "peft",
    "pytest",
    "sentence_transformers",
    "tabulate",
]

all_hip = ["sglang[srt_hip]", "sglang[diffusion_hip]"]
all_hpu = ["sglang[srt_hpu]"]
all_musa = ["sglang[srt_musa]", "sglang[diffusion_musa]"]

dev_hip = ["sglang[all_hip]", "sglang[test]"]
dev_hpu = ["sglang[all_hpu]", "sglang[test]"]
dev_musa = ["sglang[all_musa]", "sglang[test]"]

[project.urls]
Homepage = "https://github.com/sgl-project/sglang"
"Bug Tracker" = "https://github.com/sgl-project/sglang/issues"

[project.scripts]
sglang = "sglang.cli.main:main"

[tool.setuptools.package-data]
sglang = [
    "srt/**/*",
    "jit_kernel/**/*",
]

[tool.setuptools.packages.find]
exclude = [
    "assets*",
    "benchmark*",
    "docs*",
    "dist*",
    "playground*",
    "scripts*",
    "tests*",
]

# NOTE(review): `[tool.wheel]` is not a table consumed by setuptools or the
# `wheel` package; sdist/wheel exclusions normally live in MANIFEST.in or
# `[tool.setuptools.packages.find]` above — confirm this section is actually read.
[tool.wheel]
exclude = [
    "assets*",
    "benchmark*",
    "docs*",
    "dist*",
    "playground*",
    "scripts*",
    "tests*",
]

# Version is derived from git tags of the repository root (one directory up).
[tool.setuptools_scm]
root = ".."
version_file = "sglang/_version.py"
git_describe_command = ["git", "describe", "--tags", "--long", "--match", "v*"]