<|endoftext|>
# source: k8s_examples/AI/vllm-deployment/hpa/horizontal-pod-autoscaler.yaml type: yaml
# This HorizontalPodAutoscaler (HPA) targets the vLLM deployment and scales
# it based on the average number of concurrent requests across all pods.
# It uses the custom metric 'vllm_num_requests_running', which is provided
# by the Prometheus Adapter.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: gemma-server-hpa
spec:
  # scaleTargetRef points the HPA to the deployment it needs to scale.
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: vllm-gemma-deployment
  minReplicas: 1
  maxReplicas: 5
  metrics:
  - type: Pods
    pods:
      metric:
        # This is the custom metric that the HPA will query.
        # IMPORTANT: This name ('vllm_num_requests_running') is not the raw
        # metric name exposed by the vLLM server ('vllm:num_requests_running').
        # It is the clean, renamed metric exposed by the Prometheus Adapter,
        # and the two configurations must use exactly the same name.
        name: vllm_num_requests_running
      target:
        type: AverageValue
        # This is the target value for the metric. The HPA will add or remove
        # pods to keep the average number of running requests per pod at 4.
        averageValue: 4
  behavior:
    # The scaling behavior can be customized to control how quickly the
    # deployment scales up or down.
    scaleDown:
      # The stabilizationWindowSeconds is set to 30 to keep the HPA from
      # scaling down too aggressively. When a scale-down is indicated, the
      # controller looks at all of the desired replica counts it computed over
      # the previous 30 seconds and uses the highest of them, so pods are only
      # removed once the lower recommendation has persisted for the whole
      # window. This smooths out the scaling behavior and prevents "flapping"
      # (rapidly scaling up and down). A larger value makes scale-down more
      # conservative, which can be useful for workloads with fluctuating
      # metrics, but it may also mean higher costs because resources are
      # released more slowly after load drops.
      stabilizationWindowSeconds: 30
      # With this policy the HPA may remove up to 100% of the current replicas
      # within any 15-second period (never going below minReplicas), so the
      # scale-down rate is limited only by the stabilization window above.
      policies:
      - type: Percent
        value: 100
        periodSeconds: 15
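# --- Illustrative sketch (not from the source repo) ---
# The rename described above has to be configured somewhere. With the
# kubernetes-sigs Prometheus Adapter, a discovery rule along these lines could
# expose the raw vLLM series 'vllm:num_requests_running' to the custom metrics
# API under the clean name 'vllm_num_requests_running' that the HPA queries.
# The ConfigMap name ('prometheus-adapter-config') and its namespace are
# assumptions; match them to however your adapter installation loads its
# config.yaml.
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-adapter-config   # assumed name; align with your adapter's Helm values / --config flag
  namespace: monitoring             # assumed namespace where the Prometheus Adapter runs
data:
  config.yaml: |
    rules:
    # Select the raw vLLM series, keeping only samples that carry the
    # namespace and pod labels needed to associate the metric with pods.
    - seriesQuery: 'vllm:num_requests_running{namespace!="",pod!=""}'
      resources:
        overrides:
          namespace: {resource: "namespace"}
          pod: {resource: "pod"}
      # Rename the Prometheus series to the name the HPA references.
      name:
        matches: "vllm:num_requests_running"
        as: "vllm_num_requests_running"
      # Aggregate the selected series so the adapter returns one value per pod.
      metricsQuery: 'sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)'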
<|endoftext|>
# source: k8s_examples/AI/vllm-deployment/hpa/gpu-service-monitor-gke.yaml type: yaml
# This ServiceMonitor tells the Prometheus Operator how to discover and scrape
# metrics from the NVIDIA DCGM Exporter. It is designed to find the
# 'gke-managed-dcgm-exporter' Service in the 'gke-managed-system' namespace
# and scrape its '/metrics' endpoint.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: nvidia-dcgm-exporter-servicemonitor
  namespace: monitoring
  labels:
    # This label is used by the Prometheus Operator to discover this
    # ServiceMonitor. It must match the 'serviceMonitorSelector' configured
    # in the Prometheus custom resource.
    release: prometheus
spec:
  # This selector identifies the specific Service to scrape. It must match
  # the labels on the 'gke-managed-dcgm-exporter' Service.
  selector:
    matchLabels:
      # GKE-SPECIFIC: This label matches the Service for GKE's managed DCGM
      # exporter. If you are running a different DCGM exporter deployment,
      # update this to match the labels on the corresponding Service.
      app.kubernetes.io/name: gke-managed-dcgm-exporter
  # namespaceSelector specifies which namespaces to search for the target
  # Service. On GKE, the managed DCGM exporter Service lives in 'gke-managed-system'.
  namespaceSelector:
    matchNames:
    # GKE-SPECIFIC: This is the namespace for GKE's managed DCGM exporter.
    # For other environments, this should be the namespace where you have
    # deployed the DCGM exporter Service.
    - gke-managed-system
  endpoints:
  # 'metrics' must match the name of a port defined on the target Service;
  # Prometheus scrapes that endpoint every 15 seconds.
  - port: metrics
    interval: 15s
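# --- Illustrative sketch (not from the source repo) ---
# GPU metrics from DCGM are only part of the picture: for the HPA in this
# directory to see 'vllm:num_requests_running', Prometheus must also scrape
# the vLLM server's own /metrics endpoint. A ServiceMonitor along these lines
# could do that. The Service name, its 'app: gemma-server' label, and the
# 'http' port name are assumptions; match them to the Service that fronts the
# vLLM deployment in the 'vllm-example' namespace.
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: vllm-gemma-servicemonitor
  namespace: monitoring
  labels:
    # Must match the Prometheus serviceMonitorSelector, just like the
    # DCGM ServiceMonitor above.
    release: prometheus
spec:
  selector:
    matchLabels:
      app: gemma-server          # assumed label on the vLLM Service
  namespaceSelector:
    matchNames:
    - vllm-example               # namespace used by the parent exercise
  endpoints:
  - port: http                   # assumed name of the Service port that exposes /metrics
    path: /metrics
    interval: 15s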
<|endoftext|>
# source: k8s_examples/AI/vllm-deployment/hpa/vllm-hpa.md type: docs
# Autoscaling an AI Inference Server with HPA using vLLM Server Metrics
This guide provides a comprehensive walkthrough for configuring a Kubernetes Horizontal Pod Autoscaler (HPA) to dynamically scale a vLLM AI inference server. The autoscaling logic is driven by a custom metric, `vllm:num_requests_running`, which is exposed directly by the vLLM server. This approach allows the system to ...
This guide assumes you have already deployed the vLLM inference server from the [parent directory's exercise](../README.md) into the `vllm-example` namespace.
---
## 1. Verify vLLM Server Metrics