    # This label is critical. The ServiceMonitor uses this label to find this
    # specific Service. If the labels don't match, Prometheus will not be
    # able to discover the metrics endpoint.
    # GKE-SPECIFIC: This label is used by GKE's managed service. For a custom
    # deployment, you would use a more generic label like 'nvidia-dcgm-exporter'.
    app.kubernetes.io/name: gke-managed-dcgm-exporter
spec:
  selector:
    # This selector tells the Service which pods to route traffic to.
    # It must match the labels on the DCGM exporter pods.
    # GKE-SPECIFIC: This selector matches the labels on GKE's managed DCGM pods.
    app.kubernetes.io/name: gke-managed-dcgm-exporter
  ports:
    # The 'name' of this port is important. The ServiceMonitor will specifically
    # look for a port with this name to scrape metrics from.
    - name: metrics
      port: 9400
      targetPort: 9400
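# For reference, a minimal sketch of the ServiceMonitor side of this contract
# (the resource name below is hypothetical; only the label and the port name
# are implied by the comments above):
#
#   apiVersion: monitoring.coreos.com/v1
#   kind: ServiceMonitor
#   metadata:
#     name: gke-dcgm-exporter-servicemonitor  # hypothetical name
#   spec:
#     selector:
#       matchLabels:
#         app.kubernetes.io/name: gke-managed-dcgm-exporter  # matches the Service label above
#     endpoints:
#       - port: metrics  # matches the named port above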

# source: k8s_examples/AI/vllm-deployment/hpa/gpu-service-monitor-generic.yaml type: yaml
# This ServiceMonitor is for users who have MANUALLY installed the NVIDIA DCGM
# Exporter, for example, on EKS or AKS. It tells the Prometheus Operator
# how to discover and scrape metrics from the DCGM exporter's Service.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: nvidia-dcgm-exporter-servicemonitor
  namespace: monitoring
  labels:
    # This label is used by the Prometheus Operator to discover this
    # ServiceMonitor. It must match the 'serviceMonitorSelector' configured
    # in the Prometheus custom resource.
    release: prometheus
spec:
  # This selector identifies the Service to scrape. It must match the labels
  # on the 'gpu-dcgm-exporter-service' Service defined in the
  # 'gpu-dcgm-exporter-service.yaml' file.
  selector:
    matchLabels:
      app.kubernetes.io/name: gpu-dcgm-exporter
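  # For reference, a minimal sketch of the Service this selector is expected
  # to match, per 'gpu-dcgm-exporter-service.yaml' (the fields below are
  # inferred from the comments in this file, not copied from that file):
  #
  #   apiVersion: v1
  #   kind: Service
  #   metadata:
  #     name: gpu-dcgm-exporter-service
  #     namespace: monitoring
  #     labels:
  #       app.kubernetes.io/name: gpu-dcgm-exporter
  #   spec:
  #     ports:
  #       - name: metrics  # must match the endpoint port name below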
  # This selector specifies which namespace to search for the target Service.
  # The 'gpu-dcgm-exporter-service' is deployed in the 'monitoring'
  # namespace by the Helm chart instructions.
  namespaceSelector:
    matchNames:
      - monitoring
  endpoints:
    - port: metrics
      interval: 15s
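# For reference, a minimal sketch of how the Prometheus custom resource
# discovers this ServiceMonitor via the 'release: prometheus' label (the
# resource name and namespace below are illustrative):
#
#   apiVersion: monitoring.coreos.com/v1
#   kind: Prometheus
#   metadata:
#     name: prometheus
#     namespace: monitoring
#   spec:
#     serviceMonitorSelector:
#       matchLabels:
#         release: prometheus  # must match the ServiceMonitor label above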

# source: k8s_examples/AI/vllm-deployment/hpa/gpu-horizontal-pod-autoscaler.yaml type: yaml
# This HorizontalPodAutoscaler (HPA) targets the vLLM deployment and scales
# it based on the average GPU utilization across all pods. It uses the
# custom metric 'gpu_utilization_percent', which is provided by the
# Prometheus Adapter.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: gemma-server-gpu-hpa
spec:
  # scaleTargetRef points the HPA to the deployment it needs to scale.
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: vllm-gemma-deployment
  minReplicas: 1
  maxReplicas: 5
  metrics:
    - type: Pods
      pods:
        metric:
          # This is the custom metric that the HPA will query.
          # IMPORTANT: This name ('gpu_utilization_percent') is not the raw
          # metric from the DCGM exporter. It is the clean, renamed metric
          # that is exposed by the Prometheus Adapter (see the commented
          # adapter rule sketch below). The names must match exactly.
          name: gpu_utilization_percent
        target:
          type: AverageValue
          # This is the target value for the metric. The HPA will add or
          # remove pods to keep the average GPU utilization across all pods
          # at 20%.
          averageValue: 20
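  # For reference, a minimal sketch of the Prometheus Adapter rule that would
  # expose 'gpu_utilization_percent'. The raw metric name
  # (DCGM_FI_DEV_GPU_UTIL) and the exact query are assumptions, not taken
  # from this repo's adapter configuration:
  #
  #   rules:
  #     - seriesQuery: 'DCGM_FI_DEV_GPU_UTIL{namespace!="",pod!=""}'
  #       resources:
  #         overrides:
  #           namespace: {resource: "namespace"}
  #           pod: {resource: "pod"}
  #       name:
  #         matches: "DCGM_FI_DEV_GPU_UTIL"
  #         as: "gpu_utilization_percent"
  #       metricsQuery: 'avg(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)'
  #
  # Once the adapter is running, the metric can be verified with (adjust the
  # namespace to wherever the vLLM pods run):
  #   kubectl get --raw \
  #     "/apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/*/gpu_utilization_percent"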
  behavior:
    scaleUp:
      # The stabilizationWindowSeconds is set to 0 to allow for immediate
      # scaling up. This is a trade-off:
      # - For highly volatile workloads, immediate scaling is critical to
      #   maintain performance and responsiveness.
      # - However, this also introduces a risk of over-scaling if the workload
      #   spikes are very brief. A non-zero value would make the scaling
      #   less sensitive to short-lived spikes, but could introduce latency
      #   if the load persists.
      stabilizationWindowSeconds: 0
      policies:
        - type: Pods
          value: 4
          periodSeconds: 15
        - type: Percent
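    # For contrast, a hypothetical scaleDown section (not part of this file)
    # would typically use a non-zero stabilization window so that brief dips
    # in load do not immediately remove pods:
    #
    #   scaleDown:
    #     stabilizationWindowSeconds: 300
    #     policies:
    #       - type: Pods
    #         value: 1
    #         periodSeconds: 60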