rules:
  # This rule renames the 'vllm:num_requests_running' metric to
  # 'vllm_num_requests_running' to make it a valid custom metric name.
  - seriesQuery: 'vllm:num_requests_running'
    resources:
      overrides:
        namespace: {resource: "namespace"}
        pod: {resource: "pod"}
    name:
      matches: "vllm:num_requests_running"
      as: "vllm_num_requests_running"
    metricsQuery: 'sum(vllm:num_requests_running{<<.LabelMatchers>>}) by (<<.GroupBy>>)'
  # This rule takes 'dcgm_fi_dev_gpu_util_relabelled' (which now has the correct
  # 'pod' and 'namespace' labels) and exposes it as 'gpu_utilization_percent'.
  - seriesQuery: 'dcgm_fi_dev_gpu_util_relabelled'
    resources:
      overrides:
        namespace: {resource: "namespace"}
        pod: {resource: "pod"}
    name:
      matches: "dcgm_fi_dev_gpu_util_relabelled"
      as: "gpu_utilization_percent"
    metricsQuery: 'sum(dcgm_fi_dev_gpu_util_relabelled{<<.LabelMatchers>>}) by (<<.GroupBy>>)'
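# Once the adapter is running with this config, you can verify that the
# renamed metrics actually appear on the custom metrics API. A quick check
# (a sketch -- 'default' is an assumed namespace, use the one your vLLM
# pods run in):
#
#   kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1 | jq -r '.resources[].name'
#   kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/*/vllm_num_requests_running"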
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus-adapter
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus-adapter
  template:
    metadata:
      labels:
        app: prometheus-adapter
    spec:
      serviceAccountName: prometheus-adapter
      containers:
        - name: prometheus-adapter
          image: registry.k8s.io/prometheus-adapter/prometheus-adapter:v0.11.2
          args:
            - --prometheus-url=http://prometheus-kube-prometheus-prometheus.monitoring.svc:9090/
            - --metrics-relist-interval=1m
            - --config=/etc/adapter/config.yaml
            - --secure-port=6443
            - --cert-dir=/tmp/cert
            - --v=4 # Increase log verbosity for debugging
          ports:
            - containerPort: 6443
              name: https
          volumeMounts:
            - name: config
              mountPath: /etc/adapter
              readOnly: true
            - name: certs
              mountPath: /tmp/cert
      volumes:
        - name: config
          configMap:
            name: prometheus-adapter
        - name: certs
          emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  name: prometheus-adapter
  namespace: monitoring
spec:
  ports:
    - port: 443
      targetPort: https
  selector:
    app: prometheus-adapter
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1beta1.custom.metrics.k8s.io
spec:
  service:
    name: prometheus-adapter
    namespace: monitoring
  group: custom.metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
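---
# Example consumer of these metrics: an HPA that scales the inference
# Deployment on the renamed vLLM queue metric. This is a minimal sketch,
# not part of the adapter install -- the Deployment name 'vllm', the
# 'default' namespace, and the averageValue target are assumptions to
# adapt to your setup. The GPU path is analogous: swap the metric name
# for 'gpu_utilization_percent'.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: vllm-hpa
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: vllm
  minReplicas: 1
  maxReplicas: 4
  metrics:
    - type: Pods
      pods:
        metric:
          name: vllm_num_requests_running
        target:
          type: AverageValue
          averageValue: "5" # add a replica when pods average >5 in-flight requests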
# source: k8s_examples/AI/vllm-deployment/hpa/README.md type: docs
# Horizontal Pod Autoscaling for an AI Inference Server
This exercise shows how to set up the infrastructure to automatically
scale an AI inference server using custom metrics (either server
or GPU metrics). It requires a running Prometheus instance,
preferably one managed by the Prometheus Operator. We assume