#!/usr/bin/env bash
#
# BioPacific launcher and orchestrator.
#
# This script manages two different concepts:
#
# 1. Services (long-running background processes)
# - embedding : vLLM embedding server
# - llm : vLLM chat/completions server
# - qdrant : vector database
# - agent : paper agent REST service
#
# 2. Pipeline stages (one-shot foreground jobs)
# - s1_index : build journal article indexes
# - s2_filter : filter indexed papers
# - s3_fetch : fetch article content / sections
# - s4_embedding : preprocess and push embeddings into Qdrant
#
# Default orchestration:
# start / restart with no explicit targets performs a full bring-up:
# 1) start embedding + llm + qdrant
# 2) wait until those prerequisite services are ready
# 3) run pipeline.s1_index -> s2_filter -> s3_fetch -> s4_embedding
# 4) start the agent service
#
# Service processes run in the background via nohup; PIDs are recorded under
# logs/ so subsequent stop/status calls can find them again. Pipeline stages
# run in the foreground and stream output to both the terminal and log files.
#
# Common usage:
# ./start_service.sh
# ./start_service.sh start
# ./start_service.sh start embedding llm qdrant
# ./start_service.sh stop
# ./start_service.sh stop agent
# ./start_service.sh restart
# ./start_service.sh status
# ./start_service.sh pipeline
# ./start_service.sh pipeline s2_filter s3_fetch
# CONFIG_FILE=/path/to/config.yaml ./start_service.sh start
# Abort on any error, on use of unset variables, and on failures anywhere
# inside a pipeline.
set -euo pipefail

# Directory containing this script; all relative paths resolve against it.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Config and log locations; both overridable via environment variables.
CONFIG_FILE="${CONFIG_FILE:-${ROOT_DIR}/config.yaml}"
LOG_DIR="${LOG_DIR:-${ROOT_DIR}/logs}"
mkdir -p "${LOG_DIR}"

# All managed background services, and the one-shot pipeline stages, in order.
SERVICES=(embedding llm qdrant agent)
PIPELINE_STAGES=(s1_index s2_filter s3_fetch s4_embedding)
# Services that must be up before the pipeline runs (the agent starts last).
PREREQ_SERVICES=(embedding llm qdrant)
# Shutdown order is the reverse of bring-up: agent first, embedding last.
STOP_ORDER=(agent qdrant llm embedding)

# Resolved lazily by use_preferred_runtime() once a conda env is located.
PYTHON_BIN=""
VLLM_BIN=""
CONDA_ENV_DIR=""
# ---------------------------------------------------------------------------
# Tiny helpers
# ---------------------------------------------------------------------------
# Print an error message to stderr and terminate the script with status 1.
# Uses printf rather than echo so messages beginning with "-" or containing
# backslashes are emitted verbatim.
die() {
  printf '%s\n' "$*" >&2
  exit 1
}
# Die unless $1 is a non-empty path to an executable file.
#   $1 - candidate path (may be empty when lookup failed upstream)
#   $2 - human-readable label for the error message
# Improvement: when a path WAS supplied but is not executable, include it in
# the message so the operator can see what was actually checked.
require_executable() {
  local executable="$1"
  local label="$2"
  if [[ -z "${executable}" || ! -x "${executable}" ]]; then
    die "Missing executable: ${label}${executable:+ (checked: ${executable})}"
  fi
}
# Abort with a diagnostic unless $1 names an existing directory.
require_dir() {
  local dir="$1"
  if [[ ! -d "${dir}" ]]; then
    die "Directory not found: ${dir}"
  fi
}
# Abort with a diagnostic unless $1 names an existing regular file.
require_file() {
  local path="$1"
  if [[ ! -f "${path}" ]]; then
    die "File not found: ${path}"
  fi
}
# Prepend $1 to the colon-separated list $2 and print the result.
# If $1 is already an element of the list, the list is printed unchanged;
# if the list is empty, the result is just $1. Never prints a newline.
append_unique_path() {
  local new_entry="$1"
  local existing="$2"
  case ":${existing}:" in
    *":${new_entry}:"*)
      # Already present -> keep the list as-is.
      printf '%s' "${existing}"
      return
      ;;
  esac
  if [[ -z "${existing}" ]]; then
    printf '%s' "${new_entry}"
  else
    printf '%s:%s' "${new_entry}" "${existing}"
  fi
}
# ---------------------------------------------------------------------------
# Conda env auto-detection (same behavior as before)
# ---------------------------------------------------------------------------
# Search well-known locations for a conda env named $1 and print the first
# directory that contains an executable bin/python. Search order:
#   $PREFERRED_ENV_DIR, the active $CONDA_PREFIX (if its basename matches),
#   a sibling ../anaconda install, `conda info --base`, then common per-user
#   install roots. Returns 1 when no candidate has a usable python.
# Fix: `candidate` is now declared local — previously the loop variable was
# implicitly global and leaked into the caller's scope.
detect_conda_env_dir() {
  local env_name="$1"
  local -a candidates=()
  local candidate
  if [[ -n "${PREFERRED_ENV_DIR:-}" ]]; then
    candidates+=("${PREFERRED_ENV_DIR}")
  fi
  if [[ -n "${CONDA_PREFIX:-}" && "$(basename "${CONDA_PREFIX}")" == "${env_name}" ]]; then
    candidates+=("${CONDA_PREFIX}")
  fi
  candidates+=("${ROOT_DIR}/../anaconda/envs/${env_name}")
  if command -v conda >/dev/null 2>&1; then
    local conda_base
    conda_base="$(conda info --base 2>/dev/null || true)"
    [[ -n "${conda_base}" ]] && candidates+=("${conda_base}/envs/${env_name}")
  fi
  candidates+=(
    "${HOME}/anaconda/envs/${env_name}"
    "${HOME}/miniconda3/envs/${env_name}"
    "${HOME}/mambaforge/envs/${env_name}"
  )
  for candidate in "${candidates[@]}"; do
    if [[ -n "${candidate}" && -x "${candidate}/bin/python" ]]; then
      printf '%s\n' "${candidate}"
      return 0
    fi
  done
  return 1
}
# Point PYTHON_BIN / VLLM_BIN at the conda env named $1 if one can be found,
# exporting PATH and library paths so child processes inherit the env.
# Falls back to whatever python3/vllm are on PATH; dies if no python exists.
# Mutates globals: CONDA_ENV_DIR, PYTHON_BIN, VLLM_BIN, PATH, LD_LIBRARY_PATH,
# LIBRARY_PATH, CONDA_PREFIX, CONDA_DEFAULT_ENV.
use_preferred_runtime() {
  local env_name="$1"
  local env_dir
  env_dir="$(detect_conda_env_dir "${env_name}" || true)"
  if [[ -n "${env_dir}" ]]; then
    CONDA_ENV_DIR="${env_dir}"
    # Export first, then assign: the separate assignment lets the command
    # substitution run without masking its exit status under `set -e`.
    export PATH
    PATH="$(append_unique_path "${CONDA_ENV_DIR}/bin" "${PATH}")"
    export LD_LIBRARY_PATH
    LD_LIBRARY_PATH="$(append_unique_path "${CONDA_ENV_DIR}/lib" "${LD_LIBRARY_PATH:-}")"
    export LIBRARY_PATH
    LIBRARY_PATH="$(append_unique_path "${CONDA_ENV_DIR}/lib" "${LIBRARY_PATH:-}")"
    # Mimic `conda activate` for tools that inspect these variables.
    export CONDA_PREFIX="${CONDA_ENV_DIR}"
    export CONDA_DEFAULT_ENV="${env_name}"
    PYTHON_BIN="${CONDA_ENV_DIR}/bin/python"
    VLLM_BIN="${CONDA_ENV_DIR}/bin/vllm"
  fi
  # Fallbacks when no env dir was found. vllm may legitimately be absent
  # (only needed when starting a vLLM service); python is always mandatory.
  [[ -z "${PYTHON_BIN}" ]] && PYTHON_BIN="$(command -v python3 || true)"
  [[ -z "${VLLM_BIN}" ]] && VLLM_BIN="$(command -v vllm || true)"
  require_executable "${PYTHON_BIN}" "python3"
}
# ---------------------------------------------------------------------------
# Config loading via PyYAML. Emits shell assignments for CFG_* variables.
# ---------------------------------------------------------------------------
# Parse ${CONFIG_FILE} with PyYAML via an inline Python program that prints
# shell variable assignments (CFG_*=value, one per line), then eval them
# into this shell. Values are quoted by Python's shlex.quote, so the eval is
# safe for arbitrary config strings. Exits non-zero (propagated by set -e)
# when the config is missing required sections/keys.
load_config() {
  require_file "${CONFIG_FILE}"
  local dump
  dump="$(
    ROOT_DIR="${ROOT_DIR}" \
    CONFIG_FILE="${CONFIG_FILE}" \
    "${PYTHON_BIN}" - <<'PY'
import os, shlex, sys
try:
    import yaml
except ImportError:
    sys.stderr.write(
        "PyYAML is required. Install it in the biopacific env, e.g.\n"
        " pip install pyyaml\n"
    )
    sys.exit(1)
root = os.environ["ROOT_DIR"]
with open(os.environ["CONFIG_FILE"], encoding="utf-8") as fh:
    cfg = yaml.safe_load(fh) or {}

def resolve_path(value: str) -> str:
    # Relative paths in config.yaml are taken relative to the repo root.
    if not value:
        return ""
    return value if os.path.isabs(value) else os.path.normpath(os.path.join(root, value))

def emit(name: str, value) -> None:
    # shlex.quote makes the value safe for the shell-side eval.
    print(f"{name}={shlex.quote(str(value))}")

emit("CFG_ENV_NAME", cfg.get("env_name", "biopacific"))
emit("CFG_SERVICE_HOST", cfg.get("service_host", "0.0.0.0"))
q = cfg.get("qdrant") or {}
emit("CFG_QDRANT_BINARY", resolve_path(q.get("binary", "")))
emit("CFG_QDRANT_STORAGE_PATH", resolve_path(q.get("storage_path", "")))
emit("CFG_QDRANT_PORT", q.get("port", 6333))

# The agent port lives under pipeline.s5_agent and is mandatory.
pipeline = cfg.get("pipeline")
if not isinstance(pipeline, dict):
    sys.stderr.write("config.yaml: [pipeline] must be a mapping\n")
    sys.exit(1)
a = pipeline.get("s5_agent")
if not isinstance(a, dict):
    sys.stderr.write("config.yaml: [pipeline.s5_agent] must be a mapping\n")
    sys.exit(1)
if "service_port" not in a:
    sys.stderr.write("config.yaml: [pipeline.s5_agent].service_port is required\n")
    sys.exit(1)
emit("CFG_AGENT_PORT", a["service_port"])

# The two vLLM services share a schema; max_model_len has no default.
for yaml_key, prefix in (("embedding", "EMBEDDING"),
                         ("llm", "LLM")):
    s = cfg.get(yaml_key)
    if not isinstance(s, dict):
        sys.stderr.write(
            "config.yaml: [%s] must be a mapping (section missing or wrong type)\n"
            % yaml_key
        )
        sys.exit(1)
    if "max_model_len" not in s:
        sys.stderr.write(
            "config.yaml: [%s].max_model_len is required "
            "(per-service vLLM --max-model-len; no default)\n" % yaml_key
        )
        sys.exit(1)
    emit(f"CFG_{prefix}_SERVED_NAME", s.get("served_name", yaml_key))
    emit(f"CFG_{prefix}_MODEL_DIR", resolve_path(s.get("model_dir", "")))
    emit(f"CFG_{prefix}_PORT", s.get("port", 0))
    emit(f"CFG_{prefix}_GPU", s.get("gpu", "0"))
    emit(f"CFG_{prefix}_GPU_MEMORY_UTILIZATION",
         s.get("gpu_memory_utilization", 0.8))
    emit(f"CFG_{prefix}_MAX_MODEL_LEN", s["max_model_len"])
PY
  )"
  # dump is a newline-separated list of CFG_*=quoted-value assignments.
  eval "${dump}"
}
# ---------------------------------------------------------------------------
# Per-service metadata lookup. Given a service name, populate globals:
# SVC_LABEL, SVC_KIND (vllm|qdrant|python), SVC_PORT, SVC_GPU, SVC_MEM,
# SVC_MODEL_DIR, SVC_SCRIPT_PATH, SVC_SERVED_NAME, SVC_LOG_FILE,
# SVC_PID_FILE, SVC_PGREP_PATTERN,
# SVC_DEFAULT_CHAT_TEMPLATE_KWARGS.
# ---------------------------------------------------------------------------
# Populate the SVC_* globals describing one service:
#   SVC_LABEL, SVC_KIND (vllm|qdrant|python), SVC_PORT, SVC_GPU, SVC_MEM,
#   SVC_MODEL_DIR, SVC_SCRIPT_PATH, SVC_SERVED_NAME, SVC_MAX_MODEL_LEN,
#   SVC_VLLM_RUNNER, SVC_DEFAULT_CHAT_TEMPLATE_KWARGS, SVC_LOG_FILE,
#   SVC_PID_FILE, SVC_PGREP_PATTERN.
# Fix: the optional fields are now reset up front on every call, so values
# from a previously selected service can no longer leak into a service that
# does not define them (e.g. SVC_MAX_MODEL_LEN after selecting qdrant, or
# SVC_SCRIPT_PATH after selecting a non-agent service).
select_service() {
  local svc="$1"
  SVC_LABEL="${svc}"
  SVC_LOG_FILE="${LOG_DIR}/${svc}.log"
  SVC_PID_FILE="${LOG_DIR}/${svc}.pid"
  # Defaults for fields only some services define.
  SVC_SCRIPT_PATH=""
  SVC_MAX_MODEL_LEN=""
  SVC_VLLM_RUNNER=""
  SVC_DEFAULT_CHAT_TEMPLATE_KWARGS=""
  case "${svc}" in
    embedding)
      SVC_KIND="vllm"
      SVC_PORT="${CFG_EMBEDDING_PORT}"
      SVC_GPU="${CFG_EMBEDDING_GPU}"
      SVC_MEM="${CFG_EMBEDDING_GPU_MEMORY_UTILIZATION}"
      SVC_MODEL_DIR="${CFG_EMBEDDING_MODEL_DIR}"
      SVC_SERVED_NAME="${CFG_EMBEDDING_SERVED_NAME}"
      SVC_MAX_MODEL_LEN="${CFG_EMBEDDING_MAX_MODEL_LEN}"
      SVC_VLLM_RUNNER="pooling"
      SVC_PGREP_PATTERN="vllm serve ${SVC_MODEL_DIR}"
      ;;
    llm)
      SVC_KIND="vllm"
      SVC_PORT="${CFG_LLM_PORT}"
      SVC_GPU="${CFG_LLM_GPU}"
      SVC_MEM="${CFG_LLM_GPU_MEMORY_UTILIZATION}"
      SVC_MODEL_DIR="${CFG_LLM_MODEL_DIR}"
      SVC_SERVED_NAME="${CFG_LLM_SERVED_NAME}"
      SVC_MAX_MODEL_LEN="${CFG_LLM_MAX_MODEL_LEN}"
      SVC_DEFAULT_CHAT_TEMPLATE_KWARGS='{"enable_thinking": false}'
      SVC_PGREP_PATTERN="vllm serve ${SVC_MODEL_DIR}"
      ;;
    qdrant)
      SVC_KIND="qdrant"
      SVC_PORT="${CFG_QDRANT_PORT}"
      SVC_GPU="-"
      SVC_MEM="-"
      SVC_MODEL_DIR="${CFG_QDRANT_STORAGE_PATH}"
      SVC_SERVED_NAME="qdrant"
      SVC_PGREP_PATTERN="${CFG_QDRANT_BINARY}"
      ;;
    agent)
      SVC_KIND="python"
      SVC_PORT="${CFG_AGENT_PORT}"
      SVC_GPU="-"
      SVC_MEM="-"
      SVC_MODEL_DIR="-"
      SVC_SCRIPT_PATH="${ROOT_DIR}/pipeline/s5-agent/paper_agent.py"
      SVC_SERVED_NAME="paper-agent"
      SVC_PGREP_PATTERN="${SVC_SCRIPT_PATH}"
      ;;
    *)
      die "Unknown service: ${svc}. Valid: ${SERVICES[*]}"
      ;;
  esac
}
# ---------------------------------------------------------------------------
# Runtime utilities
# ---------------------------------------------------------------------------
# Return 0 (true) when TCP port $1 cannot be bound on 0.0.0.0 — i.e. some
# process already holds it — and 1 when the port is free. Implemented as a
# short Python bind probe so no extra tools (lsof/ss/netstat) are required.
port_in_use() {
  "${PYTHON_BIN}" - "$1" <<'PY'
import socket, sys
port = int(sys.argv[1])
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    # SO_REUSEADDR: sockets lingering in TIME_WAIT do not count as "in use".
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        s.bind(("0.0.0.0", port))
    except OSError:
        sys.exit(0)  # bind failed -> port taken -> shell sees success (0)
sys.exit(1)  # bind succeeded -> port free -> shell sees failure (1)
PY
}
# Exit non-zero (with guidance on stderr) when the embedding and llm
# services share a GPU and their combined gpu_memory_utilization exceeds
# 1.0 — a single card cannot be oversubscribed. No output on success.
validate_shared_gpu_budget() {
  "${PYTHON_BIN}" - \
    "${CFG_EMBEDDING_GPU}" "${CFG_LLM_GPU}" \
    "${CFG_EMBEDDING_GPU_MEMORY_UTILIZATION}" \
    "${CFG_LLM_GPU_MEMORY_UTILIZATION}" <<'PY'
import sys
labels = ["embedding", "llm"]
gpus = sys.argv[1:3]
mems = [float(x) for x in sys.argv[3:5]]
# Sum each service's requested memory fraction per individual GPU id:
# a service on "0,1" counts its full fraction against GPU 0 AND GPU 1.
per_gpu, per_gpu_who = {}, {}
for label, gpu, mem in zip(labels, gpus, mems):
    for single in gpu.split(","):
        single = single.strip()
        if not single:
            continue
        per_gpu[single] = per_gpu.get(single, 0.0) + mem
        per_gpu_who.setdefault(single, []).append(label)
bad = [(g, t, per_gpu_who[g]) for g, t in per_gpu.items() if t > 1.0]
if bad:
    for gpu, total, who in bad:
        print(
            f"GPU {gpu} is shared by {', '.join(who)} with a combined "
            f"memory budget of {total:.2f} (> 1.0).",
            file=sys.stderr,
        )
    print(
        "Move one of the services to a different GPU, or lower the "
        "gpu_memory_utilization values in config.yaml.",
        file=sys.stderr,
    )
    sys.exit(1)
PY
}
# Print the number of comma-separated GPU ids in $1 (e.g. "0,2,3" -> 3).
# Fix: the previous unquoted array expansion `($list)` was subject to
# pathname expansion — a stray glob character in the value (e.g. "*")
# would be expanded against the CWD and miscount. `read -a` splits on the
# comma IFS without globbing.
gpu_count() {
  local list="$1"
  local -a gpus=()
  IFS=',' read -r -a gpus <<< "${list}"
  echo "${#gpus[@]}"
}
# Locate the live PID for a service and print it; return 1 if none found.
#   $1 - pid file path
#   $2 - substring expected in the process command line
# Strategy: first trust the recorded pid file, verifying the process still
# exists AND its command line contains the expected pattern (so a recycled
# PID belonging to an unrelated process is not mistaken for our service).
# If that fails, fall back to pgrep and re-record the discovered PID.
get_running_pid() {
  local pid_file="$1"
  local expected="$2"
  if [[ -f "${pid_file}" ]]; then
    local pid
    pid="$(<"${pid_file}")"
    # kill -0 probes process existence without sending a signal.
    if [[ -n "${pid}" ]] && kill -0 "${pid}" >/dev/null 2>&1; then
      local cmdline
      cmdline="$(ps -p "${pid}" -o args= 2>/dev/null || true)"
      if [[ "${cmdline}" == *"${expected}"* ]]; then
        echo "${pid}"
        return 0
      fi
    fi
  fi
  if command -v pgrep >/dev/null 2>&1; then
    local pid
    # NOTE(review): pgrep -f treats ${expected} as an extended regex; a
    # path containing metacharacters could over- or under-match — confirm
    # model/binary paths stay regex-benign.
    pid="$(pgrep -f "${expected}" | head -n 1 || true)"
    if [[ -n "${pid}" ]]; then
      # Cache the rediscovered PID for the next status/stop call.
      echo "${pid}" > "${pid_file}"
      echo "${pid}"
      return 0
    fi
  fi
  return 1
}
# Poll up to 20 seconds for process $1 to disappear.
# Returns 0 as soon as the PID is gone, 1 if it is still alive afterwards.
wait_for_stop() {
  local pid="$1"
  local attempt=0
  while (( attempt < 20 )); do
    if ! kill -0 "${pid}" >/dev/null 2>&1; then
      return 0
    fi
    sleep 1
    attempt=$((attempt + 1))
  done
  return 1
}
# If any requested target is a vLLM service (embedding or llm), run the
# shared-GPU memory budget validation once; otherwise do nothing.
validate_vllm_targets() {
  local candidate
  for candidate in "$@"; do
    case "${candidate}" in
      embedding|llm)
        validate_shared_gpu_budget
        return 0
        ;;
    esac
  done
}
# Poll an HTTP endpoint with curl until it answers successfully, printing a
# ready message, or die after 60 attempts (2s apart, ~2 minutes total).
#   $1 - label used in messages
#   $2 - "GET" or "POST" (POST sends an empty JSON body)
#   $3 - URL to probe
wait_for_http_ready() {
  local label="$1"
  local method="$2"
  local url="$3"
  require_executable "$(command -v curl || true)" "curl"
  # Build the probe command once; only POST needs extra arguments.
  local -a probe=(curl --silent --show-error --fail)
  if [[ "${method}" == "POST" ]]; then
    probe+=(-X POST -H "Content-Type: application/json" -d '{}')
  fi
  probe+=("${url}")
  local try
  for try in {1..60}; do
    if "${probe[@]}" >/dev/null 2>&1; then
      echo "${label} is ready: ${url}"
      return 0
    fi
    sleep 2
  done
  die "Timed out waiting for ${label} to become ready: ${url}"
}
# Block until service $1 answers on its health/readiness endpoint.
# vLLM services expose /v1/models; qdrant answers on /; the agent is probed
# via a POST to its session-start endpoint.
wait_for_service_ready() {
  local svc="$1"
  select_service "${svc}"
  local base="http://127.0.0.1:${SVC_PORT}"
  case "${svc}" in
    embedding|llm)
      wait_for_http_ready "${svc}" GET "${base}/v1/models"
      ;;
    qdrant)
      wait_for_http_ready "${svc}" GET "${base}/"
      ;;
    agent)
      wait_for_http_ready "${svc}" POST "${base}/v1/session/start"
      ;;
    *)
      die "Unknown service for readiness check: ${svc}"
      ;;
  esac
}
# Populate PIPELINE_STAGE_LABEL, PIPELINE_STAGE_LOG_FILE and
# PIPELINE_STAGE_SCRIPT for stage $1; die on an unknown stage name.
select_pipeline_stage() {
  local stage="$1"
  local script_rel
  case "${stage}" in
    s1_index)     script_rel="pipeline/s1-index/paper-index.sh" ;;
    s2_filter)    script_rel="pipeline/s2-filter/paper-filter.sh" ;;
    s3_fetch)     script_rel="pipeline/s3-fetch/paper-fetch.sh" ;;
    s4_embedding) script_rel="pipeline/s4-embedding/paper_embedding.sh" ;;
    *)
      die "Unknown pipeline stage: ${stage}. Valid: ${PIPELINE_STAGES[*]}"
      ;;
  esac
  PIPELINE_STAGE_LABEL="${stage}"
  PIPELINE_STAGE_LOG_FILE="${LOG_DIR}/${stage}.log"
  PIPELINE_STAGE_SCRIPT="${ROOT_DIR}/${script_rel}"
}
# Run one pipeline stage in the foreground from the repo root, streaming its
# output to the terminal and to the stage's log file. The stage script gets
# the config path via BIOPACIFIC_CONFIG; a failing stage aborts the script
# because `set -o pipefail` is active globally.
run_pipeline_stage() {
  local stage="$1"
  select_pipeline_stage "${stage}"
  require_file "${PIPELINE_STAGE_SCRIPT}"
  local banner="============================================================"
  echo "${banner}"
  echo "Running pipeline stage: ${PIPELINE_STAGE_LABEL}"
  echo " script: ${PIPELINE_STAGE_SCRIPT}"
  echo " log: ${PIPELINE_STAGE_LOG_FILE}"
  echo "${banner}"
  (
    cd "${ROOT_DIR}"
    BIOPACIFIC_CONFIG="${CONFIG_FILE}" bash "${PIPELINE_STAGE_SCRIPT}"
  ) 2>&1 | tee "${PIPELINE_STAGE_LOG_FILE}"
}
# Run each named pipeline stage, in the order given, one after another.
run_pipeline_sequence() {
  local current
  for current in "$@"; do
    run_pipeline_stage "${current}"
  done
}
# Stop each named service, in the order given (callers pass STOP_ORDER for
# a dependency-safe full shutdown).
stop_services_in_order() {
  local name
  for name in "$@"; do
    stop_one "${name}"
  done
}
# Full orchestrated bring-up: validate GPU budgets, start and await the
# prerequisite services, run every pipeline stage, then start the agent.
run_full_start() {
  # The prereq set includes both vLLM services; refuse to launch if their
  # combined memory budgets oversubscribe a shared GPU.
  validate_vllm_targets "${PREREQ_SERVICES[@]}"
  echo "Starting prerequisite services: ${PREREQ_SERVICES[*]}"
  local prereq
  for prereq in "${PREREQ_SERVICES[@]}"; do
    start_one "${prereq}"
  done
  echo "Waiting for prerequisite services to become ready ..."
  for prereq in "${PREREQ_SERVICES[@]}"; do
    wait_for_service_ready "${prereq}"
  done
  echo "Running full pipeline: ${PIPELINE_STAGES[*]}"
  run_pipeline_sequence "${PIPELINE_STAGES[@]}"
  echo "Starting final service: agent"
  start_one agent
  wait_for_service_ready agent
}
# ---------------------------------------------------------------------------
# Start / stop / status for a single service
# ---------------------------------------------------------------------------
# Start one service in the background (no-op if a matching process already
# runs). Refuses to start when the target port is held by anything else.
# Records the child PID under logs/<svc>.pid for later stop/status calls;
# stdout+stderr of the service go to logs/<svc>.log.
start_one() {
  local svc="$1"
  select_service "${svc}"
  local existing_pid
  # Idempotency: if a matching process already exists, just report it.
  if existing_pid="$(get_running_pid "${SVC_PID_FILE}" "${SVC_PGREP_PATTERN}")"; then
    echo "${SVC_LABEL} is already running with PID ${existing_pid} (log: ${SVC_LOG_FILE})"
    return 0
  fi
  if port_in_use "${SVC_PORT}"; then
    die "Port ${SVC_PORT} is already in use (needed by ${SVC_LABEL})."
  fi
  case "${SVC_KIND}" in
    vllm)
      require_executable "${VLLM_BIN}" "vllm"
      require_dir "${SVC_MODEL_DIR}"
      # Tensor parallelism spans every GPU listed for this service.
      local tp_size
      tp_size="$(gpu_count "${SVC_GPU}")"
      local -a cmd=(
        env "CUDA_VISIBLE_DEVICES=${SVC_GPU}"
        "${VLLM_BIN}" serve "${SVC_MODEL_DIR}"
        --host "${CFG_SERVICE_HOST}"
        --port "${SVC_PORT}"
        --served-model-name "${SVC_SERVED_NAME}"
        --max-model-len "${SVC_MAX_MODEL_LEN}"
        --gpu-memory-utilization "${SVC_MEM}"
        --tensor-parallel-size "${tp_size}"
        --trust-remote-code
      )
      # Optional flags only some services define (see select_service).
      [[ -n "${SVC_VLLM_RUNNER}" ]] && cmd+=(--runner "${SVC_VLLM_RUNNER}")
      [[ -n "${SVC_DEFAULT_CHAT_TEMPLATE_KWARGS}" ]] && \
        cmd+=(--default-chat-template-kwargs "${SVC_DEFAULT_CHAT_TEMPLATE_KWARGS}")
      nohup "${cmd[@]}" > "${SVC_LOG_FILE}" 2>&1 &
      local pid=$!
      echo "${pid}" > "${SVC_PID_FILE}"
      echo "${SVC_LABEL} started on port ${SVC_PORT}, GPU(s) ${SVC_GPU} (tp=${tp_size}), PID ${pid}"
      echo " log: ${SVC_LOG_FILE}"
      ;;
    qdrant)
      require_executable "${CFG_QDRANT_BINARY}" "qdrant"
      mkdir -p "${CFG_QDRANT_STORAGE_PATH}"
      # Qdrant is configured entirely through QDRANT__* environment vars.
      local -a cmd=(
        env
        "QDRANT__SERVICE__HOST=${CFG_SERVICE_HOST}"
        "QDRANT__SERVICE__HTTP_PORT=${CFG_QDRANT_PORT}"
        "QDRANT__STORAGE__STORAGE_PATH=${CFG_QDRANT_STORAGE_PATH}"
        "QDRANT__TELEMETRY_DISABLED=true"
        "${CFG_QDRANT_BINARY}"
      )
      # Launch from the binary's own directory; the pid file is written
      # inside the subshell because $! is only visible there.
      # NOTE(review): $! is the PID of the backgrounded `cd && nohup ...`
      # compound, which may not be the service binary itself on every bash
      # version — the pgrep fallback in get_running_pid compensates.
      ( cd "$(dirname "${CFG_QDRANT_BINARY}")" && \
        nohup "${cmd[@]}" > "${SVC_LOG_FILE}" 2>&1 & echo $! > "${SVC_PID_FILE}" )
      local pid
      pid="$(<"${SVC_PID_FILE}")"
      echo "${SVC_LABEL} started on port ${SVC_PORT}, storage ${CFG_QDRANT_STORAGE_PATH}, PID ${pid}"
      echo " log: ${SVC_LOG_FILE}"
      ;;
    python)
      require_file "${SVC_SCRIPT_PATH}"
      # The agent reads its config path and bind host from the environment.
      local -a cmd=(
        env
        "BIOPACIFIC_CONFIG=${CONFIG_FILE}"
        "BIOPACIFIC_AGENT_HOST=${CFG_SERVICE_HOST}"
        "${PYTHON_BIN}" "${SVC_SCRIPT_PATH}"
      )
      # Same subshell/pid-file pattern as the qdrant branch above.
      ( cd "${ROOT_DIR}" && \
        nohup "${cmd[@]}" > "${SVC_LOG_FILE}" 2>&1 & echo $! > "${SVC_PID_FILE}" )
      local pid
      pid="$(<"${SVC_PID_FILE}")"
      echo "${SVC_LABEL} started on port ${SVC_PORT}, script ${SVC_SCRIPT_PATH}, PID ${pid}"
      echo " log: ${SVC_LOG_FILE}"
      ;;
  esac
}
# Stop one service: SIGTERM first, escalate to SIGKILL if it has not exited
# within the wait_for_stop window. Always clears the pid file. A service
# that is not running is reported and treated as success.
stop_one() {
  local target="$1"
  select_service "${target}"
  local pid=""
  pid="$(get_running_pid "${SVC_PID_FILE}" "${SVC_PGREP_PATTERN}")" || pid=""
  if [[ -z "${pid}" ]]; then
    rm -f "${SVC_PID_FILE}"
    echo "${SVC_LABEL} is not running"
    return 0
  fi
  # Polite shutdown first; escalate only if the process lingers.
  kill "${pid}" >/dev/null 2>&1 || true
  local outcome="(PID ${pid})"
  if ! wait_for_stop "${pid}"; then
    echo "${SVC_LABEL} did not exit after SIGTERM; sending SIGKILL"
    kill -9 "${pid}" >/dev/null 2>&1 || true
    outcome="(SIGKILL)"
  fi
  rm -f "${SVC_PID_FILE}"
  echo "${SVC_LABEL} stopped ${outcome}"
}
# Print a one-line running/stopped summary for service $1 plus its log path.
status_one() {
  local svc="$1"
  select_service "${svc}"
  local pid line
  if pid="$(get_running_pid "${SVC_PID_FILE}" "${SVC_PGREP_PATTERN}")"; then
    printf -v line "%-10s running pid=%-7s port=%-5s gpu=%s" \
      "${SVC_LABEL}" "${pid}" "${SVC_PORT}" "${SVC_GPU}"
  else
    printf -v line "%-10s stopped port=%-5s gpu=%s" \
      "${SVC_LABEL}" "${SVC_PORT}" "${SVC_GPU}"
  fi
  printf '%s\n' "${line}"
  echo " log: ${SVC_LOG_FILE}"
}
# ---------------------------------------------------------------------------
# Dispatch
# ---------------------------------------------------------------------------
# Print CLI help to stdout. The heredoc delimiter is unquoted, so the
# $(basename ...) substitutions and ${...} expansions below run at print
# time and always reflect the arrays defined at the top of the script.
usage() {
  cat <<EOF
Usage:
$(basename "$0") [start|stop|restart|status] [SERVICE ...]
$(basename "$0") pipeline [STAGE ...]
Services: ${SERVICES[*]}
Pipeline stages: ${PIPELINE_STAGES[*]}
Config file: ${CONFIG_FILE}
(Override via CONFIG_FILE env var.)
Behavior:
- start / restart with no SERVICE arguments performs the full orchestrated flow:
start ${PREREQ_SERVICES[*]} -> run ${PIPELINE_STAGES[*]} -> start agent
- start / stop / restart / status with SERVICE arguments only manages
background services.
- pipeline runs one-shot foreground pipeline stages only.
Examples:
$(basename "$0") # full bring-up
$(basename "$0") start # same as above
$(basename "$0") start llm qdrant # start only these services
$(basename "$0") stop # stop all services
$(basename "$0") stop agent # stop only the agent
$(basename "$0") restart # full stop + full bring-up
$(basename "$0") restart embedding # restart one service
$(basename "$0") status # status of all services
$(basename "$0") pipeline # run s1 -> s2 -> s3 -> s4
$(basename "$0") pipeline s3_fetch # run only one pipeline stage
EOF
}
# Expand and validate the requested service names, printing one per line.
# No arguments (or a leading "all") expands to every service in SERVICES
# order; any unknown name is fatal.
# Fix: `svc` and `valid` are now declared local (and `ok` hoisted with
# them), matching resolve_pipeline_list — previously the loop variables
# leaked into the caller's scope.
resolve_service_list() {
  local -a requested=("$@")
  if [[ ${#requested[@]} -eq 0 || "${requested[0]}" == "all" ]]; then
    printf '%s\n' "${SERVICES[@]}"
    return
  fi
  local svc valid ok
  for svc in "${requested[@]}"; do
    ok=0
    for valid in "${SERVICES[@]}"; do
      if [[ "${svc}" == "${valid}" ]]; then
        ok=1
        break
      fi
    done
    [[ ${ok} -eq 1 ]] || die "Unknown service: ${svc}. Valid: ${SERVICES[*]}"
    printf '%s\n' "${svc}"
  done
}
# Expand and validate the requested pipeline stages, printing one per line.
# No arguments (or a leading "all") expands to the full pipeline in order;
# any unknown stage name is fatal.
resolve_pipeline_list() {
  if [[ $# -eq 0 || "${1}" == "all" ]]; then
    printf '%s\n' "${PIPELINE_STAGES[@]}"
    return
  fi
  local stage known match
  for stage in "$@"; do
    match=0
    for known in "${PIPELINE_STAGES[@]}"; do
      if [[ "${stage}" == "${known}" ]]; then
        match=1
        break
      fi
    done
    [[ ${match} -eq 1 ]] || die "Unknown pipeline stage: ${stage}. Valid: ${PIPELINE_STAGES[*]}"
    printf '%s\n' "${stage}"
  done
}
# True (0) when the argument list means "everything": either no explicit
# targets at all, or the single literal word "all".
is_full_orchestration_request() {
  case "$#:${1:-}" in
    0:)     return 0 ;;
    1:all)  return 0 ;;
  esac
  return 1
}
# Entry point: parse the optional action word (defaulting to "start" — a
# bare service name as the first argument implies "start"), bootstrap the
# python runtime, load config, then dispatch to the requested action.
main() {
  local action="start"
  if [[ $# -gt 0 ]]; then
    case "$1" in
      start|stop|restart|status|pipeline) action="$1"; shift ;;
      -h|--help) usage; exit 0 ;;
      *) # first token is already a service name -> implicit "start"
        ;;
    esac
  fi
  # Need python to load config.
  use_preferred_runtime "biopacific" # env_name gets overridden once config loads
  load_config
  # Re-resolve with the env name the config actually specifies.
  use_preferred_runtime "${CFG_ENV_NAME}"
  case "${action}" in
    start)
      # No explicit targets -> full orchestrated bring-up; otherwise start
      # only the requested services (after GPU budget validation).
      if is_full_orchestration_request "$@"; then
        run_full_start
      else
        local -a targets=()
        local t
        mapfile -t targets < <(resolve_service_list "$@")
        validate_vllm_targets "${targets[@]}"
        for t in "${targets[@]}"; do start_one "${t}"; done
      fi
      ;;
    stop)
      local -a stop_targets=()
      local t
      # Full stop uses STOP_ORDER (reverse dependency order).
      if is_full_orchestration_request "$@"; then
        stop_services_in_order "${STOP_ORDER[@]}"
      else
        mapfile -t stop_targets < <(resolve_service_list "$@")
        for t in "${stop_targets[@]}"; do stop_one "${t}"; done
      fi
      ;;
    restart)
      if is_full_orchestration_request "$@"; then
        stop_services_in_order "${STOP_ORDER[@]}"
        run_full_start
      else
        # Targeted restart: stop all requested, then start them again.
        local -a restart_targets=()
        local t
        mapfile -t restart_targets < <(resolve_service_list "$@")
        for t in "${restart_targets[@]}"; do stop_one "${t}"; done
        validate_vllm_targets "${restart_targets[@]}"
        for t in "${restart_targets[@]}"; do start_one "${t}"; done
      fi
      ;;
    status)
      local -a status_targets=()
      local t
      mapfile -t status_targets < <(resolve_service_list "$@")
      for t in "${status_targets[@]}"; do status_one "${t}"; done
      ;;
    pipeline)
      # One-shot foreground stages; services are not touched.
      local -a stages=()
      local stage
      mapfile -t stages < <(resolve_pipeline_list "$@")
      run_pipeline_sequence "${stages[@]}"
      ;;
    *)
      usage; exit 1 ;;
  esac
}
main "$@"