# sumobot_ml/llm/api_gguf.py
"""
Model Pool Sumobot API - Multiple model instances for true parallelism
Requires more GPU memory but can process multiple requests simultaneously
"""
import re
import time
import asyncio
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor
from typing import Dict, Optional

import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
from llama_cpp import Llama

# ==================== Configuration ====================
MODEL_PATH = "models/qwen2.5-sumobot-half-q8_0.gguf"
N_GPU_LAYERS = -1  # -1 offloads all layers to the GPU
NUM_WORKERS = 20   # Number of parallel model instances
# Matches "<name> <duration>" tokens such as "SK 0.5" or "TL1.2";
# bare names without a duration are handled separately in parse_action
ACTION_PATTERN = re.compile(r"^([A-Za-z]+)\s*([\d.]+)$")

# ==================== Worker Process ====================
class ModelWorker:
    """Each worker has its own model instance"""

    def __init__(self):
        self.model = None

    def initialize(self):
        """Load model in worker process"""
        if self.model is None:
            print(f"Loading model in worker {mp.current_process().name}...")
            self.model = Llama(
                model_path=MODEL_PATH,
                n_ctx=512,
                n_threads=2,  # Fewer threads per worker
                n_gpu_layers=N_GPU_LAYERS,
                verbose=False,
                n_batch=512,
                use_mmap=True,
            )
            print(f"✅ Model loaded in {mp.current_process().name}")

    def parse_action(self, output: str) -> Dict[str, Optional[float]]:
        """Parse model output to action dict"""
        action_map = {
            "SK": "Skill", "DS": "Dash", "FWD": "Accelerate",
            "TL": "TurnLeft", "TR": "TurnRight",
        }
        actions = {}
        for part in output.split(","):
            part = part.strip()
            if not part:
                continue
            name = part
            duration = None
            match = ACTION_PATTERN.match(part)
            if match:
                name = match.group(1)
                duration = float(match.group(2))
            name_upper = name.upper()
            for short, full in action_map.items():
                if name_upper.startswith(short):
                    actions[full] = duration
                    break
        return actions
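
    # For example (hypothetical model output shown), the parser maps
    # "SK 0.5, TL1.2, DS" to {"Skill": 0.5, "TurnLeft": 1.2, "Dash": None};
    # tokens without a parsable duration map to None.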

    def process(self, state: str) -> dict:
        """Process a single request"""
        if self.model is None:
            self.initialize()

        prompt = f"""<|im_start|>system
Sumobot assistant.<|im_end|>
<|im_start|>user
{state}<|im_end|>
<|im_start|>assistant
action:"""

        start_time = time.time()
        output = self.model(
            prompt,
            max_tokens=50,
            temperature=0.1,
            stop=["<|im_end|>", "\n"],
            top_p=0.95,
            echo=False,
        )
        inference_time = (time.time() - start_time) * 1000

        raw_output = output['choices'][0]['text'].strip()
        return {
            "raw_output": raw_output,
            "action": self.parse_action(raw_output),
            "inference_time_ms": round(inference_time, 2),
            "worker": mp.current_process().name,
        }
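
# Illustrative shape of ModelWorker.process()'s return value (the values below
# are placeholders, not real output):
#   {
#       "raw_output": "SK 0.5, TL 1.2",
#       "action": {"Skill": 0.5, "TurnLeft": 1.2},
#       "inference_time_ms": 48.3,
#       "worker": "SpawnProcess-3",
#   }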

# ==================== Global Worker Instance ====================
# With the 'spawn' context, each pool process re-imports this module and gets
# its own _worker; the model itself is loaded lazily on the first call.
_worker = ModelWorker()

def process_request(state: str) -> dict:
    """Module-level function so ProcessPoolExecutor can pickle it"""
    return _worker.process(state)
# ==================== FastAPI ====================
app = FastAPI()
class QueryInput(BaseModel):
    state: str

# Process pool for parallel execution
executor: Optional[ProcessPoolExecutor] = None

@app.on_event("startup")
async def startup_event():
    """Initialize process pool on startup"""
    global executor
    print(f"🚀 Starting {NUM_WORKERS} model workers...")
    executor = ProcessPoolExecutor(
        max_workers=NUM_WORKERS,
        mp_context=mp.get_context('spawn'),
    )

    # Warm up: one task per worker, so each process loads its model copy now
    # instead of on the first real request
    print("🔥 Warming up workers...")
    dummy_state = "BotPos=[0,0], BotRot=0, EnemyPos=[1,1], EnemyRot=0"
    futures = [
        executor.submit(process_request, dummy_state)
        for _ in range(NUM_WORKERS)
    ]
    for future in futures:
        future.result()
    print(f"✅ {NUM_WORKERS} workers ready!\n")

@app.on_event("shutdown")
async def shutdown_event():
    """Clean up process pool"""
    global executor
    if executor:
        executor.shutdown(wait=True)

@app.post("/query")
async def query(input: QueryInput):
    """
    Process a request using the worker pool.
    Up to NUM_WORKERS requests can run truly in parallel.
    """
    try:
        loop = asyncio.get_running_loop()
        result = await loop.run_in_executor(
            executor,
            process_request,
            input.state,
        )
        return result
    except Exception as e:
        return {
            "error": str(e),
            "message": "Failed to process request",
        }
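
# A minimal client sketch for exercising the endpoint concurrently (assumes the
# server runs on localhost:8000; the httpx dependency and the fire() helper are
# assumptions, not part of this module):
#
#   import asyncio, httpx
#
#   async def fire(n: int = 8):
#       state = "BotPos=[0,0], BotRot=0, EnemyPos=[1,1], EnemyRot=0"
#       async with httpx.AsyncClient(timeout=30) as client:
#           tasks = [client.post("http://localhost:8000/query", json={"state": state})
#                    for _ in range(n)]
#           for resp in await asyncio.gather(*tasks):
#               print(resp.json())
#
#   asyncio.run(fire())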

@app.get("/stats")
async def stats():
    """Get worker pool statistics"""
    return {
        "num_workers": NUM_WORKERS,
        "model_path": MODEL_PATH,
    }
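
# Example, with the defaults configured above:
#   $ curl http://localhost:8000/stats
#   {"num_workers": 20, "model_path": "models/qwen2.5-sumobot-half-q8_0.gguf"}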

# ==================== Run ====================
if __name__ == "__main__":
    # Required for Windows multiprocessing
    mp.set_start_method('spawn', force=True)

    print(f"🚀 Starting API with {NUM_WORKERS} parallel workers")
    print("⚡ Can process multiple requests simultaneously\n")

    uvicorn.run(
        app,
        host="0.0.0.0",
        port=8000,
        workers=1,  # Parallelism comes from the process pool, not uvicorn workers
        log_level="warning",
    )

# ==================== Notes ====================
"""
MEMORY REQUIREMENTS:
- Each worker loads a full copy of the model.
- Example: a Q4 model at ~800MB × 4 workers = ~3.2GB VRAM minimum; the
  defaults above (a Q8_0 model, NUM_WORKERS = 20) need substantially more.
- Adjust NUM_WORKERS to fit your GPU memory.

PERFORMANCE:
- Queue-based (single model): ~50ms per request, processed sequentially.
- Pool-based (4 models): ~50ms per request, with 4 running in parallel.
- For 4 Unity clients polling at a 100ms interval, the pool is ~4x faster.

WHEN TO USE WHICH:
- Queue (single model): fewer than 3 concurrent clients, or limited VRAM.
- Pool (multiple models): 3+ concurrent clients and enough VRAM.
"""