# sumobot_ml/llm/api.py
import re
from typing import Dict, Optional
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import BitsAndBytesConfig, AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import uvicorn
import torch
base_model_id = "Qwen/Qwen2.5-0.5B-Instruct"
# base_model_id = "qwen2.5-0.5b-instruct-sumobot-merged"
# lora_adapter_or_id = "qwen2.5-0.5b-instruct-sumobot"
lora_adapter_or_id = "adapters/qwen2.5_0.5b_lora_half"
# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(
    base_model_id,
    trust_remote_code=True,
    use_fast=True,
)
# 4-bit quantization config (currently unused; pass quantization_config=bnb_config
# to AutoModelForCausalLM.from_pretrained below to enable it)
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
# Load base model
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    device_map="auto",
    # quantization_config=bnb_config,
    # torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
# Load LoRA adapter weights on top of the base model
model = PeftModel.from_pretrained(base_model, lora_adapter_or_id)
# Merge LoRA into the base model (optional; yields a standalone model)
model = model.merge_and_unload()
model.eval()  # inference only: disable dropout
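# The commented-out base_model_id above suggests a merged checkpoint
# ("qwen2.5-0.5b-instruct-sumobot-merged") is sometimes loaded directly. A minimal
# sketch for exporting such a standalone checkpoint after merge_and_unload()
# (the output directory name is an assumption, not part of this repo):
#
#   model.save_pretrained("qwen2.5-0.5b-instruct-sumobot-merged")
#   tokenizer.save_pretrained("qwen2.5-0.5b-instruct-sumobot-merged")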
def parse_action(output: str):
    # Mapping of shorthand to full action names
    action_map = {
        "SK": "Skill",
        "DS": "Dash",
        "FWD": "Accelerate",
        "TL": "TurnLeft",
        "TR": "TurnRight",
    }
    actions: Dict[str, Optional[float]] = {}
    for part in [p.strip() for p in output.split(",")]:
        name = part
        duration = None
        # Split an optional trailing numeric duration, e.g. "DS 0.5"
        direct_match = re.match(r"^([A-Za-z]+)\s*([\d.]+)$", part)
        if direct_match:
            name = direct_match.group(1).strip()
            duration = float(direct_match.group(2))
        # Normalize shorthand to full name
        for short, full in action_map.items():
            if name.upper().startswith(short):
                name = full
                break
        actions[name] = duration
    print(f"result: {actions}")
    return {"action": actions}
def get_finetuned_action(query_state):
    # Inference with chat template
    messages = [
        {"role": "system", "content": "You are a Sumobot assistant"},
        {"role": "user", "content": f"State: {query_state}"},
    ]
    # Apply the tokenizer's built-in chat template
    chat_prompt = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    inputs = tokenizer(chat_prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=15,
        )
    # Slice out only the newly generated tokens
    generated_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    decoded = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
    return parse_action(decoded)
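# Example call (the state string mirrors the curl test at the bottom of this file;
# the parsed result depends on whatever the fine-tuned model generates):
#   get_finetuned_action(
#       "AngleToEnemy=7.77, AngleToEnemyScore=0.99, DistanceToEnemyScore=0.76, "
#       "NearBorderArenaScore=0.81, FacingToArena=-0.99"
#   )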
# --------- API Setup ----------
app = FastAPI()

class QueryInput(BaseModel):
    state: str

@app.post("/query")
def query(input: QueryInput):
    return get_finetuned_action(input.state)
# Run with: python api.py
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
# TEST CURL
# curl --location 'http://localhost:8000/query' \
# --header 'Content-Type: application/json' \
# --data '{
# "state":"AngleToEnemy=7.77, AngleToEnemyScore=0.99, DistanceToEnemyScore=0.76, NearBorderArenaScore=0.81, FacingToArena=-0.99"
# }'
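# Equivalent Python client (requires the third-party `requests` package; shown
# only as a convenience alongside the curl test above):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8000/query",
#       json={"state": "AngleToEnemy=7.77, AngleToEnemyScore=0.99, "
#                      "DistanceToEnemyScore=0.76, NearBorderArenaScore=0.81, "
#                      "FacingToArena=-0.99"},
#   )
#   print(resp.json())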