import re
from typing import Dict, Optional
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import BitsAndBytesConfig, AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import uvicorn
import torch
# Hub id of the base chat model the LoRA adapter was trained on.
base_model_id = "Qwen/Qwen2.5-0.5B-Instruct"
# base_model_id = "qwen2.5-0.5b-instruct-sumobot-merged"
#lora_adapter_or_id = "qwen2.5-0.5b-instruct-sumobot"
# Local path (or hub id) of the LoRA adapter fine-tuned for the sumobot task.
lora_adapter_or_id = "adapters/qwen2.5_0.5b_lora_half"
# # Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(
    base_model_id,
    trust_remote_code=True, use_fast=True)
# 4-bit NF4 quantization settings.
# NOTE(review): currently unused — the `quantization_config=bnb_config`
# argument below is commented out, so the model loads unquantized.
# Either re-enable the argument or remove this config.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
# Load base model (device_map="auto" places it on GPU when available).
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    device_map="auto",
    # quantization_config=bnb_config,
    # torch_dtype=torch.bfloat16,
    trust_remote_code=True )
# Load LoRA weights on top of the base model.
model = PeftModel.from_pretrained(base_model, lora_adapter_or_id)
# Merge LoRA into the base model (optional if you want a standalone model);
# after this `model` is a plain transformers model with merged weights.
model = model.merge_and_unload()
# model.eval()
import re
from typing import Dict, Optional
def parse_action(output: str) -> Dict[str, Dict[str, Optional[float]]]:
    """Parse a comma-separated model output string into named actions.

    Each comma-separated part is either a bare action token ("FWD") or a
    token followed by a duration ("SK 1.5", "TL0.2").  Shorthand tokens
    are normalized to the full action names used by the game.

    Args:
        output: Raw decoded model text, e.g. "SK 1.5, TL 0.2".

    Returns:
        {"action": {action_name: duration_or_None, ...}}
    """
    # Mapping of shorthand to full action names.
    action_map = {
        "SK": "Skill",
        "DS": "Dash",
        "FWD": "Accelerate",
        "TL": "TurnLeft",
        "TR": "TurnRight",
    }
    actions: Dict[str, Optional[float]] = {}
    for part in (p.strip() for p in output.split(",")):
        if not part:
            # Skip empty fragments (empty input, trailing/double commas);
            # previously these produced a bogus {"": None} entry.
            continue
        name = part
        duration: Optional[float] = None
        # Token optionally followed by a duration, e.g. "SK 1.5" or "TL0.2".
        # The numeric group only admits a well-formed float, so float()
        # cannot raise (the old "[\d.]+" accepted e.g. "1.2.3" and crashed).
        direct_match = re.match(r"^([A-Za-z]+)\s*(\d+(?:\.\d+)?|\.\d+)$", part)
        if direct_match:
            name = direct_match.group(1)
            duration = float(direct_match.group(2))
        # Normalize shorthand to full name (case-insensitive prefix match).
        for short, full in action_map.items():
            if name.upper().startswith(short):
                name = full
                break
        actions[name] = duration
    print(f"result: {actions}")
    return {"action": actions}
def get_finetuned_action(query_state):
    """Query the fine-tuned model with a game-state string and parse its reply.

    Builds a chat prompt via the tokenizer's template, generates a short
    completion, and converts the decoded text into an action dict via
    ``parse_action``.
    """
    # Wrap the raw state in the chat format the model was fine-tuned on.
    messages = [
        {"role": "system", "content": "You are a Sumobot assistant"},
        {"role": "user", "content": f"State: {query_state}"},
    ]
    # Render the conversation with the tokenizer's built-in chat template.
    prompt_text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    model_inputs = tokenizer(prompt_text, return_tensors="pt").to(model.device)
    generation = model.generate(**model_inputs, max_new_tokens=15)
    # Keep only the newly generated tokens (drop the echoed prompt).
    prompt_len = model_inputs["input_ids"].shape[1]
    new_tokens = generation[0][prompt_len:]
    reply = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
    return parse_action(reply)
# --------- API Setup ----------
app = FastAPI()


class QueryInput(BaseModel):
    # Raw game-state string as produced by the game client,
    # e.g. "AngleToEnemy=7.77, DistanceToEnemyScore=0.76, ...".
    state: str
@app.post("/query")
def query(query_input: QueryInput):
    """Return the model's parsed action for the posted game state.

    Body: {"state": "<game state string>"} -> {"action": {name: duration}}.
    """
    # Renamed parameter from `input`, which shadowed the builtin; for a
    # FastAPI body parameter the name is internal, so the HTTP API is
    # unchanged.
    return get_finetuned_action(query_input.state)
# Run with: python rag_api.py
if __name__ == "__main__":
    # Serve on all interfaces, port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)
# TEST CURL
# curl --location 'http://localhost:8000/query' \
# --header 'Content-Type: application/json' \
# --data '{
# "state":"AngleToEnemy=7.77, AngleToEnemyScore=0.99, DistanceToEnemyScore=0.76, NearBorderArenaScore=0.81, FacingToArena=-0.99"
# }'