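"""FastAPI server that serves Sumobot actions from a LoRA-fine-tuned Qwen2.5 model."""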
import re
from typing import Dict, Optional

import torch
import uvicorn
from fastapi import FastAPI
from peft import PeftModel
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

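# Hugging Face ID of the base model and local path (or hub ID) of the fine-tuned LoRA adapter.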
base_model_id = "Qwen/Qwen2.5-0.5B-Instruct"
lora_adapter_or_id = "adapters/qwen2.5_0.5b_lora_half"

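# Tokenizer supplies the chat template used to format prompts for generation.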
tokenizer = AutoTokenizer.from_pretrained(
    base_model_id,
    trust_remote_code=True,
    use_fast=True,
)

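# 4-bit NF4 quantization keeps the 0.5B base model's memory footprint small.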
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

base_model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    device_map="auto",
    quantization_config=bnb_config,
    trust_remote_code=True,
)

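# Attach the LoRA adapter and fold its weights into the base model for inference.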
model = PeftModel.from_pretrained(base_model, lora_adapter_or_id)
model = model.merge_and_unload()

def parse_action(output: str):
    """Map the model's shorthand output (e.g. "FWD 0.5, TL") to named actions."""
    action_map = {
        "SK": "Skill",
        "DS": "Dash",
        "FWD": "Accelerate",
        "TL": "TurnLeft",
        "TR": "TurnRight",
    }

    actions: Dict[str, Optional[float]] = {}

    for part in [p.strip() for p in output.split(",")]:
        name = part
        duration = None

        # Split an optional numeric duration from the action token, e.g. "FWD 0.5".
        direct_match = re.match(r"^([A-Za-z]+)\s*([\d.]+)$", part)
        if direct_match:
            name = direct_match.group(1).strip()
            duration = float(direct_match.group(2))

        # Expand the shorthand prefix to its full action name.
        for short, full in action_map.items():
            if name.upper().startswith(short):
                name = full
                break

        actions[name] = duration

    print(f"result: {actions}")
    return {"action": actions}

def get_finetuned_action(query_state: str):
    """Prompt the fine-tuned model with the current game state and parse its reply."""
    messages = [
        {"role": "system", "content": "You are a Sumobot assistant"},
        {"role": "user", "content": f"State: {query_state}"},
    ]

    chat_prompt = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )

    inputs = tokenizer(chat_prompt, return_tensors="pt").to(model.device)

    outputs = model.generate(
        **inputs,
        max_new_tokens=15,
    )

    # Decode only the newly generated tokens, skipping the echoed prompt.
    generated_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    decoded = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()

    parsed_result = parse_action(decoded)
    return parsed_result

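# Minimal HTTP wrapper: POST /query with {"state": ...} returns the parsed action.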
app = FastAPI()


class QueryInput(BaseModel):
    state: str


@app.post("/query")
def query(input: QueryInput):
    return get_finetuned_action(input.state)

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)