File size: 2,407 Bytes
3e32121
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e9c2363
 
 
3e32121
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
import re
from typing import Dict, Optional
from fastapi import FastAPI
from pydantic import BaseModel
from mlx_lm import load, generate
from mlx_lm.sample_utils import make_sampler

# Identifiers for the fine-tuned Sumobot model; both point at the same local
# directory/HF repo name — presumably the adapter was exported alongside the
# base weights. NOTE(review): confirm the adapter path actually contains LoRA
# weights; passing the model dir itself as adapter_path may be unintended.
base_model_id = "qwen2.5-0.5b-instruct-sumobot"
lora_adapter_or_id = "qwen2.5-0.5b-instruct-sumobot"

# Load model + tokenizer once at import time so every request reuses them.
model, tokenizer = load(base_model_id, adapter_path=lora_adapter_or_id)

def parse_action(output: str) -> Dict[str, Dict[str, Optional[float]]]:
    """Parse a comma-separated model output into an action dictionary.

    Each part is either a bare action name ("FWD") or a name followed by a
    duration ("TL 0.5"). Shorthand names are normalized to their full form.

    Args:
        output: Raw generated text, e.g. "TL 0.5, FWD".

    Returns:
        {"action": {full_name: duration_or_None, ...}}. Empty/blank parts
        are skipped, so an empty input yields {"action": {}}.
    """
    # Mapping of shorthand to full action names
    action_map = {
        "SK": "Skill",
        "DS": "Dash",
        "FWD": "Accelerate",
        "TL": "TurnLeft",
        "TR": "TurnRight",
    }

    actions: Dict[str, Optional[float]] = {}

    for part in (p.strip() for p in output.split(",")):
        if not part:
            # Skip empty fragments (e.g. trailing commas or empty output)
            # instead of recording a bogus "" action.
            continue

        name = part
        duration = None

        # Only accept well-formed numbers (int or single-dot float) so that
        # malformed text like "FWD 1.2.3" cannot crash float().
        direct_match = re.match(r"^([A-Za-z]+)\s*(\d+(?:\.\d+)?)$", part)
        if direct_match:
            name = direct_match.group(1)
            duration = float(direct_match.group(2))

        # Normalize shorthand to full name
        for short, full in action_map.items():
            if name.upper().startswith(short):
                name = full
                break

        actions[name] = duration

    return {"action": actions}


def get_finetuned_action(query_state: str) -> Dict[str, Dict[str, Optional[float]]]:
    """Run the fine-tuned model on a game-state string and parse its action.

    Args:
        query_state: Serialized game state (e.g. "AngleToEnemy=7.77, ...").

    Returns:
        The parsed action dict from parse_action().
    """
    # Chat-style prompt: system role fixes the assistant's task, user role
    # carries the game state.
    messages = [
        {"role": "system", "content": "You are a Sumobot assistant that decides actions based on game state."},
        {"role": "user", "content": f"Given this game state: {query_state}"},
    ]

    # Apply the tokenizer's built-in chat template to produce a plain-text
    # prompt with the generation marker appended.
    chat_prompt = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )

    # mlx_lm.generate returns the completed text as a single string; the
    # previous char-by-char concatenation loop was a quadratic no-op.
    result = generate(model, tokenizer, chat_prompt, max_tokens=50)

    return parse_action(result)

# --------- API Setup ----------
app = FastAPI()

class QueryInput(BaseModel):
    """Request body for POST /query."""
    # Serialized game state string, e.g.
    # "AngleToEnemy=7.77, AngleToEnemyScore=0.99, ..."
    state: str

@app.post("/query")
def query(input: QueryInput):
    """Return the model's parsed action for the given game state.

    Response shape: {"action": {action_name: duration_or_null, ...}}.
    """
    return get_finetuned_action(input.state)

# Run with: python rag_api.py
if __name__ == "__main__":
    # Imported here because uvicorn is only needed when running the file
    # directly (and was previously referenced without any import — NameError).
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)

# TEST CURL
# curl --location 'http://localhost:8000/query' \
# --header 'Content-Type: application/json' \
# --data '{
#     "state":"AngleToEnemy=7.77, AngleToEnemyScore=0.99, DistanceToEnemyScore=0.76, NearBorderArenaScore=0.81, FacingToArena=-0.99"
# }'