# Chloé Court
# Submission
# 9c0dbe0
from huggingface_hub import InferenceClient
import os
from dotenv import load_dotenv
# Load environment variables from a local .env file so HF_TOKEN is available.
load_dotenv()

# Hugging Face model id used for all chat completions in this module.
LLM_MODEL = "Qwen/Qwen2.5-7B-Instruct"

# Fail fast at import time if the Hugging Face API token is missing,
# rather than erroring on the first inference call.
_hf_token = os.getenv("HF_TOKEN")
if not _hf_token:
raise ValueError("HF_TOKEN not found. Set it in your .env file.")

# Shared module-level client; reused by call_llm for every request.
LLM_CLIENT = InferenceClient(token=_hf_token)
def call_llm(
    prompt: str,
    system_prompt: str = "",
    seed: int = 0,
    max_tokens: int = 300,
    temperature: float = 0.0,
) -> str:
    """Send a chat-completion request to the configured Hugging Face model.

    Args:
        prompt: The user message to send.
        system_prompt: Optional system message; skipped when blank/whitespace.
        seed: Sampling seed forwarded to the API for reproducibility.
        max_tokens: Maximum number of tokens to generate.
        temperature: Sampling temperature (default 0.0 for deterministic,
            greedy decoding — matches the previous hard-coded behavior).

    Returns:
        The assistant's reply text from the first completion choice.
    """
    messages = []
    # Only include a system turn when the caller actually provided one.
    if system_prompt.strip():
        messages.append({"role": "system", "content": system_prompt})
    messages.append({"role": "user", "content": prompt})
    response = LLM_CLIENT.chat.completions.create(
        model=LLM_MODEL,
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens,
        seed=seed,
    )
    return response.choices[0].message.content
def is_new_location(observation: str, known_locations: set, last_tool: str) -> bool:
    """Decide whether *observation* announces a location we have not seen.

    Only observations produced by the "play_action" tool can reveal a new
    location. The candidate location (first line of the observation) is
    rejected when it looks like a sentence (ends in punctuation) or when it
    is already present in *known_locations*.
    """
    # Locations are only ever discovered through play_action observations.
    if last_tool != "play_action":
        return False

    candidate = extract_location(observation)

    # A trailing sentence terminator means this line is prose, not a room name.
    looks_like_sentence = candidate.strip().endswith(('.', '!', '?', ')'))
    already_known = candidate in known_locations

    return not (looks_like_sentence or already_known)
def extract_location(observation: str) -> str:
    """Return the first line of *observation*, lowercased and whitespace-trimmed."""
    first_line, _, _ = observation.lower().partition("\n")
    return first_line.strip()