from huggingface_hub import InferenceClient
import os
import json
import re


def _extract_plan_json(raw):
    """Strip markdown code fences from *raw* and parse it as JSON.

    Returns the parsed object on success, or an error dict carrying the
    cleaned raw output when the model returned invalid JSON.
    """
    # Remove markdown JSON blocks if model returns ```json ... ```
    cleaned = re.sub(r"```json|```", "", raw).strip()
    try:
        return json.loads(cleaned)
    except json.JSONDecodeError:
        return {"error": "Model returned invalid JSON", "raw_output": cleaned}


def query_model(prompt):
    """Ask the hosted Qwen chat model for a JSON workout plan.

    Parameters
    ----------
    prompt : str
        User prompt describing the desired workout plan.

    Returns
    -------
    dict
        The parsed workout plan on success, otherwise a dict with an
        "error" key (missing Streamlit secret, network/API failure, or
        invalid JSON from the model).
    """
    try:
        # Imported lazily so this module can be loaded outside a running
        # Streamlit app (e.g. when testing the parsing helper).
        import streamlit as st

        HF_TOKEN = st.secrets["HF_TOKEN"]
        client = InferenceClient(provider="auto", api_key=HF_TOKEN)

        response = client.chat.completions.create(
            model="Qwen/Qwen2.5-7B-Instruct",
            messages=[
                {
                    "role": "system",
                    "content": "You are a professional fitness trainer that returns ONLY JSON workout plans.",
                },
                {"role": "user", "content": prompt},
            ],
            max_tokens=1200,
            temperature=0.6,
        )

        result = response.choices[0].message.content
        return _extract_plan_json(result)
    except Exception as e:
        # Top-level boundary for the Streamlit app: surface any failure
        # (secrets lookup, network, API schema change) as an error
        # payload instead of crashing the page.
        return {"error": str(e)}