from huggingface_hub import InferenceClient
import os
def query_model(prompt):
    """Send *prompt* to a chat model and return the assistant's reply text.

    Queries Qwen/Qwen2.5-7B-Instruct through the Hugging Face Inference
    API with a fixed fitness-trainer system prompt.

    Args:
        prompt: The user's message to send to the model.

    Returns:
        The model's reply as a string, or a human-readable ``"Error: ..."``
        string on failure — errors are reported in-band (not raised) because
        the caller displays the return value directly.
    """
    try:
        hf_token = os.getenv("HF_TOKEN")
        if not hf_token:
            # Fail fast with a clear message instead of letting the API
            # return an opaque authentication error downstream.
            return "Error: HF_TOKEN environment variable is not set"
        # provider="auto" lets the hub select an available inference provider.
        client = InferenceClient(api_key=hf_token, provider="auto")
        response = client.chat.completions.create(
            model="Qwen/Qwen2.5-7B-Instruct",
            messages=[
                {"role": "system", "content": "You are a professional fitness trainer."},
                {"role": "user", "content": prompt},
            ],
            max_tokens=1500,
            temperature=0.7,
        )
        return response.choices[0].message.content
    except Exception as e:
        # Boundary handler: surface any failure as display text for the UI.
        return f"Error: {str(e)}"