srbhavya01 committed on
Commit
038cb96
·
verified ·
1 Parent(s): 180afa2

Update model_api.py

Browse files
Files changed (1) hide show
  1. model_api.py +9 -23
model_api.py CHANGED
@@ -1,39 +1,25 @@
1
  from huggingface_hub import InferenceClient
2
  import os
3
- import json
4
- import re
5
 
6
def query_model(prompt):
    """Send *prompt* to the hosted chat model and return the parsed JSON plan.

    Reads the HF token from Streamlit secrets, queries the Qwen chat model,
    strips any markdown code fences from the reply, and parses it as JSON.

    Returns:
        dict: the parsed workout plan on success; on unparsable output,
        ``{"error": "Model returned invalid JSON", "raw_output": ...}``;
        on any other failure (missing secret, network error),
        ``{"error": <message>}``.
    """
    try:
        # Local import: streamlit is only available when run inside the app.
        import streamlit as st

        token = st.secrets["HF_TOKEN"]

        client = InferenceClient(
            provider="auto",
            api_key=token,
        )

        chat = client.chat.completions.create(
            model="Qwen/Qwen2.5-7B-Instruct",
            messages=[
                {"role": "system", "content": "You are a professional fitness trainer that returns ONLY JSON workout plans."},
                {"role": "user", "content": prompt},
            ],
            max_tokens=1200,
            temperature=0.6,
        )

        raw = chat.choices[0].message.content
        # Drop any ```json ... ``` fences the model may wrap the payload in.
        cleaned = re.sub(r"```json|```", "", raw).strip()

        try:
            return json.loads(cleaned)
        except json.JSONDecodeError:
            return {"error": "Model returned invalid JSON", "raw_output": cleaned}

    except Exception as e:
        # Boundary handler: callers always receive a dict.
        return {"error": str(e)}
 
1
  from huggingface_hub import InferenceClient
2
  import os
 
 
3
 
4
def query_model(prompt):
    """Ask the hosted Llama chat model to respond as a fitness trainer.

    Args:
        prompt: The user's request, forwarded verbatim as the user message.

    Returns:
        str: The model's reply text on success, or a string starting with
        ``"Error: "`` describing the failure — callers receive a plain
        string either way.
    """
    try:
        HF_TOKEN = os.getenv("HF_TOKEN")
        # Fail fast with a clear message instead of letting a None token
        # surface later as an opaque 401 from the inference API.
        if not HF_TOKEN:
            raise RuntimeError("HF_TOKEN environment variable is not set")

        client = InferenceClient(
            model="meta-llama/Llama-3.2-3B-Instruct",
            token=HF_TOKEN,
        )

        response = client.chat_completion(
            messages=[
                {"role": "system", "content": "You are a certified professional fitness trainer."},
                {"role": "user", "content": prompt},
            ],
            max_tokens=2000,
            temperature=0.7,
        )

        return response.choices[0].message.content

    except Exception as e:
        # Boundary handler: surface any failure as a readable string.
        return f"Error: {str(e)}"