Update model_api.py

model_api.py (CHANGED: +6, -13)
@@ -1,28 +1,21 @@
 from huggingface_hub import InferenceClient
 import os
-from dotenv import load_dotenv
-
-load_dotenv()
 
 def query_model(prompt):
     try:
         HF_TOKEN = os.getenv("HF_TOKEN")
-
-        client = InferenceClient(
+        # You can also set provider at the client level
+        client = InferenceClient(api_key=HF_TOKEN, provider="auto")
+
+        response = client.chat.completions.create(
             model="Qwen/Qwen2.5-7B-Instruct",
-            token=HF_TOKEN
-        )
-
-        response = client.chat_completion(
             messages=[
                 {"role": "system", "content": "You are a professional fitness trainer."},
                 {"role": "user", "content": prompt}
            ],
-            max_tokens=
+            max_tokens=1500,
             temperature=0.7
         )
-
         return response.choices[0].message.content
-
     except Exception as e:
-        return f"
+        return f"Error: {str(e)}"
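For context on the API switch: in recent huggingface_hub releases, client.chat.completions.create(...) is the OpenAI-compatible spelling of the older chat_completion(...) task helper, api_key is accepted as an alias for the older token argument, and the model can be passed per request instead of being fixed in the client constructor. A minimal sketch of the two equivalent call styles; the hf_... placeholder token and the one-line prompt are illustrative only:

from huggingface_hub import InferenceClient

client = InferenceClient(api_key="hf_...")  # api_key aliases the older token argument

messages = [{"role": "user", "content": "Say hi."}]

# Older task helper (the removed side of the diff):
old_style = client.chat_completion(
    model="Qwen/Qwen2.5-7B-Instruct", messages=messages, max_tokens=32
)

# OpenAI-compatible interface (the added side of the diff):
new_style = client.chat.completions.create(
    model="Qwen/Qwen2.5-7B-Instruct", messages=messages, max_tokens=32
)

# Both return the same chat-completion output shape:
print(old_style.choices[0].message.content)
print(new_style.choices[0].message.content)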
model_api.py after this commit:

from huggingface_hub import InferenceClient
import os

def query_model(prompt):
    try:
        HF_TOKEN = os.getenv("HF_TOKEN")
        # You can also set provider at the client level
        client = InferenceClient(api_key=HF_TOKEN, provider="auto")

        response = client.chat.completions.create(
            model="Qwen/Qwen2.5-7B-Instruct",
            messages=[
                {"role": "system", "content": "You are a professional fitness trainer."},
                {"role": "user", "content": prompt}
            ],
            max_tokens=1500,
            temperature=0.7
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {str(e)}"
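Since the commit drops python-dotenv, HF_TOKEN must now come from the process environment itself (on a Space, typically a repository secret). A quick smoke test under that assumption; the prompt string is just an illustration:

import os
from model_api import query_model

# Assumes HF_TOKEN was already exported, e.g. `export HF_TOKEN=hf_...`
assert os.getenv("HF_TOKEN"), "set HF_TOKEN before running"

print(query_model("Design a 20-minute beginner bodyweight workout."))

Note that query_model returns the error string instead of raising, so a bad or missing token shows up in the printed output rather than as an exception.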