# Module_2/model_api.py
# Source: srbhavya01's Hugging Face repo, commit d8de767 (verified), 645 bytes.
import streamlit as st
from huggingface_hub import InferenceClient
def query_model(prompt: str) -> str:
    """Send *prompt* to Meta-Llama-3-8B-Instruct via the HF Inference API.

    The prompt is paired with a fixed "certified fitness trainer" system
    message and sent as a chat completion.

    Args:
        prompt: The user's message text.

    Returns:
        The assistant's reply text on success, or an ``"Error: ..."`` string
        on failure — errors are returned (not raised) so the Streamlit UI can
        display them instead of crashing.
    """
    try:
        # Token must be set in .streamlit/secrets.toml or the Space's secret
        # settings; a plain dict-style lookup raises KeyError when absent.
        hf_token = st.secrets["HF_TOKEN"]
    except KeyError:
        # Without this guard the blanket handler below would surface the
        # unhelpful message "Error: 'HF_TOKEN'".
        return "Error: HF_TOKEN secret is not configured."

    try:
        client = InferenceClient(
            model="meta-llama/Meta-Llama-3-8B-Instruct",
            token=hf_token,
        )
        response = client.chat_completion(
            messages=[
                {"role": "system", "content": "You are a certified fitness trainer."},
                {"role": "user", "content": prompt},
            ],
            max_tokens=800,
        )
        return response.choices[0].message.content
    except Exception as e:  # broad on purpose: report any API failure in the UI
        return f"Error: {str(e)}"