# Hugging Face Spaces status residue (kept as a comment so the file parses):
# Spaces: Sleeping
# Sleeping
import os

from dotenv import load_dotenv
from huggingface_hub import InferenceClient

from src.prompt import system_instruction

# Load environment variables from a local .env file (provides HF_TOKEN).
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")

# Inference client for the hosted Mistral instruct model; authenticated
# with the Hugging Face token read above (None token falls back to
# anonymous/rate-limited access).
client = InferenceClient(
    model="mistralai/Mistral-7B-Instruct-v0.2",
    token=HF_TOKEN,
)

# Conversation history, seeded with the project-defined system prompt.
messages = [
    {"role": "system", "content": system_instruction},
]
def ask_order(messages):
    """Send the conversation to the model and return the reply text.

    Args:
        messages: Chat history as a list of ``{"role": ..., "content": ...}``
            dicts (OpenAI-style), including the system message.

    Returns:
        str: The assistant's reply content from the first completion choice.
    """
    # chat_completion is the InferenceClient method for chat-style requests;
    # max_tokens caps the length of the generated reply.
    response = client.chat_completion(
        messages=messages,
        max_tokens=500,
    )
    return response.choices[0].message.content