import os

import dotenv
from openai import OpenAI

from .Base import BaseModel

dotenv.load_dotenv()
class Qwen(BaseModel):
    """Chat-completion wrapper for Qwen2.5-32B-Instruct served via SiliconFlow.

    Credentials are read from the SILICONFLOW_API_KEY / SILICONFLOW_API_URL
    environment variables (populated from .env by ``dotenv.load_dotenv()``
    at import time).
    """

    def __init__(self, temperature=0):
        """Build the OpenAI-compatible client for the SiliconFlow endpoint.

        Args:
            temperature: Sampling temperature forwarded to every request.
                Defaults to 0 (deterministic), matching the old hard-coded
                behavior.
        """
        # NOTE(review): BaseModel.__init__ is not invoked here — confirm the
        # base class needs no setup of its own.
        api_key = os.getenv("SILICONFLOW_API_KEY")
        api_base_url = os.getenv("SILICONFLOW_API_URL")
        self.model = "Qwen/Qwen2.5-32B-Instruct"
        # BUG FIX: `temperature` was accepted but never stored, and prompt()
        # hard-coded 0 — the constructor argument had no effect. Store it so
        # it is actually used.
        self.temperature = temperature
        self.Qwen_client = OpenAI(base_url=api_base_url, api_key=api_key)

    def prompt(self, processed_input):
        """Send one non-streaming chat request.

        Args:
            processed_input: Chat messages in OpenAI format, e.g.
                ``[{"role": "user", "content": "..."}]``.

        Returns:
            Tuple of (response_text, prompt_tokens, completion_tokens).
        """
        response = self.Qwen_client.chat.completions.create(
            model=self.model,
            messages=processed_input,
            stream=False,
            # Was hard-coded to 0, silently ignoring the __init__ argument.
            temperature=self.temperature,
        )
        return (
            response.choices[0].message.content,
            response.usage.prompt_tokens,
            response.usage.completion_tokens,
        )