import os
# python-dotenv: pulls KEY=VALUE pairs from a local .env file into os.environ.
from dotenv import load_dotenv
# Official Groq SDK client class.
from groq import Groq
# Load .env at import time so credentials (presumably the Groq API key) are
# available via os.environ before any client is constructed -- TODO confirm
# which variable name the caller reads.
load_dotenv()
class GroqClient:
    """Thin wrapper that owns a configured Groq SDK client.

    The raw SDK client is exposed as the ``client`` attribute; consumers
    (e.g. ``GroqCompletion``) reach the API via ``wrapper.client.chat...``.
    """

    def __init__(self, api_key: str) -> None:
        # Instantiate the SDK client once with the supplied API key.
        self.client = Groq(api_key=api_key)
class GroqCompletion:
    """Build a prompt from a template and run a Groq chat completion.

    Parameters mirror the Groq chat-completions API. ``client`` is a
    ``GroqClient`` wrapper (the raw SDK client is reached via
    ``client.client``), not the SDK client itself.
    """

    def __init__(self, client, model, domain, prompt_template, user_content,
                 temperature, max_tokens, top_p, stream, stop):
        self.client = client                    # GroqClient wrapper
        self.model = model                      # model identifier string
        self.domain = domain                    # domain injected into the system role
        self.prompt_template = prompt_template  # instruction text prepended to user content
        self.user_content = user_content        # the user's question/content
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.top_p = top_p
        self.stream = stream                    # True -> API returns an iterator of chunks
        self.stop = stop                        # stop sequence(s) or None

    def create_completion(self) -> str:
        """Send the composed prompt and return the generated text.

        Returns the full response text for both streaming and
        non-streaming calls.

        Bug fix: the original always iterated the response as a stream
        and read ``chunk.choices[0].delta.content``. When ``stream`` is
        False the SDK returns a single ChatCompletion object, not a
        chunk iterator, and the text lives at
        ``choices[0].message.content`` -- the old code broke for
        non-streaming requests. We now branch on ``self.stream``.
        """
        prompt = f"{self.prompt_template}\n\n{self.user_content}\n"
        system_role = f"you are an helpful AI assistant in text based question answering and retriving context from given domain {self.domain}"
        completion = self.client.client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": system_role},
                {"role": "user", "content": prompt},
            ],
            temperature=self.temperature,
            max_tokens=self.max_tokens,
            top_p=self.top_p,
            stream=self.stream,
            stop=self.stop,
        )
        if self.stream:
            # Streaming: concatenate delta fragments; ``content`` may be
            # None on role/finish chunks, hence the ``or ""`` guard.
            return "".join(
                chunk.choices[0].delta.content or "" for chunk in completion
            )
        # Non-streaming: a single ChatCompletion carrying the full message.
        return completion.choices[0].message.content or ""