Kurian07 committed on
Commit
2ea5d4f
·
verified ·
1 Parent(s): ea273a9

Upload llm.py

Browse files
Files changed (1) hide show
  1. modules/llm.py +53 -0
modules/llm.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os  # NOTE(review): not referenced in this module — confirm before removing
from dotenv import load_dotenv
from groq import Groq  # Groq SDK client class

# Load environment variables from a local .env file at import time
# (presumably supplies GROQ_API_KEY — TODO confirm against callers).
load_dotenv()
6
+
7
class GroqClient:
    """Thin wrapper that owns an authenticated Groq SDK client.

    The underlying SDK client is exposed as the ``client`` attribute
    for callers (e.g. ``GroqCompletion``) to use directly.
    """

    def __init__(self, api_key):
        """Build the SDK client from the supplied API key."""
        sdk_client = Groq(api_key=api_key)
        self.client = sdk_client
10
+
11
+
12
class GroqCompletion:
    """Builds and runs a chat-completion request against a Groq-hosted model.

    Parameters mirror the Groq chat-completions API:
        client:           GroqClient wrapper exposing the SDK client as ``.client``
        model:            model identifier string
        domain:           domain name injected into the system prompt
        prompt_template:  text prepended to the user content
        user_content:     the user's question/content
        temperature, max_tokens, top_p, stream, stop:
                          passed through to the API unchanged
    """

    def __init__(self, client, model, domain, prompt_template, user_content,
                 temperature, max_tokens, top_p, stream, stop):
        self.client = client                    # GroqClient wrapper (SDK client at .client)
        self.model = model
        self.domain = domain
        self.prompt_template = prompt_template
        self.user_content = user_content
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.top_p = top_p
        self.stream = stream                    # True -> iterate chunks; False -> single response
        self.stop = stop

    def create_completion(self):
        """Run the chat completion and return the generated text as one string.

        Handles both streaming and non-streaming modes. (The previous
        implementation assumed a streaming response and crashed when
        ``stream=False``, because a non-streaming result is a single
        response object, not an iterable of delta chunks.)
        """
        prompt = f"{self.prompt_template}\n\n{self.user_content}\n"
        system_role = f"you are an helpful AI assistant in text based question answering and retriving context from given domain {self.domain}"

        completion = self.client.client.chat.completions.create(
            model=self.model,
            messages=[
                {
                    "role": "system",
                    "content": system_role
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ],
            temperature=self.temperature,
            max_tokens=self.max_tokens,
            top_p=self.top_p,
            stream=self.stream,
            stop=self.stop,
        )

        if not self.stream:
            # Non-streaming: a single response object carrying the full message.
            return completion.choices[0].message.content or ""

        # Streaming: concatenate incremental delta chunks; join avoids
        # quadratic string concatenation.
        parts = []
        for chunk in completion:
            parts.append(chunk.choices[0].delta.content or "")
        return "".join(parts)
53
+