alessandroptsn committed on
Commit
035598f
verified
1 Parent(s): 3409bf7

Update llm_func.py

Browse files
Files changed (1) hide show
  1. llm_func.py +153 -23
llm_func.py CHANGED
@@ -1,24 +1,154 @@
1
  import time
2
- from llama_cpp import Llama
3
- #model = Llama.from_pretrained(repo_id="tensorblock/SmolLM-135M-Instruct-GGUF",filename="*SmolLM-135M-Instruct-Q6_K.gguf",verbose=False,n_ctx=1000)
4
- model = Llama.from_pretrained(repo_id="HuggingFaceTB/SmolLM2-360M-Instruct-GGUF",filename="*smollm2-360m-instruct-q8_0.gguf",verbose=False,n_ctx=500)
5
- #model = Llama.from_pretrained(repo_id="tensorblock/SmolLM2-360M-GGUF",filename="*SmolLM2-360M-Q4_K_M.gguf",verbose=False,n_ctx=500)
6
- #model = Llama.from_pretrained(repo_id="bartowski/SmolLM2-135M-Instruct-GGUF",filename="*SmolLM2-135M-Instruct-Q4_K_M.gguf",verbose=False,n_ctx=500)
7
- #model = Llama.from_pretrained(repo_id="bartowski/granite-3.0-1b-a400m-instruct-GGUF",filename="*granite-3.0-1b-a400m-instruct-Q5_K_S.gguf",verbose=False,n_ctx=1000)
8
-
9
-
10
def mdl(input):
    """Generate a single-turn assistant reply for *input* using the module-level `model`.

    Prints the incoming prompt and the elapsed generation time, then returns
    the raw completion text with any stray "assistant" role marker removed.
    NOTE(review): the parameter name `input` shadows the builtin of the same
    name — consider renaming if this is ever refactored.
    """
    print(input)  # echo the incoming prompt for debugging
    start = time.time()
    # Granite/SmolLM-style chat template; generation halts at the next
    # role tag because of stop=["<|"].
    output = model(prompt=f"""<|start_of_role|>system<|end_of_role|>You are a helpful chatbot<|end_of_text|>
<|start_of_role|>user<|end_of_role|>{input}<|end_of_text|>
<|start_of_role|>assistant<|end_of_role|>""",
        max_tokens=200,
        temperature=0.1,  # low temperature + top_p -> near-deterministic output
        top_p=0.1,
        echo=False,  # do not repeat the prompt in the completion
        stop=["<|"])
    end = time.time()
    total_time = end - start
    print(f"Execution time: {total_time:.2f} seconds")
    # Strip a leaked role marker some templates emit at the start of the text.
    return output["choices"][0]["text"].replace('\nassistant\n','')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import time
2
+ import json
3
+
4
def llm_normal(question, llm):
    """Send *question* to *llm* as a single user message and return the reply text.

    Prints the question, the model's reply, and the elapsed wall-clock time.
    """
    print(question)
    started = time.time()
    response = llm.create_chat_completion(
        messages=[{"role": "user", "content": question}]
    )
    total_time = time.time() - started
    reply = response["choices"][0]["message"]["content"]
    print(reply)
    print(f"Execution time: {total_time:.2f} seconds")
    return reply
20
+
21
+
22
def llm_agent(question, llm):
    """Ask *llm* to reformat *question* as JSON and return the raw reply text.

    Uses llama-cpp's JSON response mode (response_format={"type": "json_object"}).
    The reply is parsed only for logging: when it is valid JSON the parsed
    object is printed, otherwise the raw text is printed. The raw reply string
    is returned either way.
    """
    print(question)
    start = time.time()
    answer = llm.create_chat_completion(
        messages=[
            {
                "role": "system",
                "content": """
You are a json formatter assistant,
specialized in converting user-supplied raw text into json format
"""
            },
            {
                "role": "user",
                "content": question + """
format in JSON: """
            }
        ],
        response_format={"type": "json_object"}
    )
    total_time = time.time() - start
    content = answer["choices"][0]["message"]["content"]
    # Best-effort parse for logging only; was a bare `except:` that hid
    # unrelated bugs — narrowed to the decode failures we actually expect.
    try:
        print(json.loads(content))
    except (json.JSONDecodeError, TypeError):
        print(content)
    print(f"Execution time: {total_time:.2f} seconds")
    return content
50
+
51
+
52
def name_age(name, age):
    """Return the string '<name> - <age>' with both values coerced to str."""
    return str(name) + ' - ' + str(age)

def llm_functioncalling(question, llm):
    """Extract a name and age from *question* via the model and format them.

    Advertises `name_age` as a tool schema and forces JSON output; the reply
    content is parsed as JSON and fed to name_age(). On success the formatted
    "name - age" string is returned; on any parse/shape failure the raw reply
    text is returned instead.
    """
    print(question)
    start = time.time()
    answer = llm.create_chat_completion(
        messages=[
            {
                "role": "system",
                "content": """
You are a json formatter assistant,
specialized in converting user-supplied raw text into json format
"""
            },
            {
                "role": "user",
                "content": question + """
format in JSON: """
            }
        ],
        tools=[{
            "type": "function",
            "function": {
                "name": "name_age",
                "description": """return the name and age, if not informed,
set name None and age 0""",
                "parameters": {
                    "type": "object",
                    "title": "return the name and age",
                    "properties": {
                        "name": {
                            "title": "Name",
                            "type": "string"
                        },
                        "age": {
                            "title": "Age",
                            "type": "integer"
                        }
                    },
                    "required": ["name", "age"]
                }
            }
        }],
        response_format={"type": "json_object"},
    )
    total_time = time.time() - start
    content = answer["choices"][0]["message"]["content"]
    # Was a bare `except:` that also hid genuine bugs; narrowed to the
    # failures we expect (invalid JSON, missing keys, non-dict payload).
    # name_age() is now computed once instead of twice.
    try:
        args = json.loads(content)
        result = name_age(args['name'], args['age'])
        print(result)
        print(f"Execution time: {total_time:.2f} seconds")
        return result
    except (json.JSONDecodeError, KeyError, TypeError):
        print(content)
        print(f"Execution time: {total_time:.2f} seconds.")
        return content
108
+
109
+
110
+
111
+ import requests
112
+ from bs4 import BeautifulSoup
113
+
114
def search(question):
    """Scrape a context snippet for *question* from a Bing results page.

    Returns a string of the form 'context : <snippet>' when the expected page
    structure is found, otherwise a single space so callers can concatenate it
    harmlessly. NOTE(review): splitting on '路' looks like mojibake for '·'
    (the separator Bing renders between result fields) — confirm against the
    live markup.
    """
    payload = {'q': question}
    request = requests.get(
        'https://www.bing.com/search?&cc=US',
        params=payload,
        headers={
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36'
        },
        timeout=10,  # was missing: requests.get with no timeout can hang forever
    )
    soup = BeautifulSoup(request.text, 'html.parser')
    # Was a bare `except:`; only the [1] index can fail here when the
    # marker is absent from the page.
    try:
        return 'context : ' + str(soup.text.split('路')[1].split('https://')[0])
    except IndexError:
        return ' '
126
+
127
+
128
+
129
+
130
def llm_search(question, llm):
    """Answer *question* with *llm*, augmented by a scraped web-search snippet.

    Appends the output of search(question) to the user message, prints the
    reply and the elapsed time, and returns the reply text.
    """
    print(question)
    started = time.time()
    augmented_question = question + """
""" + search(question)
    conversation = [
        {
            "role": "system",
            "content": """You are a search assistant, gives helpful,
detailed, and polite answers to the user's questions
"""
        },
        {
            "role": "user",
            "content": augmented_question
        },
    ]
    response = llm.create_chat_completion(messages=conversation)
    total_time = time.time() - started
    reply = response["choices"][0]["message"]["content"]
    print(reply)
    print(f"Execution time: {total_time:.2f} seconds")
    return reply
154
+