MGLDZM committed on
Commit
a2ba07c
·
1 Parent(s): 2aff525
modules/chat_functions/buscar_google.py CHANGED
@@ -2,6 +2,7 @@ import json
2
  import os
3
  from googleapiclient.discovery import build
4
  from .. import log_module
 
5
 
6
  activo = True
7
  try:
@@ -51,6 +52,7 @@ def ejecutar(busqueda):
51
  retorno = {}
52
  retorno["conseguidos"] = []
53
  for result in results["items"]:
 
54
  retorno["conseguidos"].append({
55
  "titulo": result["title"],
56
  "link": result["link"],
 
2
  import os
3
  from googleapiclient.discovery import build
4
  from .. import log_module
5
+
6
 
7
  activo = True
8
  try:
 
52
  retorno = {}
53
  retorno["conseguidos"] = []
54
  for result in results["items"]:
55
+ if "title" not in result or "snippet" not in result: continue
56
  retorno["conseguidos"].append({
57
  "titulo": result["title"],
58
  "link": result["link"],
modules/llm.py CHANGED
@@ -6,6 +6,7 @@ from openai import OpenAI
6
  import tiktoken
7
 
8
  from . import log_module, error_map, chat_functions as tools, settings
 
9
  from typing import TYPE_CHECKING
10
  if TYPE_CHECKING:
11
  from . import model
@@ -19,7 +20,7 @@ encoding = tiktoken.encoding_for_model(settings.GPT_MODEL)
19
 
20
 
21
  def ejecutar(body: "model.Chat", session: "model.Session"):
22
- temp_messages = [{"role":msg.role, "content":msg.content} for msg in body.messages]
23
 
24
  try:
25
  client = OpenAI(
@@ -43,47 +44,24 @@ def ejecutar(body: "model.Chat", session: "model.Session"):
43
  log_module.logger.error(repr(error) + " - " + session.gid)
44
  raise HTTPException( **error_map.error_table.get(type(error), error_map.error_table["undefined"]))
45
 
46
- def manejar_funciones(chunk, response_async, data):
47
- f_cb = dict(chunk["choices"][0]["delta"]["function_call"])
48
- for chunk in response_async:
49
- if chunk["choices"][0]["finish_reason"]:
50
- break
51
- for k, v in chunk["choices"][0]["delta"]["function_call"].items():
52
- f_cb[k] += v
53
-
54
- f_cb["arguments"] = json.loads(f_cb["arguments"])
55
- log_module.log_write(data["token_data"]["user"], "Funcion ejecutada RQ", json.dumps(f_cb))
56
- resultado_funcion = chat_functions.function_callbacks[f_cb["name"]](**f_cb["arguments"])
57
- log_module.log_write(data["token_data"]["user"], "Funcion ejecutada RS", json.dumps(resultado_funcion))
58
- data["messages"].append({"role": "function",
59
- "name": f_cb["name"],
60
- "content": resultado_funcion,
61
- })
62
- return json.dumps(f_cb), ejecutar(data)
63
-
64
  async def streamer(chat: "model.Chat", session: "model.Session"):
65
  response_async = ejecutar(chat, session)
66
 
67
-
68
-
69
  yield json.dumps({"comando": "status", "status":{"mensaje":"Cargando", "modo": "reemplazar"}})
70
  message = chat.new_msg()
71
 
72
- st = time.time()
73
- st_times = 0
74
- function_exec = {
75
- "name": "",
76
- "args": ""
77
- }
78
  for chunk in response_async:
79
- print(chunk.model_dump())
80
- choise = chunk.choices[0]
81
- delta = choise.delta
 
82
 
83
- message.tokensOutput += 1
84
- if choise.finish_reason in ["stop", "tool_calls"]:
85
  break
86
-
87
  elif delta.role:
88
  message.role = delta.role
89
  continue
@@ -92,12 +70,17 @@ async def streamer(chat: "model.Chat", session: "model.Session"):
92
  message.content += delta.content
93
 
94
  elif delta.tool_calls:
95
- if(name := delta.tool_calls[0].function.name):
96
- function_exec["name"] = name
97
- if(args := delta.tool_calls[0].function.arguments):
98
- function_exec["args"] += args
 
 
 
99
 
100
-
 
 
101
  if time.time() - st > 3:
102
  if st_times > 2:
103
  yield json.dumps({"comando": "status", "status":{"mensaje":"Cargando", "modo": "reemplazar"}})
@@ -106,11 +89,26 @@ async def streamer(chat: "model.Chat", session: "model.Session"):
106
  yield json.dumps({"comando": "status", "status":{"mensaje":".", "modo": "enlinea"}})
107
  st_times += 1
108
  st = time.time()
109
- message.tokensPrompt = chat.tokens
110
- session.update_usage(message)
 
 
 
 
111
 
 
 
 
 
 
 
 
112
 
113
 
114
  yield json.dumps({"comando":"challenge", "challenge": session.challenge} )
115
  yield json.dumps({"comando":"token", "token": session.create_cookie_token() } )
116
- yield json.dumps({"comando":"mensaje", "mensaje": message.model_dump()} )
 
 
 
 
 
6
  import tiktoken
7
 
8
  from . import log_module, error_map, chat_functions as tools, settings
9
+ from .model import Functions
10
  from typing import TYPE_CHECKING
11
  if TYPE_CHECKING:
12
  from . import model
 
20
 
21
 
22
  def ejecutar(body: "model.Chat", session: "model.Session"):
23
+ temp_messages = [msg.model_dump() for msg in body.messages]
24
 
25
  try:
26
  client = OpenAI(
 
44
  log_module.logger.error(repr(error) + " - " + session.gid)
45
  raise HTTPException( **error_map.error_table.get(type(error), error_map.error_table["undefined"]))
46
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  async def streamer(chat: "model.Chat", session: "model.Session"):
48
  response_async = ejecutar(chat, session)
49
 
 
 
50
  yield json.dumps({"comando": "status", "status":{"mensaje":"Cargando", "modo": "reemplazar"}})
51
  message = chat.new_msg()
52
 
53
+ functions_exec = []
54
+
55
+
 
 
 
56
  for chunk in response_async:
57
+ choice = chunk.choices[0]
58
+ delta = choice.delta
59
+
60
+ message._tokensOutput += 1
61
 
62
+ if choice.finish_reason in ["stop", "tool_calls"]:
 
63
  break
64
+
65
  elif delta.role:
66
  message.role = delta.role
67
  continue
 
70
  message.content += delta.content
71
 
72
  elif delta.tool_calls:
73
+ tool_call = delta.tool_calls[0]
74
+ if tool_call.id:
75
+ functions_exec.append(Functions(tool_call_id=tool_call.id, role=message.role))
76
+ if(name := tool_call.function.name):
77
+ functions_exec[-1].name = name
78
+ if(args := tool_call.function.arguments):
79
+ functions_exec[-1]._busqueda += args
80
 
81
+
82
+ st = time.time()
83
+ st_times = 0
84
  if time.time() - st > 3:
85
  if st_times > 2:
86
  yield json.dumps({"comando": "status", "status":{"mensaje":"Cargando", "modo": "reemplazar"}})
 
89
  yield json.dumps({"comando": "status", "status":{"mensaje":".", "modo": "enlinea"}})
90
  st_times += 1
91
  st = time.time()
92
+
93
+ if functions_exec:
94
+ for i in functions_exec:
95
+ i.exec()
96
+ chat.messages.append(i)
97
+
98
 
99
+ async for out in streamer(chat, session):
100
+ yield out
101
+
102
+
103
+
104
+ message._tokensPrompt = chat.tokens
105
+ session.update_usage(message)
106
 
107
 
108
  yield json.dumps({"comando":"challenge", "challenge": session.challenge} )
109
  yield json.dumps({"comando":"token", "token": session.create_cookie_token() } )
110
+ yield json.dumps({"comando":"mensaje", "mensaje": message.model_dump()} )
111
+
112
+
113
+
114
+
modules/model.py CHANGED
@@ -2,7 +2,7 @@ import json
2
  from pymongo.mongo_client import MongoClient
3
  from pymongo.server_api import ServerApi
4
  from fastapi import Request, Response
5
- from . import log_module, security, settings
6
  from datetime import timezone, datetime, timedelta
7
  from pydantic import BaseModel, Field, PrivateAttr
8
  from typing import List, Self
@@ -33,28 +33,55 @@ class Configs(BaseModel):
33
  useTool: bool = True
34
  assistant: str = "clasico"
35
 
36
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  class Message(BaseModel):
38
  role: str
39
  content: str
40
- tokensPrompt: int = 0
41
- tokensOutput: int = 0
42
 
43
  def __init__(self, *args: list, **kwargs: dict):
44
  super(Message, self).__init__(*args, **kwargs)
45
- self.tokensPrompt += count_tokens_on_message(list(kwargs.values()))
 
 
 
 
46
 
47
  class Chat(BaseModel):
48
- messages: List[Message]
49
  tokens: int = 3
50
 
51
  def __init__(self: Self, *args: list, **kwargs: dict):
52
  super(Chat, self).__init__(*args, **kwargs)
53
- self.tokens += sum([x.tokensPrompt+3 for x in self.messages])
54
 
55
- def append(self: Self, mesage: Message):
56
  self.messages.append(mesage)
57
- self.tokens += mesage.tokensPrompt
58
 
59
  def new_msg(self: Self):
60
  return Message(role="", content="")
@@ -233,7 +260,7 @@ class User(BaseModel):
233
 
234
  @staticmethod
235
  def update_usage(gid:str, message:Message):
236
- count_tokens:int = message.tokensOutput+message.tokensPrompt
237
  inc_field = datetime.now().strftime("tokens.%y.%m.%d")
238
  DB.user.update_one({"gid": gid}, {"$inc":{inc_field: count_tokens}})
239
 
 
2
  from pymongo.mongo_client import MongoClient
3
  from pymongo.server_api import ServerApi
4
  from fastapi import Request, Response
5
+ from . import log_module, security, settings, chat_functions
6
  from datetime import timezone, datetime, timedelta
7
  from pydantic import BaseModel, Field, PrivateAttr
8
  from typing import List, Self
 
33
  useTool: bool = True
34
  assistant: str = "clasico"
35
 
36
class Functions(BaseModel):
    """Accumulates one streamed OpenAI tool call and executes it.

    The public fields (role, tool_call_id, name, content) mirror the
    "tool" message dict sent back to the API; underscore attributes are
    pydantic private state and are excluded from serialization.
    """
    role: str = ""
    tool_call_id: str = ""
    name: str = ""
    content: str = ""
    _busqueda: str = ""      # raw JSON argument string, appended chunk by chunk
    _args: str = ""          # holds the parsed argument dict after parse()
    _tokensPrompt: int = 0   # token cost attributed to this tool result
    _tokensOutput: int = 0

    def parse(self):
        """Decode the accumulated JSON argument string into a dict.

        An empty stream means the function takes no arguments;
        json.loads("") would raise ValueError, so fall back to {}.
        """
        self._args = json.loads(self._busqueda) if self._busqueda else {}

    def exec(self):
        """Run the registered callback for this tool call and store its result."""
        if not self._args:
            self.parse()
        # The model streams arguments as a JSON object keyed by parameter
        # name, so they must be unpacked as keyword arguments — the callbacks
        # take named parameters (e.g. buscar_google.ejecutar(busqueda)), and
        # the pre-refactor code likewise called them with **arguments.
        self.content = chat_functions.function_callbacks[self.name](**self._args)
        self._tokensPrompt += count_tokens_on_message([self.name, self.content])
55
+
56
+
57
+
58
+
59
+
60
class Message(BaseModel):
    """One chat message with private (non-serialized) token accounting."""
    role: str
    content: str
    _tokensPrompt: int = 0   # tokens this message costs as prompt input
    _tokensOutput: int = 0   # tokens generated while streaming this message

    def __init__(self, *args: list, **kwargs: dict):
        super().__init__(*args, **kwargs)
        # Charge the prompt cost of every field value given at construction.
        values = list(kwargs.values())
        self._tokensPrompt += count_tokens_on_message(values)

    def get_tokens(self):
        """Return the (prompt, output) token counters as a tuple."""
        return (self._tokensPrompt, self._tokensOutput)
72
+
73
 
74
class Chat(BaseModel):
    """An ordered conversation whose running token total stays in sync."""
    messages: List[Message|Functions]
    tokens: int = 3   # base overhead; grows with each message's prompt cost

    def __init__(self: Self, *args: list, **kwargs: dict):
        super().__init__(*args, **kwargs)
        # Every initial message costs its prompt tokens plus a fixed 3-token overhead.
        for item in self.messages:
            self.tokens += item._tokensPrompt + 3

    def append(self: Self, mesage: Message|Functions):
        """Add a message and charge its prompt tokens to the running total."""
        self.messages.append(mesage)
        self.tokens += mesage._tokensPrompt

    def new_msg(self: Self):
        """Return a fresh, empty Message to stream assistant output into."""
        return Message(role="", content="")
 
260
 
261
    @staticmethod
    def update_usage(gid:str, message:Message):
        # Accrue this message's total token usage (output + prompt) onto the
        # user's per-day counter document.
        count_tokens:int = message._tokensOutput+message._tokensPrompt
        # Bucket field looks like "tokens.25.07.14" (year.month.day under "tokens").
        inc_field = datetime.now().strftime("tokens.%y.%m.%d")
        # $inc creates the field if absent, so no upsert of the nested path is needed
        # as long as the user document exists.
        DB.user.update_one({"gid": gid}, {"$inc":{inc_field: count_tokens}})
266
 
static/js/chatHandler.js CHANGED
@@ -310,7 +310,6 @@ class ChatGPT{
310
  }).catch(err =>{
311
  // Error
312
  console.log('algo paso', err)
313
- document.location.href = "/";
314
  });
315
  }
316
 
 
310
  }).catch(err =>{
311
  // Error
312
  console.log('algo paso', err)
 
313
  });
314
  }
315