File size: 2,686 Bytes
b3f1645
a8153a1
 
 
 
e2e403d
a8153a1
e2e403d
75d9414
 
 
 
29fc30a
a8153a1
 
e2e403d
75d9414
29fc30a
 
75d9414
a8153a1
 
e2e403d
a8153a1
 
 
8a75ffe
e2e403d
abcd79a
ddc6dad
 
 
75d9414
ddc6dad
a8153a1
ddc6dad
8a75ffe
a8153a1
a1888d0
a8153a1
 
3c51054
19e5163
a1888d0
 
19e5163
29fc30a
 
 
 
 
 
a2ba07c
3c51054
 
 
29fc30a
3c51054
a1888d0
 
 
75d9414
3c51054
 
 
 
29fc30a
a1888d0
 
19e5163
a1888d0
 
 
 
a2ba07c
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import json, time

from hashlib import sha256
from fastapi import HTTPException
from openai import OpenAI
import tiktoken

from . import log_module, error_map, chat_functions as tools, settings
from typing import TYPE_CHECKING                
if TYPE_CHECKING:                               
    from . import model

import asyncio


# Tokenizer matching the configured model. NOTE(review): `encoding` is not
# referenced anywhere in this module's visible code — presumably imported and
# used by sibling modules for token counting; confirm before removing.
encoding = tiktoken.encoding_for_model(settings.GPT_MODEL)

def ejecutar(chat: "model.Chat", session: "model.Session"):
    """Start a streaming OpenAI chat completion for the given conversation.

    Args:
        chat: Conversation whose messages are serialized and sent to the model.
        session: Provides sampling penalties, the tool-use toggle and the
            user id (hashed before being forwarded to OpenAI).

    Returns:
        The streaming response iterator returned by the OpenAI SDK
        (``stream=True``).

    Raises:
        HTTPException: built from ``error_map.error_table`` keyed by the
            concrete exception type, falling back to the ``"undefined"`` entry.
    """
    temp_messages = [msg.model_dump() for msg in chat.messages]

    try:
        client = OpenAI(
            api_key=settings.OPENAI_API_KEY,
            timeout=30.0,
            max_retries=3,
        )
        return client.chat.completions.create(
            model=settings.GPT_MODEL,
            messages=temp_messages,
            frequency_penalty=session.configs.frequency_penalty,
            presence_penalty=session.configs.presence_penalty,
            tools=tools.functions if session.configs.useTool else None,
            stream=True,
            # Pseudonymize the session id before it leaves the service.
            user=sha256(session.gid.encode('UTF-8')).hexdigest(),
        )
    except Exception as error:
        log_module.logger(session.gid).error(repr(error) + " - " + session.gid)
        # Chain the original exception (`from error`) so the mapped HTTP
        # error keeps the SDK root cause in tracebacks and logs.
        raise HTTPException(
            **error_map.error_table.get(type(error), error_map.error_table["undefined"])
        ) from error

async def streamer(chat: "model.Chat", session: "model.Session", sub_exec:bool = False):
    """Async generator yielding JSON command frames for one chat turn.

    Drives a streaming completion from ``ejecutar``. If the first chunk
    carries text content, the rest of the stream is handed to
    ``chat.new_msg``; otherwise it is treated as a tool/function call, the
    call is executed, and this generator recurses (``sub_exec=True``) to
    stream the model's follow-up answer.

    Args:
        chat: Conversation being extended; receives the new message(s).
        session: Session used for usage accounting, logging and tokens.
        sub_exec: True on recursive calls — suppresses the status frames
            and the trailing challenge/token/mensaje frames so they are
            emitted only once, by the outermost call.

    Yields:
        JSON strings, each an object with a "comando" discriminator
        ("status", "challenge", "token", "mensaje").
    """
    response_async = ejecutar(chat, session)
    if not sub_exec:
        yield json.dumps({"comando": "status", "status":{"mensaje":"Cargando", "modo": "reemplazar"}})
    
    message = None
    role = None 


    # NOTE(review): despite the name, `response_async` is a synchronous
    # iterator — this `next()` blocks the event loop until the first chunk
    # arrives; consider asyncio.to_thread. Confirm with the OpenAI SDK in use.
    chunk = next(response_async)
    role = chunk.choices[0].delta.role
    
    # A str `content` in the first delta means a plain text reply; otherwise
    # (presumably None when the model emits a tool call) take the tool path.
    if isinstance(chunk.choices[0].delta.content, str):
        # new_msg consumes the remainder of the stream iterator.
        message = chat.new_msg(role, response_async)
        chat.append(message)
    else:
        # new_func gets both the already-read first chunk and the rest of
        # the stream so no data is lost.
        message = chat.new_func(role, response_async, chunk)
        if not sub_exec:
            yield json.dumps({"comando": "status", "status":{"mensaje":"Buscando en google o algo así", "modo": "reemplazar"}})  
        # Execute the tool call before appending, then recurse so the model
        # can produce its final answer from the tool result.
        message.exec(session.gid)
        
        chat.append(message)
        
        async for r_async in streamer(chat, session, True):
            yield r_async
    
    # Only the outermost call accounts usage and emits the closing frames.
    if not sub_exec:
        session.update_usage(chat.tokens)
    
        log_module.logger(session.gid).info(f"Chat used, tokens: {chat.tokens}")
        yield json.dumps({"comando":"challenge", "challenge": session.challenge} )
        yield json.dumps({"comando":"token", "token": session.create_cookie_token() } )
        yield json.dumps({"comando":"mensaje", "mensaje": chat.messages[-1].model_dump()} )