Miguel Diaz committed on
Commit
071c698
1 Parent(s): 1110b36

Fix: Adjustments

Browse files
Files changed (2) hide show
  1. main.py +4 -102
  2. static/js/windowHandler.js +19 -6
main.py CHANGED
@@ -7,9 +7,7 @@ from datetime import datetime, timedelta
7
  import time
8
  import jwt
9
  import openai
10
- from openai import util as openai_util
11
  from openai import error as openai_error
12
- from html import escape
13
  from hashlib import sha256
14
  import tiktoken
15
  from supported import supp_langs
@@ -70,112 +68,16 @@ async def root(request: Request, credentials: HTTPBasicCredentials = Depends(sec
70
  return HTMLResponse(f.read().replace("{% token %}", token).replace("{% version %}", "3"))
71
 
72
 
73
- @app.post("/chat")
74
- async def chat(data = Depends(validate_token)):
75
- openai.api_key = str(os.getenv("OPENAI_API_KEY"))
76
- model_engine = "gpt-3.5-turbo"
77
- messages = data["messages"]
78
- config = {
79
- "temperature": 1.0,
80
- "frequency_penalty": 0.0,
81
- "presence_penalty": 0.0,
82
- }
83
- if "config" in data:
84
- config["temperature"] = float(data["config"].get("temperature", 1))
85
- config["frequency_penalty"] = float(data["config"].get("frequency_penalty", 0))
86
- config["presence_penalty"] = float(data["config"].get("presence_penalty", 0))
87
- try:
88
- response = openai.ChatCompletion.create(
89
- model=model_engine,
90
- messages=messages,
91
- max_tokens=3000,
92
- temperature=config["temperature"],
93
- frequency_penalty=config["frequency_penalty"],
94
- presence_penalty=config["presence_penalty"],
95
- request_timeout = 25
96
- )
97
- except requests.exceptions.RequestException as e:
98
- print("Timeout (requests)")
99
- print(e)
100
- raise HTTPException(
101
- status_code=status.HTTP_408_REQUEST_TIMEOUT,
102
- detail="Los servidores tardaron mucho en responder, puede haber sobrecarga en OpenAI, reintenta luego (error 1)"
103
- )
104
- except openai_error.APIConnectionError as e:
105
- print("APIConnectionError")
106
- print(e)
107
- raise HTTPException(
108
- status_code=status.HTTP_408_REQUEST_TIMEOUT,
109
- detail="El servidor no respondió, puede haber sobrecarga en OpenAI, reintenta luego (error 2)"
110
- )
111
- except openai_error.Timeout as e:
112
- print("Timeout (openai)")
113
- print(e)
114
- raise HTTPException(
115
- status_code=status.HTTP_408_REQUEST_TIMEOUT,
116
- detail="El servidor no respondió, puede haber sobrecarga en OpenAI, reintenta luego (error 3)"
117
- )
118
-
119
- message = response.choices[0].message.content
120
- contexto = openai_util.convert_to_dict(response)
121
-
122
- codigos = []
123
- while "```" in message:
124
- start = message.find("```")
125
- end = message.find("```", start+3)+3
126
- temp_text = message[start:end]
127
- end_lang = temp_text.find("\n")
128
-
129
- message = message.replace(temp_text, "{#$&---&$#}")
130
-
131
- t_language = temp_text[3:end_lang].strip()
132
-
133
-
134
- language = t_language.replace(" ", "-")
135
-
136
- if language in supp_langs:
137
- temp_text = f'<pre><code class="language-{language}">' + escape(temp_text[end_lang:-3].strip()) + "</code></pre>"
138
- else:
139
- temp_text = '<pre><code class="language-none">' + escape(temp_text[3:-3].strip()) + "</code></pre>"
140
- codigos.append(temp_text)
141
-
142
- message = message.replace("\n", "<br>")
143
-
144
- for codigo in codigos:
145
- message = message.replace("{#$&---&$#}", codigo, 1)
146
- message = re.sub(r'`([^`]+)`', r'<b>\1</b>', message)
147
-
148
-
149
- messages.append({"role": "assistant", "content": message})
150
- token = create_jwt_token(data.pop("token_data"))
151
- return {"messages": messages, "token": token, "config": config, "contexto":contexto}
152
-
153
- def procesar_codigo(data):
154
- temp_text = data[3:-3]
155
- end_lang = temp_text.find("\n")
156
- language = temp_text[:end_lang].strip().replace(" ", "-")
157
-
158
- if language in supp_langs:
159
- temp_text = f'<pre><code class="language-{language}">' + escape(temp_text[end_lang:].strip()) + "</code></pre>"
160
- else:
161
- temp_text = '<pre><code class="language-none">' + escape(temp_text.strip()) + "</code></pre>"
162
-
163
- return temp_text
164
-
165
-
166
  @app.post("/chat_stream")
167
  async def chat_stream(data = Depends(validate_token)):
168
  messages = data["messages"]
169
  token_length = len(tokenizer.encode(messages[-1]["content"]))
170
  config = {
171
- "temperature": 1.0,
172
- "frequency_penalty": 0.0,
173
- "presence_penalty": 0.0
174
  }
175
- if "config" in data:
176
- config["temperature"] = float(data["config"].get("temperature", 1))
177
- config["frequency_penalty"] = float(data["config"].get("frequency_penalty", 0))
178
- config["presence_penalty"] = float(data["config"].get("presence_penalty", 0))
179
  try:
180
  response = openai.ChatCompletion.create(
181
  model="gpt-3.5-turbo",
 
7
  import time
8
  import jwt
9
  import openai
 
10
  from openai import error as openai_error
 
11
  from hashlib import sha256
12
  import tiktoken
13
  from supported import supp_langs
 
68
  return HTMLResponse(f.read().replace("{% token %}", token).replace("{% version %}", "3"))
69
 
70
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  @app.post("/chat_stream")
72
  async def chat_stream(data = Depends(validate_token)):
73
  messages = data["messages"]
74
  token_length = len(tokenizer.encode(messages[-1]["content"]))
75
  config = {
76
+ "temperature": float(data.get("config", []).get("temperature", 1)),
77
+ "frequency_penalty": float(data.get("config", []).get("frequency_penalty", 1)),
78
+ "presence_penalty": float(data.get("config", []).get("presence_penalty", 1))
79
  }
80
+
 
 
 
81
  try:
82
  response = openai.ChatCompletion.create(
83
  model="gpt-3.5-turbo",
static/js/windowHandler.js CHANGED
@@ -2,10 +2,8 @@ class WindowHandler{
2
  constructor(token){
3
  this.chatbox = $("#chat");
4
  this.template = $('<div class="message"><div><p></p></div></div>');
5
-
6
- this.evCtx = document;
7
-
8
 
 
9
 
10
  $(this.evCtx).on("enviar:exito", (event, params) => this.procesarTexto(params));
11
  $(this.evCtx).on("chat:cargar", (event, params) => this.cargarChat(params));
@@ -14,11 +12,13 @@ class WindowHandler{
14
  }
15
 
16
  manejadorEnviar(){
17
- let message = $("#input-text").val();
18
- if(message==""){
19
  return false;
20
  }
21
  $("#input-text").val("");
 
 
22
  $(this.evCtx).trigger("chat:enviar", message);
23
  }
24
 
@@ -56,12 +56,25 @@ class WindowHandler{
56
  if(role=="user"){
57
  clone.addClass("me");
58
  }
 
 
 
 
 
 
59
  clone.find("div p").html(mensaje.content);
60
  Prism.highlightElement(clone[0], true, () => {
61
  this.chatbox.append(clone);
62
  this.chatbox.scrollTop(this.chatbox[0].scrollHeight);
63
  } );
64
-
 
 
 
 
 
 
 
65
  }
66
 
67
 
 
2
  constructor(token){
3
  this.chatbox = $("#chat");
4
  this.template = $('<div class="message"><div><p></p></div></div>');
 
 
 
5
 
6
+ this.evCtx = document;
7
 
8
  $(this.evCtx).on("enviar:exito", (event, params) => this.procesarTexto(params));
9
  $(this.evCtx).on("chat:cargar", (event, params) => this.cargarChat(params));
 
12
  }
13
 
14
  manejadorEnviar(){
15
+ let mensaje = $("#input-text").val();
16
+ if(mensaje==""){
17
  return false;
18
  }
19
  $("#input-text").val("");
20
+ mostarMensaje(mensaje, "user");
21
+ precargaMensaje("", "")
22
  $(this.evCtx).trigger("chat:enviar", message);
23
  }
24
 
 
56
  if(role=="user"){
57
  clone.addClass("me");
58
  }
59
+
60
+ if($(".precarga").length){
61
+ clone = $(".precarga")
62
+ clone.removeClass("precarga")
63
+ }
64
+
65
  clone.find("div p").html(mensaje.content);
66
  Prism.highlightElement(clone[0], true, () => {
67
  this.chatbox.append(clone);
68
  this.chatbox.scrollTop(this.chatbox[0].scrollHeight);
69
  } );
70
+ }
71
+
72
+ precargaMensaje(mensaje, role){
73
+ let clone = this.template.clone();
74
+ clone.addClass("precarga");
75
+ clone.find("div p").html("Cargando...");
76
+ this.chatbox.append(clone);
77
+ this.chatbox.scrollTop(this.chatbox[0].scrollHeight);
78
  }
79
 
80