jupiter0913 committed on
Commit
08b8484
·
1 Parent(s): fd31343

feature(#12): fix some issues in the handle_chat_completion function.

Browse files
src/rising_plugin/risingplugin.py CHANGED
@@ -4,6 +4,7 @@ import datetime
4
  import openai
5
  import replicate
6
  import textwrap
 
7
 
8
  from typing import Any
9
 
@@ -46,19 +47,21 @@ def processLargeText(app: any, chunks: any):
46
  }
47
  ]
48
  )
49
- response_text = ""
50
  try:
51
- response_text += json.loads(message["content"])["content"]
52
  except Exception as e:
53
  # fmt: off
54
- message["content"] = message["content"].replace("\'", '"')
 
 
 
 
55
  # fmt: on
56
- response_text = json.loads(message["content"])
57
  return response_text
58
  else:
59
- response_text: str = ""
60
  first_query = "The total length of the content that I want to send you is too large to send in only one piece.\nFor sending you that content, I will follow this rule:\n[START PART 1/10]\nThis is the content of the part 1 out of 10 in total\n[END PART 1/10]\nThen you just answer: 'Received part 1/10'\nAnd when I tell you 'ALL PART SENT', then you can continue processing the data and answering my requests."
61
- message = app.generate(messages=[{"role": "user", "content": first_query}])
62
  for index, chunk in enumerate(chunks):
63
  # Process each chunk with ChatGPT
64
  if index + 1 != len(chunks):
@@ -81,7 +84,7 @@ def processLargeText(app: any, chunks: any):
81
  + "]\n"
82
  + "Remember not answering yet. Just acknowledge you received this part with the message 'Part 1/10 received' and wait for the next part."
83
  )
84
- message = app.generate(
85
  messages=[
86
  {
87
  "role": "user",
@@ -107,10 +110,14 @@ def processLargeText(app: any, chunks: any):
107
  messages=[{"role": "user", "content": last_query}]
108
  )
109
  try:
110
- response_text += json.loads(message["content"])["content"]
111
  except Exception as e:
112
  # fmt: off
113
- message["content"] = message["content"].replace("\'", '"')
 
 
 
 
114
  # fmt: on
115
  response_text = json.loads(message["content"])["content"]
116
  program = json.loads(message["content"])["program"]
@@ -221,11 +228,11 @@ def handle_chat_completion(messages: Any, model: str = "gpt-3.5-turbo") -> Any:
221
  )
222
 
223
  # Filter the reply using the content filter
224
- # result = filter_guardrails(model, messages[-1]["content"])
225
-
226
- # if result == "":
227
- # return response
228
- # else:
229
- # response["choices"][0]["message"]["content"] = result
230
- # return response
231
- return response
 
4
  import openai
5
  import replicate
6
  import textwrap
7
+ import collections
8
 
9
  from typing import Any
10
 
 
47
  }
48
  ]
49
  )
 
50
  try:
51
+ response_text = json.loads(message["content"])["content"]
52
  except Exception as e:
53
  # fmt: off
54
+ # message["content"] = message["content"].replace("'", '"')
55
+ message["content"] = message["content"].replace("{'program': 'message', 'content'",
56
+ '{"program": "message", "content"')
57
+ message["content"] = message["content"].replace(message["content"][34], '"')
58
+ message["content"] = message["content"].replace(message["content"][-2], '"')
59
  # fmt: on
60
+ response_text = json.loads(message["content"])["content"]
61
  return response_text
62
  else:
 
63
  first_query = "The total length of the content that I want to send you is too large to send in only one piece.\nFor sending you that content, I will follow this rule:\n[START PART 1/10]\nThis is the content of the part 1 out of 10 in total\n[END PART 1/10]\nThen you just answer: 'Received part 1/10'\nAnd when I tell you 'ALL PART SENT', then you can continue processing the data and answering my requests."
64
+ app.generate(messages=[{"role": "user", "content": first_query}])
65
  for index, chunk in enumerate(chunks):
66
  # Process each chunk with ChatGPT
67
  if index + 1 != len(chunks):
 
84
  + "]\n"
85
  + "Remember not answering yet. Just acknowledge you received this part with the message 'Part 1/10 received' and wait for the next part."
86
  )
87
+ app.generate(
88
  messages=[
89
  {
90
  "role": "user",
 
110
  messages=[{"role": "user", "content": last_query}]
111
  )
112
  try:
113
+ response_text = json.loads(message["content"])["content"]
114
  except Exception as e:
115
  # fmt: off
116
+ # message["content"] = message["content"].replace("'", '"')
117
+ message["content"] = message["content"].replace("{'program': 'message', 'content'",
118
+ '{"program": "message", "content"')
119
+ message["content"] = message["content"].replace(message["content"][34], '"')
120
+ message["content"] = message["content"].replace(message["content"][-2], '"')
121
  # fmt: on
122
  response_text = json.loads(message["content"])["content"]
123
  program = json.loads(message["content"])["program"]
 
228
  )
229
 
230
  # Filter the reply using the content filter
231
+ result = filter_guardrails(model, messages[-1]["content"])
232
+
233
+ if result == "":
234
+ return response
235
+ else:
236
+ response["choices"][0]["message"]["content"] = result
237
+ return response
238
+ # return response
templates/index.html CHANGED
@@ -13,7 +13,16 @@
13
  <textarea id="urlInput" name="urlInput" required></textarea><br><br>
14
 
15
  <label for="messageInput">Message:</label>
16
- <textarea id="messageInput" name="messageInput" required></textarea><br><br>
 
 
 
 
 
 
 
 
 
17
 
18
  <label for="tokenInput">token:</label>
19
  <textarea id="tokenInput" name="tokenInput" required></textarea><br><br>
@@ -37,6 +46,9 @@
37
  var message = {
38
  "message": $("#messageInput").val(),
39
  "token": $("#tokenInput").val(),
 
 
 
40
  "uuid": $("#uuidInput").val()
41
  }
42
  var headers = JSON.parse($("#headersInput").val());
 
13
  <textarea id="urlInput" name="urlInput" required></textarea><br><br>
14
 
15
  <label for="messageInput">Message:</label>
16
+ <textarea id="messageInput" name="messageInput"></textarea><br><br>
17
+
18
+ <label for="historyInput">History:</label>
19
+ <textarea id="historyInput" name="historyInput"></textarea><br><br>
20
+
21
+ <label for="userInput">User Input:</label>
22
+ <textarea id="userInput" name="userInput"></textarea><br><br>
23
+
24
+ <label for="modelInput">Model:</label>
25
+ <textarea id="modelInput" name="modelInput"></textarea><br><br>
26
 
27
  <label for="tokenInput">token:</label>
28
  <textarea id="tokenInput" name="tokenInput" required></textarea><br><br>
 
46
  var message = {
47
  "message": $("#messageInput").val(),
48
  "token": $("#tokenInput").val(),
49
+ "history": JSON.parse($("#historyInput").val()),
50
+ "user_input": $("#userInput").val(),
51
+ "model": $("#modelInput").val(),
52
  "uuid": $("#uuidInput").val()
53
  }
54
  var headers = JSON.parse($("#headersInput").val());