Kotta commited on
Commit
a305767
·
1 Parent(s): a1b1b91

feature(#45): Actions-Rails: fixed issues to correctly pass its arguments

Browse files
.gitignore CHANGED
@@ -1,2 +1,2 @@
1
- /firebase_cred.json
2
  /.idea
 
1
+ /Brain/firebase_cred.json
2
  /.idea
Brain/src/common/brain_exception.py CHANGED
@@ -5,6 +5,8 @@ from Brain.src.common.http_response_codes import responses
5
 
6
 
7
  class BrainException(Exception):
 
 
8
  def __init__(self, message: str = "Exception occurred in brain"):
9
  self.message = message
10
  super().__init__(self.message)
 
5
 
6
 
7
class BrainException(Exception):
    """Base exception raised by the Brain service.

    Carries a human-readable message; defaults to a generic brain error.
    """

    # Fixed typo: "paring" -> "parsing" in the JSON-parse failure message.
    JSON_PARSING_ISSUE_MSG = "Exception occurred in json parsing."

    def __init__(self, message: str = "Exception occurred in brain"):
        # Store the message so callers can inspect it via `.message`
        # in addition to the standard str(exception) form.
        self.message = message
        super().__init__(self.message)
Brain/src/rising_plugin/guardrails-config/actions/actions.py CHANGED
@@ -22,6 +22,7 @@ from langchain.vectorstores import utils
22
  from langchain.document_loaders.csv_loader import CSVLoader
23
  from langchain.docstore.document import Document
24
 
 
25
  from Brain.src.common.utils import (
26
  OPENAI_API_KEY,
27
  COMMAND_SMS_INDEXS,
@@ -43,10 +44,31 @@ from Brain.src.rising_plugin.llm.llms import (
43
  FALCON_7B,
44
  )
45
 
 
 
 
 
 
 
 
 
 
 
46
 
47
  @action()
48
- async def general_question(query, model, uuid, image_search):
49
- """step1: handle with gpt-4"""
 
 
 
 
 
 
 
 
 
 
 
50
  file_path = os.path.dirname(os.path.abspath(__file__))
51
 
52
  with open(f"{file_path}/phone.json", "r") as infile:
 
22
  from langchain.document_loaders.csv_loader import CSVLoader
23
  from langchain.docstore.document import Document
24
 
25
+ from Brain.src.common.brain_exception import BrainException
26
  from Brain.src.common.utils import (
27
  OPENAI_API_KEY,
28
  COMMAND_SMS_INDEXS,
 
44
  FALCON_7B,
45
  )
46
 
47
+ """
48
+ query is json string with below format
49
+ {
50
+ "query": string,
51
+ "model": string,
52
+ "uuid": string,
53
+ "image_search": bool,
54
+ }
55
+ """
56
+
57
 
58
  @action()
59
+ async def general_question(query):
60
+ """step 0: convert string to json"""
61
+ try:
62
+ json_query = json.loads(query)
63
+ except Exception as ex:
64
+ raise BrainException(BrainException.JSON_PARSING_ISSUE_MSG)
65
+ """step 0 (cont.): parse params from the json query"""
66
+ query = json_query["query"]
67
+ model = json_query["model"]
68
+ uuid = json_query["uuid"]
69
+ image_search = json_query["image_search"]
70
+
71
+ """step 1: handle with gpt-4"""
72
  file_path = os.path.dirname(os.path.abspath(__file__))
73
 
74
  with open(f"{file_path}/phone.json", "r") as infile:
Brain/src/rising_plugin/guardrails-config/general.co CHANGED
@@ -15,5 +15,5 @@ define bot inform capabilities
15
  define flow
16
  priority 0.9
17
  user ...
18
- $result = execute general_question(query=$last_user_message, model="gpt-4", uuid="", image_search=True)
19
  bot $result
 
15
  define flow
16
  priority 0.9
17
  user ...
18
+ $result = execute general_question(query=$last_user_message)
19
  bot $result
Brain/src/rising_plugin/risingplugin.py CHANGED
@@ -40,13 +40,20 @@ def getChunks(query: str):
40
  )
41
 
42
 
43
- def processLargeText(app: any, chunks: any):
 
 
44
  if len(chunks) == 1:
45
  message = app.generate(
46
  messages=[
47
  {
48
  "role": "user",
49
- "content": chunks[0],
 
 
 
 
 
50
  }
51
  ]
52
  )
@@ -81,7 +88,12 @@ def processLargeText(app: any, chunks: any):
81
  messages=[
82
  {
83
  "role": "user",
84
- "content": chunk_query,
 
 
 
 
 
85
  }
86
  ]
87
  )
@@ -100,7 +112,17 @@ def processLargeText(app: any, chunks: any):
100
  + "ALL PART SENT. Now you can continue processing the request."
101
  )
102
  message = app.generate(
103
- messages=[{"role": "user", "content": last_query}]
 
 
 
 
 
 
 
 
 
 
104
  )
105
  result = json.dumps(message["content"])
106
  return parseJsonFromCompletion(result)
@@ -119,7 +141,9 @@ def getCompletion(
119
 
120
  app = LLMRails(config, llm)
121
 
122
- return processLargeText(app, chunks)
 
 
123
 
124
 
125
  def getCompletionOnly(
@@ -198,7 +222,7 @@ def filter_guardrails(model: any, query: str):
198
  chunks = getChunks(query)
199
 
200
  # get message from guardrails
201
- message = processLargeText(app, chunks)
202
 
203
  if (
204
  message
@@ -211,6 +235,22 @@ def filter_guardrails(model: any, query: str):
211
  return ""
212
 
213
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
214
  def handle_chat_completion(messages: Any, model: str = "gpt-3.5-turbo") -> Any:
215
  openai.api_key = OPENAI_API_KEY
216
 
@@ -220,11 +260,11 @@ def handle_chat_completion(messages: Any, model: str = "gpt-3.5-turbo") -> Any:
220
  )
221
 
222
  # Filter the reply using the content filter
223
- result = filter_guardrails(model, messages[-1]["content"])
224
-
225
- if result == "":
226
- return response
227
- else:
228
- response["choices"][0]["message"]["content"] = result
229
- return response
230
- # return response
 
40
  )
41
 
42
 
43
+ def processLargeText(
44
+ app: any, chunks: any, model: str, uuid: str = "", image_search: bool = True
45
+ ):
46
  if len(chunks) == 1:
47
  message = app.generate(
48
  messages=[
49
  {
50
  "role": "user",
51
+ "content": rails_input_with_args(
52
+ query=chunks[0],
53
+ model=model,
54
+ uuid=uuid,
55
+ image_search=image_search,
56
+ ),
57
  }
58
  ]
59
  )
 
88
  messages=[
89
  {
90
  "role": "user",
91
+ "content": rails_input_with_args(
92
+ query=chunk_query,
93
+ model=model,
94
+ uuid=uuid,
95
+ image_search=image_search,
96
+ ),
97
  }
98
  ]
99
  )
 
112
  + "ALL PART SENT. Now you can continue processing the request."
113
  )
114
  message = app.generate(
115
+ messages=[
116
+ {
117
+ "role": "user",
118
+ "content": rails_input_with_args(
119
+ query=last_query,
120
+ model=model,
121
+ uuid=uuid,
122
+ image_search=image_search,
123
+ ),
124
+ }
125
+ ]
126
  )
127
  result = json.dumps(message["content"])
128
  return parseJsonFromCompletion(result)
 
141
 
142
  app = LLMRails(config, llm)
143
 
144
+ return processLargeText(
145
+ app=app, chunks=chunks, model=model, uuid=uuid, image_search=image_search
146
+ )
147
 
148
 
149
  def getCompletionOnly(
 
222
  chunks = getChunks(query)
223
 
224
  # get message from guardrails
225
+ message = processLargeText(app=app, chunks=chunks, model=model)
226
 
227
  if (
228
  message
 
235
  return ""
236
 
237
 
238
+ """
239
+ Compose a JSON string for rails input from its arguments.
240
+ """
241
+
242
+
243
def rails_input_with_args(query: str, model: str, uuid: str, image_search: bool) -> str:
    """Serialize the rails input arguments into a single JSON string.

    The resulting payload carries the query text plus the model, uuid and
    image_search settings, so rails actions receive all parameters at once.
    """
    return json.dumps(
        {
            "query": query,
            "model": model,
            "uuid": uuid,
            "image_search": image_search,
        }
    )
252
+
253
+
254
  def handle_chat_completion(messages: Any, model: str = "gpt-3.5-turbo") -> Any:
255
  openai.api_key = OPENAI_API_KEY
256
 
 
260
  )
261
 
262
  # Filter the reply using the content filter
263
+ # result = filter_guardrails(model, messages[-1]["content"])
264
+ # commented out due to a logic issue with guardrails
265
+ # if result == "":
266
+ # return response
267
+ # else:
268
+ # response["choices"][0]["message"]["content"] = result
269
+ # return response
270
+ return response