Kotta committed on
Commit
13c410c
·
1 Parent(s): 944c5fb

feature(#8): rebased with main

Browse files
src/rising_plugin/guardrails-config/actions/phone.json CHANGED
The diff for this file is too large to render. See raw diff
 
src/rising_plugin/risingplugin.py CHANGED
@@ -7,6 +7,7 @@ import textwrap
7
 
8
  from typing import Any
9
 
 
10
  from nemoguardrails.rails import LLMRails, RailsConfig
11
 
12
  from langchain.chat_models import ChatOpenAI
@@ -17,6 +18,8 @@ from .llm.llms import get_llm, GPT_4, FALCON_7B
17
  from ..common.utils import (
18
  OPENAI_API_KEY,
19
  FIREBASE_STORAGE_ROOT,
 
 
20
  )
21
  from .image_embedding import (
22
  query_image_text,
@@ -48,24 +51,7 @@ def processLargeText(app: any, chunks: any):
48
  ]
49
  )
50
  result = json.dumps(message["content"])
51
- result = result[1:-1]
52
- # fmt: off
53
- result = result.replace("{'", '{"')
54
- result = result.replace("'}", '"}')
55
- result = result.replace("': '", '": "')
56
- result = result.replace("': \\\"", '": \"')
57
- result = result.replace("', '", '", "')
58
-
59
- substring = '\\"}'
60
- replacement = '\"}'
61
-
62
- index = result.rfind(substring)
63
-
64
- if index == len(result) - 3:
65
- result = result[:index] + replacement + result[index + len(substring):]
66
- # fmt: on
67
- result = json.loads(result)
68
- return result
69
  else:
70
  first_query = "The total length of the content that I want to send you is too large to send in only one piece.\nFor sending you that content, I will follow this rule:\n[START PART 1/10]\nThis is the content of the part 1 out of 10 in total\n[END PART 1/10]\nThen you just answer: 'Received part 1/10'\nAnd when I tell you 'ALL PART SENT', then you can continue processing the data and answering my requests."
71
  app.generate(messages=[{"role": "user", "content": first_query}])
@@ -117,34 +103,17 @@ def processLargeText(app: any, chunks: any):
117
  messages=[{"role": "user", "content": last_query}]
118
  )
119
  result = json.dumps(message["content"])
120
- result = result[1:-1]
121
- # fmt: off
122
- result = result.replace("{'", '{"')
123
- result = result.replace("'}", '"}')
124
- result = result.replace("': '", '": "')
125
- result = result.replace("': \\\"", '": \"')
126
- result = result.replace("', '", '", "')
127
-
128
- substring = '\\"}'
129
- replacement = '\"}'
130
-
131
- index = result.rfind(substring)
132
-
133
- if index == len(result) - 3:
134
- result = result[:index] + replacement + result[index + len(substring):]
135
- # fmt: on
136
- result = json.loads(result)
137
- return result
138
  # out of for-loop
139
 
140
 
141
  def getCompletion(
142
  query,
143
- model="gpt-3.5-turbo",
144
  uuid="",
145
  image_search=True,
146
  ):
147
- llm = get_llm(model=model).get_llm()
148
  # Break input text into chunks
149
  chunks = getChunks(query)
150
 
@@ -153,6 +122,17 @@ def getCompletion(
153
  return processLargeText(app, chunks)
154
 
155
 
 
 
 
 
 
 
 
 
 
 
 
156
  def query_image_ask(image_content, message, uuid):
157
  prompt_template = get_prompt_image_with_message(image_content, message)
158
  try:
 
7
 
8
  from typing import Any
9
 
10
+ from langchain.chains.question_answering import load_qa_chain
11
  from nemoguardrails.rails import LLMRails, RailsConfig
12
 
13
  from langchain.chat_models import ChatOpenAI
 
18
  from ..common.utils import (
19
  OPENAI_API_KEY,
20
  FIREBASE_STORAGE_ROOT,
21
+ DEFAULT_GPT_MODEL,
22
+ parseJsonFromCompletion,
23
  )
24
  from .image_embedding import (
25
  query_image_text,
 
51
  ]
52
  )
53
  result = json.dumps(message["content"])
54
+ return parseJsonFromCompletion(result)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  else:
56
  first_query = "The total length of the content that I want to send you is too large to send in only one piece.\nFor sending you that content, I will follow this rule:\n[START PART 1/10]\nThis is the content of the part 1 out of 10 in total\n[END PART 1/10]\nThen you just answer: 'Received part 1/10'\nAnd when I tell you 'ALL PART SENT', then you can continue processing the data and answering my requests."
57
  app.generate(messages=[{"role": "user", "content": first_query}])
 
103
  messages=[{"role": "user", "content": last_query}]
104
  )
105
  result = json.dumps(message["content"])
106
+ return parseJsonFromCompletion(result)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
  # out of for-loop
108
 
109
 
110
  def getCompletion(
111
  query,
112
+ model=DEFAULT_GPT_MODEL,
113
  uuid="",
114
  image_search=True,
115
  ):
116
+ llm = get_llm(model=DEFAULT_GPT_MODEL).get_llm()
117
  # Break input text into chunks
118
  chunks = getChunks(query)
119
 
 
122
  return processLargeText(app, chunks)
123
 
124
 
125
+ def getCompletionOnly(
126
+ query: str,
127
+ model: str = "gpt-4",
128
+ ) -> str:
129
+ llm = ChatOpenAI(model_name=model, temperature=1.7, openai_api_key=OPENAI_API_KEY)
130
+ chain = load_qa_chain(llm, chain_type="stuff")
131
+ test_question = """Please return the link of best relatedness of item which the title is "Android Studio in browser" from the below data. [{"title": "Android Studio", "link": "https://android.com"} , {"title": "What's this?", "link": "https://test.com"} , {"title": "How are you?", "link": "https://d.com"}]"""
132
+ chain_data = chain.run(input_documents=[], question=test_question)
133
+ return chain_data
134
+
135
+
136
  def query_image_ask(image_content, message, uuid):
137
  prompt_template = get_prompt_image_with_message(image_content, message)
138
  try: