Ishaan Shah
committed on
Commit
·
467b7f2
1
Parent(s):
2c0039e
lesgo2
Browse files
app.py
CHANGED
|
@@ -46,11 +46,12 @@ def process_llm_response(llm_response):
|
|
| 46 |
print(source.metadata['source']+ "Page Number: " + str(source.metadata['page']))
|
| 47 |
response_data['sources'].append({"book": source.metadata['source'], "page": source.metadata['page']})
|
| 48 |
# return json.dumps(response_data)
|
| 49 |
-
return
|
| 50 |
|
| 51 |
def get_answer(question):
    """Run the QA chain on *question* and unpack the processed response.

    Returns a tuple ``(result, sources)`` taken from the dict produced by
    ``process_llm_response``.
    """
    # Feed the raw chain output straight into the post-processor.
    processed = process_llm_response(qa_chain(question))
    return processed["result"], processed["sources"]
|
| 55 |
|
| 56 |
# @app.route('/question', methods=['POST'])
|
|
@@ -144,10 +145,17 @@ Use only text found in the context as your knowledge source for the answer.
|
|
| 144 |
----------------
|
| 145 |
{context}"""
|
| 146 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 147 |
def getanswer(question):
|
| 148 |
llm_response = qa_chain(question)
|
| 149 |
response = process_llm_response(llm_response)
|
| 150 |
-
|
|
|
|
| 151 |
|
| 152 |
# iface = gr.Interface(fn=getanswer, inputs="text", outputs="text")
|
| 153 |
# iface.launch()
|
|
|
|
| 46 |
print(source.metadata['source']+ "Page Number: " + str(source.metadata['page']))
|
| 47 |
response_data['sources'].append({"book": source.metadata['source'], "page": source.metadata['page']})
|
| 48 |
# return json.dumps(response_data)
|
| 49 |
+
return response_data
|
| 50 |
|
| 51 |
def get_answer(question):
    """Answer *question* through the retrieval QA chain.

    Returns ``(result, sources)`` extracted from the dict that
    ``process_llm_response`` builds from the raw chain output.
    """
    # Chain invocation and post-processing collapsed into one expression.
    processed = process_llm_response(qa_chain(question))
    return processed["result"], processed["sources"]
|
| 56 |
|
| 57 |
# @app.route('/question', methods=['POST'])
|
|
|
|
| 145 |
----------------
|
| 146 |
{context}"""
|
| 147 |
|
| 148 |
+
|
| 149 |
+
def print_array(arr):
|
| 150 |
+
# Convert the array to a string representation
|
| 151 |
+
arr_str = str(arr)
|
| 152 |
+
return arr_str
|
| 153 |
+
|
| 154 |
def getanswer(question):
    """Answer *question* for the Gradio-facing entry point.

    Returns ``(result, sources_text)`` where the sources list has been
    stringified via ``print_array`` for display.
    """
    # Run the chain, then normalize its output into the response dict.
    processed = process_llm_response(qa_chain(question))
    # Sources are returned as one display string rather than a list.
    return processed["result"], print_array(processed["sources"])
|
| 159 |
|
| 160 |
# iface = gr.Interface(fn=getanswer, inputs="text", outputs="text")
|
| 161 |
# iface.launch()
|