Update main.py
main.py CHANGED

@@ -80,7 +80,7 @@ async def pdf_file_qa_process(user_question: str, request: Request, file_to_proc
     uploaded_file = file_to_process.uploaded_file
     print("File received:"+uploaded_file.filename)
 
-
+    user_question = request.query_params.get("user_question")
     filename = request.query_params.get("filename")
     print("User entered question: "+user_question)
     print("User uploaded file: "+filename)
@@ -115,7 +115,7 @@ async def pdf_file_qa_process(user_question: str, request: Request, file_to_proc
     print("db_embeddings created...")
 
     #question = var_query.query
-    question =
+    question = user_question
     print("API Call Query Received: "+question)
     q_embedding=get_embeddings(question)
     final_q_embedding = torch.FloatTensor(q_embedding)
@@ -143,7 +143,11 @@ async def pdf_file_qa_process(user_question: str, request: Request, file_to_proc
 
     loader = TextLoader(file_path, encoding="utf-8")
     loaded_documents = loader.load()
+    print("*****loaded_documents******")
     print(loaded_documents)
+    print("***********")
+    print(question)
+    print("*****question******")
 
     print("LLM Chain Starts...")
     start_2 = timeit.default_timer()
@@ -151,14 +155,19 @@ async def pdf_file_qa_process(user_question: str, request: Request, file_to_proc
     end_2 = timeit.default_timer()
     print("LLM Chain Ends...")
     print(f'LLM Chain elapsed time: @ {end_2 - start_2}')
+
+    print(temp_ai_response)
 
     initial_ai_response=temp_ai_response['output_text']
 
+    print(initial_ai_response)
+
     cleaned_initial_ai_response = remove_context(initial_ai_response)
 
     #final_ai_response = cleaned_initial_ai_response.partition('¿Cuál es')[0].strip().replace('\n\n', '\n').replace('<|end|>', '').replace('<|user|>', '').replace('<|system|>', '').replace('<|assistant|>', '')
     final_ai_response = cleaned_initial_ai_response.partition('¿Cuál es')[0].strip()
     final_ai_response = final_ai_response.partition('¿Cuáles')[0].strip()
+    final_ai_response = final_ai_response.partition('¿Qué es')[0].strip()
     final_ai_response = final_ai_response.partition('<|end|>')[0].strip().replace('\n\n', '\n').replace('<|end|>', '').replace('<|user|>', '').replace('<|system|>', '').replace('<|assistant|>', '')
     new_final_ai_response = final_ai_response.split('Unhelpful Answer:')[0].strip()
     new_final_ai_response = new_final_ai_response.split('Note:')[0].strip()
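
A note on the query-parameter wiring above: the new user_question = request.query_params.get("user_question") assignment shadows the function argument of the same name, and Starlette's QueryParams.get() returns None when the key is absent, in which case the string concatenation in print("User entered question: "+user_question) raises a TypeError. A minimal defensive sketch, hypothetical and not part of this commit, that falls back to values already in scope:

# Hypothetical hardening, not in the commit: fall back to the function
# argument (and the upload's own name) when the query string omits a key,
# since request.query_params.get(...) returns None for missing parameters.
user_question = request.query_params.get("user_question") or user_question
filename = request.query_params.get("filename") or uploaded_file.filename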
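
The debug prints added around the timing block, together with the existing temp_ai_response['output_text'] lookup, match the dict that LangChain's question-answering chains return. The chain construction itself is outside this diff, so the sketch below is an assumption: llm is a placeholder for whatever model the app loads elsewhere, and load_qa_chain is one plausible way that dict shape arises.

# Sketch under assumptions: the diff only shows the prints, not the chain.
# 'llm' is a placeholder; a LangChain QA chain returns a dict that carries
# the generation under 'output_text', which is the key the code reads.
from langchain.chains.question_answering import load_qa_chain

chain = load_qa_chain(llm, chain_type="stuff")
temp_ai_response = chain({"input_documents": loaded_documents, "question": question})
print(temp_ai_response)  # full result dict, as the new debug print does
initial_ai_response = temp_ai_response['output_text']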
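
The cleanup chain now cuts the answer at a third Spanish follow-up prefix, '¿Qué es', alongside '¿Cuál es' and '¿Cuáles'. Since the same partition-and-strip step now repeats several times, one possible refactor, hypothetical and not part of the commit, is to drive the whole cleanup from marker lists:

# Hypothetical refactor mirroring the partition/split chain in the diff:
# cut the model output at the first stop marker, then strip chat-template
# tokens and collapse doubled newlines.
STOP_MARKERS = ['¿Cuál es', '¿Cuáles', '¿Qué es', '<|end|>',
                'Unhelpful Answer:', 'Note:']
CHAT_TOKENS = ['<|end|>', '<|user|>', '<|system|>', '<|assistant|>']

def clean_ai_response(text: str) -> str:
    for marker in STOP_MARKERS:
        text = text.partition(marker)[0].strip()
    text = text.replace('\n\n', '\n')
    for token in CHAT_TOKENS:
        text = text.replace(token, '')
    return text.strip()

Applied to cleaned_initial_ai_response, this yields essentially the same new_final_ai_response as the inline chain while keeping the marker list in one place.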