Update app.py
app.py
CHANGED
@@ -1254,137 +1254,6 @@ def transcribe_audio(filename):
     output = query(filename)
     return output
 
-def whisper_main():
-    filename = save_and_play_audio(audio_recorder)
-    if filename is not None:
-        transcription = transcribe_audio(filename)
-        try:
-            transcript = transcription['text']
-            st.write(transcript)
-
-        except:
-            transcript=''
-            st.write(transcript)
-
-        st.write('Reasoning with your inputs..')
-        response = chat_with_model(transcript)
-        st.write('Response:')
-        st.write(response)
-        filename = generate_filename(response, "txt")
-        create_file(filename, transcript, response, should_save)
-
-        # Whisper to Llama:
-        response = StreamLLMChatResponse(transcript)
-        filename_txt = generate_filename(transcript, "md")
-        create_file(filename_txt, transcript, response, should_save)
-        filename_wav = filename_txt.replace('.txt', '.wav')
-        import shutil
-        try:
-            if os.path.exists(filename):
-                shutil.copyfile(filename, filename_wav)
-        except:
-            st.write('.')
-        if os.path.exists(filename):
-            os.remove(filename)
-
-
-
-
-prompt = '''
-What is MoE?
-What are Multi Agent Systems?
-What is Self Rewarding AI?
-What is Semantic and Episodic memory?
-What is AutoGen?
-What is ChatDev?
-What is Omniverse?
-What is Lumiere?
-What is SORA?
-'''
-
-with st.expander("Prompts", expanded=True):
-    #example_input = st.text_input("Enter your prompt text:", value=prompt, help="Enter text to get a response.")
-    #example_input = st.text_area("Enter Prompt :", '', height=100
-
-    # Search History to ArXiv
-    session_state = {}
-    if "search_queries" not in session_state:
-        session_state["search_queries"] = []
-    example_input = st.text_input("Search", value=session_state["search_queries"][-1] if session_state["search_queries"] else "")
-    if example_input:
-        session_state["search_queries"].append(example_input)
-
-    # Search AI
-    query=example_input
-    if query:
-        result = search_arxiv(query)
-        #search_glossary(query)
-        search_glossary(result)
-        st.markdown(' ')
-
-    st.write("Search history:")
-    for example_input in session_state["search_queries"]:
-        st.write(example_input)
-
-    if st.button("Run Prompt", help="Click to run."):
-        try:
-            response=StreamLLMChatResponse(example_input)
-            create_file(filename, example_input, response, should_save)
-        except:
-            st.write('model is asleep. Starting now on A10 GPU. Please wait one minute then retry. KEDA triggered.')
-
-    openai.api_key = os.getenv('OPENAI_API_KEY')
-    if openai.api_key == None: openai.api_key = st.secrets['OPENAI_API_KEY']
-    menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
-    choice = st.sidebar.selectbox("Output File Type:", menu)
-    model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
-    user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
-
-
-    collength, colupload = st.columns([2,3]) # adjust the ratio as needed
-    with collength:
-        max_length = st.slider(key='maxlength', label="File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
-    with colupload:
-        uploaded_file = st.file_uploader("Add a file for context:", type=["pdf", "xml", "json", "xlsx", "csv", "html", "htm", "md", "txt"])
-    document_sections = deque()
-    document_responses = {}
-    if uploaded_file is not None:
-        file_content = read_file_content(uploaded_file, max_length)
-        document_sections.extend(divide_document(file_content, max_length))
-    if len(document_sections) > 0:
-        if st.button("👁️ View Upload"):
-            st.markdown("**Sections of the uploaded file:**")
-            for i, section in enumerate(list(document_sections)):
-                st.markdown(f"**Section {i+1}**\n{section}")
-        st.markdown("**Chat with the model:**")
-        for i, section in enumerate(list(document_sections)):
-            if i in document_responses:
-                st.markdown(f"**Section {i+1}**\n{document_responses[i]}")
-            else:
-                if st.button(f"Chat about Section {i+1}"):
-                    st.write('Reasoning with your inputs...')
-                    #response = chat_with_model(user_prompt, section, model_choice)
-                    st.write('Response:')
-                    st.write(response)
-                    document_responses[i] = response
-                    filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
-                    create_file(filename, user_prompt, response, should_save)
-                    st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
-
-    if st.button('💬 Chat'):
-        st.write('Reasoning with your inputs...')
-        user_prompt_sections = divide_prompt(user_prompt, max_length)
-        full_response = ''
-        for prompt_section in user_prompt_sections:
-            response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice)
-            full_response += response + '\n' # Combine the responses
-        response = full_response
-        st.write('Response:')
-        st.write(response)
-        filename = generate_filename(user_prompt, choice)
-        create_file(filename, user_prompt, response, should_save)
-
-
 
 # Sample function to demonstrate a response, replace with your own logic
 def StreamMedChatResponse(topic):
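A note on the `whisper_main` block removed above: `generate_filename(transcript, "md")` presumably returns a name ending in `.md`, so the follow-up `filename_txt.replace('.txt', '.wav')` never matches anything and the audio copy keeps the `.md` name. Below is a minimal extension-safe sketch, assuming only the standard library; `swap_extension` and `copy_audio_alongside_transcript` are hypothetical helpers, not functions from app.py.

import os
import shutil
from typing import Optional

def swap_extension(path: str, new_ext: str) -> str:
    # Replace whatever extension the path has, rather than string-matching '.txt'.
    root, _old_ext = os.path.splitext(path)
    return root + new_ext

def copy_audio_alongside_transcript(audio_path: str, transcript_path: str) -> Optional[str]:
    # Mirror the transcript's name for the audio copy; return the destination,
    # or None when the source recording is already gone.
    dest = swap_extension(transcript_path, '.wav')
    if os.path.exists(audio_path):
        shutil.copyfile(audio_path, dest)
        return dest
    return None

print(swap_extension('2024-01-01-transcript.md', '.wav'))  # 2024-01-01-transcript.wav

The bare `except:` clauses in the same block are also worth narrowing to `except Exception:` so keyboard interrupts and genuine failures are not silently swallowed.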
@@ -1492,11 +1361,136 @@ if st.button("Clear Query Parameters", key='ClearQueryParams'):
     st.experimental_set_query_params
     st.experimental_rerun()
 
-
-
-
-
-
-
-
-
+
+st.markdown("### 🎲🗺️ Arxiv Paper Search QA RAG MAS using Streamlit and Gradio API")
+
+filename = save_and_play_audio(audio_recorder)
+if filename is not None:
+    transcription = transcribe_audio(filename)
+    try:
+        transcript = transcription['text']
+        st.write(transcript)
+
+    except:
+        transcript=''
+        st.write(transcript)
+
+    st.write('Reasoning with your inputs..')
+    response = chat_with_model(transcript)
+    st.write('Response:')
+    st.write(response)
+    filename = generate_filename(response, "txt")
+    create_file(filename, transcript, response, should_save)
+
+    # Whisper to Llama:
+    response = StreamLLMChatResponse(transcript)
+    filename_txt = generate_filename(transcript, "md")
+    create_file(filename_txt, transcript, response, should_save)
+    filename_wav = filename_txt.replace('.txt', '.wav')
+    import shutil
+    try:
+        if os.path.exists(filename):
+            shutil.copyfile(filename, filename_wav)
+    except:
+        st.write('.')
+    if os.path.exists(filename):
+        os.remove(filename)
+
+
+
+
+prompt = '''
+What is MoE?
+What are Multi Agent Systems?
+What is Self Rewarding AI?
+What is Semantic and Episodic memory?
+What is AutoGen?
+What is ChatDev?
+What is Omniverse?
+What is Lumiere?
+What is SORA?
+'''
+
+
+# Search History to ArXiv
+session_state = {}
+if "search_queries" not in session_state:
+    session_state["search_queries"] = []
+example_input = st.text_input("Search", value=session_state["search_queries"][-1] if session_state["search_queries"] else "")
+if example_input:
+    session_state["search_queries"].append(example_input)
+
+# Search AI
+query=example_input
+if query:
+    result = search_arxiv(query)
+    #search_glossary(query)
+    search_glossary(result)
+    st.markdown(' ')
+
+st.write("Search history:")
+for example_input in session_state["search_queries"]:
+    st.write(example_input)
+
+if st.button("Run Prompt", help="Click to run."):
+    try:
+        response=StreamLLMChatResponse(example_input)
+        create_file(filename, example_input, response, should_save)
+    except:
+        st.write('model is asleep. Starting now on A10 GPU. Please wait one minute then retry. KEDA triggered.')
+
+openai.api_key = os.getenv('OPENAI_API_KEY')
+if openai.api_key == None: openai.api_key = st.secrets['OPENAI_API_KEY']
+menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
+choice = st.sidebar.selectbox("Output File Type:", menu)
+model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
+user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
+
+
+collength, colupload = st.columns([2,3]) # adjust the ratio as needed
+with collength:
+    max_length = st.slider(key='maxlength', label="File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
+with colupload:
+    uploaded_file = st.file_uploader("Add a file for context:", type=["pdf", "xml", "json", "xlsx", "csv", "html", "htm", "md", "txt"])
+document_sections = deque()
+document_responses = {}
+if uploaded_file is not None:
+    file_content = read_file_content(uploaded_file, max_length)
+    document_sections.extend(divide_document(file_content, max_length))
+if len(document_sections) > 0:
+    if st.button("👁️ View Upload"):
+        st.markdown("**Sections of the uploaded file:**")
+        for i, section in enumerate(list(document_sections)):
+            st.markdown(f"**Section {i+1}**\n{section}")
+    st.markdown("**Chat with the model:**")
+    for i, section in enumerate(list(document_sections)):
+        if i in document_responses:
+            st.markdown(f"**Section {i+1}**\n{document_responses[i]}")
+        else:
+            if st.button(f"Chat about Section {i+1}"):
+                st.write('Reasoning with your inputs...')
+                #response = chat_with_model(user_prompt, section, model_choice)
+                st.write('Response:')
+                st.write(response)
+                document_responses[i] = response
+                filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
+                create_file(filename, user_prompt, response, should_save)
+                st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
+
+if st.button('💬 Chat'):
+    st.write('Reasoning with your inputs...')
+    user_prompt_sections = divide_prompt(user_prompt, max_length)
+    full_response = ''
+    for prompt_section in user_prompt_sections:
+        response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice)
+        full_response += response + '\n' # Combine the responses
+    response = full_response
+    st.write('Response:')
+    st.write(response)
+    filename = generate_filename(user_prompt, choice)
+    create_file(filename, user_prompt, response, should_save)
+
+display_glossary_grid(roleplaying_glossary) # Word Glossary Jump Grid
+display_videos_and_links() # Video Jump Grid
+display_images_and_wikipedia_summaries() # Image Jump Grid
+#display_buttons_with_scores() # Feedback Jump Grid
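One caveat on the re-added search-history code above: `session_state = {}` creates a fresh plain dict on every Streamlit rerun, so the `search_queries` list is empty each time the script re-executes and no history ever accumulates. Below is a minimal sketch of the same idea on top of `st.session_state`, which does persist across reruns; the widget labels are illustrative, not taken from app.py.

import streamlit as st

# st.session_state survives reruns for the lifetime of the browser session.
if "search_queries" not in st.session_state:
    st.session_state["search_queries"] = []

query = st.text_input(
    "Search",
    value=st.session_state["search_queries"][-1] if st.session_state["search_queries"] else "",
)

# Append only new values so a rerun with the same input does not duplicate it.
if query and (not st.session_state["search_queries"]
              or st.session_state["search_queries"][-1] != query):
    st.session_state["search_queries"].append(query)

st.write("Search history:")
for past_query in st.session_state["search_queries"]:
    st.write(past_query)

The same persistence point applies to `document_responses = {}` in the upload-chat block: per-section answers stored there vanish on the next rerun unless they are parked in `st.session_state` as well.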