# Multi-Agent Research Assistant — Streamlit app.
# (Removed non-code file-viewer residue: Space status lines, commit hashes,
#  and the line-number gutter, which are not valid Python.)
import streamlit as st
import os
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from langchain_community.embeddings import HuggingFaceEmbeddings
from visualization import generate_visual_insights
# Import all modules
from document_processor import process_pdfs, create_vector_store
from summarizer import summarize_document
from gap_analyzer import identify_research_gaps
from idea_generator import suggest_research_ideas
from debate_simulator import simulate_debate
from citation_generator import generate_citation
from chat_handler import chat_with_paper
from translator import translate_text
# Load environment variables from a local .env file (no-op if absent).
load_dotenv()
# NOTE(review): if GROQ_API_KEY is unset this is None and ChatGroq will fail
# at request time, not here — consider failing fast; confirm desired behavior.
groq_api_key = os.getenv("GROQ_API_KEY")
# Streamlit UI setup — set_page_config must be the first Streamlit call.
st.set_page_config(page_title="Multi-Agent Research Assistant", layout="wide")
# NOTE(review): "π€" and similar glyphs below look like mojibake from a
# mangled emoji encoding; left untouched because they are runtime strings.
st.title("π€ Multi-Agent Research Assistant")
st.markdown("Enhance your research process with intelligent summarization, critique, debate, translation, citation, and interactive Q&A. Upload a research paper and let our agents do the thinking!")
# Shared Groq-hosted Llama3 chat model used by every agent below.
llm = ChatGroq(groq_api_key=groq_api_key, model_name="Llama3-8b-8192")
# Sentence-transformer embedding model used to build the vector store.
embedding = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
# File uploader: one or more PDFs that feed the ingestion pipeline below.
uploaded_files = st.file_uploader("π Upload one or more PDF files", type=["pdf"], accept_multiple_files=True)
if uploaded_files and st.button("π Process Documents"):
    with st.spinner("Processing documents and generating vector store..."):
        # Parse the PDFs into document chunks and build the similarity index.
        documents = process_pdfs(uploaded_files)
        # Persist in session_state so the results survive Streamlit reruns
        # and the agent tasks below can reuse them without re-processing.
        st.session_state.documents = documents
        st.session_state.vectorstore = create_vector_store(documents, embedding)
        st.session_state.uploaded_files = uploaded_files
        # FIX: this success message was one string literal split across two
        # source lines by a mangled emoji ("β") — a plain string literal
        # cannot span lines in Python, so that was a syntax error; rejoined.
        st.success("β Document vector store created!")
# Agent Activation: route the chosen task to the matching specialist agent.
if "documents" in st.session_state:
    st.subheader("π Master Agent: What would you like me to do?")
    task = st.selectbox("Choose a task:", [
        "Summarize document",
        "Identify research gaps",
        "Suggest research ideas",
        "Simulate a debate",
        "Generate citation",
        "Generate visual insights",
        "Chat with paper"
    ])
    # Dispatch table for the document-level agents that share the same
    # (llm, docs) -> str signature.
    document_agents = {
        "Summarize document": summarize_document,
        "Identify research gaps": identify_research_gaps,
        "Suggest research ideas": suggest_research_ideas,
        "Simulate a debate": simulate_debate,
        "Generate citation": generate_citation,
    }
    if task == "Chat with paper":
        # Retrieval-augmented Q&A needs its own query box, so it is handled
        # separately from the one-shot agents.
        query = st.text_input("π¬ Ask a question about the paper:")
        if query and st.button("π Ask Question"):
            with st.spinner("Searching paper for answer..."):
                answer = chat_with_paper(llm, st.session_state.vectorstore, query)
                st.session_state["last_agent_output"] = answer
    elif st.button("π Run Agent"):
        with st.spinner("Running agents..."):
            # Cap the context at the first 10 chunks to bound prompt size.
            docs = st.session_state.documents[:10]
            output = ""
            agent = document_agents.get(task)
            if agent is not None:
                output = agent(llm, docs)
            elif task == "Generate visual insights":
                with st.spinner("Extracting data and generating visualizations..."):
                    # Needs the raw uploaded files, not the parsed chunks.
                    if "uploaded_files" in st.session_state:
                        insights = generate_visual_insights(llm, st.session_state.uploaded_files)
                        st.session_state["visual_insights"] = insights
                        output = insights['ai_analysis']
                    else:
                        output = "Error: Please re-upload your PDF files to use this feature."
            if output:
                st.session_state["last_agent_output"] = output
# Final display: show the latest agent response, optionally translated.
if "last_agent_output" in st.session_state:
    response = st.session_state["last_agent_output"]
    translate_toggle = st.toggle("π Translate the response?")
    if translate_toggle:
        default_languages = ["Spanish", "French", "German", "Chinese", "Urdu", "Other"]
        selected_language = st.selectbox("Choose translation language:", default_languages)
        # "Other" opens a free-text box for any language the LLM can handle.
        if selected_language == "Other":
            user_language = st.text_input("Please enter your desired language:", key="custom_lang")
        else:
            user_language = selected_language
        # Skip translation until the custom-language box is non-empty.
        if user_language:
            translated = translate_text(llm, response, user_language)
            st.markdown(f"### π Translated Response ({user_language})")
            st.write(translated)
    else:
        st.markdown("### π€ Agent Response")
        st.write(response)
# Visual-insights panel: rendered whenever an earlier "Generate visual
# insights" run stored chart data in session_state.
if "visual_insights" in st.session_state:
    insights = st.session_state["visual_insights"]
    st.markdown("### π Visual Insights")
    # Data-extraction summary produced alongside the charts.
    st.markdown("#### Data Extraction Summary")
    st.info(f"π {insights['data_summary']}")
    st.info(f"π Tables found: {insights['tables_found']}")
    if insights['extracted_numbers']:
        # NOTE(review): assumes extracted_numbers holds strings — str.join
        # raises TypeError on numeric items; confirm against the
        # visualization module.
        st.info(f"π’ Sample extracted numbers: {', '.join(insights['extracted_numbers'][:10])}")
    # Side-by-side charts: interactive Plotly and static Matplotlib.
    col1, col2 = st.columns(2)
    with col1:
        st.markdown("#### Interactive Chart (Plotly)")
        st.plotly_chart(insights['plotly_fig'], use_container_width=True)
    with col2:
        st.markdown("#### Static Chart (Matplotlib)")
        st.pyplot(insights['matplotlib_fig'])
    # LLM commentary on the extracted data.
    st.markdown("#### π€ AI Analysis of Visual Data")
    st.write(insights['ai_analysis'])
    # Clear button: drop the stored insights and redraw without this panel.
    # FIX: removed a stray trailing "|" after st.rerun() (file-viewer scrape
    # artifact that made the line a syntax error).
    if st.button("ποΈ Clear Visual Insights"):
        del st.session_state["visual_insights"]
        st.rerun()