# Streamlit front-end for "The Odyssey" RAG demo (talks to a local Flask /search API).
import streamlit as st
import requests
import json
import os

# Flask API endpoint for querying.
# Generalized: read from the API_URL environment variable so the UI can point
# at a remote backend without a code change; the default is unchanged.
API_URL = os.environ.get("API_URL", "http://localhost:5000/search")

# Set Streamlit page configuration (tab title, icon, centered layout).
st.set_page_config(
    page_title="The Odyssey RAG",
    page_icon="",
    layout="centered",
)
# Page header.
st.title("Explore the Odyssey with this RAG")
st.markdown("Ask questions about Homer's Odyssey (might get spoilers for the upcoming movie ;)")

# Initialise the per-browser-session chat history exactly once.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the conversation so far so the transcript survives reruns.
for past in st.session_state.messages:
    with st.chat_message(past["role"]):
        st.markdown(past["content"])
if prompt := st.chat_input("Ask anything about the Odyssey"):
    # Record and echo the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Call the Flask API backend to get the answer and sources.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        message_placeholder.markdown("Thinking... ")
        try:
            payload = {"query": prompt}
            response = requests.post(API_URL, json=payload, timeout=60)
            if response.status_code == 200:
                data = response.json()

                # Display the answer.
                answer_text = data.get("answer", "I'm sorry, I couldn't generate an answer.")
                message_placeholder.markdown(answer_text)

                # Display sources.
                # BUG FIX: the original gated the expander on data["sources"]
                # but then iterated data["results"], so a backend returning
                # only "results" never showed its sources and one returning
                # only "sources" opened an empty expander. Accept either key.
                results = data.get("results") or data.get("sources") or []
                if results:
                    with st.expander("View Sources"):
                        for result in results:
                            episode_id = result.get('episode_id', 'N/A')
                            summary = result.get('summary', 'No summary available')
                            # Prefer the full text from the nested payload if
                            # present; fall back to the flat "text" field.
                            full_text = result.get('payload', {}).get(
                                'episode_text', result.get('text', ''))

                            # Display episode info.
                            st.markdown(f"**Episode ID:** `{episode_id}`")
                            st.markdown(f"**Score:** {result.get('score', 0):.3f}")

                            # Show summary when the backend supplied one.
                            if summary:
                                st.markdown(f"**Summary:** {summary}")

                            # Show full episode text; long texts go in a
                            # scrollable container so they don't swamp the page.
                            st.markdown("**Full Text:**")
                            if len(full_text) > 1000:
                                with st.container(height=400):
                                    st.markdown(full_text)
                            else:
                                st.markdown(full_text)
                            st.markdown("---")

                # Save this answer to history.
                st.session_state.messages.append({"role": "assistant", "content": answer_text})
            else:
                # Non-200: surface the backend's status and body verbatim.
                error_msg = f"Error: {response.status_code} - {response.text}"
                message_placeholder.markdown(f"⚠️ {error_msg}")
                st.session_state.messages.append({"role": "assistant", "content": error_msg})
        except requests.exceptions.RequestException as e:
            # Connection refused / timeout: the backend is probably not up.
            err_msg = f"Connection Error: Is the Flask backend running? \n{str(e)}"
            message_placeholder.markdown(f"⚠️ {err_msg}")
            st.session_state.messages.append({"role": "assistant", "content": err_msg})
# Sidebar: app description plus a control to reset the conversation.
with st.sidebar:
    st.header("About")
    about_md = """
    This application uses:
    - **Flask** for the backend API.
    - **Qdrant** for vector storage.
    - **Groq** for LLM inference.
    - **Streamlit** for this UI.
    """
    st.info(about_md)
    if st.button("Clear Chat History"):
        # Wipe the stored transcript and redraw the page immediately.
        st.session_state.messages = []
        st.rerun()