# (Removed "Spaces: / Sleeping / Sleeping" — Hugging Face Spaces page-status
# text captured by the scrape; it was never part of this source file.)
"""
Gradio Chat UI for Podcast Assistant.
A modern, minimalistic interface for exploring Lex Fridman podcast insights.
"""
import os
import json
import uuid
from datetime import datetime
from typing import Generator

import gradio as gr
from dotenv import load_dotenv
from huggingface_hub import HfApi, hf_hub_download

from assistant import PodcastAssistant

# Load environment variables from local .env
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
load_dotenv(dotenv_path=os.path.join(SCRIPT_DIR, '.env'))

# Configuration
MAX_MESSAGES = 3  # per-conversation cap on user messages
FEEDBACK_REPO = "StanKonkin/podcast-assistant-feedback"
FEEDBACK_FILE = "feedback.json"

# Load custom CSS from external file.
# encoding is explicit so the stylesheet (which contains non-ASCII glyphs)
# reads identically regardless of the platform's locale default.
with open(os.path.join(SCRIPT_DIR, "styles.css"), "r", encoding="utf-8") as f:
    CUSTOM_CSS = f.read()

# Example questions for quick start
EXAMPLES = [
    "What habits and routines do high performers follow?",
    "What is the future of AI and AGI according to experts?",
    "How should young people approach their career and education?"
]
class FeedbackLogger:
    """
    Saves conversation feedback to a Hugging Face dataset.

    Privacy-focused: No user-specific data collected.
    Only stores: timestamp, conversation turns, and feedback.
    Data is persisted to HF dataset repo, surviving Space restarts.
    A local cache file serves as a fallback when the Hub is unreachable.
    """

    def __init__(self, repo_id: str, filename: str):
        self.repo_id = repo_id    # HF dataset repo, e.g. "user/feedback"
        self.filename = filename  # path of the JSON file inside the repo
        self.api = HfApi()
        # Local fallback copy; named after the dataset file so two loggers
        # with different target files never clobber each other's caches.
        # (Fixes a leaked metadata placeholder that was hard-coded here.)
        self._local_cache = os.path.join(SCRIPT_DIR, f".{filename}.cache")

    def _load_feedback(self) -> list:
        """Load feedback entries from HF dataset, falling back to the local cache."""
        try:
            # Download the latest version from HF
            local_path = hf_hub_download(
                repo_id=self.repo_id,
                filename=self.filename,
                repo_type="dataset",
                force_download=True  # Always get latest
            )
            with open(local_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception as e:
            print(f"Warning: Could not load feedback from HF: {e}")
            # Try local cache as fallback
            if os.path.exists(self._local_cache):
                with open(self._local_cache, 'r', encoding='utf-8') as f:
                    return json.load(f)
            return []

    def _save_feedback(self, entries: list):
        """Save feedback entries to HF dataset (local cache written first)."""
        # Save to local cache first so data survives a failed upload
        with open(self._local_cache, 'w', encoding='utf-8') as f:
            json.dump(entries, f, indent=2)
        try:
            # Upload to HF dataset
            self.api.upload_file(
                path_or_fileobj=self._local_cache,
                path_in_repo=self.filename,
                repo_id=self.repo_id,
                repo_type="dataset",
                commit_message=f"Add feedback entry {datetime.now().isoformat()}"
            )
        except Exception as e:
            # Deliberate best-effort: data is still saved locally as fallback
            print(f"Warning: Could not save feedback to HF: {e}")

    def save_with_feedback(self, turns: list, is_helpful: bool, feedback_text: str = ""):
        """
        Save conversation and feedback together.
        Only called when user explicitly submits feedback.
        No data is stored until feedback is submitted.

        Args:
            turns: chat history as [{"role": ..., "content": ...}, ...].
            is_helpful: True for a thumbs-up, False for a thumbs-down.
            feedback_text: optional free-form comment from the user.
        """
        entries = self._load_feedback()
        # Create minimal, privacy-focused entry (only role/content are kept)
        entry = {
            "timestamp": datetime.now().isoformat(),
            "turns": [{"role": msg["role"], "content": msg["content"]} for msg in turns],
            "feedback": {
                "helpful": is_helpful,
                "text": feedback_text if feedback_text else ""
            }
        }
        entries.append(entry)
        self._save_feedback(entries)
# Initialize feedback logger (module-level singleton shared by all handlers)
feedback_logger = FeedbackLogger(FEEDBACK_REPO, FEEDBACK_FILE)
def create_app():
    """Create and configure the Gradio application.

    Returns:
        (app, custom_theme): the Blocks UI and the theme object. The caller
        is responsible for queueing and launching `app`.
    """
    # Create a custom theme that supports both light and dark modes
    custom_theme = gr.themes.Soft(
        primary_hue=gr.themes.Color(
            c50="#f0fdfa",
            c100="#ccfbf1",
            c200="#99f6e4",
            c300="#5eead4",
            c400="#2dd4bf",
            c500="#0091ad",  # Our primary teal
            c600="#0091ad",
            c700="#007a94",
            c800="#006275",
            c900="#004d5c",
            c950="#003844",
        ),
        secondary_hue="slate",
        neutral_hue="slate",
        font=[
            gr.themes.GoogleFont("Poppins"),
            "ui-sans-serif",
            "system-ui",
            "sans-serif",
        ],
    )
    with gr.Blocks(title="Podcast Assistant") as app:
        # State variables (one copy per browser session)
        conversation_id = gr.State(lambda: str(uuid.uuid4()))
        message_count = gr.State(0)        # user messages sent so far
        is_frozen = gr.State(False)        # True once the conversation is locked
        pending_feedback = gr.State(None)  # Stores True (helpful) or False (unhelpful)
        assistant_state = gr.State(None)   # Will hold the PodcastAssistant instance
        # Header - centered
        gr.Markdown("""
        <div class="header-content">
            <h1>🎧 Podcast Assistant</h1>
            <p>Ask questions about Lex Fridman podcast episodes. Get answers with quotes and links.</p>
        </div>
        """, elem_classes=["header-section"])
        # Chat interface
        chatbot = gr.Chatbot(
            value=[],
            height=270,
            show_label=False,
            elem_id="chatbot"
        )
        # Loading indicator (hidden by default) - animated
        loading_msg = gr.Markdown(
            value="🔍 Thinking and searching...",
            visible=False,
            elem_classes=["loading-msg"]
        )
        # Message counter - minimal
        counter_display = gr.Markdown(
            value="💬 3 messages remaining",
            elem_classes=["message-counter"]
        )
        # Limit reached message (hidden by default)
        limit_msg = gr.Markdown(
            value="✨ You've used all 3 messages. Start a new conversation to continue exploring!",
            visible=False,
            elem_classes=["limit-msg"]
        )
        # Input area with buttons stacked on right
        with gr.Row(elem_classes=["input-row"]):
            msg_input = gr.Textbox(
                placeholder="Ask about habits, AI, science, philosophy...",
                show_label=False,
                scale=8,
                container=False
            )
            with gr.Column(scale=1, elem_classes=["buttons-col"]):
                submit_btn = gr.Button("Send ➤", variant="primary")
                with gr.Column(elem_classes=["new-btn"]):
                    new_conv_btn = gr.Button("New ↻")
        # Frozen state message (hidden by default)
        frozen_msg = gr.Markdown(
            visible=False,
            elem_classes=["frozen-message"]
        )
        # Example buttons - horizontal row
        with gr.Row(elem_classes=["examples-section"]):
            gr.Markdown("💡 Try examples:")
            example_btns = [
                gr.Button(ex, size="sm", elem_classes=["example-btn"])
                for ex in EXAMPLES
            ]
        # Feedback section - inline, hidden until response complete
        with gr.Row(visible=False, elem_classes=["feedback-section"]) as feedback_group:
            gr.Markdown("🤔 Was this helpful?")
            helpful_btn = gr.Button("👍 Yes", size="sm")
            unhelpful_btn = gr.Button("👎 No", size="sm")
        # Feedback text input (hidden until button clicked)
        with gr.Group(visible=False, elem_classes=["feedback-input"]) as feedback_input_group:
            feedback_text = gr.Textbox(
                placeholder="Any additional thoughts? (optional)",
                show_label=False,
                lines=2
            )
            submit_feedback_btn = gr.Button("Submit 📨", variant="primary")
        # Footer
        gr.Markdown(
            "AI Podcast Assistant v2.0 • Built with Langchain and Gradio • Made with ❤️ for learning",
            elem_classes=["footer"]
        )

        # ============ Event Handlers ============
        def respond(
            message: str,
            history: list,
            conv_id: str,
            msg_count: int,
            frozen: bool,
            assistant: PodcastAssistant
        ) -> Generator:
            """Handle user message and stream response.

            Generator: every yield is a 10-tuple matching `respond_outputs`
            defined below. The same tuple order is repeated at every yield
            site — keep them in sync if outputs ever change.
            """
            import time  # local import: only used for the streaming pacing below
            # Output order: chatbot, message_count, is_frozen, assistant_state,
            # counter_display, loading_msg, feedback_group, limit_msg,
            # msg_input, submit_btn
            if frozen or not message.strip():
                # No-op path: conversation locked or blank input; leave UI as-is.
                yield (
                    history, msg_count, frozen, assistant,
                    gr.update(), gr.update(), gr.update(), gr.update(),
                    gr.update(), gr.update()
                )
                return
            # Initialize assistant if needed (lazy: created on first message)
            if assistant is None:
                assistant = PodcastAssistant()
            # Add user message to history
            history = history + [{"role": "user", "content": message}]
            new_count = msg_count + 1
            # Update counter display
            remaining = MAX_MESSAGES - new_count
            counter_text = f"💬 {remaining} message{'s' if remaining != 1 else ''} remaining"
            # Yield initial state - show loading, disable inputs
            yield (
                history,
                new_count,
                frozen,
                assistant,
                gr.update(value=counter_text),
                gr.update(visible=True),  # Show loading
                gr.update(visible=False),  # Hide feedback
                gr.update(visible=False),  # Hide limit msg
                gr.update(value="", interactive=False),  # Clear and disable input
                gr.update(interactive=False)  # Disable send button
            )
            try:
                # Add empty assistant message that we'll stream into
                history = history + [{"role": "assistant", "content": ""}]
                # Stream the response
                response_text = ""
                chunk_count = 0
                for chunk in assistant.stream_chat(message):
                    response_text += chunk
                    history[-1]["content"] = response_text
                    chunk_count += 1
                    # Add delay every other chunk for smoother streaming
                    if chunk_count % 2 == 0:
                        time.sleep(0.03)
                    yield (
                        history,
                        new_count,
                        frozen,
                        assistant,
                        gr.update(value=counter_text),
                        gr.update(visible=False),  # Hide loading once streaming
                        gr.update(visible=False),
                        gr.update(visible=False),
                        gr.update(interactive=False),  # Keep input disabled
                        gr.update(interactive=False)  # Keep button disabled
                    )
                # Response complete - show feedback, re-enable inputs
                # Note: Conversation is NOT saved here - only saved when feedback is submitted
                if new_count >= MAX_MESSAGES:
                    # Limit reached - keep inputs disabled
                    frozen = True
                    yield (
                        history,
                        new_count,
                        frozen,
                        assistant,
                        gr.update(visible=False),  # Hide counter
                        gr.update(visible=False),  # Hide loading
                        gr.update(visible=True),  # Show feedback
                        gr.update(visible=True),  # Show limit msg
                        gr.update(interactive=False),  # Keep input disabled
                        gr.update(interactive=False)  # Keep button disabled
                    )
                else:
                    # Still have messages remaining - re-enable inputs
                    yield (
                        history,
                        new_count,
                        frozen,
                        assistant,
                        gr.update(value=counter_text),
                        gr.update(visible=False),  # Hide loading
                        gr.update(visible=True),  # Show feedback
                        gr.update(visible=False),
                        gr.update(interactive=True),  # Re-enable input
                        gr.update(interactive=True)  # Re-enable button
                    )
            except Exception as e:
                # Handle errors gracefully - re-enable inputs so the user can retry
                error_msg = "😔 Sorry, something went wrong on our end. We'll make sure to get better! Please try again."
                if history and history[-1]["role"] == "assistant":
                    history[-1]["content"] = error_msg
                else:
                    history = history + [{"role": "assistant", "content": error_msg}]
                yield (
                    history,
                    new_count,
                    frozen,
                    assistant,
                    gr.update(value=counter_text),
                    gr.update(visible=False),
                    gr.update(visible=False),
                    gr.update(visible=False),
                    gr.update(interactive=True),  # Re-enable input on error
                    gr.update(interactive=True)  # Re-enable button on error
                )

        def show_feedback_input(is_helpful: bool, conv_id: str):
            """Show feedback text input when helpful/unhelpful clicked.

            Returns (pending_feedback value, feedback_input_group update,
            feedback_group update). `conv_id` is accepted for signature
            symmetry with the other handlers but is not used here.
            """
            return (
                is_helpful,
                gr.update(visible=True),  # Show feedback input
                gr.update(visible=False),  # Hide feedback buttons
            )

        def submit_feedback(
            is_helpful: bool,
            feedback_txt: str,
            conv_id: str,
            history: list
        ):
            """
            Submit feedback and save conversation.
            This is the ONLY place where data is saved.
            Conversation + feedback saved together when user submits.
            The UI is frozen afterwards regardless of whether a save occurred.
            """
            if is_helpful is not None and history:
                feedback_logger.save_with_feedback(history, is_helpful, feedback_txt or "")
            return (
                True,  # Freeze conversation
                gr.update(visible=False),  # Hide feedback input
                gr.update(interactive=False),  # Disable input
                gr.update(value="✅ Appreciate your feedback! Click \"New\" to start fresh."),
                gr.update(value="")  # Clear feedback text box for next time
            )

        def start_new_conversation():
            """Reset everything for a new conversation.

            Returns one value per output wired on `new_conv_btn.click` below,
            in the same order.
            """
            new_id = str(uuid.uuid4())
            new_assistant = PodcastAssistant()
            return (
                [],  # Clear chat
                new_id,  # New conversation ID
                0,  # Reset message count
                False,  # Unfreeze
                new_assistant,  # New assistant
                None,  # Clear pending feedback
                gr.update(value="💬 3 messages remaining", visible=True),  # Reset counter
                gr.update(visible=False),  # Hide feedback group
                gr.update(visible=False),  # Hide feedback input
                gr.update(value="", interactive=True),  # Clear and enable input
                gr.update(visible=False),  # Hide frozen message
                gr.update(visible=False),  # Hide limit msg
                gr.update(visible=False),  # Hide loading
                gr.update(interactive=True),  # Enable send button
            )

        # Common outputs for respond function (order must match every yield
        # inside respond)
        respond_outputs = [
            chatbot, message_count, is_frozen, assistant_state,
            counter_display, loading_msg, feedback_group, limit_msg,
            msg_input, submit_btn
        ]
        # Wire up events
        msg_input.submit(
            fn=respond,
            inputs=[msg_input, chatbot, conversation_id, message_count, is_frozen, assistant_state],
            outputs=respond_outputs
        )
        submit_btn.click(
            fn=respond,
            inputs=[msg_input, chatbot, conversation_id, message_count, is_frozen, assistant_state],
            outputs=respond_outputs
        )
        # Example buttons - send message directly without showing in textbox
        for btn, example in zip(example_btns, EXAMPLES):
            # Create a closure to capture the example text (avoids the classic
            # late-binding loop-variable pitfall)
            def make_example_handler(ex_text):
                def handler(history, conv_id, msg_count, frozen, assistant):
                    yield from respond(ex_text, history, conv_id, msg_count, frozen, assistant)
                return handler
            btn.click(
                fn=make_example_handler(example),
                inputs=[chatbot, conversation_id, message_count, is_frozen, assistant_state],
                outputs=respond_outputs
            )
        # Feedback buttons
        helpful_btn.click(
            fn=lambda conv_id: show_feedback_input(True, conv_id),
            inputs=[conversation_id],
            outputs=[pending_feedback, feedback_input_group, feedback_group]
        )
        unhelpful_btn.click(
            fn=lambda conv_id: show_feedback_input(False, conv_id),
            inputs=[conversation_id],
            outputs=[pending_feedback, feedback_input_group, feedback_group]
        )
        submit_feedback_btn.click(
            fn=submit_feedback,
            inputs=[pending_feedback, feedback_text, conversation_id, chatbot],
            outputs=[is_frozen, feedback_input_group, msg_input, frozen_msg, feedback_text]
        ).then(
            # Reveal the thank-you message only after the save has completed
            fn=lambda: gr.update(visible=True),
            outputs=[frozen_msg]
        )
        # New conversation button
        new_conv_btn.click(
            fn=start_new_conversation,
            outputs=[
                chatbot, conversation_id, message_count, is_frozen, assistant_state,
                pending_feedback, counter_display, feedback_group, feedback_input_group,
                msg_input, frozen_msg, limit_msg, loading_msg, submit_btn
            ]
        )
    return app, custom_theme
if __name__ == "__main__":
    app, theme = create_app()
    app.queue(
        max_size=20,  # queue up to 20 waiting requests
        default_concurrency_limit=4,  # handle 4 requests simultaneously
    )
    # server_name="0.0.0.0" allows access from other devices on the same network
    # NOTE(review): in current Gradio releases `theme` and `css` are
    # gr.Blocks(...) constructor arguments, not launch() arguments, and
    # `footer_links` is not a documented launch() parameter — confirm these
    # are accepted by the Gradio version pinned for this Space.
    app.launch(
        theme=theme,
        css=CUSTOM_CSS,
        footer_links=[],
        server_name="0.0.0.0",
        show_error=True,  # show errors to users instead of silent failures
    )