############################################################################################################
# Importing Libraries - Core dependencies for the Schema Study App
#
# This app helps students learn biology concepts through interactive conversations
# with an AI tutor, guided by course-specific terms and schemas.
############################################################################################################
import streamlit as st  # Web app framework
import pandas as pd  # Data handling
import os  # File operations
import logging  # Logging functionality
import time  # Time operations for retry logic
import config  # Local configuration module
from openai import OpenAI  # OpenAI API client
from typing import Dict, List, Any, Optional  # Type hints

# Set up logging to track app activity
logging.basicConfig(filename='app.log', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
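# Note: app.log is written to the working directory; on hosted platforms (e.g., Hugging Face Spaces)
# that filesystem may be ephemeral, so log entries may not persist across restarts.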

############################################################################################################
# Theme Configuration - UI appearance and layout settings
############################################################################################################
# Set page layout, icon, and meta information
st.set_page_config(
    layout="wide",
    page_title="Schema Study - BILD 5",
    page_icon="📚",
    menu_items={
        'Get Help': 'https://keefereuther.com',
        'Report a bug': "mailto:kdreuther@ucsd.edu",
        'About': "# Schema Study\n An AI-enhanced study app for biology students."
    }
)

# Apply custom CSS for better visual appearance
st.markdown("""
<style>
    /* Overall app styling */
    .main .block-container {
        padding-top: 1rem;
    }
    /* Chat container styling */
    .stChatMessage {
        padding: 1rem;
        border-radius: 0.5rem;
        margin-bottom: 1rem;
        border: 1px solid rgba(38, 70, 83, 0.1);
    }
    /* User message styling */
    .stChatMessage[data-testid="user"] {
        background-color: rgba(231, 111, 81, 0.1);
        color: #264653;
        border-left: 4px solid #E76F51;
    }
    /* Assistant message styling */
    .stChatMessage[data-testid="assistant"] {
        background-color: rgba(42, 157, 143, 0.1);
        color: #264653;
        border-left: 4px solid #2A9D8F;
    }
    /* Success message styling */
    .stSuccess {
        background-color: rgba(42, 157, 143, 0.2);
        border-left: 4px solid #2A9D8F;
        color: #264653;
    }
    /* Warning message styling */
    .stWarning {
        background-color: rgba(233, 196, 106, 0.2);
        border-left: 4px solid #E9C46A;
        color: #264653;
    }
    /* Error message styling */
    .stError {
        background-color: rgba(231, 111, 81, 0.2);
        border-left: 4px solid #E76F51;
        color: #264653;
    }
    /* Chat input box styling */
    .stChatInputContainer {
        background-color: rgba(42, 157, 143, 0.05);
        border-radius: 0.5rem;
        padding: 0.5rem;
    }
    .stTextInput input {
        border: 1px solid #2A9D8F !important;
    }
    /* Template button styling */
    .stButton button {
        border: 1px solid #2A9D8F !important;
        border-radius: 6px !important;
        background-color: rgba(42, 157, 143, 0.1) !important;
        color: #264653 !important;
        font-weight: 500 !important;
        box-shadow: none !important;
        width: 100%;
        text-align: center;
        transition: all 0.2s ease;
    }
    /* Button hover effect */
    .stButton button:hover {
        background-color: rgba(42, 157, 143, 0.2) !important;
        border-color: #2A9D8F !important;
        transform: translateY(-1px);
        box-shadow: 0 2px 4px rgba(42, 157, 143, 0.2) !important;
    }
    /* Specific button for clear chat */
    [data-testid="baseButton-secondary"] {
        border-color: #F4A261 !important;
        background-color: rgba(244, 162, 97, 0.1) !important;
        color: #264653 !important;
    }
    [data-testid="baseButton-secondary"]:hover {
        background-color: rgba(244, 162, 97, 0.2) !important;
        border-color: #F4A261 !important;
    }
    /* Sidebar styling */
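    /* Note: .st-emotion-cache-16txtl3 is an auto-generated class name tied to the installed
       Streamlit version; it may need updating after a Streamlit upgrade. */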
    .st-emotion-cache-16txtl3 {
        background-color: rgba(38, 70, 83, 0.03);
    }
    /* Header and subheader styling */
    h1, h2, h3 {
        color: #264653;
    }
    /* Expander styling */
    .streamlit-expanderHeader {
        background-color: rgba(233, 196, 106, 0.1);
        border-radius: 4px;
        border: none;
        color: #264653;
    }
    .streamlit-expanderHeader:hover {
        background-color: rgba(233, 196, 106, 0.2);
    }
    /* Selectbox styling */
    .stSelectbox label {
        color: #264653;
    }
    .stSelectbox div[data-baseweb="select"] > div {
        background-color: rgba(42, 157, 143, 0.05);
        border-color: #2A9D8F;
    }
</style>
""", unsafe_allow_html=True)

############################################################################################################
# Model Configuration - Responses API model capabilities
############################################################################################################
# Model configurations with capability flags
MODEL_CONFIGS = {
    "gpt-5.1": {
        "api_type": "responses",
        "supports_reasoning": True,
        "supports_verbosity": False,
        "supports_temperature": False,  # do NOT send temperature
        "supports_max_tokens": True,  # maps to max_output_tokens
        "supports_web_search": True,  # Supported
    },
    "gpt-4.1": {
        "api_type": "responses",
        "supports_reasoning": False,
        "supports_verbosity": False,
        "supports_temperature": True,
        "supports_max_tokens": True,  # maps to max_output_tokens
        "supports_web_search": True,  # Confirmed working
    },
}
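
# To support another model, add an entry with its capability flags here; get_model_config()
# falls back to conservative defaults for any model name that is not listed.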

def get_model_config(model: str) -> dict:
    """Get configuration for a specific model."""
    return MODEL_CONFIGS.get(
        model,
        {
            "api_type": "responses",
            "supports_reasoning": False,
            "supports_verbosity": False,
            "supports_temperature": True,
            "supports_max_tokens": True,
            "supports_web_search": False,
        },
    )


def build_request_data(
    model: str,
    messages: List[Dict[str, str]],
    reasoning_effort: Optional[str] = None,
    temperature: Optional[float] = None,
    max_tokens: Optional[int] = None,
    enable_web_search: bool = False,
) -> dict:
    """Build a Responses API request body using capability flags."""
    model_config = get_model_config(model)
    # Base payload
    request_data = {"model": model, "input": messages}
    # Web search tool (if enabled and supported by model)
    if enable_web_search and model_config.get("supports_web_search", False):
        request_data["tools"] = [{"type": "web_search", "search_context_size": "low"}]
        request_data["tool_choice"] = "auto"  # Let model decide when to search
        # Web search is incompatible with reasoning effort, so disable it
        reasoning_effort = None
    # GPT-5.1: reasoning (top-level) - only if web search is not enabled
    if model_config["supports_reasoning"] and reasoning_effort and not enable_web_search:
        request_data["reasoning"] = {"effort": reasoning_effort}
    # Temperature (only if supported)
    if model_config["supports_temperature"] and temperature is not None:
        request_data["temperature"] = temperature
    # Map max_tokens -> Responses max_output_tokens
    if model_config["supports_max_tokens"] and max_tokens is not None:
        request_data["max_output_tokens"] = max_tokens
    return request_data
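
# Illustrative example (not executed): for "gpt-4.1" with temperature=0.7 and max_tokens=800,
# build_request_data("gpt-4.1", messages, temperature=0.7, max_tokens=800) returns roughly
# {"model": "gpt-4.1", "input": messages, "temperature": 0.7, "max_output_tokens": 800}.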

with st.sidebar:
    st.header("Configuration")
    # API Key Input Field
    api_key = st.text_input(
        "OpenAI API Key",
        type="password",
        key="api_key",
        help="This is the API key for your OpenAI account. You can find it [here](https://platform.openai.com/api-keys)."
    )
    if api_key:
        st.session_state["OPENAI_API_KEY"] = api_key
    else:
        st.warning("Please provide your OpenAI API key to enable chat functionality.")

############################################################################################################
# Initialize all session state variables - Persistent data between app reruns
############################################################################################################
# Initialize core session state variables if they don't exist
if 'selected_term' not in st.session_state:
    st.session_state.selected_term = None  # Currently selected term
if 'selected_context' not in st.session_state:
    st.session_state.selected_context = None  # Context for the selected term
if 'display_messages' not in st.session_state:
    st.session_state.display_messages = []  # Chat history
if 'display_term' not in st.session_state:
    st.session_state.display_term = False  # Whether to display the term
if 'initial_message_displayed' not in st.session_state:
    st.session_state.initial_message_displayed = False  # Initial message flag
if 'old_term' not in st.session_state:
    st.session_state.old_term = None  # Previously selected term
if 'seen_terms' not in st.session_state:
    st.session_state.seen_terms = set()  # Set of viewed terms
if 'openai_model' not in st.session_state:
    st.session_state.openai_model = config.ai_model  # AI model being used

############################################################################################################
# Loading Terms - Data loading and preparation functions
############################################################################################################
def load_terms(file_path):
    """Loads terms from a CSV file containing terms and definitions.

    Args:
        file_path (str): Path to the CSV file.

    Returns:
        DataFrame: Loaded terms data or empty DataFrame on error.
    """
    try:
        return pd.read_csv(file_path)
    except Exception as e:
        st.error(f"An error occurred while loading the file: {str(e)}")
        logging.exception(f"Error loading file: {e}")
        return pd.DataFrame()


def get_first_column_values(local_df):
    """Extracts values from the first column of a DataFrame.

    Args:
        local_df (DataFrame): DataFrame containing terms.

    Returns:
        list: List of terms from the first column.
    """
    if not local_df.empty:
        return local_df.iloc[:, 0].tolist()
    else:
        return []


# Prepare terms for the app
terms = load_terms(config.default_terms_csv)
term_list = get_first_column_values(terms)

############################################################################################################
# Streamlit app layout - Main UI components and interaction logic
############################################################################################################
# Create two columns with a 1:2 ratio for the main layout
left_col, right_col = st.columns([1, 2])

# Left column for app info, term selection, current term, prompt templates, and instructions
with left_col:
    # App header
    st.header(config.app_title)
    st.markdown("---")

    # Term selection dropdown
    selected_term = st.selectbox('**SELECT FROM THE DROPDOWN MENU**', term_list)
    if selected_term:
        # Display selected term
        st.markdown(f"### {selected_term}")
        # Handle new term selection
        if selected_term != st.session_state.old_term:
            # Get context for the selected term
            term_context = terms[terms.iloc[:, 0] == selected_term].iloc[:, 1].values[0] if not terms.empty else ""
            # Update session state
            st.session_state.selected_context = term_context
            st.session_state.selected_term = selected_term
            # Create initial user message
            user_message = f"What is one thing you know about '{selected_term}'? What do you want to know about it? This could include a definition, examples, misconceptions, associations with other course terms, opinions, etc. You may also choose one of the template buttons on the left to help you get started."
            st.session_state["display_messages"].append({"role": "user", "content": user_message})
            # Save current term as previous term
            st.session_state.old_term = selected_term
            st.rerun()

    # Template buttons section
    st.markdown("**Prompt Templates:**")
    # Calculate layout for template buttons (3 per row)
    buttons_per_row = 3
    num_rows = (len(config.prompt_templates) + buttons_per_row - 1) // buttons_per_row
    # Emoji mapping for templates
    template_emojis = {
        "Misconception Check": "❓",
        "Two Truths & a Lie": "🎮",
        "Connect Terms": "🔄",
        "Schema Map": "🗺️",
        "Create a Study Plan": "📚"
    }
    # Create rows of template buttons
    for row in range(num_rows):
        # Create columns for this row
        start_idx = row * buttons_per_row
        end_idx = min(start_idx + buttons_per_row, len(config.prompt_templates))
        btn_cols = st.columns(end_idx - start_idx)
        # Add buttons for this row
        for i, template in enumerate(config.prompt_templates[start_idx:end_idx]):
            template_name = template["name"]
            # Add emoji to template name
            button_text = f"{template_emojis.get(template_name, '')} {template_name}"
            # Create button and handle click
            if btn_cols[i].button(button_text, key=f"btn_{row}_{i}"):
                # Format template with appropriate variables
                if "term_list" in template["template"]:
                    formatted_content = template["template"].format(term=selected_term, term_list=term_list)
                else:
                    formatted_content = template["template"].format(term=selected_term)
                # Add to chat messages and trigger LLM response
                st.session_state.display_messages.append({"role": "user", "content": formatted_content})
                st.session_state["trigger_llm"] = True
                st.rerun()

    # Instructions for students
    with st.expander("INSTRUCTIONS FOR STUDENTS:"):
        st.markdown(config.instructions)

# Right column for chat window and input
with right_col:
    # Main chat container - displays conversation history
    with st.container(height=450, border=True):
        # Display previous chat messages
        for message in st.session_state["display_messages"][1:]:
            if message["role"] == "user":
                with st.chat_message("user"):
                    st.markdown(message["content"])
            else:
                with st.chat_message("assistant"):
                    st.markdown(message["content"])

        # Generate and display AI response when triggered
        if st.session_state.get("trigger_llm", False):
            try:
                # Initialize OpenAI client from session state
                api_key = st.session_state.get("OPENAI_API_KEY", "")
                if api_key:
                    client = OpenAI(api_key=api_key)
                else:
                    # Without a key there is nothing to call; show a clear message and bail out of
                    # this generation attempt via the generic error handler below.
                    st.error("🔒 Missing API key - please enter it in the sidebar to enable responses.")
                    raise ValueError("OpenAI API key is missing")

                # Generate system message with term context
                system_message = config.term_prompt(
                    selected_term=st.session_state.selected_term,
                    selected_context=st.session_state.selected_context,
                    term_list=term_list
                )
                # Prepare messages for API call
                messages = [{"role": "system", "content": system_message}] + [
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state["display_messages"]
                ]
                # Get model configuration
                model_config = get_model_config(st.session_state["openai_model"])

                # Prepare parameters based on model support
                reasoning_effort_param = None
                temperature_param = None
                enable_web_search_param = False
                # Check if web search is enabled and supported
                if config.enable_web_search and model_config.get("supports_web_search", False):
                    enable_web_search_param = True
                    # Web search disables reasoning automatically
                    reasoning_effort_param = None
                elif model_config["supports_reasoning"]:
                    reasoning_effort_param = config.reasoning_effort
                if model_config["supports_temperature"]:
                    temperature_param = config.temperature

                # Build request data for Responses API
                request_data = build_request_data(
                    model=st.session_state["openai_model"],
                    messages=messages,
                    reasoning_effort=reasoning_effort_param,
                    temperature=temperature_param,
                    max_tokens=config.max_tokens,
                    enable_web_search=enable_web_search_param,
                )

                # Create streaming response using Responses API
                with st.chat_message("assistant"):
                    message_placeholder = st.empty()
                    buf: List[str] = []  # collect deltas safely
                    last_delta_ts = time.time()
                    inactivity_limit_s = 60  # stop if no deltas for 60s
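                    # Note: the inactivity check below only runs between stream events; if the
                    # connection stalls with no events at all, the SDK/HTTP-level timeout (if any)
                    # governs how long the call waits instead.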
                    try:
                        # Stream events with timeout handling
                        with client.responses.stream(**request_data) as stream:
                            completed = False
                            for event in stream:
                                et = getattr(event, "type", None)
                                if et == "response.output_text.delta":
                                    # Append the new chunk, update the UI
                                    buf.append(event.delta)
                                    full_response = "".join(buf)
                                    message_placeholder.markdown(full_response + "▌")
                                    last_delta_ts = time.time()
                                elif et == "response.error":
                                    # Show the error inline, then stop
                                    error_msg = getattr(event, "error", "Unknown streaming error")
                                    message_placeholder.error(f"⚠️ Error: {error_msg}")
                                    buf.clear()
                                    buf.append(f"Error while streaming: {error_msg}")
                                    break
                                elif et == "response.completed":
                                    # Response completed successfully
                                    completed = True
                                    break
                                # Inactivity guard: if no deltas for too long, stop
                                if time.time() - last_delta_ts > inactivity_limit_s:
                                    message_placeholder.warning("⚠️ Streaming paused due to inactivity from the server. Partial content shown above.")
                                    break
                            # Remove cursor and show final message
                            if buf:
                                message_placeholder.markdown("".join(buf))
                            # Try to get final response, but don't fail if it's not available
                            try:
                                final = stream.get_final_response()
                            except Exception as final_error:
                                final = None
                                if not completed:
                                    st.warning(f"⚠️ Note: Could not retrieve response metadata: {final_error}")
                    except Exception as e:
                        # Handle streaming exceptions gracefully
                        error_msg = str(e)
                        if "response.completed" in error_msg:
                            # This is expected - the response completed without the event
                            if buf:
                                message_placeholder.markdown("".join(buf))
                                st.info("ℹ️ Response completed successfully (streaming ended)")
                            else:
                                message_placeholder.error("❌ No response content received")
                                buf.clear()
                                buf.append("No response content received")
                        else:
                            # Other streaming errors
                            message_placeholder.error(f"❌ Error while streaming: {error_msg}")
                            buf.clear()
                            buf.append(f"Error while streaming: {error_msg}")

                # Save response to chat history
                response = "".join(buf)
                st.session_state["display_messages"].append({"role": "assistant", "content": response})
                # Log the exchange
                logging.info(f"User prompt: {st.session_state['display_messages'][-2]['content']}")
                logging.info(f"Assistant response: {response}")
            except Exception as e:
                # Handle errors
                st.error(f"An error occurred: {str(e)}")
                logging.exception(f"Error generating response: {e}")

            # Reset trigger flag after response is generated
            st.session_state["trigger_llm"] = False

    # Chat input field
    prompt = st.chat_input("What do you know? What do you want to know?")
    if prompt:
        # Add user message to chat and trigger LLM response
        st.session_state.display_messages.append({"role": "user", "content": prompt})
        st.session_state["trigger_llm"] = True
        st.rerun()

    # Clear chat history button
    if st.button("Clear Chat History"):
        st.session_state["display_messages"] = []
        st.session_state["trigger_llm"] = False
        st.rerun()

    # Warning message about AI limitations
    st.markdown(config.warning_message, unsafe_allow_html=True)

############################################################################################################
# Sidebar Content - Resources and information
############################################################################################################
# Resources section
st.sidebar.title("Resources")

# Display each resource
for resource in config.resources:
    st.sidebar.markdown(f"### {resource['title']}")
    st.sidebar.markdown(resource['description'])
    # Add URL link if available
    if "url" in resource:
        st.sidebar.markdown(f"[Open Link]({resource['url']})")
    # Add file download button if available
    if "file_path" in resource:
        with open(resource["file_path"], "rb") as file:
            btn = st.sidebar.download_button(
                label=f"Download {resource['title']}",
                data=file,
                file_name=os.path.basename(resource["file_path"]),
                mime="application/pdf"
            )
    # Separator between resources
    st.sidebar.markdown("---")

# About section
st.sidebar.markdown("### About")
st.sidebar.markdown(config.app_creation_message)
st.sidebar.markdown("---")
st.sidebar.markdown(config.app_repo_license_message)
st.sidebar.markdown("---")