import streamlit as st
import matplotlib.pyplot as plt
import numpy as np
import re
import os
import io
import json
import requests
import base64
from PIL import Image
from gtts import gTTS
from duckduckgo_search import DDGS
import warnings

warnings.filterwarnings('ignore')

# -----------------------------------------------------------------------------
# 1. PAGE CONFIGURATION
# -----------------------------------------------------------------------------
st.set_page_config(
    page_title="H2 Physics Feynman Bot",
    page_icon="⚛️",
    layout="centered",
    initial_sidebar_state="expanded"
)


# -----------------------------------------------------------------------------
# 2. HELPER FUNCTIONS
# -----------------------------------------------------------------------------
@st.cache_data(show_spinner=False, ttl=3600)
def generate_audio(text):
    """Generate MP3 audio (as a BytesIO) from chat text, or None on failure.

    Code fences and [IMAGE: ...] tags are replaced with short spoken phrases,
    and the text is truncated to 1000 chars to keep TTS latency bounded.
    Cached for an hour keyed on the input text.
    """
    clean_text = re.sub(r'```.*?```', 'I have generated a graph.', text, flags=re.DOTALL)
    clean_text = re.sub(r'\[IMAGE:.*?\]', 'Here is a diagram.', clean_text)
    if len(clean_text) > 1000:
        clean_text = clean_text[:1000] + "..."
    try:
        tts = gTTS(text=clean_text, lang='en', slow=False)
        audio_fp = io.BytesIO()
        tts.write_to_fp(audio_fp)
        audio_fp.seek(0)
        return audio_fp
    except Exception:
        # gTTS needs network access; fail quietly so chat still renders.
        return None


def google_search_api(query, api_key, cx):
    """Google Custom Search API image lookup.

    Returns the first direct image URL (preferring common image extensions),
    or None on quota exhaustion (403/429) or any error.
    """
    try:
        url = "https://www.googleapis.com/customsearch/v1"
        params = {
            "q": query,
            "cx": cx,
            "key": api_key,
            "searchType": "image",
            "num": 3,
            "safe": "active",
        }
        response = requests.get(url, params=params, timeout=10)
        if response.status_code in [403, 429]:
            # Quota/auth problem: signal the caller to try the next key/provider.
            return None
        data = response.json()
        if "items" in data and len(data["items"]) > 0:
            # Prefer links that look like a directly embeddable image file.
            for item in data["items"]:
                link = item["link"]
                if link.lower().endswith(('.jpg', '.jpeg', '.png', '.webp')):
                    return link
            return data["items"][0]["link"]
    except Exception:
        return None
    return None


def duckduckgo_search_api(query):
    """DuckDuckGo image search fallback; returns first image URL or None."""
    try:
        with DDGS() as ddgs:
            results = list(ddgs.images(query, max_results=1))
            if results:
                return results[0]['image']
    except Exception:
        return None
    return None


@st.cache_data(show_spinner=False, ttl=300)
def search_image(query):
    """Image search with fallback: Google key 1 -> Google key 2 -> DuckDuckGo.

    Keys are read from st.secrets; cached 5 minutes per query.
    """
    try:
        if hasattr(st, 'secrets'):
            cx = st.secrets.get("GOOGLE_CX", "")
            key1 = st.secrets.get("GOOGLE_SEARCH_KEY", "")
            key2 = st.secrets.get("GOOGLE_SEARCH_KEY_2", "")
            if key1 and cx:
                url = google_search_api(query, key1, cx)
                if url:
                    return url
            if key2 and cx:
                url = google_search_api(query, key2, cx)
                if url:
                    return url
        # Fallback to DuckDuckGo (no API key required).
        return duckduckgo_search_api(query)
    except Exception:
        return None


def _image_to_base64_jpeg(image):
    """Encode a PIL image as base64 JPEG (quality 85), converting to RGB first."""
    buffered = io.BytesIO()
    if image.mode != 'RGB':
        image = image.convert('RGB')
    image.save(buffered, format="JPEG", quality=85)
    return base64.b64encode(buffered.getvalue()).decode('utf-8')


def analyze_image_with_huggingface(image, query="What physics concepts are shown?", hf_token=None):
    """Analyze image using the Hugging Face Inference API (Qwen/Qwen-VL-Chat).

    Returns the model's text answer, or an error string (never raises).
    """
    try:
        img_base64 = _image_to_base64_jpeg(image)
        api_url = "https://api-inference.huggingface.co/models/Qwen/Qwen-VL-Chat"
        headers = {
            "Authorization": f"Bearer {hf_token}",
            "Content-Type": "application/json",
        }
        payload = {
            "inputs": {"question": query, "image": img_base64},
            "parameters": {"max_new_tokens": 500, "temperature": 0.7},
        }
        response = requests.post(api_url, json=payload, headers=headers, timeout=60)
        if response.status_code == 200:
            result = response.json()
            # The Inference API returns either a dict or a list depending on model.
            if isinstance(result, dict) and "generated_text" in result:
                return result["generated_text"]
            elif isinstance(result, list) and len(result) > 0:
                if "generated_text" in result[0]:
                    return result[0]["generated_text"]
                else:
                    return str(result[0])[:500]
            else:
                return str(result)[:500]
        else:
            return f"API Error {response.status_code}: {response.text[:200]}"
    except Exception as e:
        return f"Image analysis error: {str(e)[:100]}"


def analyze_image_with_openrouter(image, query="What physics concepts are shown?", api_key=None):
    """Analyze image using the OpenRouter API (OpenAI-compatible chat schema).

    Tries Gemini Pro Vision first, then falls back to Claude 3 Haiku.
    Returns the model's text answer, or an error string (never raises).
    """
    try:
        img_base64 = _image_to_base64_jpeg(image)
        api_url = "https://openrouter.ai/api/v1/chat/completions"
        headers = {
            "Authorization": f"Bearer {api_key}",
            "HTTP-Referer": "https://h2-feynman-bot.vercel.app",
            "X-Title": "H2 Physics Feynman Bot",
        }
        payload = {
            "model": "google/gemini-pro-vision",  # Free tier available
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": query},
                        {
                            "type": "image_url",
                            "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
                        },
                    ],
                }
            ],
            "max_tokens": 1000,
            "temperature": 0.7,
        }
        response = requests.post(api_url, json=payload, headers=headers, timeout=60)
        if response.status_code == 200:
            result = response.json()
            return result["choices"][0]["message"]["content"]
        # Fallback to another model. OpenRouter model ids require the
        # provider prefix; the bare "claude-3-haiku-20240307" id is rejected.
        payload["model"] = "anthropic/claude-3-haiku"
        response = requests.post(api_url, json=payload, headers=headers, timeout=60)
        if response.status_code == 200:
            result = response.json()
            return result["choices"][0]["message"]["content"]
        return f"API Error: {response.status_code}"
    except Exception as e:
        return f"Image analysis error: {str(e)[:100]}"


def analyze_image_with_togetherai(image, query="What physics concepts are shown?", api_key=None):
    """Analyze image using the Together.ai API (supports Qwen-VL).

    Returns the model's text answer, or an error string (never raises).
    """
    try:
        img_base64 = _image_to_base64_jpeg(image)
        api_url = "https://api.together.xyz/v1/chat/completions"
        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }
        payload = {
            "model": "Qwen/Qwen-VL-Chat",  # or "Qwen/Qwen-VL-Chat-Int4" for faster
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": query},
                        {
                            "type": "image_url",
                            "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
                        },
                    ],
                }
            ],
            "max_tokens": 1000,
            "temperature": 0.7,
        }
        response = requests.post(api_url, json=payload, headers=headers, timeout=60)
        if response.status_code == 200:
            result = response.json()
            return result["choices"][0]["message"]["content"]
        return f"API Error {response.status_code}: {response.text[:200]}"
    except Exception as e:
        return f"Image analysis error: {str(e)[:100]}"


def analyze_image_local_fallback(image, query):
    """Offline fallback: describe basic image metadata only (no ML analysis)."""
    try:
        width, height = image.size
        format_info = image.format if image.format else "Unknown"
        mode = image.mode
        analysis = f"Image analysis: {width}x{height} pixels, format: {format_info}, mode: {mode}. "
        if width > height:
            analysis += "The image appears to be landscape orientation. "
        else:
            analysis += "The image appears to be portrait orientation. "
        analysis += "For physics analysis, please describe what you see in the image, and I'll help explain the physics concepts."
        return analysis
    except Exception:
        return "Image received. Please describe what you see in the image for physics analysis."


# -----------------------------------------------------------------------------
# 4. GRAPH FUNCTIONS
# -----------------------------------------------------------------------------
def execute_plotting_code(code_snippet):
    """Execute model-generated Python plotting code and render it via st.pyplot.

    Patches the snippet to guarantee imports, at least one plot call, axis
    labels and a grid; on any error renders a simple fallback parabola.

    SECURITY NOTE: this exec()s LLM-generated code in-process. Acceptable only
    because the code originates from our own prompt-constrained model, but it
    is NOT safe for arbitrary user input.
    """
    try:
        plt.close('all')
        fig, ax = plt.subplots(figsize=(10, 6))
        namespace = {
            'plt': plt,
            'np': np,
            'ax': ax,
            'fig': fig,
            'math': __import__('math'),
        }
        cleaned_code = code_snippet.strip()
        # Ensure imports
        if 'import matplotlib' not in cleaned_code:
            cleaned_code = 'import matplotlib.pyplot as plt\nimport numpy as np\n' + cleaned_code
        # Ensure at least one plotting call, else append a sample curve.
        plot_keywords = ['plt.plot(', 'ax.plot(', 'plt.scatter(', 'ax.scatter(']
        has_plot = any(keyword in cleaned_code for keyword in plot_keywords)
        if not has_plot:
            cleaned_code += '\n\n# Sample plot\nx = np.linspace(0, 10, 100)\ny = np.sin(x)\nplt.plot(x, y, "b-", linewidth=2, label="sin(x)")\n'
        # plt.show() would block/steal the figure in a server context.
        cleaned_code = cleaned_code.replace('plt.show()', '')
        # Add labels/grid if the snippet forgot them.
        if 'plt.xlabel' not in cleaned_code:
            cleaned_code += '\nplt.xlabel("X-axis", fontsize=12)'
        if 'plt.ylabel' not in cleaned_code:
            cleaned_code += '\nplt.ylabel("Y-axis", fontsize=12)'
        if 'plt.grid' not in cleaned_code:
            cleaned_code += '\nplt.grid(True, linestyle="--", alpha=0.6)'
        exec(cleaned_code, namespace)
        ax = plt.gca()
        if not ax.get_title():
            ax.set_title('Physics Graph', fontsize=14)
        # BUGFIX: if the snippet called plt.figure() it drew on a NEW figure,
        # so rendering the pre-created `fig` showed a blank plot. Render the
        # current figure instead.
        current_fig = plt.gcf()
        st.pyplot(current_fig)
        plt.close(current_fig)
    except Exception as e:
        st.error(f"Graph Error: {str(e)[:100]}")
        # Fallback plot so the student still sees something.
        try:
            fig, ax = plt.subplots(figsize=(10, 6))
            x = np.linspace(0, 10, 100)
            y = x**2
            ax.plot(x, y, 'b-', linewidth=2, label='y = x²')
            ax.set_xlabel('X', fontsize=12)
            ax.set_ylabel('Y', fontsize=12)
            ax.set_title('Sample Graph', fontsize=14)
            ax.grid(True, linestyle='--', alpha=0.6)
            ax.legend()
            st.pyplot(fig)
            plt.close(fig)
        except Exception:
            st.warning("Could not generate graph.")


def display_message(role, content, enable_voice=False):
    """Render one chat message: text, optional code/graph, image and audio.

    Assistant messages may embed ```python ...``` blocks (executed as graphs)
    and [IMAGE: query] tags (resolved via search_image).
    """
    with st.chat_message(role):
        text_to_display = content

        # Strip code blocks out of the displayed text; they render separately.
        code_pattern = r'```python\s*(.*?)```'
        code_matches = list(re.finditer(code_pattern, content, re.DOTALL))
        for match in reversed(code_matches):
            text_to_display = text_to_display.replace(match.group(0), "")

        # Resolve at most one [IMAGE: ...] tag (assistant messages only).
        image_match = re.search(r'\[IMAGE:\s*(.*?)\]', text_to_display, re.IGNORECASE)
        image_result = None
        if image_match and role == "assistant":
            search_query = image_match.group(1)
            text_to_display = text_to_display.replace(image_match.group(0), "")
            with st.spinner(f"Searching for '{search_query}'..."):
                image_result = search_image(search_query)

        # Display text
        st.markdown(text_to_display)

        # Show code and graph
        if code_matches and role == "assistant":
            for match in code_matches:
                code_content = match.group(1).strip()
                if code_content:
                    with st.expander("📝 View Python Code", expanded=False):
                        st.code(code_content, language='python')
                    with st.expander("📊 Generated Graph", expanded=True):
                        execute_plotting_code(code_content)

        # Show image
        if image_match and role == "assistant":
            if image_result and "http" in str(image_result):
                try:
                    st.image(image_result, caption=f"📷 {image_match.group(1)}", width=500)
                    st.markdown(f"🔗 [Open Image]({image_result})")
                except Exception:
                    st.warning("Could not display image.")
            else:
                st.warning("Image search failed.")

        # Audio output (skip trivially short text).
        if enable_voice and role == "assistant" and len(text_to_display.strip()) > 10:
            audio_bytes = generate_audio(text_to_display)
            if audio_bytes:
                st.audio(audio_bytes, format='audio/mp3')

# -----------------------------------------------------------------------------
# 5. GROQ API FUNCTION
# -----------------------------------------------------------------------------
def call_groq_api(api_key, messages, max_tokens=2000):
    """Call the Groq chat-completions API.

    Filters messages to the system/user/assistant roles, then tries each model
    in order, moving on when rate-limited (429) or on network errors.
    Returns the assistant text, or a generic failure string (never raises).
    """
    url = "https://api.groq.com/openai/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    formatted_messages = [
        {"role": msg["role"], "content": msg["content"]}
        for msg in messages
        if msg["role"] in ("system", "user", "assistant")
    ]
    models_to_try = [
        "llama-3.1-8b-instant",
        "llama-3.2-3b-preview",
    ]
    for model in models_to_try:
        data = {
            "model": model,
            "messages": formatted_messages,
            "max_tokens": max_tokens,
            "temperature": 0.7,
            "stream": False,
        }
        try:
            response = requests.post(url, headers=headers, json=data, timeout=30)
            if response.status_code == 200:
                result = response.json()
                return result["choices"][0]["message"]["content"]
            elif response.status_code == 429:
                continue  # rate-limited: try the next model
            # Any other status also falls through to the next model.
        except Exception:
            continue
    return "Service temporarily unavailable. Please try again."


# System instructions for the AI tutor.
# BUGFIX: this must be a RAW string — it contains LaTeX like \frac, \theta,
# \phi; in a normal string \f and \t are form-feed/tab escapes, silently
# corrupting the prompt sent to the model.
SYSTEM_INSTRUCTIONS = r"""
**Identity:** Richard Feynman. Tutor for Singapore H2 Physics (9478).

**CORE DIRECTIVE:** STRICTLY adhere to the Syllabus 9478 topics below. Reject non-included topics from UK A-level syllabus.

**✅ SYLLABUS TOPICS & FORMULAS (9478):**
1. **Measurement:** SI units (mass, length, time, current, temp, mol), prefixes (p to T), homogeneity, scalars/vectors (resolution/addition), errors (random/systematic), uncertainty.
2. **Forces:** Normal, buoyant, drag (qualitative only; no viscosity coeff), Hooke’s Law ($F=kx$), Moments/Torque (couples, center of gravity), Equilibrium (no resultant F or Torque).
3. **Motion:** Kinematics ($s, u, v, a, t$ graphs & equations), Newton’s Laws (1, 2, 3), Momentum ($p=mv$), Impulse, $F_{net}=ma$ (const mass).
4. **Energy:** Stores/Transfers, Work ($W=Fs$), $E_k=\frac{1}{2}mv^2$, $E_p$ (grav/elastic/electric), Power ($P=Fv$), Efficiency, Conservation of Energy.
5. **Projectile:** Parabolic motion, $\Delta E_p=mg\Delta h$, Terminal velocity.
6. **Collisions:** Conservation of Momentum, Elastic vs Inelastic, Relative speeds (elastic). *Excluded: Coeff of restitution.*
7. **Circular Motion:** Radians, $\omega$, $v=r\omega$, $a=r\omega^2=v^2/r$, $F_c=mv^2/r$.
8. **Gravitation:** $F=G\frac{Mm}{r^2}$, Field $g=G\frac{M}{r^2}$, Potential $\phi=-\frac{GM}{r}$, $U=-\frac{GMm}{r}$, $g=-\frac{d\phi}{dr}$, Escape velocity, Orbits ($F_g=F_c$), Geostationary satellites.
9. **Oscillations (SHM):** $a=-\omega^2x$, $x=x_0\sin\omega t$, $v=\pm\omega\sqrt{x_0^2-x^2}$, Energy interchange, Damping (light/critical/heavy), Resonance (frequency response).
10. **Waves:** Transverse/Longitudinal, $v=f\lambda$, Intensity $\propto A^2$, Inverse square law, Polarization (Malus’ Law $I \propto \cos^2\theta$).
11. **Superposition:** Standing waves (nodes/antinodes), Path/Phase difference, Coherence, Double-slit ($\lambda=\frac{ax}{D}$), Diffraction grating ($d\sin\theta=n\lambda$), Single slit ($b\sin\theta=\lambda$ for min), Rayleigh criterion ($\theta \approx \lambda/b$).
12. **Thermal:** Kelvin ($T_K = T_C + 273.15$), Ideal Gas ($pV=NkT$), Avogadro ($N_A$), Kinetic Theory assumptions, $pV=\frac{1}{3}Nm\langle c^2\rangle$, Mean $E_k = \frac{3}{2}kT$.
13. **Thermodynamics:** Internal Energy ($U$), 1st Law ($\Delta U = Q+W$), Work on gas ($W=-p\Delta V$ implied) or by gas ($W=p\Delta V$), Specific Heat/Latent Heat.
14. **E-Fields:** Coulomb's $F=\frac{Q_1Q_2}{4\pi\varepsilon_0 r^2}$, Field $E=\frac{Q}{4\pi\varepsilon_0 r^2}$, Potential $V=\frac{Q}{4\pi\varepsilon_0 r}$, $U=\frac{Q_1Q_2}{4\pi\varepsilon_0 r}$, $E=-\frac{dV}{dr}$, Uniform field $E=V/d$, Capacitance $C=Q/V$, Energy $U=\frac{1}{2}CV^2$.
15. **Currents:** $I=Q/t$, $I=nAvq$, $V=W/Q$, $P=VI=I^2R$, EMF vs PD, AC (rms $I_0/\sqrt{2}$), Half-wave rectification.
16. **Circuits:** Symbols, $V=IR$, $R=\rho l/A$, I-V graphs (diode, lamp, NTC), Int. Resistance, Series/Parallel R & C, Potential Divider, Charging/Discharging ($x=x_0 e^{-t/RC}$).
17. **EM Forces:** B-fields (wire, coil, solenoid), Flux density $B$, Force on wire ($F=BIl\sin\theta$), Force on charge ($F=Bqv\sin\theta$), Velocity selector, Hall effect concept.
18. **EM Induction:** Flux $\Phi=BA$, Linkage $N\Phi$, Faraday’s & Lenz’s Laws, Transformers ($N_s/N_p = V_s/V_p = I_p/I_s$).
19. **Modern Physics:** Photoelectric ($E=hf$, Work function), Photon momentum ($p=h/\lambda$), De Broglie ($\lambda=h/p$), Wavefunction $\psi$, Uncertainty ($\Delta x \Delta p \gtrsim h$), Infinite well ($E_n = \frac{n^2 h^2}{8 m L^2}$), Line spectra.
20. **Nuclear:** Rutherford, Notation $^A_Z X$, Decay ($A=\lambda N$, $x=x_0e^{-\lambda t}$, $t_{1/2}=\ln 2/\lambda$), $E=mc^2$, Mass defect, Binding energy (curve), Fusion/Fission.

**GRAPH GENERATION RULES (CRITICAL):**
When asked to create a graph, you MUST write COMPLETE, EXECUTABLE Python code that:
1. Starts with: import matplotlib.pyplot as plt, import numpy as np
2. Creates a figure: plt.figure(figsize=(10, 6)) or fig, ax = plt.subplots(figsize=(10, 6))
3. Generates or uses appropriate physics data
4. Plots the data with plt.plot(), plt.scatter(), etc.
5. Adds proper labels: plt.xlabel(), plt.ylabel(), plt.title()
6. Adds grid: plt.grid(True, linestyle='--', alpha=0.6)
7. Adds legend if needed: plt.legend()

**IMPORTANT GRAPH RULES:**
- You MUST include ACTUAL DATA in your graph code
- Example of BAD code (won't show graph):
```python
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 6))
# Missing actual plot() command!
plt.show()

**EXAMPLE OF GOOD GRAPH CODE:**
import matplotlib.pyplot as plt
import numpy as np
plt.figure(figsize=(10, 6))
x = np.linspace(0, 10, 100)
y = np.sin(x)
plt.plot(x, y, 'b-', linewidth=2, label='sin(x)')
plt.xlabel('X Variable', fontsize=12)
plt.ylabel('Y Variable', fontsize=12)
plt.title('Physics Graph', fontsize=14)
plt.grid(True, linestyle='--', alpha=0.6)
plt.legend()

**DIAGRAMS:**
When you need to show a diagram, use: [IMAGE: search query]
Example: "Here's the setup: [IMAGE: double slit experiment diagram]"

**TEACHING METHOD:**
1. Ask ONE question at a time
2. Use analogies to explain complex concepts
3. Guide, don't give answers immediately
4. Validate understanding frequently
5. Teach with clarity, enthusiasm, and the Feynman method
6. Only give full solutions when student says "I give up"
7. Summarize each concept with a clear summary in > blockquote

**FORMATTING:**
- Use LaTeX for equations: $F = ma$, $E = mc^2$
- Use **bold** for key terms
- Keep responses concise but thorough
- Be enthusiastic and encouraging
"""

# -----------------------------------------------------------------------------
# 6. SIDEBAR CONFIGURATION
# -----------------------------------------------------------------------------
with st.sidebar:
    # Header
    col1, col2 = st.columns([1, 3])
    with col1:
        st.image("https://upload.wikimedia.org/wikipedia/en/4/42/Richard_Feynman_Nobel.jpg", width=80)
    with col2:
        st.markdown("### ⚛️ H2 Physics")
        st.caption("Feynman Tutor")
    st.divider()

    # Settings
    st.header("⚙️ Settings")
    topic = st.selectbox(
        "Topic Focus:",
        [
            "General / Any",
            "Measurement & Uncertainty",
            "Kinematics & Dynamics",
            "Forces & Turning Effects",
            "Work, Energy, Power",
            "Circular Motion",
            "Gravitational Fields",
            "Thermal Physics",
            "Oscillations & Waves",
            "Electricity & DC Circuits",
            "Electromagnetism (EMI/AC)",
            "Modern Physics (Quantum/Nuclear)",
            "Paper 4: Practical Skills",
        ]
    )
    enable_voice = st.toggle("🗣️ Text-to-Speech", value=False)
    st.divider()

    # Image Analysis Settings
    st.header("🖼️ Image Analysis")
    image_analysis_mode = st.radio(
        "Analysis Service:",
        ["Hugging Face", "Together.ai", "OpenRouter", "Basic"],
        index=0,
        help="Choose image analysis service"
    )

    # API credentials, resolved below per selected service.
    hf_token = None
    together_key = None
    openrouter_key = None

    def get_secret(key, default=None):
        """Safely get a secret from st.secrets or environment.

        st.secrets can raise when no secrets file exists, so guard it and
        fall back to environment variables.
        """
        try:
            if hasattr(st, 'secrets'):
                secrets_dict = dict(st.secrets)
                if key in secrets_dict:
                    return secrets_dict[key]
        except Exception:
            pass
        return os.environ.get(key, default)

    if image_analysis_mode == "Hugging Face":
        hf_token = get_secret("HF_TOKEN")
        if hf_token:
            st.success("✓ Hugging Face Token configured")
        else:
            st.warning("Add HF_TOKEN to secrets")
            st.info("Get token: huggingface.co/settings/tokens")
    elif image_analysis_mode == "Together.ai":
        together_key = get_secret("TOGETHER_API_KEY")
        if together_key:
            st.success("✓ Together.ai API configured")
        else:
            st.warning("Add TOGETHER_API_KEY to secrets")
            st.info("Get key: together.ai (free credits available)")
    elif image_analysis_mode == "OpenRouter":
        openrouter_key = get_secret("OPENROUTER_API_KEY")
        if openrouter_key:
            st.success("✓ OpenRouter API configured")
        else:
            st.warning("Add OPENROUTER_API_KEY to secrets")
            st.info("Get key: openrouter.ai (free credits available)")
    else:  # Basic
        st.info("Basic image analysis mode")

    st.divider()

    def _analyze_physics_image(img):
        """Dispatch image analysis to the configured service.

        Extracted helper: the camera and upload branches previously duplicated
        this dispatch block verbatim. Falls back to local metadata analysis
        when no API key is configured.
        """
        prompt = "Analyze this physics image. What concepts, diagrams, or equations do you see?"
        if image_analysis_mode == "Hugging Face" and hf_token:
            return analyze_image_with_huggingface(img, prompt, hf_token)
        if image_analysis_mode == "Together.ai" and together_key:
            return analyze_image_with_togetherai(img, prompt, together_key)
        if image_analysis_mode == "OpenRouter" and openrouter_key:
            return analyze_image_with_openrouter(img, prompt, openrouter_key)
        return analyze_image_local_fallback(img, "Analyze this physics image")

    # Media Input
    st.header("📤 Input Methods")
    visual_content = None
    image_analysis = None

    input_method = st.radio(
        "Choose input method:",
        ["Camera", "Upload"],
        horizontal=True
    )

    if input_method == "Camera":
        st.subheader("📷 Camera")
        camera_photo = st.camera_input("Take a photo of physics problem")
        if camera_photo:
            try:
                image = Image.open(camera_photo)
                visual_content = image
                st.image(image, caption="Camera Capture", width=200)
                st.success("✓ Photo captured")
                with st.spinner("Analyzing image..."):
                    image_analysis = _analyze_physics_image(image)
                if image_analysis:
                    st.info(f"📋 Analysis: {image_analysis[:150]}...")
            except Exception as e:
                st.error(f"Camera error: {e}")
    else:  # Upload
        st.subheader("📁 Upload")
        uploaded_file = st.file_uploader(
            "Choose image",
            type=["jpg", "jpeg", "png"],
            help="Upload physics diagrams or problems"
        )
        if uploaded_file:
            try:
                image = Image.open(uploaded_file)
                visual_content = image
                st.image(image, caption="Uploaded Image", width=200)
                st.success("✓ Image loaded")
                with st.spinner("Analyzing image..."):
                    image_analysis = _analyze_physics_image(image)
                if image_analysis:
                    st.info(f"📋 Analysis: {image_analysis[:150]}...")
            except Exception as e:
                st.error(f"Upload error: {e}")

    st.divider()

    # Controls
    if st.button("🧹 Clear Chat History", use_container_width=True):
        st.session_state.messages = []
        st.rerun()

    st.divider()
    st.caption("H2 Physics 9478 | AI Image Analysis")
    st.caption("Made with ❤️ for JPJC H2 Physics students | Powered by Groq AI")

# -----------------------------------------------------------------------------
# 7. MAIN CHAT INTERFACE
# -----------------------------------------------------------------------------
# Initialize session state with the system prompt and a welcome message.
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "system", "content": SYSTEM_INSTRUCTIONS},
        {"role": "assistant", "content": "**Hello! I'm Richard Feynman, ready to help you master H2 Physics!** ⚛️\n\nI can:\n- 📊 **Plot graphs** with Python\n- 🖼️ **Find diagrams** online\n- 📷 **Analyze images** from camera/upload\n- 💬 **Explain concepts** with analogies\n\n**Try:**\n1. Take a photo of a physics problem\n2. Ask me to plot a graph\n3. Request a diagram\n\n**What would you like to learn today?**"},
    ]

# Title
st.title("⚛️ H2 Physics Feynman Tutor")
st.caption(f"**Topic:** {topic} | **Image Analysis:** {image_analysis_mode}")

# Display chat history (system prompt is never rendered).
for message in st.session_state.messages:
    if message["role"] != "system":
        display_message(message["role"], message["content"], enable_voice)

# Chat input
user_input = st.chat_input("Type your question or describe the image...")

# Process input: fires on typed text OR an image captured/uploaded in the sidebar.
if user_input or visual_content or image_analysis:
    # Build the user message, folding in image context and topic focus.
    user_message = ""
    if user_input:
        user_message += user_input + " "
    if visual_content:
        user_message += "[I have uploaded/taken a photo of a physics problem] "
    if image_analysis:
        user_message += f"[Image Analysis: {image_analysis}] "
    if topic != "General / Any":
        user_message += f"(Focus on: {topic})"

    # Add to history
    st.session_state.messages.append({"role": "user", "content": user_message})

    # Echo the user's turn.
    with st.chat_message("user"):
        if user_input:
            st.markdown(user_input)
        if visual_content:
            st.image(visual_content, caption="Your Image", width=300)
        if image_analysis:
            with st.expander("📋 Image Analysis Details"):
                st.write(image_analysis)

    # Resolve the Groq API key: environment first, then st.secrets.
    groq_key = os.environ.get("GROQ_API_KEY")
    if not groq_key and hasattr(st, 'secrets'):
        try:
            # st.secrets can raise if no secrets file is present.
            groq_key = dict(st.secrets).get("GROQ_API_KEY")
        except Exception:
            pass

    if not groq_key:
        st.error(
            "**❌ API Configuration Required**\n\n"
            "Groq API key not found. Please ensure GROQ_API_KEY is set in:\n"
            "1. Hugging Face Space Secrets (recommended)\n"
            "2. Environment variables\n\n"
            "Get free API key: console.groq.com"
        )
        st.stop()

    try:
        # Always send the system prompt plus the last 8 non-system turns.
        conversation_context = [{"role": "system", "content": SYSTEM_INSTRUCTIONS}]
        for msg in st.session_state.messages[-8:]:
            if msg["role"] != "system":
                conversation_context.append(msg)

        with st.spinner("Feynman is thinking... ⚛️"):
            response_text = call_groq_api(groq_key, conversation_context)

        if response_text:
            st.session_state.messages.append({"role": "assistant", "content": response_text})
            display_message("assistant", response_text, enable_voice)
            # NOTE: the original nulled visual_content/image_analysis here via
            # locals(); that was dead code — the script run ends immediately and
            # the sidebar widgets repopulate both on the next rerun.
        else:
            st.error("Failed to get response. Please try again.")
    except Exception as e:
        st.error(f"Error: {str(e)[:200]}")

# Footer
st.divider()
st.markdown("**H2 Physics Feynman Tutor** | Singapore H2 Physics (9478) Syllabus")
st.markdown("Powered by Groq AI + AI Image Analysis")
st.markdown("*AI tutoring assistant. Verify with official syllabus.*")