# app.py — Streamlit voice-question chat app (Hugging Face Spaces)
from openai import OpenAI
from st_pages import Page, show_pages
import streamlit as st

from whisper_stt import whisper_stt
# --- Page setup -------------------------------------------------------------
st.set_page_config(layout="wide")
show_pages([Page("app.py", "Home", "🏠")])

# Enlarge and colour expander headers via injected CSS.
_EXPANDER_CSS = """
<style>
.streamlit-expanderHeader {
    font-size: 24px !important;
    font-weight: bold !important;
    color: #1E90FF !important;
}
</style>
"""
st.markdown(_EXPANDER_CSS, unsafe_allow_html=True)
# Scroll-position preservation for in-page anchors.
# NOTE(review): <script> tags injected through st.markdown are inserted via
# innerHTML and are generally NOT executed by the browser; if this behaviour
# is actually needed, st.components.v1.html is the usual mechanism — confirm.
_SCROLL_JS = """
<script>
window.addEventListener("load", function () {
    if (window.location.hash) {
        var hash = window.location.hash.substring(1);
        if (hash && document.getElementById(hash)) {
            document.getElementById(hash).scrollIntoView();
        }
    }
});
document.querySelectorAll('a').forEach(anchor => {
    anchor.addEventListener('click', function (e) {
        e.preventDefault();
        var targetId = this.getAttribute('href').substring(1);
        var targetElement = document.getElementById(targetId);
        if (targetElement) {
            targetElement.scrollIntoView({ behavior: 'smooth' });
        }
        window.location.hash = targetId;
    });
});
</script>
"""
st.markdown(_SCROLL_JS, unsafe_allow_html=True)
# Default values for every session-state key this app reads. A single table
# keeps the initialisation in one place and guarantees each key exists
# before any later code touches it.
_SESSION_DEFAULTS = {
    'paused': False,
    'question_text': "",
    'submitted': False,
    'response_content': "",
    'stopped': False,
    'function_call_count': 0,
    'transcribed_text': "",
    'last_processed_text': "",
    'headers': [],
    'history': [],
}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
def on_stop():
    """Button callback: flag the in-progress response stream for interruption."""
    st.session_state["stopped"] = True
def handle_enter(key):
    """on_change callback for the question text area.

    NOTE(review): Streamlit on_change callbacks receive no key-press
    information — `key` is always the static "ctrl+enter" passed via
    `args`, so the guard below always passes. The prints look like
    leftover debugging output; confirm before relying on them.
    """
    if key != "ctrl+enter":
        return
    new_question = st.session_state.question_input
    print(f"handle_enter called. new_question: '{new_question}'")
    print(f"session state: {st.session_state}")
# Sidebar: API key entry, microphone transcription, and a Stop button.
with st.sidebar:
    api_key = st.text_input("API Key", key="chatbot_api_key", type="password")
    col1, col2 = st.columns(2)
    with col1:
        # Record audio and transcribe it with Whisper (no callback needed).
        transcribed_text = whisper_stt(
            openai_api_key=api_key,
            language='en',
        )
        # Only react to a *new* transcription. The previous unconditional
        # assignment re-wrote question_text on every rerun, clobbering any
        # manual edits made in the question box.
        if transcribed_text and transcribed_text != st.session_state.transcribed_text:
            st.session_state.transcribed_text = transcribed_text
            st.session_state.question_text = transcribed_text
            st.session_state.submitted = True
    with col2:
        st.button(label='Stop', on_click=on_stop)
# Question input; `new_question` mirrors whatever is currently in the box.
new_question = st.text_area(
    "Question",
    value=st.session_state.question_text or "",
    height=150,
    key="question_input",
    on_change=handle_enter,
    args=("ctrl+enter",),
)
print(f"After text_area, new_question: '{new_question}'")

# A non-empty edit that differs from the stored question counts as a submit.
if new_question and new_question != st.session_state.question_text:
    st.session_state.question_text = new_question
    st.session_state.submitted = True

# Refuse to proceed to the OpenAI call without an API key.
if st.session_state.question_text and not api_key:
    st.info("Please add your OpenAI API key to continue.")
    st.stop()
# Stream the model's answer for a newly submitted question.
if st.session_state.submitted and not st.session_state.stopped:
    st.session_state.headers.append(st.session_state.question_text)
    client = OpenAI(api_key=api_key)
    # Each submission starts a fresh single-turn conversation (no memory of
    # earlier questions is sent to the model).
    st.session_state.messages = [
        {"role": "user", "content": st.session_state.question_text}
    ]
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=st.session_state.messages,
        stream=True,
    )
    complete_response = ""
    current_expander = st.expander(st.session_state.question_text, expanded=True)
    response_placeholder = current_expander.empty()
    for chunk in response:
        if st.session_state.stopped:
            st.session_state.stopped = False
            st.session_state.submitted = False
            break
        # Guard against chunks with an empty `choices` list (the original
        # indexed choices[0] unconditionally, which can raise IndexError on
        # keep-alive/final stream chunks).
        if chunk and chunk.choices and chunk.choices[0].delta.content:
            complete_response += chunk.choices[0].delta.content
            response_placeholder.markdown(complete_response, unsafe_allow_html=True)
    st.session_state.response_content = complete_response
    # Newest entry first so the history list below renders in reverse order.
    st.session_state.history.insert(0, {
        'question': st.session_state.question_text,
        'response': complete_response,
    })
    st.session_state.submitted = False
    st.session_state.stopped = False
# Render past question/answer pairs, newest first.
for idx, entry in enumerate(st.session_state.history):
    # NOTE(review): history[0] is skipped whenever a response exists, on the
    # assumption the live expander above already shows it. On reruns with no
    # new submission that expander is not rebuilt, so the newest entry ends
    # up hidden — confirm whether this is the intended behaviour.
    if idx == 0 and st.session_state.response_content:
        continue
    with st.expander(entry['question'], expanded=False):
        st.markdown(entry['response'], unsafe_allow_html=True)