Spaces:
Sleeping
Sleeping
| import os | |
| import gradio as gr | |
| from src.const.agent_response_constants import * | |
| from src.rag.agent_chain import ExecutiveAgentChain | |
| from src.rag.utilclasses import LeadAgentQueryResponse | |
| from src.utils.logging import get_logger | |
| from src.cache.cache import Cache | |
# Module-level loggers: ``logger`` for general app flow, ``cache_logger``
# reserved for cache activity (not referenced in this chunk — presumably
# used elsewhere or kept for future cache diagnostics; TODO confirm).
logger = get_logger("chatbot_app")
cache_logger = get_logger("cache_chatbot_app")
class ChatbotApplication:
    """Gradio chat UI for the Executive Education Adviser.

    Wires a ``gr.ChatInterface`` to an ``ExecutiveAgentChain`` and adds a
    language selector, a conversation-reset button, and an optional response
    cache (via the project ``Cache`` singleton).
    """

    def __init__(self, language: str = 'de') -> None:
        """Build the Gradio Blocks UI and register all event handlers.

        Args:
            language: Initial language code for UI and agent ('de' or 'en').
        """
        self._app = gr.Blocks()
        self._language = language
        self._cache = Cache.get_cache()

        with self._app:
            # Per-session state: the agent instance and the active language code.
            agent_state = gr.State(None)
            lang_state = gr.State(language)

            with gr.Row():
                lang_selector = gr.Radio(
                    choices=["Deutsch", "English"],
                    value="English" if language == 'en' else 'Deutsch',
                    label="Selected Language",
                    interactive=True,
                )
                reset_button = gr.Button("Reset Conversation")

            chatbot = gr.Chatbot(
                height=600,
                type='messages',
                label="Executive Education Adviser"
            )
            chat = gr.ChatInterface(
                fn=lambda msg, history, agent: self._chat(
                    message=msg,
                    history=history,
                    agent=agent,
                ),
                additional_inputs=[agent_state],
                title="Executive Education Adviser",
                type='messages',
                chatbot=chatbot,
                fill_height=True
            )

            def clear_chat_immediate():
                # Wired to exactly ONE output (chat.chatbot_value), so return
                # exactly one value. (Bug fix: previously returned ``[], ""``,
                # a 2-tuple that Gradio would assign wholesale to the chatbot.)
                return []

            def on_lang_change(language):
                # Map the radio label to a language code and rebuild the agent.
                lang_code = 'en' if language == 'English' else 'de'
                return switch_language(lang_code)

            def _initialize_agent(language):
                # Build a fresh agent and its opening assistant message.
                # (Renamed from the original local ``initalize_agent`` typo.)
                agent = ExecutiveAgentChain(language=language)
                greeting = agent.generate_greeting()
                return agent, [{"role": "assistant", "content": greeting}]

            def switch_language(new_language):
                # Wired to THREE outputs (agent_state, lang_state,
                # chat.chatbot_value) — return exactly three values.
                # (Bug fix: previously returned a 4-tuple with a trailing "".)
                new_agent, greeting = _initialize_agent(new_language)
                return new_agent, new_language, greeting

            # Clear the chat immediately on language change, then rebuild the
            # agent and greet in the new language (two chained handlers).
            lang_selector.change(
                fn=clear_chat_immediate,
                outputs=[chat.chatbot_value],
                queue=True,
            )
            lang_selector.change(
                fn=on_lang_change,
                inputs=[lang_selector],
                outputs=[agent_state, lang_state, chat.chatbot_value],
                queue=True,
            )
            # Reset re-uses the same pattern: clear, then reinitialize in the
            # currently selected language.
            reset_button.click(
                fn=clear_chat_immediate,
                outputs=[chat.chatbot_value],
                queue=True,
            )
            reset_button.click(
                fn=switch_language,
                inputs=[lang_state],
                outputs=[agent_state, lang_state, chat.chatbot_value],
                queue=True,
            )
            # Initialize the agent chain on the app startup.
            self._app.load(
                fn=lambda: _initialize_agent(self._language),
                outputs=[agent_state, chat.chatbot_value],
            )

    def app(self) -> gr.Blocks:
        """Expose underlying Gradio Blocks for external runners (e.g., HF Spaces)."""
        return self._app

    def _chat(self, message: str, history: list[dict], agent: ExecutiveAgentChain):
        """Handle one user turn: preprocess, consult the cache, query the agent.

        Args:
            message: Raw user message.
            history: Chat history in Gradio 'messages' format (unused here;
                the agent keeps its own conversation state).
            agent: Session agent from ``agent_state`` (None if startup failed).

        Returns:
            A list of response components: the answer text, optionally followed
            by a ``gr.HTML`` booking widget.
        """
        if agent is None:
            logger.error("Agent not initialized")
            return ["I apologize, but the chatbot is not properly initialized."]
        answers = []
        try:
            logger.info(f"Processing user query: {message[:100]}...")
            preprocess_resp = agent.preprocess_query(message)
            final_response: LeadAgentQueryResponse | None = None
            current_lang = preprocess_resp.language
            processed_q = preprocess_resp.processed_query
            if preprocess_resp.response:
                # Response comes from preprocessing step
                final_response = preprocess_resp
            elif Cache._settings["enabled"]:
                cached_data = self._cache.get(processed_q, language=current_lang)
                if cached_data:
                    # Cache Hit — restore response with metadata
                    if isinstance(cached_data, dict):
                        final_response = LeadAgentQueryResponse(
                            response=cached_data["response"],
                            language=current_lang,
                            appointment_requested=cached_data.get("appointment_requested", False),
                            relevant_programs=cached_data.get("relevant_programs", []),
                        )
                    else:
                        # Legacy: plain string cache entry
                        final_response = LeadAgentQueryResponse(
                            response=cached_data,
                            language=current_lang,
                        )
            if not final_response:
                # Response needs to be generated by the agent
                final_response = agent.agent_query(processed_q)
            answers.append(final_response.response)
            # Track the language the agent actually answered in.
            self._language = final_response.language
            if final_response.confidence_fallback or final_response.max_turns_reached or final_response.appointment_requested:
                # Offer a booking widget whenever the agent falls back, hits
                # its turn limit, or the user asked for an appointment.
                html_code = get_booking_widget(language=self._language, programs=final_response.relevant_programs)
                answers.append(gr.HTML(value=html_code))
            if final_response.should_cache and Cache._settings["enabled"]:
                # Caching response with metadata
                self._cache.set(
                    key=processed_q,
                    value={
                        "response": final_response.response,
                        "appointment_requested": final_response.appointment_requested,
                        "relevant_programs": final_response.relevant_programs,
                    },
                    language=current_lang
                )
        except Exception as e:
            # Boundary handler: log the full traceback, show a friendly message.
            logger.exception(f"Error processing query: {e}")
            error_message = (
                "I apologize, but I encountered an error processing your request. "
                "Please try rephrasing your question or contact our admissions team for assistance."
            )
            answers.append(error_message)
        return answers

    def run(self):
        """Launch the Gradio server; host, port, and sharing come from env vars."""
        self._app.launch(
            share=os.getenv("GRADIO_SHARE", "false").lower() == "true",
            server_name=os.getenv("SERVER_NAME", "0.0.0.0"),
            server_port=int(os.getenv("PORT", 7860)),
        )