# (Hugging Face Spaces page chrome captured by the scraper — not application code.)
| import os | |
| import json | |
| import gradio as gr | |
| import uuid | |
| from typing import List, Any, Tuple, Dict # Added Dict | |
| from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder | |
| from langchain_core.messages import HumanMessage, AIMessage | |
| from langchain_google_genai import ChatGoogleGenerativeAI | |
| from langchain_core.output_parsers import StrOutputParser | |
# Define custom CSS for Gradio UI (can be empty or contain styles).
# Removing the container max-width lets the three-column layout use the full
# browser width instead of Gradio's default centered column.
custom_css = """
.gradio-container {
    max-width: none !important;
}
"""
# --- Secrets and API Key Configuration ---
# Fail fast with a clear message when the key is missing. The previous code
# first did `os.environ["GOOGLE_API_KEY"] = os.environ.get("GOOGLE_API_KEY")`,
# which raises TypeError (os.environ values must be str) when the variable is
# absent — before the "not set" check could ever run.
if not os.environ.get("GOOGLE_API_KEY"):
    print("FATAL: GOOGLE_API_KEY is not set. Please add it in the Hugging Face secrets.")
    # raise SystemExit works even where the site-module exit() helper is unavailable.
    raise SystemExit(1)
# --- 1) LLM and Prompt Configuration ---
# Single Gemini chat model shared by the clarifier, summarizer, and report chains.
# temperature=0.5 trades some determinism for more natural question phrasing.
llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash", temperature=0.5)
# Appended to the clarifier system prompt so the raw model output can be fed
# straight into json.loads without stripping markdown fences.
JSON_ONLY_INSTRUCTION = "Your output MUST BE ONLY the raw JSON object, without any markdown formatting like ```json or any other text."
# Define the comprehensive list of BRD sections and their descriptions.
# Keys double as the UI checkbox labels; values are the prompting hints that
# get injected into the clarifier/summarizer checklists for selected sections.
BRD_SECTIONS = {
    "Project Scope": "What are the specific, measurable features for the MVP (Minimum Viable Product)? What is explicitly out of scope?",
    "Target Audience": "Detailed personas (e.g., \"Busy Professional\" needs more detail: age, tech-savviness, cooking skill).",
    "Business Goals": "What are the key business metrics for success (e.g., user acquisition targets, revenue goals, churn rate)?",
    "Functional Requirements": "How should AI personalization work? What are the steps in the grocery ordering and delivery process? How does inventory tracking function?",
    "Non-Functional Requirements": "What are the expected performance benchmarks (e.g., app load time)? What are the security and data privacy considerations?",
    "Data Sources & Constraints": "What specific APIs will be used for recipes or grocery data? Are there any budget or technology stack limitations?",
    "Timeline": "What is the desired deadline or a rough project timeline?"
}
# Clarifier system template; {dynamic_checklist_content} is filled per turn in
# chat_logic with only the sections the user selected. The quadruple braces
# survive the .format() call as literal {{ }}, which ChatPromptTemplate then
# treats as escaped braces — so the model sees a plain { ... } JSON schema.
clarifier_system_template = """You are a BRD-building assistant. Your primary goal is to gather details by asking clarifying questions. When you have enough information, you will signal you are ready to report.
You will be given a conversation history. Analyze it.
- If you have enough information to write a great BRD, set "status" to "REPORT_READY".
- If you still need more information, set "status" to "ASK" and provide a list of questions you need answers to.
**Required Information Checklist:**
{dynamic_checklist_content}
Return ONLY valid JSON with this exact schema:
{{{{
"status": "ASK" | "REPORT_READY",
"questions": string[],
"reason": string
}}}}
"""  # JSON_ONLY_INSTRUCTION is appended at call time in chat_logic, not here.
# clarifier_prompt is built dynamically in chat_logic because the system text varies per turn.
report_system = "You are a meticulous BRD writer. Create a polished, structured BRD based on all information collected in the conversation."
report_prompt = ChatPromptTemplate.from_messages([
    ("system", report_system),
    MessagesPlaceholder("history"),
    ("human", "Excellent. Please compile all the information into a comprehensive Business Requirement Document.")
])
# Prompt for generating a running summary of the gathered information; its
# output feeds the "Current BRD Preview" panel, not the chat itself.
summarizer_system_template = """You are a helpful assistant. Based on the provided conversation history, summarize the key information gathered so far for each of the following BRD sections. If a section has no information yet, state that. Be concise and use bullet points.
**BRD Sections to Summarize:**
{dynamic_checklist_content}
Provide a summary for each section.
"""
# --- 2) Chat History & Session Management ---
# Directory holding one JSON file per chat session, created on import.
HISTORY_DIR = "chat_histories"
# exist_ok=True replaces the racy "check exists, then create" pattern and is
# idempotent across reloads/workers.
os.makedirs(HISTORY_DIR, exist_ok=True)

def get_session_filepath(session_id: str) -> str:
    """Return the on-disk JSON path for the given session id."""
    return os.path.join(HISTORY_DIR, f"session_{session_id}.json")
def save_session_state(session_id: str, history: List[Any], pending_questions: List[str], selected_sections: List[str]) -> None:
    """Persist one session's full state (messages, question queue, section picks) to disk.

    No-op when session_id is empty (e.g. before a brand-new chat receives a real id).
    """
    if not session_id:
        return
    filepath = get_session_filepath(session_id)
    state = {
        # LangChain messages are flattened to {"type", "content"} dicts so they
        # can be rebuilt as HumanMessage/AIMessage in load_session_state.
        "history": [{"type": msg.type, "content": msg.content} for msg in history],
        "pending_questions": pending_questions,
        "selected_sections": selected_sections,
    }
    # Explicit UTF-8: the default locale encoding is platform-dependent and can
    # corrupt non-ASCII chat content on some hosts.
    with open(filepath, "w", encoding="utf-8") as f:
        json.dump(state, f, indent=2)
def load_session_state(session_id: str) -> Tuple[List[Any], List[str], List[str]]:
    """Load (history, pending_questions, selected_sections) for a session.

    Returns empty history/questions and ALL sections selected when the file is
    missing or unreadable, so callers always receive a usable default state.
    """
    filepath = get_session_filepath(session_id)
    if not os.path.exists(filepath):
        return [], [], list(BRD_SECTIONS.keys())
    with open(filepath, "r", encoding="utf-8") as f:
        try:
            state = json.load(f)
            history = [
                HumanMessage(content=item["content"]) if item["type"] == "human" else AIMessage(content=item["content"])
                for item in state.get("history", [])
            ]
            pending_questions = state.get("pending_questions", [])
            selected_sections = state.get("selected_sections", list(BRD_SECTIONS.keys()))
            return history, pending_questions, selected_sections
        # KeyError added: history entries are indexed directly with item["type"]
        # and item["content"], so a malformed entry previously escaped this
        # handler and crashed the UI instead of falling back to defaults.
        except (json.JSONDecodeError, TypeError, KeyError):
            return [], [], list(BRD_SECTIONS.keys())
def get_saved_sessions() -> List[Tuple[str, str]]:
    """Build the sidebar choices: a fixed "New Chat" entry plus saved sessions, newest first."""
    found: List[Tuple[str, str]] = []
    for entry in os.listdir(HISTORY_DIR):
        # Only files following the session_<id>.json naming scheme count.
        if not (entry.startswith("session_") and entry.endswith(".json")):
            continue
        sid = entry.replace("session_", "").replace(".json", "")
        past_history, _, _ = load_session_state(sid)
        # Label each session with the first 40 chars of its opening message.
        label = "New Chat"
        if past_history and isinstance(past_history[0], (HumanMessage, AIMessage)) and past_history[0].content:
            opening = past_history[0].content
            label = opening[:40] + ("..." if len(opening) > 40 else "")
        found.append((label, sid))
    # Most recently modified session files come first.
    found.sort(key=lambda pair: os.path.getmtime(get_session_filepath(pair[1])), reverse=True)
    return [("โ New Chat", "NEW_CHAT_SESSION")] + found
# --- 3) Professional Output Formatting ---
def format_ai_output(result: dict) -> str:
    """Render the clarifier's JSON payload as a polished markdown message.

    Questions shaped like "Category: text" are grouped under their category
    (any leading "Regarding " is dropped); everything else lands in "General".
    Numbering runs continuously across all groups.
    """
    reason = result.get("reason", "No reason provided.")
    questions = result.get("questions", [])
    # Group questions by category, preserving first-seen category order.
    grouped: Dict[str, List[str]] = {}
    for raw_q in questions:
        head, sep, tail = raw_q.partition(":")
        if sep:
            key = head.replace("Regarding ", "").strip()
            grouped.setdefault(key, []).append(tail.strip())
        else:
            grouped.setdefault("General", []).append(raw_q)
    parts = [
        "Thank you. To build a comprehensive document, please provide details on the following points.\n\n",
        f"**Reasoning:** *{reason}*\n\n",
        "--- \n\n",
    ]
    number = 1
    for heading, items in grouped.items():
        parts.append(f"**{heading}**\n")
        for item in items:
            parts.append(f"- {number}. {item}\n")
            number += 1
        parts.append("\n")
    return "".join(parts).strip()
# --- 4) Core Gradio Application Logic (One-by-One Questions) ---
def chat_logic(user_message: str, history: List[Any], pending_questions: List[str], session_id: str, selected_sections: List[str]) -> Tuple:
    """Main logic for a single chat turn with a question queue.

    Flow: append the user's message, regenerate the progress summary for the
    BRD preview (one LLM call), then either pop the next queued clarifying
    question or ask the clarifier chain for new questions / the final report.
    Returns the tuple of component updates wired to the submit handlers below.
    """
    # A brand-new chat carries the sentinel id; mint a real UUID on first message.
    if session_id == "NEW_CHAT_SESSION":
        session_id = str(uuid.uuid4())
        is_first_message_in_new_session = True
    else:
        is_first_message_in_new_session = len(history) == 0  # Existing session with empty history counts as first message
    # Section checkboxes are only editable before the conversation starts.
    checkbox_interactive_state = gr.update(interactive=False) if history else gr.update(interactive=True)
    # 1. Add user's latest message to the history (this history is the one saved and passed to the LLM)
    history.append(HumanMessage(content=user_message))
    # On the first message, save immediately so the session appears in the sidebar.
    if is_first_message_in_new_session:
        save_session_state(session_id, history, pending_questions, selected_sections)
        updated_session_list_choices = gr.update(choices=get_saved_sessions(), value=session_id)
    else:
        updated_session_list_choices = gr.update()  # No sidebar refresh needed
    # Initialize content for the BRD display
    brd_display_content = "Thank you for your input. Processing...\n\n"
    # 2. Dynamically build the checklist content for prompts from the user-selected sections only.
    dynamic_checklist_content = "\n".join([f"- {section}: {BRD_SECTIONS[section]}" for section in selected_sections if section in BRD_SECTIONS])
    # 3. Generate Progress Summary for the BRD Preview panel (runs every turn).
    current_summarizer_system = summarizer_system_template.format(dynamic_checklist_content=dynamic_checklist_content)
    summarizer_prompt_instance = ChatPromptTemplate.from_messages([
        ("system", current_summarizer_system),
        MessagesPlaceholder("history")  # Pass the current history (user message already appended)
    ])
    summary_chain = summarizer_prompt_instance | llm | StrOutputParser()
    progress_summary = summary_chain.invoke({"history": history})
    brd_display_content += f"**Current Understanding:**\n{progress_summary}\n\n"
    # 4. Determine next AI action (clarification question or final report).
    ai_response_for_chat = ""  # The message shown in the chatbot this turn
    if pending_questions:
        # User was answering a queued question: surface the next one, no LLM call.
        ai_response_for_chat = pending_questions.pop(0)
    else:
        # Queue empty: ask the clarifier for new questions or a REPORT_READY signal.
        current_clarifier_system = clarifier_system_template.format(dynamic_checklist_content=dynamic_checklist_content) + "\n" + JSON_ONLY_INSTRUCTION
        clarifier_prompt_instance = ChatPromptTemplate.from_messages([("system", current_clarifier_system), MessagesPlaceholder("history")])
        clarifier_chain = clarifier_prompt_instance | llm | StrOutputParser()
        raw_response = clarifier_chain.invoke({"history": history})
        try:
            result = json.loads(raw_response)
            status = result.get("status")
            if status == "ASK" and result.get("questions"):
                pending_questions.extend(result["questions"])  # Queue all new questions
                reason = result.get("reason", "To gather more details, I have some questions.")
                if pending_questions:
                    first_question = pending_questions.pop(0)  # Ask only the first one this turn
                    ai_response_for_chat = f"Thank you for your input. {reason}\n\nHere is my first question:\n- {first_question}"
                else:
                    ai_response_for_chat = "I need more information, but I couldn't generate a specific question. Can you please elaborate further?"
            elif status == "REPORT_READY":
                report_intro = "Excellent, I have all the information required. Generating the final report now..."
                final_chain = report_prompt | llm | StrOutputParser()
                report = final_chain.invoke({"history": history})
                ai_response_for_chat = f"{report_intro}\n\n---\n\n{report}"
                brd_display_content += f"\n\n=== FINAL BRD ===\n\n{report}"  # Mirror the final report into the BRD preview
            else:
                ai_response_for_chat = "I received an unexpected status. Let's try again. Can you rephrase?"
        except (json.JSONDecodeError, ValueError):
            # Model returned non-JSON despite the instruction; ask the user to retry.
            ai_response_for_chat = "I'm having a little trouble processing that. Could you please clarify or rephrase your last message?"
    # 5. Append the AI's direct response (question or report) to history for LLM context.
    # This is the only AI message added per turn, so history stays a clean transcript.
    history.append(AIMessage(content=ai_response_for_chat))
    # 6. Save state and refresh the displayed transcript.
    save_session_state(session_id, history, pending_questions, selected_sections)
    chatbot_display = _format_history_for_chatbot(history)
    # Return order must match the outputs list of the submit handlers.
    return (
        "",  # user_input (cleared)
        history,  # history_state
        pending_questions,  # pending_questions_state
        chatbot_display,  # chatbot
        brd_display_content,  # brd_display (summary and, when ready, the final report)
        selected_sections,  # selected_sections_state
        checkbox_interactive_state,  # brd_sections_checkboxes
        session_id,  # session_id_state
        updated_session_list_choices  # session_list
    )
| def _format_history_for_chatbot(history: List[Any]) -> List[Dict[str, str]]: | |
| """Converts message history to Gradio chatbot 'messages' format.""" | |
| # Since history now only contains human messages and the *single* relevant AI response per turn, | |
| # this function becomes straightforward. | |
| chatbot_display = [] | |
| for msg in history: | |
| role = "user" if msg.type == "human" else "assistant" | |
| chatbot_display.append({"role": role, "content": msg.content}) | |
| return chatbot_display | |
# --- 4) UI Functions ---
def start_new_chat():
    """Reset every piece of UI state for a fresh, not-yet-saved conversation.

    Nothing is written to disk here; the session file is only created once the
    user sends their first message.
    """
    all_sections = list(BRD_SECTIONS.keys())
    fresh_history: List[Any] = []
    no_questions: List[str] = []
    # Order must match the outputs lists wired to this function below.
    return (
        fresh_history,                     # history_state
        no_questions,                      # pending_questions_state
        "NEW_CHAT_SESSION",                # session_id_state (sentinel until first message)
        [],                                # chatbot (empty transcript)
        "",                                # user_input
        gr.update(choices=get_saved_sessions(), value="NEW_CHAT_SESSION"),  # session_list
        all_sections,                      # selected_sections_state
        gr.update(value=all_sections, interactive=True),  # brd_sections_checkboxes
        "BRD will appear here once generated."            # brd_display reset
    )
def load_chat_session(session_id: str):
    """Load a saved session into the UI, or delegate to start_new_chat for the sentinel id.

    Re-generates the progress summary (one LLM call) so the BRD preview matches
    the restored conversation, and re-surfaces the final report if the last AI
    message contains one.
    """
    if session_id == "NEW_CHAT_SESSION":
        return start_new_chat()
    history, pending_questions, selected_sections = load_session_state(session_id)
    chatbot_display = _format_history_for_chatbot(history)
    # Section checkboxes are only editable before the conversation starts.
    # Fixed: the old code built a gr.update(...) and then indexed it with
    # ['interactive'], relying on gr.update's internal dict representation;
    # computing the boolean once is version-proof and used directly below.
    checkboxes_editable = not history
    current_brd_display_content = "BRD will appear here once generated."
    if history:
        dynamic_checklist_content = "\n".join([f"- {section}: {BRD_SECTIONS[section]}" for section in selected_sections if section in BRD_SECTIONS])
        current_summarizer_system = summarizer_system_template.format(dynamic_checklist_content=dynamic_checklist_content)
        summarizer_prompt_instance = ChatPromptTemplate.from_messages([
            ("system", current_summarizer_system),
            MessagesPlaceholder("history")
        ])
        summary_chain = summarizer_prompt_instance | llm | StrOutputParser()
        current_brd_display_content = f"**Current Understanding:**\n{summary_chain.invoke({'history': history})}"
        # The final report is detected by the fixed intro sentence chat_logic
        # embeds in its message; if present, append the full text to the preview.
        if history[-1].type == "ai" and "Excellent, I have all the information required. Generating the final report now..." in history[-1].content:
            current_brd_display_content += f"\n\n---\n\n{history[-1].content}"
    # Order must match the outputs list wired to session_list.change.
    return (
        history,                          # history_state
        pending_questions,                # pending_questions_state
        session_id,                       # session_id_state
        chatbot_display,                  # chatbot
        "",                               # user_input
        gr.update(choices=get_saved_sessions(), value=session_id),  # session_list
        selected_sections,                # selected_sections_state
        gr.update(value=selected_sections, interactive=checkboxes_editable),  # brd_sections_checkboxes
        current_brd_display_content       # brd_display
    )
def on_ui_load():
    """Initialise all component values when the browser first opens the app."""
    fresh_state = start_new_chat()
    return fresh_state
# --- 5) Gradio UI Layout ---
with gr.Blocks(theme=gr.themes.Soft(), title="BRD Assistant", css=custom_css) as app:
    # Per-browser-session state containers (each visitor gets their own copies).
    history_state = gr.State([])
    pending_questions_state = gr.State([])
    session_id_state = gr.State("")
    brd_text_state = gr.State("")  # NOTE(review): not referenced by any handler below — confirm before removing
    selected_sections_state = gr.State(list(BRD_SECTIONS.keys()))  # Start with all sections selected
    with gr.Row():
        # Left column: sidebar listing past conversations.
        with gr.Column(scale=1, min_width=250):
            gr.Markdown("### Chat History")
            new_chat_button = gr.Button("โ New Chat", variant="primary")
            session_list = gr.Radio(
                label="Past Conversations",
                choices=get_saved_sessions(),
                interactive=True,
                type="value"
            )
        # Middle column: section picker, conversation transcript, and input row.
        with gr.Column(scale=3):
            gr.Markdown("### BRD Sections")
            brd_sections_checkboxes = gr.CheckboxGroup(
                label="Select sections for your BRD",
                choices=list(BRD_SECTIONS.keys()),
                value=list(BRD_SECTIONS.keys()),  # Default: all selected
                interactive=True
            )
            chatbot = gr.Chatbot(
                label="Conversation",
                height=500,
                show_copy_button=True,
                type='messages'  # Expects role/content dicts from _format_history_for_chatbot
            )
            with gr.Row():
                user_input = gr.Textbox(
                    show_label=False,
                    placeholder="Enter your project idea or answers here...",
                    scale=5,
                    container=False
                )
                submit_button = gr.Button("Send", variant="primary", scale=1, min_width=150)
        # Right column: live BRD preview, refreshed every turn by chat_logic.
        with gr.Column(scale=3):
            gr.Markdown("### ๐ Current BRD Preview (Auto-Updated)")
            brd_display = gr.Markdown("BRD will appear here once generated.")
    # --- Event Handlers ---
    app.load(
        on_ui_load,
        None,
        [history_state, pending_questions_state, session_id_state, chatbot, user_input, session_list, selected_sections_state, brd_sections_checkboxes, brd_display]
    )
    # Pressing Enter in the textbox and clicking Send both run the same turn logic.
    submit_triggers = [user_input.submit, submit_button.click]
    for trigger in submit_triggers:
        trigger(
            chat_logic,
            [user_input, history_state, pending_questions_state, session_id_state, selected_sections_state],
            [user_input, history_state, pending_questions_state, chatbot, brd_display, selected_sections_state, brd_sections_checkboxes, session_id_state, session_list]
        )
    new_chat_button.click(
        start_new_chat,
        [],
        [history_state, pending_questions_state, session_id_state, chatbot, user_input, session_list, selected_sections_state, brd_sections_checkboxes, brd_display]
    )
    session_list.change(
        load_chat_session,
        [session_list],
        [history_state, pending_questions_state, session_id_state, chatbot, user_input, session_list, selected_sections_state, brd_sections_checkboxes, brd_display]
    )
    # Keep selected_sections_state in sync whenever the checkbox group changes.
    brd_sections_checkboxes.change(
        lambda x: x,  # Simple passthrough into the state container
        [brd_sections_checkboxes],
        [selected_sections_state]
    )
if __name__ == "__main__":
    app.launch()  # debug=True, share=True)
# TODO: show sections
# Show outputs (acknowledgment, questions, report)
# How to canvas.