OmarAlasqa commited on
Commit
20cac61
·
1 Parent(s): 1de0c23

Add application file

Browse files
Files changed (2) hide show
  1. app.py +430 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,430 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import json
import gradio as gr
import uuid
from typing import List, Any, Tuple, Dict

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import HumanMessage, AIMessage
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.output_parsers import StrOutputParser

# Custom CSS injected into the Gradio UI: let the app span the full page width.
custom_css = """
.gradio-container {
max-width: none !important;
}
"""

# --- Secrets and API Key Configuration ---
# Prefer an already-set GOOGLE_API_KEY environment variable (the usual setup on
# Hugging Face Spaces); fall back to a local secrets.json for local development.
if not os.environ.get("GOOGLE_API_KEY"):
    try:
        with open('secrets.json') as f:
            secrets = json.load(f)
        os.environ["GOOGLE_API_KEY"] = secrets["GOOGLE_API_KEY"]
    except (FileNotFoundError, KeyError, json.JSONDecodeError):
        # Without a key the LLM client below cannot authenticate, so abort early
        # with an actionable message instead of failing later on the first call.
        raise SystemExit(
            "FATAL: GOOGLE_API_KEY is not set and secrets.json is missing or "
            "invalid. Set the environment variable or create secrets.json "
            "containing a GOOGLE_API_KEY entry."
        )

# --- 1) LLM and Prompt Configuration ---
llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash", temperature=0.5)

JSON_ONLY_INSTRUCTION = "Your output MUST BE ONLY the raw JSON object, without any markdown formatting like ```json or any other text."

# The full catalogue of BRD sections and the guiding questions for each.
# The UI lets the user pick a subset; prompts are built from that subset.
BRD_SECTIONS = {
    "Project Scope": "What are the specific, measurable features for the MVP (Minimum Viable Product)? What is explicitly out of scope?",
    "Target Audience": "Detailed personas (e.g., \"Busy Professional\" needs more detail: age, tech-savviness, cooking skill).",
    "Business Goals": "What are the key business metrics for success (e.g., user acquisition targets, revenue goals, churn rate)?",
    "Functional Requirements": "How should AI personalization work? What are the steps in the grocery ordering and delivery process? How does inventory tracking function?",
    "Non-Functional Requirements": "What are the expected performance benchmarks (e.g., app load time)? What are the security and data privacy considerations?",
    "Data Sources & Constraints": "What specific APIs will be used for recipes or grocery data? Are there any budget or technology stack limitations?",
    "Timeline": "What is the desired deadline or a rough project timeline?"
}

# Clarifier system template. {dynamic_checklist_content} is filled via str.format
# at call time; the quadruple braces survive .format() as double braces, which
# ChatPromptTemplate then renders as literal single braces.
clarifier_system_template = """You are a BRD-building assistant. Your primary goal is to gather details by asking clarifying questions. When you have enough information, you will signal you are ready to report.

You will be given a conversation history. Analyze it.
- If you have enough information to write a great BRD, set "status" to "REPORT_READY".
- If you still need more information, set "status" to "ASK" and provide a list of questions you need answers to.

**Required Information Checklist:**
{dynamic_checklist_content}

Return ONLY valid JSON with this exact schema:
{{{{
"status": "ASK" | "REPORT_READY",
"questions": string[],
"reason": string
}}}}
"""
# The clarifier prompt itself is created dynamically in chat_logic, with
# JSON_ONLY_INSTRUCTION appended there.

report_system = "You are a meticulous BRD writer. Create a polished, structured BRD based on all information collected in the conversation."
report_prompt = ChatPromptTemplate.from_messages([
    ("system", report_system),
    MessagesPlaceholder("history"),
    ("human", "Excellent. Please compile all the information into a comprehensive Business Requirement Document.")
])

# Prompt for summarizing the current understanding, shown in the BRD preview pane.
summarizer_system_template = """You are a helpful assistant. Based on the provided conversation history, summarize the key information gathered so far for each of the following BRD sections. If a section has no information yet, state that. Be concise and use bullet points.

**BRD Sections to Summarize:**
{dynamic_checklist_content}

Provide a summary for each section.
"""
79
+
80
# --- 2) Chat History & Session Management ---

# Directory where per-session chat state is persisted as JSON files.
HISTORY_DIR = "chat_histories"
# exist_ok avoids the race between an existence check and the creation,
# and is a no-op when the directory is already there.
os.makedirs(HISTORY_DIR, exist_ok=True)
85
+
86
def get_session_filepath(session_id: str) -> str:
    """Build the on-disk JSON path for a chat session's saved state."""
    filename = f"session_{session_id}.json"
    return os.path.join(HISTORY_DIR, filename)
88
+
89
def save_session_state(session_id: str, history: List[Any], pending_questions: List[str], selected_sections: List[str]):
    """Persist a session's chat history, pending-question queue and selected BRD sections to its JSON file.

    Does nothing when session_id is empty (unsaved placeholder sessions).
    """
    if not session_id:
        return
    serialized_history = []
    for msg in history:
        serialized_history.append({"type": msg.type, "content": msg.content})
    state = {
        "history": serialized_history,
        "pending_questions": pending_questions,
        "selected_sections": selected_sections,
    }
    with open(get_session_filepath(session_id), "w") as f:
        json.dump(state, f, indent=2)
99
+
100
def load_session_state(session_id: str) -> Tuple[List[Any], List[str], List[str]]:
    """Load a session's (history, pending_questions, selected_sections) from disk.

    Returns an empty history/queue and all BRD sections selected when the
    session file is missing or unreadable, so the UI always gets usable state.
    """
    filepath = get_session_filepath(session_id)
    if not os.path.exists(filepath):
        return [], [], list(BRD_SECTIONS.keys())
    with open(filepath, "r") as f:
        try:
            state = json.load(f)
            history = [
                HumanMessage(content=item["content"]) if item["type"] == "human" else AIMessage(content=item["content"])
                for item in state.get("history", [])
            ]
            pending_questions = state.get("pending_questions", [])
            selected_sections = state.get("selected_sections", list(BRD_SECTIONS.keys()))
            return history, pending_questions, selected_sections
        # KeyError covers history entries missing "type"/"content" (e.g. a
        # hand-edited or older-format file); fall back to a pristine session
        # rather than crashing the UI on load.
        except (json.JSONDecodeError, TypeError, KeyError):
            return [], [], list(BRD_SECTIONS.keys())
115
+
116
def get_saved_sessions() -> List[Tuple[str, str]]:
    """Return (label, session_id) pairs for all saved sessions, newest first.

    A fixed "New Chat" entry is prepended; labels come from the first message
    of each conversation, truncated to 40 characters.
    """
    sessions = []
    for filename in os.listdir(HISTORY_DIR):
        if not (filename.startswith("session_") and filename.endswith(".json")):
            continue
        session_id = filename.replace("session_", "").replace(".json", "")
        history, _, _ = load_session_state(session_id)
        label = "New Chat"
        # Label from the first message, when one exists and has content.
        if history and isinstance(history[0], (HumanMessage, AIMessage)) and history[0].content:
            first_content = history[0].content
            label = first_content[:40] + ("..." if len(first_content) > 40 else "")
        sessions.append((label, session_id))
    # Most recently modified sessions first; pair[1] is the session id.
    sessions.sort(key=lambda pair: os.path.getmtime(get_session_filepath(pair[1])), reverse=True)
    return [("➕ New Chat", "NEW_CHAT_SESSION")] + sessions
131
+
132
+ # --- 3) Professional Output Formatting ---
133
+
134
def format_ai_output(result: dict) -> str:
    """Transform the clarifier's JSON output into a polished chat message.

    Questions of the form "Category: text" are grouped under their category
    (a leading "Regarding " is stripped); anything else goes under "General".
    Numbering is sequential across all categories.
    """
    reason = result.get("reason", "No reason provided.")
    questions = result.get("questions", [])

    # No questions: don't promise "the following points" and then list nothing.
    if not questions:
        return f"Thank you. **Reasoning:** *{reason}*"

    response = (
        "Thank you. To build a comprehensive document, please provide details on the following points.\n\n"
        f"**Reasoning:** *{reason}*\n\n"
        "--- \n\n"
    )

    categorized: Dict[str, List[str]] = {}
    for q in questions:
        # Try to extract a category prefix, e.g. "Project Scope: What are..."
        parts = q.split(":", 1)
        if len(parts) == 2:
            category, question_text = parts
            category = category.replace("Regarding ", "").strip()
            categorized.setdefault(category, []).append(question_text.strip())
        else:
            categorized.setdefault("General", []).append(q)

    q_counter = 1
    for category, q_list in categorized.items():
        response += f"**{category}**\n"
        for q_text in q_list:
            # Plain "N. text" items; the previous "- N. text" rendered in
            # markdown as a bullet with a stray number inside it.
            response += f"{q_counter}. {q_text}\n"
            q_counter += 1
        response += "\n"

    return response.strip()
170
+
171
+ # --- 4) Core Gradio Application Logic (One-by-One Questions) ---
172
+
173
def chat_logic(user_message: str, history: List[Any], pending_questions: List[str], session_id: str, selected_sections: List[str]) -> Tuple:
    """Main logic for a single chat turn with a question queue.

    Flow: record the user message, refresh the BRD-preview summary via the
    summarizer chain, then either pop the next queued clarifier question or
    invoke the clarifier chain to get new questions / the final report.
    Returns the 9-tuple of Gradio output values wired up in the UI layout.
    """

    # If this is a "NEW_CHAT_SESSION", generate a real UUID and update the state
    if session_id == "NEW_CHAT_SESSION":
        session_id = str(uuid.uuid4())
        is_first_message_in_new_session = True
    else:
        is_first_message_in_new_session = len(history) == 0  # Check if history is empty for existing session

    # Lock the section checkboxes once a conversation exists.
    # NOTE(review): this is evaluated BEFORE the user message is appended below,
    # so after the very first message the checkboxes are returned as still
    # interactive for one extra turn — confirm whether that is intended.
    checkbox_interactive_state = gr.update(interactive=False) if history else gr.update(interactive=True)

    # 1. Add user's latest message to the history (this history is the one saved and passed to LLM)
    history.append(HumanMessage(content=user_message))

    # If this is the first message in a new session, save it now so the
    # sidebar session list can show the new conversation immediately.
    if is_first_message_in_new_session:
        save_session_state(session_id, history, pending_questions, selected_sections)
        updated_session_list_choices = gr.update(choices=get_saved_sessions(), value=session_id)
    else:
        updated_session_list_choices = gr.update()

    # Initialize content for the BRD preview pane.
    brd_display_content = "Thank you for your input. Processing...\n\n"

    # 2. Build the checklist text from only the user-selected BRD sections.
    dynamic_checklist_content = "\n".join([f"- {section}: {BRD_SECTIONS[section]}" for section in selected_sections if section in BRD_SECTIONS])

    # 3. Generate a progress summary of what has been gathered so far
    #    (one LLM call per turn, shown in the BRD preview).
    current_summarizer_system = summarizer_system_template.format(dynamic_checklist_content=dynamic_checklist_content)
    summarizer_prompt_instance = ChatPromptTemplate.from_messages([
        ("system", current_summarizer_system),
        MessagesPlaceholder("history")  # history already includes the new user message
    ])
    summary_chain = summarizer_prompt_instance | llm | StrOutputParser()
    progress_summary = summary_chain.invoke({"history": history})
    brd_display_content += f"**Current Understanding:**\n{progress_summary}\n\n"

    # 4. Determine the next AI action (queued question, new questions, or report).
    ai_response_for_chat = ""  # the message shown in the chatbot
    if pending_questions:
        # User was answering a pending question, so pop the next one —
        # no clarifier call needed while the queue is non-empty.
        ai_response_for_chat = pending_questions.pop(0)
    else:
        # Queue empty: ask the clarifier for new questions or a report signal.
        # JSON_ONLY_INSTRUCTION is appended here rather than kept in the template.
        current_clarifier_system = clarifier_system_template.format(dynamic_checklist_content=dynamic_checklist_content) + "\n" + JSON_ONLY_INSTRUCTION
        clarifier_prompt_instance = ChatPromptTemplate.from_messages([("system", current_clarifier_system), MessagesPlaceholder("history")])
        clarifier_chain = clarifier_prompt_instance | llm | StrOutputParser()
        raw_response = clarifier_chain.invoke({"history": history})

        try:
            result = json.loads(raw_response)
            status = result.get("status")

            if status == "ASK" and result.get("questions"):
                pending_questions.extend(result["questions"])  # queue every new question
                reason = result.get("reason", "To gather more details, I have some questions.")
                if pending_questions:
                    first_question = pending_questions.pop(0)  # ask only the first one this turn
                    ai_response_for_chat = f"Thank you for your input. {reason}\n\nHere is my first question:\n- {first_question}"
                else:
                    ai_response_for_chat = "I need more information, but I couldn't generate a specific question. Can you please elaborate further?"
            elif status == "REPORT_READY":
                # All checklist items satisfied: generate the final BRD.
                report_intro = "Excellent, I have all the information required. Generating the final report now..."
                final_chain = report_prompt | llm | StrOutputParser()
                report = final_chain.invoke({"history": history})
                ai_response_for_chat = f"{report_intro}\n\n---\n\n{report}"
                brd_display_content += f"\n\n=== FINAL BRD ===\n\n{report}"  # mirror the report in the BRD pane
            else:
                ai_response_for_chat = "I received an unexpected status. Let's try again. Can you rephrase?"
        except (json.JSONDecodeError, ValueError):
            # Model returned non-JSON despite the instruction; recover gracefully.
            ai_response_for_chat = "I'm having a little trouble processing that. Could you please clarify or rephrase your last message?"

    # 5. Append the AI's direct response (question or report) to history for LLM context.
    # This is the only AI message added per turn, keeping history 1:1 with the chat display.
    history.append(AIMessage(content=ai_response_for_chat))

    # 6. Save state and update display.
    save_session_state(session_id, history, pending_questions, selected_sections)

    # Format history for chatbot display.
    chatbot_display = _format_history_for_chatbot(history)

    # Return values: user_input, history_state, pending_questions_state, chatbot, brd_display, selected_sections_state, brd_sections_checkboxes, session_id_state, session_list
    return (
        "",  # user_input (clear the textbox)
        history,  # history_state
        pending_questions,  # pending_questions_state
        chatbot_display,  # chatbot
        brd_display_content,  # brd_display (summary and, when ready, the final report)
        selected_sections,  # selected_sections_state
        checkbox_interactive_state,  # brd_sections_checkboxes
        session_id,  # session_id_state
        updated_session_list_choices  # session_list
    )
269
+
270
+ def _format_history_for_chatbot(history: List[Any]) -> List[Dict[str, str]]:
271
+ """Converts message history to Gradio chatbot 'messages' format."""
272
+ # Since history now only contains human messages and the *single* relevant AI response per turn,
273
+ # this function becomes straightforward.
274
+ chatbot_display = []
275
+ for msg in history:
276
+ role = "user" if msg.type == "human" else "assistant"
277
+ chatbot_display.append({"role": role, "content": msg.content})
278
+ return chatbot_display
279
+
280
+ # --- 4) UI Functions ---
281
+
282
def start_new_chat():
    """Reset all UI state for a fresh, not-yet-persisted chat session.

    Nothing is written to disk here; the session is saved on the first
    user message, which is when it gets a real UUID.
    """
    all_sections = list(BRD_SECTIONS.keys())
    session_choices = gr.update(choices=get_saved_sessions(), value="NEW_CHAT_SESSION")
    checkboxes = gr.update(value=all_sections, interactive=True)
    # Order matches the output wiring: history_state, pending_questions_state,
    # session_id_state, chatbot, user_input, session_list,
    # selected_sections_state, brd_sections_checkboxes, brd_display.
    return (
        [],
        [],
        "NEW_CHAT_SESSION",
        [],
        "",
        session_choices,
        all_sections,
        checkboxes,
        "BRD will appear here once generated.",
    )
298
+
299
def load_chat_session(session_id: str):
    """Loads a selected chat session from the sidebar or starts a new one.

    Re-runs the summarizer chain over the loaded history so the BRD preview
    pane reflects the restored conversation. Returns the same 9-tuple of UI
    values as start_new_chat().
    """
    if session_id == "NEW_CHAT_SESSION":
        return start_new_chat()

    history, pending_questions, selected_sections = load_session_state(session_id)
    chatbot_display = _format_history_for_chatbot(history)

    # Checkboxes are editable only while the conversation is still empty.
    checkbox_interactive_state = gr.update(interactive=False) if history else gr.update(interactive=True)

    # Re-generate the last summary for the BRD display if history exists
    # (costs one LLM call per session switch).
    current_brd_display_content = "BRD will appear here once generated."
    if history:
        dynamic_checklist_content = "\n".join([f"- {section}: {BRD_SECTIONS[section]}" for section in selected_sections if section in BRD_SECTIONS])
        current_summarizer_system = summarizer_system_template.format(dynamic_checklist_content=dynamic_checklist_content)
        summarizer_prompt_instance = ChatPromptTemplate.from_messages([
            ("system", current_summarizer_system),
            MessagesPlaceholder("history")
        ])
        summary_chain = summarizer_prompt_instance | llm | StrOutputParser()
        current_brd_display_content = f"**Current Understanding:**\n{summary_chain.invoke({'history': history})}"

    # If the last message in history was a final report, append it to the BRD display.
    # NOTE(review): report detection relies on matching the exact intro sentence
    # emitted by chat_logic — brittle if that wording ever changes; confirm.
    if history and history[-1].type == "ai" and "Excellent, I have all the information required. Generating the final report now..." in history[-1].content:
        current_brd_display_content += f"\n\n---\n\n{history[-1].content}"  # Append the full report text

    # Return values: history_state, pending_questions_state, session_id_state, chatbot, user_input, session_list, selected_sections_state, brd_sections_checkboxes, brd_display
    return (
        history,
        pending_questions,
        session_id,
        chatbot_display,
        "",  # user_input (clear the textbox)
        gr.update(choices=get_saved_sessions(), value=session_id),  # session_list
        selected_sections,
        # gr.update(...) behaves like a dict here, so ['interactive'] pulls the
        # flag computed above — presumably relies on gr.update returning a dict;
        # TODO confirm against the installed Gradio version.
        gr.update(value=selected_sections, interactive=checkbox_interactive_state['interactive']),
        current_brd_display_content  # brd_display updated
    )
339
+
340
def on_ui_load():
    """Initialize the browser session on first page load with a fresh chat."""
    initial_state = start_new_chat()
    return initial_state
343
+
344
+ # --- 5) Gradio UI Layout ---
345
+
346
# --- 5) Gradio UI Layout ---
# Three-column layout: session sidebar | chat + section checkboxes | BRD preview.
# gr.State holders mirror the Python-side session data between event calls.

with gr.Blocks(theme=gr.themes.Soft(), title="BRD Assistant", css=custom_css) as app:
    history_state = gr.State([])               # LangChain message history for the active session
    pending_questions_state = gr.State([])     # clarifier questions queued to be asked one per turn
    session_id_state = gr.State("")            # "" until on_ui_load sets "NEW_CHAT_SESSION"
    brd_text_state = gr.State("")              # NOTE(review): never wired to any handler — confirm if still needed
    selected_sections_state = gr.State(list(BRD_SECTIONS.keys()))  # all sections selected by default

    with gr.Row():
        # Left column: session history sidebar.
        with gr.Column(scale=1, min_width=250):
            gr.Markdown("### Chat History")
            new_chat_button = gr.Button("➕ New Chat", variant="primary")
            session_list = gr.Radio(
                label="Past Conversations",
                choices=get_saved_sessions(),
                interactive=True,
                type="value"
            )
        # Middle column: section selection, chat, and input row.
        with gr.Column(scale=3):
            gr.Markdown("### BRD Sections")
            brd_sections_checkboxes = gr.CheckboxGroup(
                label="Select sections for your BRD",
                choices=list(BRD_SECTIONS.keys()),
                value=list(BRD_SECTIONS.keys()),  # default: all selected
                interactive=True
            )
            chatbot = gr.Chatbot(
                label="Conversation",
                height=500,
                show_copy_button=True,
                type='messages'  # expects [{"role", "content"}] dicts from _format_history_for_chatbot
            )
            with gr.Row():
                user_input = gr.Textbox(
                    show_label=False,
                    placeholder="Enter your project idea or answers here...",
                    scale=5,
                    container=False
                )
                submit_button = gr.Button("Send", variant="primary", scale=1, min_width=150)
        # Right column: auto-updated BRD preview pane.
        with gr.Column(scale=3):
            gr.Markdown("### 📄 Current BRD Preview (Auto-Updated)")
            brd_display = gr.Markdown("BRD will appear here once generated.")

    # --- Event Handlers ---
    # Page load resets to a fresh chat (same outputs as start_new_chat).
    app.load(
        on_ui_load,
        None,
        [history_state, pending_questions_state, session_id_state, chatbot, user_input, session_list, selected_sections_state, brd_sections_checkboxes, brd_display]
    )

    # Both Enter-in-textbox and the Send button trigger the same chat turn.
    submit_triggers = [user_input.submit, submit_button.click]
    for trigger in submit_triggers:
        trigger(
            chat_logic,
            [user_input, history_state, pending_questions_state, session_id_state, selected_sections_state],
            [user_input, history_state, pending_questions_state, chatbot, brd_display, selected_sections_state, brd_sections_checkboxes, session_id_state, session_list]
        )

    new_chat_button.click(
        start_new_chat,
        [],
        [history_state, pending_questions_state, session_id_state, chatbot, user_input, session_list, selected_sections_state, brd_sections_checkboxes, brd_display]
    )
    # Selecting a sidebar entry loads that session (or resets, for the New Chat entry).
    session_list.change(
        load_chat_session,
        [session_list],
        [history_state, pending_questions_state, session_id_state, chatbot, user_input, session_list, selected_sections_state, brd_sections_checkboxes, brd_display]
    )

    # Keep selected_sections_state in sync with the checkbox group.
    brd_sections_checkboxes.change(
        lambda x: x,  # simple passthrough into the state holder
        [brd_sections_checkboxes],
        [selected_sections_state]
    )

if __name__ == "__main__":
    app.launch()  # debug=True, share=True left disabled for deployment
426
+
427
+
428
+ # TODO: show sections
429
+ # Show outputs (aknowledgment, questions, report)
430
+ # How to canvas.
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ gradio
2
+ langchain-core
3
+ langchain-google-genai