Spaces:
Build error
Build error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,695 +1,111 @@
|
|
|
|
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
-
import requests
|
| 3 |
from openai import OpenAI
|
| 4 |
-
import
|
| 5 |
-
import anthropic # Import the Anthropic client library for Claude
|
| 6 |
-
import streamlit.components.v1 as components
|
| 7 |
-
import re
|
| 8 |
-
# from supabase import create_client # Keep commented if not using history/auth
|
| 9 |
-
import base64 # Needed for download buttons
|
| 10 |
-
import time # For potential delays/retries if needed
|
| 11 |
-
|
| 12 |
-
# ---------- Helper: Safe Rerun ----------
|
| 13 |
-
def safe_rerun():
|
| 14 |
-
if hasattr(st, "experimental_rerun"):
|
| 15 |
-
st.experimental_rerun()
|
| 16 |
-
elif hasattr(st, "rerun"):
|
| 17 |
-
st.rerun()
|
| 18 |
-
else:
|
| 19 |
-
st.warning("Rerun function not available. Please update Streamlit.")
|
| 20 |
-
|
| 21 |
-
# ---------- Setup & API Client Initialization ----------
|
| 22 |
-
openai_client = None
|
| 23 |
-
genai_client = None # For Google GenAI
|
| 24 |
-
deepseek_api_key = None
|
| 25 |
-
claude_client = None # For Anthropic Claude
|
| 26 |
-
secrets_available = {"openai": False, "gemini": False, "deepseek": False, "claude": False}
|
| 27 |
-
secret_errors = [] # Store errors for display
|
| 28 |
-
|
| 29 |
-
# OpenAI API Key
|
| 30 |
-
try:
|
| 31 |
-
openai_api_key = st.secrets.get("OPENAI_API_KEY")
|
| 32 |
-
if openai_api_key:
|
| 33 |
-
openai_client = OpenAI(api_key=openai_api_key)
|
| 34 |
-
secrets_available["openai"] = True
|
| 35 |
-
else:
|
| 36 |
-
secret_errors.append("Streamlit Secret `OPENAI_API_KEY` not found.")
|
| 37 |
-
except KeyError:
|
| 38 |
-
secret_errors.append("Streamlit Secret `OPENAI_API_KEY` not found.")
|
| 39 |
-
except Exception as e:
|
| 40 |
-
secret_errors.append(f"Error initializing OpenAI client: {e}")
|
| 41 |
-
|
| 42 |
-
# Gemini API Key (Google GenAI)
|
| 43 |
-
try:
|
| 44 |
-
gemini_api_key = st.secrets.get("GEMINI_API_KEY")
|
| 45 |
-
if gemini_api_key:
|
| 46 |
-
genai.configure(api_key=gemini_api_key)
|
| 47 |
-
genai_client = genai # Store the configured module
|
| 48 |
-
secrets_available["gemini"] = True
|
| 49 |
-
else:
|
| 50 |
-
secret_errors.append("Streamlit Secret `GEMINI_API_KEY` not found.")
|
| 51 |
-
except KeyError:
|
| 52 |
-
secret_errors.append("Streamlit Secret `GEMINI_API_KEY` not found.")
|
| 53 |
-
except Exception as e:
|
| 54 |
-
secret_errors.append(f"Error initializing Google GenAI client: {e}")
|
| 55 |
-
|
| 56 |
-
# DeepSeek API Key
|
| 57 |
-
try:
|
| 58 |
-
deepseek_api_key = st.secrets.get("DEEPSEEK_API_KEY")
|
| 59 |
-
if deepseek_api_key:
|
| 60 |
-
secrets_available["deepseek"] = True
|
| 61 |
-
else:
|
| 62 |
-
secret_errors.append("Streamlit Secret `DEEPSEEK_API_KEY` not found.")
|
| 63 |
-
except KeyError:
|
| 64 |
-
secret_errors.append("Streamlit Secret `DEEPSEEK_API_KEY` not found.")
|
| 65 |
-
except Exception as e:
|
| 66 |
-
secret_errors.append(f"Error reading DeepSeek API key: {e}")
|
| 67 |
-
|
| 68 |
-
# CLAUDE API Key and Client Initialization using Anthropic library
|
| 69 |
-
try:
|
| 70 |
-
claude_api_key = st.secrets.get("CLAUDE_API_KEY")
|
| 71 |
-
if claude_api_key:
|
| 72 |
-
claude_client = anthropic.Anthropic(api_key=claude_api_key)
|
| 73 |
-
secrets_available["claude"] = True
|
| 74 |
-
else:
|
| 75 |
-
secret_errors.append("Streamlit Secret `CLAUDE_API_KEY` not found.")
|
| 76 |
-
except KeyError:
|
| 77 |
-
secret_errors.append("Streamlit Secret `CLAUDE_API_KEY` not found.")
|
| 78 |
-
except Exception as e:
|
| 79 |
-
secret_errors.append(f"Error initializing Claude client: {e}")
|
| 80 |
-
|
| 81 |
-
# Check if at least one API key is loaded
|
| 82 |
-
any_secret_loaded = any(secrets_available.values())
|
| 83 |
-
|
| 84 |
-
# ---------- Model Configuration ----------
|
| 85 |
-
SUPPORTED_MODELS = {}
|
| 86 |
-
|
| 87 |
-
# OpenAI Models
|
| 88 |
-
if secrets_available["openai"] and openai_client:
|
| 89 |
-
SUPPORTED_MODELS.update({
|
| 90 |
-
"GPT-4o (OpenAI)": {"id": "gpt-4o", "provider": "openai", "client": openai_client},
|
| 91 |
-
"GPT-4o Mini (OpenAI)": {"id": "gpt-4o-mini", "provider": "openai", "client": openai_client},
|
| 92 |
-
"GPT-4 Turbo (OpenAI)": {"id": "gpt-4-turbo", "provider": "openai", "client": openai_client},
|
| 93 |
-
"GPT-4 (OpenAI)": {"id": "gpt-4", "provider": "openai", "client": openai_client},
|
| 94 |
-
"GPT-3.5 Turbo (OpenAI)": {"id": "gpt-3.5-turbo", "provider": "openai", "client": openai_client},
|
| 95 |
-
"ChatGPT o1 (OpenAI)": {"id": "chatgpt-o1", "provider": "openai", "client": openai_client},
|
| 96 |
-
"ChatGPT o3 (OpenAI)": {"id": "chatgpt-o3", "provider": "openai", "client": openai_client},
|
| 97 |
-
})
|
| 98 |
-
|
| 99 |
-
# Gemini Models (Google)
|
| 100 |
-
if secrets_available["gemini"] and genai_client:
|
| 101 |
-
SUPPORTED_MODELS.update({
|
| 102 |
-
"Gemini 2.0 (Google)": {"id": "gemini-2.0-latest", "provider": "gemini", "client": genai_client},
|
| 103 |
-
"Gemini 2.5 (Google)": {"id": "gemini-2.5-latest", "provider": "gemini", "client": genai_client},
|
| 104 |
-
"Gemini 1.5 Pro (Google)": {"id": "gemini-1.5-pro-latest", "provider": "gemini", "client": genai_client},
|
| 105 |
-
"Gemini 1.5 Flash (Google)": {"id": "gemini-1.5-flash-latest", "provider": "gemini", "client": genai_client},
|
| 106 |
-
"Gemini 1.0 Pro (Google)": {"id": "gemini-1.0-pro", "provider": "gemini", "client": genai_client},
|
| 107 |
-
})
|
| 108 |
-
|
| 109 |
-
# DeepSeek Models
|
| 110 |
-
if secrets_available["deepseek"] and deepseek_api_key:
|
| 111 |
-
SUPPORTED_MODELS.update({
|
| 112 |
-
"DeepSeek Chat": {"id": "deepseek-chat", "provider": "deepseek", "client": None},
|
| 113 |
-
"DeepSeek Coder": {"id": "deepseek-coder", "provider": "deepseek", "client": None},
|
| 114 |
-
})
|
| 115 |
|
| 116 |
-
#
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
"Claude 3.7 Sonnet (Anthropic)": {"id": "claude-3-7-sonnet-20250219", "provider": "claude", "client": claude_client},
|
| 120 |
-
"Claude 3.5 Haiku (Anthropic)": {"id": "claude-3-5-haiku-20241022", "provider": "claude", "client": claude_client},
|
| 121 |
-
})
|
| 122 |
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
"GPT-3.5 Turbo (OpenAI)",
|
| 128 |
-
]
|
| 129 |
-
DEFAULT_MODEL = next((m for m in DEFAULT_MODEL_PREFERENCE if m in SUPPORTED_MODELS), None)
|
| 130 |
-
if not DEFAULT_MODEL and SUPPORTED_MODELS:
|
| 131 |
-
DEFAULT_MODEL = next(iter(SUPPORTED_MODELS))
|
| 132 |
-
|
| 133 |
-
# ---------- Helper Functions for Generation ----------
|
| 134 |
-
|
| 135 |
-
def _generate_with_openai_provider(client, model_id, prompt, max_tokens):
|
| 136 |
-
try:
|
| 137 |
-
response = client.chat.completions.create(
|
| 138 |
-
model=model_id,
|
| 139 |
-
messages=[
|
| 140 |
-
{"role": "system", "content": "You are an expert product manager and software architect. Be detailed and thorough."},
|
| 141 |
-
{"role": "user", "content": prompt}
|
| 142 |
-
],
|
| 143 |
-
temperature=0.6,
|
| 144 |
-
max_tokens=max_tokens
|
| 145 |
-
)
|
| 146 |
-
return response.choices[0].message.content
|
| 147 |
-
except Exception as e:
|
| 148 |
-
st.error(f"❌ OpenAI API Error ({model_id}): {e}")
|
| 149 |
-
return f"Error: OpenAI API call failed for {model_id}. Details: {e}"
|
| 150 |
-
|
| 151 |
-
def _generate_with_gemini_provider(client, model_id, prompt, max_tokens):
|
| 152 |
-
try:
|
| 153 |
-
model = client.GenerativeModel(
|
| 154 |
-
model_id,
|
| 155 |
-
safety_settings={
|
| 156 |
-
'HARM_CATEGORY_HARASSMENT': 'block_medium_and_above',
|
| 157 |
-
'HARM_CATEGORY_HATE_SPEECH': 'block_medium_and_above',
|
| 158 |
-
'HARM_CATEGORY_SEXUALLY_EXPLICIT': 'block_medium_and_above',
|
| 159 |
-
'HARM_CATEGORY_DANGEROUS_CONTENT': 'block_medium_and_above',
|
| 160 |
-
},
|
| 161 |
-
generation_config=client.types.GenerationConfig(temperature=0.7)
|
| 162 |
-
)
|
| 163 |
-
response = model.generate_content(prompt)
|
| 164 |
-
if hasattr(response, 'text') and response.text:
|
| 165 |
-
return response.text
|
| 166 |
-
elif response.parts:
|
| 167 |
-
return "".join(part.text for part in response.parts if hasattr(part, 'text'))
|
| 168 |
-
elif response.prompt_feedback.block_reason:
|
| 169 |
-
st.warning(f"Gemini response blocked ({model_id}). Reason: {response.prompt_feedback.block_reason}")
|
| 170 |
-
return f"Response blocked by API safety filters ({model_id}): {response.prompt_feedback.block_reason}"
|
| 171 |
-
else:
|
| 172 |
-
st.warning(f"Gemini returned an empty or unexpected response ({model_id}). Response: {response}")
|
| 173 |
-
return f"Error: Gemini returned an empty response for {model_id}."
|
| 174 |
-
except Exception as e:
|
| 175 |
-
st.error(f"❌ Gemini SDK error ({model_id}): {e}")
|
| 176 |
-
error_detail = getattr(e, 'message', str(e))
|
| 177 |
-
return f"Error: Gemini SDK call failed for {model_id}. Details: {error_detail}"
|
| 178 |
-
|
| 179 |
-
def _generate_with_deepseek_provider(api_key, model_id, prompt, max_tokens):
|
| 180 |
-
headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
|
| 181 |
-
payload = {
|
| 182 |
-
"model": model_id,
|
| 183 |
-
"messages": [
|
| 184 |
-
{"role": "system", "content": "You are an expert product manager and software architect. Be detailed and thorough."},
|
| 185 |
-
{"role": "user", "content": prompt}
|
| 186 |
-
],
|
| 187 |
-
"temperature": 0.6,
|
| 188 |
-
"max_tokens": max_tokens
|
| 189 |
-
}
|
| 190 |
-
try:
|
| 191 |
-
response = requests.post("https://api.deepseek.com/chat/completions", headers=headers, json=payload, timeout=90)
|
| 192 |
-
response.raise_for_status()
|
| 193 |
-
response_data = response.json()
|
| 194 |
-
if ("choices" in response_data and response_data["choices"] and
|
| 195 |
-
"message" in response_data["choices"][0] and
|
| 196 |
-
"content" in response_data["choices"][0]["message"]):
|
| 197 |
-
return response_data["choices"][0]["message"]["content"]
|
| 198 |
-
else:
|
| 199 |
-
st.warning(f"DeepSeek returned an unexpected response structure ({model_id}): {response_data}")
|
| 200 |
-
return f"Error: DeepSeek returned an unexpected structure for {model_id}."
|
| 201 |
-
except requests.exceptions.RequestException as e:
|
| 202 |
-
st.error(f"❌ DeepSeek API Request Error ({model_id}): {e}")
|
| 203 |
-
return f"Error: DeepSeek API request failed for {model_id}. Details: {e}"
|
| 204 |
-
except Exception as e:
|
| 205 |
-
st.error(f"❌ DeepSeek Error processing response ({model_id}): {e}")
|
| 206 |
-
return f"Error: DeepSeek processing failed for {model_id}. Details: {e}"
|
| 207 |
-
|
| 208 |
-
def _generate_with_claude_provider(client, model_id, prompt, max_tokens):
|
| 209 |
-
try:
|
| 210 |
-
message = client.messages.create(
|
| 211 |
-
model=model_id,
|
| 212 |
-
max_tokens=max_tokens,
|
| 213 |
-
messages=[
|
| 214 |
-
{"role": "user", "content": prompt}
|
| 215 |
-
]
|
| 216 |
-
)
|
| 217 |
-
content = message.content
|
| 218 |
-
# If content is a list of TextBlock objects, extract and join their text
|
| 219 |
-
if isinstance(content, list):
|
| 220 |
-
content = "\n".join([block.text for block in content if hasattr(block, "text")])
|
| 221 |
-
elif not isinstance(content, str):
|
| 222 |
-
content = str(content)
|
| 223 |
-
return content
|
| 224 |
-
except Exception as e:
|
| 225 |
-
st.error(f"❌ Claude API Error ({model_id}): {e}")
|
| 226 |
-
return f"Error: Claude API call failed for {model_id}. Details: {e}"
|
| 227 |
-
|
| 228 |
-
def generate_with_selected_model(selected_model_name, prompt, max_tokens=2000):
|
| 229 |
-
if not any_secret_loaded or not SUPPORTED_MODELS:
|
| 230 |
-
return "Error: No API keys loaded or models available."
|
| 231 |
-
if selected_model_name not in SUPPORTED_MODELS:
|
| 232 |
-
st.error(f"Selected model '{selected_model_name}' is not configured or unavailable.")
|
| 233 |
-
selected_model_name = DEFAULT_MODEL
|
| 234 |
-
if not selected_model_name:
|
| 235 |
-
return "Error: Default model also unavailable."
|
| 236 |
-
st.warning(f"Falling back to default model: {DEFAULT_MODEL}")
|
| 237 |
-
|
| 238 |
-
model_config = SUPPORTED_MODELS[selected_model_name]
|
| 239 |
-
provider = model_config["provider"]
|
| 240 |
-
model_id = model_config["id"]
|
| 241 |
-
client = model_config.get("client")
|
| 242 |
-
|
| 243 |
-
st.info(f"Generating with: **{selected_model_name}**")
|
| 244 |
-
start_time = time.time()
|
| 245 |
-
result = None
|
| 246 |
-
if provider == "openai":
|
| 247 |
-
if not client:
|
| 248 |
-
result = f"Error: OpenAI client not initialized for {selected_model_name}."
|
| 249 |
-
else:
|
| 250 |
-
result = _generate_with_openai_provider(client, model_id, prompt, max_tokens)
|
| 251 |
-
elif provider == "gemini":
|
| 252 |
-
if not client:
|
| 253 |
-
result = f"Error: Gemini client not initialized for {selected_model_name}."
|
| 254 |
-
else:
|
| 255 |
-
result = _generate_with_gemini_provider(client, model_id, prompt, max_tokens)
|
| 256 |
-
elif provider == "deepseek":
|
| 257 |
-
if not deepseek_api_key:
|
| 258 |
-
result = f"Error: DeepSeek API key not available for {selected_model_name}."
|
| 259 |
-
else:
|
| 260 |
-
result = _generate_with_deepseek_provider(deepseek_api_key, model_id, prompt, max_tokens)
|
| 261 |
-
elif provider == "claude":
|
| 262 |
-
if not client:
|
| 263 |
-
result = f"Error: Claude client not initialized for {selected_model_name}."
|
| 264 |
-
else:
|
| 265 |
-
result = _generate_with_claude_provider(client, model_id, prompt, max_tokens)
|
| 266 |
-
else:
|
| 267 |
-
st.error(f"Unknown provider '{provider}' configured for model '{selected_model_name}'.")
|
| 268 |
-
result = f"Error: Unknown provider {provider}."
|
| 269 |
-
end_time = time.time()
|
| 270 |
-
duration = end_time - start_time
|
| 271 |
-
# st.caption(f"Generation took {duration:.2f} seconds.")
|
| 272 |
-
|
| 273 |
-
if isinstance(result, str) and result.startswith("Error:"):
|
| 274 |
return None
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
|
| 278 |
-
|
| 279 |
-
return False
|
| 280 |
-
code_lower = code.strip().lower()
|
| 281 |
-
return bool(re.search(r"^\s*(%%.*?\n)*\s*(graph|flowchart|sequenceDiagram|classDiagram|stateDiagram|erDiagram|gantt|pie|gitGraph)", code_lower, re.MULTILINE))
|
| 282 |
-
|
| 283 |
-
def render_mermaid_diagram(mermaid_code, key):
|
| 284 |
-
if not isinstance(mermaid_code, str) or not mermaid_code.strip():
|
| 285 |
-
st.warning(f"Mermaid code is empty or invalid (Key: {key}).")
|
| 286 |
-
return
|
| 287 |
-
|
| 288 |
-
cleaned_code = re.sub(r"^```mermaid\s*\n?", "", mermaid_code, flags=re.IGNORECASE).strip()
|
| 289 |
-
cleaned_code = re.sub(r"\n?```\s*$", "", cleaned_code).strip()
|
| 290 |
-
|
| 291 |
-
if not is_valid_mermaid(cleaned_code):
|
| 292 |
-
st.warning(f"⚠️ Mermaid diagram might not render correctly (Key: {key}). Check syntax. Displaying raw code.")
|
| 293 |
-
st.code(cleaned_code, language="mermaid")
|
| 294 |
-
return
|
| 295 |
-
|
| 296 |
-
components.html(
|
| 297 |
-
f"""
|
| 298 |
-
<div id="mermaid-container-{key}" style="background-color: white; padding: 10px; border-radius: 5px; overflow: auto;">
|
| 299 |
-
<pre class="mermaid" id="mermaid-{key}">
|
| 300 |
-
{cleaned_code}
|
| 301 |
-
</pre>
|
| 302 |
-
</div>
|
| 303 |
-
<script type="module">
|
| 304 |
-
import mermaid from 'https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.esm.min.mjs';
|
| 305 |
-
try {{
|
| 306 |
-
mermaid.initialize({{ startOnLoad: false, theme: 'default' }});
|
| 307 |
-
const el = document.getElementById('mermaid-{key}');
|
| 308 |
-
if (el) {{
|
| 309 |
-
await mermaid.run({{ nodes: [el] }});
|
| 310 |
-
}} else {{
|
| 311 |
-
console.error("Mermaid target element not found: mermaid-{key}");
|
| 312 |
-
}}
|
| 313 |
-
}} catch (e) {{
|
| 314 |
-
console.error("Mermaid rendering error (Key: {key}):", e);
|
| 315 |
-
}}
|
| 316 |
-
</script>
|
| 317 |
-
""",
|
| 318 |
-
height=500, scrolling=True,
|
| 319 |
-
)
|
| 320 |
-
|
| 321 |
-
# ---------- Initialize Session State ----------
|
| 322 |
-
if 'elaboration_pending' not in st.session_state:
|
| 323 |
-
st.session_state.elaboration_pending = False
|
| 324 |
-
if 'elaboration_complete' not in st.session_state:
|
| 325 |
-
st.session_state.elaboration_complete = False
|
| 326 |
-
if 'generating_prd' not in st.session_state:
|
| 327 |
-
st.session_state.generating_prd = False
|
| 328 |
-
if 'generating_optional' not in st.session_state:
|
| 329 |
-
st.session_state.generating_optional = False
|
| 330 |
-
if 'initial_product_idea' not in st.session_state:
|
| 331 |
-
st.session_state.initial_product_idea = ""
|
| 332 |
-
if 'tech_stack_hint' not in st.session_state:
|
| 333 |
-
st.session_state.tech_stack_hint = ""
|
| 334 |
-
if 'model_choice' not in st.session_state:
|
| 335 |
-
st.session_state.model_choice = DEFAULT_MODEL
|
| 336 |
-
if 'elaborated_idea_raw' not in st.session_state:
|
| 337 |
-
st.session_state.elaborated_idea_raw = None
|
| 338 |
-
if 'confirmed_product_idea' not in st.session_state:
|
| 339 |
-
st.session_state.confirmed_product_idea = None
|
| 340 |
-
if 'prd_content' not in st.session_state:
|
| 341 |
-
st.session_state.prd_content = None
|
| 342 |
-
if 'selected_docs_to_generate' not in st.session_state:
|
| 343 |
-
st.session_state.selected_docs_to_generate = {}
|
| 344 |
-
if 'generated_docs' not in st.session_state:
|
| 345 |
-
st.session_state.generated_docs = {}
|
| 346 |
-
|
| 347 |
-
# ---------- Define Document Options ----------
|
| 348 |
-
doc_options = {
|
| 349 |
-
"frontend": {
|
| 350 |
-
"label": "Frontend Architecture",
|
| 351 |
-
"prompt": lambda idea, hint: f"Based on the following refined product description, describe the frontend tech stack and architecture.\n\nProduct Description:\n{idea}\n\nConsider these preferences if relevant: {hint}",
|
| 352 |
-
"display_func": lambda content, key: st.markdown(content),
|
| 353 |
-
"download_filename": "frontend_architecture.md",
|
| 354 |
-
"mime": "text/markdown",
|
| 355 |
-
"render_func": None,
|
| 356 |
-
"code_language": None,
|
| 357 |
-
},
|
| 358 |
-
"backend": {
|
| 359 |
-
"label": "Backend Architecture",
|
| 360 |
-
"prompt": lambda idea, hint: f"Based on the following refined product description, describe the backend system design.\n\nProduct Description:\n{idea}\n\nConsider these preferences if relevant: {hint}",
|
| 361 |
-
"display_func": lambda content, key: st.markdown(content),
|
| 362 |
-
"download_filename": "backend_architecture.md",
|
| 363 |
-
"mime": "text/markdown",
|
| 364 |
-
"render_func": None,
|
| 365 |
-
"code_language": None,
|
| 366 |
-
},
|
| 367 |
-
"project_structure": {
|
| 368 |
-
"label": "Project Folder Structure",
|
| 369 |
-
"prompt": lambda idea, hint: f"""
|
| 370 |
-
Based on the following refined product description and any technical hints provided, generate a suggested file and folder structure for the project... [Same prompt as before] ...
|
| 371 |
-
|
| 372 |
-
**Confirmed Product Description:**
|
| 373 |
-
---
|
| 374 |
-
{idea}
|
| 375 |
-
---
|
| 376 |
-
**Optional Preferences/Hints Provided:**
|
| 377 |
-
{hint if hint else "None provided"}
|
| 378 |
-
""",
|
| 379 |
-
"display_func": None,
|
| 380 |
-
"download_filename": "project_structure.txt",
|
| 381 |
-
"mime": "text/plain",
|
| 382 |
-
"render_func": None,
|
| 383 |
-
"code_language": "bash",
|
| 384 |
-
},
|
| 385 |
-
"user_flow": {
|
| 386 |
-
"label": "User Flow (Text)",
|
| 387 |
-
"prompt": lambda idea, hint: f"Based on the following refined product description, describe the primary user flow.\n\nProduct Description:\n{idea}\n\nConsider these preferences if relevant: {hint}",
|
| 388 |
-
"display_func": lambda content, key: st.markdown(content),
|
| 389 |
-
"download_filename": "user_flow.md",
|
| 390 |
-
"mime": "text/markdown",
|
| 391 |
-
"render_func": None,
|
| 392 |
-
"code_language": None,
|
| 393 |
-
},
|
| 394 |
-
"user_flow_mermaid": {
|
| 395 |
-
"label": "User Flow Diagram (Mermaid)",
|
| 396 |
-
"prompt": lambda idea, hint: f"Based on the following refined product description, create a user flow diagram in Mermaid.js flowchart format... [Same prompt as before] ...\n\nProduct Description:\n{idea}\n\nConsider these preferences if relevant: {hint}",
|
| 397 |
-
"display_func": None,
|
| 398 |
-
"download_filename": "user_flow_diagram.mmd",
|
| 399 |
-
"mime": "text/plain",
|
| 400 |
-
"render_func": render_mermaid_diagram,
|
| 401 |
-
"code_language": "mermaid",
|
| 402 |
-
},
|
| 403 |
-
"system_arch_mermaid": {
|
| 404 |
-
"label": "System Architecture Diagram (Mermaid)",
|
| 405 |
-
"prompt": lambda idea, hint: f"Based on the following refined product description, create a system architecture diagram in Mermaid.js format... [Same prompt as before] ...\n\nProduct Description:\n{idea}\n\nConsider these preferences if relevant: {hint}",
|
| 406 |
-
"display_func": None,
|
| 407 |
-
"download_filename": "system_architecture.mmd",
|
| 408 |
-
"mime": "text/plain",
|
| 409 |
-
"render_func": render_mermaid_diagram,
|
| 410 |
-
"code_language": "mermaid",
|
| 411 |
-
},
|
| 412 |
-
"db_schema": {
|
| 413 |
-
"label": "Database Schema",
|
| 414 |
-
"prompt": lambda idea, hint: f"Based on the following refined product description, design a relational database schema... [Same prompt as before] ...\n\nProduct Description:\n{idea}\n\nConsider these preferences if relevant: {hint}",
|
| 415 |
-
"display_func": lambda content, key: st.code(content, language='sql'),
|
| 416 |
-
"download_filename": "database_schema.sql",
|
| 417 |
-
"mime": "text/x-sql",
|
| 418 |
-
"render_func": None,
|
| 419 |
-
"code_language": "sql",
|
| 420 |
-
},
|
| 421 |
-
"sql_queries": {
|
| 422 |
-
"label": "SQL Queries",
|
| 423 |
-
"prompt": lambda idea, hint: f"Based on the following refined product description and likely schema, write 5-10 sample SQL CRUD queries.\n\nProduct Description:\n{idea}\n\nConsider these preferences if relevant: {hint}",
|
| 424 |
-
"display_func": None,
|
| 425 |
-
"download_filename": "sample_queries.sql",
|
| 426 |
-
"mime": "text/x-sql",
|
| 427 |
-
"render_func": None,
|
| 428 |
-
"code_language": "sql",
|
| 429 |
-
},
|
| 430 |
-
}
|
| 431 |
-
|
| 432 |
-
# ---------- UI Layout ----------
|
| 433 |
-
st.set_page_config(layout="wide", page_title="Idea to PRD Generator")
|
| 434 |
-
st.title("🚀 Product Idea → Refinement → PRD & Docs Generator")
|
| 435 |
-
st.caption("Enter your initial idea, let the AI refine it, edit if needed, then generate detailed documents.")
|
| 436 |
-
|
| 437 |
-
# Display any errors loading secrets prominently at the top
|
| 438 |
-
if secret_errors:
|
| 439 |
-
st.error("API Key Configuration Issues Found:")
|
| 440 |
-
for error in secret_errors:
|
| 441 |
-
st.error(f"- {error}")
|
| 442 |
-
|
| 443 |
-
if not any_secret_loaded or not SUPPORTED_MODELS:
|
| 444 |
-
st.error("No API keys loaded or LLM models available. Please configure necessary Streamlit secrets. Cannot proceed.")
|
| 445 |
-
st.stop()
|
| 446 |
-
|
| 447 |
-
# --- Columns for Side-by-Side Layout ---
|
| 448 |
-
col1, col2 = st.columns(2)
|
| 449 |
-
|
| 450 |
-
# ---------- Step 1: Input Initial Idea (in Column 1) ----------
|
| 451 |
-
with col1:
|
| 452 |
-
st.header("1. Define Your Initial Idea")
|
| 453 |
-
with st.form(key="idea_form"):
|
| 454 |
-
initial_idea_input = st.text_area(
|
| 455 |
-
"💡 Enter your raw product idea here:", height=180,
|
| 456 |
-
value=st.session_state.initial_product_idea,
|
| 457 |
-
help="Describe your app concept, target users, and core goals."
|
| 458 |
-
)
|
| 459 |
-
tech_hint_input = st.text_input(
|
| 460 |
-
"⚙️ Optional: Tech Stack or Specific Preferences",
|
| 461 |
-
placeholder="e.g., Use React Native, focus on scalability",
|
| 462 |
-
value=st.session_state.tech_stack_hint,
|
| 463 |
-
help="Mention any preferred technologies or constraints."
|
| 464 |
-
)
|
| 465 |
-
available_model_names = list(SUPPORTED_MODELS.keys())
|
| 466 |
-
default_index = 0
|
| 467 |
-
current_model_choice = st.session_state.get('model_choice', DEFAULT_MODEL)
|
| 468 |
-
if current_model_choice in available_model_names:
|
| 469 |
-
default_index = available_model_names.index(current_model_choice)
|
| 470 |
-
elif DEFAULT_MODEL in available_model_names:
|
| 471 |
-
default_index = available_model_names.index(DEFAULT_MODEL)
|
| 472 |
-
st.session_state.model_choice = DEFAULT_MODEL
|
| 473 |
-
model_choice_input = st.selectbox(
|
| 474 |
-
"🧠 Choose AI model:",
|
| 475 |
-
options=available_model_names,
|
| 476 |
-
index=default_index,
|
| 477 |
-
key="model_choice_select",
|
| 478 |
-
help="Select the AI model to use for all generation steps."
|
| 479 |
-
)
|
| 480 |
-
analyze_button = st.form_submit_button(
|
| 481 |
-
label="1️⃣ Analyze & Refine Idea",
|
| 482 |
-
disabled=st.session_state.elaboration_pending or st.session_state.generating_prd,
|
| 483 |
-
use_container_width=True
|
| 484 |
-
)
|
| 485 |
-
# --- Update session state when the analyze button is pressed ---
|
| 486 |
-
if analyze_button:
|
| 487 |
-
st.session_state.initial_product_idea = initial_idea_input
|
| 488 |
-
st.session_state.tech_stack_hint = tech_hint_input
|
| 489 |
-
st.session_state.model_choice = model_choice_input
|
| 490 |
-
st.session_state.elaboration_pending = True
|
| 491 |
-
safe_rerun()
|
| 492 |
-
|
| 493 |
-
# ---------- Step 2: Review and Confirm Elaborated Idea (in Column 2) ----------
|
| 494 |
-
with col2:
|
| 495 |
-
st.header("2. Review & Confirm Refined Idea")
|
| 496 |
-
if not st.session_state.elaboration_complete and not st.session_state.elaboration_pending:
|
| 497 |
-
st.info("The refined idea and analysis will appear here after you click 'Analyze & Refine Idea'.")
|
| 498 |
-
|
| 499 |
-
if st.session_state.elaboration_pending:
|
| 500 |
-
with st.spinner(f"🧠 AI ({st.session_state.model_choice}) is analyzing..."):
|
| 501 |
-
elaboration_prompt = f"""
|
| 502 |
-
Analyze the following product idea meticulously... [USE THE SAME ELABORATION PROMPT AS BEFORE - includes Optimized Description, Suggested Platform(s), Key Features, Target Audience, Initial Technical Considerations] ...
|
| 503 |
-
---
|
| 504 |
-
**Original Idea:**
|
| 505 |
-
{st.session_state.initial_product_idea}
|
| 506 |
-
**Optional Preferences/Hints Provided:**
|
| 507 |
-
{st.session_state.tech_stack_hint if st.session_state.tech_stack_hint else "None provided"}
|
| 508 |
-
---
|
| 509 |
-
Important: Base your analysis and suggestions *only* on the 'Original Idea' and 'Optional Preferences/Hints'.
|
| 510 |
-
"""
|
| 511 |
-
max_tokens_elaboration = 1200
|
| 512 |
-
st.session_state.elaborated_idea_raw = generate_with_selected_model(
|
| 513 |
-
st.session_state.model_choice, elaboration_prompt, max_tokens=max_tokens_elaboration
|
| 514 |
-
)
|
| 515 |
-
st.session_state.elaboration_pending = False
|
| 516 |
-
if st.session_state.elaborated_idea_raw:
|
| 517 |
-
st.session_state.elaboration_complete = True
|
| 518 |
-
else:
|
| 519 |
-
st.error("Failed to elaborate on the idea. Check API errors above or model availability.")
|
| 520 |
-
st.session_state.elaboration_complete = False
|
| 521 |
-
safe_rerun()
|
| 522 |
-
|
| 523 |
-
if st.session_state.elaboration_complete:
|
| 524 |
-
st.info("Review and edit the AI's analysis below. This text will be used to generate all documents.")
|
| 525 |
-
edited_idea = st.text_area(
|
| 526 |
-
"✏️ **Edit Refined Description & Analysis:**",
|
| 527 |
-
value=st.session_state.elaborated_idea_raw,
|
| 528 |
-
height=450,
|
| 529 |
-
key="elaborated_idea_edit_area",
|
| 530 |
-
help="Modify the description, features, platform, etc. This text becomes the basis for the PRD."
|
| 531 |
-
)
|
| 532 |
-
if st.button("2️⃣ Confirm & Generate PRD", key="confirm_prd_button", disabled=st.session_state.generating_prd, use_container_width=True):
|
| 533 |
-
if not edited_idea.strip():
|
| 534 |
-
st.warning("The refined description cannot be empty.")
|
| 535 |
-
else:
|
| 536 |
-
st.session_state.confirmed_product_idea = edited_idea
|
| 537 |
-
st.session_state.prd_content = None
|
| 538 |
-
st.session_state.generated_docs = {}
|
| 539 |
-
st.session_state.selected_docs_to_generate = {k: False for k in doc_options}
|
| 540 |
-
st.session_state.generating_prd = True
|
| 541 |
-
st.session_state.elaboration_complete = False
|
| 542 |
-
st.session_state.generating_optional = False
|
| 543 |
-
safe_rerun()
|
| 544 |
-
|
| 545 |
-
st.markdown("---")
|
| 546 |
-
|
| 547 |
-
# ---------- PRD Generation Logic (Below columns) ----------
|
| 548 |
-
if st.session_state.generating_prd and st.session_state.confirmed_product_idea:
|
| 549 |
-
st.header("3. Generating Product Requirements Document (PRD)")
|
| 550 |
-
st.info(f"Generating PRD using {st.session_state.model_choice}...")
|
| 551 |
-
with st.spinner("Please wait... This might take a moment."):
|
| 552 |
-
prd_prompt = f"""
|
| 553 |
-
Write a comprehensive and detailed Product Requirements Document (PRD) based *strictly* on the provided 'Confirmed Product Description'... [USE THE SAME OPTIMIZED PRD PROMPT AS BEFORE]...
|
| 554 |
-
|
| 555 |
-
**Confirmed Product Description:**
|
| 556 |
-
---
|
| 557 |
-
{st.session_state.confirmed_product_idea}
|
| 558 |
-
---
|
| 559 |
-
**Optional Preferences/Hints (Consider if relevant):**
|
| 560 |
-
{st.session_state.tech_stack_hint if st.session_state.tech_stack_hint else "None provided"}
|
| 561 |
-
"""
|
| 562 |
-
prd_content_result = generate_with_selected_model(
|
| 563 |
-
st.session_state.model_choice, prd_prompt, max_tokens=3500
|
| 564 |
-
)
|
| 565 |
-
if prd_content_result:
|
| 566 |
-
st.session_state.prd_content = prd_content_result
|
| 567 |
-
else:
|
| 568 |
-
st.session_state.prd_content = None
|
| 569 |
-
st.error("Failed to generate PRD. Please check API errors or model availability.")
|
| 570 |
-
st.session_state.generating_prd = False
|
| 571 |
-
safe_rerun()
|
| 572 |
-
|
| 573 |
-
# ---------- Display PRD and Offer Optional Docs ----------
|
| 574 |
-
if st.session_state.prd_content:
|
| 575 |
-
st.header("4. Product Requirements Document (PRD)")
|
| 576 |
-
st.markdown(st.session_state.prd_content)
|
| 577 |
-
# Convert prd_content to a string if it's a list
|
| 578 |
-
prd_text = st.session_state.prd_content
|
| 579 |
-
if isinstance(prd_text, list):
|
| 580 |
-
prd_text = "\n".join(prd_text)
|
| 581 |
-
st.download_button(
|
| 582 |
-
label="📥 Download PRD",
|
| 583 |
-
data=prd_text.encode('utf-8'),
|
| 584 |
-
file_name="prd.md",
|
| 585 |
-
mime="text/markdown",
|
| 586 |
-
key="download_prd"
|
| 587 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 588 |
|
| 589 |
-
|
| 590 |
-
|
| 591 |
-
st.
|
| 592 |
-
|
| 593 |
-
|
| 594 |
-
|
| 595 |
-
|
| 596 |
-
|
| 597 |
-
|
| 598 |
-
|
| 599 |
-
is_checked = st.session_state.selected_docs_to_generate.get(key, False)
|
| 600 |
-
st.session_state.selected_docs_to_generate[key] = st.checkbox(
|
| 601 |
-
config["label"],
|
| 602 |
-
value=is_checked,
|
| 603 |
-
key=f"checkbox_{key}",
|
| 604 |
-
disabled=st.session_state.generating_optional
|
| 605 |
-
)
|
| 606 |
|
| 607 |
-
|
| 608 |
-
|
| 609 |
-
|
| 610 |
-
|
| 611 |
-
|
| 612 |
-
|
| 613 |
-
|
| 614 |
-
|
| 615 |
-
|
| 616 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 617 |
|
| 618 |
-
#
|
| 619 |
-
if st.session_state
|
| 620 |
-
|
| 621 |
-
|
| 622 |
-
|
| 623 |
-
|
| 624 |
-
|
| 625 |
-
|
| 626 |
-
for
|
| 627 |
-
|
| 628 |
-
|
| 629 |
-
|
| 630 |
-
|
| 631 |
-
|
| 632 |
-
if content:
|
| 633 |
-
st.session_state.generated_docs[key] = content
|
| 634 |
-
else:
|
| 635 |
-
st.session_state.generated_docs[key] = f"Error generating {config['label']}."
|
| 636 |
-
generation_successful = False
|
| 637 |
-
st.error(f"Failed to generate {config['label']}.")
|
| 638 |
-
progress_bar.progress((i + 1) / total_selected)
|
| 639 |
-
time.sleep(0.1)
|
| 640 |
-
st.session_state.generating_optional = False
|
| 641 |
-
progress_bar.empty()
|
| 642 |
-
if generation_successful:
|
| 643 |
-
st.success("✅ Selected documents generated!")
|
| 644 |
-
else:
|
| 645 |
-
st.warning("Some documents could not be generated.")
|
| 646 |
-
safe_rerun()
|
| 647 |
-
else:
|
| 648 |
-
st.session_state.generating_optional = False
|
| 649 |
-
safe_rerun()
|
| 650 |
|
| 651 |
-
#
|
| 652 |
-
if
|
| 653 |
-
st.
|
| 654 |
-
st.
|
| 655 |
-
for key in doc_options.keys():
|
| 656 |
-
content = st.session_state.generated_docs.get(key)
|
| 657 |
if content:
|
| 658 |
-
|
| 659 |
-
st.
|
| 660 |
-
|
| 661 |
-
|
| 662 |
-
|
| 663 |
-
|
| 664 |
-
|
| 665 |
-
|
| 666 |
-
st.code(content, language=config["code_language"])
|
| 667 |
-
elif config["display_func"]:
|
| 668 |
-
config["display_func"](content, key=f"display_{key}")
|
| 669 |
-
else:
|
| 670 |
-
st.markdown(content)
|
| 671 |
-
if not is_error:
|
| 672 |
-
try:
|
| 673 |
-
st.download_button(
|
| 674 |
-
label=f"📥 Download {config['label']}",
|
| 675 |
-
data=content.encode('utf-8') if isinstance(content, str) else "\n".join(content).encode('utf-8'),
|
| 676 |
-
file_name=config["download_filename"],
|
| 677 |
-
mime=config["mime"],
|
| 678 |
-
key=f"download_{key}"
|
| 679 |
-
)
|
| 680 |
-
except Exception as e:
|
| 681 |
-
st.warning(f"Could not prepare download: {e}")
|
| 682 |
-
if config["render_func"] and config["code_language"] == "mermaid":
|
| 683 |
-
if st.checkbox(f"🔍 Show raw Mermaid code", key=f"show_raw_mermaid_{key}"):
|
| 684 |
-
cleaned_code_raw = re.sub(r"^```mermaid\s*\n?", "", content, flags=re.IGNORECASE).strip()
|
| 685 |
-
cleaned_code_raw = re.sub(r"\n?```\s*$", "", cleaned_code_raw).strip()
|
| 686 |
-
st.code(cleaned_code_raw, language="mermaid")
|
| 687 |
-
elif config["code_language"] and not config["render_func"]:
|
| 688 |
-
if st.checkbox(f"🔍 Show raw {config['code_language']} code", key=f"show_raw_code_{key}"):
|
| 689 |
-
st.code(content, language=config["code_language"])
|
| 690 |
-
st.markdown("---")
|
| 691 |
|
| 692 |
-
|
| 693 |
-
st.markdown("---")
|
| 694 |
-
footer_model_choice = st.session_state.get('model_choice', 'N/A')
|
| 695 |
-
st.caption(f"Using model: **{footer_model_choice}**. Remember to review all generated content carefully.")
|
|
|
|
| 1 |
+
# Provibe App (v1.0) - Streamlit Implementation
|
| 2 |
+
|
| 3 |
import streamlit as st
|
|
|
|
| 4 |
from openai import OpenAI
|
| 5 |
+
import time
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
+
# Initialize OpenAI client.
# The client stays None when no key is configured; generate_ai_response()
# checks for this and reports the problem in the UI.
openai_api_key = st.secrets.get("OPENAI_API_KEY")
if openai_api_key:
    openai_client = OpenAI(api_key=openai_api_key)
else:
    openai_client = None
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
+
# ---------- Helper Functions ----------
def generate_ai_response(prompt, max_tokens=2000):
    """Send *prompt* to the OpenAI chat API and return the reply text.

    Args:
        prompt: The user message sent to the model.
        max_tokens: Upper bound on the length of the generated reply.

    Returns:
        The model's reply as a string, or None when the client is not
        configured or the API call fails (the error is shown in the UI).
    """
    if not openai_client:
        st.error("OpenAI API key not configured.")
        return None
    try:
        response = openai_client.chat.completions.create(
            model="gpt-4-turbo",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=max_tokens,
        )
    except Exception as e:
        # Surface API/network failures instead of crashing the whole script run.
        st.error(f"OpenAI request failed: {e}")
        return None
    return response.choices[0].message.content
|
| 22 |
+
|
| 23 |
+
# ---------- Session State Initialization ----------
# Wizard state: current step, raw idea, refined idea, question answers,
# the selected document list, and per-document generation status.
_wizard_keys = ('step', 'idea', 'refined_idea', 'answers', 'documents', 'status')
for _key in _wizard_keys:
    st.session_state.setdefault(_key, None)
# Generated document bodies, keyed by document type name.
st.session_state.setdefault('docs_ready', {})
|
| 29 |
+
|
| 30 |
+
# ---------- UI ----------
st.title("🚀 Provibe - AI-Powered Product Documentation Generator")

# Step-by-step Wizard

# Step 1: Idea Input
if st.session_state['step'] is None or st.session_state['step'] == 1:
    st.header("Step 1: Enter Your Product Idea")
    idea_input = st.text_area("Describe your initial product idea:", height=150)
    if st.button("Refine Idea"):
        if idea_input.strip():
            st.session_state['idea'] = idea_input
            st.session_state['step'] = 2
            st.rerun()
        else:
            # Previously an empty submission was silently ignored; tell the
            # user why nothing happened.
            st.warning("Please describe your product idea before continuing.")
|
| 43 |
|
| 44 |
+
# Step 2: Idea Refinement
if st.session_state['step'] == 2:
    st.header("Step 2: AI-Refined Idea")
    # Cache the refinement: Streamlit re-executes the script on every widget
    # interaction, so an uncached call would hit the API (and discard the
    # previous result) on each rerun.
    if st.session_state.get('refined_idea_raw') is None:
        with st.spinner("Refining your idea..."):
            refined_prompt = f"Refine and enhance the clarity of this product idea:\n\n{st.session_state['idea']}"
            st.session_state['refined_idea_raw'] = generate_ai_response(refined_prompt, 1000)
    refined = st.session_state.get('refined_idea_raw')
    if refined is None:
        # generate_ai_response already displayed the specific error.
        st.error("Could not refine the idea. Check the API configuration and try again.")
    else:
        # The user may edit the refined text before proceeding.
        st.session_state['refined_idea'] = st.text_area("Refined Idea:", refined, height=200)
        if st.button("Proceed to Detailed Questions"):
            st.session_state['step'] = 3
            st.rerun()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 54 |
|
| 55 |
+
# Step 3: AI-Generated Questions
if st.session_state['step'] == 3:
    st.header("Step 3: Product Details")
    questions_prompt = f"Generate 5 critical questions to detail the product idea:\n\n{st.session_state['refined_idea']}"
    raw_questions = generate_ai_response(questions_prompt, 500)
    # Guard against a failed API call (None) — previously .split('\n') raised
    # AttributeError — and drop blank lines, which produced empty inputs and
    # could trigger duplicate-widget errors for repeated labels.
    questions = [q.strip() for q in raw_questions.split('\n') if q.strip()] if raw_questions else []
    if not questions:
        st.error("Could not generate questions. Check the API configuration and try again.")
    else:
        answers = []
        with st.form("detail_form"):
            for i, q in enumerate(questions):
                # Explicit keys keep widgets unique even if two questions repeat.
                answers.append(st.text_input(q, key=f"detail_q_{i}"))
            submitted = st.form_submit_button("Generate Development Plan")
        if submitted and all(answers):
            st.session_state['answers'] = answers
            st.session_state['step'] = 4
            st.rerun()
|
| 70 |
+
|
| 71 |
+
# Step 4: Development Plan Generation
if st.session_state['step'] == 4:
    st.header("Step 4: AI-Generated Development Plan")
    with st.spinner("Creating your development plan..."):
        detail_prompt = f"Create a concise development plan based on:\nIdea: {st.session_state['refined_idea']}\nDetails: {st.session_state['answers']}"
        dev_plan = generate_ai_response(detail_prompt, 1500)
    if dev_plan is None:
        # Previously None was passed straight to st.markdown when the API
        # call failed; generate_ai_response already showed the specific error.
        st.error("Could not generate the development plan. Please try again.")
    else:
        st.markdown(dev_plan)
        if st.button("Generate Documentation"):
            st.session_state['step'] = 5
            st.rerun()
|
| 81 |
|
| 82 |
+
# Step 5: Document Generation
if st.session_state['step'] == 5:
    st.header("Step 5: Document Generation")
    doc_types = [
        "Product Requirements Document",
        "User Flow",
        "System Architecture",
        "Database Schema",
    ]
    selected_docs = st.multiselect("Select documents to generate:", doc_types, default=doc_types)
    if st.button("Generate Selected Documents"):
        st.session_state['documents'] = selected_docs
        # Track per-document completion; each flag flips once generation ends.
        st.session_state['status'] = dict.fromkeys(selected_docs, False)
        for doc_name in selected_docs:
            with st.spinner(f"Generating {doc_name}..."):
                doc_prompt = (
                    f"Generate a detailed {doc_name.lower()} for the following product:\n\n"
                    f"{st.session_state['refined_idea']}\nDetails: {st.session_state['answers']}"
                )
                st.session_state['docs_ready'][doc_name] = generate_ai_response(doc_prompt, 2000)
                st.session_state['status'][doc_name] = True
        st.rerun()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 96 |
|
| 97 |
+
# Display generated documents
if st.session_state['docs_ready']:
    st.header("Generated Documents")
    for doc_name, doc_text in st.session_state['docs_ready'].items():
        # Skip entries whose generation returned nothing.
        if not doc_text:
            continue
        st.subheader(doc_name)
        st.markdown(doc_text)
        st.download_button(
            f"Download {doc_name}",
            data=doc_text,
            file_name=f"{doc_name.replace(' ', '_').lower()}.md",
            mime="text/markdown",
        )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 110 |
|
| 111 |
+
# Footer: rendered on every run, regardless of the current wizard step.
st.caption("Powered by Provibe AI")
|
|
|
|
|
|
|
|
|