Naveen-2007 committed on
Commit
714f42f
·
1 Parent(s): ee0f8f3

Add Product MVP and Video Brain modes + Render deployment

Browse files
Dockerfile.render ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.11-slim
2
+
3
+ WORKDIR /app
4
+
5
+ # Install system dependencies
6
+ RUN apt-get update && apt-get install -y --no-install-recommends \
7
+ curl \
8
+ supervisor \
9
+ && rm -rf /var/lib/apt/lists/* \
10
+ && apt-get clean
11
+
12
+ # Copy and install Python dependencies (cached layer)
13
+ COPY requirements.txt .
14
+ RUN pip install --no-cache-dir --upgrade pip && \
15
+ pip install --no-cache-dir -r requirements.txt
16
+
17
+ # Copy ALL application code
18
+ COPY . .
19
+
20
+ # Create supervisor config directory
21
+ RUN mkdir -p /etc/supervisor/conf.d
22
+
23
+ # Create workspace directories
24
+ RUN mkdir -p /app/workspace_data /app/chroma_db
25
+
26
+ # Expose both ports
27
+ EXPOSE 8000 8501
28
+
29
+ # Copy supervisor config
30
+ COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
31
+
32
+ # Run supervisor to manage both processes
33
+ CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]
app/api.py CHANGED
@@ -992,3 +992,243 @@ def agentic_mode(req: ModeRequest):
992
  )
993
 
994
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
992
  )
993
 
994
 
995
+ # =======================================================
996
+ # PRODUCT MVP ENDPOINT - Generates MVP Blueprints
997
+ # =======================================================
998
+ class ProductMVPRequest(BaseModel):
999
+ message: str
1000
+ workspace_id: str = "default"
1001
+ mode: str = "product_mvp"
1002
+
1003
+
1004
+ @app.post("/api/product_mvp", response_model=ChatResponse)
1005
+ def product_mvp_mode(req: ProductMVPRequest):
1006
+ """
1007
+ Product MVP Mode - Generates comprehensive MVP blueprints from product ideas.
1008
+ Includes product name, pitch, target users, features, architecture, tech stack, and more.
1009
+ """
1010
+ q = req.message.strip()
1011
+ ws = req.workspace_id
1012
+
1013
+ memory.add(ws, "user", q)
1014
+ print(f"\nπŸš€ PRODUCT MVP MODE: {q}")
1015
+
1016
+ # Research similar products and market
1017
+ market_research = ""
1018
+ try:
1019
+ results = search_tool.search(f"{q} startup MVP product", num_results=3)
1020
+ if results:
1021
+ for r in results:
1022
+ url = r.get("url", "")
1023
+ text = browse_tool.fetch_clean(url)
1024
+ if text:
1025
+ market_research += text[:800] + "\n\n"
1026
+ except Exception as e:
1027
+ print(f"Market research error: {e}")
1028
+
1029
+ prompt = f"""You are a PRODUCT BUILDER AI that creates comprehensive MVP blueprints.
1030
+
1031
+ The user wants to build: {q}
1032
+
1033
+ {f"MARKET RESEARCH (use for context):{chr(10)}{market_research}" if market_research else ""}
1034
+
1035
+ Generate a COMPLETE MVP Blueprint with the following sections. Use markdown formatting with tables where appropriate:
1036
+
1037
+ # πŸ“„ MVP Blueprint – [Product Name]
1038
+ A one-line description of the product.
1039
+
1040
+ ## 1. Product Name
1041
+ Create a catchy, memorable product name.
1042
+
1043
+ ## 2. One‑Line Pitch
1044
+ A compelling pitch in quotes that explains the value proposition.
1045
+
1046
+ ## 3. Target Users
1047
+ Create a markdown table with columns: Persona | Age | Occupation | Goals | Pain Points | How [Product] Helps
1048
+ Include 4-5 different user personas.
1049
+
1050
+ ## 4. Problems to Solve
1051
+ List 5 key problems the product solves with bullet points.
1052
+
1053
+ ## 5. MVP Features
1054
+ Create a table with: Feature | Description | Priority (Must-have/Nice-to-have)
1055
+ Include 8-10 features.
1056
+
1057
+ ## 6. User Journey (Step‑by‑Step)
1058
+ Number each step of the user journey from landing to retention.
1059
+
1060
+ ## 7. System Architecture
1061
+ Create an ASCII diagram showing the system components and their connections.
1062
+ Include: Frontend, Backend, Database, APIs, Third-party services.
1063
+
1064
+ ## 8. Database Tables
1065
+ Create a table showing the main database tables with: Table | Columns | Notes
1066
+
1067
+ ## 9. API Endpoints (REST)
1068
+ Create a table with: Method | Endpoint | Description | Auth Required
1069
+
1070
+ ## 10. Tech Stack
1071
+ Create a table with: Layer | Technology | Reason
1072
+ Cover: Frontend, Backend, Auth, Database, Cache, Storage, Hosting, CI/CD, Monitoring
1073
+
1074
+ ## 11. Future Features (Post‑MVP)
1075
+ List 8 features for after MVP launch.
1076
+
1077
+ ## Next Steps
1078
+ List 5 actionable next steps to start building.
1079
+
1080
+ End with: **Happy building! πŸš€**
1081
+
1082
+ Be detailed, practical, and use real-world best practices. Make it production-ready."""
1083
+
1084
+ msgs = build_context(ws, prompt)
1085
+ answer = llm.invoke(msgs).content
1086
+
1087
+ # Generate follow-up questions
1088
+ follow = [
1089
+ "Generate wireframes for core screens",
1090
+ "Create a development timeline",
1091
+ "Estimate the MVP budget",
1092
+ "Design the database schema in detail",
1093
+ "Write user stories for MVP features"
1094
+ ]
1095
+
1096
+ memory.add(ws, "assistant", answer)
1097
+ print(f" βœ… Product MVP: Blueprint generated")
1098
+
1099
+ return ChatResponse(
1100
+ answer=answer,
1101
+ sources=[],
1102
+ links=[],
1103
+ images=[],
1104
+ followups=follow,
1105
+ default_tab="answer",
1106
+ workspace_id=ws
1107
+ )
1108
+
1109
+
1110
+ # =======================================================
1111
+ # VIDEO BRAIN ENDPOINT - YouTube Video Analysis
1112
+ # =======================================================
1113
+ class VideoBrainRequest(BaseModel):
1114
+ message: str
1115
+ workspace_id: str = "default"
1116
+ mode: str = "video_brain"
1117
+ youtube_url: str = ""
1118
+
1119
+
1120
+ @app.post("/api/video_brain", response_model=ChatResponse)
1121
+ def video_brain_mode(req: VideoBrainRequest):
1122
+ """
1123
+ Video Brain Mode - Analyzes YouTube videos and answers questions about them.
1124
+ Extracts transcript/content and provides intelligent responses.
1125
+ """
1126
+ q = req.message.strip()
1127
+ ws = req.workspace_id
1128
+ youtube_url = req.youtube_url
1129
+
1130
+ memory.add(ws, "user", q)
1131
+ print(f"\nπŸŽ₯ VIDEO BRAIN MODE: {q}")
1132
+ print(f" πŸ“Ί YouTube URL: {youtube_url}")
1133
+
1134
+ if not youtube_url:
1135
+ return ChatResponse(
1136
+ answer="⚠️ Please provide a YouTube URL first. Enter the URL in the Video Brain interface and click 'Load' before asking questions.",
1137
+ sources=[],
1138
+ links=[],
1139
+ images=[],
1140
+ followups=[],
1141
+ default_tab="answer",
1142
+ workspace_id=ws
1143
+ )
1144
+
1145
+ # Try to get video information
1146
+ video_content = ""
1147
+ video_title = ""
1148
+
1149
+ try:
1150
+ # Extract video ID
1151
+ video_id = ""
1152
+ if "v=" in youtube_url:
1153
+ video_id = youtube_url.split("v=")[1].split("&")[0]
1154
+ elif "youtu.be/" in youtube_url:
1155
+ video_id = youtube_url.split("youtu.be/")[1].split("?")[0]
1156
+
1157
+ print(f" πŸ” Video ID: {video_id}")
1158
+
1159
+ # Search for video information and related content
1160
+ if video_id:
1161
+ # Search for the video title and description
1162
+ topic_results = search_tool.search(f"youtube {video_id}", num_results=3)
1163
+ if topic_results:
1164
+ for r in topic_results:
1165
+ title = r.get("title", "")
1166
+ if title and not video_title:
1167
+ video_title = title
1168
+ snippet = r.get("content", "") or r.get("snippet", "")
1169
+ if snippet:
1170
+ video_content += snippet + "\n"
1171
+
1172
+ # Search for transcript or summary
1173
+ search_query = f"youtube video transcript summary {video_title or video_id}"
1174
+ results = search_tool.search(search_query, num_results=3)
1175
+
1176
+ for r in results[:2]:
1177
+ url = r.get("url", "")
1178
+ if url and "youtube.com" not in url: # Skip YouTube pages, get transcripts
1179
+ text = browse_tool.fetch_clean(url)
1180
+ if text:
1181
+ video_content += text[:2000] + "\n\n"
1182
+
1183
+ print(f" πŸ“ Content gathered: {len(video_content)} chars")
1184
+
1185
+ except Exception as e:
1186
+ print(f" ❌ Video content fetch error: {e}")
1187
+
1188
+ prompt = f"""You are VIDEO BRAIN AI - an expert at analyzing and explaining YouTube video content.
1189
+
1190
+ VIDEO URL: {youtube_url}
1191
+ {f"VIDEO TITLE: {video_title}" if video_title else ""}
1192
+
1193
+ {f"AVAILABLE VIDEO CONTEXT:{chr(10)}{video_content[:4000]}" if video_content else "Note: Could not fetch video transcript directly. I will provide helpful guidance based on the question and general knowledge."}
1194
+
1195
+ USER QUESTION: {q}
1196
+
1197
+ Instructions:
1198
+ 1. If context is available, answer based on the video content
1199
+ 2. If the question is about summarizing, provide key points and takeaways
1200
+ 3. If asking about specific topics, explain them clearly
1201
+ 4. Use timestamps if available (e.g., "At around 5:30...")
1202
+ 5. If limited information is available, be honest but still provide helpful guidance
1203
+ 6. Format your response with headers and bullet points for clarity
1204
+ 7. Make the response educational and easy to understand
1205
+
1206
+ Provide a comprehensive, helpful response:"""
1207
+
1208
+ msgs = build_context(ws, prompt)
1209
+ answer = llm.invoke(msgs).content
1210
+
1211
+ # Generate follow-up questions about the video
1212
+ follow = [
1213
+ "Summarize the main points of this video",
1214
+ "What are the key takeaways?",
1215
+ "Explain the most important concept covered",
1216
+ "What questions should I ask about this topic?",
1217
+ "Create study notes from this video"
1218
+ ]
1219
+
1220
+ sources = [{"title": f"πŸŽ₯ {video_title or 'YouTube Video'}", "url": youtube_url}]
1221
+ links = [{"title": video_title or "YouTube Video", "url": youtube_url, "snippet": "Source video"}]
1222
+
1223
+ memory.add(ws, "assistant", answer)
1224
+ print(f" βœ… Video Brain: Response generated")
1225
+
1226
+ return ChatResponse(
1227
+ answer=answer,
1228
+ sources=sources,
1229
+ links=links,
1230
+ images=[],
1231
+ followups=follow,
1232
+ default_tab="answer",
1233
+ workspace_id=ws
1234
+ )
config/config.py CHANGED
@@ -8,7 +8,7 @@ class Config:
8
  GROQ_API_KEY = os.getenv("GROQ_API_KEY")
9
  TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
10
 
11
- LLM_MODEL = "llama-3.1-70b-versatile"
12
 
13
  CHUNK_SIZE = 400
14
  CHUNK_OVERLAP = 80
 
8
  GROQ_API_KEY = os.getenv("GROQ_API_KEY")
9
  TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
10
 
11
+ LLM_MODEL = "openai/gpt-oss-120b"
12
 
13
  CHUNK_SIZE = 400
14
  CHUNK_OVERLAP = 80
rag/graph_deep.py CHANGED
@@ -110,13 +110,13 @@ class WebSearchGraph:
110
 
111
  g.add_node("search", self.search_node.search)
112
  g.add_node("fetch", self.fetch_node.fetch)
113
- g.add_node("context", self.context_node.build_context)
114
  g.add_node("answer", self.answer_node.answer)
115
 
116
  g.set_entry_point("search")
117
  g.add_edge("search", "fetch")
118
- g.add_edge("fetch", "context")
119
- g.add_edge("context", "answer")
120
  g.add_edge("answer", END)
121
 
122
  self.graph = g.compile()
@@ -148,12 +148,12 @@ class RAGOnlyGraph:
148
  g = StateGraph(RAGOnlyState)
149
 
150
  g.add_node("retrieve", self.retrieve_node.retrieve)
151
- g.add_node("context", self.context_node.build_context)
152
  g.add_node("answer", self.answer_node.answer)
153
 
154
  g.set_entry_point("retrieve")
155
- g.add_edge("retrieve", "context")
156
- g.add_edge("context", "answer")
157
  g.add_edge("answer", END)
158
 
159
  self.graph = g.compile()
 
110
 
111
  g.add_node("search", self.search_node.search)
112
  g.add_node("fetch", self.fetch_node.fetch)
113
+ g.add_node("build_context", self.context_node.build_context)
114
  g.add_node("answer", self.answer_node.answer)
115
 
116
  g.set_entry_point("search")
117
  g.add_edge("search", "fetch")
118
+ g.add_edge("fetch", "build_context")
119
+ g.add_edge("build_context", "answer")
120
  g.add_edge("answer", END)
121
 
122
  self.graph = g.compile()
 
148
  g = StateGraph(RAGOnlyState)
149
 
150
  g.add_node("retrieve", self.retrieve_node.retrieve)
151
+ g.add_node("build_context", self.context_node.build_context)
152
  g.add_node("answer", self.answer_node.answer)
153
 
154
  g.set_entry_point("retrieve")
155
+ g.add_edge("retrieve", "build_context")
156
+ g.add_edge("build_context", "answer")
157
  g.add_edge("answer", END)
158
 
159
  self.graph = g.compile()
render.yaml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ services:
2
+ - type: web
3
+ name: perplexity-clone
4
+ env: docker
5
+ dockerfilePath: ./Dockerfile.render
6
+ dockerContext: .
7
+ plan: free
8
+ region: oregon
9
+ healthCheckPath: /health
10
+ envVars:
11
+ - key: ANTHROPIC_API_KEY
12
+ sync: false
13
+ - key: TAVILY_API_KEY
14
+ sync: false
15
+ - key: GOOGLE_API_KEY
16
+ sync: false
17
+ - key: GOOGLE_CSE_ID
18
+ sync: false
19
+ - key: BACKEND_URL
20
+ value: http://localhost:8000
streamlit_app.py CHANGED
@@ -28,6 +28,12 @@ if "uploaded_files" not in st.session_state:
28
  st.session_state.uploaded_files = []
29
  if "show_upload" not in st.session_state:
30
  st.session_state.show_upload = False
 
 
 
 
 
 
31
 
32
  # =====================================
33
  # CONFIGURATION
@@ -78,6 +84,16 @@ MODES = {
78
  "desc": "Direct AI chat",
79
  "endpoint": "/api/focus"
80
  },
 
 
 
 
 
 
 
 
 
 
81
  }
82
 
83
  # =====================================
@@ -443,7 +459,7 @@ st.markdown(get_css(), unsafe_allow_html=True)
443
  # =====================================
444
  # HELPER FUNCTIONS
445
  # =====================================
446
- def call_api(query: str, mode: str):
447
  """Call backend API based on selected mode."""
448
  mode_config = MODES.get(mode, MODES["Automatic"])
449
  endpoint = mode_config["endpoint"]
@@ -454,9 +470,23 @@ def call_api(query: str, mode: str):
454
  "mode": mode.lower().replace(" ", "_")
455
  }
456
 
 
 
 
 
457
  try:
458
  response = requests.post(f"{API_URL}{endpoint}", json=payload, timeout=180)
459
- return response.json()
 
 
 
 
 
 
 
 
 
 
460
  except Exception as e:
461
  return {
462
  "answer": f"Error: {str(e)}",
@@ -574,6 +604,56 @@ st.markdown('</div>', unsafe_allow_html=True)
574
  # Mode description
575
  st.markdown(f'<div class="mode-desc">{MODES[st.session_state.mode]["icon"]} {st.session_state.mode}: {MODES[st.session_state.mode]["desc"]}</div>', unsafe_allow_html=True)
576
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
577
  # Show file uploader when icon is clicked
578
  if st.session_state.show_upload:
579
  uploaded = st.file_uploader(
@@ -602,8 +682,25 @@ if st.session_state.uploaded_files:
602
  # HANDLE SEARCH
603
  # =====================================
604
  if submit and query.strip():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
605
  with st.spinner(f"πŸ”„ {st.session_state.mode}..."):
606
- result = call_api(query.strip(), st.session_state.mode)
607
  st.session_state.current_result = {
608
  "query": query.strip(),
609
  "mode": st.session_state.mode,
@@ -621,6 +718,26 @@ if st.session_state.current_result:
621
 
622
  st.divider()
623
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
624
  # Query box
625
  mode_info = MODES.get(result['mode'], MODES['Automatic'])
626
  st.markdown(f"""
@@ -635,6 +752,10 @@ if st.session_state.current_result:
635
  if sources:
636
  st.success(f"βœ“ {len(sources)} sources")
637
 
 
 
 
 
638
  # Layout - Full width (removed duplicate sidebar sources)
639
  tabs = st.tabs(["✨ Answer", "πŸ”— Sources", "πŸ–ΌοΈ Images"])
640
 
@@ -649,8 +770,11 @@ if st.session_state.current_result:
649
  st.markdown("**Related:**")
650
  for i, fu in enumerate(followups[:3]):
651
  if st.button(f"β†’ {fu}", key=f"fu_{i}"):
 
 
 
652
  with st.spinner("..."):
653
- new_result = call_api(fu, st.session_state.mode)
654
  st.session_state.current_result = {
655
  "query": fu,
656
  "mode": st.session_state.mode,
@@ -700,6 +824,11 @@ with st.sidebar:
700
  st.session_state.uploaded_files = []
701
  st.info("Files cleared")
702
 
 
 
 
 
 
703
  st.divider()
704
  st.caption(f"Theme: {'πŸŒ™ Dark' if st.session_state.theme == 'dark' else 'β˜€οΈ Light'}")
705
  st.caption(f"Mode: {st.session_state.mode}")
@@ -709,3 +838,16 @@ with st.sidebar:
709
  st.markdown("### πŸ“ Files")
710
  for f in st.session_state.uploaded_files:
711
  st.caption(f"πŸ“„ {f}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  st.session_state.uploaded_files = []
29
  if "show_upload" not in st.session_state:
30
  st.session_state.show_upload = False
31
+ if "youtube_url" not in st.session_state:
32
+ st.session_state.youtube_url = ""
33
+ if "video_loaded" not in st.session_state:
34
+ st.session_state.video_loaded = False
35
+ if "product_ideas" not in st.session_state:
36
+ st.session_state.product_ideas = []
37
 
38
  # =====================================
39
  # CONFIGURATION
 
84
  "desc": "Direct AI chat",
85
  "endpoint": "/api/focus"
86
  },
87
+ "Product MVP": {
88
+ "icon": "πŸš€",
89
+ "desc": "Idea β†’ MVP Blueprint",
90
+ "endpoint": "/api/product_mvp"
91
+ },
92
+ "Video Brain": {
93
+ "icon": "πŸŽ₯",
94
+ "desc": "Understand YouTube lectures",
95
+ "endpoint": "/api/video_brain"
96
+ },
97
  }
98
 
99
  # =====================================
 
459
  # =====================================
460
  # HELPER FUNCTIONS
461
  # =====================================
462
+ def call_api(query: str, mode: str, extra_data: dict = None):
463
  """Call backend API based on selected mode."""
464
  mode_config = MODES.get(mode, MODES["Automatic"])
465
  endpoint = mode_config["endpoint"]
 
470
  "mode": mode.lower().replace(" ", "_")
471
  }
472
 
473
+ # Add extra data for special modes
474
+ if extra_data:
475
+ payload.update(extra_data)
476
+
477
  try:
478
  response = requests.post(f"{API_URL}{endpoint}", json=payload, timeout=180)
479
+ response.raise_for_status()
480
+ try:
481
+ return response.json()
482
+ except ValueError:
483
+ return {
484
+ "answer": f"Error: Invalid JSON response from server",
485
+ "sources": [],
486
+ "links": [],
487
+ "images": [],
488
+ "followups": []
489
+ }
490
  except Exception as e:
491
  return {
492
  "answer": f"Error: {str(e)}",
 
604
  # Mode description
605
  st.markdown(f'<div class="mode-desc">{MODES[st.session_state.mode]["icon"]} {st.session_state.mode}: {MODES[st.session_state.mode]["desc"]}</div>', unsafe_allow_html=True)
606
 
607
+ # =====================================
608
+ # SPECIAL UI FOR PRODUCT MVP MODE
609
+ # =====================================
610
+ if st.session_state.mode == "Product MVP" and not st.session_state.current_result:
611
+ st.markdown("""
612
+ <div style="text-align: center; padding: 20px; margin: 20px auto; max-width: 700px;
613
+ background: linear-gradient(135deg, #FF6B35 0%, #F7931E 100%);
614
+ border-radius: 16px; color: white;">
615
+ <h3 style="margin: 0; font-size: 24px;">πŸš€ Product Builder – Idea β†’ MVP Blueprint</h3>
616
+ <p style="margin: 10px 0 0; opacity: 0.9;">🟠 Product Builder Active</p>
617
+ </div>
618
+ """, unsafe_allow_html=True)
619
+
620
+ st.markdown("<p style='text-align: center; color: #888; margin: 15px 0;'>Describe your product idea:</p>", unsafe_allow_html=True)
621
+
622
+ # =====================================
623
+ # SPECIAL UI FOR VIDEO BRAIN MODE
624
+ # =====================================
625
+ if st.session_state.mode == "Video Brain" and not st.session_state.current_result:
626
+ st.markdown("""
627
+ <div style="text-align: center; padding: 20px; margin: 20px auto; max-width: 700px;
628
+ background: linear-gradient(135deg, #3B82F6 0%, #8B5CF6 100%);
629
+ border-radius: 16px; color: white;">
630
+ <h3 style="margin: 0; font-size: 24px;">πŸŽ₯ Video Brain – Understand Any YouTube Lecture</h3>
631
+ <p style="margin: 10px 0 0; opacity: 0.9;">πŸ”΅ Upload Video First</p>
632
+ </div>
633
+ """, unsafe_allow_html=True)
634
+
635
+ # YouTube URL input
636
+ col_yt1, col_yt2 = st.columns([5, 1])
637
+ with col_yt1:
638
+ youtube_url = st.text_input(
639
+ "youtube_url",
640
+ placeholder="Enter YouTube URL (e.g., https://youtube.com/watch?v=...)",
641
+ label_visibility="collapsed",
642
+ key="youtube_url_input"
643
+ )
644
+ with col_yt2:
645
+ if st.button("πŸ“Ί Load", key="load_video_btn"):
646
+ if youtube_url and ("youtube.com" in youtube_url or "youtu.be" in youtube_url):
647
+ st.session_state.youtube_url = youtube_url
648
+ st.session_state.video_loaded = True
649
+ st.success("βœ… Video loaded! Now ask questions about it.")
650
+ else:
651
+ st.error("Please enter a valid YouTube URL")
652
+
653
+ if st.session_state.video_loaded and st.session_state.youtube_url:
654
+ st.success(f"πŸ“Ί Video ready: {st.session_state.youtube_url[:50]}...")
655
+ st.markdown("<p style='text-align: center; color: #888; margin: 15px 0;'>Ask about the video:</p>", unsafe_allow_html=True)
656
+
657
  # Show file uploader when icon is clicked
658
  if st.session_state.show_upload:
659
  uploaded = st.file_uploader(
 
682
  # HANDLE SEARCH
683
  # =====================================
684
  if submit and query.strip():
685
+ extra_data = None
686
+
687
+ # For Video Brain mode, include the YouTube URL
688
+ if st.session_state.mode == "Video Brain":
689
+ if st.session_state.video_loaded and st.session_state.youtube_url:
690
+ extra_data = {"youtube_url": st.session_state.youtube_url}
691
+ else:
692
+ st.warning("⚠️ Please load a YouTube video first!")
693
+ st.stop()
694
+
695
+ # For Product MVP mode, save to ideas history
696
+ if st.session_state.mode == "Product MVP":
697
+ st.session_state.product_ideas.append({
698
+ "idea": query.strip(),
699
+ "time": "just now"
700
+ })
701
+
702
  with st.spinner(f"πŸ”„ {st.session_state.mode}..."):
703
+ result = call_api(query.strip(), st.session_state.mode, extra_data)
704
  st.session_state.current_result = {
705
  "query": query.strip(),
706
  "mode": st.session_state.mode,
 
718
 
719
  st.divider()
720
 
721
+ # Special header for Product MVP mode
722
+ if result['mode'] == "Product MVP":
723
+ st.markdown("""
724
+ <div style="text-align: center; padding: 15px; margin: 10px auto; max-width: 700px;
725
+ background: linear-gradient(135deg, #FF6B35 0%, #F7931E 100%);
726
+ border-radius: 12px; color: white;">
727
+ <h4 style="margin: 0;">πŸ“„ MVP Blueprint</h4>
728
+ </div>
729
+ """, unsafe_allow_html=True)
730
+
731
+ # Special header for Video Brain mode
732
+ if result['mode'] == "Video Brain":
733
+ st.markdown("""
734
+ <div style="text-align: center; padding: 15px; margin: 10px auto; max-width: 700px;
735
+ background: linear-gradient(135deg, #3B82F6 0%, #8B5CF6 100%);
736
+ border-radius: 12px; color: white;">
737
+ <h4 style="margin: 0;">πŸŽ₯ Video Analysis</h4>
738
+ </div>
739
+ """, unsafe_allow_html=True)
740
+
741
  # Query box
742
  mode_info = MODES.get(result['mode'], MODES['Automatic'])
743
  st.markdown(f"""
 
752
  if sources:
753
  st.success(f"βœ“ {len(sources)} sources")
754
 
755
+ # Memory saved notification for Product MVP
756
+ if result['mode'] == "Product MVP":
757
+ st.info("πŸ“ New Memory Saved")
758
+
759
  # Layout - Full width (removed duplicate sidebar sources)
760
  tabs = st.tabs(["✨ Answer", "πŸ”— Sources", "πŸ–ΌοΈ Images"])
761
 
 
770
  st.markdown("**Related:**")
771
  for i, fu in enumerate(followups[:3]):
772
  if st.button(f"β†’ {fu}", key=f"fu_{i}"):
773
+ extra = None
774
+ if st.session_state.mode == "Video Brain" and st.session_state.youtube_url:
775
+ extra = {"youtube_url": st.session_state.youtube_url}
776
  with st.spinner("..."):
777
+ new_result = call_api(fu, st.session_state.mode, extra)
778
  st.session_state.current_result = {
779
  "query": fu,
780
  "mode": st.session_state.mode,
 
824
  st.session_state.uploaded_files = []
825
  st.info("Files cleared")
826
 
827
+ if st.button("πŸ—‘οΈ Clear Video", use_container_width=True):
828
+ st.session_state.youtube_url = ""
829
+ st.session_state.video_loaded = False
830
+ st.info("Video cleared")
831
+
832
  st.divider()
833
  st.caption(f"Theme: {'πŸŒ™ Dark' if st.session_state.theme == 'dark' else 'β˜€οΈ Light'}")
834
  st.caption(f"Mode: {st.session_state.mode}")
 
838
  st.markdown("### πŸ“ Files")
839
  for f in st.session_state.uploaded_files:
840
  st.caption(f"πŸ“„ {f}")
841
+
842
+ # Show video info for Video Brain mode
843
+ if st.session_state.video_loaded and st.session_state.youtube_url:
844
+ st.divider()
845
+ st.markdown("### πŸŽ₯ Loaded Video")
846
+ st.caption(f"πŸ“Ί {st.session_state.youtube_url[:40]}...")
847
+
848
+ # Show recent product ideas
849
+ if st.session_state.product_ideas:
850
+ st.divider()
851
+ st.markdown("### 🧾 Recent Ideas")
852
+ for idea in st.session_state.product_ideas[-3:]:
853
+ st.caption(f"πŸ’‘ {idea['idea'][:30]}...")
supervisord.conf ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [supervisord]
2
+ nodaemon=true
3
+ user=root
4
+ logfile=/dev/stdout
5
+ logfile_maxbytes=0
6
+ loglevel=info
7
+
8
+ [program:fastapi]
9
+ command=uvicorn app.api:app --host 0.0.0.0 --port 8000 --workers 1
10
+ directory=/app
11
+ autostart=true
12
+ autorestart=true
13
+ stdout_logfile=/dev/stdout
14
+ stdout_logfile_maxbytes=0
15
+ stderr_logfile=/dev/stderr
16
+ stderr_logfile_maxbytes=0
17
+ priority=1
18
+
19
+ [program:streamlit]
20
+ command=streamlit run streamlit_app.py --server.port=8501 --server.address=0.0.0.0 --server.headless=true --browser.gatherUsageStats=false
21
+ directory=/app
22
+ autostart=true
23
+ autorestart=true
24
+ stdout_logfile=/dev/stdout
25
+ stdout_logfile_maxbytes=0
26
+ stderr_logfile=/dev/stderr
27
+ stderr_logfile_maxbytes=0
28
+ priority=2
29
+ startsecs=10
tools/knowledge_panel.py CHANGED
@@ -25,6 +25,7 @@ class KnowledgePanel:
25
  try:
26
  url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{query.replace(' ', '_')}"
27
  r = requests.get(url, timeout=10)
 
28
  data = r.json()
29
 
30
  return {
@@ -34,7 +35,8 @@ class KnowledgePanel:
34
  "thumbnail": data.get("thumbnail", {}).get("source", ""),
35
  "url": data.get("content_urls", {}).get("desktop", {}).get("page", "")
36
  }
37
- except:
 
38
  return {}
39
 
40
  def get_fast_facts(self, query: str) -> List[str]:
 
25
  try:
26
  url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{query.replace(' ', '_')}"
27
  r = requests.get(url, timeout=10)
28
+ r.raise_for_status()
29
  data = r.json()
30
 
31
  return {
 
35
  "thumbnail": data.get("thumbnail", {}).get("source", ""),
36
  "url": data.get("content_urls", {}).get("desktop", {}).get("page", "")
37
  }
38
+ except (requests.exceptions.RequestException, ValueError) as e:
39
+ print(f"Wikipedia API error: {e}")
40
  return {}
41
 
42
  def get_fast_facts(self, query: str) -> List[str]:
tools/search_tool.py CHANGED
@@ -16,7 +16,11 @@ class SearchTool:
16
  url = "https://api.tavily.com/search"
17
  payload = {"query": query, "num_results": num_results}
18
  headers = {"Authorization": self.api_key}
19
- resp = requests.post(url, json=payload, headers=headers, timeout=20)
20
- resp.raise_for_status()
21
- data = resp.json()
22
- return data.get("results", [])
 
 
 
 
 
16
  url = "https://api.tavily.com/search"
17
  payload = {"query": query, "num_results": num_results}
18
  headers = {"Authorization": self.api_key}
19
+ try:
20
+ resp = requests.post(url, json=payload, headers=headers, timeout=20)
21
+ resp.raise_for_status()
22
+ data = resp.json()
23
+ return data.get("results", [])
24
+ except (requests.exceptions.RequestException, ValueError) as e:
25
+ print(f"Search error: {e}")
26
+ return []