userIdc2024 committed on
Commit
790e7c3
·
verified ·
1 Parent(s): 34be0e9

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +518 -261
src/streamlit_app.py CHANGED
@@ -1,5 +1,5 @@
1
  import streamlit as st
2
- from google import genai
3
  import tempfile
4
  import os
5
  import time
@@ -7,9 +7,13 @@ import json
7
  from typing import Optional
8
  import pandas as pd
9
  import logging
 
 
 
 
10
 
11
  # Backend API Key Configuration
12
- GEMINI_API_KEY = os.getenv("GEMENI_KEY")
13
 
14
  # Page configuration
15
  st.set_page_config(
@@ -19,42 +23,78 @@ st.set_page_config(
19
  initial_sidebar_state="expanded"
20
  )
21
 
 
22
  logging.basicConfig(
23
- level=logging.INFO,
24
- format="%(asctime)s [%(levelname)s] %(message)s",
25
  handlers=[
26
  logging.StreamHandler()
27
  ]
28
  )
29
  logger = logging.getLogger(__name__)
30
 
31
-
32
  def configure_gemini():
33
  """Configure Gemini API with backend key"""
34
- return genai.Client(api_key=GEMINI_API_KEY)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
 
36
  # Enhanced system prompt with timestamp-based improvements
37
- SYSTEM_PROMPT = f"""{os.getenv("SYS_PROMPT")}"""
 
38
 
39
  def analyze_video_and_generate_script(
40
- video_bytes,
41
- video_name,
42
- offer_details: str = "",
43
- target_audience: str = "",
44
- specific_hooks: str = "",
45
- additional_context: str = ""
46
  ):
47
  """
48
  Analyze video and generate direct response script variations
49
  """
 
 
 
50
  try:
51
  # Save uploaded video to temporary file
 
52
  with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(video_name)[1]) as tmp_file:
53
  tmp_file.write(video_bytes)
54
  tmp_file_path = tmp_file.name
55
 
 
 
 
56
  # Configure Gemini
57
- client = configure_gemini()
 
 
 
58
 
59
  # Show upload progress
60
  upload_progress = st.progress(0)
@@ -62,192 +102,328 @@ def analyze_video_and_generate_script(
62
 
63
  upload_status.text("Uploading video to Google AI...")
64
  upload_progress.progress(20)
 
65
 
66
- # Upload video to Gemini
67
- video_file_obj = client.files.upload(file=tmp_file_path)
68
- upload_progress.progress(40)
 
 
 
 
 
 
 
 
 
69
 
70
  upload_status.text("Processing video...")
 
 
 
 
 
71
  while video_file_obj.state.name == "PROCESSING":
 
 
 
 
 
 
 
 
 
72
  time.sleep(2)
73
- video_file_obj = client.files.get(name=video_file_obj.name)
74
- upload_progress.progress(60)
 
 
 
 
 
 
 
 
75
 
76
  if video_file_obj.state.name == "FAILED":
77
- upload_status.error("Google AI file processing failed. Please try another video.")
 
 
 
 
 
 
 
 
78
  return None
79
 
80
  upload_progress.progress(80)
81
  upload_status.text("Generating script variations...")
 
82
 
83
  # Build the enhanced user prompt
84
  user_prompt = f"""Analyze this reference video and generate 3 high-converting direct response video script variations with detailed timestamp-based improvements.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
 
86
- ADDITIONAL CONTEXT:
87
- - Offer Details: {offer_details if offer_details else 'Extract from video'}
88
- - Target Audience: {target_audience if target_audience else 'Determine from video content'}
89
- - Specific Hooks to Consider: {specific_hooks if specific_hooks else 'Create based on video analysis'}
90
- - Additional Context: {additional_context}
91
-
92
- Please provide a comprehensive analysis including:
93
-
94
- 1. DETAILED VIDEO ANALYSIS with timestamp-based metrics:
95
- - Break down the video into 5-10 second segments
96
- - Rate each segment's effectiveness (1-10 scale)
97
- - Identify specific elements (hook, transition, proof, CTA, etc.)
98
-
99
- 2. TIMESTAMP-BASED IMPROVEMENTS:
100
- - Specific recommendations for each time segment
101
- - Priority level for each improvement
102
- - Expected impact of implementing changes
103
-
104
- 3. SCRIPT VARIATIONS:
105
- - Create 2-3 complete script variations
106
- - Each with timestamp-by-timestamp breakdown
107
- - Different psychological triggers and approaches
108
-
109
- IMPORTANT: Return only valid JSON in the exact format specified in the system prompt. Analyze the video second-by-second for maximum detail."""
110
 
111
  # Generate response
112
- response = client.models.generate_content(
113
- model="gemini-2.0-flash",
114
- contents=[video_file_obj, user_prompt + "\n\n" + SYSTEM_PROMPT]
115
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
 
117
  upload_progress.progress(100)
118
  upload_status.success("Analysis complete!")
 
119
 
120
  # Clean up temporary file
121
- os.unlink(tmp_file_path)
 
 
 
 
122
 
123
  # Parse JSON response
 
124
  try:
 
 
 
 
 
 
125
  response_text = response.text.strip()
 
 
126
  if response_text.startswith('```json'):
127
  response_text = response_text[7:-3]
 
128
  elif response_text.startswith('```'):
129
  response_text = response_text[3:-3]
 
 
 
130
 
131
  json_response = json.loads(response_text)
 
 
 
132
  return json_response
133
 
134
- except json.JSONDecodeError as e:
135
- st.error(f"Error parsing AI response: {str(e)}")
 
 
 
 
136
  return None
137
 
138
  except Exception as e:
139
- st.error(f"Error processing video: {str(e)}")
 
 
140
  return None
141
 
142
  def display_script_variations(json_data):
143
  """Display script variations in formatted tables"""
 
 
144
  if not json_data or "script_variations" not in json_data:
145
- st.error("No script variations found in the response")
 
 
 
146
  return
147
 
148
- for i, variation in enumerate(json_data["script_variations"], 1):
149
- variation_name = variation.get("variation_name", f"Variation {i}")
150
-
151
- st.subheader(variation_name)
152
 
153
- # Convert script table to DataFrame for better display
154
- script_data = variation.get("script_table", [])
155
- if script_data:
156
- df = pd.DataFrame(script_data)
 
 
 
 
 
 
 
 
 
 
 
157
 
 
 
158
  # Rename columns for better display
159
- column_mapping = {
160
  'timestamp': 'Timestamp',
161
  'script_voiceover': 'Script / Voiceover',
162
  'visual_direction': 'Visual Direction',
163
  'psychological_trigger': 'Psychological Trigger',
164
  'cta_action': 'CTA / Action'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
  }
166
 
167
- df = df.rename(columns=column_mapping)
 
168
 
169
- # Display as interactive table
170
  st.dataframe(
171
- df,
172
  use_container_width=True,
173
  hide_index=True,
174
  column_config={
175
  "Timestamp": st.column_config.TextColumn(width="small"),
176
- "Script / Voiceover": st.column_config.TextColumn(width="large"),
177
- "Visual Direction": st.column_config.TextColumn(width="large"),
178
- "Psychological Trigger": st.column_config.TextColumn(width="medium"),
179
- "CTA / Action": st.column_config.TextColumn(width="medium")
180
  }
181
  )
182
  else:
183
- st.warning(f"No script data available for {variation_name}")
184
-
185
- st.divider()
186
-
187
- def display_video_analysis(json_data):
188
- """Display video analysis in tabular format"""
189
- if not json_data or "video_analysis" not in json_data:
190
- st.error("No video analysis found in the response")
191
- return
192
-
193
- analysis = json_data["video_analysis"]
194
-
195
- # Display general analysis
196
- col1, col2 = st.columns(2)
197
-
198
- with col1:
199
- st.subheader("Effectiveness Factors")
200
- st.write(analysis.get('effectiveness_factors', 'N/A'))
201
-
202
- st.subheader("Target Audience")
203
- st.write(analysis.get('target_audience', 'N/A'))
204
-
205
- with col2:
206
- st.subheader("Psychological Triggers")
207
- st.write(analysis.get('psychological_triggers', 'N/A'))
208
-
209
- # Display video metrics in tabular format
210
- st.subheader("Detailed Video Metrics (Timestamp Analysis)")
211
- video_metrics = analysis.get('video_metrics', [])
212
- if video_metrics:
213
- metrics_df = pd.DataFrame(video_metrics)
214
-
215
- # Rename columns for better display
216
- column_mapping = {
217
- 'timestamp': 'Timestamp',
218
- 'element': 'Element',
219
- 'current_approach': 'Current Approach',
220
- 'effectiveness_score': 'Score',
221
- 'notes': 'Analysis Notes'
222
- }
223
-
224
- metrics_df = metrics_df.rename(columns=column_mapping)
225
 
226
- st.dataframe(
227
- metrics_df,
228
- use_container_width=True,
229
- hide_index=True,
230
- column_config={
231
- "Timestamp": st.column_config.TextColumn(width="small"),
232
- "Element": st.column_config.TextColumn(width="medium"),
233
- "Current Approach": st.column_config.TextColumn(width="large"),
234
- "Score": st.column_config.TextColumn(width="small"),
235
- "Analysis Notes": st.column_config.TextColumn(width="large")
236
- }
237
- )
238
- else:
239
- st.warning("No detailed video metrics available")
240
 
241
  def display_timestamp_improvements(json_data):
242
  """Display timestamp-based improvements in tabular format"""
243
- if not json_data or "timestamp_improvements" not in json_data:
244
- st.error("No timestamp improvements found in the response")
245
- return
246
-
247
- st.subheader("Timestamp-by-Timestamp Improvement Recommendations")
248
 
249
- improvements = json_data["timestamp_improvements"]
250
- if improvements:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
251
  improvements_df = pd.DataFrame(improvements)
252
 
253
  # Rename columns for better display
@@ -261,6 +437,7 @@ def display_timestamp_improvements(json_data):
261
  }
262
 
263
  improvements_df = improvements_df.rename(columns=column_mapping)
 
264
 
265
  # Color code priority
266
  def color_priority(val):
@@ -287,79 +464,146 @@ def display_timestamp_improvements(json_data):
287
  "Priority": st.column_config.TextColumn(width="small")
288
  }
289
  )
290
- else:
291
- st.warning("No timestamp improvements available")
 
 
 
 
 
292
 
293
  def create_csv_download(json_data):
294
  """Create CSV content with all scripts combined"""
295
- all_scripts_data = []
296
 
297
- # Combine all script variations into one dataset
298
- for i, variation in enumerate(json_data.get("script_variations", []), 1):
299
- variation_name = variation.get("variation_name", f"Variation {i}")
300
 
301
- for row in variation.get("script_table", []):
302
- script_row = {
303
- 'Variation': variation_name,
304
- 'Timestamp': row.get('timestamp', ''),
305
- 'Script_Voiceover': row.get('script_voiceover', ''),
306
- 'Visual_Direction': row.get('visual_direction', ''),
307
- 'Psychological_Trigger': row.get('psychological_trigger', ''),
308
- 'CTA_Action': row.get('cta_action', '')
309
- }
310
- all_scripts_data.append(script_row)
311
-
312
- # Convert to DataFrame and then to CSV
313
- if all_scripts_data:
314
- df = pd.DataFrame(all_scripts_data)
315
- return df.to_csv(index=False)
316
- else:
317
- return "No script data available"
 
 
 
 
 
 
 
 
 
 
 
 
 
318
 
319
  def check_token(user_token):
 
 
320
  ACCESS_TOKEN = os.getenv("ACCESS_TOKEN")
321
  if not ACCESS_TOKEN:
322
- logger.critical("ACCESS_TOKEN not set in environment.")
 
323
  return False, "Server error: Access token not configured."
 
324
  if user_token == ACCESS_TOKEN:
325
  logger.info("Access token validated successfully.")
326
  return True, ""
 
327
  logger.warning("Invalid access token attempt.")
328
  return False, "Invalid token."
329
 
330
  def main():
331
  """Main application function"""
332
-
333
- # Header
334
- st.title("Video Analyser and Script Generator")
335
- st.divider()
336
 
337
  if "authenticated" not in st.session_state:
338
  st.session_state["authenticated"] = False
 
339
 
340
  if not st.session_state["authenticated"]:
 
341
  st.markdown("## Access Required")
342
  token_input = st.text_input("Enter Access Token", type="password")
343
  if st.button("Unlock App"):
344
  ok, error_msg = check_token(token_input)
345
  if ok:
346
  st.session_state["authenticated"] = True
 
347
  st.rerun()
348
  else:
 
349
  st.error(error_msg)
350
- else:
351
-
352
- # Sidebar for inputs
353
- with st.sidebar:
354
- st.header("Input Configuration")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
355
 
356
- # Video upload
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
357
  uploaded_video = st.file_uploader(
358
  "Upload Reference Video",
359
  type=['mp4', 'mov', 'avi', 'mkv'],
360
  help="Upload a profitable ad video to analyze and create variations from"
361
  )
362
 
 
 
 
 
 
363
  st.subheader("Additional Context (Optional)")
364
 
365
  offer_details = st.text_area(
@@ -389,109 +633,122 @@ def main():
389
  height=100,
390
  help="Compliance requirements, brand guidelines, or other notes"
391
  )
392
-
393
- # Generate button
394
- generate_button = st.button(
395
- "Generate Script Variations",
396
- type="primary",
397
- use_container_width=True
398
- )
399
-
400
- # Clear results button (only show if results exist)
401
  if "analysis_results" in st.session_state and st.session_state["analysis_results"]:
402
- if st.button(
403
- "Clear Results",
404
- type="secondary",
405
- use_container_width=True
406
- ):
407
  del st.session_state["analysis_results"]
 
408
  st.rerun()
409
-
410
- # Main content area
411
- if uploaded_video is None:
412
- st.info("Please upload a reference video to begin analysis.")
413
-
414
- # Instructions
415
- with st.expander("How to Use This Tool"):
416
- st.markdown("""
417
- ### Upload Guidelines:
418
- - **Best videos to analyze**: Already profitable Facebook/TikTok ads in your niche
419
- - **Video length**: 30-90 seconds work best for analysis
420
- - **Quality**: Clear audio and visuals help with better analysis
421
-
422
- ### Context Tips:
423
- - **Offer details**: Be specific about your main promise and mechanism
424
- - **Audience**: Include demographics, pain points, and desires
425
- - **Hooks**: Mention any specific angles that have worked for you
426
-
427
- ### Script Optimization:
428
- - Generated scripts focus on stopping scroll and driving clicks
429
- - Each variation tests different psychological triggers
430
- - Use the timestamp format for precise video production
431
- - Test multiple variations to find your best performer
432
- """)
433
-
434
- elif generate_button:
435
- if not GEMINI_API_KEY or GEMINI_API_KEY == "your-gemini-api-key-here":
436
- st.error("Please configure your Gemini API key in the backend.")
437
- return
438
-
439
- # Process video
440
- with st.spinner("Analyzing video and generating scripts..."):
441
- video_bytes = uploaded_video.read()
442
-
443
- # Reset file pointer for potential re-use
444
- uploaded_video.seek(0)
445
 
446
- json_response = analyze_video_and_generate_script(
447
- video_bytes,
448
- uploaded_video.name,
449
- offer_details,
450
- target_audience,
451
- specific_hooks,
452
- additional_context
453
- )
454
-
455
- if json_response:
456
- # Store results in session state
457
- st.session_state["analysis_results"] = json_response
458
- st.success("Analysis complete! Here are your script variations:")
459
- else:
460
- st.error("Failed to generate script variations. Please try again.")
461
-
462
- # Display results if they exist in session state
463
- if "analysis_results" in st.session_state and st.session_state["analysis_results"]:
464
- json_response = st.session_state["analysis_results"]
465
-
466
- # Create tabs for different outputs
467
- tab1, tab2, tab3 = st.tabs(["Script Variations", "Video Analysis", "Improvement Recommendations"])
468
-
469
- with tab1:
470
- display_script_variations(json_response)
 
 
 
 
 
 
 
 
 
 
 
 
 
471
 
472
- # CSV Download button
473
- csv_content = create_csv_download(json_response)
474
- st.download_button(
475
- label="Download All Scripts (CSV)",
476
- data=csv_content,
477
- file_name="video_script_variations.csv",
478
- mime="text/csv",
479
- type="secondary",
480
- use_container_width=True
481
- )
482
-
483
- with tab2:
484
- display_video_analysis(json_response)
485
 
486
- with tab3:
487
- display_timestamp_improvements(json_response)
488
-
489
- else:
490
- st.info("Configure your inputs in the sidebar and click 'Generate Script Variations' to begin.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
491
 
492
  if __name__ == "__main__":
493
  try:
494
- logger.info("Launching Streamlit app...")
 
 
495
  main()
496
  except Exception as e:
497
- logger.exception("Unhandled error during app launch.")
 
 
 
1
  import streamlit as st
2
+ import google.generativeai as genai
3
  import tempfile
4
  import os
5
  import time
 
7
  from typing import Optional
8
  import pandas as pd
9
  import logging
10
+ from database import insert_analysis_result
11
+ from dotenv import load_dotenv
12
+
13
+ load_dotenv()
14
 
15
  # Backend API Key Configuration
16
+ GEMINI_API_KEY = os.getenv("GEMINI_KEY")
17
 
18
  # Page configuration
19
  st.set_page_config(
 
23
  initial_sidebar_state="expanded"
24
  )
25
 
26
+ # Enhanced logging configuration
27
  logging.basicConfig(
28
+ level=logging.DEBUG, # Changed to DEBUG for more detailed logs
29
+ format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
30
  handlers=[
31
  logging.StreamHandler()
32
  ]
33
  )
34
  logger = logging.getLogger(__name__)
35
 
 
36
def configure_gemini():
    """Configure the Gemini API client with the backend key.

    Reads the module-level ``GEMINI_API_KEY`` (loaded from the ``GEMINI_KEY``
    environment variable) and configures the ``google.generativeai`` client,
    then verifies connectivity by listing the available models.

    Returns:
        bool: True when the key is present and the client is configured and
        reachable; False otherwise. Failures are logged and also surfaced in
        the Streamlit UI via ``st.error``.
    """
    logger.info("Starting Gemini API configuration...")

    if not GEMINI_API_KEY:
        error_msg = "GEMINI_KEY not found in environment variables"
        logger.error(error_msg)
        st.error(error_msg)
        return False

    # SECURITY: never log key material. The previous version logged the key
    # length and its first 10 characters, which leaks secret material into
    # log output; only record that a key was found.
    logger.info("API key found in environment")

    try:
        genai.configure(api_key=GEMINI_API_KEY)
        logger.info("Gemini API configured successfully")

        # Test API connection by listing available models.
        logger.info("Testing API connection...")
        models = list(genai.list_models())
        logger.info(f"Available models: {[model.name for model in models]}")

        return True
    except Exception as e:
        error_msg = f"Failed to configure Gemini API: {str(e)}"
        logger.error(error_msg, exc_info=True)
        st.error(error_msg)
        return False
64
 
65
# Enhanced system prompt with timestamp-based improvements.
# os.getenv with a default of "" already returns a plain str, so the previous
# f'"""{...}"""' wrapper was a no-op; likewise len("") == 0, so the
# conditional length expression was redundant.
SYSTEM_PROMPT = os.getenv("SYS_PROMPT", "")
logger.info(f"System prompt loaded, length: {len(SYSTEM_PROMPT)}")
68
 
69
  def analyze_video_and_generate_script(
70
+ video_bytes,
71
+ video_name,
72
+ offer_details: str = "",
73
+ target_audience: str = "",
74
+ specific_hooks: str = "",
75
+ additional_context: str = ""
76
  ):
77
  """
78
  Analyze video and generate direct response script variations
79
  """
80
+ logger.info(f"Starting video analysis for: {video_name}")
81
+ logger.info(f"Video size: {len(video_bytes)} bytes")
82
+
83
  try:
84
  # Save uploaded video to temporary file
85
+ logger.info("Creating temporary file...")
86
  with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(video_name)[1]) as tmp_file:
87
  tmp_file.write(video_bytes)
88
  tmp_file_path = tmp_file.name
89
 
90
+ logger.info(f"Temporary file created: {tmp_file_path}")
91
+ logger.info(f"File size on disk: {os.path.getsize(tmp_file_path)} bytes")
92
+
93
  # Configure Gemini
94
+ logger.info("Configuring Gemini API...")
95
+ if not configure_gemini():
96
+ logger.error("Gemini configuration failed")
97
+ return None
98
 
99
  # Show upload progress
100
  upload_progress = st.progress(0)
 
102
 
103
  upload_status.text("Uploading video to Google AI...")
104
  upload_progress.progress(20)
105
+ logger.info("Starting file upload to Gemini...")
106
 
107
+ try:
108
+ # Upload video to Gemini
109
+ video_file_obj = genai.upload_file(tmp_file_path)
110
+ logger.info(f"File uploaded successfully. File URI: {video_file_obj.uri}")
111
+ logger.info(f"File state: {video_file_obj.state.name}")
112
+ upload_progress.progress(40)
113
+
114
+ except Exception as upload_error:
115
+ error_msg = f"File upload failed: {str(upload_error)}"
116
+ logger.error(error_msg, exc_info=True)
117
+ upload_status.error(error_msg)
118
+ return None
119
 
120
  upload_status.text("Processing video...")
121
+ logger.info("Waiting for video processing...")
122
+
123
+ processing_attempts = 0
124
+ max_processing_attempts = 30 # 1 minute timeout
125
+
126
  while video_file_obj.state.name == "PROCESSING":
127
+ processing_attempts += 1
128
+ logger.debug(f"Processing attempt {processing_attempts}/{max_processing_attempts}")
129
+
130
+ if processing_attempts > max_processing_attempts:
131
+ error_msg = "Video processing timed out after 1 minute"
132
+ logger.error(error_msg)
133
+ upload_status.error(error_msg)
134
+ return None
135
+
136
  time.sleep(2)
137
+ try:
138
+ video_file_obj = genai.get_file(video_file_obj.name)
139
+ logger.debug(f"Processing state: {video_file_obj.state.name}")
140
+ except Exception as get_file_error:
141
+ logger.error(f"Error checking file status: {str(get_file_error)}", exc_info=True)
142
+ break
143
+
144
+ upload_progress.progress(40 + (processing_attempts * 20 // max_processing_attempts))
145
+
146
+ logger.info(f"Final file state: {video_file_obj.state.name}")
147
 
148
  if video_file_obj.state.name == "FAILED":
149
+ error_msg = "Google AI file processing failed. Please try another video."
150
+ logger.error(error_msg)
151
+ upload_status.error(error_msg)
152
+ return None
153
+
154
+ if video_file_obj.state.name != "ACTIVE":
155
+ error_msg = f"Unexpected file state: {video_file_obj.state.name}"
156
+ logger.error(error_msg)
157
+ upload_status.error(error_msg)
158
  return None
159
 
160
  upload_progress.progress(80)
161
  upload_status.text("Generating script variations...")
162
+ logger.info("Starting content generation...")
163
 
164
  # Build the enhanced user prompt
165
  user_prompt = f"""Analyze this reference video and generate 3 high-converting direct response video script variations with detailed timestamp-based improvements.
166
+ IMPORTANT CONTEXT TO FOLLOW WHEN CREATING OUTPUT:
167
+ - Offer Details: {offer_details}
168
+ - Target Audience: {target_audience}
169
+ - Specific Hooks: {specific_hooks}
170
+ ADDITIONAL CONTEXT (MANDATORY TO FOLLOW):
171
+ {additional_context}
172
+ You must reflect this additional context in:
173
+ - The script tone, CTA, visuals
174
+ - Compliance or branding constraints
175
+ - Any assumptions about audience or product
176
+ Failure to include this will be considered incomplete.
177
+ Please provide a comprehensive analysis including:
178
+ 1. DETAILED VIDEO ANALYSIS with timestamp-based metrics:
179
+ - Break down the video into 5-10 second segments
180
+ - Rate each segment's effectiveness (1-10 scale)
181
+ - Identify specific elements (hook, transition, proof, CTA, etc.)
182
+ 2. TIMESTAMP-BASED IMPROVEMENTS:
183
+ - Specific recommendations for each time segment
184
+ - Priority level for each improvement
185
+ - Expected impact of implementing changes
186
+ 3. SCRIPT VARIATIONS:
187
+ - Create 2-3 complete script variations
188
+ - Each with timestamp-by-timestamp breakdown
189
+ - Different psychological triggers and approaches
190
+ IMPORTANT: Return only valid JSON in the exact format specified in the system prompt. Analyze the video second-by-second for maximum detail."""
191
 
192
+ logger.info(f"User prompt length: {len(user_prompt)}")
193
+ logger.info(f"System prompt length: {len(SYSTEM_PROMPT) if SYSTEM_PROMPT else 0}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
194
 
195
  # Generate response
196
+ try:
197
+ logger.info("Creating GenerativeModel instance...")
198
+ model = genai.GenerativeModel("gemini-2.0-flash-exp")
199
+ logger.info("Model created successfully")
200
+
201
+ logger.info("Generating content with video and prompts...")
202
+ full_prompt = user_prompt + "\n\n" + (SYSTEM_PROMPT or "")
203
+ logger.debug(f"Full prompt length: {len(full_prompt)}")
204
+
205
+ response = model.generate_content([video_file_obj, full_prompt])
206
+ logger.info("Content generation completed successfully")
207
+ logger.debug(f"Response text length: {len(response.text) if hasattr(response, 'text') else 'No text attribute'}")
208
+
209
+ except Exception as generation_error:
210
+ error_msg = f"Error generating content with Gemini: {str(generation_error)}"
211
+ logger.error(error_msg, exc_info=True)
212
+ upload_status.error(error_msg)
213
+ return None
214
 
215
  upload_progress.progress(100)
216
  upload_status.success("Analysis complete!")
217
+ logger.info("Video analysis completed successfully")
218
 
219
  # Clean up temporary file
220
+ try:
221
+ os.unlink(tmp_file_path)
222
+ logger.info(f"Temporary file deleted: {tmp_file_path}")
223
+ except Exception as cleanup_error:
224
+ logger.warning(f"Failed to delete temporary file: {str(cleanup_error)}")
225
 
226
  # Parse JSON response
227
+ logger.info("Parsing JSON response...")
228
  try:
229
+ if not hasattr(response, 'text'):
230
+ error_msg = "Response object has no text attribute"
231
+ logger.error(error_msg)
232
+ st.error(error_msg)
233
+ return None
234
+
235
  response_text = response.text.strip()
236
+ logger.debug(f"Raw response text preview: {response_text[:500]}...")
237
+
238
  if response_text.startswith('```json'):
239
  response_text = response_text[7:-3]
240
+ logger.debug("Removed json code block markers")
241
  elif response_text.startswith('```'):
242
  response_text = response_text[3:-3]
243
+ logger.debug("Removed generic code block markers")
244
+
245
+ logger.debug(f"Cleaned response text preview: {response_text[:500]}...")
246
 
247
  json_response = json.loads(response_text)
248
+ logger.info("JSON parsing successful")
249
+ logger.debug(f"JSON keys: {list(json_response.keys()) if isinstance(json_response, dict) else 'Not a dict'}")
250
+
251
  return json_response
252
 
253
+ except json.JSONDecodeError as json_error:
254
+ error_msg = f"Error parsing AI response as JSON: {str(json_error)}"
255
+ logger.error(error_msg)
256
+ logger.error(f"Response text that failed to parse: {response_text[:1000]}...")
257
+ st.error(error_msg)
258
+ st.text_area("Raw Response (for debugging):", response_text, height=200)
259
  return None
260
 
261
  except Exception as e:
262
+ error_msg = f"Unexpected error processing video: {str(e)}"
263
+ logger.error(error_msg, exc_info=True)
264
+ st.error(error_msg)
265
  return None
266
 
267
  def display_script_variations(json_data):
268
  """Display script variations in formatted tables"""
269
+ logger.info("Displaying script variations...")
270
+
271
  if not json_data or "script_variations" not in json_data:
272
+ error_msg = "No script variations found in the response"
273
+ logger.error(error_msg)
274
+ logger.debug(f"JSON data keys: {list(json_data.keys()) if isinstance(json_data, dict) else 'Not a dict'}")
275
+ st.error(error_msg)
276
  return
277
 
278
+ try:
279
+ variations = json_data["script_variations"]
280
+ logger.info(f"Found {len(variations)} script variations")
 
281
 
282
+ for i, variation in enumerate(variations, 1):
283
+ variation_name = variation.get("variation_name", f"Variation {i}")
284
+ logger.debug(f"Processing variation {i}: {variation_name}")
285
+
286
+ st.markdown(f"### Variation {i}: {variation_name}")
287
+
288
+ # Convert script table to DataFrame for better display
289
+ script_data = variation.get("script_table")
290
+ if not script_data:
291
+ warning_msg = f"No script data for {variation_name}"
292
+ logger.warning(warning_msg)
293
+ st.warning(warning_msg)
294
+ continue
295
+
296
+ logger.debug(f"Script data for {variation_name}: {len(script_data)} rows")
297
 
298
+ df = pd.DataFrame(script_data)
299
+
300
  # Rename columns for better display
301
+ df = df.rename(columns={
302
  'timestamp': 'Timestamp',
303
  'script_voiceover': 'Script / Voiceover',
304
  'visual_direction': 'Visual Direction',
305
  'psychological_trigger': 'Psychological Trigger',
306
  'cta_action': 'CTA / Action'
307
+ })
308
+
309
+ st.table(df)
310
+ st.markdown("---")
311
+
312
+ logger.info("Script variations displayed successfully")
313
+
314
+ except Exception as e:
315
+ error_msg = f"Error displaying script variations: {str(e)}"
316
+ logger.error(error_msg, exc_info=True)
317
+ st.error(error_msg)
318
+
319
def display_video_analysis(json_data):
    """Render the AI's video analysis: summary columns plus a metrics table.

    ``json_data["video_analysis"]`` is normally a dict with summary fields
    and a ``video_metrics`` list; a bare list is tolerated and treated as the
    metrics alone. Problems are logged and shown in the UI; returns None.
    """
    logger.info("Displaying video analysis...")

    # Guard: nothing to render without the analysis payload.
    if not json_data or "video_analysis" not in json_data:
        error_msg = "No video analysis found in the response"
        logger.error(error_msg)
        st.error(error_msg)
        return

    try:
        analysis = json_data["video_analysis"]
        logger.debug(f"Video analysis type: {type(analysis)}")

        timeline_rows = []
        if isinstance(analysis, dict):
            # Two-column layout for the high-level summary fields.
            col1, col2 = st.columns(2)

            with col1:
                st.subheader("Effectiveness Factors")
                effectiveness = analysis.get('effectiveness_factors', 'N/A')
                st.write(effectiveness)
                logger.debug(f"Effectiveness factors: {effectiveness}")

                st.subheader("Target Audience")
                audience = analysis.get('target_audience', 'N/A')
                st.write(audience)
                logger.debug(f"Target audience: {audience}")

            with col2:
                st.subheader("Psychological Triggers")
                triggers = analysis.get('psychological_triggers', 'N/A')
                st.write(triggers)
                logger.debug(f"Psychological triggers: {triggers}")

            timeline_rows = analysis.get("video_metrics", [])
            logger.debug(f"Video metrics count: {len(timeline_rows)}")

        else:
            warning_msg = "Unexpected format in video_analysis. Skipping metadata."
            logger.warning(warning_msg)
            st.warning(warning_msg)
            if isinstance(analysis, list):
                timeline_rows = analysis

        if not timeline_rows:
            warning_msg = "No detailed video metrics available"
            logger.warning(warning_msg)
            st.warning(warning_msg)
        else:
            logger.info(f"Processing {len(timeline_rows)} video metrics")

            # Rebuild the per-segment table under friendlier headers.
            metrics_frame = pd.DataFrame(timeline_rows).rename(columns={
                'timestamp': 'Timestamp',
                'element': 'Element',
                'current_approach': 'Current Approach',
                'effectiveness_score': 'Score',
                'notes': 'Analysis Notes'
            })
            logger.debug(f"Metrics dataframe columns: {list(metrics_frame.columns)}")

            st.dataframe(
                metrics_frame,
                use_container_width=True,
                hide_index=True,
                column_config={
                    "Timestamp": st.column_config.TextColumn(width="small"),
                    "Element": st.column_config.TextColumn(width="medium"),
                    "Current Approach": st.column_config.TextColumn(width="large"),
                    "Score": st.column_config.TextColumn(width="small"),
                    "Analysis Notes": st.column_config.TextColumn(width="large")
                }
            )

        logger.info("Video analysis displayed successfully")

    except Exception as e:
        error_msg = f"Error displaying video analysis: {str(e)}"
        logger.error(error_msg, exc_info=True)
        st.error(error_msg)
 
 
 
 
 
 
 
 
 
 
404
 
405
  def display_timestamp_improvements(json_data):
406
  """Display timestamp-based improvements in tabular format"""
407
+ logger.info("Displaying timestamp improvements...")
 
 
 
 
408
 
409
+ improvements = json_data.get("timestamp_improvements")
410
+
411
+ if improvements is None:
412
+ error_msg = "No timestamp improvements found in the response"
413
+ logger.error(error_msg)
414
+ st.error(error_msg)
415
+ return
416
+
417
+ if not improvements:
418
+ warning_msg = "No timestamp improvements available"
419
+ logger.warning(warning_msg)
420
+ st.warning(warning_msg)
421
+ return
422
+
423
+ try:
424
+ st.subheader("Timestamp-by-Timestamp Improvement Recommendations")
425
+ logger.info(f"Processing {len(improvements)} improvement recommendations")
426
+
427
  improvements_df = pd.DataFrame(improvements)
428
 
429
  # Rename columns for better display
 
437
  }
438
 
439
  improvements_df = improvements_df.rename(columns=column_mapping)
440
+ logger.debug(f"Improvements dataframe columns: {list(improvements_df.columns)}")
441
 
442
  # Color code priority
443
  def color_priority(val):
 
464
  "Priority": st.column_config.TextColumn(width="small")
465
  }
466
  )
467
+
468
+ logger.info("Timestamp improvements displayed successfully")
469
+
470
+ except Exception as e:
471
+ error_msg = f"Error displaying timestamp improvements: {str(e)}"
472
+ logger.error(error_msg, exc_info=True)
473
+ st.error(error_msg)
474
 
475
  def create_csv_download(json_data):
476
  """Create CSV content with all scripts combined"""
477
+ logger.info("Creating CSV download...")
478
 
479
+ try:
480
+ all_scripts_data = []
 
481
 
482
+ # Combine all script variations into one dataset
483
+ for i, variation in enumerate(json_data.get("script_variations", []), 1):
484
+ variation_name = variation.get("variation_name", f"Variation {i}")
485
+ logger.debug(f"Processing variation for CSV: {variation_name}")
486
+
487
+ for row in variation.get("script_table", []):
488
+ script_row = {
489
+ 'Variation': variation_name,
490
+ 'Timestamp': row.get('timestamp', ''),
491
+ 'Script_Voiceover': row.get('script_voiceover', ''),
492
+ 'Visual_Direction': row.get('visual_direction', ''),
493
+ 'Psychological_Trigger': row.get('psychological_trigger', ''),
494
+ 'CTA_Action': row.get('cta_action', '')
495
+ }
496
+ all_scripts_data.append(script_row)
497
+
498
+ # Convert to DataFrame and then to CSV
499
+ if all_scripts_data:
500
+ df = pd.DataFrame(all_scripts_data)
501
+ csv_content = df.to_csv(index=False)
502
+ logger.info(f"CSV created successfully with {len(all_scripts_data)} rows")
503
+ return csv_content
504
+ else:
505
+ logger.warning("No script data available for CSV")
506
+ return "No script data available"
507
+
508
+ except Exception as e:
509
+ error_msg = f"Error creating CSV: {str(e)}"
510
+ logger.error(error_msg, exc_info=True)
511
+ return f"Error creating CSV: {error_msg}"
512
 
513
  def check_token(user_token):
514
+ logger.info("Checking access token...")
515
+
516
  ACCESS_TOKEN = os.getenv("ACCESS_TOKEN")
517
  if not ACCESS_TOKEN:
518
+ error_msg = "ACCESS_TOKEN not set in environment."
519
+ logger.critical(error_msg)
520
  return False, "Server error: Access token not configured."
521
+
522
  if user_token == ACCESS_TOKEN:
523
  logger.info("Access token validated successfully.")
524
  return True, ""
525
+
526
  logger.warning("Invalid access token attempt.")
527
  return False, "Invalid token."
528
 
529
  def main():
530
  """Main application function"""
531
+ logger.info("Starting main application...")
 
 
 
532
 
533
  if "authenticated" not in st.session_state:
534
  st.session_state["authenticated"] = False
535
+ logger.debug("Authentication state initialized")
536
 
537
  if not st.session_state["authenticated"]:
538
+ logger.info("User not authenticated, showing login screen")
539
  st.markdown("## Access Required")
540
  token_input = st.text_input("Enter Access Token", type="password")
541
  if st.button("Unlock App"):
542
  ok, error_msg = check_token(token_input)
543
  if ok:
544
  st.session_state["authenticated"] = True
545
+ logger.info("User authenticated successfully")
546
  st.rerun()
547
  else:
548
+ logger.warning(f"Authentication failed: {error_msg}")
549
  st.error(error_msg)
550
+ return
551
+
552
+ # Add API test button for debugging
553
+ if st.sidebar.button("🔧 Test API Connection"):
554
+ logger.info("Testing API connection...")
555
+ try:
556
+ genai.configure(api_key=GEMINI_API_KEY)
557
+ models = list(genai.list_models())
558
+ st.sidebar.success(f"✅ API Working! Found {len(models)} models")
559
+ logger.info(f"API test successful, found {len(models)} models")
560
+ for model in models[:3]: # Show first 3 models
561
+ st.sidebar.text(f"• {model.name}")
562
+ except Exception as e:
563
+ error_msg = f"❌ API Test Failed: {str(e)}"
564
+ st.sidebar.error(error_msg)
565
+ logger.error(f"API test failed: {str(e)}", exc_info=True)
566
+
567
+ # Sidebar navigation
568
+ if st.session_state["authenticated"]:
569
+ logger.info("User authenticated, showing main interface")
570
+
571
+ selected_tab = st.sidebar.radio("Select Mode", ["Script Generator", "History"])
572
+ logger.debug(f"Selected tab: {selected_tab}")
573
+
574
+ # ========== SCRIPT GENERATOR ==========
575
+ if selected_tab == "Script Generator":
576
+ logger.info("Script Generator mode selected")
577
 
578
+ with st.expander("How to Use This Tool", expanded=False):
579
+ st.markdown("""
580
+ ### Upload Guidelines:
581
+ - **Best videos to analyze**: Already profitable Facebook/TikTok ads in your niche
582
+ - **Video length**: 30–90 seconds work best for analysis
583
+ - **Quality**: Clear audio and visuals help with better analysis
584
+ ### Context Tips:
585
+ - **Offer details**: Be specific about your main promise and mechanism
586
+ - **Audience**: Include demographics, pain points, and desires
587
+ - **Hooks**: Mention any specific angles that have worked for you
588
+ ### Script Optimization:
589
+ - Generated scripts focus on stopping scroll and driving clicks
590
+ - Each variation tests different psychological triggers
591
+ - Use the timestamp format for precise video production
592
+ - Test multiple variations to find your best performer
593
+ """)
594
+ st.subheader("Input Configuration")
595
+
596
  uploaded_video = st.file_uploader(
597
  "Upload Reference Video",
598
  type=['mp4', 'mov', 'avi', 'mkv'],
599
  help="Upload a profitable ad video to analyze and create variations from"
600
  )
601
 
602
+ if uploaded_video is not None:
603
+ logger.info(f"Video uploaded: {uploaded_video.name}, size: {uploaded_video.size} bytes")
604
+ else:
605
+ st.info("Please upload a reference video to begin analysis.")
606
+
607
  st.subheader("Additional Context (Optional)")
608
 
609
  offer_details = st.text_area(
 
633
  height=100,
634
  help="Compliance requirements, brand guidelines, or other notes"
635
  )
636
+
637
+ generate_button = st.button("Generate Script Variations", use_container_width=True)
638
+
 
 
 
 
 
 
639
  if "analysis_results" in st.session_state and st.session_state["analysis_results"]:
640
+ if st.button("Clear Results", use_container_width=True):
 
 
 
 
641
  del st.session_state["analysis_results"]
642
+ logger.info("Analysis results cleared")
643
  st.rerun()
644
+
645
+ # Generate & show results
646
+ if uploaded_video and generate_button:
647
+ logger.info("Starting video analysis process...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
648
 
649
+ with st.spinner("Analyzing video and generating scripts..."):
650
+ video_bytes = uploaded_video.read()
651
+ uploaded_video.seek(0)
652
+
653
+ json_response = analyze_video_and_generate_script(
654
+ video_bytes,
655
+ uploaded_video.name,
656
+ offer_details,
657
+ target_audience,
658
+ specific_hooks,
659
+ additional_context
660
+ )
661
+
662
+ if json_response:
663
+ logger.info("Analysis completed successfully, saving to database...")
664
+ try:
665
+ insert_analysis_result(
666
+ video_name=uploaded_video.name,
667
+ offer_details=offer_details,
668
+ target_audience=target_audience,
669
+ specific_hook=specific_hooks,
670
+ additional_context=additional_context,
671
+ response=json_response
672
+ )
673
+ logger.info("Results saved to database")
674
+ except Exception as db_error:
675
+ logger.error(f"Failed to save to database: {str(db_error)}", exc_info=True)
676
+ st.warning("Analysis completed but failed to save to database")
677
+
678
+ st.session_state["analysis_results"] = json_response
679
+ else:
680
+ logger.error("Analysis failed, no response received")
681
+
682
+ if "analysis_results" in st.session_state:
683
+ logger.info("Displaying analysis results...")
684
+ json_response = st.session_state["analysis_results"]
685
+
686
+ tab1, tab2, tab3 = st.tabs(["Script Variations", "Video Analysis", "Improvement Recommendations"])
687
 
688
+ with tab1:
689
+ display_script_variations(json_response)
690
+ csv_content = create_csv_download(json_response)
691
+ st.download_button("Download All Scripts (CSV)", data=csv_content,
692
+ file_name="video_script_variations.csv", mime="text/csv")
693
+ with tab2:
694
+ display_video_analysis(json_response)
695
+ with tab3:
696
+ display_timestamp_improvements(json_response)
697
+
698
+ # ========== HISTORY ==========
699
+ elif selected_tab == "History":
700
+ logger.info("History mode selected")
701
 
702
+ try:
703
+ from database import get_all_results
704
+ history_items = get_all_results(limit=20)
705
+ logger.info(f"Retrieved {len(history_items) if history_items else 0} history items")
706
+
707
+ if history_items:
708
+ video_titles = [
709
+ f"{item['video_name']} ({item['created_at'].strftime('%Y-%m-%d %H:%M')})"
710
+ for item in history_items
711
+ ]
712
+
713
+ selected = st.sidebar.radio("History Items", video_titles, index=0)
714
+ selected_index = video_titles.index(selected)
715
+ selected_data = history_items[selected_index]
716
+
717
+ logger.info(f"Selected history item: {selected_data['video_name']}")
718
+
719
+ st.subheader(f"Analysis for: {selected_data['video_name']}")
720
+ json_response = selected_data.get("response")
721
+
722
+ if json_response:
723
+ tab1, tab2, tab3 = st.tabs(["Script Variations", "Video Analysis", "Improvement Recommendations"])
724
+
725
+ with tab1:
726
+ display_script_variations(json_response)
727
+ with tab2:
728
+ display_video_analysis(json_response)
729
+ with tab3:
730
+ display_timestamp_improvements(json_response)
731
+ else:
732
+ warning_msg = "No valid response data for this analysis."
733
+ logger.warning(warning_msg)
734
+ st.warning(warning_msg)
735
+ else:
736
+ logger.info("No history items found")
737
+ st.sidebar.info("No saved analyses found.")
738
+ st.info("No saved history available.")
739
+
740
+ except Exception as history_error:
741
+ error_msg = f"Error loading history: {str(history_error)}"
742
+ logger.error(error_msg, exc_info=True)
743
+ st.error(error_msg)
744
 
745
  if __name__ == "__main__":
746
  try:
747
+ logger.info("=" * 50)
748
+ logger.info("LAUNCHING VIDEO ANALYZER APPLICATION")
749
+ logger.info("=" * 50)
750
  main()
751
  except Exception as e:
752
+ logger.exception("CRITICAL ERROR: Unhandled error during app launch")
753
+ st.error(f"Critical application error: {str(e)}")
754
+ st.error("Please check the logs for more details.")