userIdc2024 committed on
Commit
7395720
·
verified ·
1 Parent(s): d62772f

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +190 -423
src/streamlit_app.py CHANGED
@@ -1,5 +1,5 @@
1
- import streamlit as st
2
- import google.generativeai as genai
3
  import tempfile
4
  import os
5
  import time
@@ -11,7 +11,6 @@ from database import insert_analysis_result
11
  from dotenv import load_dotenv
12
 
13
  load_dotenv()
14
-
15
  # Backend API Key Configuration
16
  GEMINI_API_KEY = os.getenv("GEMINI_KEY")
17
 
@@ -23,48 +22,22 @@ st.set_page_config(
23
  initial_sidebar_state="expanded"
24
  )
25
 
26
- # Enhanced logging configuration
27
  logging.basicConfig(
28
- level=logging.DEBUG, # Changed to DEBUG for more detailed logs
29
- format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
30
  handlers=[
31
  logging.StreamHandler()
32
  ]
33
  )
34
  logger = logging.getLogger(__name__)
35
 
 
36
  def configure_gemini():
37
  """Configure Gemini API with backend key"""
38
- logger.info("Starting Gemini API configuration...")
39
-
40
- if not GEMINI_API_KEY:
41
- error_msg = "GEMINI_KEY not found in environment variables"
42
- logger.error(error_msg)
43
- st.error(error_msg)
44
- return False
45
-
46
- logger.info(f"API Key found, length: {len(GEMINI_API_KEY)}")
47
- logger.debug(f"API Key starts with: {GEMINI_API_KEY[:10]}..." if len(GEMINI_API_KEY) > 10 else "API Key too short")
48
-
49
- try:
50
- genai.configure(api_key=GEMINI_API_KEY)
51
- logger.info("Gemini API configured successfully")
52
-
53
- # Test API connection
54
- logger.info("Testing API connection...")
55
- models = list(genai.list_models())
56
- logger.info(f"Available models: {[model.name for model in models]}")
57
-
58
- return True
59
- except Exception as e:
60
- error_msg = f"Failed to configure Gemini API: {str(e)}"
61
- logger.error(error_msg, exc_info=True)
62
- st.error(error_msg)
63
- return False
64
 
65
  # Enhanced system prompt with timestamp-based improvements
66
- SYSTEM_PROMPT = f"""{os.getenv("SYS_PROMPT", "")}"""
67
- logger.info(f"System prompt loaded, length: {len(SYSTEM_PROMPT) if SYSTEM_PROMPT else 0}")
68
 
69
  def analyze_video_and_generate_script(
70
  video_bytes,
@@ -77,24 +50,14 @@ def analyze_video_and_generate_script(
77
  """
78
  Analyze video and generate direct response script variations
79
  """
80
- logger.info(f"Starting video analysis for: {video_name}")
81
- logger.info(f"Video size: {len(video_bytes)} bytes")
82
-
83
  try:
84
  # Save uploaded video to temporary file
85
- logger.info("Creating temporary file...")
86
  with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(video_name)[1]) as tmp_file:
87
  tmp_file.write(video_bytes)
88
  tmp_file_path = tmp_file.name
89
 
90
- logger.info(f"Temporary file created: {tmp_file_path}")
91
- logger.info(f"File size on disk: {os.path.getsize(tmp_file_path)} bytes")
92
-
93
  # Configure Gemini
94
- logger.info("Configuring Gemini API...")
95
- if not configure_gemini():
96
- logger.error("Gemini configuration failed")
97
- return None
98
 
99
  # Show upload progress
100
  upload_progress = st.progress(0)
@@ -102,328 +65,200 @@ def analyze_video_and_generate_script(
102
 
103
  upload_status.text("Uploading video to Google AI...")
104
  upload_progress.progress(20)
105
- logger.info("Starting file upload to Gemini...")
106
 
107
- try:
108
- # Upload video to Gemini
109
- video_file_obj = genai.upload_file(tmp_file_path)
110
- logger.info(f"File uploaded successfully. File URI: {video_file_obj.uri}")
111
- logger.info(f"File state: {video_file_obj.state.name}")
112
- upload_progress.progress(40)
113
-
114
- except Exception as upload_error:
115
- error_msg = f"File upload failed: {str(upload_error)}"
116
- logger.error(error_msg, exc_info=True)
117
- upload_status.error(error_msg)
118
- return None
119
 
120
  upload_status.text("Processing video...")
121
- logger.info("Waiting for video processing...")
122
-
123
- processing_attempts = 0
124
- max_processing_attempts = 30 # 1 minute timeout
125
-
126
  while video_file_obj.state.name == "PROCESSING":
127
- processing_attempts += 1
128
- logger.debug(f"Processing attempt {processing_attempts}/{max_processing_attempts}")
129
-
130
- if processing_attempts > max_processing_attempts:
131
- error_msg = "Video processing timed out after 1 minute"
132
- logger.error(error_msg)
133
- upload_status.error(error_msg)
134
- return None
135
-
136
  time.sleep(2)
137
- try:
138
- video_file_obj = genai.get_file(video_file_obj.name)
139
- logger.debug(f"Processing state: {video_file_obj.state.name}")
140
- except Exception as get_file_error:
141
- logger.error(f"Error checking file status: {str(get_file_error)}", exc_info=True)
142
- break
143
-
144
- upload_progress.progress(40 + (processing_attempts * 20 // max_processing_attempts))
145
-
146
- logger.info(f"Final file state: {video_file_obj.state.name}")
147
 
148
  if video_file_obj.state.name == "FAILED":
149
- error_msg = "Google AI file processing failed. Please try another video."
150
- logger.error(error_msg)
151
- upload_status.error(error_msg)
152
- return None
153
-
154
- if video_file_obj.state.name != "ACTIVE":
155
- error_msg = f"Unexpected file state: {video_file_obj.state.name}"
156
- logger.error(error_msg)
157
- upload_status.error(error_msg)
158
  return None
159
 
160
  upload_progress.progress(80)
161
  upload_status.text("Generating script variations...")
162
- logger.info("Starting content generation...")
163
 
164
  # Build the enhanced user prompt
165
  user_prompt = f"""Analyze this reference video and generate 3 high-converting direct response video script variations with detailed timestamp-based improvements.
 
166
  IMPORTANT CONTEXT TO FOLLOW WHEN CREATING OUTPUT:
167
  - Offer Details: {offer_details}
168
  - Target Audience: {target_audience}
169
  - Specific Hooks: {specific_hooks}
 
170
  ADDITIONAL CONTEXT (MANDATORY TO FOLLOW):
171
  {additional_context}
 
172
  You must reflect this additional context in:
173
  - The script tone, CTA, visuals
174
  - Compliance or branding constraints
175
  - Any assumptions about audience or product
 
176
  Failure to include this will be considered incomplete.
 
177
  Please provide a comprehensive analysis including:
 
178
  1. DETAILED VIDEO ANALYSIS with timestamp-based metrics:
179
  - Break down the video into 5-10 second segments
180
  - Rate each segment's effectiveness (1-10 scale)
181
  - Identify specific elements (hook, transition, proof, CTA, etc.)
 
182
  2. TIMESTAMP-BASED IMPROVEMENTS:
183
  - Specific recommendations for each time segment
184
  - Priority level for each improvement
185
  - Expected impact of implementing changes
 
186
  3. SCRIPT VARIATIONS:
187
  - Create 2-3 complete script variations
188
  - Each with timestamp-by-timestamp breakdown
189
  - Different psychological triggers and approaches
190
- IMPORTANT: Return only valid JSON in the exact format specified in the system prompt. Analyze the video second-by-second for maximum detail."""
191
 
192
- logger.info(f"User prompt length: {len(user_prompt)}")
193
- logger.info(f"System prompt length: {len(SYSTEM_PROMPT) if SYSTEM_PROMPT else 0}")
194
 
195
  # Generate response
196
- try:
197
- logger.info("Creating GenerativeModel instance...")
198
- model = genai.GenerativeModel("gemini-2.0-flash-exp")
199
- logger.info("Model created successfully")
200
-
201
- logger.info("Generating content with video and prompts...")
202
- full_prompt = user_prompt + "\n\n" + (SYSTEM_PROMPT or "")
203
- logger.debug(f"Full prompt length: {len(full_prompt)}")
204
-
205
- response = model.generate_content([video_file_obj, full_prompt])
206
- logger.info("Content generation completed successfully")
207
- logger.debug(f"Response text length: {len(response.text) if hasattr(response, 'text') else 'No text attribute'}")
208
-
209
- except Exception as generation_error:
210
- error_msg = f"Error generating content with Gemini: {str(generation_error)}"
211
- logger.error(error_msg, exc_info=True)
212
- upload_status.error(error_msg)
213
- return None
214
 
215
  upload_progress.progress(100)
216
  upload_status.success("Analysis complete!")
217
- logger.info("Video analysis completed successfully")
218
 
219
  # Clean up temporary file
220
- try:
221
- os.unlink(tmp_file_path)
222
- logger.info(f"Temporary file deleted: {tmp_file_path}")
223
- except Exception as cleanup_error:
224
- logger.warning(f"Failed to delete temporary file: {str(cleanup_error)}")
225
 
226
  # Parse JSON response
227
- logger.info("Parsing JSON response...")
228
  try:
229
- if not hasattr(response, 'text'):
230
- error_msg = "Response object has no text attribute"
231
- logger.error(error_msg)
232
- st.error(error_msg)
233
- return None
234
-
235
  response_text = response.text.strip()
236
- logger.debug(f"Raw response text preview: {response_text[:500]}...")
237
-
238
  if response_text.startswith('```json'):
239
  response_text = response_text[7:-3]
240
- logger.debug("Removed json code block markers")
241
  elif response_text.startswith('```'):
242
  response_text = response_text[3:-3]
243
- logger.debug("Removed generic code block markers")
244
-
245
- logger.debug(f"Cleaned response text preview: {response_text[:500]}...")
246
 
247
  json_response = json.loads(response_text)
248
- logger.info("JSON parsing successful")
249
- logger.debug(f"JSON keys: {list(json_response.keys()) if isinstance(json_response, dict) else 'Not a dict'}")
250
-
251
  return json_response
252
 
253
- except json.JSONDecodeError as json_error:
254
- error_msg = f"Error parsing AI response as JSON: {str(json_error)}"
255
- logger.error(error_msg)
256
- logger.error(f"Response text that failed to parse: {response_text[:1000]}...")
257
- st.error(error_msg)
258
- st.text_area("Raw Response (for debugging):", response_text, height=200)
259
  return None
260
 
261
  except Exception as e:
262
- error_msg = f"Unexpected error processing video: {str(e)}"
263
- logger.error(error_msg, exc_info=True)
264
- st.error(error_msg)
265
  return None
266
 
267
  def display_script_variations(json_data):
268
  """Display script variations in formatted tables"""
269
- logger.info("Displaying script variations...")
270
-
271
  if not json_data or "script_variations" not in json_data:
272
- error_msg = "No script variations found in the response"
273
- logger.error(error_msg)
274
- logger.debug(f"JSON data keys: {list(json_data.keys()) if isinstance(json_data, dict) else 'Not a dict'}")
275
- st.error(error_msg)
276
  return
277
 
278
- try:
279
- variations = json_data["script_variations"]
280
- logger.info(f"Found {len(variations)} script variations")
281
-
282
- for i, variation in enumerate(variations, 1):
283
- variation_name = variation.get("variation_name", f"Variation {i}")
284
- logger.debug(f"Processing variation {i}: {variation_name}")
285
 
286
- st.markdown(f"### Variation {i}: {variation_name}")
287
 
288
- # Convert script table to DataFrame for better display
289
- script_data = variation.get("script_table")
290
- if not script_data:
291
- warning_msg = f"No script data for {variation_name}"
292
- logger.warning(warning_msg)
293
- st.warning(warning_msg)
294
- continue
 
 
 
 
 
 
 
 
 
 
 
 
295
 
296
- logger.debug(f"Script data for {variation_name}: {len(script_data)} rows")
297
-
298
- df = pd.DataFrame(script_data)
299
-
300
- # Rename columns for better display
301
- df = df.rename(columns={
302
- 'timestamp': 'Timestamp',
303
- 'script_voiceover': 'Script / Voiceover',
304
- 'visual_direction': 'Visual Direction',
305
- 'psychological_trigger': 'Psychological Trigger',
306
- 'cta_action': 'CTA / Action'
307
- })
308
-
309
- st.table(df)
310
- st.markdown("---")
311
-
312
- logger.info("Script variations displayed successfully")
313
-
314
- except Exception as e:
315
- error_msg = f"Error displaying script variations: {str(e)}"
316
- logger.error(error_msg, exc_info=True)
317
- st.error(error_msg)
318
 
319
  def display_video_analysis(json_data):
320
  """Display video analysis in tabular format"""
321
- logger.info("Displaying video analysis...")
322
-
323
  if not json_data or "video_analysis" not in json_data:
324
- error_msg = "No video analysis found in the response"
325
- logger.error(error_msg)
326
- st.error(error_msg)
327
  return
328
 
329
- try:
330
- analysis = json_data["video_analysis"]
331
- logger.debug(f"Video analysis type: {type(analysis)}")
332
-
333
- # Display general analysis
334
- video_metrics = []
335
- if isinstance(analysis, dict):
336
- col1, col2 = st.columns(2)
337
-
338
- with col1:
339
- st.subheader("Effectiveness Factors")
340
- effectiveness = analysis.get('effectiveness_factors', 'N/A')
341
- st.write(effectiveness)
342
- logger.debug(f"Effectiveness factors: {effectiveness}")
343
-
344
- st.subheader("Target Audience")
345
- audience = analysis.get('target_audience', 'N/A')
346
- st.write(audience)
347
- logger.debug(f"Target audience: {audience}")
348
-
349
- with col2:
350
- st.subheader("Psychological Triggers")
351
- triggers = analysis.get('psychological_triggers', 'N/A')
352
- st.write(triggers)
353
- logger.debug(f"Psychological triggers: {triggers}")
354
-
355
- video_metrics = analysis.get("video_metrics", [])
356
- logger.debug(f"Video metrics count: {len(video_metrics)}")
357
-
358
- else:
359
- warning_msg = "Unexpected format in video_analysis. Skipping metadata."
360
- logger.warning(warning_msg)
361
- st.warning(warning_msg)
362
- if isinstance(analysis, list):
363
- video_metrics = analysis
364
-
365
- if video_metrics:
366
- logger.info(f"Processing {len(video_metrics)} video metrics")
367
- metrics_df = pd.DataFrame(video_metrics)
368
-
369
- # Rename columns for better display
370
- column_mapping = {
371
- 'timestamp': 'Timestamp',
372
- 'element': 'Element',
373
- 'current_approach': 'Current Approach',
374
- 'effectiveness_score': 'Score',
375
- 'notes': 'Analysis Notes'
376
- }
377
-
378
- metrics_df = metrics_df.rename(columns=column_mapping)
379
- logger.debug(f"Metrics dataframe columns: {list(metrics_df.columns)}")
380
-
381
- st.dataframe(
382
- metrics_df,
383
- use_container_width=True,
384
- hide_index=True,
385
- column_config={
386
- "Timestamp": st.column_config.TextColumn(width="small"),
387
- "Element": st.column_config.TextColumn(width="medium"),
388
- "Current Approach": st.column_config.TextColumn(width="large"),
389
- "Score": st.column_config.TextColumn(width="small"),
390
- "Analysis Notes": st.column_config.TextColumn(width="large")
391
- }
392
- )
393
- else:
394
- warning_msg = "No detailed video metrics available"
395
- logger.warning(warning_msg)
396
- st.warning(warning_msg)
397
-
398
- logger.info("Video analysis displayed successfully")
399
 
400
- except Exception as e:
401
- error_msg = f"Error displaying video analysis: {str(e)}"
402
- logger.error(error_msg, exc_info=True)
403
- st.error(error_msg)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
404
 
405
  def display_timestamp_improvements(json_data):
406
  """Display timestamp-based improvements in tabular format"""
407
- logger.info("Displaying timestamp improvements...")
408
-
409
  improvements = json_data.get("timestamp_improvements")
410
 
411
  if improvements is None:
412
- error_msg = "No timestamp improvements found in the response"
413
- logger.error(error_msg)
414
- st.error(error_msg)
415
  return
416
 
417
  if not improvements:
418
- warning_msg = "No timestamp improvements available"
419
- logger.warning(warning_msg)
420
- st.warning(warning_msg)
421
  return
422
 
423
- try:
424
- st.subheader("Timestamp-by-Timestamp Improvement Recommendations")
425
- logger.info(f"Processing {len(improvements)} improvement recommendations")
426
-
427
  improvements_df = pd.DataFrame(improvements)
428
 
429
  # Rename columns for better display
@@ -437,7 +272,6 @@ def display_timestamp_improvements(json_data):
437
  }
438
 
439
  improvements_df = improvements_df.rename(columns=column_mapping)
440
- logger.debug(f"Improvements dataframe columns: {list(improvements_df.columns)}")
441
 
442
  # Color code priority
443
  def color_priority(val):
@@ -464,127 +298,94 @@ def display_timestamp_improvements(json_data):
464
  "Priority": st.column_config.TextColumn(width="small")
465
  }
466
  )
467
-
468
- logger.info("Timestamp improvements displayed successfully")
469
-
470
- except Exception as e:
471
- error_msg = f"Error displaying timestamp improvements: {str(e)}"
472
- logger.error(error_msg, exc_info=True)
473
- st.error(error_msg)
474
 
475
  def create_csv_download(json_data):
476
  """Create CSV content with all scripts combined"""
477
- logger.info("Creating CSV download...")
478
 
479
- try:
480
- all_scripts_data = []
481
-
482
- # Combine all script variations into one dataset
483
- for i, variation in enumerate(json_data.get("script_variations", []), 1):
484
- variation_name = variation.get("variation_name", f"Variation {i}")
485
- logger.debug(f"Processing variation for CSV: {variation_name}")
486
-
487
- for row in variation.get("script_table", []):
488
- script_row = {
489
- 'Variation': variation_name,
490
- 'Timestamp': row.get('timestamp', ''),
491
- 'Script_Voiceover': row.get('script_voiceover', ''),
492
- 'Visual_Direction': row.get('visual_direction', ''),
493
- 'Psychological_Trigger': row.get('psychological_trigger', ''),
494
- 'CTA_Action': row.get('cta_action', '')
495
- }
496
- all_scripts_data.append(script_row)
497
 
498
- # Convert to DataFrame and then to CSV
499
- if all_scripts_data:
500
- df = pd.DataFrame(all_scripts_data)
501
- csv_content = df.to_csv(index=False)
502
- logger.info(f"CSV created successfully with {len(all_scripts_data)} rows")
503
- return csv_content
504
- else:
505
- logger.warning("No script data available for CSV")
506
- return "No script data available"
507
-
508
- except Exception as e:
509
- error_msg = f"Error creating CSV: {str(e)}"
510
- logger.error(error_msg, exc_info=True)
511
- return f"Error creating CSV: {error_msg}"
 
 
 
512
 
513
  def check_token(user_token):
514
- logger.info("Checking access token...")
515
-
516
  ACCESS_TOKEN = os.getenv("ACCESS_TOKEN")
517
  if not ACCESS_TOKEN:
518
- error_msg = "ACCESS_TOKEN not set in environment."
519
- logger.critical(error_msg)
520
  return False, "Server error: Access token not configured."
521
-
522
  if user_token == ACCESS_TOKEN:
523
  logger.info("Access token validated successfully.")
524
  return True, ""
525
-
526
  logger.warning("Invalid access token attempt.")
527
  return False, "Invalid token."
528
 
529
  def main():
530
  """Main application function"""
531
- logger.info("Starting main application...")
 
 
 
 
 
 
 
 
 
532
 
533
  if "authenticated" not in st.session_state:
534
  st.session_state["authenticated"] = False
535
- logger.debug("Authentication state initialized")
536
 
537
  if not st.session_state["authenticated"]:
538
- logger.info("User not authenticated, showing login screen")
539
  st.markdown("## Access Required")
540
  token_input = st.text_input("Enter Access Token", type="password")
541
  if st.button("Unlock App"):
542
  ok, error_msg = check_token(token_input)
543
  if ok:
544
  st.session_state["authenticated"] = True
545
- logger.info("User authenticated successfully")
546
  st.rerun()
547
  else:
548
- logger.warning(f"Authentication failed: {error_msg}")
549
  st.error(error_msg)
550
  return
551
 
552
- # Add API test button for debugging
553
- if st.sidebar.button("🔧 Test API Connection"):
554
- logger.info("Testing API connection...")
555
- try:
556
- genai.configure(api_key=GEMINI_API_KEY)
557
- models = list(genai.list_models())
558
- st.sidebar.success(f"✅ API Working! Found {len(models)} models")
559
- logger.info(f"API test successful, found {len(models)} models")
560
- for model in models[:3]: # Show first 3 models
561
- st.sidebar.text(f"• {model.name}")
562
- except Exception as e:
563
- error_msg = f"❌ API Test Failed: {str(e)}"
564
- st.sidebar.error(error_msg)
565
- logger.error(f"API test failed: {str(e)}", exc_info=True)
566
 
567
  # Sidebar navigation
568
  if st.session_state["authenticated"]:
569
- logger.info("User authenticated, showing main interface")
570
 
571
  selected_tab = st.sidebar.radio("Select Mode", ["Script Generator", "History"])
572
- logger.debug(f"Selected tab: {selected_tab}")
573
 
574
  # ========== SCRIPT GENERATOR ==========
575
  if selected_tab == "Script Generator":
576
- logger.info("Script Generator mode selected")
577
-
578
  with st.expander("How to Use This Tool", expanded=False):
579
  st.markdown("""
580
  ### Upload Guidelines:
581
  - **Best videos to analyze**: Already profitable Facebook/TikTok ads in your niche
582
  - **Video length**: 30–90 seconds work best for analysis
583
  - **Quality**: Clear audio and visuals help with better analysis
 
584
  ### Context Tips:
585
  - **Offer details**: Be specific about your main promise and mechanism
586
  - **Audience**: Include demographics, pain points, and desires
587
  - **Hooks**: Mention any specific angles that have worked for you
 
588
  ### Script Optimization:
589
  - Generated scripts focus on stopping scroll and driving clicks
590
  - Each variation tests different psychological triggers
@@ -598,10 +399,7 @@ def main():
598
  type=['mp4', 'mov', 'avi', 'mkv'],
599
  help="Upload a profitable ad video to analyze and create variations from"
600
  )
601
-
602
- if uploaded_video is not None:
603
- logger.info(f"Video uploaded: {uploaded_video.name}, size: {uploaded_video.size} bytes")
604
- else:
605
  st.info("Please upload a reference video to begin analysis.")
606
 
607
  st.subheader("Additional Context (Optional)")
@@ -639,13 +437,10 @@ def main():
639
  if "analysis_results" in st.session_state and st.session_state["analysis_results"]:
640
  if st.button("Clear Results", use_container_width=True):
641
  del st.session_state["analysis_results"]
642
- logger.info("Analysis results cleared")
643
  st.rerun()
644
 
645
  # Generate & show results
646
  if uploaded_video and generate_button:
647
- logger.info("Starting video analysis process...")
648
-
649
  with st.spinner("Analyzing video and generating scripts..."):
650
  video_bytes = uploaded_video.read()
651
  uploaded_video.seek(0)
@@ -660,31 +455,20 @@ def main():
660
  )
661
 
662
  if json_response:
663
- logger.info("Analysis completed successfully, saving to database...")
664
- try:
665
- insert_analysis_result(
666
- video_name=uploaded_video.name,
667
- offer_details=offer_details,
668
- target_audience=target_audience,
669
- specific_hook=specific_hooks,
670
- additional_context=additional_context,
671
- response=json_response
672
- )
673
- logger.info("Results saved to database")
674
- except Exception as db_error:
675
- logger.error(f"Failed to save to database: {str(db_error)}", exc_info=True)
676
- st.warning("Analysis completed but failed to save to database")
677
-
678
  st.session_state["analysis_results"] = json_response
679
- else:
680
- logger.error("Analysis failed, no response received")
681
 
682
  if "analysis_results" in st.session_state:
683
- logger.info("Displaying analysis results...")
684
  json_response = st.session_state["analysis_results"]
685
 
686
  tab1, tab2, tab3 = st.tabs(["Script Variations", "Video Analysis", "Improvement Recommendations"])
687
-
688
  with tab1:
689
  display_script_variations(json_response)
690
  csv_content = create_csv_download(json_response)
@@ -697,58 +481,41 @@ def main():
697
 
698
  # ========== HISTORY ==========
699
  elif selected_tab == "History":
700
- logger.info("History mode selected")
701
-
702
- try:
703
- from database import get_all_results
704
- history_items = get_all_results(limit=20)
705
- logger.info(f"Retrieved {len(history_items) if history_items else 0} history items")
706
-
707
- if history_items:
708
- video_titles = [
709
- f"{item['video_name']} ({item['created_at'].strftime('%Y-%m-%d %H:%M')})"
710
- for item in history_items
711
- ]
712
-
713
- selected = st.sidebar.radio("History Items", video_titles, index=0)
714
- selected_index = video_titles.index(selected)
715
- selected_data = history_items[selected_index]
716
-
717
- logger.info(f"Selected history item: {selected_data['video_name']}")
718
-
719
- st.subheader(f"Analysis for: {selected_data['video_name']}")
720
- json_response = selected_data.get("response")
721
-
722
- if json_response:
723
- tab1, tab2, tab3 = st.tabs(["Script Variations", "Video Analysis", "Improvement Recommendations"])
724
-
725
- with tab1:
726
- display_script_variations(json_response)
727
- with tab2:
728
- display_video_analysis(json_response)
729
- with tab3:
730
- display_timestamp_improvements(json_response)
731
- else:
732
- warning_msg = "No valid response data for this analysis."
733
- logger.warning(warning_msg)
734
- st.warning(warning_msg)
735
  else:
736
- logger.info("No history items found")
737
- st.sidebar.info("No saved analyses found.")
738
- st.info("No saved history available.")
739
-
740
- except Exception as history_error:
741
- error_msg = f"Error loading history: {str(history_error)}"
742
- logger.error(error_msg, exc_info=True)
743
- st.error(error_msg)
744
 
745
  if __name__ == "__main__":
746
  try:
747
- logger.info("=" * 50)
748
- logger.info("LAUNCHING VIDEO ANALYZER APPLICATION")
749
- logger.info("=" * 50)
750
  main()
751
  except Exception as e:
752
- logger.exception("CRITICAL ERROR: Unhandled error during app launch")
753
- st.error(f"Critical application error: {str(e)}")
754
- st.error("Please check the logs for more details.")
 
1
+ import streamlit as st
2
+ from google import genai
3
  import tempfile
4
  import os
5
  import time
 
11
  from dotenv import load_dotenv
12
 
13
  load_dotenv()
 
14
  # Backend API Key Configuration
15
  GEMINI_API_KEY = os.getenv("GEMINI_KEY")
16
 
 
22
  initial_sidebar_state="expanded"
23
  )
24
 
 
25
  logging.basicConfig(
26
+ level=logging.INFO,
27
+ format="%(asctime)s [%(levelname)s] %(message)s",
28
  handlers=[
29
  logging.StreamHandler()
30
  ]
31
  )
32
  logger = logging.getLogger(__name__)
33
 
34
+
35
  def configure_gemini():
36
  """Configure Gemini API with backend key"""
37
+ return genai.Client(api_key=GEMINI_API_KEY)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
 
39
  # Enhanced system prompt with timestamp-based improvements
40
+ SYSTEM_PROMPT = f"""{os.getenv("SYS_PROMPT")}"""
 
41
 
42
  def analyze_video_and_generate_script(
43
  video_bytes,
 
50
  """
51
  Analyze video and generate direct response script variations
52
  """
 
 
 
53
  try:
54
  # Save uploaded video to temporary file
 
55
  with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(video_name)[1]) as tmp_file:
56
  tmp_file.write(video_bytes)
57
  tmp_file_path = tmp_file.name
58
 
 
 
 
59
  # Configure Gemini
60
+ client = configure_gemini()
 
 
 
61
 
62
  # Show upload progress
63
  upload_progress = st.progress(0)
 
65
 
66
  upload_status.text("Uploading video to Google AI...")
67
  upload_progress.progress(20)
 
68
 
69
+ # Upload video to Gemini
70
+ video_file_obj = client.files.upload(file=tmp_file_path)
71
+ upload_progress.progress(40)
 
 
 
 
 
 
 
 
 
72
 
73
  upload_status.text("Processing video...")
 
 
 
 
 
74
  while video_file_obj.state.name == "PROCESSING":
 
 
 
 
 
 
 
 
 
75
  time.sleep(2)
76
+ video_file_obj = client.files.get(name=video_file_obj.name)
77
+ upload_progress.progress(60)
 
 
 
 
 
 
 
 
78
 
79
  if video_file_obj.state.name == "FAILED":
80
+ upload_status.error("Google AI file processing failed. Please try another video.")
 
 
 
 
 
 
 
 
81
  return None
82
 
83
  upload_progress.progress(80)
84
  upload_status.text("Generating script variations...")
 
85
 
86
  # Build the enhanced user prompt
87
  user_prompt = f"""Analyze this reference video and generate 3 high-converting direct response video script variations with detailed timestamp-based improvements.
88
+
89
  IMPORTANT CONTEXT TO FOLLOW WHEN CREATING OUTPUT:
90
  - Offer Details: {offer_details}
91
  - Target Audience: {target_audience}
92
  - Specific Hooks: {specific_hooks}
93
+
94
  ADDITIONAL CONTEXT (MANDATORY TO FOLLOW):
95
  {additional_context}
96
+
97
  You must reflect this additional context in:
98
  - The script tone, CTA, visuals
99
  - Compliance or branding constraints
100
  - Any assumptions about audience or product
101
+
102
  Failure to include this will be considered incomplete.
103
+
104
  Please provide a comprehensive analysis including:
105
+
106
  1. DETAILED VIDEO ANALYSIS with timestamp-based metrics:
107
  - Break down the video into 5-10 second segments
108
  - Rate each segment's effectiveness (1-10 scale)
109
  - Identify specific elements (hook, transition, proof, CTA, etc.)
110
+
111
  2. TIMESTAMP-BASED IMPROVEMENTS:
112
  - Specific recommendations for each time segment
113
  - Priority level for each improvement
114
  - Expected impact of implementing changes
115
+
116
  3. SCRIPT VARIATIONS:
117
  - Create 2-3 complete script variations
118
  - Each with timestamp-by-timestamp breakdown
119
  - Different psychological triggers and approaches
 
120
 
121
+ IMPORTANT: Return only valid JSON in the exact format specified in the system prompt. Analyze the video second-by-second for maximum detail."""
 
122
 
123
  # Generate response
124
+ response = client.models.generate_content(
125
+ model="gemini-2.0-flash",
126
+ contents=[video_file_obj, user_prompt + "\n\n" + SYSTEM_PROMPT]
127
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
 
129
  upload_progress.progress(100)
130
  upload_status.success("Analysis complete!")
 
131
 
132
  # Clean up temporary file
133
+ os.unlink(tmp_file_path)
 
 
 
 
134
 
135
  # Parse JSON response
 
136
  try:
 
 
 
 
 
 
137
  response_text = response.text.strip()
 
 
138
  if response_text.startswith('```json'):
139
  response_text = response_text[7:-3]
 
140
  elif response_text.startswith('```'):
141
  response_text = response_text[3:-3]
 
 
 
142
 
143
  json_response = json.loads(response_text)
 
 
 
144
  return json_response
145
 
146
+ except json.JSONDecodeError as e:
147
+ st.error(f"Error parsing AI response: {str(e)}")
 
 
 
 
148
  return None
149
 
150
  except Exception as e:
151
+ st.error(f"Error processing video: {str(e)}")
 
 
152
  return None
153
 
154
  def display_script_variations(json_data):
155
  """Display script variations in formatted tables"""
 
 
156
  if not json_data or "script_variations" not in json_data:
157
+ st.error("No script variations found in the response")
 
 
 
158
  return
159
 
160
+ for i, variation in enumerate(json_data["script_variations"], 1):
161
+ variation_name = variation.get("variation_name", f"Variation {i}")
 
 
 
 
 
162
 
163
+ st.markdown(f"### Variation {i}: {variation_name}")
164
 
165
+ #Convert script table to DataFrame for better display
166
+ script_data = variation.get("script_table")
167
+ if not script_data:
168
+ st.warning(f"No script data for {variation_name}")
169
+ continue
170
+
171
+ df = pd.DataFrame(script_data)
172
+
173
+ # Rename columns for better display
174
+ df = df.rename(columns={
175
+ 'timestamp': 'Timestamp',
176
+ 'script_voiceover': 'Script / Voiceover',
177
+ 'visual_direction': 'Visual Direction',
178
+ 'psychological_trigger': 'Psychological Trigger',
179
+ 'cta_action': 'CTA / Action'
180
+ })
181
+
182
+ st.table(df)
183
+ st.markdown("---")
184
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185
 
186
def display_video_analysis(json_data):
    """Render the AI video analysis: summary fields plus a per-timestamp metrics table."""
    if not json_data or "video_analysis" not in json_data:
        st.error("No video analysis found in the response")
        return

    analysis = json_data["video_analysis"]
    video_metrics = []

    if isinstance(analysis, dict):
        # Full payload: show the summary metadata in two columns,
        # then pull out the per-timestamp metrics list.
        left, right = st.columns(2)
        with left:
            st.subheader("Effectiveness Factors")
            st.write(analysis.get('effectiveness_factors', 'N/A'))
            st.subheader("Target Audience")
            st.write(analysis.get('target_audience', 'N/A'))
        with right:
            st.subheader("Psychological Triggers")
            st.write(analysis.get('psychological_triggers', 'N/A'))
        video_metrics = analysis.get("video_metrics", [])
    else:
        st.warning("Unexpected format in video_analysis. Skipping metadata.")
        # A bare list is treated as metrics-only, with no summary metadata.
        if isinstance(analysis, list):
            video_metrics = analysis

    if not video_metrics:
        st.warning("No detailed video metrics available")
        return

    # Human-readable headers for the raw metric keys.
    renamed = pd.DataFrame(video_metrics).rename(columns={
        'timestamp': 'Timestamp',
        'element': 'Element',
        'current_approach': 'Current Approach',
        'effectiveness_score': 'Score',
        'notes': 'Analysis Notes',
    })
    st.dataframe(
        renamed,
        use_container_width=True,
        hide_index=True,
        column_config={
            "Timestamp": st.column_config.TextColumn(width="small"),
            "Element": st.column_config.TextColumn(width="medium"),
            "Current Approach": st.column_config.TextColumn(width="large"),
            "Score": st.column_config.TextColumn(width="small"),
            "Analysis Notes": st.column_config.TextColumn(width="large"),
        },
    )
245
 
246
  def display_timestamp_improvements(json_data):
247
  """Display timestamp-based improvements in tabular format"""
 
 
248
  improvements = json_data.get("timestamp_improvements")
249
 
250
  if improvements is None:
251
+ st.error("No timestamp improvements found in the response")
 
 
252
  return
253
 
254
  if not improvements:
255
+ st.warning("No timestamp improvements available")
 
 
256
  return
257
 
258
+ st.subheader("Timestamp-by-Timestamp Improvement Recommendations")
259
+
260
+ improvements = json_data["timestamp_improvements"]
261
+ if improvements:
262
  improvements_df = pd.DataFrame(improvements)
263
 
264
  # Rename columns for better display
 
272
  }
273
 
274
  improvements_df = improvements_df.rename(columns=column_mapping)
 
275
 
276
  # Color code priority
277
  def color_priority(val):
 
298
  "Priority": st.column_config.TextColumn(width="small")
299
  }
300
  )
301
+ else:
302
+ st.warning("No timestamp improvements available")
 
 
 
 
 
303
 
304
def create_csv_download(json_data):
    """Flatten every script variation into one CSV string for download.

    Returns the combined CSV text, or the placeholder string
    ``"No script data available"`` when there are no script rows at all.
    """
    flattened = []
    for idx, variation in enumerate(json_data.get("script_variations", []), start=1):
        label = variation.get("variation_name", f"Variation {idx}")
        # Tag each script row with its variation name so the CSV is self-describing.
        flattened.extend(
            {
                'Variation': label,
                'Timestamp': entry.get('timestamp', ''),
                'Script_Voiceover': entry.get('script_voiceover', ''),
                'Visual_Direction': entry.get('visual_direction', ''),
                'Psychological_Trigger': entry.get('psychological_trigger', ''),
                'CTA_Action': entry.get('cta_action', ''),
            }
            for entry in variation.get("script_table", [])
        )

    if not flattened:
        return "No script data available"
    return pd.DataFrame(flattened).to_csv(index=False)
329
 
330
def check_token(user_token):
    """Validate a user-supplied access token against the ACCESS_TOKEN env var.

    Args:
        user_token: The token string entered by the user.

    Returns:
        A ``(ok, error_message)`` tuple: ``(True, "")`` on a match,
        ``(False, <reason>)`` when the server token is unset or the
        supplied token does not match.
    """
    import hmac  # local import: stdlib constant-time comparison

    access_token = os.getenv("ACCESS_TOKEN")
    if not access_token:
        logger.critical("ACCESS_TOKEN not set in environment.")
        return False, "Server error: Access token not configured."

    # Use a constant-time comparison so response timing does not leak
    # how many leading characters of the secret token matched.
    supplied = user_token.encode("utf-8") if isinstance(user_token, str) else b""
    if hmac.compare_digest(supplied, access_token.encode("utf-8")):
        logger.info("Access token validated successfully.")
        return True, ""

    logger.warning("Invalid access token attempt.")
    return False, "Invalid token."
340
 
341
  def main():
342
  """Main application function"""
343
+
344
+ st.set_page_config(
345
+ page_title="Video Analyser and Script Generator",
346
+ page_icon="🎥",
347
+ layout="wide",
348
+ initial_sidebar_state="expanded"
349
+ )
350
+
351
+ st.title("Video Analyser and Script Generator")
352
+ st.divider()
353
 
354
  if "authenticated" not in st.session_state:
355
  st.session_state["authenticated"] = False
 
356
 
357
  if not st.session_state["authenticated"]:
 
358
  st.markdown("## Access Required")
359
  token_input = st.text_input("Enter Access Token", type="password")
360
  if st.button("Unlock App"):
361
  ok, error_msg = check_token(token_input)
362
  if ok:
363
  st.session_state["authenticated"] = True
 
364
  st.rerun()
365
  else:
 
366
  st.error(error_msg)
367
  return
368
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
369
 
370
  # Sidebar navigation
371
  if st.session_state["authenticated"]:
 
372
 
373
  selected_tab = st.sidebar.radio("Select Mode", ["Script Generator", "History"])
 
374
 
375
  # ========== SCRIPT GENERATOR ==========
376
  if selected_tab == "Script Generator":
 
 
377
  with st.expander("How to Use This Tool", expanded=False):
378
  st.markdown("""
379
  ### Upload Guidelines:
380
  - **Best videos to analyze**: Already profitable Facebook/TikTok ads in your niche
381
  - **Video length**: 30–90 seconds work best for analysis
382
  - **Quality**: Clear audio and visuals help with better analysis
383
+
384
  ### Context Tips:
385
  - **Offer details**: Be specific about your main promise and mechanism
386
  - **Audience**: Include demographics, pain points, and desires
387
  - **Hooks**: Mention any specific angles that have worked for you
388
+
389
  ### Script Optimization:
390
  - Generated scripts focus on stopping scroll and driving clicks
391
  - Each variation tests different psychological triggers
 
399
  type=['mp4', 'mov', 'avi', 'mkv'],
400
  help="Upload a profitable ad video to analyze and create variations from"
401
  )
402
+ if uploaded_video is None:
 
 
 
403
  st.info("Please upload a reference video to begin analysis.")
404
 
405
  st.subheader("Additional Context (Optional)")
 
437
  if "analysis_results" in st.session_state and st.session_state["analysis_results"]:
438
  if st.button("Clear Results", use_container_width=True):
439
  del st.session_state["analysis_results"]
 
440
  st.rerun()
441
 
442
  # Generate & show results
443
  if uploaded_video and generate_button:
 
 
444
  with st.spinner("Analyzing video and generating scripts..."):
445
  video_bytes = uploaded_video.read()
446
  uploaded_video.seek(0)
 
455
  )
456
 
457
  if json_response:
458
+ insert_analysis_result(
459
+ video_name=uploaded_video.name,
460
+ offer_details=offer_details,
461
+ target_audience=target_audience,
462
+ specific_hook=specific_hooks,
463
+ additional_context=additional_context,
464
+ response=json_response
465
+ )
 
 
 
 
 
 
 
466
  st.session_state["analysis_results"] = json_response
 
 
467
 
468
  if "analysis_results" in st.session_state:
 
469
  json_response = st.session_state["analysis_results"]
470
 
471
  tab1, tab2, tab3 = st.tabs(["Script Variations", "Video Analysis", "Improvement Recommendations"])
 
472
  with tab1:
473
  display_script_variations(json_response)
474
  csv_content = create_csv_download(json_response)
 
481
 
482
  # ========== HISTORY ==========
483
  elif selected_tab == "History":
484
+ from database import get_all_results
485
+ history_items = get_all_results(limit=20)
486
+
487
+ if history_items:
488
+ video_titles = [
489
+ f"{item['video_name']} ({item['created_at'].strftime('%Y-%m-%d %H:%M')})"
490
+ for item in history_items
491
+ ]
492
+
493
+ selected = st.sidebar.radio("History Items", video_titles, index=0)
494
+ selected_index = video_titles.index(selected)
495
+ selected_data = history_items[selected_index]
496
+
497
+ st.subheader(f"Analysis for: {selected_data['video_name']}")
498
+ json_response = selected_data.get("response")
499
+
500
+ if json_response:
501
+ tab1, tab2, tab3 = st.tabs(["Script Variations", "Video Analysis", "Improvement Recommendations"])
502
+
503
+ with tab1:
504
+ display_script_variations(json_response)
505
+ with tab2:
506
+ display_video_analysis(json_response)
507
+ with tab3:
508
+ display_timestamp_improvements(json_response)
 
 
 
 
 
 
 
 
 
 
509
  else:
510
+ st.warning("No valid response data for this analysis.")
511
+ else:
512
+ st.sidebar.info("No saved analyses found.")
513
+ st.info("No saved history available.")
514
+
 
 
 
515
 
516
if __name__ == "__main__":
    # Top-level entry point: log the launch and record any unhandled crash
    # with a full traceback instead of letting it propagate silently.
    try:
        logger.info("Launching Streamlit app...")
        main()
    except Exception:  # broad by design: this is the outermost boundary
        logger.exception("Unhandled error during app launch.")