userIdc2024 committed on
Commit
9bc1376
·
verified ·
1 Parent(s): 003cffb

Upload 41 files

Browse files
app.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
from dotenv import load_dotenv

from app_pages.image_generation import render_bulk_image_generator
from app_pages.image_generation_library import render_image_variations_library
from app_pages.script_generator import generator_page
from app_pages.script_library import render_script_library
from app_pages.text_image_generation import render_multimodel_ad_generator
from app_pages.text_image_generation_library import render_text_image_library
from app_pages.video_analyser import render_video_analyser_page
from app_pages.video_library import render_video_library
from authen.authentication import login_gate, logout

# Environment must be loaded before any page config / DB access.
load_dotenv()
st.set_page_config(page_title="Gen AI + AI Library", page_icon="✨", layout="wide")

# ---------- Auth ----------
# login_gate() blocks rendering until the user is signed in and returns their id.
uid = login_gate()
with st.sidebar:
    st.markdown(f"**Signed in as:** `{uid}`")
    if st.button("Log out", use_container_width=True, key="btn_logout"):
        logout()
        st.rerun()

# Horizontal top-level switch between the generator tools and the library views.
section = st.radio(" ", ["Gen AI", "AI Library"], index=0, horizontal=True, key="main_section")
32
+
33
+ # ---------------------------- Gen AI ---------------------------------
34
def render_genai():
    """Render the "Gen AI" section: image generation, video analysis and script tabs."""
    _, content = st.columns([0.1, 1], gap="small")
    with content:
        image_tab, analyser_tab, script_tab = st.tabs(
            ["Image Gen", "Video Analyser", "Script Generator"]
        )

        # -------- Image Generation --------
        with image_tab:
            text_tab, img_tab = st.tabs(["Text", "Img"])
            with text_tab:
                render_multimodel_ad_generator(prefix="ig_text")
            with img_tab:
                render_bulk_image_generator(prefix="ig_img")

        # -------- Video Analyser --------
        with analyser_tab:
            render_video_analyser_page()

        # -------- Script Generator --------
        with script_tab:
            generator_page()
54
+
55
+
56
+ # ---------------------------- AI Library ---------------------------------
57
def render_ai_library():
    """Render the "AI Library" section: browse stored images, videos and scripts."""
    _, content = st.columns([0.1, 1], gap="small")
    with content:
        st.subheader("Library")
        image_tab, video_tab, scripts_tab = st.tabs(
            ["Image", "Analysed Video", "Generated Scripts"]
        )

        # ---------- Image Tab ----------
        with image_tab:
            generated_tab, variations_tab = st.tabs(["New generated", "Variations"])
            with generated_tab:
                render_text_image_library()
            with variations_tab:
                render_image_variations_library()

        # ---------- Video Library Tab ----------
        with video_tab:
            render_video_library()

        # ---------- Generated Scripts Tab ----------
        with scripts_tab:
            render_script_library()
83
+
84
+
85
+
86
# Top-level router: "Gen AI" gets the generator tools, anything else the library.
(render_genai if section == "Gen AI" else render_ai_library)()
app_pages/image_generation.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import zipfile, requests, logging, tempfile, shutil, os
3
+ import streamlit as st
4
+ from generator_function.image_processor import process_zip_and_generate_images
5
+
6
+ logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s")
7
+ logger = logging.getLogger(__name__)
8
+
9
+ def _zip_gallery_images(gallery_items):
10
+ if not gallery_items: return None
11
+ image_urls = [item[0] if isinstance(item, tuple) else item for item in gallery_items]
12
+ temp_dir = tempfile.mkdtemp()
13
+ try:
14
+ zip_path = tempfile.NamedTemporaryFile(delete=False, suffix=".zip").name
15
+ for i, url in enumerate(image_urls):
16
+ try:
17
+ ext = url.split('?')[0].split('.')[-1]; ext = ext if ext and len(ext) <= 5 else "png"
18
+ file_path = os.path.join(temp_dir, f"image_{i}.{ext}")
19
+ if url.startswith(("http://", "https://")):
20
+ resp = requests.get(url, timeout=10); resp.raise_for_status()
21
+ with open(file_path, "wb") as f: f.write(resp.content)
22
+ elif os.path.exists(url):
23
+ shutil.copy(url, file_path)
24
+ except Exception as e:
25
+ logger.error(f"Error processing image {url}: {e}")
26
+ with zipfile.ZipFile(zip_path, "w") as zipf:
27
+ for file_name in os.listdir(temp_dir):
28
+ zipf.write(os.path.join(temp_dir, file_name), arcname=file_name)
29
+ return zip_path
30
+ except Exception as e:
31
+ logger.critical(f"Failed to create zip: {e}"); return None
32
+ finally:
33
+ shutil.rmtree(temp_dir, ignore_errors=True)
34
+
35
def render_bulk_image_generator(prefix: str = "ig_img"):
    """Bulk image-variation generator: upload, option controls, generation, ZIP download.

    All widget keys are namespaced with *prefix* so several instances can coexist.
    """
    zip_file = st.file_uploader("Upload Zip or Single File", type=["zip", "png", "jpg", "jpeg"], key=f"{prefix}_upload")
    category = st.text_input("Category", key=f"{prefix}_category")

    option_cols = st.columns([2, 2.5, 2.5, 2.5, 2.5, 2])
    with option_cols[0]:
        blur = st.checkbox("Blur Image", key=f"{prefix}_blur")
    with option_cols[1]:
        size = st.selectbox("Image Size", ["auto", "1024x1024", "1536x1024", "1024x1536"], key=f"{prefix}_size")
    with option_cols[2]:
        quality = st.selectbox("Quality", ["auto", "low", "medium"], key=f"{prefix}_quality")
    with option_cols[3]:
        sentiment = st.selectbox("Sentiment", ["as original image", "positive", "negative"], key=f"{prefix}_sentiment")
    with option_cols[4]:
        platform = st.selectbox("Platform", ["Facebook", "Native", "Newsbreak", "Google Display Network"], key=f"{prefix}_platform")
    with option_cols[5]:
        num_images = st.slider("No. of Images to be generated:", 1, 10, value=2, step=1, key=f"{prefix}_num_images")

    user_prompt = st.text_area("User Prompt", height=100, key=f"{prefix}_user_prompt")

    action_cols = st.columns([1, 1, 1])
    with action_cols[0]:
        demo_btn = st.button("Generate Demo Image", key=f"{prefix}_demo")
    with action_cols[1]:
        gen_all_btn = st.button("Generate All Images", key=f"{prefix}_gen_all")
    with action_cols[2]:
        download_btn = st.button("Download All", key=f"{prefix}_download_all")

    gallery_key = f"{prefix}_gallery"
    gallery = st.session_state.setdefault(gallery_key, [])

    if demo_btn or gen_all_btn:
        if not (zip_file and category and user_prompt):
            st.warning("Please upload a file and fill all required fields.")
        else:
            work_dir = tempfile.mkdtemp()
            try:
                # Keep the .zip name for archives; normalise single files to "input.<ext>".
                if zip_file.name.lower().endswith(".zip"):
                    upload_name = zip_file.name
                else:
                    upload_name = f"input.{zip_file.name.split('.')[-1]}"
                upload_path = os.path.join(work_dir, upload_name)
                with open(upload_path, "wb") as f:
                    f.write(zip_file.read())
                images = process_zip_and_generate_images(upload_path, category, size, quality, user_prompt, sentiment, platform, num_images, demo_btn, gallery, blur)
                st.session_state[gallery_key] = images or []
                images = st.session_state[gallery_key]
                if not images:
                    st.info("No images generated.")
                else:
                    grid = st.columns(4)
                    for idx, img_path in enumerate(images):
                        with grid[idx % 4]:
                            st.image(img_path, use_container_width=True)
            except Exception as e:
                st.error(f"Error: {e}")
                logger.exception("Generation failed (embedded).")
            finally:
                shutil.rmtree(work_dir, ignore_errors=True)

    if download_btn:
        zip_path = _zip_gallery_images(st.session_state.get(gallery_key, []))
        if zip_path:
            with open(zip_path, "rb") as f:
                st.download_button("Download ZIP", data=f.read(), file_name="generated_images.zip", mime="application/zip", key=f"{prefix}_zip_dl_btn", use_container_width=True)
            try:
                os.remove(zip_path)
            except Exception:
                pass
        else:
            st.warning("No images to zip and download.")
app_pages/image_generation_library.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from datetime import datetime, timedelta, date
4
+ from typing import Any, Dict, List
5
+
6
+ import streamlit as st
7
+
8
+ from database.connections import get_results_collection
9
+
10
+ def _distinct_options(col, field: str, source: str) -> List[str]:
11
+ try:
12
+ vals = col.distinct(field, {"source": source})
13
+ vals = [v for v in vals if v not in (None, "", [])]
14
+ vals = sorted(set(vals))
15
+ return ["All"] + vals if vals else ["All"]
16
+ except Exception:
17
+ return ["All"]
18
+
19
+
20
+ def _query_docs(
21
+ col,
22
+ source: str,
23
+ start_d: date,
24
+ end_d: date,
25
+ category: str,
26
+ platform: str,
27
+ limit: int = 200,
28
+ ) -> List[Dict[str, Any]]:
29
+ try:
30
+ start_dt = datetime.combine(start_d, datetime.min.time())
31
+ end_dt = datetime.combine(end_d + timedelta(days=1), datetime.min.time())
32
+ q: Dict[str, Any] = {"source": source, "created_at": {"$gte": start_dt, "$lt": end_dt}}
33
+ if category and category != "All":
34
+ q["category"] = category
35
+ if platform and platform != "All":
36
+ q["platform"] = platform
37
+ cursor = col.find(q).sort("created_at", -1).limit(limit)
38
+ return list(cursor)
39
+ except Exception:
40
+ return []
41
+
42
+
43
def _render_grid(docs: List[Dict[str, Any]]) -> None:
    """Render every unique image URL from *docs* in a 4-wide grid with captions."""
    unique_items = []
    seen_urls = set()
    for doc in docs:
        for url in (doc.get("urls") or []):
            if url and url not in seen_urls:
                seen_urls.add(url)
                unique_items.append((url, doc))

    if not unique_items:
        st.info("No images for the current filters.")
        return

    grid = st.columns(4)
    for i, (url, meta) in enumerate(unique_items):
        with grid[i % 4]:
            try:
                st.image(url, use_container_width=True)
                cat = meta.get("category") or "—"
                # Platform may live at the top level or inside one of several
                # nested settings dicts, depending on which generator wrote it.
                plat = (
                    meta.get("platform")
                    or (meta.get("settings") or {}).get("platform")
                    or (meta.get("provider_settings") or {}).get("platform")
                    or (meta.get("metadata") or {}).get("platform")
                    or "—"
                )
                ts = meta.get("created_at")
                ts_str = ts.strftime("%Y-%m-%d %H:%M UTC") if isinstance(ts, datetime) else "—"
                st.caption(f"{cat} • {plat} • {ts_str}")
            except Exception:
                st.error("Failed to load image")
76
+
77
+
78
def render_image_variations_library() -> None:
    """Filterable gallery of previously generated image variations (source="variation")."""
    col = get_results_collection()
    if col is None:
        st.warning("Database not available. Set MONGO_URI and restart.")
        return

    today = datetime.utcnow().date()

    date_cols = st.columns(2)
    with date_cols[0]:
        start_date = st.date_input("Start date", value=today - timedelta(days=30), key="img_var_start")
    with date_cols[1]:
        end_date = st.date_input("End date", value=today, key="img_var_end")

    filter_cols = st.columns(2)
    with filter_cols[0]:
        category = st.selectbox(
            "Category",
            options=_distinct_options(col, "category", source="variation"),
            key="img_var_cat",
        )
    with filter_cols[1]:
        platform = st.selectbox(
            "Platform",
            options=_distinct_options(col, "platform", source="variation"),
            key="img_var_plat",
        )

    st.markdown("---")
    _render_grid(_query_docs(col, "variation", start_date, end_date, category, platform))
app_pages/script_generator.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import tempfile
3
+ import pandas as pd
4
+ import streamlit as st
5
+
6
+ from generator_function.script_generator import generate_scripts
7
+ from helpers_function.helpers import get_video_thumbnail_base64
8
+ from database.operations import insert_script_result
9
+ from authen.authentication import login_gate
10
+
11
+ uid = login_gate()
12
+
13
+ def _fmt_ts(ts) -> str:
14
+ try:
15
+ return ts.strftime("%Y-%m-%d %H:%M")
16
+ except Exception:
17
+ return "Unknown time"
18
+
19
+
20
def generator_page():
    """Script Generator page: upload a video, generate ad scripts, iterate, save to DB.

    Session-state keys used: "scripts" (list of generation rounds),
    "video_name", "video_path", "thumbnail", "meta" (the form inputs used
    for the initial generation).
    """
    st.subheader("Script Generator")

    uploaded_video = st.file_uploader(
        "Upload Video or ZIP (max 3 videos)", type=["mp4", "mov", "avi", "mkv", "zip"], key="sg_upload"
    )
    script_duration = st.slider("Script Duration (seconds)", 0, 180, 60, 5, key="sg_dur")
    num_scripts = st.slider("Number of Scripts", 1, 5, 3, key="sg_count")

    st.markdown("Additional Information")
    offer_details = st.text_area(
        "Offer Details",
        placeholder="e.g., Solar installation with $0 down payment...",
        key="sg_offer",
    )
    target_audience = st.text_area(
        "Target Audience",
        placeholder="e.g., 40+ homeowners with high electricity bills...",
        key="sg_aud",
    )
    specific_hooks = st.text_area(
        "Specific Hooks to Test",
        placeholder="e.g., Government rebate angle...",
        key="sg_hooks",
    )
    additional_context = st.text_area(
        "Additional Context",
        placeholder="Compliance requirements, brand guidelines...",
        key="sg_ctx",
    )

    script_button = st.button("Generate Scripts", use_container_width=True, key="sg_go")
    if script_button and uploaded_video:
        # Persist the upload to disk so generate_scripts (and a later
        # "Generate More" round) can re-read it by path.
        with tempfile.NamedTemporaryFile(
            delete=False, suffix=os.path.splitext(uploaded_video.name)[1]
        ) as tmp:
            tmp.write(uploaded_video.read())
            video_path = tmp.name

        with st.spinner("Generating scripts..."):
            st.session_state.setdefault("scripts", [])
            result = generate_scripts(
                video_path,
                offer_details,
                target_audience,
                specific_hooks,
                additional_context,
                num_scripts=num_scripts,
                duration=script_duration,
            )
            if result and "script_variations" in result:
                st.session_state["scripts"].append(
                    {"prompt_used": "Initial Generation", "variations": result["script_variations"]}
                )
            st.session_state["video_name"] = uploaded_video.name
            st.session_state["video_path"] = video_path
            try:
                st.session_state["thumbnail"] = get_video_thumbnail_base64(video_path)
            except Exception:
                # Thumbnail is decorative; never let it break generation.
                st.session_state["thumbnail"] = ""
            st.session_state["meta"] = {
                "offer_details": offer_details,
                "target_audience": target_audience,
                "specific_hook": specific_hooks,
                "additional_context": additional_context,
            }

    if st.session_state.get("scripts"):
        for round_idx, round_data in enumerate(st.session_state["scripts"], 1):
            st.markdown(f"### Generation Round {round_idx}")
            st.text_input(
                "Prompt used:", round_data["prompt_used"], disabled=True, key=f"prompt_{round_idx}"
            )
            for i, variation in enumerate(round_data["variations"], 1):
                st.markdown(f"#### Variation {i}: {variation.get('variation_name', 'Var')}")
                df = pd.DataFrame(variation.get("script_table", []))
                st.table(df)

        st.divider()
        save_button = st.button("Save to DB", use_container_width=True, key="sg_save")
        if save_button:
            # BUGFIX: "meta" may be missing from session state (e.g. after a
            # partial rerun); .get avoids a KeyError crashing the page.
            meta = st.session_state.get("meta", {})
            try:
                insert_script_result(
                    video_name=st.session_state.get("video_name", "unknown"),
                    offer_details=meta.get("offer_details", ""),
                    target_audience=meta.get("target_audience", ""),
                    specific_hook=meta.get("specific_hook", ""),
                    additional_context=meta.get("additional_context", ""),
                    response=st.session_state["scripts"],
                    thumbnail=st.session_state.get("thumbnail", ""),
                    created_by=uid,
                    num_scripts=len(st.session_state["scripts"][-1]["variations"]),
                    category="general",
                )
                st.success("Scripts saved to database!")
            except Exception as e:
                st.error(f"Failed to save scripts: {e}")

        st.subheader("Generate More Scripts")
        more_num = st.slider("How many more scripts?", 1, 5, 1, key="sg_more_count")
        more_prompt = st.text_area("Required Prompt", placeholder="Add specific guidance", key="sg_more_prompt")
        if st.button("Generate More Scripts", use_container_width=True, key="sg_more_go"):
            if not more_prompt.strip():
                st.error("Please provide a prompt before generating more scripts.")
            else:
                video_path = st.session_state.get("video_path")
                if not video_path:
                    st.error("No video available. Please upload again.")
                else:
                    # Same KeyError guard as the save path above.
                    meta = st.session_state.get("meta", {})
                    with st.spinner("Generating more scripts..."):
                        extra_result = generate_scripts(
                            video_path,
                            meta.get("offer_details", ""),
                            meta.get("target_audience", ""),
                            meta.get("specific_hook", ""),
                            meta.get("additional_context", "") + "\n\n" + more_prompt,
                            num_scripts=more_num,
                            duration=script_duration,
                        )
                        if extra_result and "script_variations" in extra_result:
                            st.session_state["scripts"].append(
                                {"prompt_used": more_prompt, "variations": extra_result["script_variations"]}
                            )
app_pages/script_library.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime, timedelta
2
+ from typing import Any, Dict
3
+
4
+ from database.operations import find_script_results
5
+ import streamlit as st
6
+ import pandas as pd
7
+
8
def render_script_library() -> None:
    """Browse, preview and export previously generated ad scripts.

    Renders date filters, a paginated dropdown of saved generations, the
    selected generation's variation tables, and a combined CSV download.
    """
    st.subheader("Generated Scripts")

    today = datetime.utcnow().date()
    default_start = today - timedelta(days=30)

    f1, f2, _ = st.columns([1, 1, 1.2])
    with f1:
        start_date_scripts = st.date_input("Start date", value=default_start, key="scripts_start")
    with f2:
        end_date_scripts = st.date_input("End date", value=today, key="scripts_end")

    if "scripts_page" not in st.session_state:
        st.session_state.scripts_page = 0

    start_dt = datetime.combine(start_date_scripts, datetime.min.time())
    # The end date is inclusive, so query strictly before the next midnight.
    end_dt = datetime.combine(end_date_scripts + timedelta(days=1), datetime.min.time())

    records, total_count = find_script_results(
        start_date=start_dt,
        end_date=end_dt,
        page=st.session_state.scripts_page,
        page_size=20,
    )

    if total_count == 0 or not records:
        st.info("No script generations for the selected filters.")
        # BUGFIX: st.stop() here would abort the entire app run and blank any
        # content rendered after this embedded component; return is enough.
        return

    start_idx = st.session_state.scripts_page * 20 + 1
    end_idx = min(start_idx + len(records) - 1, total_count)
    st.caption(f"Showing {start_idx}-{end_idx} of {total_count} items")

    def _label(d: Dict[str, Any]) -> str:
        # "<timestamp> · <video name>" label for the dropdown.
        ts = d.get("created_at")
        ts_s = ts.strftime("%Y-%m-%d %H:%M") if hasattr(ts, "strftime") else "Unknown time"
        vn = d.get("video_name") or d.get("file_name") or "Untitled"
        return f"{ts_s} · {vn}"

    options = [_label(d) for d in records]
    selected_label = st.selectbox("Select generated script", options=options, index=0, key="scripts_sel_dropdown")
    sel_idx = options.index(selected_label) if selected_label in options else 0
    doc = records[sel_idx]

    if doc.get("thumbnail"):
        try:
            st.image("data:image/jpeg;base64," + doc["thumbnail"], width=160, caption="Thumbnail")
        except Exception:
            pass  # thumbnail is decorative only

    json_response = doc.get("response")
    if not json_response:
        st.info("No variations saved.")
        return

    all_tables = []
    if isinstance(json_response, list):
        for round_idx, round_data in enumerate(json_response, 1):
            st.markdown(f"#### Generation Round {round_idx}")
            st.text_input(
                "Prompt used:",
                round_data.get("prompt_used", "N/A"),
                disabled=True,
                key=f"hist_prompt_{doc.get('_id')}_{round_idx}",
            )
            for i, variation in enumerate(round_data.get("variations", []), 1):
                st.markdown(f"**Variation {i}: {variation.get('variation_name', 'Var')}**")
                df = pd.DataFrame(variation.get("script_table", []))
                st.table(df)
                if not df.empty:
                    # Tag rows so the combined CSV keeps provenance.
                    df["Variation"] = variation.get("variation_name", f"Var{i}")
                    df["Round"] = round_idx
                    all_tables.append(df)

    if all_tables:
        csv_scripts = pd.concat(all_tables, ignore_index=True).to_csv(index=False)
        st.download_button(
            "Download CSV",
            data=csv_scripts,
            file_name=f"{(doc.get('video_name') or doc.get('file_name') or 'scripts')}_scripts.csv",
            mime="text/csv",
            use_container_width=True,
            key="scripts_download_csv",
        )
app_pages/text_image_generation.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import streamlit as st
3
+
4
+ from generator_function.image_function import (
5
+ handle_image_generation_optimized,
6
+ MODEL_REGISTRY,
7
+ get_model_config,
8
+
9
+ )
10
+
11
def render_multimodel_ad_generator(prefix: str = "img_text"):
    """Text-to-image ad generator UI: model/aspect/count controls plus prompt box.

    Widget keys are namespaced with *prefix* so multiple instances can coexist.
    """
    left, right = st.columns([1, 1])

    with left:
        model_key = st.selectbox("Model", list(MODEL_REGISTRY.keys()), index=0, key=f"{prefix}_model")
        config = get_model_config(model_key)
        aspect_options = config["aspect_ratios"] if config else ["1:1"]
        aspect_ratio = st.selectbox("Aspect Ratio", aspect_options, index=0, key=f"{prefix}_ar")
        num_images = st.slider("Number of images", 1, 10, 1, key=f"{prefix}_count")
        category = st.text_input("Category", key=f"{prefix}_cat", placeholder="e.g. skincare, supplements")
        platform = st.selectbox(
            "Platform",
            ["Facebook", "Instagram", "TikTok", "YouTube", "Google Display Network"],
            index=0,
            key=f"{prefix}_plat",
        )

    with right:
        prompt = st.text_area("Prompt", placeholder="Describe the image you want to generate...", height=160, key=f"{prefix}_prompt")
        debug_mode = st.checkbox("Debug mode", key=f"{prefix}_debug", value=False)

    if st.button("Generate Images", type="primary", key=f"{prefix}_go"):
        handle_image_generation_optimized(
            model_key=model_key,
            aspect_ratio=aspect_ratio,
            prompt=prompt,
            num_images=num_images,
            debug_mode=debug_mode,
            # Empty strings are normalised to None for the backend.
            category=(category.strip() or None),
            platform=platform or None,
        )
app_pages/text_image_generation_library.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from datetime import datetime, timedelta, date
4
+ from typing import Any, Dict, List
5
+
6
+ import streamlit as st
7
+
8
+ from database.connections import get_results_collection
9
+
10
+
11
+ def _distinct_options(col, field: str, source: str) -> List[str]:
12
+ try:
13
+ vals = col.distinct(field, {"source": source})
14
+ vals = [v for v in vals if v not in (None, "", [])]
15
+ vals = sorted(set(vals))
16
+ return ["All"] + vals if vals else ["All"]
17
+ except Exception:
18
+ return ["All"]
19
+
20
+
21
+ def _query_docs(
22
+ col,
23
+ source: str,
24
+ start_d: date,
25
+ end_d: date,
26
+ category: str,
27
+ platform: str,
28
+ limit: int = 200,
29
+ ) -> List[Dict[str, Any]]:
30
+ try:
31
+ start_dt = datetime.combine(start_d, datetime.min.time())
32
+ end_dt = datetime.combine(end_d + timedelta(days=1), datetime.min.time())
33
+ q: Dict[str, Any] = {"source": source, "created_at": {"$gte": start_dt, "$lt": end_dt}}
34
+ if category and category != "All":
35
+ q["category"] = category
36
+ if platform and platform != "All":
37
+ q["platform"] = platform
38
+ cursor = col.find(q).sort("created_at", -1).limit(limit)
39
+ return list(cursor)
40
+ except Exception:
41
+ return []
42
+
43
+
44
def _render_grid(docs: List[Dict[str, Any]], key_prefix: str) -> None:
    """Render unique image URLs from *docs* in a 4-column grid.

    *key_prefix* is currently unused but kept for interface compatibility
    with callers that pass it.
    """
    deduped = []
    seen = set()
    for doc in docs:
        for url in (doc.get("urls") or []):
            if url and url not in seen:
                seen.add(url)
                deduped.append((url, doc))

    if not deduped:
        st.info("No images for the current filters.")
        return

    grid = st.columns(4)
    for i, (url, meta) in enumerate(deduped):
        with grid[i % 4]:
            try:
                st.image(url, use_container_width=True)
                cat = meta.get("category") or "—"
                # Platform may be stored top-level or nested, depending on writer.
                plat = (
                    meta.get("platform")
                    or (meta.get("settings") or {}).get("platform")
                    or (meta.get("provider_settings") or {}).get("platform")
                    or (meta.get("metadata") or {}).get("platform")
                    or "—"
                )
                ts = meta.get("created_at")
                ts_str = ts.strftime("%Y-%m-%d %H:%M UTC") if isinstance(ts, datetime) else "—"
                st.caption(f"{cat} • {plat} • {ts_str}")
            except Exception:
                st.error("Failed to load image")
+
77
+
78
def render_text_image_library() -> None:
    """Filterable gallery of text-prompt-generated images (source="text")."""
    col = get_results_collection()
    if col is None:
        st.warning("Database not available. Set MONGO_URI and restart.")
        return

    today = datetime.utcnow().date()

    date_cols = st.columns(2)
    with date_cols[0]:
        start_date = st.date_input("Start date", value=today - timedelta(days=30), key="img_text_start")
    with date_cols[1]:
        end_date = st.date_input("End date", value=today, key="img_text_end")

    filter_cols = st.columns(2)
    with filter_cols[0]:
        category = st.selectbox(
            "Category",
            options=_distinct_options(col, "category", source="text"),
            key="img_text_cat",
        )
    with filter_cols[1]:
        platform = st.selectbox(
            "Platform",
            options=_distinct_options(col, "platform", source="text"),
            key="img_text_plat",
        )

    st.markdown("---")
    docs = _query_docs(col, "text", start_date, end_date, category, platform)
    _render_grid(docs, key_prefix="text")
app_pages/video_analyser.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import os, tempfile
3
+ import streamlit as st
4
+ import base64
5
+ from typing import Any, Dict
6
+ from background_task.generation_tasks import run_and_store_video_analysis
7
+ from app_pages.video_library import render_analyzer_results
8
+
9
+
10
def render_video_library_page() -> None:
    """Standalone entry point that delegates to the video library renderer."""
    # Imported lazily to avoid a circular import with app_pages.video_library.
    from app_pages.video_library import render_video_library
    render_video_library()
13
+
14
+
15
def render_video_analyser_page(task_manager=None, uid: str = "anonymous") -> None:
    """Upload a video, preview it inline, then run and persist the analysis.

    *task_manager* is accepted for interface compatibility but not used here.
    """
    st.markdown("### Video Analyser")

    category_va = st.text_input("Category", key="va_category")
    uploaded = st.file_uploader(
        "Upload video",
        type=["mp4", "mov", "mkv", "webm", "avi"],
        key="va_upload",
    )

    video_bytes = None
    video_ext = "mp4"

    if uploaded is not None:
        video_bytes = uploaded.read()
        if uploaded.name and "." in uploaded.name:
            video_ext = uploaded.name.rsplit(".", 1)[-1].lower() or "mp4"

        # Inline base64 <video> so the preview plays without writing a file.
        encoded = base64.b64encode(video_bytes).decode("utf-8")
        st.caption("Preview")
        st.markdown(
            f"""
            <video controls width="720">
                <source src="data:video/{video_ext};base64,{encoded}">
                Your browser does not support the video tag.
            </video>
            """,
            unsafe_allow_html=True,
        )

    go = st.button("Analyze", key="va_analyse")
    if not go:
        return

    if not video_bytes or not category_va.strip():
        st.warning("Please provide a category and upload a video.")
        return

    # The analyser needs a real file path, so stage the bytes to a temp file.
    suffix = f".{video_ext}" if video_ext else ".mp4"
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
        tmp.write(video_bytes)
        tmp_path = tmp.name

    try:
        with st.spinner("Analyzing video..."):
            result: Dict[str, Any] = run_and_store_video_analysis(
                category=category_va.strip(),
                uploaded_file_path=tmp_path,
                created_by=uid,
            )
    finally:
        try:
            os.remove(tmp_path)
        except Exception:
            pass

    if isinstance(result, dict) and result.get("results"):
        st.success(f"Saved analysis (_id: {result.get('_id', 'N/A')})")
        render_analyzer_results(result["results"])
    else:
        st.error("Analysis failed. Check GEMINI_API_KEY and try again.")
77
+
app_pages/video_library.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from datetime import datetime, timedelta, date
5
+ from typing import Any, Dict, List, Optional
6
+
7
+ import streamlit as st
8
+
9
+ from database.operations import find_video_analyses, list_video_categories
10
+ from components.render_analysis import render_analyzer_results
11
+
12
+
13
+ def _coerce_dt(val: Any) -> Optional[datetime]:
14
+ if isinstance(val, datetime):
15
+ return val
16
+ try:
17
+ return datetime.fromisoformat(str(val))
18
+ except Exception:
19
+ return None
20
+
21
+
22
def _label_for_item(doc: Dict[str, Any]) -> str:
    """Dropdown label: 'timestamp · category · model' with em-dash fallbacks."""
    ts = _coerce_dt(doc.get("created_at"))
    when = ts.strftime("%Y-%m-%d %H:%M") if ts else "Unknown time"
    parts = [when, doc.get("category") or "—", doc.get("analyzer_model") or "—"]
    return " · ".join(parts)
29
+
30
+
31
def render_video_library() -> None:
    """Video Library page: date/category filters, analysis picker, JSON export."""
    st.subheader("Video Library")

    # --- Filters ---
    today = datetime.utcnow().date()
    default_start = today - timedelta(days=30)

    c1, c2, c3 = st.columns([1, 1, 1])
    with c1:
        start_date: date = st.date_input("Start date", value=default_start, key="vid_lib_start")
    with c2:
        end_date: date = st.date_input("End date", value=today, key="vid_lib_end")
    with c3:
        try:
            cats: List[str] = list_video_categories()
            if "All" not in cats:
                cats = ["All"] + [c for c in cats if c != "All"]
        except Exception:
            cats = ["All"]
        category = st.selectbox("Category", options=cats, index=0, key="vid_lib_cat")

    st.markdown("---")

    # --- Query ---
    try:
        window_start = datetime.combine(start_date, datetime.min.time())
        # Inclusive end date -> query strictly before the next midnight.
        window_end = datetime.combine(end_date + timedelta(days=1), datetime.min.time())
        docs: List[Dict[str, Any]] = find_video_analyses(
            category=None if (not category or category == "All") else category,
            start_date=window_start,
            end_date=window_end,
            limit=200,
        )
    except Exception as e:
        st.error(f"Failed to load video analyses: {e}")
        return

    if not docs:
        st.info("No video analyses for the selected filters.")
        return

    # --- Selection ---
    labels = [_label_for_item(d) for d in docs]
    selected_label = st.selectbox(
        "Select an analysis",
        options=labels,
        index=0,
        key="vid_lib_sel",
    )
    doc = docs[labels.index(selected_label) if selected_label in labels else 0]

    thumb_b64 = doc.get("thumbnail")
    if thumb_b64:
        try:
            st.image(
                f"data:image/jpeg;base64,{thumb_b64}",
                caption="Thumbnail",
                width=220,
            )
        except Exception:
            pass  # thumbnail is decorative only

    # --- Results + export ---
    analysis = doc.get("results") or {}
    if not analysis:
        st.info("No analysis stored for this item.")
        return

    render_analyzer_results(analysis)
    try:
        pretty = json.dumps(analysis, indent=2, ensure_ascii=False)
        st.download_button(
            "Download JSON",
            data=pretty.encode("utf-8"),
            file_name=f"video_analysis_{doc.get('_id','item')}.json",
            mime="application/json",
            use_container_width=True,
            key="vid_lib_json_dl",
        )
    except Exception:
        pass  # non-serialisable analysis: skip the export button
authen/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+
authen/authentication.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ from datetime import datetime
3
+
4
+ import streamlit as st
5
+ import os
6
+ from pymongo import MongoClient, ASCENDING
7
+ from pymongo.errors import OperationFailure
8
+ from dotenv import load_dotenv
9
+
10
+ from .password_utils import hash_password, verify_password, verify_legacy_password
11
+
12
+ load_dotenv()
13
+ EMAIL_RE = re.compile(r"^[^@\s]+@[^@\s]+\.[^@\s]+$")
14
+
15
def _users_col():
    """Return the MongoDB users collection, ensuring the unique email index.

    Raises:
        RuntimeError: if the MONGO_URI environment variable is not set.
    """
    mongo_uri = os.getenv("MONGO_URI")
    db_name = os.getenv("MONGO_DB", "Gen_Ai")
    coll_name = os.getenv("USERS_COLLECTION", "users")
    if not mongo_uri:
        raise RuntimeError("MONGO_URI is not set")
    collection = MongoClient(mongo_uri)[db_name][coll_name]
    _ensure_email_unique_index(collection)
    return collection
26
+
27
def _ensure_email_unique_index(col):
    """Ensure `col` has a unique index on `email`, replacing any non-unique one."""
    try:
        has_unique = False
        stale_name = None
        for idx in col.list_indexes():
            if list(idx.get("key", {}).items()) == [("email", 1)]:
                if idx.get("unique", False):
                    has_unique = True
                else:
                    stale_name = idx["name"]
                break
        if stale_name:
            col.drop_index(stale_name)
        if not has_unique:
            col.create_index([("email", ASCENDING)], unique=True)
    except OperationFailure:
        # Index management may be forbidden by the connected role; best-effort only.
        pass
40
+
41
def login_or_create(email: str, password: str):
    """Authenticate a user by email/password.

    Returns:
        A (success, message) tuple. On success the message is the normalized
        email and `st.session_state["uid"]` is set. Legacy PBKDF2 password
        records (dicts with salt/hash) are transparently upgraded to bcrypt.
    """
    if not email or not password:
        return False, "Enter both email and password."
    email_norm = email.strip().lower()
    if not EMAIL_RE.match(email_norm):
        return False, "Enter a valid email address."

    col = _users_col()
    user = col.find_one({"email": email_norm})

    if not user:
        # BUG FIX: this branch previously called st.warning() and implicitly
        # returned None, which crashed the caller's `ok, msg = ...` unpacking.
        # Always return a (success, message) tuple.
        return False, "Invalid credentials."

    pwd = user.get("password")
    if isinstance(pwd, str):
        # Current-format bcrypt hash.
        if not verify_password(password, pwd):
            return False, "Invalid credentials."
        col.update_one({"_id": user["_id"]}, {"$set": {"last_login": datetime.utcnow()}})
        st.session_state["uid"] = email_norm
        return True, email_norm

    if isinstance(pwd, dict):
        # Legacy PBKDF2 record: verify, then upgrade in place to bcrypt.
        if verify_legacy_password(password, pwd.get("salt"), pwd.get("hash")):
            new_hash = hash_password(password)
            col.update_one(
                {"_id": user["_id"]},
                {"$set": {"password": new_hash, "last_login": datetime.utcnow()}}
            )
            st.session_state["uid"] = email_norm
            return True, email_norm
        return False, "Invalid credentials."

    # Unknown password-record shape (previously fell through returning None).
    return False, "Invalid credentials."
72
+
73
+
74
+
75
def logout():
    """Remove the authenticated-session keys from Streamlit session state."""
    for key in ("uid", "login_ts"):
        st.session_state.pop(key, None)
78
+
79
def login_gate() -> str:
    """Gate the app behind a login form; returns the signed-in uid.

    When not authenticated, renders the form and calls st.stop() so nothing
    below this call ever executes for anonymous visitors.
    """
    # Already authenticated: short-circuit.
    if st.session_state.get("uid"):
        return st.session_state["uid"]

    # Vertical spacer, then a centered column holding the form.
    st.markdown("<div style='height: 6vh'></div>", unsafe_allow_html=True)
    c1, c2, c3 = st.columns([1, 1, 1])
    with c2:
        st.markdown("### Log in")
        with st.form("login_form", clear_on_submit=False):
            email = st.text_input("Email", key="login_email", placeholder="you@example.com")
            password = st.text_input("Password", type="password", key="login_pass")
            submit = st.form_submit_button("Continue", use_container_width=True)
            if submit:
                ok, msg = login_or_create(email, password)
                if ok:
                    # On success `msg` carries the normalized email (the uid).
                    st.session_state["uid"] = msg
                    st.session_state["login_ts"] = datetime.utcnow().isoformat()
                    st.rerun()
                else:
                    # NOTE(review): login_or_create can return None for unknown
                    # users (non-tuple), which would break the unpacking above —
                    # confirm/fix that function's return contract.
                    st.error(msg)
    # Halt the script run so the rest of the app never renders unauthenticated.
    st.stop()
100
+
authen/password_utils.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+ import bcrypt, binascii, hashlib, secrets
3
+
4
def hash_password(password: str) -> str:
    """Hash `password` with bcrypt (cost factor 12); returns the hash as text."""
    salt = bcrypt.gensalt(rounds=12)
    digest = bcrypt.hashpw(password.encode("utf-8"), salt)
    return digest.decode("utf-8")
6
+
7
def verify_password(password: str, hashed: str) -> bool:
    """Check plaintext `password` against a stored bcrypt hash; False on any error."""
    try:
        matched = bcrypt.checkpw(password.encode("utf-8"), hashed.encode("utf-8"))
    except Exception:
        # Malformed hash, wrong type, etc. — treat as a failed login.
        return False
    return matched
12
+
13
def verify_legacy_password(password: str, salt_hex: Optional[str], hash_hex: Optional[str]) -> bool:
    """Verify a legacy PBKDF2-HMAC-SHA256 (200k iterations) password record.

    Returns False when either hex component is missing; comparison is
    constant-time via secrets.compare_digest.
    """
    if not (salt_hex and hash_hex):
        return False
    derived = hashlib.pbkdf2_hmac(
        "sha256",
        password.encode("utf-8"),
        binascii.unhexlify(salt_hex.encode()),
        200_000,
    )
    return secrets.compare_digest(binascii.hexlify(derived).decode(), hash_hex)
background_task/__init__.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .task_manager import TaskManager
2
+ from .generation_tasks import (
3
+ register_video_analyzer_tasks,
4
+ enqueue_video_analyzer,
5
+ register_script_generator_tasks,
6
+ enqueue_script_generation,
7
+ register_text_to_image_tasks,
8
+ enqueue_text_to_image,
9
+ register_image_gen_tasks,
10
+ enqueue_image_gen,
11
+ )
12
+
13
def register_all_background_tasks(manager: TaskManager) -> None:
    """Register all background tasks with a given TaskManager instance."""
    registrars = (
        register_video_analyzer_tasks,
        register_script_generator_tasks,
        register_text_to_image_tasks,
        register_image_gen_tasks,
    )
    for register in registrars:
        register(manager)
background_task/generation_tasks.py ADDED
@@ -0,0 +1,288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ from datetime import datetime
5
+ from typing import Any, Dict, List, Optional
6
+
7
+
8
+ from generator_function.video_analyzer_services import run_and_store_video_analysis
9
+ from generator_function.script_generator import generate_scripts
10
+ from database.operations import insert_script_result
11
+ from helpers_function.helpers import get_video_thumbnail_base64
12
+ from database.connections import get_results_collection
13
+ from generator_function.image_function import generate_images_parallel
14
+
15
+ from core.task_enum import TaskType
16
+ from .utils import safe_copy_temp, safe_unlink
17
+
18
+ from database.operations import start_job, finish_job
19
+
20
+ # --- TEXT -> IMAGE
21
+
22
def background_text_to_image(
    job_id: str,
    progress_cb,
    *,
    model_key: str,
    aspect_ratio: str,
    prompt: str,
    num_images: int,
    category: Optional[str],
    platform: Optional[str],
    created_by: Optional[str],
) -> Dict[str, Any]:
    """
    Runs multi-model text-to-image generation and logs a job to the results collection.

    Args:
        job_id: In-memory TaskManager job id (used only for progress reporting).
        progress_cb: Callable(job_id, percent, message) supplied by TaskManager.
        model_key / aspect_ratio / prompt / num_images: Forwarded verbatim to
            generate_images_parallel.
        category / platform / created_by: Metadata persisted with the DB job.

    Returns:
        Dict with success flag, task type, generated urls, and any errors.
    """
    progress_cb(job_id, 10, "Starting text→image generation...")

    # Persist an audit record only when the results collection is reachable.
    results_col = get_results_collection()
    db_job_id: Optional[str] = None

    if results_col is not None:
        db_job_id = start_job(
            results_col,
            type="generation",
            created_by=created_by or "anonymous",
            category=(category or "general"),
            inputs={"model_key": model_key, "aspect_ratio": aspect_ratio, "num_images": num_images},
            settings={"platform": platform},
            user_prompt=prompt,
        )

    # Prefer R2-hosted urls; fall back to the provider's source urls.
    r2_urls, source_urls, errors = generate_images_parallel(model_key, aspect_ratio, prompt, num_images)
    urls = r2_urls or source_urls

    if results_col is not None and db_job_id:
        finish_job(
            results_col,
            db_job_id,
            status="completed" if urls else "failed",
            outputs_urls=urls or [],
            provider_update={"errors": errors} if errors else None,
        )

    progress_cb(job_id, 100, f"Generated {len(urls)} image(s).")
    return {"success": True, "type": TaskType.TEXT_TO_IMAGE, "urls": urls, "errors": errors or []}
68
+
69
+
70
def register_text_to_image_tasks(task_manager) -> None:
    """Bind the text→image worker to TaskType.TEXT_TO_IMAGE on `task_manager`."""
    task_manager.register(TaskType.TEXT_TO_IMAGE, background_text_to_image)
72
+
73
+
74
def enqueue_text_to_image(task_manager, **kwargs) -> str:
    """Create a TEXT_TO_IMAGE job, submit the worker, and return the job id."""
    new_job = task_manager.create_job(TaskType.TEXT_TO_IMAGE)
    task_manager.submit(new_job, background_text_to_image, **kwargs)
    return new_job
78
+
79
+
80
+ # --- IMAGE GEN
81
+
82
def background_image_gen(
    job_id: str,
    progress_cb,
    *,
    upload_path: str,
    category: str,
    size: str,
    quality: str,
    user_prompt: str,
    sentiment: str,
    platform: str,
    num_images: int,
    blur: bool,
) -> Dict[str, Any]:
    """
    Runs image generation from uploaded asset(s) (zip or single); returns paths/urls.

    All keyword arguments are forwarded to process_zip_and_generate_images.
    """
    progress_cb(job_id, 10, "Processing input archive...")

    # Function-local import: this module is only needed by this worker.
    from generator_function.image_processor import process_zip_and_generate_images

    # NOTE(review): the literal `False, []` arguments presumably disable some
    # optional feature of process_zip_and_generate_images — confirm against its
    # signature before touching this call.
    images = process_zip_and_generate_images(
        upload_path, category, size, quality, user_prompt, sentiment, platform, num_images, False, [], blur
    )

    progress_cb(job_id, 100, f"Generated {len(images)} images.")
    return {"success": True, "type": TaskType.IMAGE_GEN, "images": images or []}
110
+
111
+
112
def register_image_gen_tasks(task_manager) -> None:
    """Bind the image-generation worker to TaskType.IMAGE_GEN on `task_manager`."""
    task_manager.register(TaskType.IMAGE_GEN, background_image_gen)
114
+
115
+
116
def enqueue_image_gen(task_manager, **kwargs) -> str:
    """Create an IMAGE_GEN job, submit the worker, and return the job id."""
    new_job = task_manager.create_job(TaskType.IMAGE_GEN)
    task_manager.submit(new_job, background_image_gen, **kwargs)
    return new_job
120
+
121
+
122
+ # --- VIDEO ANALYZER
123
+
124
def background_video_analyzer(
    job_id: str,
    progress_cb,
    *,
    uploaded_file_path: str,
    uploaded_file_name: str,
    category: str,
    created_by: Optional[str],
    script_num: int = 3,
    script_duration: int = 60,
    offer_details: str = "",
    target_audience: str = "",
    specific_hooks: str = "",
    additional_context: str = "",
    task_manager=None,
) -> Dict[str, Any]:
    """
    Analyzes the uploaded video and (optionally) chains a background script-gen job.

    When `task_manager` is provided, a SCRIPT_GENERATION job is enqueued after a
    successful analysis and its id is returned as `chained_script_job_id`.

    Raises:
        FileNotFoundError: if `uploaded_file_path` does not exist.
        RuntimeError: if the analysis produced no results.
    """
    progress_cb(job_id, 5, "Preparing video...")

    if not os.path.exists(uploaded_file_path):
        raise FileNotFoundError(uploaded_file_path)

    # Work on a private temp copy, keeping the original extension (default .mp4).
    tmp = safe_copy_temp(
        uploaded_file_path,
        suffix=(os.path.splitext(uploaded_file_name or ".mp4")[1] or ".mp4"),
    )
    chained_job_id = None
    try:
        progress_cb(job_id, 20, "Analyzing video...")
        result = run_and_store_video_analysis(
            category=category,
            uploaded_file_path=tmp,
            created_by=created_by,
        )

        if not isinstance(result, dict) or not result.get("results"):
            raise RuntimeError("Video analysis failed.")

        progress_cb(job_id, 95, "Analysis saved.")

        if task_manager:
            chained_job_id = enqueue_script_generation(
                task_manager,
                video_path=tmp,
                video_name=uploaded_file_name,
                offer_details=offer_details,
                target_audience=target_audience,
                specific_hooks=specific_hooks,
                additional_context=additional_context,
                num_scripts=script_num,
                duration=script_duration,
                created_by=created_by,
                category=category,
            )

        progress_cb(job_id, 100, "Video analysis complete.")
        return {
            "success": True,
            "type": TaskType.VIDEO_ANALYZER,
            "video_analysis_id": result.get("_id"),
            "chained_script_job_id": chained_job_id,
        }
    finally:
        # BUG FIX: the chained script job reads `tmp` asynchronously (it makes
        # its own copy on startup), so unconditionally unlinking here raced
        # with that job and caused intermittent FileNotFoundError. Only delete
        # when no job was chained; otherwise the OS temp dir reclaims the file.
        if chained_job_id is None:
            safe_unlink(tmp)
193
+
194
+
195
def register_video_analyzer_tasks(task_manager) -> None:
    """Bind the video-analyzer worker to TaskType.VIDEO_ANALYZER on `task_manager`."""
    task_manager.register(TaskType.VIDEO_ANALYZER, background_video_analyzer)
197
+
198
+
199
def enqueue_video_analyzer(task_manager, **kwargs) -> str:
    """Create a VIDEO_ANALYZER job and return its id.

    The manager itself is forwarded so the worker can chain a follow-up
    script-generation job after the analysis completes.
    """
    new_job = task_manager.create_job(TaskType.VIDEO_ANALYZER)
    task_manager.submit(new_job, background_video_analyzer, task_manager=task_manager, **kwargs)
    return new_job
203
+
204
+
205
+ # --- SCRIPT GENERATOR --------------------------------------------------------
206
+
207
def background_script_generation(
    job_id: str,
    progress_cb,
    *,
    video_path: str,
    video_name: str,
    offer_details: str,
    target_audience: str,
    specific_hooks: str,
    additional_context: str,
    num_scripts: int,
    duration: int,
    created_by: Optional[str],
    category: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Generates script variations from the provided video and persists the run.

    Works on a private temp copy of `video_path` so the caller's file can be
    deleted independently; the copy is removed in the `finally` block.

    Raises:
        FileNotFoundError: if `video_path` does not exist.
        RuntimeError: if generation produced no variations.
    """
    progress_cb(job_id, 5, "Preparing inputs...")

    if not os.path.exists(video_path):
        raise FileNotFoundError(video_path)

    # Private working copy, preserving the original extension (default .mp4).
    tmp = safe_copy_temp(video_path, suffix=(os.path.splitext(video_name or ".mp4")[1] or ".mp4"))
    try:
        progress_cb(job_id, 20, "Generating scripts...")
        gen = generate_scripts(
            tmp,
            offer_details,
            target_audience,
            specific_hooks,
            additional_context,
            num_scripts=max(1, int(num_scripts)),  # always request at least one
            duration=max(0, int(duration)),
        )
        if not gen or "script_variations" not in gen or not gen["script_variations"]:
            raise RuntimeError("Script generation returned no variations.")

        # Shape expected by insert_script_result: one "round" wrapping all variations.
        packed_round = [{"prompt_used": "Auto after analysis", "variations": gen["script_variations"]}]

        progress_cb(job_id, 75, "Creating thumbnail...")
        thumb = ""
        try:
            thumb = get_video_thumbnail_base64(tmp) or ""
        except Exception:
            # Thumbnail is cosmetic; never fail the run over it.
            pass

        progress_cb(job_id, 90, "Saving run...")
        insert_script_result(
            video_name=video_name or os.path.basename(tmp),
            offer_details=offer_details or "",
            target_audience=target_audience or "",
            specific_hook=specific_hooks or "",
            additional_context=additional_context or "",
            response=packed_round,
            thumbnail=thumb,
            created_by=created_by,
            num_scripts=len(gen["script_variations"]),
            category=category or "general",
        )

        progress_cb(job_id, 100, "Scripts saved.")
        return {
            "success": True,
            "type": TaskType.SCRIPT_GENERATION,
            "num_variations": len(gen["script_variations"]),
            "created_at": datetime.utcnow().isoformat(),
        }
    finally:
        # Always remove the private working copy.
        safe_unlink(tmp)
279
+
280
+
281
def register_script_generator_tasks(task_manager) -> None:
    """Bind the script-generation worker to TaskType.SCRIPT_GENERATION on `task_manager`."""
    task_manager.register(TaskType.SCRIPT_GENERATION, background_script_generation)
283
+
284
+
285
def enqueue_script_generation(task_manager, **kwargs) -> str:
    """Create a SCRIPT_GENERATION job, submit the worker, and return the job id."""
    new_job = task_manager.create_job(TaskType.SCRIPT_GENERATION)
    task_manager.submit(new_job, background_script_generation, **kwargs)
    return new_job
background_task/task_manager.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import threading, uuid, traceback
3
+ from concurrent.futures import ThreadPoolExecutor
4
+ from typing import Callable, Dict, Any, Optional
5
+
6
+ ProgressCB = Callable[[str, int, str], None]
7
+
8
class TaskManager:
    """Thread-pool backed job runner with in-memory, lock-protected bookkeeping.

    Each job record tracks name, status (queued/running/completed/failed),
    progress (0..100), message, result, and error (a traceback string).
    """

    def __init__(self, max_workers: int = 8):
        self._exec = ThreadPoolExecutor(max_workers=max_workers)
        self._lock = threading.Lock()
        self._jobs: Dict[str, dict] = {}
        self._registry: Dict[str, Callable[..., Any]] = {}

    # ---- job bookkeeping ----
    def create_job(self, name: str) -> str:
        """Allocate a new job record in the 'queued' state and return its id."""
        new_id = uuid.uuid4().hex
        record = {
            "name": name,
            "status": "queued",
            "progress": 0,
            "message": "",
            "result": None,
            "error": None,
        }
        with self._lock:
            self._jobs[new_id] = record
        return new_id

    def register(self, name: str, worker: Callable[..., Any]) -> None:
        """Associate a worker callable with a task name."""
        self._registry[name] = worker

    def progress_update(self, job_id: str, percent: int, message: str = "") -> None:
        """Record progress (clamped to 0..100) and, if non-empty, a message."""
        clamped = min(100, max(0, int(percent)))
        with self._lock:
            record = self._jobs.get(job_id)
            if record:
                record["progress"] = clamped
                if message:
                    record["message"] = message

    def _set_status(self, job_id: str, status: str) -> None:
        with self._lock:
            record = self._jobs.get(job_id)
            if record:
                record["status"] = status

    def submit(self, job_id: str, worker: Callable[..., Any], **kwargs) -> None:
        """Run `worker(job_id, progress_cb, **kwargs)` on the pool, tracking state."""
        self._set_status(job_id, "running")

        def _runner():
            try:
                outcome = worker(job_id, self.progress_update, **kwargs)
            except Exception:
                details = traceback.format_exc()
                with self._lock:
                    self._jobs[job_id]["status"] = "failed"
                    self._jobs[job_id]["error"] = details
            else:
                with self._lock:
                    self._jobs[job_id]["result"] = outcome
                    self._jobs[job_id]["status"] = "completed"
                    self._jobs[job_id]["progress"] = 100

        self._exec.submit(_runner)

    # ---- accessors for UI ----
    def get(self, job_id: str) -> Optional[dict]:
        """Return a snapshot of one job ({} when the id is unknown)."""
        with self._lock:
            return dict(self._jobs.get(job_id) or {})

    def list_jobs(self) -> Dict[str, dict]:
        """Return a snapshot of every job keyed by id."""
        with self._lock:
            return {job_key: dict(rec) for job_key, rec in self._jobs.items()}
background_task/utils.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os, shutil, tempfile
2
+
3
def safe_copy_temp(src_path: str, suffix: str) -> str:
    """Copy a file to a managed temp path so the caller can delete their tmp safely."""
    handle = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
    dest_path = handle.name
    handle.close()
    shutil.copyfile(src_path, dest_path)
    return dest_path
9
+
10
def safe_unlink(path: str) -> None:
    """Best-effort delete: ignore every failure (missing file, permissions, ...)."""
    try:
        os.unlink(path)
    except Exception:
        pass
components/display_variations.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import streamlit as st
3
+
4
def display_script_variations(json_data: dict):
    """Render each script variation as a table, with one combined CSV download.

    BUG FIX: the CSV export and its download button previously lived inside the
    per-variation loop, so the CSV was rebuilt every iteration and Streamlit
    raised DuplicateWidgetID whenever more than one variation existed. Both now
    run once, after the loop.
    """
    if not json_data or "script_variations" not in json_data:
        st.error("No script variations found")
        return

    variations = json_data["script_variations"]
    for i, variation in enumerate(variations, 1):
        st.markdown(f"### Variation {i}: {variation.get('variation_name','Var')}")
        df = pd.DataFrame(variation.get("script_table", []))
        st.table(df)

    # Single combined CSV: every variation's rows tagged with its name.
    csv_content = pd.concat(
        [pd.DataFrame(v.get("script_table", []))
         .assign(Variation=v.get("variation_name", f"Var{i+1}"))
         for i, v in enumerate(variations)],
        ignore_index=True
    ).to_csv(index=False)
    st.download_button("Download CSV", data=csv_content,
                       file_name="scripts.csv", mime="text/csv")
components/render_analysis.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json, pandas as pd, streamlit as st
2
+ from typing import Dict, Any
3
+
4
+ def _normalize_list(v):
5
+ if v is None: return []
6
+ if isinstance(v, list): return [str(x) for x in v]
7
+ return [s for s in str(v).splitlines() if s.strip()]
8
+
9
+ def _to_dataframe(items, columns_map):
10
+ import pandas as pd
11
+ if not isinstance(items, list) or not items:
12
+ return pd.DataFrame(columns=list(columns_map.values()))
13
+ df = pd.DataFrame(items).rename(columns=columns_map)
14
+ ordered_cols = [columns_map[k] for k in columns_map.keys() if columns_map[k] in df.columns]
15
+ return df.reindex(columns=ordered_cols)
16
+
17
+ def _mean_effectiveness(metrics):
18
+ if not metrics: return 0.0
19
+ scores = []
20
+ for m in metrics:
21
+ s = str(m.get("effectiveness_score", "0/10")).split("/")[0]
22
+ try: scores.append(int(s))
23
+ except Exception: pass
24
+ return round(sum(scores) / len(scores), 2) if scores else 0.0
25
+
26
+ def _search_dataframe(df, query):
27
+ if not query or df.empty: return df
28
+ mask = df.apply(lambda col: col.astype(str).str.contains(query, case=False, na=False))
29
+ return df[mask.any(axis=1)]
30
+
31
def render_analyzer_results(analysis: Dict[str, Any]) -> None:
    """Render the full video-ad analysis dashboard: headline metric cards,
    executive summary, snapshot column, and the detail tabs.

    BUG FIX: the three "Snapshot" markdown calls previously appended a stray
    orphan '</div>' closing tag with no opening tag (leftover from a removed
    HTML wrapper); the literal tag has been dropped.
    """
    if not isinstance(analysis, dict) or not analysis:
        st.warning("No analysis available."); return

    # Card styling for the four headline metrics below.
    st.markdown(
        """<style>
        .metric-card{background:#0f172a;padding:14px 16px;border-radius:14px;border:1px solid #1f2937}
        .label{font-size:12px;color:#94a3b8;margin-bottom:6px}
        .value{font-size:16px;color:#e2e8f0}
        </style>""",
        unsafe_allow_html=True,
    )
    va = analysis.get("video_analysis", {}) or {}
    storyboard = analysis.get("storyboard", []) or []
    script = analysis.get("script", []) or []
    metrics = va.get("video_metrics", []) or []
    mean_score = _mean_effectiveness(metrics)

    # Headline metric cards.
    m1,m2,m3,m4 = st.columns([1,1,1,1])
    m1.markdown(f'<div class="metric-card"><div class="label">Scenes</div><div class="value">{len(storyboard)}</div></div>', unsafe_allow_html=True)
    m2.markdown(f'<div class="metric-card"><div class="label">Dialogue Lines</div><div class="value">{len(script)}</div></div>', unsafe_allow_html=True)
    m3.markdown(f'<div class="metric-card"><div class="label">Avg Effectiveness</div><div class="value">{mean_score}/10</div></div>', unsafe_allow_html=True)
    m4.markdown(f'<div class="metric-card"><div class="label">Improvements</div><div class="value">{len(analysis.get("timestamp_improvements", []) or [])}</div></div>', unsafe_allow_html=True)

    colA, colB = st.columns([1.3,1])
    with colA:
        st.markdown("### Executive Summary")
        c1,c2 = st.columns(2)
        with c1:
            with st.expander("Brief", expanded=True): st.write(analysis.get("brief", "N/A"))
            with st.expander("Caption Details", expanded=False): st.write(analysis.get("caption_details", "N/A"))
        with c2:
            hook = analysis.get("hook", {}) or {}
            with st.expander("Hook", expanded=True):
                st.markdown(f"**Opening:** {hook.get('hook_text','N/A')}")
                st.markdown(f"**Principle:** {hook.get('principle','N/A')}")
                adv = _normalize_list(hook.get("advantages"))
                if adv:
                    st.markdown("**Advantages:**")
                    st.markdown("\n".join([f"- {a}" for a in adv]))
        st.divider()
        st.markdown("### Narrative & Copy Frameworks")
        with st.expander("Framework Analysis", expanded=True): st.write(analysis.get("framework_analysis", "N/A"))
    with colB:
        st.markdown("### Snapshot")
        # Fixed: no stray '</div>' trailing each value anymore.
        st.caption("Top Drivers"); st.markdown(f'{va.get("effectiveness_factors","N/A")}', unsafe_allow_html=True)
        st.markdown(""); st.caption("Psychological Triggers"); st.markdown(f'{va.get("psychological_triggers","N/A")}', unsafe_allow_html=True)
        st.markdown(""); st.caption("Target Audience"); st.markdown(f'{va.get("target_audience","N/A")}', unsafe_allow_html=True)

    st.divider()
    tabs = st.tabs(["Storyboard","Script","Scored Metrics","Improvements","Raw JSON"])

    with tabs[0]:
        q = st.text_input("Search storyboard")
        if storyboard:
            df = _to_dataframe(
                storyboard,
                {"timeline":"Timeline","scene":"Scene","visuals":"Visuals","dialogue":"Dialogue","camera":"Camera","sound_effects":"Sound Effects"}
            )
            st.dataframe(_search_dataframe(df, q), use_container_width=True, height=480)
        else:
            st.info("No storyboard available.")

    with tabs[1]:
        q2 = st.text_input("Search script")
        if script:
            df = _to_dataframe(script, {"timeline":"Timeline","dialogue":"Dialogue"})
            st.dataframe(_search_dataframe(df, q2), use_container_width=True, height=480)
        else:
            st.info("No script breakdown available.")

    with tabs[2]:
        q3 = st.text_input("Search metrics")
        if metrics:
            dfm = _to_dataframe(
                metrics,
                {"timestamp":"Timestamp","element":"Element","current_approach":"Current Approach","effectiveness_score":"Effectiveness Score","notes":"Notes"}
            )
            st.dataframe(_search_dataframe(dfm, q3), use_container_width=True, height=480)
        else:
            st.info("No video metrics available.")

    with tabs[3]:
        improvements = analysis.get("timestamp_improvements", []) or []
        q4 = st.text_input("Search improvements")
        if improvements:
            imp_df = _to_dataframe(
                improvements,
                {"timestamp":"Timestamp","current_element":"Current Element","improvement_type":"Improvement Type",
                 "recommended_change":"Recommended Change","expected_impact":"Expected Impact","priority":"Priority"}
            )
            # Sort by priority (High first) then timestamp when both are present.
            if "Priority" in imp_df.columns:
                order = pd.CategoricalDtype(["High","Medium","Low"], ordered=True)
                imp_df["Priority"] = imp_df["Priority"].astype(order)
                if "Timestamp" in imp_df.columns:
                    imp_df = imp_df.sort_values(["Priority","Timestamp"])
            st.dataframe(_search_dataframe(imp_df, q4), use_container_width=True, height=480)
        else:
            st.info("No timestamp-based improvements available.")

    with tabs[4]:
        pretty = json.dumps(analysis, indent=2, ensure_ascii=False)
        st.code(pretty, language="json")
        st.download_button(
            "Download JSON",
            data=pretty.encode("utf-8"),
            file_name="ad_analysis.json",
            mime="application/json",
            use_container_width=True
        )
core/logger.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
import logging

# Configure root logging once at import time: INFO level, timestamped
# "<time> - <logger> - <level> - <message>" lines via a StreamHandler.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler()]
)
# Shared module logger (`from core.logger import logger`).
logger = logging.getLogger(__name__)
core/settings.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ import os
3
+ import os
4
+ from typing import Dict, Any, Optional
5
+
6
+ from dotenv import dotenv_values
7
+ from pydantic import BaseModel, ConfigDict
8
+ from pydantic_settings import BaseSettings, JsonConfigSettingsSource
9
+
10
+ from core.logger import logger
11
+
12
+
13
def load_json_config(json_file: str) -> Dict[str, Any]:
    """Load settings from a mounted secrets JSON file; {} when unavailable.

    BUG FIX: previously, when the file was missing, the function logged
    "JSON file not found." but still handed the nonexistent path to
    JsonConfigSettingsSource (the FileNotFoundError handler never fired
    because os.path.exists does not raise). Now it returns {} immediately.
    """
    file_path = os.path.join('/mnt/secrets-store', json_file)
    if not os.path.exists(file_path):
        logger.error("JSON file not found.")
        return {}
    logger.info(f"The file is located at: {file_path}")
    return JsonConfigSettingsSource(settings_cls=GlobalConfig, json_file=file_path).json_data
24
+
25
+
26
def load_env_config():
    """Return configuration from the .env file in dev, else the process environment."""
    running_dev = GlobalConfig().APP_ENV == "dev"
    return dotenv_values() if running_dev else dict(os.environ)
31
+
32
+
33
def get_config() -> Dict[str, Any]:
    """Merge model defaults, environment values, and the secrets JSON.

    Precedence (highest last applied): defaults < environment < JSON secrets.
    Loader calls are kept in the original order to preserve their logging
    side effects.
    """
    json_overrides = load_json_config('config.json')
    env_overrides = load_env_config()
    defaults = GlobalConfig.model_construct().model_dump()
    return {**defaults, **env_overrides, **json_overrides}
43
+
44
class AppConfig(BaseModel):
    """Application configurations."""

    # Display name / version / description surfaced by the app.
    app_name: str = "Creative AdGenesis"
    app_version: str = "0.1.0"
    app_description: str = (
        "Creative AdGenesis"
    )
    # Global debug toggle (not read anywhere in this chunk — TODO confirm usage).
    debug_mode: bool = False
53
+
54
+
55
class GlobalConfig(BaseSettings):
    """Typed global core object."""
    APP_CONFIG: AppConfig = AppConfig()
    # Unknown keys from env/JSON are silently ignored.
    model_config = ConfigDict(extra="ignore")

    # Environment
    APP_ENV: Optional[str] = "dev"  # "dev" switches config loading to .env
    DEBUG: Optional[bool] = False

    # MongoDB connection pieces (assembled into an Atlas URI elsewhere).
    MONGO_HOST: Optional[str] = None
    MONGO_USERNAME: Optional[str] = None
    MONGO_PASSWORD: Optional[str] = None
    MONGO_DB: Optional[str] = None


    # Cloudflare R2
    R2_BUCKET_NAME: Optional[str] = None
    R2_ACCESS_KEY: Optional[str] = None
    R2_SECRET_KEY: Optional[str] = None
    R2_ENDPOINT: Optional[str] = None


    # OpenAI
    OPENAI_API_KEY: Optional[str] = None

    # Replicate
    REPLICATE_API_KEY: Optional[str] = None
    GEMINI_API_KEY: Optional[str] = None


    # Custom
    NEW_BASE: Optional[str] = None
87
+
88
# Load configurations
# Build the merged config once at import time and expose the typed singletons
# used across the app (`from core.settings import cnf, app_cnf`).
config_data = get_config()
cnf = GlobalConfig(**config_data)  # validated settings object
app_cnf = cnf.APP_CONFIG  # convenience handle on the nested AppConfig
core/task_enum.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
class TaskType:
    """String identifiers for each background task kind.

    Plain string constants (not an Enum), so values compare directly with the
    strings stored in job records.
    """
    VIDEO_ANALYZER = "video_analyzer"
    SCRIPT_GENERATION = "script_generation"
    TEXT_TO_IMAGE = "text_to_image"
    IMAGE_GEN = "image_gen"
database/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from database.connections import get_mongo_client
2
+ from database.operations import (
3
+ start_job, finish_job,
4
+ insert_video_analysis, list_video_categories, find_video_analyses
5
+ )
database/connections.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from pymongo import MongoClient
3
+
4
+
5
+ from core.logger import logger
6
+ from core.settings import cnf
7
+
8
+
9
+
10
+
11
def get_mongo_client():
    """Initialize MongoDB connection with enhanced error handling.

    Returns:
        The configured database handle, or None when the connection fails.
    """
    try:
        username = cnf.MONGO_USERNAME
        password = cnf.MONGO_PASSWORD
        db_name = cnf.MONGO_DB
        host = cnf.MONGO_HOST

        connection_string = f"mongodb+srv://{username}:{password}@{host}/{db_name}?retryWrites=true&w=majority"
        client = MongoClient(connection_string, serverSelectionTimeoutMS=5000)
        db = client[db_name]
        db.command('ping')  # fail fast when the cluster is unreachable
        logger.info("MongoDB connection established successfully")
        return db
    except Exception as e:
        # BUG FIX: was a bare print() that discarded the captured exception;
        # route the failure through the module logger with the reason attached.
        logger.error("Database connection failed: %s", e)
        return None
28
+
29
+
30
def get_results_collection():
    """Return the 'results' collection, or None when the DB is unreachable."""
    database = get_mongo_client()
    return None if database is None else database["results"]
35
+
36
def get_video_collection():
    """Return the 'video_analyses' collection, or None when the DB is unreachable."""
    database = get_mongo_client()
    return None if database is None else database["video_analyses"]
41
+
42
def get_script_collection():
    """Return the 'script_generator' collection, or None when the DB is unreachable."""
    database = get_mongo_client()
    return None if database is None else database["script_generator"]
database/operations.py ADDED
@@ -0,0 +1,334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import os, logging
3
+ from typing import Optional, List, Dict, Any, Tuple
4
+ from datetime import datetime
5
+ from bson import ObjectId
6
+ from pymongo import ASCENDING, DESCENDING
7
+ from pymongo import MongoClient
8
+
9
+
10
+ from database.connections import get_video_collection, get_script_collection, get_results_collection
11
+
12
+
13
+ log = logging.getLogger(__name__)
14
+
15
+
16
def _ensure_results_indexes(col):
    """Best-effort creation of the query indexes used by the AI Library."""
    index_specs = (
        [("created_at", DESCENDING)],
        [("type", ASCENDING), ("created_at", DESCENDING)],
        [("category", ASCENDING), ("created_at", DESCENDING)],
        [("created_by", ASCENDING), ("created_at", DESCENDING)],
    )
    try:
        for spec in index_specs:
            col.create_index(spec)
    except Exception:
        # Indexes are an optimization only; never block module import on them.
        pass
25
+
26
def _ensure_video_indexes(col):
    """Best-effort creation of the video-analysis query indexes."""
    index_specs = (
        [("created_at", DESCENDING)],
        [("category", ASCENDING), ("created_at", DESCENDING)],
        [("created_by", ASCENDING), ("created_at", DESCENDING)],
    )
    try:
        for spec in index_specs:
            col.create_index(spec)
    except Exception:
        # Indexes are an optimization only; swallow failures silently.
        pass
33
+
34
+
35
# Import-time bootstrap: grab each collection once and make sure its indexes
# exist. Runs when this module is first imported; if the DB is unreachable
# the getters return None and index creation is simply skipped.
_rc = get_results_collection()
if _rc is not None:
    _ensure_results_indexes(_rc)

_vc = get_video_collection()
if _vc is not None:
    _ensure_video_indexes(_vc)

_sg = get_script_collection()
if _sg is not None:
    # NOTE(review): the script collection reuses the *video* index helper —
    # presumably intentional (same category/created_by query shape), but it
    # omits the ("type", "created_at") compound index; confirm.
    _ensure_video_indexes(_sg)
46
+
47
+
48
+
49
+ # ---------- Image jobs (generation / variation) ----------
50
def start_job(
    col,
    *,
    type: str,
    created_by: str,
    category: str,
    inputs: Dict[str, Any],
    settings: Dict[str, Any],
    user_prompt: Optional[str] = None,
) -> str:
    """Insert an "in_progress" job document into `col` and return its id.

    Variation schema (AI Library compatible):
        {type: "variation", source: "variation", created_by, category,
         file_name, prompt, status: "in_progress", urls: [], created_at,
         settings, inputs}

    Generation schema:
        {type: "generation", source: "text", category, prompt,
         status: "in_progress", urls: [], settings, inputs,
         created_by, created_at}

    Returns:
        The inserted document's id as a string.
    """
    job_inputs = inputs or {}
    document: Dict[str, Any] = {
        "type": type,
        # Only variation jobs carry the "variation" source tag.
        "source": "variation" if type == "variation" else "text",
        "category": category or "general",
        "prompt": user_prompt,
        "status": "in_progress",
        "urls": [],
        "inputs": job_inputs,
        "settings": settings or {},
        "created_by": created_by,
        "created_at": datetime.utcnow(),
    }

    # Hoist the source file name to the top level for the library views.
    if "file_name" in job_inputs:
        document["file_name"] = job_inputs["file_name"]

    inserted = col.insert_one(document)
    return str(inserted.inserted_id)
104
+
105
+
106
def finish_job(
    col,
    job_id: str,
    *,
    status: str = "completed",
    outputs_urls: Optional[List[str]] = None,
    provider_update: Optional[Dict[str, Any]] = None,
) -> None:
    """Finalize a job document: write top-level `status`, optional deduped
    `urls`, and any dotted `provider.*` fields. Errors are logged, not raised.
    """
    updates: Dict[str, Any] = {"status": status}
    if outputs_urls is not None:
        # Preserve first-seen order while dropping duplicate URLs.
        updates["urls"] = list(dict.fromkeys(outputs_urls or []))
    if provider_update:
        updates.update({f"provider.{key}": value for key, value in provider_update.items()})
    try:
        col.update_one({"_id": ObjectId(job_id)}, {"$set": updates})
    except Exception as e:
        log.error(f"finish_job update failed: {e}")
127
+
128
+
129
+ # ---------- Video analyses ----------
130
def insert_video_analysis(
    *,
    video_name: str,
    response: Dict[str, Any],
    category: Optional[str] = None,
    created_by: Optional[str] = None,
    analyzer_model: Optional[str] = None,
    video_meta: Optional[Dict[str, Any]] = None,
    thumbnail: str = "",
) -> Optional[str]:
    """Persist one video-analysis result document.

    Returns the new document id as a string, or None when the collection
    is unavailable.
    """
    col = get_video_collection()
    if col is None:
        return None
    document: Dict[str, Any] = {
        "category": category or "general",
        # Merge optional metadata into the embedded video sub-document.
        "video": {"name": video_name, **(video_meta or {})},
        "analyzer_model": analyzer_model or os.getenv("VIDEO_ANALYZER_MODEL", "gemini-2.0-flash"),
        "results": response or {},
        "created_by": created_by,
        "created_at": datetime.utcnow(),
        "thumbnail": thumbnail or "",
    }
    return str(col.insert_one(document).inserted_id)
154
+
155
+
156
def list_video_categories() -> List[str]:
    """Return the sorted distinct non-empty `category` values ([] on error)."""
    col = get_video_collection()
    if col is None:
        return []
    try:
        distinct_values = col.distinct("category")
    except Exception:
        return []
    return sorted({value for value in distinct_values if value not in (None, "", [])})
165
+
166
+
167
def find_video_analyses(
    *,
    category: Optional[str] = None,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    limit: int = 200,
) -> List[Dict[str, Any]]:
    """Query stored video analyses, newest first.

    Optional filters: exact `category` and a half-open [start_date, end_date)
    creation window. `_id` is stringified for the UI layer.
    """
    col = get_video_collection()
    if col is None:
        return []
    query: Dict[str, Any] = {}
    if category:
        query["category"] = category
    created_range: Dict[str, Any] = {}
    if start_date:
        created_range["$gte"] = start_date
    if end_date:
        created_range["$lt"] = end_date
    if created_range:
        query["created_at"] = created_range
    cursor = col.find(query).sort("created_at", DESCENDING).limit(max(1, int(limit)))
    documents: List[Dict[str, Any]] = []
    for document in cursor:
        document["_id"] = str(document.get("_id"))
        documents.append(document)
    return documents
190
+
191
+
192
+
193
def find_generation_jobs(
    *,
    category: Optional[str] = None,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    page: int = 0,
    page_size: int = 20,
) -> Tuple[List[Dict[str, Any]], int]:
    """Paginated query over text-to-image generation jobs, newest first.

    Returns (documents, total_matching); `_id` is stringified for the UI.
    """
    col = get_results_collection()
    if col is None:
        return [], 0

    query: Dict[str, Any] = {"type": "generation", "source": "text"}
    if category:
        query["category"] = category
    created_range: Dict[str, Any] = {}
    if start_date:
        created_range["$gte"] = start_date
    if end_date:
        created_range["$lt"] = end_date
    if created_range:
        query["created_at"] = created_range

    total = col.count_documents(query)
    cursor = (
        col.find(query)
        .sort("created_at", DESCENDING)
        .skip(page * page_size)
        .limit(page_size)
    )

    documents: List[Dict[str, Any]] = []
    for document in cursor:
        document["_id"] = str(document.get("_id"))
        documents.append(document)
    return documents, int(total)
227
+
228
+
229
# Module-level handle to the script_generator collection (None if DB down).
# NOTE(review): appears unused below — the functions call
# get_script_collection() fresh each time; confirm before removing.
script_collection = get_script_collection()
230
+
231
+
232
+
233
def insert_script_result(
    *,
    video_name: str,
    offer_details: str,
    target_audience: str,
    specific_hook: str,
    additional_context: str,
    response: List[Dict[str, Any]],
    thumbnail: str = "",
    created_by: Optional[str] = None,
    num_scripts: Optional[int] = None,
    category: Optional[str] = None,
) -> None:
    """Save a script generation run into the script_generator collection.

    Stored with type/source tags so the AI Library can query it.

    Raises:
        ValueError: when the collection is unavailable or the insert fails.
    """
    col = get_script_collection()
    if col is None:
        # Fixed message: this function writes to the *script* collection,
        # not the image "results" collection the old message named.
        raise ValueError("Script collection is not available.")

    doc: Dict[str, Any] = {
        "type": "script",
        "source": "script_generator",
        "video_name": video_name,
        "category": category or "general",
        "offer_details": offer_details,
        "target_audience": target_audience,
        "specific_hook": specific_hook,
        "additional_context": additional_context,
        "response": response,
        "thumbnail": thumbnail,
        "created_by": created_by,
        "num_scripts": num_scripts,
        "created_at": datetime.utcnow(),
    }

    try:
        col.insert_one(doc)
    except Exception as e:
        # Preserve the original error type (ValueError) for existing callers.
        raise ValueError(f"Failed to insert script result: {e}")
274
+
275
+
276
def find_script_results(
    *,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None,
    page: int = 0,
    page_size: int = 20,
    created_by: Optional[str] = None,
    video_name_query: Optional[str] = None,
) -> Tuple[List[Dict[str, Any]], int]:
    """Paginated query of stored script runs, newest first.

    Matches both tagged documents and legacy untagged ones that carry a
    `response` field. Returns (documents, total); `_id` is stringified.
    """
    col = get_script_collection()
    if col is None:
        return [], 0

    query: Dict[str, Any] = {
        "$or": [
            {"type": "script", "source": "script_generator"},
            # Legacy rows written before type/source tagging existed.
            {"type": {"$exists": False}, "response": {"$exists": True}},
        ]
    }

    created_range: Dict[str, Any] = {}
    if start_date:
        created_range["$gte"] = start_date
    if end_date:
        created_range["$lt"] = end_date
    if created_range:
        query["created_at"] = created_range

    if created_by:
        query["created_by"] = created_by
    if video_name_query:
        # Case-insensitive substring match on the video name.
        query["video_name"] = {"$regex": video_name_query, "$options": "i"}

    total = col.count_documents(query)
    cursor = (col.find(query)
              .sort("created_at", DESCENDING)
              .skip(page * page_size)
              .limit(page_size))

    documents: List[Dict[str, Any]] = []
    for document in cursor:
        document["_id"] = str(document.get("_id"))
        documents.append(document)
    return documents, int(total)
319
+
320
+
321
def get_all_scripts(start_date: Optional[datetime] = None, end_date: Optional[datetime] = None, limit: int = 20) -> \
        List[Dict[str, Any]]:
    """Fetch recent documents from the script collection, newest first.

    Fixes a KeyError: previously passing `end_date` without `start_date`
    indexed the missing "created_at" key in the query dict.

    NOTE(review): the {"type": "generation", "source": "text"} filter does
    not match the documents insert_script_result writes ("script" /
    "script_generator") — confirm which tag set this collection really
    holds; the filter is kept as-is to avoid a silent behavior change.
    """
    col = get_script_collection()
    if col is None:
        return []
    query: Dict[str, Any] = {"type": "generation", "source": "text"}

    created_range: Dict[str, Any] = {}
    if start_date:
        created_range["$gte"] = start_date
    if end_date:
        created_range["$lt"] = end_date
    if created_range:
        query["created_at"] = created_range

    cursor = col.find(query).sort("created_at", DESCENDING).limit(limit)
    return list(cursor)
generator_function/image_function.py ADDED
@@ -0,0 +1,339 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os, io, zipfile, replicate, time, logging, requests, streamlit as st, boto3, threading
2
+ from concurrent.futures import ThreadPoolExecutor, as_completed
3
+ from typing import Dict, Any, List, Tuple, Optional, Union
4
+ from uuid import uuid4
5
+ from urllib.parse import urlparse
6
+ from functools import lru_cache
7
+ import os, base64, logging
8
+ from openai import OpenAI
9
+
10
+ from database.operations import start_job, finish_job
11
+
12
+ from database.connections import get_results_collection
13
+
14
+
15
+ from dotenv import load_dotenv
16
+
17
+
18
+ load_dotenv()
19
+ logger = logging.getLogger(__name__)
20
+
21
def _encode_image_to_base64(image_path):
    """Read a file and return its contents base64-encoded as a str.

    Returns "" (after logging) on any read/encode failure.
    """
    try:
        with open(image_path, "rb") as handle:
            return base64.b64encode(handle.read()).decode("utf-8")
    except Exception:
        logger.exception(f"Failed to base64 encode image: {image_path}")
        return ""
28
+
29
# --- Module configuration -------------------------------------------------
REPLICATE_API_TOKEN = os.getenv("REPLICATE_API_TOKEN")
# Pool sizing mirrors CPython's ThreadPoolExecutor default heuristic.
MAX_WORKERS = min(32, (os.cpu_count() or 1) + 4)
REQUEST_TIMEOUT = 30  # seconds per HTTP fetch
RETRY_ATTEMPTS = 3    # shared by generation, fetch and upload retry loops

# Replicate model catalogue: model slug, the aspect ratios it supports, and
# the input field name that carries the aspect ratio for that model.
MODEL_REGISTRY: Dict[str, Dict[str, Any]] = {
    "imagegen-4-ultra": {"id": "google/imagen-4-ultra","aspect_ratios": ["1:1","16:9","9:16","3:4","4:3"],"param_name":"aspect_ratio"},
    "imagen-4": {"id": "google/imagen-4","aspect_ratios": ["1:1","16:9","9:16","3:4","4:3"],"param_name":"aspect_ratio"},
    "nano-banana": {"id": "google/nano-banana","aspect_ratios": ["1:1","16:9","9:16","3:4","4:3"],"param_name":"aspect_ratio"},
    "qwen": {"id": "qwen/qwen-image","aspect_ratios": ["1:1","16:9","9:16","3:4","4:3","3:2","2:3"],"param_name":"aspect_ratio"},
    "seedream-3": {"id": "bytedance/seedream-3","aspect_ratios": ["1:1","16:9","9:16","3:4","4:3","3:2","2:3","21:9"],"param_name":"aspect_ratio"},
    "recraft-v3": {"id": "recraft-ai/recraft-v3","aspect_ratios": ["1:1","4:3","3:4","3:2","2:3","16:9","9:16","1:2","2:1","7:5","5:7","4:5","5:4","3:5","5:3"],"param_name":"aspect_ratio"},
    "photon": {"id": "luma/photon","aspect_ratios": ["1:1","3:4","4:3","9:16","16:9","9:21","21:9"],"param_name":"aspect_ratio"},
    "ideogram-v3-quality": {"id": "ideogram-ai/ideogram-v3-quality","aspect_ratios": ["1:3","3:1","1:2","2:1","9:16","16:9","10:16","16:10","2:3","3:2","3:4","4:3","4:5","5:4","1:1"],"param_name":"aspect_ratio"},
}

# Per-thread storage for the cached boto3 client (see _s3()).
_thread_local = threading.local()
46
+
47
+
48
+
49
def get_model_config(model_key: str) -> Optional[Dict[str, Any]]:
    """Public registry lookup: the model's config dict, or None if unknown."""
    if model_key in MODEL_REGISTRY:
        return MODEL_REGISTRY[model_key]
    return None
51
+
52
@lru_cache(maxsize=128)
def _get_model_config_cached(model_key: str) -> Optional[Dict[str, Any]]:
    # Cached lookup used on the hot generation path; MODEL_REGISTRY is a
    # module-level constant, so caching by key alone is safe.
    return MODEL_REGISTRY.get(model_key)
55
+
56
def _s3():
    """Return a per-thread boto3 S3 client configured for Cloudflare R2.

    The client (or None) is cached on thread-local storage.
    NOTE(review): a missing env var caches None for the thread's lifetime,
    even if the environment is fixed later — confirm that is acceptable.
    """
    if not hasattr(_thread_local, "s3"):
        # Every value must be set; NEW_BASE is the public URL prefix used
        # by _upload_to_r2 to build the returned link.
        needed = ["R2_ENDPOINT","R2_ACCESS_KEY","R2_SECRET_KEY","R2_BUCKET_NAME","NEW_BASE"]
        if any(not os.getenv(k) for k in needed):
            _thread_local.s3 = None
            return None
        try:
            _thread_local.s3 = boto3.client(
                "s3",
                endpoint_url=os.getenv("R2_ENDPOINT"),
                aws_access_key_id=os.getenv("R2_ACCESS_KEY"),
                aws_secret_access_key=os.getenv("R2_SECRET_KEY"),
                region_name="auto",
            )
        except Exception as e:
            logger.error(f"S3 init failed: {e}")
            _thread_local.s3 = None
    return _thread_local.s3
74
+
75
def _upload_to_r2(image_bytes: bytes) -> Optional[str]:
    """Upload PNG bytes to R2 under a fresh random key; return the public URL.

    Retries with exponential backoff; returns None when the client is
    unavailable or every attempt fails.
    """
    s3 = _s3()
    if not s3:
        return None
    for attempt in range(RETRY_ATTEMPTS):
        try:
            filename = f"{uuid4().hex}.png"
            # Fixed: the generated unique filename was unused and the key was
            # a constant literal, so every upload overwrote the same object.
            key = f"adgenesis_image_text/creative_adgenesis/images/{filename}"
            s3.put_object(
                Bucket=os.getenv("R2_BUCKET_NAME"),
                Key=key,
                Body=image_bytes,
                ContentType="image/png",
            )
            return f"{os.getenv('NEW_BASE').rstrip('/')}/{key}"
        except Exception as e:
            if attempt == RETRY_ATTEMPTS - 1:
                logger.error(f"R2 upload failed: {e}")
                return None
            time.sleep(2 ** attempt)  # exponential backoff: 1s, 2s, ...
    return None
96
+
97
def _generate_one(model_key: str, prompt: str, aspect_ratio: str) -> List[str]:
    """Run one Replicate prediction and normalize its output to a URL list.

    Returns a single-element list of URLs, or [] when the token/model is
    missing or every retry fails.
    """
    if not REPLICATE_API_TOKEN:
        return []
    cfg = _get_model_config_cached(model_key)
    if not cfg:
        return []
    payload = {"prompt": prompt, cfg["param_name"]: aspect_ratio}
    for attempt in range(RETRY_ATTEMPTS):
        try:
            output = replicate.run(cfg["id"], input=payload)
            # Normalize the SDK's possible return shapes to a list of URLs.
            urls: List[str] = []
            if isinstance(output, list) and output:
                head = output[0]
                urls = [getattr(head, "url", str(head))]
            elif isinstance(output, str):
                urls = [output]
            elif hasattr(output, "url"):
                urls = [output.url]
            if urls:
                return urls
        except Exception as e:
            if attempt == RETRY_ATTEMPTS - 1:
                logger.error(f"replicate run failed: {e}")
                return []
            time.sleep(1)
    return []
123
+
124
def _fetch(url: Union[str, Any]) -> Optional[bytes]:
    """Download a URL to bytes with retries; None after the final failure.

    Accepts either a plain URL string or a Replicate file-output-like
    object exposing a `.url` attribute.
    """
    url_str = getattr(url, "url", str(url))
    for attempt in range(RETRY_ATTEMPTS):
        try:
            r = requests.get(
                url_str, timeout=REQUEST_TIMEOUT, stream=True,
                headers={"Cache-Control":"no-cache","Pragma":"no-cache","User-Agent":"ImageBot/1.0"}
            )
            r.raise_for_status()
            # Join chunks once instead of quadratic `bytes +=` concatenation.
            return b"".join(r.iter_content(8192))
        except Exception:
            if attempt == RETRY_ATTEMPTS - 1:
                return None
            time.sleep(1)
    return None
142
+
143
def _process_one(args: Tuple[str, str, str, int]) -> Dict[str, Any]:
    """Generate → download → re-upload one image; never raises.

    Returns {"index", "success", "source_url", "r2_url", "error"}.
    """
    model_key, prompt, aspect_ratio, idx = args
    result: Dict[str, Any] = {"index": idx, "success": False, "source_url": None, "r2_url": None, "error": None}
    try:
        urls = _generate_one(model_key, prompt, aspect_ratio)
        if not urls:
            result["error"] = "No URLs returned"
            return result
        first = urls[0]
        result["source_url"] = getattr(first, "url", str(first))
        payload = _fetch(first)
        if not payload:
            result["error"] = "Fetch failed"
            return result
        uploaded = _upload_to_r2(payload)
        if uploaded:
            result["r2_url"] = uploaded
            result["success"] = True
        else:
            result["error"] = "Upload to R2 failed"
    except Exception as e:
        result["error"] = str(e)
    return result
163
+
164
def _generate_images_parallel(model_key: str, aspect_ratio: str, prompt: str, num_images: int) -> Tuple[List[str], List[str], List[str]]:
    """Produce `num_images` renders concurrently.

    Returns (r2_urls, source_urls, errors); URL lists are de-duplicated in
    first-seen order. A single image bypasses the thread pool entirely.
    """
    if num_images == 1:
        single = _process_one((model_key, prompt, aspect_ratio, 0))
        if single["success"]:
            return [single["r2_url"]], [single["source_url"]], []
        return [], [], [single["error"] or "Generation failed"]

    tasks = [(model_key, prompt, aspect_ratio, i) for i in range(num_images)]
    r2_urls: List[str] = []
    source_urls: List[str] = []
    errors: List[str] = []
    with ThreadPoolExecutor(max_workers=min(MAX_WORKERS, num_images)) as pool:
        future_map = {pool.submit(_process_one, task): task[3] for task in tasks}
        for future in as_completed(future_map):
            try:
                outcome = future.result()
            except Exception as e:
                errors.append(f"Future err: {e}")
                continue
            if outcome["success"]:
                if outcome["r2_url"]:
                    r2_urls.append(outcome["r2_url"])
                if outcome["source_url"]:
                    source_urls.append(outcome["source_url"])
            else:
                errors.append(outcome["error"] or "Generation failed")
    # Drop duplicate URLs while keeping first-seen order.
    return list(dict.fromkeys(r2_urls)), list(dict.fromkeys(source_urls)), errors
186
+
187
+
188
+
189
def generate_images_parallel(model_key: str, aspect_ratio: str, prompt: str, num_images: int) -> Tuple[List[str], List[str], List[str]]:
    """Back-compat public export used by background tasks.

    Thin pass-through to _generate_images_parallel; returns
    (r2_urls, source_urls, errors).
    """
    return _generate_images_parallel(model_key, aspect_ratio, prompt, num_images)
192
+
193
def handle_image_generation_optimized(
    *,
    model_key: str,
    aspect_ratio: str,
    prompt: str,
    num_images: int,
    debug_mode: bool = False,
    category: Optional[str] = None,
    platform: Optional[str] = None,
):
    """
    Streamlit-friendly wrapper: kicks off parallel gen, persists a job row,
    and renders results in-place (no return value).
    """
    # Hard preconditions: a Replicate token and a non-empty prompt.
    if not REPLICATE_API_TOKEN:
        st.error("Missing REPLICATE_API_TOKEN. Set it as an environment variable.")
        return
    if not prompt.strip():
        st.warning("Please enter a prompt.")
        return

    # Attribution for the job row: session uid first, then env fallbacks.
    created_by = (
        st.session_state.get("uid")
        or os.getenv("USER_ID")
        or os.getenv("CREATED_BY")
        or "anonymous"
    )

    # Open an in-progress job row (best-effort; UI continues without DB).
    results_col = get_results_collection()
    db_job_id = None
    if results_col is not None:
        try:
            db_job_id = start_job(
                results_col,
                type="generation",
                created_by=str(created_by),
                category=(category or "general"),
                inputs={"model_key": model_key, "aspect_ratio": aspect_ratio, "num_images": num_images},
                settings={"platform": platform},
                user_prompt=prompt.strip(),
            )
        except Exception as e:
            logger.error(f"start_job failed: {e}")

    progress = st.progress(0, text="Starting generation...")
    status = st.empty()
    start = time.time()

    try:
        with status.container():
            st.info(f"Generating {num_images} image(s) in parallel...")
            progress.progress(10, text="Running...")

        r2_urls, source_urls, errors = _generate_images_parallel(model_key, aspect_ratio, prompt.strip(), num_images)
        # Prefer our R2 copies; fall back to provider URLs if upload failed.
        urls = r2_urls or source_urls

        # Close the job row with the final status and URLs (best-effort).
        if results_col is not None and db_job_id:
            try:
                finish_job(
                    results_col,
                    db_job_id,
                    status="completed" if urls else "failed",
                    outputs_urls=urls or [],
                    provider_update={"errors": errors} if errors else None,
                )
            except Exception as e:
                logger.error(f"finish_job failed: {e}")

        progress.progress(100, text="Complete!")
        took = time.time() - start

        if urls:
            with status.container():
                st.success(f"Generated {len(urls)} image(s) in {took:.1f}s. Job ID: {db_job_id or 'N/A'}")

            # Grid render: up to 4 columns, round-robin placement.
            cols = st.columns(min(4, len(urls)) or 1)
            for i, u in enumerate(urls):
                with cols[i % len(cols)]:
                    try:
                        b = _fetch(u)
                        if b is None:
                            st.error("Failed to load image")
                            continue
                        st.image(b, use_container_width=True)
                        # Derive a sensible download filename from the URL path.
                        path = urlparse(str(u)).path
                        base = os.path.basename(path) or "image.png"
                        if not os.path.splitext(base)[1]:
                            base = f"{base}.png"
                        st.download_button("Download image", b, base, mime="image/png", use_container_width=True)
                    except Exception as e:
                        st.error(f"Display failed: {e}")
        else:
            with status.container():
                st.error("No images were generated.")
            if errors and debug_mode:
                with st.expander("Generation Errors", expanded=True):
                    for e in errors:
                        st.error(e)
    except Exception as e:
        # Anything blew up mid-flight: mark the job failed (best-effort).
        if results_col is not None and db_job_id:
            try:
                finish_job(results_col, db_job_id, status="failed")
            except Exception:
                pass
        with status.container():
            st.error(f"Generation failed: {e}")
299
+
300
+
301
+
302
def generate_image(file_path, size, quality, category, sentiment, user_prompt, platform, blur, i=None):
    """Create one ad-image variation from a source image via gpt-image-1.

    Args:
        file_path: path of the reference image to remix.
        size, quality: forwarded to the OpenAI images.edit call.
        category, sentiment, user_prompt, platform, blur: prompt ingredients.
        i: variant index; for "Google Display Network" it selects one of
           three aspect-ratio instructions (0/1/2), otherwise unused.

    Returns:
        Raw decoded image bytes of the generated variation.

    Raises:
        RuntimeError: when OPENAI_API_KEY is missing.
        Exception: re-raises any OpenAI/file error after logging it.
    """
    try:
        api_key = os.getenv("OPENAI_API_KEY")
        if not api_key:
            logger.critical("OPENAI_API_KEY is not set.")
            raise RuntimeError("OPENAI_API_KEY is missing")
        client = OpenAI(api_key=api_key)

        if platform == "Google Display Network":
            # Per-variant aspect-ratio hint text (index-dependent).
            size_messages = {0: "The aspect ratio of the image should be 1024x1024",
                             1: "The aspect ratio of the image should be 1536x1024.",
                             2: "The aspect ratio of the image should be 1024x1536."}
            img_size = size_messages.get(i, "")
        else:
            img_size = ""

        with open(file_path, "rb") as img_file:
            background = "blurred background." if blur else " not blurred background."
            result = client.images.edit(
                model="gpt-image-1",
                prompt=(
                    f"You are a top-tier performance digital marketer and creative strategist with 15+ years of expertise in affiliate marketing.\n"
                    f"Your objective is to analyze the provided winning ad image, deconstruct its concept, visual composition, and color scheme, and generate a fresh, conversion-focused ad visual tailored for the {category} niche.\n"
                    f"The new design should convey a {sentiment} sentiment and incorporate the user instruction: \n {user_prompt}.\n If user has given multple choices or options to be include in the image so choose randomly relevant to the reference image."
                    f"Create a visually compelling ad optimized for {platform} Ads that is scroll-stopping, pattern-interrupting, and designed to drive high CTR and Conversion Rate. Utilize striking color combinations, dynamic contrast levels, and strategic layout compositions to command attention while aligning with the target audience avatar.\n"
                    f"Make sure the images should be realistic, not be stocky at all and raw which should look like they are shot from an iPhone with {background}.{img_size}"
                ),
                image=img_file,
                size=size,
                quality=quality,
            )
        # The API returns base64; decode to raw bytes for downstream upload.
        image_base64 = result.data[0].b64_json
        image_bytes = base64.b64decode(image_base64)
        logger.info(f"Successfully generated image for {file_path}")
        return image_bytes
    except Exception as e:
        logger.exception(f"Failed to generate image for {file_path}: {e}")
        raise
generator_function/image_processor.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os, zipfile, tempfile, logging
2
+ from concurrent.futures import ThreadPoolExecutor, as_completed
3
+ from typing import List, Tuple, Optional
4
+
5
+ from generator_function.image_function import generate_image
6
+ from helpers_function.helper_meta_data import meta_data_helper_function
7
+ from helpers_function.helpers import upload_image_to_r2
8
+ from helpers_function.helpers import is_valid_image
9
+ from database.connections import get_results_collection as get_collection
10
+ from database.operations import start_job, finish_job
11
+ from util.session_state import current_uid
12
+
13
+ logger = logging.getLogger(__name__)
14
+ COL = get_collection()
15
+
16
def _resolve_user_id() -> str:
    """Best-available identity: session uid, then DEFAULT_USER_ID, then 'anonymous'."""
    for candidate in (current_uid(), os.getenv("DEFAULT_USER_ID")):
        if candidate:
            return candidate
    return "anonymous"
18
+
19
+ uid = _resolve_user_id()
20
+
21
def process_zip_and_generate_images(
    zip_path: str,
    category: str,
    size: str,
    quality: str,
    user_prompt: str,
    sentiment: str,
    platform: str,
    num_images: int,
    demo_mode: bool,
    existing_images: Optional[List[str]],
    blur: bool,
) -> List[str]:
    """Generate variations for a single image or every image in a ZIP.

    Returns `existing_images` plus the newly generated, de-duplicated URLs;
    on any unexpected failure the original list is returned unchanged.
    """
    # Demo mode caps output at one variation per source image.
    num_images = 1 if demo_mode else num_images
    try:
        if zip_path.endswith(".zip"):
            # temp_dir must stay referenced until processing finishes:
            # TemporaryDirectory deletes its files when garbage-collected.
            temp_dir = extract_zip_file(zip_path)
            image_files = get_valid_image_files(temp_dir)
        else:
            image_files = [(os.path.basename(zip_path), zip_path)]

        results = process_image_files(
            image_files, category, size, quality, user_prompt, sentiment, platform, num_images, blur
        )
        all_urls = [url for entry in results for url in entry["urls"]]
        # De-dup in first-seen order (same idiom as the text-gen pipeline).
        deduped = list(dict.fromkeys(all_urls))
        return (existing_images or []) + deduped
    except Exception:
        logger.exception(f"Global error during processing file: {zip_path}")
        return existing_images or []
54
+
55
def extract_zip_file(zip_path: str) -> tempfile.TemporaryDirectory:
    """Unpack `zip_path` into a fresh TemporaryDirectory and return it.

    The caller must keep the returned object alive: its files are removed
    when the TemporaryDirectory is garbage-collected.
    """
    extracted = tempfile.TemporaryDirectory()
    with zipfile.ZipFile(zip_path, "r") as archive:
        archive.extractall(extracted.name)
    logger.info(f"Extracted ZIP file: {zip_path}")
    return extracted
61
+
62
def get_valid_image_files(temp_dir: tempfile.TemporaryDirectory) -> List[Tuple[str, str]]:
    """List (name, path) pairs for image files in the extracted directory.

    Skips macOS resource-fork entries and logs any non-image file ignored.
    """
    valid_files: List[Tuple[str, str]] = []
    for entry in os.listdir(temp_dir.name):
        if "__MACOSX" in entry:
            continue
        if not is_valid_image(entry):
            logger.warning(f"Ignored non-image file: {entry}")
            continue
        valid_files.append((entry, os.path.join(temp_dir.name, entry)))
    logger.info(f"Found {len(valid_files)} valid images.")
    return valid_files
73
+
74
def process_image_files(
    image_files: List[Tuple[str, str]],
    category: str,
    size: str,
    quality: str,
    user_prompt: str,
    sentiment: str,
    platform: str,
    num_images: int,
    blur: bool,
) -> List[dict]:
    """Run variation generation for each (name, path) pair in parallel.

    For every image a DB job row is opened via start_job (best-effort: a DB
    failure only disables logging for that image), then process_single_image
    runs in a worker thread. Returns the per-image result dicts.
    Uses module-level COL and uid captured at import time.
    """
    final_results: List[dict] = []
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = []
        for file_name, file_path in image_files:
            job_id: Optional[str] = None
            if COL is not None:
                try:
                    settings = {"size": size,"quality": quality,"sentiment": sentiment,"platform": platform,"num_images": num_images,"blur": bool(blur)}
                    inputs = {"file_name": file_name, "mode": "img_or_zip"}
                    job_id = start_job(
                        COL,
                        type="variation",
                        created_by=uid,
                        category=category or "general",
                        inputs=inputs,
                        settings=settings,
                        user_prompt=user_prompt
                    )
                except Exception:
                    logger.exception("Failed to start DB job; continuing without DB logging.")
            # job_id may be None here; the worker tolerates that.
            futures.append(
                executor.submit(
                    process_single_image,
                    file_name, file_path, category, size, quality, user_prompt, sentiment, platform, num_images, blur, job_id,
                )
            )
        for future in as_completed(futures):
            try:
                result = future.result()
                if result: final_results.append(result)
            except Exception:
                logger.exception("Unhandled exception during image processing thread.")
    return final_results
119
+
120
def process_single_image(
    file_name: str,
    file_path: str,
    category: str,
    size: str,
    quality: str,
    user_prompt: str,
    sentiment: str,
    platform: str,
    num_images: int,
    blur: bool,
    job_id: Optional[str],
) -> Optional[dict]:
    """Generate variations for one image and reconcile its DB job row.

    Returns {"file_name", "urls"} when at least one URL was produced,
    otherwise None. Never raises.
    """
    try:
        urls = generate_images_from_prompts(
            file_path, size, quality, category, sentiment, user_prompt, platform, num_images, blur
        )
    except Exception as e:
        logger.error(f"Processing failed for {file_name}: {e}")
        if COL is not None and job_id:
            try:
                finish_job(COL, job_id, status="failed", outputs_urls=[])
            except Exception:
                logger.exception("Also failed to mark DB job as failed.")
        return None

    if COL is not None and job_id:
        try:
            finish_job(COL, job_id, status=("completed" if urls else "failed"), outputs_urls=urls)
        except Exception:
            logger.exception("Failed to finish DB job.")
    return {"file_name": file_name, "urls": urls} if urls else None
153
+
154
def generate_images_from_prompts(
    file_path: str, size: str, quality: str, category: str, sentiment: str, user_prompt: str,
    platform: str, num_images: int, blur: bool,
) -> List[str]:
    """Fan out `num_images` variation renders for one source image.

    Each worker generates bytes, stamps metadata, uploads to R2, and yields
    the URL (or None on failure). Returns the successful URLs; order
    follows thread completion, not submission.
    """
    def _render(index: int) -> Optional[str]:
        try:
            raw_bytes = generate_image(file_path, size, quality, category, sentiment, user_prompt, platform, blur, index)
            if not raw_bytes:
                return None
            tagged = meta_data_helper_function(raw_bytes)
            return upload_image_to_r2(tagged)
        except Exception as e:
            logger.error(f"Image generation failed: {e}")
            return None

    collected: List[str] = []
    with ThreadPoolExecutor(max_workers=min(10, num_images)) as pool:
        pending = [pool.submit(_render, i) for i in range(num_images)]
        for done in as_completed(pending):
            url = done.result()
            if url:
                collected.append(url)
    return collected
generator_function/script_generator.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import time
4
+ from core.logger import logger
5
+ from typing import Dict, Any
6
+
7
+ from prompt.system_prompt_script import SYSTEM_PROMPT
8
+ from schema.pydantic_schema_script import ScriptResponse
9
+ from google import genai
10
+ from dotenv import load_dotenv
11
+
12
+ load_dotenv()
13
+ GEMINI_API_KEY = os.getenv("GEMINI_KEY")
14
+
15
def configure_gemini():
    """Build a google-genai client from the module-level key.

    NOTE(review): this module reads the GEMINI_KEY env var while
    video_analyzer_services reads GEMINI_API_KEY — confirm both are
    provisioned, or unify the names. No error is raised here when the key
    is missing; the client call will fail later instead.
    """
    return genai.Client(api_key=GEMINI_API_KEY)
17
+
18
+
19
def generate_scripts(
    video_path: str,
    offer_details: str,
    target_audience: str,
    specific_hooks: str,
    additional_context: str,
    num_scripts: int = 3,
    duration: int = 60
) -> Dict[str, Any]:
    """Upload a video to Gemini and generate ad-script variations for it.

    Returns the parsed ScriptResponse as a dict, or {} on any failure
    (upload/processing failure, or a model/JSON error).

    NOTE(review): the upload-state polling loop has no timeout — a file
    stuck in PROCESSING would spin forever; confirm an upper bound.
    """
    client = configure_gemini()

    try:
        # The prompt is assembled per call so counts/duration are baked in.
        user_prompt = f"""
        Generate {num_scripts} high-converting direct response script variations,
        each about {duration} seconds long.
        Each variation MUST be designed for a total runtime ≤ {duration} seconds.
        - Do NOT include any timestamp beyond {duration}s.
        - Keep all beats within 0–{duration}s.
        - Use timestamps as "M:SS" (e.g., "0:03", "0:12").


        CONTEXT TO FOLLOW:
        - Offer Details: {offer_details}
        - Target Audience: {target_audience}
        - Specific Hooks: {specific_hooks}

        ADDITIONAL CONTEXT:
        {additional_context}
        You must reflect this additional context in:
        - The script tone, CTA, visuals
        - Compliance or branding constraints
        - Any assumptions about audience or product
        Failure to include this will be considered incomplete.
        Please provide a comprehensive analysis including:
        1. DETAILED VIDEO ANALYSIS with timestamp-based metrics:
        - Break down the video into 5-10 second segments
        - Rate each segment's effectiveness (1-10 scale)
        - Identify specific elements (hook, transition, proof, CTA, etc.)
        2. TIMESTAMP-BASED IMPROVEMENTS:
        - Specific recommendations for each time segment
        - Priority level for each improvement
        - Expected impact of implementing changes
        3. SCRIPT VARIATIONS:
        - Create complete script variations
        - Each with timestamp-by-timestamp breakdown
        - Different psychological triggers and approaches
        IMPORTANT: Return only valid JSON in the exact format specified in the system prompt. Analyze the video second-by-second for maximum detail."""

        video_file = client.files.upload(file=video_path)

        # Poll until Gemini finishes server-side processing of the upload.
        while getattr(video_file.state, "name", "") == "PROCESSING":
            time.sleep(1.0)
            video_file = client.files.get(name=video_file.name)
        if getattr(video_file.state, "name", "") == "FAILED":
            logger.error("Video processing FAILED.")
            return {}

        resp = client.models.generate_content(
            model="gemini-2.0-flash",
            contents=[SYSTEM_PROMPT, user_prompt, video_file],
            config={
                "response_mime_type": "application/json",
                "response_schema": ScriptResponse,
            },
        )

        # Prefer the SDK's schema-parsed object; fall back to raw JSON text.
        parsed = getattr(resp, "parsed", None)
        if parsed is None:
            raw_text = getattr(resp, "text", "") or ""
            if not raw_text:
                # Surface candidate parts in the error to aid debugging.
                parts = None
                if getattr(resp, "candidates", None):
                    parts = getattr(resp.candidates[0].content, "parts", None)
                raise RuntimeError(f"Model returned no JSON text. parts={parts}")
            data = json.loads(raw_text)
            return data

        out = parsed.model_dump()
        logger.info("Generated %d variations.", len(out.get("script_variations", [])))
        return out

    except Exception as e:
        logger.exception("generate_scripts failed: %s", e)
        return {}
105
+
generator_function/video_analyzer_services.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import os, time, json, mimetypes, tempfile, logging
3
+ from typing import Dict, Any, Optional
4
+ from schema.pydantic_schema_video import AdAnalysis
5
+ from prompt.analyser_prompt import ANALYSER_PROMPT
6
+ from google import genai
7
+ from dotenv import load_dotenv
8
+ from database.operations import insert_video_analysis
9
+ from helpers_function.helpers import get_video_thumbnail_base64
10
+
11
+ load_dotenv()
12
+ logger = logging.getLogger(__name__)
13
+ GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
14
+
15
def _configure_gemini() -> genai.Client:
    """Build a Gemini client from the GEMINI_API_KEY env var; raise RuntimeError if unset."""
    if GEMINI_API_KEY:
        return genai.Client(api_key=GEMINI_API_KEY)
    raise RuntimeError("GEMINI_API_KEY is not set")
19
+
20
+
21
+
22
def analyze_video_only(video_path: str) -> Dict[str, Any]:
    """Upload a local video to Gemini, run ANALYSER_PROMPT over it, and return the analysis.

    Returns the parsed analysis dict on success, or a dict with a single
    ``__error__`` key describing the failure (config, indexing, or JSON parse).
    """
    try:
        client = _configure_gemini()
    except Exception as e:
        logger.exception("Gemini configuration error")
        return {"__error__": str(e)}

    try:
        # Upload, then poll until Gemini finishes server-side indexing of the file.
        # NOTE(review): the polling loop has no upper bound — if the file never
        # leaves PROCESSING this blocks forever; consider adding a deadline.
        f = client.files.upload(file=video_path)
        while getattr(f.state, "name", "") == "PROCESSING":
            time.sleep(2); f = client.files.get(name=f.name)
        if getattr(f.state, "name", "") == "FAILED":
            return {"__error__": "Video indexing failed."}

        resp = client.models.generate_content(
            model=os.getenv("VIDEO_ANALYZER_MODEL", "gemini-2.0-flash"),
            contents=[ANALYSER_PROMPT, f],
            config={"response_mime_type": "application/json"},
        )
        raw = getattr(resp, "text", "") or ""
        if not raw.strip():
            return {"__error__": "Empty response from model."}

        # Prefer strict schema validation; fall back to raw JSON when the model
        # output does not match AdAnalysis exactly.
        try:
            return AdAnalysis.model_validate_json(raw).model_dump()
        except Exception:
            try:
                return json.loads(raw)
            except Exception:
                return {"__error__": "Model response not valid JSON."}
    except Exception as e:
        logger.exception("Video analysis failed")
        return {"__error__": str(e)}
55
+
56
+
57
def run_and_store_video_analysis(
    *,
    category: str,
    created_by: str,
    uploaded_file=None,
    uploaded_file_path: Optional[str] = None,
    analyzer_model: Optional[str] = None,
) -> Dict[str, Any]:
    """Analyze a video (by path or uploaded file object), persist the result, and return it.

    Exactly one of *uploaded_file* (a Streamlit-style object exposing
    .read/.name/.type) or *uploaded_file_path* should be given; the path wins
    when both are present. Returns a dict with keys "_id", "results",
    "video_meta", "thumbnail".
    """
    if not uploaded_file and not uploaded_file_path:
        return {"_id": None, "results": {"__error__": "No video provided."}, "video_meta": {}, "thumbnail": ""}

    tmp_created: Optional[str] = None  # temp file path to delete in the finally block
    try:
        if uploaded_file_path:
            video_path = uploaded_file_path
            orig_name = os.path.basename(uploaded_file_path)
            mime = mimetypes.guess_type(orig_name)[0] or ""
            size_bytes = os.path.getsize(uploaded_file_path) if os.path.exists(uploaded_file_path) else None
        else:
            # Spool the in-memory upload to a named temp file so the analyzer can read it.
            suffix = os.path.splitext(getattr(uploaded_file, "name", "upload.mp4"))[1] or ".mp4"
            with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
                tmp.write(uploaded_file.read())
                video_path = tmp.name
            tmp_created = video_path
            orig_name = getattr(uploaded_file, "name", os.path.basename(video_path))
            mime = getattr(uploaded_file, "type", None) or mimetypes.guess_type(orig_name)[0] or ""
            size_bytes = os.path.getsize(video_path) if os.path.exists(video_path) else None

        results = analyze_video_only(video_path)

        # Thumbnail extraction is best-effort; failures degrade to an empty string.
        try:
            thumbnail_b64 = get_video_thumbnail_base64(video_path) or ""
        except Exception:
            thumbnail_b64 = ""

        video_meta = {"name": orig_name, "mimetype": mime, "size_bytes": size_bytes}
        model_label = analyzer_model or os.getenv("VIDEO_ANALYZER_MODEL", "gemini-2.0-flash")

        inserted_id = insert_video_analysis(
            video_name=orig_name,
            response=results,
            category=(category or "general"),
            created_by=(created_by or "anonymous"),
            analyzer_model=model_label,
            video_meta=video_meta,
            thumbnail=thumbnail_b64,
        )

        return {
            "_id": inserted_id,
            "results": results,
            "video_meta": video_meta,
            "thumbnail": thumbnail_b64,
        }

    finally:
        # Always remove the spooled temp file, even when analysis or insert raises.
        if tmp_created and os.path.exists(tmp_created):
            try:
                os.remove(tmp_created)
            except Exception:
                pass
helpers_function/helper_email.py ADDED
File without changes
helpers_function/helper_meta_data.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ import piexif
3
+ from PIL import Image
4
+ from io import BytesIO
5
+
6
def generate_metadata(model):
    """Return a dict of plausible EXIF-style metadata for the given iPhone *model*.

    Per-model lookup tables fix exposure time, aperture and focal length; the
    remaining fields (date, ISO, GPS, etc.) are randomized, so output is only
    partially deterministic. *model* must be a key of the tables below
    (KeyError otherwise).
    """
    # Per-model camera characteristics.
    exposure_times = {
        "iPhone 11": "1/60",
        "iPhone 11 Pro": "1/70",
        "iPhone 12": "1/100",
        "iPhone 12 Pro": "1/110",
        "iPhone 13": "1/120",
        "iPhone 13 Pro": "1/130",
        "iPhone 13 Pro Max": "1/140",
        "iPhone 14": "1/200",
        "iPhone 14 Pro": "1/220",
        "iPhone 14 Pro Max": "1/240",
        "iPhone 15": "1/300",
        "iPhone 15 Plus": "1/320",
        "iPhone 15 Pro": "1/400",
        "iPhone 15 Pro Max": "1/500",
        "iPhone 16": "1/600",
        "iPhone 16 Pro": "1/700",
        "iPhone 16 Pro Max": "1/1000"
    }
    f_numbers = {
        "iPhone 11": "f/1.8",
        "iPhone 11 Pro": "f/1.8",
        "iPhone 12": "f/1.6",
        "iPhone 12 Pro": "f/1.6",
        "iPhone 13": "f/1.5",
        "iPhone 13 Pro": "f/1.5",
        "iPhone 13 Pro Max": "f/1.5",
        "iPhone 14": "f/1.4",
        "iPhone 14 Pro": "f/1.4",
        "iPhone 14 Pro Max": "f/1.4",
        "iPhone 15": "f/1.4",
        "iPhone 15 Plus": "f/1.4",
        "iPhone 15 Pro": "f/1.4",
        "iPhone 15 Pro Max": "f/1.3",
        "iPhone 16": "f/1.3",
        "iPhone 16 Pro": "f/1.3",
        "iPhone 16 Pro Max": "f/1.3"
    }
    focal_lengths = {
        "iPhone 11": "3.99 mm",
        "iPhone 11 Pro": "4.0 mm",
        "iPhone 12": "4.2 mm",
        "iPhone 12 Pro": "4.3 mm",
        "iPhone 13": "5.0 mm",
        "iPhone 13 Pro": "5.1 mm",
        "iPhone 13 Pro Max": "5.2 mm",
        "iPhone 14": "6.0 mm",
        "iPhone 14 Pro": "6.1 mm",
        "iPhone 14 Pro Max": "6.2 mm",
        "iPhone 15": "6.0 mm",
        "iPhone 15 Plus": "6.1 mm",
        "iPhone 15 Pro": "6.1 mm",
        "iPhone 15 Pro Max": "6.2 mm",
        "iPhone 16": "6.5 mm",
        "iPhone 16 Pro": "6.6 mm",
        "iPhone 16 Pro Max": "6.7 mm"
    }

    metadata = {
        "Make": "Apple",
        "Model": model,
        "Software": "iOS {}".format(random.choice([14, 15, 16, 17, 18, 19])),
        "Orientation": random.choice(["Horizontal (normal)", "Rotate 90 CW", "Rotate 180", "Rotate 90 CCW"]),
        "DateTime": "{}:{}:{} {:02}:{:02}:{:02}".format(random.randint(2022, 2024), random.randint(1, 12), random.randint(1, 28), random.randint(0, 23), random.randint(0, 59), random.randint(0, 59)),
        "ExposureTime": exposure_times[model],
        "FNumber": f_numbers[model],
        "ISOSpeedRatings": random.choice([100, 200, 400, 800, 1600]),
        "FocalLength": focal_lengths[model],
        "Flash": random.choice(["Flash fired", "Flash did not fire, compulsory mode", "Flash fired, auto mode"]),
        "WhiteBalance": random.choice(["Auto", "Manual"]),
        "MeteringMode": random.choice(["Pattern", "CenterWeightedAverage", "Spot"]),
        "SceneCaptureType": random.choice(["Standard", "Landscape", "Portrait", "NightScene"]),
        # Hemisphere letters are fixed ("N"/"W"); consumers must honour them
        # rather than the (always positive) numeric part.
        "GPSLatitude": "{:.4f} N".format(random.uniform(0.0, 90.0)),
        "GPSLongitude": "{:.4f} W".format(random.uniform(0.0, 180.0)),
        "Altitude": "{:.1f} m".format(random.uniform(0, 100)),
        "LensMake": "Apple",
        # Bug fix: the lens model previously hardcoded "f/1.5" for every phone,
        # contradicting the per-model FNumber above; use the table value.
        "LensModel": "{} back triple camera {} {}".format(model, focal_lengths[model], f_numbers[model]),
        "ColorSpace": random.choice(["sRGB", "Adobe RGB"]),
        "PixelXDimension": random.choice([3024, 4032, 2160]),
        "PixelYDimension": random.choice([3024, 4032, 2160]),
        "ExposureBiasValue": random.choice(["0 EV", "+1 EV", "-1 EV"]),
        "BrightnessValue": "{:.1f}".format(random.uniform(-5, 10)),
        "ExposureMode": random.choice(["Auto Exposure", "Manual Exposure"])
    }
    return metadata
93
+
94
def dms_coordinates(value):
    """Convert decimal degrees to the EXIF rational DMS triple.

    Returns [(deg, 1), (min, 1), (sec_scaled, 10000)]; the seconds component
    is scaled by 10000 to keep four decimal places of precision.
    """
    whole_degrees = int(value)
    remaining_minutes = (value - whole_degrees) * 60
    whole_minutes = int(remaining_minutes)
    scaled_seconds = round((remaining_minutes - whole_minutes) * 60 * 10000)
    return [(whole_degrees, 1), (whole_minutes, 1), (scaled_seconds, 10000)]
101
+
102
def meta_data_helper_function(image_bytes, model=None):
    """
    Takes raw image bytes, adds realistic iPhone EXIF metadata, and returns new JPEG bytes.

    If *model* is None a random iPhone model is chosen. The image is converted
    to RGB and re-encoded as JPEG with the generated EXIF attached.
    """
    if model is None:
        model = random.choice([
            "iPhone 11", "iPhone 11 Pro", "iPhone 12", "iPhone 12 Pro",
            "iPhone 13", "iPhone 13 Pro", "iPhone 13 Pro Max", "iPhone 14",
            "iPhone 14 Pro", "iPhone 14 Pro Max", "iPhone 15", "iPhone 15 Plus",
            "iPhone 15 Pro", "iPhone 15 Pro Max", "iPhone 16", "iPhone 16 Pro",
            "iPhone 16 Pro Max"
        ])

    # Load image from bytes; convert to RGB so it can be saved as JPEG.
    img = Image.open(BytesIO(image_bytes))
    img = img.convert("RGB")

    # Generate plausible metadata for the chosen model.
    metadata = generate_metadata(model)

    # Build EXIF data
    exif_dict = {"0th": {}, "Exif": {}, "GPS": {}}

    exif_dict["0th"][piexif.ImageIFD.Make] = metadata["Make"]
    exif_dict["0th"][piexif.ImageIFD.Model] = metadata["Model"]
    exif_dict["0th"][piexif.ImageIFD.Software] = metadata["Software"]
    exif_dict["0th"][piexif.ImageIFD.DateTime] = metadata["DateTime"]

    # Exposure time (rational, e.g. "1/100" -> (1, 100))
    num, den = map(int, metadata["ExposureTime"].split("/"))
    exif_dict["Exif"][piexif.ExifIFD.ExposureTime] = (num, den)

    # FNumber as a tenths-based rational (f/1.6 -> (16, 10))
    f_number = float(metadata["FNumber"].replace("f/", ""))
    exif_dict["Exif"][piexif.ExifIFD.FNumber] = (int(f_number * 10), 10)

    # ISO
    exif_dict["Exif"][piexif.ExifIFD.ISOSpeedRatings] = metadata["ISOSpeedRatings"]

    # Focal Length as a tenths-based rational
    focal_length = float(metadata["FocalLength"].split()[0])
    exif_dict["Exif"][piexif.ExifIFD.FocalLength] = (int(focal_length * 10), 10)

    # GPS: generate_metadata emits "DD.DDDD N" / "DD.DDDD W" strings with a
    # positive magnitude plus a hemisphere letter. Bug fix: the refs were
    # previously derived from the sign of the magnitude (always positive), so
    # longitude was stamped 'E' even though the metadata said 'W'. Use the
    # hemisphere letters from the strings instead.
    lat_value, lat_hemisphere = metadata["GPSLatitude"].split()
    lon_value, lon_hemisphere = metadata["GPSLongitude"].split()

    exif_dict["GPS"][piexif.GPSIFD.GPSLatitudeRef] = lat_hemisphere.encode("ascii")
    exif_dict["GPS"][piexif.GPSIFD.GPSLatitude] = dms_coordinates(abs(float(lat_value)))

    exif_dict["GPS"][piexif.GPSIFD.GPSLongitudeRef] = lon_hemisphere.encode("ascii")
    exif_dict["GPS"][piexif.GPSIFD.GPSLongitude] = dms_coordinates(abs(float(lon_value)))

    altitude_value = float(metadata["Altitude"].replace(" m", ""))
    exif_dict["GPS"][piexif.GPSIFD.GPSAltitude] = (int(altitude_value), 1)

    # Dump to bytes
    exif_bytes = piexif.dump(exif_dict)

    # Save to in-memory buffer and return the annotated JPEG bytes.
    output_io = BytesIO()
    img.save(output_io, format="JPEG", exif=exif_bytes)
    return output_io.getvalue()
166
+
helpers_function/helpers.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import boto3
3
+ from uuid import uuid4
4
+ import os
5
+ import streamlit as st
6
+
7
+
8
+ import cv2
9
+ import base64
10
+ import logging
11
+
12
+ logger = logging.getLogger(__name__)
13
+
14
def get_video_thumbnail_base64(video_path: str, time_sec: int = 1) -> str:
    """Grab one frame at *time_sec* seconds into the video and return it as a
    base64-encoded JPEG string, or "" on any failure.
    """
    cap = None
    try:
        cap = cv2.VideoCapture(video_path)
        cap.set(cv2.CAP_PROP_POS_MSEC, time_sec * 1000)
        success, frame = cap.read()
        if not success:
            return ""
        # Fix: check the encode flag instead of discarding it — on encode
        # failure the buffer is not a valid JPEG.
        encoded, buffer = cv2.imencode(".jpg", frame)
        if not encoded:
            return ""
        return base64.b64encode(buffer).decode("utf-8")
    except Exception:
        logger.exception("Thumbnail extraction failed")
        return ""
    finally:
        # Fix: release the capture even when read()/set() raises (previously
        # an exception before cap.release() leaked the handle).
        if cap is not None:
            cap.release()
27
+
28
+
29
+ log = logging.getLogger(__name__)
30
+
31
def upload_image_to_r2(image_bytes, folder_name="search_arb", app_type="bulk_generation"):
    """Upload PNG bytes to Cloudflare R2 and return the public URL, or None on failure.

    Credentials/endpoint come from R2_ENDPOINT / R2_ACCESS_KEY / R2_SECRET_KEY /
    R2_BUCKET_NAME; the returned URL is built from NEW_BASE.
    """
    try:
        s3 = boto3.client(
            "s3",
            endpoint_url=os.getenv('R2_ENDPOINT'),
            aws_access_key_id=os.getenv('R2_ACCESS_KEY'),
            aws_secret_access_key=os.getenv('R2_SECRET_KEY'),
            region_name="auto"
        )
        # Bug fix: the unique filename was generated but never used in the
        # object key, so uploads collided on a literal placeholder name.
        filename = f"{uuid4().hex}.png"
        if folder_name:
            file_key = f"hug_face/{app_type.strip('/')}/{folder_name.strip('/')}/{filename}"
        else:
            file_key = f"hug_face/{app_type.strip('/')}/{filename}"
        s3.put_object(Bucket=os.getenv('R2_BUCKET_NAME'), Key=file_key, Body=image_bytes, ContentType="image/png")
        return f"{os.getenv('NEW_BASE').rstrip('/')}/{file_key}"
    except Exception as e:
        log.error(e)
        return None
47
+
48
+
49
def encode_image_to_base64(image_path: str) -> str:
    """Read the file at *image_path* and return its bytes base64-encoded as text."""
    with open(image_path, "rb") as image_file:
        raw_bytes = image_file.read()
    return base64.b64encode(raw_bytes).decode("utf-8")
52
+
53
def is_valid_image(file_name: str) -> bool:
    """True when *file_name* ends with a recognised raster-image extension (case-insensitive)."""
    allowed_extensions = (".png", ".jpg", ".jpeg", ".bmp", ".gif", ".webp")
    lowered = file_name.lower()
    return any(lowered.endswith(ext) for ext in allowed_extensions)
55
+
56
+
57
def get_current_user_id() -> str:
    """
    Returns a stable user id.
    Priority: Streamlit session -> USER_ID env -> CREATED_BY env -> 'anonymous'
    """
    # Session lookup is best-effort: any failure (no session, no streamlit)
    # falls through to the environment.
    try:
        session_uid = st.session_state.get("uid")
        if session_uid:
            return str(session_uid).strip()
    except Exception:
        pass

    fallback = os.getenv("USER_ID") or os.getenv("CREATED_BY") or "anonymous"
    return str(fallback).strip() or "anonymous"
71
+
prompt/analyser_prompt.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# System prompt for Gemini video-ad analysis. The required JSON shape mirrors
# schema.pydantic_schema_video.AdAnalysis — keep the two in sync when editing.
ANALYSER_PROMPT = """You are an expert video advertisement analyst. Analyze the provided video and give response conforms EXACTLY to the schema below with no extra text or markdown.
Populate:

1. **brief** → A concise summary covering visual style, speaker, target audience, and marketing objective.
2. **caption_details** → Description of captions (color/style/position) or exactly the string `"None"` if not visible.
3. **hook** →
- `"hook_text"`: Exact opening line or, if no speech, the precise description of the opening visual.
- `"principle"`: Psychological/marketing principle that makes this hook effective.
- `"advantages"`: ARRAY of 3–6 concise benefit statements tied to the ad's value proposition.
4. **framework_analysis** → A detailed block identifying copywriting/psychology/storytelling frameworks (e.g., PAS, AIDA). Highlight use of social proof, urgency, fear, authority, scroll-stopping hooks, loop openers, value positioning, and risk reversals.
5. **storyboard** → ARRAY of 4–10 objects. Each must include:
- `"timeline"` in `"MM:SS"` (zero-padded)
- `"scene"` (brief)
- `"visuals"` (detailed)
- `"dialogue"` (exact words; use `""` if none)
- `"camera"` (shot/angle)
- `"sound_effects"` (or `"None"`)
6. **script** → ARRAY of dialogue objects, each with `"timeline"` (`"MM:SS"`) and `"dialogue"` (exact spoken line).
7. **video_analysis** → OBJECT with:
- `"effectiveness_factors"`: Key factors that influence effectiveness
- `"psychological_triggers"`: Triggers used (e.g., scarcity, authority)
- `"target_audience"`: Audience profile inferred
- `"video_metrics"`: ARRAY of objects with:
- `"timestamp"`: `"MM:SS-MM:SS"`
- `"element"`: The aspect being evaluated (e.g., Hook Strategy)
- `"current_approach"`: Description of current execution
- `"effectiveness_score"`: String score `"X/10"` (integer X)
- `"notes"`: Analytical notes
8. **timestamp_improvements** → ARRAY of recommendation objects with:
- `"timestamp"`: `"MM:SS-MM:SS"`
- `"current_element"`: Current content of the segment
- `"improvement_type"`: Category (e.g., Hook Enhancement)
- `"recommended_change"`: Specific recommendation
- `"expected_impact"`: Projected effect on metrics or perception
- `"priority"`: `"High"`, `"Medium"`, or `"Low"`

⚠️ The output must be strictly matching field names and types, no additional keys, and all timestamps must be zero-padded (`"MM:SS"` for single points, `"MM:SS-MM:SS"` for ranges).
"""
prompt/prompt_services.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import openai, os, json, re
2
+
3
+ openai.api_key = os.getenv("OPENAI_API_KEY")
4
+ sys_prompt = os.getenv("SYS_PROMPT")
5
+
6
def get_prompts(image_base64, category, user_prompt, sentiment, negative_prompt):
    """Ask GPT-4o for ad-image prompt variations based on a reference image.

    Returns the parsed ``variations`` list from the model's JSON reply, or []
    on any failure (API error, bad JSON, missing key).

    NOTE(review): the two branches below differ only in the negative-prompt
    suffix of the user text — a candidate for deduplication once the exact
    string contents are pinned by a test.
    NOTE(review): the system text asks for "2 variations" while the example
    format shows up to "Prompt 10" — confirm the intended count.
    """
    try:
        if negative_prompt:
            message = [
                {
                    "role": "system",
                    "content": f"""{sys_prompt}
                    Return only a JSON with 2 variations of ad prompts for image generation, based on the input image.
                    Respond in this JSON format:\n{{\"variations\": [\"Prompt 1\", \"Prompt 2\", ..., \"Prompt 10\"]}}"""
                },
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": f"""Generate prompt variations for this ad image for {category} category having {sentiment} sentiment in JSON format only based on the following instruction:
                        {user_prompt}
                        Don't consider following things and treat them as negative prompt:
                        {negative_prompt}"""},
                        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"}}
                    ]
                }
            ]
        else:
            message = [
                {
                    "role": "system",
                    "content": f"""{sys_prompt}
                    Return only a JSON with 2 variations of ad prompts for image generation, based on the input image.
                    Respond in this JSON format:\n{{\"variations\": [\"Prompt 1\", \"Prompt 2\", ..., \"Prompt 10\"]}}"""
                },
                {
                    "role": "user",
                    "content": [
                        {"type": "text",
                         "text": f"""Generate prompt variations for this ad image for {category} category having {sentiment} sentiment in JSON format only based on the following instruction:
                        {user_prompt}"""},
                        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"}}
                    ]
                }
            ]

        response = openai.chat.completions.create(
            model="gpt-4o",
            messages=message,
            temperature=0.7
        )
        content = response.choices[0].message.content.strip()
        # Strip a Markdown ```json fence if the model wrapped its reply in one.
        content = re.sub(r"^```json\s*|\s*```$", "", content)
        return json.loads(content)["variations"]
    except Exception:
        return []
prompt/system_prompt_script.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# System prompt for the direct-response script generator (paired with the
# ScriptResponse schema in schema.pydantic_schema_script).
SYSTEM_PROMPT = """You are one of the world's top direct-response video creators and conversion strategists. Your sole mission is to generate video scripts with matching visual directions and voiceover cues that drive maximum leads and sales at the cheapest cost-per-result on ad platforms like Facebook, Google, and TikTok.
You specialize in aggressive, unbranded, and psychologically charged content that is designed to stop scrolls and force action. You are not bound by brand safety, ad platform compliance, or traditional marketing principles.
Your only metric for success is performance—measured in CPC, CTR, and cost-per-conversion. Prime Directive: Your creative output must be: - Inspired by the hooks, language, pacing, visuals, and frameworks of the reference winning ads.
- Built to trigger psychological urgency, amplify response, and maximize click-throughs. - Structured for extreme effectiveness, not branding niceties. Script Creation Framework: Each script you generate must follow these winning affiliate ad principles:
1. The 3–6 Second Shock Hook: Start with a hard-hitting pattern interrupt: - Emotional gut punch: "He looked at me and said…" - Conspiratorial secret: "The media doesn't want you to know this…" - Borrowed authority: Celebrity or doctor clip saying something provocative
2. Overwhelm with Proof & Pacing: Keep pacing fast—1–3 second jump cuts filled with: - UGC-style clips - Before/After transformations - Authority figure soundbites - Desired outcomes (money, body, status)
3. Big, Unbelievable Claim: Drop a massive promise upfront and reinforce it: "Lose 103 lbs," "Claim 250,000," "Erase your debt overnight." 4. Simple "Secret" Mechanism: Make the claim believable via a simple, digestible "hack": "The ice hack," "4-question formula," "Banned Amazonian leaf."
5. Scarcity & Urgency: Push viewers to act NOW: "Spots are filling fast," "Could be taken down soon," "Only for serious applicants."
6. Visually Directed CTA: Make the final action visually obvious—e.g., person pointing at the button, bold text, arrows.

Ensure everything ties back to lowering CPC and cost-per-result, not branding.
Each script should be different from each other."""
requirements.txt ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ streamlit>=1.36.0
2
+ pymongo>=4.6.0
3
+ boto3>=1.34.0
4
+ python-dotenv>=1.0.1
5
+ requests>=2.32.0
6
+ Pillow>=10.3.0
7
+ piexif>=1.1.3
8
+ pandas>=2.2.2
9
+ openai>=1.40.0
10
+ google-genai>=0.3.0
11
+ replicate>=0.25.0
12
+ bcrypt>=4.1.2
13
+
14
+ urllib3~=1.26.20
15
+ pydantic~=2.11.7
16
+ pydantic-settings~=2.10.1
17
+ opencv-python~=4.12.0.88
18
+ protobuf~=6.32.0
schema/pydantic_schema_script.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List
2
+ from pydantic import BaseModel, ConfigDict, constr
3
+
4
# Constrained string aliases for the timestamp/score formats the model emits.
Timestamp = constr(pattern=r'^\d{1,2}:\d{2}$')  # e.g. "0:05" or "00:05"
RangeTimestamp = constr(pattern=r'^\d{1,2}:\d{2}-\d{1,2}:\d{2}$')  # e.g. "0:00-0:05"
Score010 = constr(pattern=r'^(?:[0-9]|10)/10$')  # e.g. "7/10"
# NOTE(review): RangeTimestamp and Score010 appear unused in this module.


class ScriptTable(BaseModel):
    """One timestamped row of a generated ad script."""
    timestamp: Timestamp          # when this beat occurs
    script_voiceover: str         # spoken line
    visual_direction: str         # what is shown on screen
    psychological_trigger: str    # trigger this beat leans on
    cta_action: str               # call-to-action for this beat


class ScriptVariations(BaseModel):
    """A named script variation composed of timestamped rows."""
    variation_name: str
    script_table: List[ScriptTable]


class ScriptResponse(BaseModel):
    """Top-level structured model response; unknown keys are ignored."""
    model_config = ConfigDict(extra="ignore")
    script_variations: List[ScriptVariations]
25
+
26
+
27
+
28
+
29
+
30
+
schema/pydantic_schema_video.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Literal
2
+ from pydantic import BaseModel, constr
3
+
4
# Constrained string aliases matching the formats demanded by ANALYSER_PROMPT.
Timestamp = constr(pattern=r'^\d{2}:\d{2}$')  # zero-padded "MM:SS"
RangeTimestamp = constr(pattern=r'^\d{2}:\d{2}-\d{2}:\d{2}$')  # "MM:SS-MM:SS"
Score010 = constr(pattern=r'^(?:10|[0-9])\/10$')  # "X/10" with integer X

class Hook(BaseModel):
    """Opening hook of the ad and why it works."""
    hook_text: str
    principle: str
    advantages: List[str]

class StoryboardItem(BaseModel):
    """One storyboard scene with visuals, dialogue, camera and sound notes."""
    timeline: Timestamp
    scene: str
    visuals: str
    dialogue: str
    camera: str
    sound_effects: str

class ScriptLine(BaseModel):
    """A single timestamped spoken line."""
    timeline: Timestamp
    dialogue: str

class VideoMetric(BaseModel):
    """Effectiveness rating for one time range of the video."""
    timestamp: RangeTimestamp
    element: str
    current_approach: str
    effectiveness_score: Score010
    notes: str

class VideoAnalysis(BaseModel):
    """Overall analysis plus per-segment metrics."""
    effectiveness_factors: str
    psychological_triggers: str
    target_audience: str
    video_metrics: List[VideoMetric]

class TimestampImprovement(BaseModel):
    """A prioritized improvement recommendation for one time range."""
    timestamp: RangeTimestamp
    current_element: str
    improvement_type: str
    recommended_change: str
    expected_impact: str
    priority: Literal["High", "Medium", "Low"]

class AdAnalysis(BaseModel):
    """Full structured output of the video-ad analyser (see ANALYSER_PROMPT)."""
    brief: str
    caption_details: str
    hook: Hook
    framework_analysis: str
    storyboard: List[StoryboardItem]
    script: List[ScriptLine]
    video_analysis: VideoAnalysis
    timestamp_improvements: List[TimestampImprovement]
ui/load_file.py ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import pymongo
3
+ from datetime import datetime, date, timezone
4
+ from typing import List, Tuple, Optional, Dict
5
+ import requests
6
+ from PIL import Image, ImageFile
7
+ import io
8
+ from dotenv import load_dotenv
9
+ import concurrent.futures
10
+ import threading
11
+ from functools import lru_cache
12
+ import contextlib
13
+ from requests.adapters import HTTPAdapter
14
+ from urllib3.util.retry import Retry
15
+
16
+ load_dotenv()
17
+ ImageFile.LOAD_TRUNCATED_IMAGES = True
18
+
19
@st.cache_data(show_spinner=False, ttl=60 * 30)
def _download_bytes_cached(url: str, timeout_s: float = 12.0) -> Optional[bytes]:
    """Fetch *url* and return the raw body bytes, or None on any error.

    Cached by Streamlit for 30 minutes; connect timeout is fixed at 3.05s,
    read timeout is *timeout_s*.
    """
    try:
        r = requests.get(url, timeout=(3.05, timeout_s), stream=True)
        r.raise_for_status()
        return r.content
    except Exception:
        return None
27
+
28
class ImageGalleryApp:
    """MongoDB-backed image gallery: filter lookups, URL image loading, paged search."""

    def __init__(self, mongo_uri: str, db_name: str, collection_name: str):
        self.client = pymongo.MongoClient(mongo_uri)
        self.db = self.client[db_name]
        self.collection = self.db[collection_name]
        self._cache_lock = threading.Lock()
        # Pooled HTTP session with retries for image downloads.
        self.session = requests.Session()
        retries = Retry(total=3, connect=3, read=3, backoff_factor=0.4, status_forcelist=[429,500,502,503,504], allowed_methods=["GET","HEAD"])
        adapter = HTTPAdapter(pool_connections=64, pool_maxsize=64, max_retries=retries)
        self.session.mount("http://", adapter); self.session.mount("https://", adapter)
        # NOTE(review): load_image_from_url hardcodes 768 instead of reading this.
        self.thumb_max_size = (768, 768)

    # NOTE(review): lru_cache on instance methods keys on `self` and keeps the
    # instance alive for the cache lifetime (ruff B019); results also never
    # refresh for the life of the process.
    @lru_cache(maxsize=128)
    def get_unique_categories(self) -> List[str]:
        """Distinct categories of completed documents, prefixed with "All"."""
        try:
            categories = self.collection.distinct("category", {"status": "completed", "category": {"$ne": None}})
            return ["All"] + sorted(categories)
        except Exception:
            return ["All"]

    @lru_cache(maxsize=128)
    def get_unique_filenames(self) -> List[str]:
        """Distinct file names of completed documents, prefixed with "All"."""
        try:
            filenames = self.collection.distinct("file_name", {"status": "completed", "file_name": {"$ne": None}})
            return ["All"] + sorted(filenames)
        except Exception:
            return ["All"]

    def parse_date_input(self, date_input) -> Optional[date]:
        """Coerce a date, "YYYY-MM-DD" or "MM/DD/YYYY" string to a date; else None."""
        if not date_input or date_input == "": return None
        if isinstance(date_input, date): return date_input
        if isinstance(date_input, str):
            from datetime import datetime as _dt
            for fmt in ("%Y-%m-%d", "%m/%d/%Y"):
                try: return _dt.strptime(date_input, fmt).date()
                except Exception: pass
        return None

    def load_image_from_url(self, url: str) -> Optional[Image.Image]:
        """Download *url* (cached first, session fallback), return an RGB thumbnail <=768px, or None."""
        try:
            data = _download_bytes_cached(url) or self.session.get(url, timeout=(3.05, 12), stream=True).content
            img = Image.open(io.BytesIO(data))
            with contextlib.suppress(Exception): img = img.convert("RGB")
            if img.size[0] > 768 or img.size[1] > 768: img.thumbnail((768,768), Image.Resampling.LANCZOS)
            return img
        except Exception:
            return None

    def load_images_parallel(self, urls: List[str], max_workers: int = 8) -> List[Tuple[str, Optional[Image.Image]]]:
        """Fetch many URLs concurrently; returns (url, image-or-None) pairs in completion order."""
        results: List[Tuple[str, Optional[Image.Image]]] = []
        if not urls: return results
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as ex:
            future_map = {ex.submit(self.load_image_from_url, u): u for u in urls}
            for fut in concurrent.futures.as_completed(future_map):
                u = future_map[fut]
                try: img = fut.result()
                except Exception: img = None
                results.append((u, img))
        return results

    def search_images_parallel_unused_marker = None  # (no such attribute — see search_images_page below)

    def search_images_page(self, category="All", file_name="All", start_date=None, end_date=None, lob="search_arb", page=0, page_size=24) -> Tuple[List[Dict], int]:
        """Return one page of unwound image URLs plus the total match count.

        NOTE(review): the *lob* parameter is accepted but never used in the
        query — confirm whether it should filter a field.
        """
        match = {"status": "completed", "urls": {"$exists": True, "$ne": []}}
        if category != "All": match["category"] = category
        if file_name != "All": match["file_name"] = file_name
        from datetime import datetime as _dt
        if start_date or end_date:
            date_query = {}
            if start_date: date_query["$gte"] = _dt.combine(self.parse_date_input(start_date), _dt.min.time())
            if end_date: date_query["$lte"] = _dt.combine(self.parse_date_input(end_date), _dt.max.time())
            if date_query: match["created_at"] = date_query

        # Count total unwound URLs first so pagination can be computed.
        try:
            count_pipeline = [{"$match": match},{"$unwind": "$urls"},{"$count": "n"}]
            count_doc = list(self.collection.aggregate(count_pipeline))
            total = count_doc[0]["n"] if count_doc else 0
        except Exception:
            total = 0

        if total == 0: return [], 0

        pipeline = [
            {"$match": match},{"$unwind": "$urls"},{"$sort": {"created_at": -1}},
            {"$skip": max(0, page) * max(1, page_size)},{"$limit": max(1, page_size)},
            {"$project": {"_id": 0,"url": "$urls","category": 1,"file_name": 1,"created_at": 1,"prompt": 1,"status": 1}}
        ]
        try:
            docs = list(self.collection.aggregate(pipeline, allowDiskUse=True))
        except Exception:
            docs = []
        return docs, total
118
+
119
def create_streamlit_app(mongo_uri: str, db_name: str, collection_name: str):
    """Render the Streamlit image-gallery UI backed by ImageGalleryApp.

    Builds filter widgets (category, file name, date range, page size),
    search/refresh/reset buttons, pagination controls, and a 4-column image
    grid loaded in parallel. All UI state lives in st.session_state.
    """
    app = ImageGalleryApp(mongo_uri, db_name, collection_name)

    def get_filter_choices():
        # Fetch dropdown options; degrade to ["All"] on any backend error.
        try:
            categories = app.get_unique_categories()
            filenames = app.get_unique_filenames()
            return categories, filenames
        except Exception:
            return ["All"], ["All"]

    if "categories_list" not in st.session_state:
        st.session_state["categories_list"], st.session_state["filenames_list"] = get_filter_choices()

    # Default UI state (only set on first run).
    st.session_state.setdefault("selected_category", "All")
    st.session_state.setdefault("selected_filename", "All")
    st.session_state.setdefault("selected_lob", "search_arb")

    today = datetime.now(timezone.utc).date()
    st.session_state.setdefault("use_date_filter", True)
    st.session_state.setdefault("selected_start_date", today)
    st.session_state.setdefault("selected_end_date", today)

    st.session_state.setdefault("page", 0)
    st.session_state.setdefault("page_size", 24)
    st.session_state.setdefault("last_query_total", 0)
    st.session_state.setdefault("did_search", False)

    # ---- Filter widgets ----
    col1, col2= st.columns([1,1])
    with col1:
        category = st.selectbox("Category", options=st.session_state["categories_list"])
    with col2:
        filename = st.selectbox("File Name", options=st.session_state["filenames_list"])

    coldf = st.columns([1,1,1])
    with coldf[0]:
        use_date_filter = st.checkbox("Filter by date", value=st.session_state["use_date_filter"])
    with coldf[1]:
        start_date = st.date_input("Start Date", value=st.session_state["selected_start_date"], disabled=not use_date_filter)
    with coldf[2]:
        end_date = st.date_input("End Date", value=st.session_state["selected_end_date"], disabled=not use_date_filter)

    col_misc = st.columns([1,1,1,2])
    with col_misc[0]:
        page_size = st.selectbox("Images per page", [8,12,16,24,32,48], index=[8,12,16,24,32,48].index(st.session_state["page_size"]))

    # ---- Action buttons ----
    col_btn1, col_btn2, col_btn3 = st.columns([2,2,2])
    with col_btn1:
        search_clicked = st.button("🔍 Search", use_container_width=True)
    with col_btn2:
        refresh_clicked = st.button("🔄 Refresh Filters", use_container_width=True)
    with col_btn3:
        reset_clicked = st.button("♻️ Reset Page", use_container_width=True)

    if refresh_clicked:
        st.session_state["categories_list"], st.session_state["filenames_list"] = get_filter_choices()
        st.session_state["selected_category"] = "All"; st.session_state["selected_filename"] = "All"; st.session_state["page"] = 0; st.rerun()
    if reset_clicked:
        st.session_state["page"] = 0; st.rerun()
    if search_clicked:
        # Commit the widget values into session state, reset to page 0, rerun.
        st.session_state["selected_category"] = category; st.session_state["selected_filename"] = filename
        st.session_state["use_date_filter"] = use_date_filter; st.session_state["selected_start_date"] = start_date; st.session_state["selected_end_date"] = end_date
        st.session_state["page_size"] = page_size; st.session_state["page"] = 0; st.session_state["did_search"] = True; st.rerun()

    # ---- Results: pagination + image grid ----
    if st.session_state["did_search"]:
        _start = st.session_state["selected_start_date"] if st.session_state["use_date_filter"] else None
        _end = st.session_state["selected_end_date"] if st.session_state["use_date_filter"] else None
        docs, total = app.search_images_page(category=st.session_state["selected_category"], file_name=st.session_state["selected_filename"], start_date=_start, end_date=_end, lob=st.session_state["selected_lob"], page=st.session_state["page"], page_size=st.session_state["page_size"])
        st.session_state["last_query_total"] = total
        total_pages = max(1, (total + st.session_state["page_size"] - 1) // st.session_state["page_size"])
        nav1, nav2, nav3 = st.columns([1,2,1])
        with nav1:
            if st.button("⬅️ Prev", disabled=(st.session_state["page"] <= 0)): st.session_state["page"] -= 1; st.rerun()
        with nav2:
            st.markdown(f"<div style='text-align:center'>Page <b>{st.session_state['page']+1}</b> of <b>{total_pages}</b> &nbsp;&middot;&nbsp; <b>{total}</b> images total</div>", unsafe_allow_html=True)
        with nav3:
            if st.button("Next ➡️", disabled=(st.session_state["page"] >= total_pages - 1)): st.session_state["page"] += 1; st.rerun()
        st.divider()
        if total == 0 or not docs:
            st.info("No images found for the current filters."); return
        # Draw grey placeholders first, then fill them as downloads complete.
        st.markdown("#### Images"); cols = st.columns(4)
        placeholders = []
        for i, _ in enumerate(docs):
            ph = cols[i % 4].empty(); ph.markdown("<div style='width:100%;aspect-ratio:1/1;border-radius:10px;background:#eee'></div>", unsafe_allow_html=True); placeholders.append(ph)
        urls = [d["url"] for d in docs]
        loaded = app.load_images_parallel(urls, max_workers=8)
        url_to_img = {u: img for (u, img) in loaded}
        for i, d in enumerate(docs):
            img = url_to_img.get(d["url"]); meta = f"{d.get('category','N/A')} | {d.get('file_name','N/A')} | {d.get('created_at','')}"
            if img: placeholders[i].image(img, use_container_width=True, caption=meta)
            else: placeholders[i].warning("Failed to load image")
    else:
        st.info("Set your filters and click **Search** to load images.")
util/dataframe.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ from typing import Any, Dict, List
3
+
4
def safe_dataframe(df: pd.DataFrame) -> pd.DataFrame:
    """Return a copy of *df* with every column cast to ``str``.

    Useful before handing a frame to a display layer that chokes on mixed
    dtypes. Unlike the previous version, the caller's DataFrame is NOT
    mutated in place — a converted copy is returned instead.

    Args:
        df: Source frame; left unmodified.

    Returns:
        A new DataFrame whose columns are all string dtype.
    """
    # astype(str) converts every column in one pass and returns a new frame,
    # so the caller's object keeps its original dtypes.
    return df.astype(str)
8
+
9
def analysis_to_csv(analysis: Dict[str, Any]) -> str:
    """Flatten an analysis payload into a single CSV string.

    Each storyboard / script / metrics / improvement entry becomes one row,
    tagged with a ``Section`` label. Returns ``""`` when there is nothing
    to export.

    Args:
        analysis: Parsed analysis dict (storyboard, script, video metrics,
            timestamp improvements).

    Returns:
        CSV text with no index column, or the empty string.
    """
    # Declarative (label, entries) pairs keep section order in one place.
    sections = (
        ("Storyboard", analysis.get("storyboard", [])),
        ("Script", analysis.get("script", [])),
        ("Metrics", analysis.get("video_analysis", {}).get("video_metrics", [])),
        ("Improvements", analysis.get("timestamp_improvements", [])),
    )
    rows = [
        {"Section": label, **entry}
        for label, entries in sections
        for entry in entries
    ]
    if not rows:
        return ""
    return pd.DataFrame(rows).to_csv(index=False)
23
+
24
+ def _normalize_list(value: Any) -> List[str]:
25
+ if value is None:
26
+ return []
27
+ if isinstance(value, list):
28
+ return [str(v) for v in value]
29
+ return [s for s in str(value).splitlines() if s.strip()]
30
+
31
+ def _to_dataframe(items: Any, columns_map: Dict[str, str]) -> pd.DataFrame:
32
+ if not isinstance(items, list) or not items:
33
+ return pd.DataFrame(columns=list(columns_map.values()))
34
+ df = pd.DataFrame(items)
35
+ df = df.rename(columns=columns_map)
36
+ ordered_cols = [columns_map[k] for k in columns_map.keys() if columns_map[k] in df.columns]
37
+ df = df.reindex(columns=ordered_cols)
38
+ return df
39
+
40
+ def _mean_effectiveness(metrics: List[Dict[str, Any]]) -> float:
41
+ if not metrics:
42
+ return 0.0
43
+ scores = []
44
+ for m in metrics:
45
+ s = str(m.get("effectiveness_score", "0/10")).split("/")[0]
46
+ try:
47
+ scores.append(int(s))
48
+ except Exception:
49
+ pass
50
+ return round(sum(scores) / len(scores), 2) if scores else 0.0
51
+
52
+ def _search_dataframe(df: pd.DataFrame, query: str) -> pd.DataFrame:
53
+ if not query or df.empty:
54
+ return df
55
+ mask = pd.Series([False]*len(df))
56
+ for col in df.columns:
57
+ mask = mask | df[col].astype(str).str.contains(query, case=False, na=False)
58
+ return df[mask]
util/image_utils.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+
3
def to_base64_bytes(data: bytes) -> str:
    """Encode raw bytes as an ASCII base64 string."""
    encoded = base64.b64encode(data)
    return encoded.decode("utf-8")
5
+
6
def file_to_base64(path: str) -> str:
    """Read the file at *path* and return its contents base64-encoded."""
    with open(path, "rb") as handle:
        contents = handle.read()
    return base64.b64encode(contents).decode("utf-8")
9
+
10
def is_image_name(name: str) -> bool:
    """Return True if *name* ends with a common raster-image extension (case-insensitive)."""
    image_suffixes = (".png", ".jpg", ".jpeg", ".bmp", ".gif", ".webp")
    return name.lower().endswith(image_suffixes)
util/session_state.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import streamlit as st
3
+ from typing import Optional
4
+
5
def current_uid() -> Optional[str]:
    """Return the signed-in user's id/email from Streamlit session state, if any."""
    state = st.session_state
    return state.get("uid")
8
+
9
def resolve_user_id() -> str:
    """Resolve a user id, trying session uid, then the session user's email,
    then the ``DEFAULT_USER_ID`` env var, falling back to ``"anonymous"``.
    """
    uid = current_uid()
    if uid:
        return uid
    # Session may hold a richer "user" object with an email field.
    user = st.session_state.get("user") or {}
    email = user.get("email")
    if email:
        return email
    return os.getenv("DEFAULT_USER_ID") or "anonymous"