Commit: Upload 10 files
Files changed:

- app_pages/image_analyser.py +129 -0
- app_pages/image_analyser_library.py +103 -0
- app_pages/image_generation.py +42 -17
- app_pages/image_generation_library.py +44 -27
- app_pages/script_library.py +42 -36
- app_pages/text_image_generation.py +2 -1
- app_pages/text_image_generation_library.py +25 -16
- app_pages/video_analyser.py +28 -19
- app_pages/video_library.py +10 -10
app_pages/image_analyser.py
ADDED
@@ -0,0 +1,129 @@
from __future__ import annotations
import os, base64, json
import streamlit as st
from typing import Any, Dict
from openai import OpenAI
from dotenv import load_dotenv

from database.operations import insert_image_analysis
from database.connections import get_image_collection
from schema.pydantic_schema_image import AdAnalysis
from prompt.image_analyzer_prompt import SYSTEM_PROMPT, USER_PROMPT_TEMPLATE
from components.image_render_analysis import render_analyzer_results

load_dotenv()
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))


def get_analysis(img_bytes: bytes, img_ext: str) -> Dict[str, Any]:
    b64 = base64.b64encode(img_bytes).decode("utf-8")
    data_uri = f"data:image/{img_ext};base64,{b64}"

    messages = [
        {"role": "system", "content": [{"type": "text", "text": SYSTEM_PROMPT}]},
        {
            "role": "user",
            "content": [
                {"type": "text", "text": USER_PROMPT_TEMPLATE},
                {"type": "image_url", "image_url": {"url": data_uri}},
            ],
        },
    ]

    completion = client.chat.completions.parse(
        model="gpt-4o",
        messages=messages,
        response_format=AdAnalysis,
    )

    ad_analysis = completion.choices[0].message.parsed
    return ad_analysis.model_dump()


def render_image_analyser_page(uid: str) -> None:
    st.subheader("Image Analyzer")

    category = st.text_input("Category", key="ia_category")
    uploaded = st.file_uploader(
        "Upload image",
        type=["jpg", "jpeg", "png", "webp"],
        key="ia_upload",
    )

    img_bytes, img_ext, file_name, thumb_b64 = None, "jpg", None, ""

    if uploaded is not None:
        img_bytes = uploaded.read()
        file_name = uploaded.name
        if "." in file_name:
            img_ext = file_name.rsplit(".", 1)[-1].lower()

        thumb_b64 = base64.b64encode(img_bytes).decode("utf-8")
        st.caption("Preview")
        st.markdown(
            f"""
            <img src="data:image/{img_ext};base64,{thumb_b64}"
                 alt="Uploaded image"
                 style="max-width: 400px; border-radius: 8px;"/>
            """,
            unsafe_allow_html=True,
        )

    go = st.button("Analyze", key="ia_analyse")

    if go:
        if not img_bytes or not category.strip():
            st.warning("Please provide a category and upload an image.")
        else:
            try:
                with st.spinner("Analyzing image..."):
                    result = get_analysis(img_bytes, img_ext)
                st.session_state["analysis_result"] = result
                st.session_state["file_name"] = file_name
                st.session_state["thumb_b64"] = thumb_b64
                st.session_state["category"] = category
            except Exception as e:
                st.error(f"Analysis failed: {e}")

    if "analysis_result" in st.session_state:
        result = st.session_state["analysis_result"]
        file_name = st.session_state.get("file_name", "uploaded_image")
        thumb_b64 = st.session_state.get("thumb_b64", "")
        category = st.session_state.get("category", "general")

        st.success("Analysis completed successfully")
        render_analyzer_results(result)

        res = json.dumps(result, indent=2, ensure_ascii=False)
        col1, col2 = st.columns(2)

        with col1:
            st.download_button(
                "Download JSON",
                data=res.encode("utf-8"),
                file_name=f"{file_name}_analysis.json",
                mime="application/json",
                use_container_width=True,
            )

        with col2:
            if st.download_button(
                "Save to DB",
                data=res.encode("utf-8"),
                file_name=f"{file_name}_analysis.json",
                mime="application/json",
                use_container_width=True,
            ):
                try:
                    doc_id = insert_image_analysis(
                        image_name=file_name,
                        response=result,
                        category=category,
                        created_by=uid,
                        analyzer_model="gpt-4o",
                        thumbnail=thumb_b64,
                    )
                    st.success(f"Saved to AI Library (id: {doc_id})")
                except Exception as e:
                    st.error(f"Failed to save to DB: {e}")
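A minimal sketch of how this page could be wired into the app's entry point. The router below (the app.py file, the sidebar menu, and the way the user id reaches session state) is an assumption for illustration, not part of this commit:

# Hypothetical app.py wiring -- illustrative only; the real router is not in this diff.
import streamlit as st
from app_pages.image_analyser import render_image_analyser_page
from app_pages.image_analyser_library import render_image_analysis_library

uid = st.session_state.get("uid", "anonymous")   # assumes a login step stored the user id
page = st.sidebar.radio("Page", ["Image Analyzer", "Image Analysis Library"])
if page == "Image Analyzer":
    render_image_analyser_page(uid)
else:
    render_image_analysis_library(uid=uid)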
app_pages/image_analyser_library.py
ADDED
@@ -0,0 +1,103 @@
from __future__ import annotations

import json
from datetime import datetime, timedelta, date
from typing import Any, Dict, List, Optional
import streamlit as st

from database.operations import find_image_analyses, list_image_categories
from components.image_render_analysis import render_analyzer_results


def _coerce_dt(val: Any) -> Optional[datetime]:
    if isinstance(val, datetime):
        return val
    try:
        return datetime.fromisoformat(str(val))
    except Exception:
        return None


def _label_for_item(doc: Dict[str, Any]) -> str:
    """Shown in the wide dropdown."""
    ts = _coerce_dt(doc.get("created_at"))
    ts_s = ts.strftime("%Y-%m-%d %H:%M") if ts else "Unknown time"
    cat = doc.get("category") or "—"
    model = doc.get("analyzer_model") or "—"
    return f"{ts_s} · {cat} · {model}"


def render_image_analysis_library(uid: Optional[str] = None, prefix: str = "img_ana_lib") -> None:
    st.subheader("Image Analysis Library")

    today = datetime.utcnow().date()
    default_start = today - timedelta(days=30)

    c1, c2, c3 = st.columns([1, 1, 1])
    with c1:
        start_date: date = st.date_input("Start date", value=default_start, key=f"{prefix}_start")
    with c2:
        end_date: date = st.date_input("End date", value=today, key=f"{prefix}_end")
    with c3:
        try:
            cats: List[str] = list_image_categories(created_by=uid)  # filter per user
            if "All" not in cats:
                cats = ["All"] + cats
        except Exception:
            cats = ["All"]
        category = st.selectbox("Category", options=cats, index=0, key=f"{prefix}_cat")

    st.markdown("---")

    try:
        start_dt = datetime.combine(start_date, datetime.min.time())
        end_dt = datetime.combine(end_date + timedelta(days=1), datetime.min.time())
        query_cat = None if (not category or category == "All") else category
        docs: List[Dict[str, Any]] = find_image_analyses(
            category=query_cat, start_date=start_dt, end_date=end_dt, limit=200, created_by=uid
        )
    except Exception as e:
        st.error(f"Failed to load image analyses: {e}")
        return

    if not docs:
        st.info("No image analyses for the selected filters.")
        return

    labels = [_label_for_item(d) for d in docs]
    selected_label = st.selectbox(
        "Select an analysis",
        options=labels,
        index=0,
        key="img_lib_sel",
    )
    sel_idx = labels.index(selected_label) if selected_label in labels else 0
    doc = docs[sel_idx]

    thumb_b64 = doc.get("thumbnail")
    if thumb_b64:
        try:
            st.image(
                f"data:image/jpeg;base64,{thumb_b64}",
                caption="Thumbnail",
                width=220,
            )
        except Exception:
            pass

    analysis = doc.get("results") or {}
    if analysis:
        render_analyzer_results(analysis)
        try:
            res = json.dumps(analysis, indent=2, ensure_ascii=False)
            st.download_button(
                "Download JSON",
                data=res.encode("utf-8"),
                file_name=f"image_analysis_{doc.get('_id','item')}.json",
                mime="application/json",
                use_container_width=True,
                key="img_lib_json_dl",
            )
        except Exception:
            pass
    else:
        st.info("No analysis stored for this item.")
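The database helpers used above (find_image_analyses, list_image_categories) live in database/operations.py, which is not part of this diff. A rough sketch of what find_image_analyses would need to look like to satisfy this page, assuming a pymongo collection and the field names read elsewhere in these files (created_at, category, created_by); treat every detail as an assumption:

# Hypothetical sketch of database/operations.find_image_analyses -- not in this commit.
from datetime import datetime
from typing import Any, Dict, List, Optional

from database.connections import get_image_collection  # imported elsewhere in this diff

def find_image_analyses(category: Optional[str] = None,
                        start_date: Optional[datetime] = None,
                        end_date: Optional[datetime] = None,
                        limit: int = 200,
                        created_by: Optional[str] = None) -> List[Dict[str, Any]]:
    q: Dict[str, Any] = {}
    if category:
        q["category"] = category
    if start_date or end_date:
        q["created_at"] = {k: v for k, v in (("$gte", start_date), ("$lt", end_date)) if v}
    if created_by:
        q["created_by"] = created_by
    col = get_image_collection()  # assumed to return a pymongo Collection
    return list(col.find(q).sort("created_at", -1).limit(limit))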
app_pages/image_generation.py
CHANGED
@@ -6,19 +6,24 @@ from generator_function.image_processor import process_zip_and_generate_images
 logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s")
 logger = logging.getLogger(__name__)
 
+
 def _zip_gallery_images(gallery_items):
+    if not gallery_items:
+        return None
     image_urls = [item[0] if isinstance(item, tuple) else item for item in gallery_items]
     temp_dir = tempfile.mkdtemp()
     try:
         zip_path = tempfile.NamedTemporaryFile(delete=False, suffix=".zip").name
         for i, url in enumerate(image_urls):
             try:
+                ext = url.split('?')[0].split('.')[-1]
+                ext = ext if ext and len(ext) <= 5 else "png"
                 file_path = os.path.join(temp_dir, f"image_{i}.{ext}")
                 if url.startswith(("http://", "https://")):
+                    resp = requests.get(url, timeout=10)
+                    resp.raise_for_status()
+                    with open(file_path, "wb") as f:
+                        f.write(resp.content)
                 elif os.path.exists(url):
                     shutil.copy(url, file_path)
             except Exception as e:

@@ -33,26 +38,42 @@ def _zip_gallery_images(gallery_items):
     finally:
         shutil.rmtree(temp_dir, ignore_errors=True)
 
+
 def render_bulk_image_generator(prefix: str = "ig_img"):
+    zip_file = st.file_uploader(
+        "Upload Zip or Single File",
+        type=["zip", "png", "jpg", "jpeg"],
+        key=f"{prefix}_upload"
+    )
     category = st.text_input("Category", key=f"{prefix}_category")
+
+    col1, col2, col3, col4, col5, col6 = st.columns([2, 2.5, 2.5, 2.5, 2.5, 2])
+    with col1:
+        blur = st.checkbox("Blur Image", key=f"{prefix}_blur")
+    with col2:
+        size = st.selectbox("Image Size", ["auto", "1024x1024", "1536x1024", "1024x1536"], key=f"{prefix}_size")
+    with col3:
+        quality = st.selectbox("Quality", ["auto", "low", "medium"], key=f"{prefix}_quality")
+    with col4:
+        sentiment = st.selectbox("Sentiment", ["as original image", "positive", "negative"], key=f"{prefix}_sentiment")
+    with col5:
+        platform = st.selectbox("Platform", ["Facebook", "Native", "Newsbreak", "Google Display Network"], key=f"{prefix}_platform")
+    with col6:
+        num_images = st.slider("No. of Images to be generated:", 1, 10, value=2, step=1, key=f"{prefix}_num_images")
     user_prompt = st.text_area("User Prompt", height=100, key=f"{prefix}_user_prompt")
 
+    colA, colB, colC = st.columns([1, 1, 1])
+    with colA:
+        demo_btn = st.button("Generate Demo Image", key=f"{prefix}_demo")
+    with colB:
+        gen_all_btn = st.button("Generate All Images", key=f"{prefix}_gen_all")
+    with colC:
+        download_btn = st.button("Download All", key=f"{prefix}_download_all")
 
     gallery_key = f"{prefix}_gallery"
     gallery = st.session_state.setdefault(gallery_key, [])
 
+
     if demo_btn or gen_all_btn:
         if zip_file and category and user_prompt:
             with st.spinner(" Generating image variations..."):

@@ -98,7 +119,11 @@ def render_bulk_image_generator(prefix: str = "ig_img"):
     else:
         st.warning("Please upload a file and fill all required fields.")
 
+    # ----------------------------
+    # Download all images
+    # ----------------------------
     if download_btn:
+        with st.spinner(" Preparing for download..."):
             zip_path = _zip_gallery_images(st.session_state.get(gallery_key, []))
             if zip_path:
                 with open(zip_path, "rb") as f:

@@ -115,4 +140,4 @@ def render_bulk_image_generator(prefix: str = "ig_img"):
             except Exception:
                 pass
         else:
+            st.warning("No images to zip and download.")
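The new file-extension guard in _zip_gallery_images can be shown in isolation. This is a standalone snippet, not code from the repo; it just mirrors the two added lines:

# Standalone illustration of the extension guard added above.
def _guess_ext(url: str) -> str:
    ext = url.split('?')[0].split('.')[-1]
    return ext if ext and len(ext) <= 5 else "png"

assert _guess_ext("https://cdn.example.com/ads/creative.webp?sig=abc") == "webp"
assert _guess_ext("https://cdn.example.com/ads/no-extension") == "png"   # falls back when no usable suffix exists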
app_pages/image_generation_library.py
CHANGED
@@ -1,16 +1,24 @@
 from __future__ import annotations
 
 from datetime import datetime, timedelta, date
-from typing import Any, Dict, List
+from typing import Any, Dict, List, Optional
 
 import streamlit as st
 
 from database.connections import get_results_collection
 
 
-def _distinct_options(col, field: str, source: str, uid: str) -> List[str]:
+def _distinct_options(col, field: str, source: str, created_by: Optional[str] = None) -> List[str]:
     try:
+        q: Dict[str, Any] = {"source": source}
+        if created_by:
+            q["created_by"] = created_by
+
+        if field == "platform":
+            vals = col.distinct("settings.platform", q) + col.distinct("platform", q)
+        else:
+            vals = col.distinct(field, q)
+
         vals = [v for v in vals if v not in (None, "", [])]
         vals = sorted(set(vals))
         return ["All"] + vals if vals else ["All"]

@@ -18,16 +26,18 @@ def _distinct_options(col, field: str, source: str, uid: str) -> List[str]:
         return ["All"]
 
 
+
 def _query_docs(
+    col,
+    source: str,
+    start_d: date,
+    end_d: date,
+    category: str,
+    platform: str,
+    uid: str,
+    limit: int = 200,
 ) -> List[Dict[str, Any]]:
+
     try:
         start_dt = datetime.combine(start_d, datetime.min.time())
         end_dt = datetime.combine(end_d + timedelta(days=1), datetime.min.time())

@@ -39,7 +49,13 @@ def _query_docs(
         if category and category != "All":
             q["category"] = category
         if platform and platform != "All":
+            q["$or"] = [
+                {"platform": platform},
+                {"settings.platform": platform},
+                {"provider_settings.platform": platform},
+                {"metadata.platform": platform},
+            ]
+
         cursor = col.find(q).sort("created_at", -1).limit(limit)
         return list(cursor)
     except Exception:

@@ -64,15 +80,14 @@ def _render_grid(docs: List[Dict[str, Any]]) -> None:
     for i, (url, meta) in enumerate(items):
         with cols[i % 4]:
             try:
+                st.image(url, use_container_width=True)
                 cat = meta.get("category") or "—"
                 plat = (
+                    meta.get("platform")
+                    or (meta.get("settings") or {}).get("platform")
+                    or (meta.get("provider_settings") or {}).get("platform")
+                    or (meta.get("metadata") or {}).get("platform")
+                    or "—"
                 )
                 ts = meta.get("created_at")
                 ts_str = ts.strftime("%Y-%m-%d %H:%M UTC") if isinstance(ts, datetime) else "—"

@@ -81,7 +96,8 @@ def _render_grid(docs: List[Dict[str, Any]]) -> None:
                 st.error("Failed to load image")
 
 
-def render_image_variations_library(uid: str) -> None:
+def render_image_variations_library(uid: str, prefix: str = "img_var") -> None:
+
     col = get_results_collection()
     if col is None:
         st.warning("Database not available. Set MONGO_URI and restart.")

@@ -92,19 +108,20 @@ def render_image_variations_library(uid: str) -> None:
 
     c1, c2 = st.columns(2)
     with c1:
+        start_date = st.date_input("Start date", value=default_start, key=f"{prefix}_start")
     with c2:
+        end_date = st.date_input("End date", value=today, key=f"{prefix}_end")
 
+    cat_opts = _distinct_options(col, "category", source="variation")
+    plat_opts = _distinct_options(col, "platform", source="variation")
 
     r1, r2 = st.columns(2)
     with r1:
+        category = st.selectbox("Category", options=cat_opts, key=f"{prefix}_cat")
     with r2:
+        platform = st.selectbox("Platform", options=plat_opts, key=f"{prefix}_plat")
 
     st.markdown("---")
     docs = _query_docs(col, "variation", start_date, end_date, category, platform, uid=uid)
+    _render_grid(docs)
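For context, the filter document that _query_docs sends to MongoDB for, say, source="variation", category="Fitness", platform="Facebook" looks roughly like the dict below. The source and created_at keys come from lines outside the hunks shown, so their exact spelling is an assumption; the category and $or parts are taken directly from the diff:

# Approximate shape of the query built by _query_docs (illustrative values).
from datetime import datetime, timedelta

start_dt = datetime(2025, 1, 1)
end_dt = start_dt + timedelta(days=31)

q = {
    "source": "variation",                              # assumed, set before the shown hunk
    "created_at": {"$gte": start_dt, "$lt": end_dt},    # assumed, from the computed day bounds
    "category": "Fitness",
    "$or": [
        {"platform": "Facebook"},
        {"settings.platform": "Facebook"},
        {"provider_settings.platform": "Facebook"},
        {"metadata.platform": "Facebook"},
    ],
}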
app_pages/script_library.py
CHANGED
@@ -1,11 +1,13 @@
 from datetime import datetime, timedelta
 from typing import Any, Dict
 
-from database.operations import find_script_results
 import streamlit as st
 import pandas as pd
 
+from database.operations import find_script_results
+
+
-def render_script_library(uid: str) ->None:
+def render_script_library(uid: str, prefix: str = "scripts") -> None:
     st.subheader("Generated Scripts")
 
     today = datetime.utcnow().date()

@@ -13,9 +15,9 @@ def render_script_library(uid: str) ->None:
 
     f1, f2, _ = st.columns([1, 1, 1.2])
     with f1:
+        start_date_scripts = st.date_input("Start date", value=default_start, key=f"{prefix}_start")
     with f2:
+        end_date_scripts = st.date_input("End date", value=today, key=f"{prefix}_end")
 
     if "scripts_page" not in st.session_state:
         st.session_state.scripts_page = 0

@@ -23,6 +25,7 @@ def render_script_library(uid: str) ->None:
     start_dt = datetime.combine(start_date_scripts, datetime.min.time())
     end_dt = datetime.combine(end_date_scripts + timedelta(days=1), datetime.min.time())
 
+
     records, total_count = find_script_results(
         start_date=start_dt,
         end_date=end_dt,

@@ -33,7 +36,7 @@ def render_script_library(uid: str) ->None:
 
     if total_count == 0 or not records:
         st.info("No script generations for the selected filters.")
+        return
 
     start_idx = st.session_state.scripts_page * 20 + 1
     end_idx = min(start_idx + len(records) - 1, total_count)

@@ -46,46 +49,49 @@ def render_script_library(uid: str) ->None:
         return f"{ts_s} · {vn}"
 
     options = [_label(d) for d in records]
+    selected_label = st.selectbox("Select generated script", options=options, index=0, key=f"{prefix}_sel_dropdown")
     sel_idx = options.index(selected_label) if selected_label in options else 0
     doc = records[sel_idx]
 
+
     if doc.get("thumbnail"):
         try:
             st.image("data:image/jpeg;base64," + doc["thumbnail"], width=160, caption="Thumbnail")
         except Exception:
             pass
 
+
     json_response = doc.get("response")
     if not json_response:
         st.info("No variations saved.")
+        return
+
+    all_tables = []
+    if isinstance(json_response, list):
+        for round_idx, round_data in enumerate(json_response, 1):
+            st.markdown(f"#### Generation Round {round_idx}")
+            st.text_input(
+                "Prompt used:",
+                round_data.get("prompt_used", "N/A"),
+                disabled=True,
+                key=f"{prefix}_hist_prompt_{doc.get('_id')}_{round_idx}",
+            )
+            for i, variation in enumerate(round_data.get("variations", []), 1):
+                st.markdown(f"**Variation {i}: {variation.get('variation_name', 'Var')}**")
+                df = pd.DataFrame(variation.get("script_table", []))
+                st.table(df)
+                if not df.empty:
+                    df["Variation"] = variation.get("variation_name", f"Var{i}")
+                    df["Round"] = round_idx
+                    all_tables.append(df)
+
+    if all_tables:
+        csv_scripts = pd.concat(all_tables, ignore_index=True).to_csv(index=False)
+        st.download_button(
+            "Download CSV",
+            data=csv_scripts,
+            file_name=f"{(doc.get('video_name') or doc.get('file_name') or 'scripts')}_scripts.csv",
+            mime="text/csv",
+            use_container_width=True,
+            key=f"{prefix}_download_csv",
+        )
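For clarity, this is the document shape the rendering loop above expects in the stored response field. The field names are taken from the accessors in the loop; the example values themselves are invented:

# Illustrative "response" document -- structure inferred from the loop above, values made up.
response = [
    {
        "prompt_used": "30-second UGC script for a sleep supplement",
        "variations": [
            {
                "variation_name": "Hook-first",
                "script_table": [
                    {"Scene": 1, "Visual": "Close-up of alarm clock", "Voiceover": "Still tired at 7am?"},
                    {"Scene": 2, "Visual": "Product shot", "Voiceover": "Meet the fix."},
                ],
            },
        ],
    },
]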
app_pages/text_image_generation.py
CHANGED
@@ -29,12 +29,13 @@ def render_multimodel_ad_generator(prefix: str = "img_text"):
     prompt = st.text_area("Prompt", placeholder="Describe the image you want to generate...", height=160, key=f"{prefix}_prompt")
 
     if st.button("Generate Images", type="primary", key=f"{prefix}_go"):
+        uid = st.session_state.get("uid")
         handle_image_generation_optimized(
+            uid=uid,
             model_key=model_key,
             aspect_ratio=aspect_ratio,
             prompt=prompt,
             num_images=num_images,
-            debug_mode=False,
             category=(category.strip() or None),
             platform=platform or None,
         )
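The generator now reads the user id from st.session_state["uid"]. Where that key gets populated is not part of this diff; a minimal sketch of the assumed login step:

# Hypothetical login handler -- not in this commit; shows the assumption that an
# earlier authentication step stores the user id this button handler reads back.
import streamlit as st

def on_login(user_id: str) -> None:
    st.session_state["uid"] = user_id   # later read via st.session_state.get("uid")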
app_pages/text_image_generation_library.py
CHANGED
@@ -1,19 +1,25 @@
 from __future__ import annotations
 
 from datetime import datetime, timedelta, date
+from typing import Any, Dict, List
 
 import streamlit as st
 
 from database.connections import get_results_collection
 
 
+def _distinct_options(col, field: str, source: str) -> List[str]:
     try:
+        if field == "platform":
+            vals = (
+                col.distinct("platform", {"source": source})
+                + col.distinct("settings.platform", {"source": source})
+                + col.distinct("provider_settings.platform", {"source": source})
+                + col.distinct("metadata.platform", {"source": source})
+            )
+        else:
+            vals = col.distinct(field, {"source": source})
+
         vals = [v for v in vals if v not in (None, "", [])]
         vals = sorted(set(vals))
         return ["All"] + vals if vals else ["All"]

@@ -28,7 +34,6 @@ def _query_docs(
     end_d: date,
     category: str,
     platform: str,
-    created_by: Optional[str] = None,
     limit: int = 200,
 ) -> List[Dict[str, Any]]:
     try:

@@ -38,16 +43,19 @@ def _query_docs(
         if category and category != "All":
             q["category"] = category
         if platform and platform != "All":
+            q["$or"] = [
+                {"platform": platform},
+                {"settings.platform": platform},
+                {"provider_settings.platform": platform},
+                {"metadata.platform": platform},
+            ]
         cursor = col.find(q).sort("created_at", -1).limit(limit)
         return list(cursor)
     except Exception:
         return []
 
 
+
 def _render_grid(docs: List[Dict[str, Any]], key_prefix: str) -> None:
     seen = set()
     items = []

@@ -82,7 +90,8 @@ def _render_grid(docs: List[Dict[str, Any]], key_prefix: str) -> None:
         st.error("Failed to load image")
 
 
-def render_text_image_library(uid: Optional[str] = None) -> None:
+def render_text_image_library() -> None:
+
     col = get_results_collection()
     if col is None:
         st.warning("Database not available. Set MONGO_URI and restart.")

@@ -97,8 +106,8 @@ def render_text_image_library(uid: Optional[str] = None) -> None:
     with c2:
         end_date = st.date_input("End date", value=today, key="img_text_end")
 
+    cat_opts = _distinct_options(col, "category", source="text")
+    plat_opts = _distinct_options(col, "platform", source="text")
 
     r1, r2 = st.columns(2)
     with r1:

@@ -107,5 +116,5 @@ def render_text_image_library(uid: Optional[str] = None) -> None:
     platform = st.selectbox("Platform", options=plat_opts, key="img_text_plat")
 
     st.markdown("---")
+    docs = _query_docs(col, "text", start_date, end_date, category, platform)
+    _render_grid(docs, key_prefix="text")
|
app_pages/video_analyser.py
CHANGED
|
@@ -1,18 +1,14 @@
|
|
| 1 |
from __future__ import annotations
|
| 2 |
-
import os, tempfile
|
| 3 |
import streamlit as st
|
| 4 |
-
import base64
|
| 5 |
from typing import Any, Dict
|
|
|
|
| 6 |
from background_task.generation_tasks import run_and_store_video_analysis
|
| 7 |
from app_pages.video_library import render_analyzer_results
|
|
|
|
| 8 |
|
| 9 |
|
| 10 |
-
def
|
| 11 |
-
from app_pages.video_library import render_video_library
|
| 12 |
-
render_video_library()
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
def render_video_analyser_page(task_manager=None, uid: str = "anonymous") -> None:
|
| 16 |
st.markdown("### Video Analyser")
|
| 17 |
|
| 18 |
category_va = st.text_input("Category", key="va_category")
|
|
@@ -26,21 +22,19 @@ def render_video_analyser_page(task_manager=None, uid: str = "anonymous") -> Non
|
|
| 26 |
video_ext = "mp4"
|
| 27 |
|
| 28 |
if uploaded is not None:
|
| 29 |
-
|
| 30 |
video_bytes = uploaded.read()
|
| 31 |
if uploaded.name and "." in uploaded.name:
|
| 32 |
video_ext = uploaded.name.rsplit(".", 1)[-1].lower() or "mp4"
|
| 33 |
|
| 34 |
-
|
| 35 |
b64 = base64.b64encode(video_bytes).decode("utf-8")
|
| 36 |
st.caption("Preview")
|
| 37 |
st.markdown(
|
| 38 |
f"""
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
unsafe_allow_html=True,
|
| 45 |
)
|
| 46 |
|
|
@@ -50,7 +44,6 @@ def render_video_analyser_page(task_manager=None, uid: str = "anonymous") -> Non
|
|
| 50 |
if not video_bytes or not category_va.strip():
|
| 51 |
st.warning("Please provide a category and upload a video.")
|
| 52 |
else:
|
| 53 |
-
|
| 54 |
suffix = f".{video_ext}" if video_ext else ".mp4"
|
| 55 |
with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
|
| 56 |
tmp.write(video_bytes)
|
|
@@ -63,6 +56,7 @@ def render_video_analyser_page(task_manager=None, uid: str = "anonymous") -> Non
|
|
| 63 |
uploaded_file_path=tmp_path,
|
| 64 |
created_by=uid,
|
| 65 |
)
|
|
|
|
| 66 |
finally:
|
| 67 |
try:
|
| 68 |
os.remove(tmp_path)
|
|
@@ -70,8 +64,23 @@ def render_video_analyser_page(task_manager=None, uid: str = "anonymous") -> Non
|
|
| 70 |
pass
|
| 71 |
|
| 72 |
if isinstance(result, dict) and result.get("results"):
|
| 73 |
-
st.success(
|
| 74 |
render_analyzer_results(result["results"])
|
| 75 |
-
else:
|
| 76 |
-
st.error("Analysis failed. Check GEMINI_API_KEY and try again.")
|
| 77 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
from __future__ import annotations
|
| 2 |
+
import os, tempfile, base64, json
|
| 3 |
import streamlit as st
|
|
|
|
| 4 |
from typing import Any, Dict
|
| 5 |
+
|
| 6 |
from background_task.generation_tasks import run_and_store_video_analysis
|
| 7 |
from app_pages.video_library import render_analyzer_results
|
| 8 |
+
from database.operations import insert_video_analysis
|
| 9 |
|
| 10 |
|
| 11 |
+
def render_video_analyser_page(uid: str ) -> None:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
st.markdown("### Video Analyser")
|
| 13 |
|
| 14 |
category_va = st.text_input("Category", key="va_category")
|
|
|
|
| 22 |
video_ext = "mp4"
|
| 23 |
|
| 24 |
if uploaded is not None:
|
|
|
|
| 25 |
video_bytes = uploaded.read()
|
| 26 |
if uploaded.name and "." in uploaded.name:
|
| 27 |
video_ext = uploaded.name.rsplit(".", 1)[-1].lower() or "mp4"
|
| 28 |
|
|
|
|
| 29 |
b64 = base64.b64encode(video_bytes).decode("utf-8")
|
| 30 |
st.caption("Preview")
|
| 31 |
st.markdown(
|
| 32 |
f"""
|
| 33 |
+
<video controls width="720">
|
| 34 |
+
<source src="data:video/{video_ext};base64,{b64}">
|
| 35 |
+
Your browser does not support the video tag.
|
| 36 |
+
</video>
|
| 37 |
+
""",
|
| 38 |
unsafe_allow_html=True,
|
| 39 |
)
|
| 40 |
|
|
|
|
| 44 |
if not video_bytes or not category_va.strip():
|
| 45 |
st.warning("Please provide a category and upload a video.")
|
| 46 |
else:
|
|
|
|
| 47 |
suffix = f".{video_ext}" if video_ext else ".mp4"
|
| 48 |
with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
|
| 49 |
tmp.write(video_bytes)
|
|
|
|
| 56 |
uploaded_file_path=tmp_path,
|
| 57 |
created_by=uid,
|
| 58 |
)
|
| 59 |
+
|
| 60 |
finally:
|
| 61 |
try:
|
| 62 |
os.remove(tmp_path)
|
|
|
|
| 64 |
pass
|
| 65 |
|
| 66 |
if isinstance(result, dict) and result.get("results"):
|
| 67 |
+
st.success("Analysis completed successfully ")
|
| 68 |
render_analyzer_results(result["results"])
|
|
|
|
|
|
|
| 69 |
|
| 70 |
+
|
| 71 |
+
res = json.dumps(result["results"], indent=2, ensure_ascii=False)
|
| 72 |
+
st.download_button(
|
| 73 |
+
"Download JSON",
|
| 74 |
+
data=res.encode("utf-8"),
|
| 75 |
+
file_name=f"video_analysis_{result.get('_id', 'item')}.json",
|
| 76 |
+
mime="application/json",
|
| 77 |
+
width='content',
|
| 78 |
+
key="va_json_dl",
|
| 79 |
+
)
|
| 80 |
+
|
| 81 |
+
if st.button("Save to Database", key="va_save_db"):
|
| 82 |
+
try:
|
| 83 |
+
insert_video_analysis(result)
|
| 84 |
+
st.success("Analysis saved to database ")
|
| 85 |
+
except Exception as e:
|
| 86 |
+
st.error(f"Failed to save: {e}")
|
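insert_video_analysis lives in database/operations.py, which is not included in this diff. A rough sketch of what the call above assumes, with every name treated as an assumption (including the collection helper):

# Hypothetical sketch of database/operations.insert_video_analysis -- not in this commit.
from typing import Any, Dict

from database.connections import get_video_collection  # assumed helper, mirroring get_image_collection

def insert_video_analysis(result: Dict[str, Any]) -> str:
    doc = dict(result)
    doc.pop("_id", None)                      # avoid re-inserting an id the task may already have stored
    col = get_video_collection()              # assumed to return a pymongo Collection
    return str(col.insert_one(doc).inserted_id)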
app_pages/video_library.py
CHANGED
@@ -27,19 +27,17 @@ def _label_for_item(doc: Dict[str, Any]) -> str:
     model = doc.get("analyzer_model") or "—"
     return f"{ts_s} · {cat} · {model}"
 
-
-def render_video_library(uid: str) -> None:
+def render_video_library(uid: Optional[str] = None, prefix: str = "vid_lib") -> None:
     st.subheader("Video Library")
 
-    # --- Filters (unchanged layout) ---
     today = datetime.utcnow().date()
     default_start = today - timedelta(days=30)
 
     c1, c2, c3 = st.columns([1, 1, 1])
     with c1:
+        start_date: date = st.date_input("Start date", value=default_start, key=f"{prefix}_start")
     with c2:
+        end_date: date = st.date_input("End date", value=today, key=f"{prefix}_end")
     with c3:
         try:
             cats: List[str] = list_video_categories()

@@ -47,7 +45,7 @@ def render_video_library(uid: str) -> None:
             cats = ["All"] + [c for c in cats if c != "All"]
         except Exception:
             cats = ["All"]
+        category = st.selectbox("Category", options=cats, index=0, key=f"{prefix}_cat")
 
     st.markdown("---")

@@ -57,7 +55,7 @@ def render_video_library(uid: str) -> None:
         end_dt = datetime.combine(end_date + timedelta(days=1), datetime.min.time())
         query_cat = None if (not category or category == "All") else category
         docs: List[Dict[str, Any]] = find_video_analyses(
-            category=query_cat,start_date=start_dt,end_date=end_dt,limit=200,created_by=uid,
+            category=query_cat, start_date=start_dt, end_date=end_dt, limit=200, created_by=uid,
         )
     except Exception as e:
         st.error(f"Failed to load video analyses: {e}")

@@ -94,10 +92,10 @@ def render_video_library(uid: str) -> None:
     if analysis:
         render_analyzer_results(analysis)
         try:
+            res = json.dumps(analysis, indent=2, ensure_ascii=False)
             st.download_button(
                 "Download JSON",
+                data=res.encode("utf-8"),
                 file_name=f"video_analysis_{doc.get('_id','item')}.json",
                 mime="application/json",
                 use_container_width=True,

@@ -106,4 +104,6 @@ def render_video_library(uid: str) -> None:
         except Exception:
             pass
     else:
+        st.info("No analysis stored for this item.")