Spaces:
Sleeping
Sleeping
| import os | |
| import json | |
| from videodb import connect | |
| from langchain_google_genai import ChatGoogleGenerativeAI | |
| from langchain_core.prompts import PromptTemplate | |
| from dotenv import load_dotenv | |
| load_dotenv() | |
# --- 1. Connect to VideoDB ---
# Resolve the API key: environment variable first, then Streamlit secrets
# (the usual fallback when running on Streamlit Cloud).
api_key = os.getenv("VIDEO_DB_API_KEY")
if not api_key:
    import streamlit as st
    try:
        api_key = st.secrets["VIDEO_DB_API_KEY"]
    except Exception as e:
        # Missing secrets file or key — connect() below will surface the
        # real failure. (Was a bare `except:` that also ate KeyboardInterrupt.)
        print(f"⚠️ Could not read VIDEO_DB_API_KEY from secrets: {e}")
conn = connect(api_key=api_key)
try:
    all_colls = conn.get_collections()
    # Reuse the first existing collection; otherwise create a fresh one.
    if all_colls:
        coll = all_colls[0]
    else:
        coll = conn.create_collection("Auto Trailer Demo", "Generated by Agent")
    print(f"✅ Using Collection ID: {coll.id}")
except Exception as e:
    print(f"⚠️ Connection fallback: {e}")
    try:
        coll = conn.create_collection("Fallback Collection", "Emergency Fallback")
    except Exception as e2:
        # Even creation failed (e.g. invalid key). Downstream helpers
        # guard on `coll` being None, so record why instead of hiding it.
        print(f"⚠️ Could not create fallback collection: {e2}")
        coll = None
# --- 2. LLM Setup ---
# Same key-resolution pattern as above: env var first, Streamlit secrets second.
google_key = os.getenv("GOOGLE_API_KEY")
if not google_key:
    import streamlit as st
    try:
        google_key = st.secrets["GOOGLE_API_KEY"]
    except Exception as e:
        # Was a bare `except: pass` — narrow to Exception and leave a trace
        # so a missing key is diagnosable instead of failing later in the LLM.
        print(f"⚠️ Could not read GOOGLE_API_KEY from secrets: {e}")
llm = ChatGoogleGenerativeAI(
    model="gemini-2.0-flash",
    google_api_key=google_key,
    temperature=0.7,
)
| # --- Helper to fetch pre-indexed video --- | |
def get_video_from_id(video_id):
    """Fetch an already-uploaded video from the active collection.

    Returns the video object, or None when the collection is unavailable
    or the lookup fails for any reason (the error is printed, not raised).
    """
    try:
        if not coll:
            return None
        print(f"Fetching Video ID: {video_id}...")
        video = coll.get_video(video_id)
        return video
    except Exception as exc:
        print(f"Error fetching video: {exc}")
        return None
def upload_and_index(url):
    """Upload a video by URL into the collection and index its spoken words.

    Returns the uploaded video object, or None if the collection is missing
    or any step of the upload/indexing pipeline fails.
    """
    try:
        if not coll:
            return None
        print(f"Uploading {url}...")
        uploaded = coll.upload(url=url)
        print(f"✅ Uploaded! VIDEO ID: {uploaded.id}")
        print("Indexing...")
        # Spoken-word indexing enables transcript retrieval later on.
        uploaded.index_spoken_words()
        return uploaded
    except Exception as exc:
        print(f"Error: {exc}")
        return None
def _strip_code_fences(text):
    """Remove Markdown ``` fences from an LLM reply so the rest parses as JSON."""
    return text.replace("```json", "").replace("```", "").strip()


def get_highlights(transcript, focus="engaging, funny, or insightful"):
    """Ask the LLM to pick 3 highlight segments from a transcript.

    Parameters:
        transcript: full spoken-word transcript of the video.
        focus: editorial criteria for selection; blank/whitespace input
            falls back to the generic default.

    Returns:
        A list of dicts shaped like {"start": int, "end": int, "reason": str},
        or [] on any LLM or parsing failure.
    """
    if not focus or not focus.strip():
        focus = "engaging, funny, or insightful"
    prompt = PromptTemplate(
        input_variables=["transcript", "focus"],
        template="""
    You are a professional Video Editor.
    Analyze the transcript below and find 3 segments that best match this specific criteria:
    "{focus}".
    Each segment must be between 10 and 50 seconds long.
    Transcript: {transcript}
    Return ONLY valid JSON format like this:
    [
        {{"start": 120, "end": 150, "reason": "Matches criteria because..."}},
        {{"start": 400, "end": 430, "reason": "Matches criteria because..."}}
    ]
    """,
    )
    chain = prompt | llm
    try:
        response = chain.invoke({"transcript": transcript, "focus": focus})
        data = json.loads(_strip_code_fences(response.content))
        # Guard: the model may return a bare object instead of the requested
        # array; downstream code indexes clip['start']/clip['end'], so only
        # a list is usable.
        return data if isinstance(data, list) else []
    except Exception as e:
        print(f"LLM Error: {e}")
        return []
def create_trailer_stream(video, highlights):
    """Stitch the highlight clips into a single streamable trailer.

    `highlights` is a list of dicts carrying 'start'/'end' timestamps;
    returns the stream URL produced by the video's timeline generator.
    """
    timeline = [(segment["start"], segment["end"]) for segment in highlights]
    return video.generate_stream(timeline=timeline)
def analyze_virality(transcript):
    """Score a transcript's virality potential via the LLM.

    Returns a dict like {"score": <int 0-100>, "keywords": [str, ...]};
    on any LLM or parsing failure returns {"score": 0, "keywords": ["Error"]}.
    """
    prompt = PromptTemplate(
        input_variables=["transcript"],
        template="""
    You are a Viral Content Analyst.
    Analyze the provided transcript to determine its virality potential.
    Calculate a unique score (0-100) based on:
    1. Hook strength (Intro)
    2. Pacing and Excitement
    3. Clear takeaways or humor
    Do NOT return the example values. Calculate real values based on the text.
    Transcript: {transcript}
    Return valid JSON:
    {{"score": <calculated_int>, "keywords": ["<keyword1>", "<keyword2>", "<keyword3>"]}}
    """,
    )
    chain = prompt | llm
    try:
        response = chain.invoke({"transcript": transcript})
        clean_json = response.content.replace("```json", "").replace("```", "").strip()
        result = json.loads(clean_json)
        # Callers expect a mapping with "score"/"keywords"; treat anything
        # else (e.g. a bare list) as a parse failure.
        if not isinstance(result, dict):
            raise ValueError("LLM did not return a JSON object")
        return result
    except Exception as e:
        # Was a bare `except:` that silently discarded every error —
        # keep the safe fallback but record what went wrong.
        print(f"Virality analysis error: {e}")
        return {"score": 0, "keywords": ["Error"]}
def generate_linkedin_post(transcript):
    """Draft a short promotional LinkedIn post for the video.

    Feeds the transcript to the LLM with a Hook-Value-CTA prompt and
    returns the generated post text.
    """
    post_prompt = PromptTemplate(
        input_variables=["text"],
        template="""
    Write a punchy, viral LinkedIn post to promote this video.
    Use short sentences, emojis, and a 'Hook-Value-CTA' structure.
    Context: {text}
    """,
    )
    pipeline = post_prompt | llm
    reply = pipeline.invoke({"text": transcript})
    return reply.content