Spaces:
Sleeping
Sleeping
| import streamlit as st | |
| from openai import OpenAI | |
| from clipper_prompts import CLIPPER_SYSTEM_MESSAGE, CLIPPER_USER_MESSAGE | |
| from prompts import SYSTEM_MESSAGE, USER_MESSAGE | |
| import json | |
| import os | |
# Wide layout so the side-by-side input/output columns have room.
st.set_page_config(layout="wide")

st.title("π¬ AI-Powered Content Planner - Clip Creator")
st.markdown("Paste a transcript on the left and view the generated content plan on the right.")

# The API key is read from the environment, not collected through the UI.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Model offered first in both sidebar selectors.
DEFAULT_MODEL = "gpt-4o-2024-08-06"

# Fallback clip-extraction goal, used when the sidebar prompt box is left blank.
DEFAULT_GOAL = "Extract multiple self-contained clips by identifying natural narrative peaks, emotional highlights, and shareable moments (relatable struggles, surprising insights, or friendly debates) in their original sequence, optimizing for standalone engagement potential."
# Both pipeline stages offer the same set of models.
_MODEL_CHOICES = [DEFAULT_MODEL, "gpt-4o-mini", "o3-mini"]

# Sidebar: model for Stage 1 (clip plan generation).
st.sidebar.subheader("π€ Model for Clip Plan Generation")
clip_plan_model = st.sidebar.selectbox(
    "Choose model for clip plan:",
    _MODEL_CHOICES,
    index=0,  # default to DEFAULT_MODEL
)

# Sidebar: model for Stage 2 (transcript extraction).
st.sidebar.subheader("π₯ Model for Transcript Clipper")
extraction_model = st.sidebar.selectbox(
    "Choose model for transcript clipper:",
    _MODEL_CHOICES,
    index=0,  # default to DEFAULT_MODEL
)

# Sidebar: optional custom goal; a blank entry falls back to DEFAULT_GOAL.
st.sidebar.subheader("π― Customize Prompt")
_goal_input = st.sidebar.text_area("Specify specific prompt to extract clips (optional):", height=100)
GOAL = _goal_input.strip() or DEFAULT_GOAL
# Abort early when no key is available. The key comes from the environment
# (os.getenv above), so point the user at the env var instead of asking them
# to "enter" a key in a UI field that does not exist.
if not OPENAI_API_KEY:
    st.warning("β οΈ OPENAI_API_KEY is not set. Please set the OPENAI_API_KEY environment variable.")
    st.stop()

client = OpenAI(api_key=OPENAI_API_KEY)
# Two equal-width columns: transcript input (left) | generated plan (right).
col1, col2 = st.columns([1, 1])

with col1:
    st.subheader("π Paste Your Transcript")
    transcript = st.text_area("Enter the transcript here:", height=400)

with col2:
    st.subheader("π Generated Content Plan")
    # Placeholder container so Stage 1 can clear/refill the output area.
    generated_plan_container = st.container()

generate_button = st.button("Generate Plan")
# Stage 1: ask the model for a JSON clip plan and render it.
if generate_button:
    if not transcript.strip():
        st.error("β Please enter a transcript.")
    else:
        with st.spinner("β³ Generating content plan... Please wait."):
            try:
                # Build the Stage 1 prompt from the (possibly customized) goal.
                system_prompt = SYSTEM_MESSAGE.format(prompt_goal=GOAL)
                user_prompt = USER_MESSAGE.format(source_content=transcript)
                messages = [
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt},
                ]

                # Clear any output left over from a previous run.
                generated_plan_container.empty()

                openai_args = {
                    "model": clip_plan_model,
                    "messages": messages,
                    "response_format": {"type": "json_object"},
                }
                # o3-mini is a reasoning model: it takes reasoning_effort and
                # does not take the sampling parameters used by the chat models.
                if clip_plan_model == 'o3-mini':
                    openai_args['reasoning_effort'] = "low"
                else:
                    openai_args["max_tokens"] = 5000
                    openai_args["temperature"] = 0.45

                # Single (non-streaming) completion request.
                response = client.chat.completions.create(**openai_args)

                # Parse the JSON body of the response.
                generated_response = response.choices[0].message.content.strip()
                content_plan = json.loads(generated_response)

                # Persist the plan for Stage 2 (transcript clipper).
                st.session_state.clip_plan_json = json.dumps(content_plan)

                # The plan arrives under a single top-level key. Guard against
                # an empty object so we show the friendly "no clips" message
                # instead of crashing with IndexError on list(...)[0].
                clip_plans = []
                if content_plan:
                    plan_key = next(iter(content_plan))
                    clip_plans = content_plan.get(plan_key, [])

                # Render each planned clip, or report that none were produced.
                if clip_plans:
                    with generated_plan_container.container():
                        for i, clip in enumerate(clip_plans):
                            st.markdown(f"### π¬ Clip {i + 1}")
                            st.write(f"**Title:** {clip.get('Title', 'N/A')}")
                            st.write(f"**Focus Prompt:** {clip.get('Focus Prompt', 'N/A')}")
                            st.write(f"**Duration:** {clip.get('Duration Target', 'N/A')} seconds")
                            st.markdown("---")
                else:
                    st.error("β οΈ No clips were generated. Try again.")
            except json.JSONDecodeError:
                st.error("β οΈ Failed to parse OpenAI response. Try again.")
            except Exception as e:
                st.error(f"β Error: {str(e)}")
# Stage 2: pull the actual transcript text for each clip in the saved plan.
# Only offered once Stage 1 has stored a plan in the session state.
if transcript.strip() and "clip_plan_json" in st.session_state:
    st.subheader("βοΈ AI Powered Transcript Clipper")
    extract_button = st.button("Extract Transcript Sections")
    if extract_button:
        with st.spinner("β³ Extracting transcript sections... Please wait."):
            try:
                # System prompt is used verbatim; the user prompt carries the
                # full transcript plus the Stage 1 plan JSON.
                clipper_messages = [
                    {"role": "system", "content": CLIPPER_SYSTEM_MESSAGE},
                    {
                        "role": "user",
                        "content": CLIPPER_USER_MESSAGE.format(
                            source_content=transcript,
                            clip_plan=st.session_state.clip_plan_json,
                        ),
                    },
                ]

                request_args = {
                    "model": extraction_model,
                    "messages": clipper_messages,
                    "response_format": {"type": "json_object"},
                }
                if extraction_model == 'o3-mini':
                    # Reasoning model: effort knob instead of sampling params.
                    request_args['reasoning_effort'] = "low"
                else:
                    request_args["max_tokens"] = 10000
                    request_args["temperature"] = 0.45

                clipper_response = client.chat.completions.create(**request_args)

                # Decode the JSON mapping of clip title -> transcript section.
                raw_extraction = clipper_response.choices[0].message.content.strip()
                transcript_extraction = json.loads(raw_extraction)

                # Render each extracted section under its clip title.
                st.markdown("### π Extracted Transcript Sections")
                for clip_title, section in transcript_extraction.items():
                    st.markdown(f"#### π¬ {clip_title}")
                    st.write(section)
                    st.markdown("---")
            except json.JSONDecodeError:
                st.error("β οΈ Failed to parse transcript extraction response. Try again.")
            except Exception as e:
                st.error(f"β Error: {str(e)}")