Raj Jayendrakumar Muchhala committed on
Commit
569dd99
·
1 Parent(s): 6b560e6

Revert changes after commit e74c2c9 due to messy updates

Browse files
Files changed (1) hide show
  1. app.py +142 -170
app.py CHANGED
@@ -8,32 +8,6 @@ import os
8
  # Set Streamlit layout to wide mode
9
  st.set_page_config(layout="wide")
10
 
11
- # Inject custom CSS to fix the left column and make the right column scrollable
12
- st.markdown(
13
- """
14
- <style>
15
- /* Fixed left container */
16
- .fixed-left {
17
- position: fixed;
18
- top: 0;
19
- left: 0;
20
- width: 45%;
21
- height: 100vh;
22
- overflow-y: auto;
23
- padding: 1rem;
24
- background-color: #f9f9f9;
25
- border-right: 1px solid #ccc;
26
- }
27
- /* Scrollable right container with left margin to avoid overlap */
28
- .scrollable-right {
29
- margin-left: 47%; /* slightly more than 45% to add some gap */
30
- padding: 1rem;
31
- }
32
- </style>
33
- """,
34
- unsafe_allow_html=True,
35
- )
36
-
37
  st.title("🎬 AI-Powered Content Planner - Clip Creator")
38
  st.markdown("Paste a transcript on the left and view the generated content plan and extractions on the right.")
39
 
@@ -70,148 +44,146 @@ if not OPENAI_API_KEY:
70
 
71
  client = OpenAI(api_key=OPENAI_API_KEY)
72
 
73
- ###############################################
74
- # LEFT SIDE: Fixed Container for Transcript & Media
75
- ###############################################
76
- st.markdown('<div class="fixed-left">', unsafe_allow_html=True)
77
-
78
- st.subheader("πŸ“ Paste Your Transcript")
79
- transcript = st.text_area("Enter the transcript here:", height=300)
80
-
81
- st.markdown("---")
82
- st.subheader("πŸŽ₯ Video/Audio Upload & Playback")
83
- media_source = st.radio("Select media source", options=["Upload File", "YouTube/External Link"], key="media_radio")
84
- if media_source == "Upload File":
85
- media_file = st.file_uploader("Upload a video or audio file", type=["mp4", "mov", "avi", "mp3", "wav", "ogg"], key="media_file")
86
- if media_file is not None:
87
- if media_file.type.startswith("video"):
88
- st.video(media_file)
89
- elif media_file.type.startswith("audio"):
90
- st.audio(media_file)
91
- else:
92
- media_link = st.text_input("Enter YouTube or external media link:", key="media_link")
93
- if media_link:
94
- st.video(media_link)
95
-
96
- st.markdown("</div>", unsafe_allow_html=True)
97
-
98
- ###############################################
99
- # RIGHT SIDE: Scrollable Container for Clip Plans & Extractions
100
- ###############################################
101
- st.markdown('<div class="scrollable-right">', unsafe_allow_html=True)
102
- st.subheader("πŸ“‹ Generated Clip Plans")
103
-
104
- # Button to generate clip plans from the transcript
105
- if st.button("Generate Plan"):
106
- if not transcript.strip():
107
- st.error("❌ Please enter a transcript.")
108
  else:
109
- with st.spinner("⏳ Generating content plan... Please wait."):
110
- try:
111
- # Prepare prompts for clip plan generation
112
- system_prompt = SYSTEM_MESSAGE.format(prompt_goal=GOAL)
113
- user_prompt = USER_MESSAGE.format(source_content=transcript)
114
- messages = [
115
- {"role": "system", "content": system_prompt},
116
- {"role": "user", "content": user_prompt},
117
- ]
118
-
119
- openai_args = {
120
- "model": clip_plan_model,
121
- "messages": messages,
122
- "response_format": {"type": "json_object"},
123
- }
124
- if clip_plan_model == "o3-mini":
125
- openai_args["reasoning_effort"] = "low"
126
- else:
127
- openai_args["max_tokens"] = 5000
128
- openai_args["temperature"] = 0.45
129
-
130
- response = client.chat.completions.create(**openai_args)
131
- generated_response = response.choices[0].message.content.strip()
132
- content_plan = json.loads(generated_response)
133
-
134
- # Assume the response JSON has a single key containing a list of clip plans
135
- plan_key = list(content_plan.keys())[0]
136
- clip_plans = content_plan.get(plan_key, [])
137
-
138
- # Save clip plans in session state so they persist
139
- st.session_state.clip_plans = clip_plans
140
-
141
- # Clear any previous extraction outputs
142
- for i in range(len(clip_plans)):
143
- st.session_state.pop(f"extracted_clip_{i}", None)
144
- except json.JSONDecodeError:
145
- st.error("⚠️ Failed to parse OpenAI response. Try again.")
146
- except Exception as e:
147
- st.error(f"❌ Error: {str(e)}")
148
-
149
- # Display clip plans if they exist in session state
150
- if "clip_plans" in st.session_state:
151
- # We'll work with a reference to the clip plans list
152
- updated_clip_plans = st.session_state.clip_plans
153
-
154
- for i, clip in enumerate(updated_clip_plans):
155
- # Each clip is rendered in an expander with editable fields
156
- with st.expander(f"🎬 Clip {i + 1}", expanded=True):
157
- new_title = st.text_input("Title", value=clip.get("Title", "N/A"), key=f"title_{i}")
158
- new_focus = st.text_area("Focus Prompt", value=clip.get("Focus Prompt", "N/A"), key=f"focus_{i}")
159
- new_duration = st.number_input(
160
- "Duration Target (seconds)",
161
- value=float(clip.get("Duration Target", 0)),
162
- key=f"duration_{i}",
163
- step=1.0
164
- )
165
-
166
- # Update the clip plan with the edited values
167
- updated_clip_plans[i]["Title"] = new_title
168
- updated_clip_plans[i]["Focus Prompt"] = new_focus
169
- updated_clip_plans[i]["Duration Target"] = new_duration
170
-
171
- # Button to delete this clip plan
172
- if st.button("Delete Clip", key=f"delete_{i}"):
173
- updated_clip_plans.pop(i)
174
- st.session_state.clip_plans = updated_clip_plans
175
- st.experimental_rerun()
176
-
177
- # Button for transcript extraction for this clip
178
- if st.button("Extract Transcript", key=f"extract_{i}"):
179
- with st.spinner("⏳ Extracting transcript section... Please wait."):
180
- try:
181
- # Send only the specific (and possibly edited) clip plan to the extractor
182
- single_clip_json = json.dumps(updated_clip_plans[i])
183
- clipper_user_prompt = CLIPPER_USER_MESSAGE.format(
184
- source_content=transcript,
185
- clip_plan=single_clip_json
186
- )
187
- clipper_messages = [
188
- {"role": "system", "content": CLIPPER_SYSTEM_MESSAGE},
189
- {"role": "user", "content": clipper_user_prompt},
190
- ]
191
-
192
- extraction_args = {
193
- "model": extraction_model,
194
- "messages": clipper_messages,
195
- "response_format": {"type": "json_object"},
196
- }
197
- if extraction_model == "o3-mini":
198
- extraction_args["reasoning_effort"] = "low"
199
- else:
200
- extraction_args["max_tokens"] = 10000
201
- extraction_args["temperature"] = 0.45
202
-
203
- clipper_response = client.chat.completions.create(**extraction_args)
204
- extraction_response = clipper_response.choices[0].message.content.strip()
205
- extracted_clip = json.loads(extraction_response)
206
-
207
- # Save the extraction result for this clip in session state
208
- st.session_state[f"extracted_clip_{i}"] = extracted_clip
209
- except Exception as e:
210
- st.error(f"❌ Extraction error: {str(e)}")
211
-
212
- # Display extraction output if available
213
- if f"extracted_clip_{i}" in st.session_state:
214
- st.markdown("#### πŸ“ Extracted Transcript Section:")
215
- st.write(st.session_state[f"extracted_clip_{i}"])
216
-
217
- st.markdown('</div>', unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
 
 
 
8
  # Set Streamlit layout to wide mode
9
  st.set_page_config(layout="wide")
10
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  st.title("🎬 AI-Powered Content Planner - Clip Creator")
12
  st.markdown("Paste a transcript on the left and view the generated content plan and extractions on the right.")
13
 
 
44
 
45
  client = OpenAI(api_key=OPENAI_API_KEY)
46
 
47
# Layout: Two columns - left for transcript, right for clip plans and extraction
col_transcript, col_output = st.columns([1, 1])

# Left Column: Transcript Input
with col_transcript:
    st.subheader("📝 Paste Your Transcript")
    transcript = st.text_area("Enter the transcript here:", height=400)

    st.markdown("---")
    st.subheader("🎥 Video/Audio Upload & Playback")

    # Allow users to choose between file upload or external link
    media_source = st.radio(
        "Select media source",
        options=["Upload File", "YouTube/External Link"],
        index=0,
    )

    if media_source == "Upload File":
        uploaded_media = st.file_uploader(
            "Upload a video or audio file",
            type=["mp4", "mov", "avi", "mp3", "wav", "ogg"],
        )
        if uploaded_media is not None:
            # Pick the player that matches the uploaded file's MIME type.
            if uploaded_media.type.startswith("video"):
                st.video(uploaded_media)
            elif uploaded_media.type.startswith("audio"):
                st.audio(uploaded_media)
    else:
        external_link = st.text_input("Enter YouTube or external media link:")
        if external_link:
            st.video(external_link)
73
+
74
# Right Column: Clip Plan Generation and Extraction
with col_output:
    st.subheader("📋 Generated Clip Plans")

    # "Generate Plan" sends the transcript to the planner model and caches
    # the resulting clip plans in session state so they survive reruns.
    if st.button("Generate Plan"):
        if not transcript.strip():
            st.error("❌ Please enter a transcript.")
        else:
            with st.spinner("⏳ Generating content plan... Please wait."):
                try:
                    # Build the planner prompt from the module-level templates.
                    plan_messages = [
                        {"role": "system", "content": SYSTEM_MESSAGE.format(prompt_goal=GOAL)},
                        {"role": "user", "content": USER_MESSAGE.format(source_content=transcript)},
                    ]

                    request_kwargs = {
                        "model": clip_plan_model,
                        "messages": plan_messages,
                        "response_format": {"type": "json_object"},
                    }
                    # o3-mini takes a reasoning-effort knob; other models get
                    # the usual sampling parameters instead.
                    if clip_plan_model == "o3-mini":
                        request_kwargs["reasoning_effort"] = "low"
                    else:
                        request_kwargs["max_tokens"] = 5000
                        request_kwargs["temperature"] = 0.45

                    completion = client.chat.completions.create(**request_kwargs)
                    raw_plan = completion.choices[0].message.content.strip()
                    plan_payload = json.loads(raw_plan)

                    # Assume the response JSON has a single key containing a list of clip plans
                    first_key = list(plan_payload.keys())[0]
                    plans = plan_payload.get(first_key, [])

                    # Persist the plans across Streamlit reruns.
                    st.session_state.clip_plans = plans

                    # Drop extraction results left over from a previous plan.
                    for i in range(len(plans)):
                        st.session_state.pop(f"extracted_clip_{i}", None)
                except json.JSONDecodeError:
                    st.error("⚠️ Failed to parse OpenAI response. Try again.")
                except Exception as e:
                    st.error(f"❌ Error: {str(e)}")

    # Render an editable card for each cached clip plan.
    if "clip_plans" in st.session_state:
        plans_ref = st.session_state.clip_plans

        for i, clip in enumerate(plans_ref):
            with st.expander(f"🎬 Clip {i + 1}", expanded=True):
                edited_title = st.text_input("Title", value=clip.get("Title", "N/A"), key=f"title_{i}")
                edited_focus = st.text_area("Focus Prompt", value=clip.get("Focus Prompt", "N/A"), key=f"focus_{i}")
                edited_duration = st.number_input(
                    "Duration Target (seconds)",
                    value=float(clip.get("Duration Target", 0)),
                    key=f"duration_{i}",
                    step=1.0
                )

                # Write the edited values back into the cached plan.
                clip["Title"] = edited_title
                clip["Focus Prompt"] = edited_focus
                clip["Duration Target"] = edited_duration

                # Remove this plan and immediately restart the script so the
                # widget keys re-index cleanly.
                if st.button("Delete Clip", key=f"delete_{i}"):
                    plans_ref.pop(i)
                    st.session_state.clip_plans = plans_ref
                    st.experimental_rerun()

                # Run the extractor model against this (possibly edited) plan.
                if st.button("Extract Transcript", key=f"extract_{i}"):
                    with st.spinner("⏳ Extracting transcript section... Please wait."):
                        try:
                            clipper_messages = [
                                {"role": "system", "content": CLIPPER_SYSTEM_MESSAGE},
                                {
                                    "role": "user",
                                    "content": CLIPPER_USER_MESSAGE.format(
                                        source_content=transcript,
                                        clip_plan=json.dumps(plans_ref[i]),
                                    ),
                                },
                            ]

                            extraction_kwargs = {
                                "model": extraction_model,
                                "messages": clipper_messages,
                                "response_format": {"type": "json_object"},
                            }
                            if extraction_model == "o3-mini":
                                extraction_kwargs["reasoning_effort"] = "low"
                            else:
                                extraction_kwargs["max_tokens"] = 10000
                                extraction_kwargs["temperature"] = 0.45

                            clipper_completion = client.chat.completions.create(**extraction_kwargs)
                            extracted = json.loads(clipper_completion.choices[0].message.content.strip())

                            # Cache the extraction under this clip's index.
                            st.session_state[f"extracted_clip_{i}"] = extracted
                        except Exception as e:
                            st.error(f"❌ Extraction error: {str(e)}")

                # Show any cached extraction for this clip.
                if f"extracted_clip_{i}" in st.session_state:
                    st.markdown("#### 📝 Extracted Transcript Section:")
                    st.write(st.session_state[f"extracted_clip_{i}"])