Raj Jayendrakumar Muchhala committed on
Commit
6b560e6
·
1 Parent(s): 886632d

scrolling right and static left

Browse files
Files changed (1) hide show
  1. app.py +159 -158
app.py CHANGED
@@ -8,23 +8,26 @@ import os
8
  # Set Streamlit layout to wide mode
9
  st.set_page_config(layout="wide")
10
 
11
- # Inject custom CSS for independent scrolling and styling
12
  st.markdown(
13
  """
14
  <style>
15
- .scrollable-left {
16
- height: 600px; /* Adjust as needed */
17
- overflow-y: auto;
18
- padding: 1rem;
19
- border: 1px solid #ddd;
20
- background-color: #f9f9f9;
 
 
 
 
 
21
  }
 
22
  .scrollable-right {
23
- height: 600px; /* Adjust as needed */
24
- overflow-y: auto;
25
- padding: 1rem;
26
- border: 1px solid #ddd;
27
- background-color: #f1f1f1;
28
  }
29
  </style>
30
  """,
@@ -67,150 +70,148 @@ if not OPENAI_API_KEY:
67
 
68
  client = OpenAI(api_key=OPENAI_API_KEY)
69
 
70
- # Layout: Two columns - left for transcript, right for clip plans and extraction
71
- col_transcript, col_output = st.columns([1, 1])
72
-
73
- # Left Column: Transcript Input
74
- with col_transcript:
75
- st.markdown('<div class="scrollable-left">', unsafe_allow_html=True)
76
- st.subheader("πŸ“ Paste Your Transcript")
77
- transcript = st.text_area("Enter the transcript here:", height=400)
78
-
79
- st.markdown("---")
80
- st.subheader("πŸŽ₯ Video/Audio Upload & Playback")
81
-
82
- # Allow users to choose between file upload or external link
83
- media_source = st.radio("Select media source", options=["Upload File", "YouTube/External Link"], index=0)
84
-
85
- if media_source == "Upload File":
86
- media_file = st.file_uploader("Upload a video or audio file", type=["mp4", "mov", "avi", "mp3", "wav", "ogg"])
87
- if media_file is not None:
88
- # Detect media type and play accordingly
89
- if media_file.type.startswith("video"):
90
- st.video(media_file)
91
- elif media_file.type.startswith("audio"):
92
- st.audio(media_file)
 
 
 
 
 
 
 
 
 
 
 
 
93
  else:
94
- media_link = st.text_input("Enter YouTube or external media link:")
95
- if media_link:
96
- st.video(media_link)
97
- st.markdown('</div>', unsafe_allow_html=True)
98
-
99
- # Right Column: Clip Plan Generation and Extraction
100
- with col_output:
101
- st.markdown('<div class="scrollable-right">', unsafe_allow_html=True)
102
- st.subheader("πŸ“‹ Generated Clip Plans")
103
-
104
- # Button to generate clip plans from the transcript
105
- if st.button("Generate Plan"):
106
- if not transcript.strip():
107
- st.error("❌ Please enter a transcript.")
108
- else:
109
- with st.spinner("⏳ Generating content plan... Please wait."):
110
- try:
111
- # Prepare prompts for clip plan generation
112
- system_prompt = SYSTEM_MESSAGE.format(prompt_goal=GOAL)
113
- user_prompt = USER_MESSAGE.format(source_content=transcript)
114
- messages = [
115
- {"role": "system", "content": system_prompt},
116
- {"role": "user", "content": user_prompt},
117
- ]
118
-
119
- openai_args = {
120
- "model": clip_plan_model,
121
- "messages": messages,
122
- "response_format": {"type": "json_object"},
123
- }
124
- if clip_plan_model == "o3-mini":
125
- openai_args["reasoning_effort"] = "low"
126
- else:
127
- openai_args["max_tokens"] = 5000
128
- openai_args["temperature"] = 0.45
129
-
130
- response = client.chat.completions.create(**openai_args)
131
- generated_response = response.choices[0].message.content.strip()
132
- content_plan = json.loads(generated_response)
133
-
134
- # Assume the response JSON has a single key containing a list of clip plans
135
- plan_key = list(content_plan.keys())[0]
136
- clip_plans = content_plan.get(plan_key, [])
137
-
138
- # Save clip plans in session state so they persist
139
- st.session_state.clip_plans = clip_plans
140
-
141
- # Clear any previous extraction outputs
142
- for i in range(len(clip_plans)):
143
- st.session_state.pop(f"extracted_clip_{i}", None)
144
- except json.JSONDecodeError:
145
- st.error("⚠️ Failed to parse OpenAI response. Try again.")
146
- except Exception as e:
147
- st.error(f"❌ Error: {str(e)}")
148
-
149
- # Display clip plans if they exist in session state
150
- if "clip_plans" in st.session_state:
151
- # We'll work with a reference to the clip plans list
152
- updated_clip_plans = st.session_state.clip_plans
153
-
154
- for i, clip in enumerate(updated_clip_plans):
155
- # Each clip is rendered in an expander with editable fields
156
- with st.expander(f"🎬 Clip {i + 1}", expanded=True):
157
- new_title = st.text_input("Title", value=clip.get("Title", "N/A"), key=f"title_{i}")
158
- new_focus = st.text_area("Focus Prompt", value=clip.get("Focus Prompt", "N/A"), key=f"focus_{i}")
159
- new_duration = st.number_input(
160
- "Duration Target (seconds)",
161
- value=float(clip.get("Duration Target", 0)),
162
- key=f"duration_{i}",
163
- step=1.0
164
- )
165
-
166
- # Update the clip plan with the edited values
167
- updated_clip_plans[i]["Title"] = new_title
168
- updated_clip_plans[i]["Focus Prompt"] = new_focus
169
- updated_clip_plans[i]["Duration Target"] = new_duration
170
-
171
- # Button to delete this clip plan
172
- if st.button("Delete Clip", key=f"delete_{i}"):
173
- updated_clip_plans.pop(i)
174
- st.session_state.clip_plans = updated_clip_plans
175
- st.experimental_rerun()
176
-
177
- # Button for transcript extraction for this clip
178
- if st.button("Extract Transcript", key=f"extract_{i}"):
179
- with st.spinner("⏳ Extracting transcript section... Please wait."):
180
- try:
181
- # Send only the specific (and possibly edited) clip plan to the extractor
182
- single_clip_json = json.dumps(updated_clip_plans[i])
183
- clipper_user_prompt = CLIPPER_USER_MESSAGE.format(
184
- source_content=transcript,
185
- clip_plan=single_clip_json
186
- )
187
- clipper_messages = [
188
- {"role": "system", "content": CLIPPER_SYSTEM_MESSAGE},
189
- {"role": "user", "content": clipper_user_prompt},
190
- ]
191
-
192
- extraction_args = {
193
- "model": extraction_model,
194
- "messages": clipper_messages,
195
- "response_format": {"type": "json_object"},
196
- }
197
- if extraction_model == "o3-mini":
198
- extraction_args["reasoning_effort"] = "low"
199
- else:
200
- extraction_args["max_tokens"] = 10000
201
- extraction_args["temperature"] = 0.45
202
-
203
- clipper_response = client.chat.completions.create(**extraction_args)
204
- extraction_response = clipper_response.choices[0].message.content.strip()
205
- extracted_clip = json.loads(extraction_response)
206
-
207
- # Save the extraction result for this clip in session state
208
- st.session_state[f"extracted_clip_{i}"] = extracted_clip
209
- except Exception as e:
210
- st.error(f"❌ Extraction error: {str(e)}")
211
-
212
- # Display extraction output if available
213
- if f"extracted_clip_{i}" in st.session_state:
214
- st.markdown("#### πŸ“ Extracted Transcript Section:")
215
- st.write(st.session_state[f"extracted_clip_{i}"])
216
- st.markdown('</div>', unsafe_allow_html=True)
 
8
  # Set Streamlit layout to wide mode
9
  st.set_page_config(layout="wide")
10
 
11
+ # Inject custom CSS to fix the left column and make the right column scrollable
12
  st.markdown(
13
  """
14
  <style>
15
+ /* Fixed left container */
16
+ .fixed-left {
17
+ position: fixed;
18
+ top: 0;
19
+ left: 0;
20
+ width: 45%;
21
+ height: 100vh;
22
+ overflow-y: auto;
23
+ padding: 1rem;
24
+ background-color: #f9f9f9;
25
+ border-right: 1px solid #ccc;
26
  }
27
+ /* Scrollable right container with left margin to avoid overlap */
28
  .scrollable-right {
29
+ margin-left: 47%; /* slightly more than 45% to add some gap */
30
+ padding: 1rem;
 
 
 
31
  }
32
  </style>
33
  """,
 
70
 
71
  client = OpenAI(api_key=OPENAI_API_KEY)
72
 
73
+ ###############################################
74
+ # LEFT SIDE: Fixed Container for Transcript & Media
75
+ ###############################################
76
+ st.markdown('<div class="fixed-left">', unsafe_allow_html=True)
77
+
78
+ st.subheader("πŸ“ Paste Your Transcript")
79
+ transcript = st.text_area("Enter the transcript here:", height=300)
80
+
81
+ st.markdown("---")
82
+ st.subheader("πŸŽ₯ Video/Audio Upload & Playback")
83
+ media_source = st.radio("Select media source", options=["Upload File", "YouTube/External Link"], key="media_radio")
84
+ if media_source == "Upload File":
85
+ media_file = st.file_uploader("Upload a video or audio file", type=["mp4", "mov", "avi", "mp3", "wav", "ogg"], key="media_file")
86
+ if media_file is not None:
87
+ if media_file.type.startswith("video"):
88
+ st.video(media_file)
89
+ elif media_file.type.startswith("audio"):
90
+ st.audio(media_file)
91
+ else:
92
+ media_link = st.text_input("Enter YouTube or external media link:", key="media_link")
93
+ if media_link:
94
+ st.video(media_link)
95
+
96
+ st.markdown("</div>", unsafe_allow_html=True)
97
+
98
+ ###############################################
99
+ # RIGHT SIDE: Scrollable Container for Clip Plans & Extractions
100
+ ###############################################
101
+ st.markdown('<div class="scrollable-right">', unsafe_allow_html=True)
102
+ st.subheader("πŸ“‹ Generated Clip Plans")
103
+
104
+ # Button to generate clip plans from the transcript
105
+ if st.button("Generate Plan"):
106
+ if not transcript.strip():
107
+ st.error("❌ Please enter a transcript.")
108
  else:
109
+ with st.spinner("⏳ Generating content plan... Please wait."):
110
+ try:
111
+ # Prepare prompts for clip plan generation
112
+ system_prompt = SYSTEM_MESSAGE.format(prompt_goal=GOAL)
113
+ user_prompt = USER_MESSAGE.format(source_content=transcript)
114
+ messages = [
115
+ {"role": "system", "content": system_prompt},
116
+ {"role": "user", "content": user_prompt},
117
+ ]
118
+
119
+ openai_args = {
120
+ "model": clip_plan_model,
121
+ "messages": messages,
122
+ "response_format": {"type": "json_object"},
123
+ }
124
+ if clip_plan_model == "o3-mini":
125
+ openai_args["reasoning_effort"] = "low"
126
+ else:
127
+ openai_args["max_tokens"] = 5000
128
+ openai_args["temperature"] = 0.45
129
+
130
+ response = client.chat.completions.create(**openai_args)
131
+ generated_response = response.choices[0].message.content.strip()
132
+ content_plan = json.loads(generated_response)
133
+
134
+ # Assume the response JSON has a single key containing a list of clip plans
135
+ plan_key = list(content_plan.keys())[0]
136
+ clip_plans = content_plan.get(plan_key, [])
137
+
138
+ # Save clip plans in session state so they persist
139
+ st.session_state.clip_plans = clip_plans
140
+
141
+ # Clear any previous extraction outputs
142
+ for i in range(len(clip_plans)):
143
+ st.session_state.pop(f"extracted_clip_{i}", None)
144
+ except json.JSONDecodeError:
145
+ st.error("⚠️ Failed to parse OpenAI response. Try again.")
146
+ except Exception as e:
147
+ st.error(f"❌ Error: {str(e)}")
148
+
149
+ # Display clip plans if they exist in session state
150
+ if "clip_plans" in st.session_state:
151
+ # We'll work with a reference to the clip plans list
152
+ updated_clip_plans = st.session_state.clip_plans
153
+
154
+ for i, clip in enumerate(updated_clip_plans):
155
+ # Each clip is rendered in an expander with editable fields
156
+ with st.expander(f"🎬 Clip {i + 1}", expanded=True):
157
+ new_title = st.text_input("Title", value=clip.get("Title", "N/A"), key=f"title_{i}")
158
+ new_focus = st.text_area("Focus Prompt", value=clip.get("Focus Prompt", "N/A"), key=f"focus_{i}")
159
+ new_duration = st.number_input(
160
+ "Duration Target (seconds)",
161
+ value=float(clip.get("Duration Target", 0)),
162
+ key=f"duration_{i}",
163
+ step=1.0
164
+ )
165
+
166
+ # Update the clip plan with the edited values
167
+ updated_clip_plans[i]["Title"] = new_title
168
+ updated_clip_plans[i]["Focus Prompt"] = new_focus
169
+ updated_clip_plans[i]["Duration Target"] = new_duration
170
+
171
+ # Button to delete this clip plan
172
+ if st.button("Delete Clip", key=f"delete_{i}"):
173
+ updated_clip_plans.pop(i)
174
+ st.session_state.clip_plans = updated_clip_plans
175
+ st.experimental_rerun()
176
+
177
+ # Button for transcript extraction for this clip
178
+ if st.button("Extract Transcript", key=f"extract_{i}"):
179
+ with st.spinner("⏳ Extracting transcript section... Please wait."):
180
+ try:
181
+ # Send only the specific (and possibly edited) clip plan to the extractor
182
+ single_clip_json = json.dumps(updated_clip_plans[i])
183
+ clipper_user_prompt = CLIPPER_USER_MESSAGE.format(
184
+ source_content=transcript,
185
+ clip_plan=single_clip_json
186
+ )
187
+ clipper_messages = [
188
+ {"role": "system", "content": CLIPPER_SYSTEM_MESSAGE},
189
+ {"role": "user", "content": clipper_user_prompt},
190
+ ]
191
+
192
+ extraction_args = {
193
+ "model": extraction_model,
194
+ "messages": clipper_messages,
195
+ "response_format": {"type": "json_object"},
196
+ }
197
+ if extraction_model == "o3-mini":
198
+ extraction_args["reasoning_effort"] = "low"
199
+ else:
200
+ extraction_args["max_tokens"] = 10000
201
+ extraction_args["temperature"] = 0.45
202
+
203
+ clipper_response = client.chat.completions.create(**extraction_args)
204
+ extraction_response = clipper_response.choices[0].message.content.strip()
205
+ extracted_clip = json.loads(extraction_response)
206
+
207
+ # Save the extraction result for this clip in session state
208
+ st.session_state[f"extracted_clip_{i}"] = extracted_clip
209
+ except Exception as e:
210
+ st.error(f"❌ Extraction error: {str(e)}")
211
+
212
+ # Display extraction output if available
213
+ if f"extracted_clip_{i}" in st.session_state:
214
+ st.markdown("#### πŸ“ Extracted Transcript Section:")
215
+ st.write(st.session_state[f"extracted_clip_{i}"])
216
+
217
+ st.markdown('</div>', unsafe_allow_html=True)