rairo committed on
Commit
d70a97a
·
verified ·
1 Parent(s): 1caa3bc

Update sozo_gen.py

Browse files
Files changed (1) hide show
  1. sozo_gen.py +6 -10
sozo_gen.py CHANGED
@@ -966,14 +966,10 @@ def generate_single_chart(df: pd.DataFrame, description: str, uid: str, project_
966
  def generate_video_from_project(df: pd.DataFrame, raw_md: str, uid: str, project_id: str, voice_model: str, bucket):
967
  logging.info(f"Generating video for project {project_id} with voice {voice_model}")
968
  llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", google_api_key=API_KEY, temperature=0.2)
969
-
970
- # Modified prompt to output script directly without prefacing
971
- story_prompt = f"Generate a {VIDEO_SCENES}-scene video script. Each scene must be separated by '[SCENE_BREAK]' and contain narration and one chart tag. Report: {raw_md}"
972
-
973
  script = llm.invoke(story_prompt).content
974
  scenes = [s.strip() for s in script.split("[SCENE_BREAK]") if s.strip()]
975
  video_parts, audio_parts, temps = [], [], []
976
-
977
  for sc in scenes:
978
  descs, narrative = extract_chart_tags(sc), clean_narration(sc)
979
  audio_bytes = deepgram_tts(narrative, voice_model)
@@ -991,10 +987,10 @@ def generate_video_from_project(df: pd.DataFrame, raw_md: str, uid: str, project
991
  img_cv = cv2.cvtColor(np.array(img.resize((WIDTH, HEIGHT))), cv2.COLOR_RGB2BGR)
992
  animate_image_fade(img_cv, dur, mp4)
993
  video_parts.append(str(mp4)); temps.append(mp4)
994
-
995
  with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_vid, \
996
- tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_aud, \
997
- tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as final_vid:
998
 
999
  silent_vid_path = Path(temp_vid.name)
1000
  audio_mix_path = Path(temp_aud.name)
@@ -1005,8 +1001,8 @@ def generate_video_from_project(df: pd.DataFrame, raw_md: str, uid: str, project
1005
 
1006
  subprocess.run(
1007
  ["ffmpeg", "-y", "-i", str(silent_vid_path), "-i", str(audio_mix_path),
1008
- "-c:v", "libx264", "-pix_fmt", "yuv420p", "-c:a", "aac",
1009
- "-map", "0:v:0", "-map", "1:a:0", "-shortest", str(final_vid_path)],
1010
  check=True, capture_output=True,
1011
  )
1012
 
 
966
  def generate_video_from_project(df: pd.DataFrame, raw_md: str, uid: str, project_id: str, voice_model: str, bucket):
967
  logging.info(f"Generating video for project {project_id} with voice {voice_model}")
968
  llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", google_api_key=API_KEY, temperature=0.2)
969
+ story_prompt = f"Based on the following report, create a script for a {VIDEO_SCENES}-scene video. Each scene must be separated by '[SCENE_BREAK]' and contain narration and one chart tag. Report: {raw_md}. only output the script no quips"
 
 
 
970
  script = llm.invoke(story_prompt).content
971
  scenes = [s.strip() for s in script.split("[SCENE_BREAK]") if s.strip()]
972
  video_parts, audio_parts, temps = [], [], []
 
973
  for sc in scenes:
974
  descs, narrative = extract_chart_tags(sc), clean_narration(sc)
975
  audio_bytes = deepgram_tts(narrative, voice_model)
 
987
  img_cv = cv2.cvtColor(np.array(img.resize((WIDTH, HEIGHT))), cv2.COLOR_RGB2BGR)
988
  animate_image_fade(img_cv, dur, mp4)
989
  video_parts.append(str(mp4)); temps.append(mp4)
990
+
991
  with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_vid, \
992
+ tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_aud, \
993
+ tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as final_vid:
994
 
995
  silent_vid_path = Path(temp_vid.name)
996
  audio_mix_path = Path(temp_aud.name)
 
1001
 
1002
  subprocess.run(
1003
  ["ffmpeg", "-y", "-i", str(silent_vid_path), "-i", str(audio_mix_path),
1004
+ "-c:v", "libx264", "-pix_fmt", "yuv420p", "-c:a", "aac",
1005
+ "-map", "0:v:0", "-map", "1:a:0", "-shortest", str(final_vid_path)],
1006
  check=True, capture_output=True,
1007
  )
1008