from google import genai from google.genai import types from tqdm import tqdm from pycocotools.coco import COCO import pandas as pd import matplotlib.pyplot as plt import time import os import wave import subprocess from PIL import Image import time from google.genai.errors import ClientError def prRed(s): print("\033[1;31m {}\033[0m".format(s)) def prGreen(s): print("\033[92m {}\033[00m".format(s)) def prYellow(s): print("\033[93m {}\033[00m".format(s)) def prBlue(s): print("\033[94m {}\033[00m".format(s)) def prOrange(s): print("\033[38;5;214m {}\033[00m".format(s)) def prPurple(s): print("\033[95m {}\033[00m".format(s)) def prCyan(s): print("\033[96m {}\033[00m".format(s)) def prLightGray(s): print("\033[97m {}\033[00m".format(s)) def prBlack(s): print("\033[90m {}\033[00m".format(s)) def get_id_with_filename(file_name): img_id = os.path.basename(file_name).split('.')[0] img_id = img_id.split('_')[-1] return int(img_id) def get_filename_with_id(id, format='jpg'): return f'COCO_val2014_{str(id).zfill(12)}.{format}' def plot_grouped_imgs(loi, ncols=5, figsize=(20, 20)): nrows = (len(loi) + ncols - 1) // ncols fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize) for i, img_path in enumerate(loi): row = i // ncols col = i % ncols axes[row, col].imshow(Image.open(img_path)) axes[row, col].set_title(os.path.basename(img_path)) axes[row, col].axis('off') plt.tight_layout() plt.show() return fig def generate_video_with_retry(client, image, text, max_retries=20): for attempt in range(max_retries): prYellow(f'Attempt {attempt} ...') try: return generate_video_from_image_and_text(client, image, text) except ClientError as e: if e.code != 429: raise if attempt < max_retries - 1: wait_time = min(2 ** attempt, 64) # Exponential backoff: 1s, 2s, 4s prRed(f"Connection error: {e}. Retrying in {wait_time}s... (attempt {attempt + 1}/{max_retries})") time.sleep(wait_time) else: prRed(f"Failed after {max_retries} attempts") raise def generate_audio_with_retry(client, text, max_retries=20): for attempt in range(max_retries): prYellow(f'Attempt {attempt+1} ...') try: return generate_audio_from_text(client, text) except ClientError as e: if e.code != 429: raise if attempt < max_retries - 1: wait_time = min(2 ** attempt, 64) prRed(f"Connection error: {e}. Retrying in {wait_time}s... (attempt {attempt + 1}/{max_retries})") time.sleep(wait_time) else: prRed(f"Failed after {max_retries} attempts") raise def load_image(image_path): with open(image_path, "rb") as f: image_bytes = f.read() return types.Image(mime_type="image/jpeg", image_bytes=image_bytes) def generate_prompt(client, prompt): transcript = client.models.generate_content( model="gemini-2.5-flash", contents="""Generate a short transcript based on the following description: """ + prompt).text return transcript def generate_image_from_text(client, text): response = client.models.generate_content( model="gemini-2.5-flash-image", contents=[text], config={"response_modalities":['IMAGE']} ) return response.candidates[0].content.parts[0].as_image() def wave_file(filename, pcm, channels=1, rate=24000, sample_width=2): """Save PCM audio data to a .wav file. """ with wave.open(filename, "wb") as wf: wf.setnchannels(channels) wf.setsampwidth(sample_width) wf.setframerate(rate) wf.writeframes(pcm) def generate_audio_from_text(client, text): response = client.models.generate_content( model="gemini-2.5-flash-preview-tts", contents=text, config=types.GenerateContentConfig( response_modalities=["AUDIO"], speech_config=types.SpeechConfig( voice_config=types.VoiceConfig( prebuilt_voice_config=types.PrebuiltVoiceConfig( voice_name='Kore', ) ) ), ) ) data = response.candidates[0].content.parts[0].inline_data.data return data def generate_video_from_image_and_text(client, image, text): operation = client.models.generate_videos( model="veo-3.1-fast-generate-preview", prompt=text, image=image, config=types.GenerateVideosConfig( aspect_ratio="16:9", resolution="720p", duration_seconds="8", ) ) # Poll the operation status until the video is ready. while not operation.done: prCyan("Waiting for video generation to complete...") time.sleep(10) operation = client.operations.get(operation) if operation.result and operation.result.generated_videos: generated_video = operation.result.generated_videos[0] return generated_video.video elif operation.response and operation.response.generated_videos: generated_video = operation.response.generated_videos[0] return generated_video.video else: # Handle cases where the model might fail (e.g. safety filters) prRed(f"Generation failed: {operation}") if not operation.response: raise RuntimeError(f"Unknown reasons for {operation} to be None") raise RuntimeError(f"Generation failed or was filtered: {operation.response.rai_media_filtered_reasons}") # def main(): # with open('allowed_ids.json', 'r') as f: # allowed_ids = json.load(f) # num = 100 # data = load_dataset('json', data_files='MSCOCO_t2i_test.jsonl', split='train') # data = data.shuffle(seed=42).select(range(num)) # client = genai.Client() # ttv_prompt_instruct = "Generate a video based on the following image and description: " # tts_prompt_instruct = "Read aloud the following sentence in a natural and expressive way, in a warm and friendly tone: " # save_dir = './output/' # os.makedirs(save_dir, exist_ok=True) # infos = [] # for item in tqdm(data): # image_path = item['tgt_img_path'][0] # save_name = os.path.basename(image_path).split('.')[0] # save_name_mp4 = f'{save_name}.mp4' # save_name_wav = f'{save_name}.wav' # caption = item['qry_text'] # print(f"Processing {save_name} with caption: {caption}") # if save_name not in allowed_ids: # print(f"🛑 {save_name} not in allowed IDs, skipping this item.") # continue # existing_objects = list(set([obj.split('.')[0] for obj in os.listdir(save_dir)])) # if save_name in existing_objects: # continue # image = load_image(os.path.join(data_dir, image_path)) # try: # video_prompt = ttv_prompt_instruct + caption # # transcript = generate_prompt(client, video_prompt) # # print(f"Generated transcript: {transcript}") # video_data = generate_video_from_image_and_text(client, image, video_prompt) # video_filename = os.path.join(save_dir, save_name_mp4) # client.files.download(file=video_data) # video_data.save(video_filename) # print(f"Generated video saved to {video_filename}") # audio_prompt = tts_prompt_instruct + caption # audio_data = generate_audio_from_text(client, audio_prompt) # audio_filename = os.path.join(save_dir, save_name_wav) # wave_file(audio_filename, audio_data) # print(f"Generated audio saved to {audio_filename}") # except (TypeError, RuntimeError) as e: # print(f"🛑 Error processing {save_name}: {e}, skipping this item.") # continue # # always update negative videos, even if the video already exists # raw_neg_objs = [os.path.basename(img_path).split('.')[0] for img_path in item['tgt_img_path'][1:]] # neg_obj_list = list(set(raw_neg_objs) & set(existing_objects)) # infos.append({ # "id": save_name, # "qry_text": caption, # "qry_image_path": image_path, # "negatives": neg_obj_list, # }) # results = pd.DataFrame(infos) # results.to_json('MSCOCO_ti2v.jsonl', orient='records', lines=True) # print("✅ All done! Results saved to MSCOCO_ti2v.jsonl") def main(): coco_caps = COCO("annotations/captions_val2014.json") client = genai.Client() ttv_prompt_instruct = "Generate a video based on the following image and description: " tts_prompt_instruct = "Read aloud the following sentence in a natural and expressive way, in a warm and friendly tone: " data_dir = 'val2014' video_save_dir = 'videos' audio_save_dir = 'audios' os.makedirs(video_save_dir, exist_ok=True) os.makedirs(audio_save_dir, exist_ok=True) infos = pd.read_json('mscoco_cmret_all.jsonl', lines=True) infos = infos[['image_id', 'file_name', 'hard_negatives']].set_index('image_id', drop=False) records = [] with open('invalid.log') as f: invalids = f.read().splitlines() prBlue(f"Invalid ids: {invalids}") for info in tqdm(infos.itertuples(), total=len(infos)): image_id = info.image_id file_name = info.file_name negatives = info.hard_negatives captions = [ann['caption'] for ann in coco_caps.loadAnns(coco_caps.getAnnIds(imgIds=image_id))] qry_text = captions[-1] # we use different caption as query text, to make it harder prBlue(f"Processing {file_name}\nQuery text: {qry_text}") image = load_image(os.path.join(data_dir, file_name)) generated_negatives = [] for cand_id in tqdm([image_id]+negatives[:10], leave=True, desc="Waiting for API responses ... "): if str(cand_id) in invalids: prYellow(f"{cand_id} contains restricted content, skipping ... ") continue captions = [ann['caption'] for ann in coco_caps.loadAnns(coco_caps.getAnnIds(imgIds=cand_id))] caption = captions[0] # use first caption to generate video and audio image = load_image(os.path.join(data_dir, get_filename_with_id(cand_id))) cand_video_name = get_filename_with_id(cand_id, format='mp4') cand_audio_name = get_filename_with_id(cand_id, format='wav') try: video_save_path = os.path.join(video_save_dir, cand_video_name) if os.path.exists(video_save_path): prGreen(f"Video {cand_id} already exists, skipping generation.") else: prYellow(f"Generating video ...Caption: {caption}") video_prompt = ttv_prompt_instruct + caption video_data = generate_video_with_retry(client, image, video_prompt, max_retries=20) # change max_retries if needed client.files.download(file=video_data) video_data.save(video_save_path) prGreen(f"Generated video saved to {video_save_path}") audio_save_path = os.path.join(audio_save_dir, cand_audio_name) if os.path.exists(audio_save_path): prGreen(f"Audio {cand_id} already exists, skipping generation.") else: prYellow(f"Generating audio ...Caption: {caption}") audio_prompt = tts_prompt_instruct + caption audio_data = generate_audio_with_retry(client, audio_prompt, max_retries=20) # change max_retries if needed wave_file(audio_save_path, audio_data) prGreen(f"Generated audio saved to {audio_save_path}") generated_negatives.append(cand_id) except (TypeError, RuntimeError) as e: invalids.append(cand_id) with open('invalid.log', 'a') as f: f.write(f'{cand_id}\n') prRed(f"🛑 Error processing {cand_video_name}: {e}, skipping this item.") continue records.append({ "image_id": image_id, "qry_text": qry_text, "hard_negatives": [idx for idx in generated_negatives if idx != image_id] }) records = pd.DataFrame(records) records.to_json('mscoco_cmret.jsonl', orient='records', lines=True) if __name__ == "__main__": if not os.path.exists("annotations/instances_val2014.json"): subprocess.run(["wget", "http://images.cocodataset.org/annotations/annotations_trainval2014.zip"]) subprocess.run(["unzip", "annotations_trainval2014.zip", "-d", "annotations"]) subprocess.run(["rm", "annotations_trainval2014.zip"]) subprocess.run(["wget", "http://images.cocodataset.org/zips/val2014.zip"]) subprocess.run(["unzip", "val2014.zip", "-d", "."]) subprocess.run(["rm", "val2014.zip"]) main() # client = genai.Client() # video = generate_video_with_retry(client, load_image('val2014/COCO_val2014_000000176744.jpg'), "Generate a video based on the following image and description: Croweded area on the beach with many kites being flown in the air.") # client.files.download(file=video) # video.save('mscoco_omini/COCO_val2014_000000176744.mp4') # audio = generate_audio_with_retry(client, "Read aloud the following sentence in a natural and expressive way, in a warm and friendly tone: Croweded area on the beach with many kites being flown in the air.") # wave_file('mscoco_omini/COCO_val2014_000000176744.wav', audio)