| from google import genai |
| from google.genai import types |
| from tqdm import tqdm |
| from pycocotools.coco import COCO |
| import pandas as pd |
| import matplotlib.pyplot as plt |
| import time |
| import os |
| import wave |
| import subprocess |
| from PIL import Image |
|
|
| import time |
| from google.genai.errors import ClientError |
|
|
# ANSI-colored print helpers: each wraps the message in a color escape code
# and resets the terminal color afterwards.
def prRed(s): print(f"\033[1;31m {s}\033[0m")
def prGreen(s): print(f"\033[92m {s}\033[00m")
def prYellow(s): print(f"\033[93m {s}\033[00m")
def prBlue(s): print(f"\033[94m {s}\033[00m")
def prOrange(s): print(f"\033[38;5;214m {s}\033[00m")
def prPurple(s): print(f"\033[95m {s}\033[00m")
def prCyan(s): print(f"\033[96m {s}\033[00m")
def prLightGray(s): print(f"\033[97m {s}\033[00m")
def prBlack(s): print(f"\033[90m {s}\033[00m")
|
|
def get_id_with_filename(file_name):
    """Extract the integer image id from a COCO file name or path.

    E.g. '/data/val2014/COCO_val2014_000000000042.jpg' -> 42.
    """
    stem = os.path.basename(file_name).partition('.')[0]
    return int(stem.rsplit('_', 1)[-1])
|
|
def get_filename_with_id(id, format='jpg'):
    """Build the COCO val2014 file name for an image id, zero-padded to 12 digits."""
    padded = str(id).zfill(12)
    return 'COCO_val2014_{}.{}'.format(padded, format)
|
|
def plot_grouped_imgs(loi, ncols=5, figsize=(20, 20)):
    """Show a grid of images (one axis per path in *loi*) and return the figure.

    Args:
        loi: list of image file paths.
        ncols: number of grid columns; rows are derived from len(loi).
        figsize: forwarded to plt.subplots.

    Returns:
        The matplotlib Figure containing the grid.
    """
    nrows = (len(loi) + ncols - 1) // ncols
    # squeeze=False keeps `axes` 2-D even for a single row/column; the original
    # indexed axes[row, col] and crashed whenever nrows == 1.
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize, squeeze=False)
    for i, img_path in enumerate(loi):
        ax = axes[i // ncols, i % ncols]
        ax.imshow(Image.open(img_path))
        ax.set_title(os.path.basename(img_path))
        ax.axis('off')
    # Hide trailing axes that received no image instead of showing empty frames.
    for j in range(len(loi), nrows * ncols):
        axes[j // ncols, j % ncols].axis('off')
    plt.tight_layout()
    plt.show()
    return fig
|
|
def generate_video_with_retry(client, image, text, max_retries=20):
    """Call generate_video_from_image_and_text, retrying on HTTP 429 rate limits.

    Any ClientError other than 429 propagates immediately; after max_retries
    rate-limited attempts the last error is re-raised.

    Args:
        client: genai client.
        image: source image for the video model.
        text: video prompt.
        max_retries: maximum number of attempts before giving up.

    Returns:
        Whatever generate_video_from_image_and_text returns (the video handle).
    """
    for attempt in range(max_retries):
        # Log 1-based attempt numbers, consistent with generate_audio_with_retry
        # (the original printed "Attempt 0" on the first try).
        prYellow(f'Attempt {attempt + 1} ...')
        try:
            return generate_video_from_image_and_text(client, image, text)
        except ClientError as e:
            if e.code != 429:
                raise
            if attempt < max_retries - 1:
                # Exponential backoff, capped at 64 seconds.
                wait_time = min(2 ** attempt, 64)
                prRed(f"Connection error: {e}. Retrying in {wait_time}s... (attempt {attempt + 1}/{max_retries})")
                time.sleep(wait_time)
            else:
                prRed(f"Failed after {max_retries} attempts")
                raise
|
|
def generate_audio_with_retry(client, text, max_retries=20):
    """Call generate_audio_from_text, retrying on HTTP 429 rate limits.

    Non-429 ClientErrors propagate immediately; after max_retries rate-limited
    attempts the last error is re-raised. Backoff doubles per attempt, capped
    at 64 seconds.
    """
    last_attempt = max_retries - 1
    for attempt in range(max_retries):
        prYellow(f'Attempt {attempt + 1} ...')
        try:
            return generate_audio_from_text(client, text)
        except ClientError as err:
            # Only rate limiting is retryable.
            if err.code != 429:
                raise
            if attempt == last_attempt:
                prRed(f"Failed after {max_retries} attempts")
                raise
            wait_time = min(2 ** attempt, 64)
            prRed(f"Connection error: {err}. Retrying in {wait_time}s... (attempt {attempt + 1}/{max_retries})")
            time.sleep(wait_time)
|
|
|
|
|
|
def load_image(image_path):
    """Read an image file from disk and wrap its bytes as a genai Image.

    NOTE(review): the MIME type is hard-coded to image/jpeg regardless of the
    actual file format — confirm all callers pass JPEGs.
    """
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return types.Image(mime_type="image/jpeg", image_bytes=raw)
|
|
def generate_prompt(client, prompt):
    """Ask the Gemini text model to write a short transcript for *prompt*."""
    instruction = """Generate a short transcript based on the following description: """
    response = client.models.generate_content(
        model="gemini-2.5-flash",
        contents=instruction + prompt,
    )
    return response.text
|
|
def generate_image_from_text(client, text):
    """Generate one image for *text* with the Gemini image model.

    Returns the first image part of the first candidate.
    """
    result = client.models.generate_content(
        model="gemini-2.5-flash-image",
        contents=[text],
        config={"response_modalities": ['IMAGE']},
    )
    first_part = result.candidates[0].content.parts[0]
    return first_part.as_image()
|
|
def wave_file(filename, pcm, channels=1, rate=24000, sample_width=2):
    """Write raw PCM bytes to *filename* as an uncompressed .wav file.

    Args:
        filename: output path.
        pcm: raw PCM sample bytes.
        channels: number of audio channels.
        rate: sample rate in Hz.
        sample_width: bytes per sample.
    """
    with wave.open(filename, "wb") as handle:
        # (nchannels, sampwidth, framerate, nframes, comptype, compname);
        # nframes is updated automatically by writeframes.
        handle.setparams((channels, sample_width, rate, 0, "NONE", "not compressed"))
        handle.writeframes(pcm)
|
|
def generate_audio_from_text(client, text):
    """Synthesize *text* with the Gemini TTS model using the 'Kore' voice.

    Returns the raw PCM bytes of the first audio part (suitable for wave_file).
    """
    voice = types.VoiceConfig(
        prebuilt_voice_config=types.PrebuiltVoiceConfig(
            voice_name='Kore',
        )
    )
    tts_config = types.GenerateContentConfig(
        response_modalities=["AUDIO"],
        speech_config=types.SpeechConfig(voice_config=voice),
    )
    response = client.models.generate_content(
        model="gemini-2.5-flash-preview-tts",
        contents=text,
        config=tts_config,
    )
    return response.candidates[0].content.parts[0].inline_data.data
|
|
def generate_video_from_image_and_text(client, image, text):
    """Generate an 8s 720p video from an image + prompt via the Veo model.

    Starts a long-running generate_videos operation, polls it every 10s until
    done, and returns the generated video handle. Raises RuntimeError when the
    operation finishes without a video (e.g. filtered by content moderation).
    """
    operation = client.models.generate_videos(
        model="veo-3.1-fast-generate-preview",
        prompt=text,
        image=image,
        config=types.GenerateVideosConfig(
            aspect_ratio="16:9",
            resolution="720p",
            duration_seconds="8",  # NOTE(review): passed as a string — confirm the SDK accepts/coerces this
        )
    )

    # Poll the long-running operation until it completes.
    while not operation.done:
        prCyan("Waiting for video generation to complete...")
        time.sleep(10)
        operation = client.operations.get(operation)

    # The finished operation may expose the videos under .result or .response;
    # check both before treating it as a failure.
    if operation.result and operation.result.generated_videos:
        generated_video = operation.result.generated_videos[0]
        return generated_video.video
    elif operation.response and operation.response.generated_videos:
        generated_video = operation.response.generated_videos[0]
        return generated_video.video
    else:
        prRed(f"Generation failed: {operation}")
        if not operation.response:
            raise RuntimeError(f"Unknown reasons for {operation} to be None")
        # A present response with no videos indicates content filtering.
        raise RuntimeError(f"Generation failed or was filtered: {operation.response.rai_media_filtered_reasons}")
|
|
| |
| |
| |
|
|
| |
| |
| |
|
|
| |
| |
| |
|
|
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
|
|
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
def main():
    """Generate a video and a TTS audio clip for each query image and its hard negatives.

    Reads MS-COCO val2014 captions plus a JSONL of per-image hard negatives,
    drives the Gemini video/TTS APIs (with rate-limit retry), and writes the
    surviving records to mscoco_cmret.jsonl. Ids rejected by the content
    filter are appended to invalid.log and skipped on this and later runs.
    """
    coco_caps = COCO("annotations/captions_val2014.json")

    client = genai.Client()
    ttv_prompt_instruct = "Generate a video based on the following image and description: "
    tts_prompt_instruct = "Read aloud the following sentence in a natural and expressive way, in a warm and friendly tone: "

    data_dir = 'val2014'
    video_save_dir = 'videos'
    audio_save_dir = 'audios'
    os.makedirs(video_save_dir, exist_ok=True)
    os.makedirs(audio_save_dir, exist_ok=True)

    infos = pd.read_json('mscoco_cmret_all.jsonl', lines=True)
    infos = infos[['image_id', 'file_name', 'hard_negatives']].set_index('image_id', drop=False)
    records = []
    # Ids rejected by the API on previous runs, one per line (kept as strings).
    with open('invalid.log') as f:
        invalids = f.read().splitlines()
    prBlue(f"Invalid ids: {invalids}")

    for info in tqdm(infos.itertuples(), total=len(infos)):
        image_id = info.image_id
        file_name = info.file_name
        negatives = info.hard_negatives

        captions = [ann['caption'] for ann in coco_caps.loadAnns(coco_caps.getAnnIds(imgIds=image_id))]
        qry_text = captions[-1]

        prBlue(f"Processing {file_name}\nQuery text: {qry_text}")

        generated_negatives = []
        # Process the query image itself plus at most 10 hard negatives.
        # (The original pre-loaded the query image here; it was unconditionally
        # reloaded per candidate below, so that load was dead work.)
        for cand_id in tqdm([image_id] + negatives[:10], leave=True, desc="Waiting for API responses ... "):
            if str(cand_id) in invalids:
                prYellow(f"{cand_id} contains restricted content, skipping ... ")
                continue

            captions = [ann['caption'] for ann in coco_caps.loadAnns(coco_caps.getAnnIds(imgIds=cand_id))]
            caption = captions[0]
            image = load_image(os.path.join(data_dir, get_filename_with_id(cand_id)))
            cand_video_name = get_filename_with_id(cand_id, format='mp4')
            cand_audio_name = get_filename_with_id(cand_id, format='wav')

            try:
                video_save_path = os.path.join(video_save_dir, cand_video_name)
                if os.path.exists(video_save_path):
                    prGreen(f"Video {cand_id} already exists, skipping generation.")
                else:
                    prYellow(f"Generating video ...Caption: {caption}")
                    video_prompt = ttv_prompt_instruct + caption
                    video_data = generate_video_with_retry(client, image, video_prompt, max_retries=20)
                    client.files.download(file=video_data)
                    video_data.save(video_save_path)
                    prGreen(f"Generated video saved to {video_save_path}")

                audio_save_path = os.path.join(audio_save_dir, cand_audio_name)
                if os.path.exists(audio_save_path):
                    prGreen(f"Audio {cand_id} already exists, skipping generation.")
                else:
                    prYellow(f"Generating audio ...Caption: {caption}")
                    audio_prompt = tts_prompt_instruct + caption
                    audio_data = generate_audio_with_retry(client, audio_prompt, max_retries=20)
                    wave_file(audio_save_path, audio_data)
                    prGreen(f"Generated audio saved to {audio_save_path}")
                generated_negatives.append(cand_id)
            except (TypeError, RuntimeError) as e:
                # Append as str: the skip check above tests `str(cand_id) in invalids`,
                # so the original's raw-int append never matched and an id invalidated
                # mid-run was not skipped when seen again in the same run.
                invalids.append(str(cand_id))
                with open('invalid.log', 'a') as f:
                    f.write(f'{cand_id}\n')
                prRed(f"🛑 Error processing {cand_video_name}: {e}, skipping this item.")
                continue

        records.append({
            "image_id": image_id,
            "qry_text": qry_text,
            # The query id was generated alongside but must not list itself as a negative.
            "hard_negatives": [idx for idx in generated_negatives if idx != image_id]
        })
    records = pd.DataFrame(records)
    records.to_json('mscoco_cmret.jsonl', orient='records', lines=True)
|
|
if __name__ == "__main__":
    # One-time dataset bootstrap: fetch the COCO 2014 annotations and the
    # val2014 image set if they are not already present. The instances file
    # is used as a sentinel for the whole annotations bundle.
    # NOTE(review): relies on wget/unzip/rm being on PATH; failures are not
    # checked, so a broken download only surfaces later when files are read.
    if not os.path.exists("annotations/instances_val2014.json"):
        subprocess.run(["wget", "http://images.cocodataset.org/annotations/annotations_trainval2014.zip"])
        subprocess.run(["unzip", "annotations_trainval2014.zip", "-d", "annotations"])
        subprocess.run(["rm", "annotations_trainval2014.zip"])
        subprocess.run(["wget", "http://images.cocodataset.org/zips/val2014.zip"])
        subprocess.run(["unzip", "val2014.zip", "-d", "."])
        subprocess.run(["rm", "val2014.zip"])
    main()
| |
| |
| |
| |
|
|
| |
| |