"""
Pre-computes T5 text embeddings for the MotionStreamer dataset.

This script imports the original base Text2MotionDataset class and scans its
internal 'data_dict' to discover every caption the dataset can serve,
including all sub-clips, so the embedding cache is complete by construction.
"""

import argparse
import hashlib
import json
import os
import sys

import numpy as np
import torch
from sentence_transformers import SentenceTransformer
from tqdm import tqdm

try:
    from humanml3d_272.dataset_TM_train_motionstreamer import Text2MotionDataset
except ImportError as e:
    print("FATAL ERROR: Could not import the 'Text2MotionDataset' class.")
    print("Please make sure you run this script from the root of your project directory.")
    print(f"Original error: {e}")
    sys.exit(1)


def get_args_parser():
    parser = argparse.ArgumentParser(description='Pre-compute T5 Embeddings')

    # Arguments forwarded to the Text2MotionDataset constructor.
    parser.add_argument('--dataname', type=str, default='t2m_babel_272', help='Dataset name (for dataset init)')
    parser.add_argument('--latent_dir', type=str, default='babel_272_stream/t2m_babel_latents', help='Latent dir (for dataset init)')
    parser.add_argument('--unit_length', type=int, default=4, help='Unit length (for dataset init; 4 is a common default)')

    # Arguments for the embedding pre-computation itself.
    parser.add_argument('--output_file', type=str, default='babel_272_stream/text_embeddings.npy', help='Path to save the output .npy file.')
    parser.add_argument('--t5_model_path', type=str, default='sentence-t5-xl', help='Path or HF name for the Sentence-T5-XL model.')
    parser.add_argument('--batch_size', type=int, default=256, help='Batch size for T5 encoding.')
    return parser


def main():
    parser = get_args_parser()
    args = parser.parse_args()
    print(f"Configuration:\n{json.dumps(vars(args), indent=4, sort_keys=True)}")

    # Guard against a bare filename: os.path.dirname returns '' in that case,
    # and os.makedirs('') raises FileNotFoundError.
    output_dir = os.path.dirname(args.output_file)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    print(f"Embeddings will be saved to: {args.output_file}")
print(f"Loading T5 model from: {args.t5_model_path}") |
|
|
device = 'cuda' if torch.cuda.is_available() else 'cpu' |
|
|
|
|
|
if device == 'cuda' and torch.cuda.is_bf16_supported(): |
|
|
print("bfloat16 is supported, loading model in bf16.") |
|
|
t5_model = SentenceTransformer(args.t5_model_path, device=device, model_kwargs={'torch_dtype': torch.bfloat16}) |
|
|
else: |
|
|
print("bfloat16 not supported or not on CUDA, loading model in fp32.") |
|
|
t5_model = SentenceTransformer(args.t5_model_path, device=device) |
|
|
t5_model.eval() |
|
|
for p in t5_model.parameters(): |
|
|
p.requires_grad = False |
|
|
print("T5 model loaded successfully.") |
|
|
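
    # Optional sanity check: report the sentence-embedding width so mismatches
    # with downstream consumers surface early. (Assumption: the sentence-t5
    # family projects to 768 dimensions; adjust expectations for other models.)
    emb_dim = t5_model.get_sentence_embedding_dimension()
    print(f"Sentence embedding dimension: {emb_dim}")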

    print("Instantiating the Text2MotionDataset to scan for all captions...")
    dataset = Text2MotionDataset(
        dataset_name=args.dataname,
        latent_dir=args.latent_dir,
        unit_length=args.unit_length
    )

    if not hasattr(dataset, 'data_dict') or not isinstance(dataset.data_dict, dict):
        print("FATAL ERROR: The imported Text2MotionDataset does not have 'data_dict'.")
        sys.exit(1)

    # The empty string is embedded too so an unconditional embedding is
    # available at train time (e.g., for classifier-free guidance dropout).
    unique_captions = set()
    unique_captions.add('')
    print("Extracting all unique captions from the dataset's internal dictionary...")

    for data_item in tqdm(dataset.data_dict.values(), desc="Scanning discovered samples"):
        for text_dict in data_item['text']:
            unique_captions.add(text_dict['caption'])

    captions_list = list(unique_captions)
    print(f"Found {len(captions_list)} unique captions to encode.")

    print(f"Encoding {len(captions_list)} captions in batches of {args.batch_size}...")
    with torch.no_grad():
        all_embeddings = t5_model.encode(
            captions_list,
            batch_size=args.batch_size,
            convert_to_tensor=True,
            show_progress_bar=True
        )

    # Cast back to fp32 on the CPU so the cache loads identically whether the
    # model ran in bf16 or fp32.
    all_embeddings_fp32 = all_embeddings.to(torch.float32).cpu().numpy()

    # Key each embedding by the SHA-256 hash of its caption, so lookups at
    # train time are exact and independent of caption length.
    embeddings_dict = {}
    for i, final_caption in enumerate(captions_list):
        caption_hash = hashlib.sha256(final_caption.encode('utf-8')).hexdigest()
        embeddings_dict[caption_hash] = all_embeddings_fp32[i]

    print(f"Saving {len(embeddings_dict)} embeddings to {args.output_file}...")
    np.save(args.output_file, embeddings_dict, allow_pickle=True)
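
    # A consumer would restore the cache along these lines (a sketch, assuming
    # the training code hashes captions the same way; np.save wraps the dict
    # in a 0-d object array, hence the .item() call):
    #
    #     cache = np.load(args.output_file, allow_pickle=True).item()
    #     key = hashlib.sha256(caption.encode('utf-8')).hexdigest()
    #     embedding = cache[key]  # (emb_dim,) float32 numpy array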

    print("--- All text embeddings have been pre-computed and saved. ---")


if __name__ == '__main__':
    main()
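
# Example invocation (the filename precompute_t5_embeddings.py is illustrative;
# use whatever name this script is saved under):
#
#   python precompute_t5_embeddings.py \
#       --latent_dir babel_272_stream/t2m_babel_latents \
#       --output_file babel_272_stream/text_embeddings.npy \
#       --t5_model_path sentence-t5-xl \
#       --batch_size 256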