# Model Training and Optimization
transformers>=4.49.0
diffusers>=0.31.0
accelerate>=1.1.1
tokenizers>=0.20.3
flash-attn @ https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.4cxx11abiFALSE-cp310-cp310-linux_x86_64.whl

# Configuration and Experiment Management
hydra-core>=1.3.0
omegaconf>=2.3.0
pyyaml>=6.0
wandb>=0.15.0

# Video/Image Processing
av==14.1.0
pillow>=9.5.0,<10.0.0  # Pinned for compatibility
einops>=0.7.0

# Text Processing and Tokenization
sentencepiece>=0.2.0
ftfy>=6.1.0  # For text cleaning

huggingface-hub>=0.20.0
tqdm>=4.65.0
colorama>=0.4.6
click>=8.1.0
easydict>=1.10
msgpack>=1.0.5  # For message serialization
pyzmq>=25.0.0  # For ZeroMQ (used in serving)
gradio>=5.0.0