#!/usr/bin/env bash
# Build-and-launch script: configures a TensorFlow CUDA source build, then
# prepares the environment and launches the LTX-Video / SeedVR Gradio app.
#
# Strict mode: -e abort on command failure, -u error on unset variables,
# pipefail makes a pipeline fail if any stage fails.
# NOTE: the shebang must be the very first bytes of the file for the kernel
# to honor it when the script is executed directly.
set -euo pipefail
|
|
|
|
|
# --- TensorFlow source build configuration ------------------------------------
# Enter the TensorFlow source tree (the clone must already exist here); fail
# loudly rather than configuring in the wrong directory.
cd tensorflow || { echo "error: ./tensorflow source tree not found" >&2; exit 1; }

# Answers for ./configure, supplied via environment so the build is
# non-interactive and reproducible.
export TF_NEED_CUDA=1                           # build with CUDA support
export TF_NEED_TENSORRT=1                       # enable TensorRT integration
export TF_TENSORRT_VERSION=8
export TF_CUDA_PATHS=/usr,/usr/local/cuda       # where to look for the toolkit
export TF_CUDA_VERSION=12.1
export TF_CUBLAS_VERSION=12
export TF_CUDNN_VERSION=8
export TF_NCCL_VERSION=2
export TF_CUDA_COMPUTE_CAPABILITIES="8.0,9.0"   # A100 (8.0) and H100 (9.0)
export TF_ENABLE_XLA=1
export TF_NEED_HDFS=0
export CC_OPT_FLAGS="-march=sandybridge -mtune=broadwell"

# `yes ""` presses Enter for any prompt ./configure still asks (most answers
# come from the TF_* variables above). When ./configure exits, `yes` dies of
# SIGPIPE (status 141); under `set -o pipefail` that would abort the script
# even though configure succeeded, so pipefail is suspended for this one
# pipeline. A real ./configure failure is still fatal via `set -e`.
set +o pipefail
yes "" | ./configure
set -o pipefail
|
|
|
|
|
|
|
|
|
|
|
# Debug aid: dump the top four levels of the /app and /data trees.
# Best effort only — `tree` may not be installed and the directories may not
# exist; a missing diagnostic must not abort the launch under `set -e`.
if command -v tree >/dev/null 2>&1; then
  tree -L 4 /app || true
  tree -L 4 /data || true
else
  echo "[debug] 'tree' not installed; skipping directory listing" >&2
fi
|
|
|
|
|
# Startup banner (user-facing messages kept in Portuguese, as in the rest of
# the script).
printf '%s\n' "🚀 Iniciando o script de setup e lançamento do LTX-Video..."
printf 'Usuário atual: %s\n' "$(whoami)"
|
|
|
|
|
|
|
|
# --- LTX-Video runtime environment ---------------------------------------
# HuggingFace cache location; a caller-supplied HF_HOME wins over the default.
: "${HF_HOME:=/data/.cache/huggingface}"
export HF_HOME

# Root directory for rendered outputs; caller-supplied OUTPUT_ROOT wins.
: "${OUTPUT_ROOT:=/app/outputs/ltx}"
export OUTPUT_ROOT

# Emit a progress log line every 8 frames.
export LTXV_FRAME_LOG_EVERY=8
# Enable verbose application-level debugging.
export LTXV_DEBUG=1
|
|
|
|
|
|
|
|
|
|
|
# Sanity check: print the CUDA toolkit compiler version. Under `set -e` this
# aborts the script when nvcc is not on PATH — i.e. the CUDA toolkit is a
# hard requirement at this point.
nvcc -V


# Install NVIDIA's TensorFlow build from the NGC PyPI mirror.
# NOTE(review): nvidia-tensorflow wheels target specific CUDA/Python combos —
# confirm compatibility with the CUDA 12.1 toolchain configured above, and
# whether an (otherwise PyTorch-based) LTX-Video deployment needs it at all.
pip install --extra-index-url=https://pypi.ngc.nvidia.com --trusted-host pypi.ngc.nvidia.com nvidia-tensorflow


# Verify PyTorch sees the GPU(s). Fatal (via `set -e`) if torch itself cannot
# be imported; prints False/0 rather than failing when no GPU is visible.
python -c "import torch; print('PyTorch CUDA disponível:', torch.cuda.is_available()); print('Dispositivos:', torch.cuda.device_count())"
|
|
|
|
|
|
|
|
|
|
|
mkdir -p "$OUTPUT_ROOT" "$HF_HOME" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- ADUC app I/O roots ----------------------------------------------------
# NOTE(review): OUTPUT_ROOT was already exported earlier in this script with
# default /app/outputs/ltx, so the /app/outputs fallback here never applies in
# the normal flow — it only matters if this section runs standalone. Confirm
# which default is intended.
export OUTPUT_ROOT="${OUTPUT_ROOT:-/app/outputs}"
export INPUT_ROOT="${INPUT_ROOT:-/app/inputs}"

# Ensure both roots exist (idempotent); `--` protects against leading-dash values.
mkdir -p -- "$OUTPUT_ROOT" "$INPUT_ROOT"

echo "[aduc][start] Verificando ambiente como usuário: $(whoami)"
|
|
|
|
|
|
|
|
# --- Gradio server settings -------------------------------------------------
# Respect a platform-provided $PORT (e.g. from the hosting service); fall back
# to Gradio's conventional 7860.
gradio_port="${PORT:-7860}"

# Bind all interfaces so the app is reachable from outside the container.
export GRADIO_SERVER_NAME="0.0.0.0"
export GRADIO_SERVER_PORT="$gradio_port"
export GRADIO_ENABLE_QUEUE="True"
|
|
|
|
|
# Hand the process over to the application. `exec` replaces this shell, so the
# app receives container signals (SIGTERM etc.) directly; nothing after this
# line would ever run.
# Fixed: the log line previously announced app_ltx.py while actually launching
# app_seedvr.py.
echo "[ltx][start] Lançando app_seedvr.py..."

exec python /app/app_seedvr.py