# Fast GPU CI for pull requests: runs quality/consistency gates on CPU runners,
# then slow-marked CUDA pipeline/model/example tests on GPU runners whenever
# core loading/pipeline/LoRA code paths (or examples) change.
name: Fast GPU Tests on PR

on:
  pull_request:
    branches:
      - main
    # Only trigger the expensive GPU jobs when these core files change.
    paths:
      - "src/diffusers/models/modeling_utils.py"
      - "src/diffusers/models/model_loading_utils.py"
      - "src/diffusers/pipelines/pipeline_utils.py"
      - "src/diffusers/pipeline_loading_utils.py"
      - "src/diffusers/loaders/lora_base.py"
      - "src/diffusers/loaders/lora_pipeline.py"
      - "src/diffusers/loaders/peft.py"
      - "tests/pipelines/test_pipelines_common.py"
      - "tests/models/test_modeling_common.py"
      - "examples/**/*.py"
  workflow_dispatch:

# Cancel superseded runs on the same PR branch; run_id keeps manual dispatches unique.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

env:
  # Quoted so YAML does not coerce "yes" to a boolean.
  DIFFUSERS_IS_CI: "yes"
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  HF_XET_HIGH_PERFORMANCE: 1
  PYTEST_TIMEOUT: 600
  PIPELINE_USAGE_CUTOFF: 1000000000

jobs:
  # Lint/style gate — cheap CPU job that must pass before anything GPU runs.
  check_code_quality:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v6
      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: "3.8"
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install .[quality]
      - name: Check quality
        run: make quality
      - name: Check if failure
        if: ${{ failure() }}
        run: |
          echo "Quality check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make style && make quality'" >> $GITHUB_STEP_SUMMARY

  # Repo-consistency gate (copies, dummies, support lists, deps table).
  check_repository_consistency:
    needs: check_code_quality
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v6
      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: "3.8"
      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install .[quality]
      - name: Check repo consistency
        run: |
          python utils/check_copies.py
          python utils/check_dummies.py
          python utils/check_support_list.py
          make deps_table_check_updated
      - name: Check if failure
        if: ${{ failure() }}
        run: |
          echo "Repo consistency check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make fix-copies'" >> $GITHUB_STEP_SUMMARY

  # Builds the JSON matrix of pipeline test modules consumed by the CUDA job below.
  setup_torch_cuda_pipeline_matrix:
    needs: [check_code_quality, check_repository_consistency]
    name: Setup Torch Pipelines CUDA Slow Tests Matrix
    runs-on:
      group: aws-general-8-plus
    container:
      image: diffusers/diffusers-pytorch-cpu
    outputs:
      pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v6
        with:
          fetch-depth: 2
      - name: Install dependencies
        run: |
          uv pip install -e ".[quality]"
      - name: Environment
        run: |
          python utils/print_env.py
      - name: Fetch Pipeline Matrix
        id: fetch_pipeline_matrix
        run: |
          matrix=$(python utils/fetch_torch_cuda_pipeline_test_matrix.py)
          echo $matrix
          echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT
      - name: Pipeline Tests Artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v6
        with:
          name: test-pipelines.json
          path: reports

  # Runs the per-pipeline CUDA test shards produced by the matrix job.
  torch_pipelines_cuda_tests:
    name: Torch Pipelines CUDA Tests
    needs: setup_torch_cuda_pipeline_matrix
    strategy:
      fail-fast: false
      max-parallel: 8
      matrix:
        module: ${{ fromJson(needs.setup_torch_cuda_pipeline_matrix.outputs.pipeline_test_matrix) }}
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "16gb" --ipc host --gpus all
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v6
        with:
          fetch-depth: 2

      - name: NVIDIA-SMI
        run: |
          nvidia-smi
      - name: Install dependencies
        run: |
          uv pip install -e ".[quality]"
          uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1

      - name: Environment
        run: |
          python utils/print_env.py
      - name: Extract tests
        id: extract_tests
        run: |
          pattern=$(python utils/extract_tests_from_mixin.py --type pipeline)
          echo "$pattern" > /tmp/test_pattern.txt
          echo "pattern_file=/tmp/test_pattern.txt" >> $GITHUB_OUTPUT

      - name: PyTorch CUDA checkpoint tests on Ubuntu
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # Quoted: a plain scalar must not start with the ':' indicator.
          CUBLAS_WORKSPACE_CONFIG: ":16:8"
        run: |
          if [ "${{ matrix.module }}" = "ip_adapters" ]; then
            pytest -n 1 --max-worker-restart=0 --dist=loadfile \
              -k "not Flax and not Onnx" \
              --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
              tests/pipelines/${{ matrix.module }}
          else
            pattern=$(cat ${{ steps.extract_tests.outputs.pattern_file }})
            pytest -n 1 --max-worker-restart=0 --dist=loadfile \
              -k "not Flax and not Onnx and $pattern" \
              --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
              tests/pipelines/${{ matrix.module }}
          fi

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_pipeline_${{ matrix.module }}_cuda_stats.txt
          cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v6
        with:
          name: pipeline_${{ matrix.module }}_test_reports
          path: reports

  # CUDA tests for non-pipeline modules (models, schedulers, lora, others).
  torch_cuda_tests:
    name: Torch CUDA Tests
    needs: [check_code_quality, check_repository_consistency]
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "16gb" --ipc host --gpus all
    defaults:
      run:
        shell: bash
    strategy:
      fail-fast: false
      max-parallel: 4
      matrix:
        module: [models, schedulers, lora, others]
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v6
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          uv pip install -e ".[quality]"
          uv pip install peft@git+https://github.com/huggingface/peft.git
          uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1

      - name: Environment
        run: |
          python utils/print_env.py

      - name: Extract tests
        id: extract_tests
        run: |
          pattern=$(python utils/extract_tests_from_mixin.py --type ${{ matrix.module }})
          echo "$pattern" > /tmp/test_pattern.txt
          echo "pattern_file=/tmp/test_pattern.txt" >> $GITHUB_OUTPUT

      - name: Run PyTorch CUDA tests
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # Quoted: a plain scalar must not start with the ':' indicator.
          CUBLAS_WORKSPACE_CONFIG: ":16:8"
        run: |
          pattern=$(cat ${{ steps.extract_tests.outputs.pattern_file }})
          if [ -z "$pattern" ]; then
            pytest -n 1 --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx" tests/${{ matrix.module }} \
              --make-reports=tests_torch_cuda_${{ matrix.module }}
          else
            pytest -n 1 --max-worker-restart=0 --dist=loadfile -k "not Flax and not Onnx and $pattern" tests/${{ matrix.module }} \
              --make-reports=tests_torch_cuda_${{ matrix.module }}
          fi

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_torch_cuda_${{ matrix.module }}_stats.txt
          cat reports/tests_torch_cuda_${{ matrix.module }}_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v6
        with:
          name: torch_cuda_test_reports_${{ matrix.module }}
          path: reports

  # Example-script tests (training extras) on a GPU runner.
  run_examples_tests:
    name: Examples PyTorch CUDA tests on Ubuntu
    needs: [check_code_quality, check_repository_consistency]
    runs-on:
      group: aws-g4dn-2xlarge

    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --gpus all --shm-size "16gb" --ipc host
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v6
        with:
          fetch-depth: 2

      - name: NVIDIA-SMI
        run: |
          nvidia-smi
      - name: Install dependencies
        run: |
          #uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
          uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1
          uv pip install -e ".[quality,training]"

      - name: Environment
        run: |
          python utils/print_env.py

      - name: Run example tests on GPU
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
        run: |
          uv pip install ".[training]"
          pytest -n 1 --max-worker-restart=0 --dist=loadfile --make-reports=examples_torch_cuda examples/

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/examples_torch_cuda_stats.txt
          cat reports/examples_torch_cuda_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v6
        with:
          name: examples_test_reports
          path: reports