Commit 56cb6bb
Parent(s): 86ddb80

Reintroduce flash-attn installation command in app.py and update requirements.txt for dependency management

Files changed:
- app.py (+2 -2)
- requirements.txt (+2 -1)
app.py

@@ -11,6 +11,8 @@ import spaces
 import torch
 import yaml
 
+subprocess.run('pip install flash-attn --no-build-isolation', shell=True)
+
 # Get the PyTorch and CUDA versions
 torch_version = torch.__version__.split("+")[0]  # Strips any "+cuXXX" suffix
 cuda_version = torch.version.cuda

@@ -138,8 +140,6 @@ class UniRigDemo:
         Returns:
             Tuple of status messages and file paths for each step
         """
-        subprocess.run('pip install flash-attn --no-build-isolation', shell=True)
-
         # Validate input file
         if not self.validate_input_file(input_file):
             raise gr.Error(f"Error: Invalid or unsupported file format. Supported formats: {', '.join(self.supported_formats)}")
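This change moves the flash-attn install from the per-request method to module import time, so it runs once when the Space boots instead of on every processing call. A minimal sketch (not part of this commit) of a guarded variant that skips the pip call when flash_attn is already importable, for example when the wheel pinned in requirements.txt was installed at build time, and invokes pip through the current interpreter:

import importlib.util
import subprocess
import sys

# Install flash-attn only if it is not already importable
# (e.g. it was already installed from requirements.txt).
if importlib.util.find_spec("flash_attn") is None:
    subprocess.run(
        [sys.executable, "-m", "pip", "install", "flash-attn", "--no-build-isolation"],
        check=True,
    )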
requirements.txt

@@ -18,4 +18,5 @@ scipy
 matplotlib
 plotly
 pyyaml
-spaces
+spaces
+https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
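The pinned wheel's filename encodes its compatibility constraints: flash-attn 2.7.4.post1 built against CUDA 12 and PyTorch 2.2, with the pre-C++11 ABI, for CPython 3.10 on x86-64 Linux. The torch_version and cuda_version values already computed in app.py could, in principle, be used to pick a matching wheel; a hypothetical sketch, assuming the flash-attention release assets follow the same naming scheme seen above and that PyTorch was built with CUDA:

import torch

torch_version = torch.__version__.split("+")[0]             # e.g. "2.2.0"
torch_major_minor = ".".join(torch_version.split(".")[:2])  # e.g. "2.2"
cuda_major = torch.version.cuda.split(".")[0]               # e.g. "12"; None on CPU-only builds

# Assemble a wheel URL following the release naming convention shown above.
# The version, ABI, and Python tags are assumptions tied to this one release.
wheel_url = (
    "https://github.com/Dao-AILab/flash-attention/releases/download/"
    "v2.7.4.post1/flash_attn-2.7.4.post1"
    f"+cu{cuda_major}torch{torch_major_minor}cxx11abiFALSE-cp310-cp310-linux_x86_64.whl"
)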