Update app.py
Browse files
app.py
CHANGED
|
@@ -28,24 +28,24 @@ import sys
|
|
| 28 |
import tempfile
|
| 29 |
from huggingface_hub import hf_hub_download
|
| 30 |
|
| 31 |
-
def install_cuda_toolkit():
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
|
| 50 |
import shutil
|
| 51 |
|
|
@@ -68,7 +68,7 @@ def find_cuda():
|
|
| 68 |
|
| 69 |
|
| 70 |
|
| 71 |
-
install_cuda_toolkit()
|
| 72 |
|
| 73 |
cuda_path = find_cuda()
|
| 74 |
|
|
@@ -76,7 +76,13 @@ if cuda_path:
|
|
| 76 |
print(f"CUDA installation found at: {cuda_path}")
|
| 77 |
else:
|
| 78 |
print("CUDA installation not found")
|
| 79 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 80 |
print(f"GPU: {torch.cuda.is_available()}")
|
| 81 |
a = torch.tensor([0]).cuda()
|
| 82 |
if torch.cuda.is_available() and torch.cuda.device_count() >= 2:
|
|
|
|
| 28 |
import tempfile
|
| 29 |
from huggingface_hub import hf_hub_download
|
| 30 |
|
| 31 |
+
# def install_cuda_toolkit():
|
| 32 |
+
# print("==> install cuda 118")
|
| 33 |
+
# # CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run"
|
| 34 |
+
# CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda_12.2.0_535.54.03_linux.run"
|
| 35 |
+
# CUDA_TOOLKIT_FILE = "/tmp/%s" % os.path.basename(CUDA_TOOLKIT_URL)
|
| 36 |
+
# subprocess.call(["wget", "-q", CUDA_TOOLKIT_URL, "-O", CUDA_TOOLKIT_FILE])
|
| 37 |
+
# subprocess.call(["chmod", "+x", CUDA_TOOLKIT_FILE])
|
| 38 |
+
# subprocess.call([CUDA_TOOLKIT_FILE, "--silent", "--toolkit"])
|
| 39 |
|
| 40 |
+
# os.environ["CUDA_HOME"] = "/usr/local/cuda"
|
| 41 |
+
# os.environ["PATH"] = "%s/bin:%s" % (os.environ["CUDA_HOME"], os.environ["PATH"])
|
| 42 |
+
# os.environ["LD_LIBRARY_PATH"] = "%s/lib:%s" % (
|
| 43 |
+
# os.environ["CUDA_HOME"],
|
| 44 |
+
# "" if "LD_LIBRARY_PATH" not in os.environ else os.environ["LD_LIBRARY_PATH"],
|
| 45 |
+
# )
|
| 46 |
+
# # Fix: arch_list[-1] += '+PTX'; IndexError: list index out of range
|
| 47 |
+
# os.environ["TORCH_CUDA_ARCH_LIST"] = "8.0;8.6"
|
| 48 |
+
# print("==> finish install")
|
| 49 |
|
| 50 |
import shutil
|
| 51 |
|
|
|
|
| 68 |
|
| 69 |
|
| 70 |
|
| 71 |
+
# install_cuda_toolkit()
|
| 72 |
|
| 73 |
cuda_path = find_cuda()
|
| 74 |
|
|
|
|
| 76 |
print(f"CUDA installation found at: {cuda_path}")
|
| 77 |
else:
|
| 78 |
print("CUDA installation not found")
|
| 79 |
+
import os
|
| 80 |
+
|
| 81 |
+
# Ensure CUDA_HOME is set correctly
|
| 82 |
+
os.environ["CUDA_HOME"] = "/usr/local/cuda"
|
| 83 |
+
os.environ["PATH"] += os.pathsep + os.path.join(os.environ["CUDA_HOME"], "bin")
|
| 84 |
+
os.environ["LD_LIBRARY_PATH"] = os.environ.get("LD_LIBRARY_PATH", "") + os.pathsep + os.path.join(os.environ["CUDA_HOME"], "lib64")
|
| 85 |
+
|
| 86 |
print(f"GPU: {torch.cuda.is_available()}")
|
| 87 |
a = torch.tensor([0]).cuda()
|
| 88 |
if torch.cuda.is_available() and torch.cuda.device_count() >= 2:
|