Spaces:
Sleeping
Sleeping
add cuda information retrieval + a variable to force using cuda for ONNX
Browse files
- Dockerfile +1 -1
- tasks/image.py +8 -0
Dockerfile
CHANGED
|
@@ -13,6 +13,6 @@ WORKDIR /app
|
|
| 13 |
|
| 14 |
COPY --chown=user ./requirements.txt requirements.txt
|
| 15 |
RUN pip install --no-cache-dir --upgrade -r requirements.txt
|
| 16 |
-
|
| 17 |
COPY --chown=user . /app
|
| 18 |
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
|
|
|
|
| 13 |
|
| 14 |
COPY --chown=user ./requirements.txt requirements.txt
|
| 15 |
RUN pip install --no-cache-dir --upgrade -r requirements.txt
|
| 16 |
+
RUN ONNX_MODE=cuda
|
| 17 |
COPY --chown=user . /app
|
| 18 |
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
|
tasks/image.py
CHANGED
|
@@ -28,6 +28,14 @@ print(12)
|
|
| 28 |
router = APIRouter()
|
| 29 |
print(13)
|
| 30 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
#MODEL_TYPE = "YOLOv11n"
|
| 32 |
DESCRIPTION = f"YOLOv8n model with batch 128 inference on CPU"
|
| 33 |
print(14)
|
|
|
|
| 28 |
router = APIRouter()
|
| 29 |
print(13)
|
| 30 |
|
| 31 |
+
import torch
|
| 32 |
+
|
| 33 |
+
# Get CUDA version (the one PyTorch was compiled with)
|
| 34 |
+
print("CUDA version:", torch.version.cuda)
|
| 35 |
+
|
| 36 |
+
# Get cuDNN version
|
| 37 |
+
print("cuDNN version:", torch.backends.cudnn.version())
|
| 38 |
+
|
| 39 |
#MODEL_TYPE = "YOLOv11n"
|
| 40 |
DESCRIPTION = f"YOLOv8n model with batch 128 inference on CPU"
|
| 41 |
print(14)
|