Zeid-Ali-Imigine committed on
Commit
6a7137a
·
1 Parent(s): bada7a8

app.py fixed

Browse files
Files changed (3) hide show
  1. Dockerfile +6 -10
  2. app.py +48 -83
  3. requirements.txt +7 -5
Dockerfile CHANGED
@@ -1,18 +1,14 @@
1
- FROM python:3.10-slim
2
-
3
- WORKDIR /app
4
 
5
  RUN apt-get update && apt-get install -y \
 
 
6
  git \
7
  && rm -rf /var/lib/apt/lists/*
8
 
9
- COPY requirements.txt .
 
10
 
11
- RUN pip install --no-cache-dir --upgrade pip
12
  RUN pip install --no-cache-dir -r requirements.txt
13
 
14
- COPY app.py .
15
-
16
- EXPOSE 7860
17
-
18
- CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
1
FROM python:3.10

# OpenCV runtime libraries (libGL, glib) plus git for the VCS-pinned
# requirement (Depth-Anything-V2 installed from GitHub).
RUN apt-get update && apt-get install -y \
    libgl1 \
    libglib2.0-0 \
    git \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copy only the dependency manifest first so the pip-install layer is
# cached across source-code edits (COPY . . before install invalidated
# the cache on every commit).
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Now copy the application source.
COPY . .

CMD ["uvicorn","app:app","--host","0.0.0.0","--port","7860"]
 
 
 
 
app.py CHANGED
@@ -1,96 +1,61 @@
1
- import io
 
2
  import torch
3
  import numpy as np
4
- from PIL import Image
5
- from fastapi import FastAPI, UploadFile, File, HTTPException
6
- from fastapi.responses import JSONResponse
7
- from contextlib import asynccontextmanager
8
- from transformers import AutoImageProcessor, AutoModelForDepthEstimation
9
 
10
- MODEL_ID = "depth-anything/Depth-Anything-V2-Metric-Indoor-Base-hf"
11
 
12
- model = None
13
- processor = None
14
- device = None
15
 
 
 
 
16
 
17
- @asynccontextmanager
18
- async def lifespan(app: FastAPI):
19
- global model, processor, device
20
 
21
- print("Loading Metric Depth Anything...")
22
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 
 
 
 
 
 
23
 
24
- processor = AutoImageProcessor.from_pretrained(MODEL_ID)
25
- model = AutoModelForDepthEstimation.from_pretrained(MODEL_ID).to(device)
26
- model.eval()
27
 
28
- print(f"Model loaded on {device}")
29
- yield
 
30
 
 
31
 
32
- app = FastAPI(
33
- title="Metric Depth Anything API",
34
- version="1.0.0",
35
- lifespan=lifespan,
36
- )
37
 
 
38
 
39
- @app.get("/")
40
- def root():
41
- return {"message": "Metric Depth API running"}
42
-
43
-
44
- @app.post("/predict")
45
- async def predict(file: UploadFile = File(...)):
46
- try:
47
- contents = await file.read()
48
- image = Image.open(io.BytesIO(contents)).convert("RGB")
49
- except Exception as e:
50
- raise HTTPException(status_code=400, detail=f"Invalid image: {e}")
51
-
52
- try:
53
- inputs = processor(images=image, return_tensors="pt").to(device)
54
-
55
- with torch.no_grad():
56
- outputs = model(**inputs)
57
-
58
- post = processor.post_process_depth_estimation(
59
- outputs,
60
- target_sizes=[(image.height, image.width)],
61
- )
62
-
63
- depth = post[0]["predicted_depth"].cpu().numpy() # in meters
64
-
65
- h, w = depth.shape
66
-
67
- # pixel le plus proche (plus petite distance en mètres)
68
- closest_idx = np.unravel_index(np.argmin(depth), depth.shape)
69
- closest_y, closest_x = int(closest_idx[0]), int(closest_idx[1])
70
- closest_distance = float(depth[closest_y, closest_x])
71
-
72
- # pixel central
73
- cy, cx = h // 2, w // 2
74
- center_distance = float(depth[cy, cx])
75
-
76
- return JSONResponse({
77
- "image_size": {"width": w, "height": h},
78
- "closest_pixel": {
79
- "x": closest_x,
80
- "y": closest_y,
81
- "distance_meters": closest_distance,
82
- },
83
- "center_pixel": {
84
- "x": cx,
85
- "y": cy,
86
- "distance_meters": center_distance,
87
- },
88
- })
89
-
90
- except Exception as e:
91
- raise HTTPException(status_code=500, detail=f"Inference error: {e}")
92
-
93
-
94
- if __name__ == "__main__":
95
- import uvicorn
96
- uvicorn.run("app:app", host="0.0.0.0", port=7860)
 
1
+ from fastapi import FastAPI, UploadFile
2
+ from PIL import Image
3
  import torch
4
  import numpy as np
5
+ import cv2
6
+ from torchvision import transforms
7
+ from depth_anything_v2.dpt import DepthAnythingV2
 
 
8
 
9
app = FastAPI()

# -------- LOAD MODEL --------
# Run on GPU when one is visible, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# NOTE(review): "LiheYoung/..." is the Depth-Anything *V1* org on the Hub;
# V2 checkpoints are usually published under "depth-anything/". Confirm this
# repo id actually resolves for DepthAnythingV2.from_pretrained.
model = DepthAnythingV2.from_pretrained(
    "LiheYoung/depth-anything-v2-base"
).to(device)
model.eval()

# -------- TRANSFORM --------
# Preprocessing pipeline: fixed 518x518 input, ImageNet normalization.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]
transform = transforms.Compose(
    [
        transforms.Resize((518, 518)),
        transforms.ToTensor(),
        transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
    ]
)
29
 
30
@app.get("/")
def root():
    """Liveness probe: report that the depth API is up."""
    status = {"message": "Depth Anything V2 API running"}
    return status
33
 
34
+ # -------- DEPTH ENDPOINT --------
35
# -------- DEPTH ENDPOINT --------
@app.post("/depth")
async def depth(file: UploadFile):
    """Run depth estimation on an uploaded image.

    Returns the minimum and center depth values of the predicted map,
    plus the original image resolution as [width, height].

    Raises:
        HTTPException(400): if the upload cannot be decoded as an image.
    """
    # Local import keeps the module-level import list untouched.
    from fastapi import HTTPException

    # Guard against corrupt / non-image uploads: without this, PIL's
    # UnidentifiedImageError surfaces to the client as an opaque 500.
    try:
        img = Image.open(file.file).convert("RGB")
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Invalid image: {e}")

    original_w, original_h = img.size

    x = transform(img).unsqueeze(0).to(device)

    with torch.no_grad():
        pred = model(x)

    pred = pred.squeeze().cpu().numpy()

    # Resize prediction back to the original resolution;
    # cv2.resize takes (width, height), not (height, width).
    pred = cv2.resize(pred, (original_w, original_h))

    # NOTE(review): the base Depth-Anything-V2 checkpoint predicts *relative*
    # depth (unitless; larger typically means nearer). Treating np.min as the
    # "closest" distance assumes metric depth — confirm the checkpoint, or use
    # a metric variant.
    closest_distance = float(np.min(pred))

    h, w = pred.shape
    center_distance = float(pred[h // 2, w // 2])

    return {
        "closest_distance": closest_distance,
        "center_distance": center_distance,
        "resolution": [w, h],
    }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1,8 +1,10 @@
1
  fastapi
2
  uvicorn
3
- torch
4
- transformers
5
- Pillow
6
  numpy
7
- accelerate
8
- python-multipart
 
 
 
 
 
1
  fastapi
2
  uvicorn
3
+ pillow
 
 
4
  numpy
5
+ opencv-python-headless
6
+ torch
7
+ torchvision
8
+ timm
9
+ huggingface_hub
+ python-multipart
10
+ git+https://github.com/DepthAnything/Depth-Anything-V2.git