afdx2 committed on
Commit
0153285
·
verified ·
1 Parent(s): 7982cea

Update server1.py

Browse files
Files changed (1) hide show
  1. server1.py +38 -25
server1.py CHANGED
@@ -2,54 +2,67 @@
2
 
3
  import io
4
  import numpy as np
5
- import cv2
6
  from fastapi import FastAPI, UploadFile, File
7
  from fastapi.responses import JSONResponse
8
  from PIL import Image
 
 
 
9
 
10
# FastAPI application instance for the strip-analysis service.
app = FastAPI(title="Accudoctor Strip Analyzer")
12
def contar_cuadrados(image_bytes):
    """Detect square/rectangular regions in an encoded image.

    Parameters
    ----------
    image_bytes : bytes
        Raw encoded image data (any format PIL can open).

    Returns
    -------
    list[dict]
        One ``{"bbox": [x1, y1, x2, y2]}`` entry per detected 4-vertex
        contour with area > 200 px and both sides > 10 px.
    """
    pil_image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
    rgb = np.array(pil_image)

    # Classic contour pipeline: grayscale -> Gaussian blur -> Canny edges.
    grayscale = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    smoothed = cv2.GaussianBlur(grayscale, (5, 5), 0)
    edge_map = cv2.Canny(smoothed, 50, 150)

    found, _ = cv2.findContours(edge_map, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    detections = []
    for contour in found:
        perimeter = cv2.arcLength(contour, True)
        polygon = cv2.approxPolyDP(contour, 0.04 * perimeter, True)

        # Keep only quadrilaterals that are large enough to matter.
        if len(polygon) != 4 or cv2.contourArea(contour) <= 200:
            continue

        x, y, w, h = cv2.boundingRect(contour)
        if w > 10 and h > 10:
            detections.append({
                "bbox": [int(x), int(y), int(x + w), int(y + h)]
            })

    return detections
 
 
41
 
42
 
43
@app.post("/strip/")
async def strip(front: UploadFile = File(...)):
    """Analyze an uploaded strip image and return the detected squares.

    Parameters
    ----------
    front : UploadFile
        The image file uploaded by the client.

    Returns
    -------
    JSONResponse
        ``{"code": 200, "count": N, "cuadrados": [...]}`` on success, or
        ``{"code": 500, "error": "..."}`` with HTTP status 500 on failure.
    """
    try:
        bytes_img = await front.read()
        cuadrados = contar_cuadrados(bytes_img)

        return JSONResponse(content={
            "code": 200,
            "count": len(cuadrados),
            "cuadrados": cuadrados
        })
    except Exception as e:
        # Bug fix: the error payload was previously sent with HTTP 200;
        # set a real 500 status so clients can detect the failure.
        return JSONResponse(content={"code": 500, "error": str(e)}, status_code=500)
 
2
 
3
  import io
4
  import numpy as np
 
5
  from fastapi import FastAPI, UploadFile, File
6
  from fastapi.responses import JSONResponse
7
  from PIL import Image
8
+ import torch
9
+
10
+ from transformers import SamModel, SamProcessor
11
 
12
# FastAPI application instance for the strip-analysis service.
app = FastAPI(title="Accudoctor Strip Analyzer")

# Run the model on GPU when available, otherwise fall back to CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Load the Segment Anything (SAM) checkpoint and its preprocessor once at
# import time so every request reuses the same weights.
model = SamModel.from_pretrained("facebook/sam-vit-base").to(DEVICE)
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
18
+
19
 
20
def detect_blocks(image_bytes):
    """Segment an image with SAM and return bounding boxes for its masks.

    Parameters
    ----------
    image_bytes : bytes
        Raw encoded image data (any format PIL can open).

    Returns
    -------
    list[list[int]]
        ``[x1, y1, x2, y2]`` boxes, one per non-empty predicted mask.
    """
    image = Image.open(io.BytesIO(image_bytes))
    if image.mode != "RGB":
        image = image.convert("RGB")

    # Prepare the model inputs on the configured device.
    encoded = processor(image, return_tensors="pt").to(DEVICE)

    # Inference only — no gradients needed.
    with torch.no_grad():
        prediction = model(**encoded)

    # Rescale the predicted masks back to the original image resolution.
    # NOTE(review): assumes post_process_masks returns one tensor per input
    # image that iterates mask-by-mask — confirm against transformers docs.
    mask_array = processor.post_process_masks(
        prediction.pred_masks.cpu(),
        encoded["original_sizes"].cpu(),
        encoded["reshaped_input_sizes"].cpu()
    )[0].numpy()

    boxes = []
    for single_mask in mask_array:
        rows, cols = np.where(single_mask > 0.5)
        if len(cols) == 0:
            # Empty mask: nothing was segmented here, skip it.
            continue
        boxes.append([
            int(cols.min()), int(rows.min()),
            int(cols.max()), int(rows.max()),
        ])

    return boxes
53
 
54
 
55
@app.post("/strip/")
async def strip(front: UploadFile = File(...)):
    """Analyze an uploaded strip image and return detected block boxes.

    Parameters
    ----------
    front : UploadFile
        The image file uploaded by the client.

    Returns
    -------
    JSONResponse
        ``{"code": 200, "num_blocks": N, "blocks": [...]}`` on success, or
        ``{"code": 500, "error": "..."}`` with HTTP status 500 on failure.
    """
    try:
        img_bytes = await front.read()
        blocks = detect_blocks(img_bytes)

        return JSONResponse(content={
            "code": 200,
            "num_blocks": len(blocks),
            "blocks": blocks
        })
    except Exception as e:
        # Bug fix: errors previously returned HTTP 200 with "code": 500 in
        # the body; surface a real 500 status so clients see the failure.
        return JSONResponse(content={"code": 500, "error": str(e)}, status_code=500)