# NOTE(review): removed Hugging Face file-viewer residue (commit hashes and a
# line-number gutter) that was pasted above this line and is not valid Python.
# comments without accent marks / without enye (ASCII-only comments)
import io
import numpy as np
from fastapi import FastAPI, UploadFile, File
from fastapi.responses import JSONResponse
from PIL import Image
import torch
from transformers import SamModel, SamProcessor
# FastAPI application exposing the strip-analysis endpoint.
app = FastAPI(title="Accudoctor Strip Analyzer")
# Run SAM on GPU when available; fall back to CPU otherwise.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# Segment Anything (ViT-B variant) model and its matching pre/post-processor,
# loaded once at import time so every request reuses the same instances.
model = SamModel.from_pretrained("facebook/sam-vit-base").to(DEVICE)
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
def detect_strip_mask(img):
    """Segment *img* with SAM and return the bounding box of the largest mask.

    Args:
        img: PIL RGB image containing the test strip.

    Returns:
        Tuple ``(x1, y1, x2, y2)`` of inclusive pixel coordinates bounding
        the largest segmented region (assumed to be the strip).

    Raises:
        ValueError: if SAM produces no foreground pixels at all.
    """
    inputs = processor(img, return_tensors="pt").to(DEVICE)
    with torch.no_grad():
        out = model(**inputs)
    masks = processor.post_process_masks(
        out.pred_masks.cpu(),
        inputs["original_sizes"].cpu(),
        inputs["reshaped_input_sizes"].cpu()
    )[0].numpy()
    # Keep the mask covering the most pixels -- assumed to be the strip itself.
    best_mask = np.squeeze(max(masks, key=np.sum))
    ys, xs = np.where(best_mask > 0.5)
    if xs.size == 0:
        # An all-background mask would make min()/max() raise an opaque
        # "zero-size array" ValueError; fail with a clear message instead.
        raise ValueError("SAM did not detect any strip region in the image")
    return xs.min(), ys.min(), xs.max(), ys.max()
def split_into_11(img_strip, n_blocks=11):
    """Split the strip image vertically into *n_blocks* equal bands.

    Args:
        img_strip: image-like object exposing a PIL-style ``.size`` -> (w, h).
        n_blocks: number of bands (default 11, one per reagent pad).

    Returns:
        List of dicts ``{"index": 1-based position, "bbox": [x1, y1, x2, y2]}``
        in top-to-bottom order.

    Fixes vs. the original:
        - The last band now extends to the bottom edge, so the ``h % n_blocks``
          remainder rows are no longer silently dropped.
        - Removed a ``crop()`` call whose result was never used (dead work).
    """
    w, h = img_strip.size
    block_h = h // n_blocks
    blocks = []
    for i in range(n_blocks):
        y1 = i * block_h
        # Extend the final band to the bottom edge so no pixels are lost.
        y2 = h if i == n_blocks - 1 else (i + 1) * block_h
        blocks.append({
            "index": i + 1,
            "bbox": [0, y1, w, y2]
        })
    return blocks
def detect_blocks(image_bytes):
    """Locate the strip in a raw image and return its 11 pad bounding boxes.

    Args:
        image_bytes: raw encoded image bytes (any format PIL can decode).

    Returns:
        List of block dicts as produced by ``split_into_11``; bboxes are in
        strip-local coordinates, not the original image's coordinates.
    """
    img = Image.open(io.BytesIO(image_bytes)).convert("RGB")
    # 1) SAM detects the whole strip. Its coordinates are inclusive pixel
    #    indices (from np.where + max), while PIL's crop box is
    #    right/bottom-EXCLUSIVE -- hence the +1, otherwise the strip's last
    #    row and column of pixels are cut off.
    x1, y1, x2, y2 = detect_strip_mask(img)
    strip = img.crop((x1, y1, x2 + 1, y2 + 1))
    # 2) split it into 11 vertical blocks
    blocks = split_into_11(strip)
    return blocks
@app.post("/strip/")
async def strip(front: UploadFile = File(...)):
    """Analyze an uploaded strip photo and return the 11 detected blocks.

    Returns:
        HTTP 200 with ``{"code", "num_blocks", "blocks"}`` on success;
        HTTP 500 with ``{"code", "error"}`` on failure. (Previously errors
        were reported with an HTTP 200 status and only a "code" field in the
        body, which misleads standard HTTP clients and monitoring.)
    """
    try:
        img_bytes = await front.read()
        blocks = detect_blocks(img_bytes)
        return JSONResponse(content={
            "code": 200,
            "num_blocks": len(blocks),
            "blocks": blocks
        })
    except Exception as e:
        # Surface the failure as a real 500 so clients see an error status.
        return JSONResponse(status_code=500, content={"code": 500, "error": str(e)})