# Source scraped from GitHub — author: kengboon, commit: "First commit" (7ff0bff)
import torch
from torchvision.utils import draw_bounding_boxes, draw_keypoints
from PIL import Image
import numpy as np
from .keypt_det.model import get_model, get_transforms, load_model_weight
from .roi_calib.custom.trousers import fix_trouser_orientation
# Run on GPU when available; model and input tensors are both moved here.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Detector with 2 classes and 14 keypoints per instance — the 14 indices
# match the skeleton wiring in predict() below.
# NOTE(review): presumably the 2 classes are background + trousers (see the
# fix_trouser_orientation import) — confirm against keypt_det.
model = get_model(num_classes=2, num_keypoints=14).to(device)
# Load pretrained weights from the package's default checkpoint location.
model = load_model_weight(model, device=device)
model.eval()  # inference only: disable dropout/batch-norm updates
transforms = get_transforms()  # preprocessing pipeline matching training
def predict(input):
    """Detect keypoints in an image and return an annotated copy.

    Parameters
    ----------
    input : PIL.Image.Image or numpy.ndarray
        Input image. An ndarray is assumed to be an HWC uint8 image
        convertible via ``Image.fromarray`` — TODO confirm with callers.

    Returns
    -------
    PIL.Image.Image
        The (transformed) input with bounding boxes and keypoint
        skeletons drawn for every detection scoring >= 0.8.
    """
    if isinstance(input, np.ndarray):
        input = Image.fromarray(input)
    batch = transforms(input).unsqueeze(0).to(device)

    with torch.no_grad():
        outputs = model(batch)[0]

    # Recover a drawable uint8 CHW image from the normalized model input.
    # NOTE(review): assumes the transform scales pixels to [0, 1] — confirm
    # against get_transforms().
    image = (batch[0].cpu() * 255.0).to(torch.uint8)
    _, h, w = image.shape
    max_edge = max(h, w)

    # Keep only confident detections. ("keep" rather than "filter" so the
    # builtin is not shadowed.)
    keep = outputs["scores"] >= 0.8  # objectness threshold
    boxes = outputs["boxes"][keep]
    # detach() before cpu(): drop the autograd graph, then move the data.
    # The numpy round-trip exists for the (currently disabled) orientation fix.
    keypoints = outputs["keypoints"][keep].detach().cpu().numpy()
    #keypoints = np.array([fix_trouser_orientation(kypts) for kypts in keypoints])
    keypoints = torch.tensor(keypoints)
    # Per keypoint the last channel is a visibility flag; split it off.
    keypoints, visibility = keypoints.split([2, 1], dim=-1)
    visibility = visibility.bool()

    # Skeleton edges: chain indices 3..13, closed back through 2, 1, 0.
    connect_skeleton = [
        (0, 3),
        *[(i, i + 1) for i in range(3, 13)],
        (13, 2),
        (2, 1),
        (1, 0),
    ]

    # Line widths/radii scale with image size; clamp to >= 1 because the
    # torchvision draw utilities reject non-positive sizes on small images.
    output = draw_bounding_boxes(
        image=image,
        boxes=boxes,
        colors="orange",
        width=max(1, int(max_edge * 0.0033)),
    )
    output = draw_keypoints(
        image=output,
        keypoints=keypoints,
        visibility=visibility,
        connectivity=connect_skeleton,
        colors="blue",
        radius=max(1, int(max_edge * 0.01)),
        width=max(1, int(max_edge * 0.005)),
    )

    # CHW tensor -> HWC array -> PIL image.
    output = output.permute(1, 2, 0).cpu().numpy()
    return Image.fromarray(output)