|
|
from PIL import Image |
|
|
import numpy as np |
|
|
import torch |
|
|
|
|
|
def preprocess_image_with_pil(image, crop_pct=0.875, do_resize=True, do_normalize=True, size=224, image_mean=(0.485, 0.456, 0.406), image_std=(0.229, 0.224, 0.225)):
    """Preprocess a PIL image into a ``(1, 3, size, size)`` float32 torch tensor.

    Pipeline: convert to RGB -> (optional) shortest-edge resize to
    ``size / crop_pct`` -> center crop to ``size`` x ``size`` -> scale to
    ``[0, 1]`` -> (optional) per-channel normalization -> HWC->CHW -> add
    batch dimension.

    Args:
        image: Input ``PIL.Image`` (any mode; converted to RGB internally so
            the output always has 3 channels).
        crop_pct: Fraction of the resized image the center crop covers. The
            shortest edge is resized to ``size / crop_pct`` so the subsequent
            ``size`` crop spans ``crop_pct`` of it (timm/HF convention).
        do_resize: If True, perform the aspect-preserving resize step.
        do_normalize: If True, apply ``(x - image_mean) / image_std`` per channel.
        size: Side length of the square center crop / output spatial size.
        image_mean: Per-channel means used for normalization (ImageNet defaults).
        image_std: Per-channel stds used for normalization (ImageNet defaults).

    Returns:
        ``torch.Tensor`` of shape ``(1, 3, size, size)``, dtype float32.
    """
    # Force 3 channels so the (2, 0, 1) transpose and the 3-element mean/std
    # broadcast below are always valid (grayscale / RGBA inputs would
    # otherwise fail or mis-broadcast).
    image = image.convert("RGB")

    if do_resize:
        # Resize the SHORTEST edge to size / crop_pct, preserving aspect
        # ratio, so the center crop of `size` covers crop_pct of that edge.
        # (The previous code shrank the image to min(image.size) * crop_pct —
        # a target dependent on the input size rather than on `size` — which
        # distorted the aspect ratio and could leave the image smaller than
        # the crop window, making PIL pad the crop with black.)
        scale_size = int(size / crop_pct)
        width, height = image.size
        if width <= height:
            new_w = scale_size
            new_h = max(1, round(height * scale_size / width))
        else:
            new_h = scale_size
            new_w = max(1, round(width * scale_size / height))
        image = image.resize((new_w, new_h), Image.Resampling.BILINEAR)

    # Center crop to (size, size).
    width, height = image.size
    left = (width - size) // 2
    top = (height - size) // 2
    image = image.crop((left, top, left + size, top + size))

    # Scale pixel values to [0, 1] float32.
    image = np.array(image).astype(np.float32) / 255.0

    if do_normalize:
        # Per-channel normalization; (3,) mean/std broadcast over (H, W, 3).
        image = (image - np.asarray(image_mean, dtype=np.float32)) / np.asarray(image_std, dtype=np.float32)

    # HWC -> CHW, then add the leading batch dimension.
    image = np.transpose(image, (2, 0, 1))
    tensor_image = torch.tensor(image, dtype=torch.float32)
    tensor_image = tensor_image.unsqueeze(0)
    return tensor_image
|
|
|