|
|
|
|
|
""" |
|
|
ONNX ๋ชจ๋ธ์ ์ฌ์ฉํ ์ด๋ฏธ์ง ์ ์ฌ๋ ์ถ๋ก ์์ |
|
|
""" |
|
|
|
|
|
import onnxruntime as ort |
|
|
import numpy as np |
|
|
from PIL import Image |
|
|
import torchvision.transforms as transforms |
|
|
|
|
|
|
|
|
# ImageNet channel statistics used to normalize inputs to the model.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]

# Preprocessing pipeline: resize to the model's 224x224 input, convert to a
# CHW float tensor, and normalize with ImageNet statistics.
# NOTE(review): CenterCrop(224) after Resize((224, 224)) is a no-op because
# the image is already exactly 224x224 — kept to preserve behavior as-is.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=_IMAGENET_MEAN,
                         std=_IMAGENET_STD),
])
|
|
|
|
|
def preprocess_image(image_path):
    """Load an image from disk and preprocess it for ONNX inference.

    Args:
        image_path: Path to the image file.

    Returns:
        A numpy array of shape (1, 3, 224, 224) — the normalized image
        with a leading batch dimension.
    """
    rgb_image = Image.open(image_path).convert('RGB')
    batched = transform(rgb_image).unsqueeze(0)
    return batched.numpy()
|
|
|
|
|
def predict_similarity(onnx_model_path, image1_path, image2_path): |
|
|
"""์ด๋ฏธ์ง ์ ์ ์ฌ๋ ์์ธก""" |
|
|
|
|
|
|
|
|
session = ort.InferenceSession(onnx_model_path) |
|
|
|
|
|
|
|
|
img1 = preprocess_image(image1_path) |
|
|
img2 = preprocess_image(image2_path) |
|
|
|
|
|
|
|
|
inputs = {'image1': img1, 'image2': img2} |
|
|
logits = session.run(None, inputs)[0] |
|
|
|
|
|
|
|
|
similarity = 1 / (1 + np.exp(-logits[0][0])) |
|
|
|
|
|
return similarity |
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
onnx_path = "room_image_comparator.onnx" |
|
|
img1_path = "room1.jpg" |
|
|
img2_path = "room2.jpg" |
|
|
|
|
|
similarity = predict_similarity(onnx_path, img1_path, img2_path) |
|
|
print(f"์ ์ฌ๋: {similarity:.4f}") |
|
|
|