Update New_file.txt
New_file.txt  CHANGED  (+52 -110)

@@ -1,112 +1,54 @@
-import torch
-import torchvision.transforms as transforms
-from PIL import Image
 import numpy as np
-from sklearn.metrics.pairwise import cosine_similarity
-from torchvision.models import resnet50
-from torchvision.datasets import ImageFolder
-from torch.utils.data import DataLoader
-
-# Load a pre-trained ResNet-50 model
-model = resnet50(pretrained=True)
-model.eval()
-
-# Define a function to preprocess images
-def preprocess_image(image_path):
-    transform = transforms.Compose([
-        transforms.Resize((224, 224)),
-        transforms.ToTensor(),
-        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-    ])
-    image = Image.open(image_path).convert("RGB")
-    image = transform(image).unsqueeze(0)  # Add a batch dimension
-    return image
-
-# Load your ideal subset of images
-ideal_image_paths = ["/content/trunck.jpg", "t4.jpg"]  # Replace with your ideal image file paths
-ideal_embeddings = []
-
-for image_path in ideal_image_paths:
-    image = preprocess_image(image_path)
-    with torch.no_grad():
-        embedding = model(image).squeeze().numpy()
-    ideal_embeddings.append(embedding)
-
-# Load a set of candidate images
-candidate_image_paths = ["/content/trunck2.jpg", "t3.jpg", "car.jpg"]  # Replace with your candidate image file paths
-candidate_embeddings = []
-
-for image_path in candidate_image_paths:
-    image = preprocess_image(image_path)
-    with torch.no_grad():
-        embedding = model(image).squeeze().numpy()
-    candidate_embeddings.append(embedding)
-
-# Calculate similarities between ideal and candidate images using cosine similarity
-similarities = cosine_similarity(ideal_embeddings, candidate_embeddings)
-
-# Print the similarity matrix
-print(similarities)
-
-
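
Note that in the removed ResNet-50 script above, model(image) returns the 1000-way ImageNet classification logits, and those logits are what get compared with cosine similarity. A common alternative, shown here only as a minimal sketch that is not part of this change, is to replace the final fully connected layer with an identity so the 2048-dimensional pooled features are used as embeddings instead (the feature_extractor name and the random stand-in input are illustrative):

import torch
import torch.nn as nn
from torchvision.models import resnet50

# Sketch: drop the classification head so the model outputs pooled features.
feature_extractor = resnet50(pretrained=True)
feature_extractor.fc = nn.Identity()
feature_extractor.eval()

# In practice the input would come from preprocess_image(...) above;
# a random tensor of the same shape stands in here.
image = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    features = feature_extractor(image).squeeze().numpy()  # shape (2048,)
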
-import torch
-from transformers import SwinModel, AutoImageProcessor
-import torchvision.transforms as transforms
-from PIL import Image
-import numpy as np
-from sklearn.metrics.pairwise import cosine_similarity
-
-# Load the pretrained Swin Transformer model and image processor
-model_name = "microsoft/swin-base-patch4-window7-224-in22k"
-model = SwinModel.from_pretrained(model_name)
-processor = AutoImageProcessor.from_pretrained(model_name)
-model.eval()
-
-# Define a function to preprocess images (the processor handles resizing and normalization)
-def preprocess_image(image_path):
-    image = Image.open(image_path)
-    inputs = processor(images=image, return_tensors="pt")
-    return inputs
-
-# Load your ideal and candidate subsets of images
-ideal_image_paths = ["ideal_image1.jpg", "ideal_image2.jpg", "ideal_image3.jpg"]  # Replace with your ideal image file paths
-candidate_image_paths = ["candidate_image1.jpg", "candidate_image2.jpg", "candidate_image3.jpg"]  # Replace with your candidate image file paths
-
-# Calculate cosine similarities between ideal and candidate images
-similarities = []
-
-for ideal_path in ideal_image_paths:
-    inputs_ideal = preprocess_image(ideal_path)
-    with torch.no_grad():
-        output_ideal = model(**inputs_ideal)
-    ideal_embedding = output_ideal.pooler_output[0].cpu().numpy()
-
-    for candidate_path in candidate_image_paths:
-        inputs_candidate = preprocess_image(candidate_path)
-        with torch.no_grad():
-            output_candidate = model(**inputs_candidate)
-        candidate_embedding = output_candidate.pooler_output[0].cpu().numpy()
-
-        # Calculate cosine similarity between ideal and candidate embeddings
-        similarity = cosine_similarity([ideal_embedding], [candidate_embedding])[0][0]
-        similarities.append((ideal_path, candidate_path, similarity))
-
-# Set a similarity threshold (e.g., 0.7)
-threshold = 0.7
-
-# Find similar image pairs based on the threshold
-similar_pairs = []
-for ideal_path, candidate_path, similarity in similarities:
-    if similarity > threshold:
-        similar_pairs.append((ideal_path, candidate_path))
-
-# Print similar image pairs
-for pair in similar_pairs:
-    print(f"Similar images: {pair[0]} and {pair[1]}")
 
+import cv2
 import numpy as np

+# Load the images
+image1_path = 'path_to_your_first_image.jpg'
+image2_path = 'path_to_your_second_image.jpg'
+image1 = cv2.imread(image1_path)
+image2 = cv2.imread(image2_path)
+
+# Resize images to the same height for concatenation
+height1, width1, _ = image1.shape
+height2, width2, _ = image2.shape
+
+# Define the desired height for both images (e.g., height of the first image)
+desired_height = height1
+
+# Resize images
+image1_resized = cv2.resize(image1, (width1, desired_height))
+image2_resized = cv2.resize(image2, (width2, desired_height))
+
+# Combine images side by side
+combined_image = np.hstack((image1_resized, image2_resized))
+
+# Add labels to the top of each image
+label1 = 'Image 1'
+label2 = 'Image 2'
+font = cv2.FONT_HERSHEY_SIMPLEX
+font_scale = 1
+color = (255, 255, 255)  # White color for the text
+thickness = 2
+
+# Calculate the position for the labels
+label1_size = cv2.getTextSize(label1, font, font_scale, thickness)[0]
+label2_size = cv2.getTextSize(label2, font, font_scale, thickness)[0]
+
+# Position for label1
+x1 = 10
+y1 = label1_size[1] + 10
+
+# Position for label2
+x2 = width1 + 10
+y2 = label2_size[1] + 10
+
+# Add labels to the combined image
+cv2.putText(combined_image, label1, (x1, y1), font, font_scale, color, thickness)
+cv2.putText(combined_image, label2, (x2, y2), font, font_scale, color, thickness)
+
+# Display the combined image
+cv2.imshow('Combined Image', combined_image)
+cv2.waitKey(0)
+cv2.destroyAllWindows()
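
cv2.imshow above opens a native window, so the display step only works in a desktop GUI session; in a headless setup such as a notebook (the /content/ paths in the removed script suggest Colab) it will fail. A minimal sketch of an inline alternative, assuming matplotlib is available (it is not used anywhere else in this change), converts the BGR image to RGB and plots it:

import cv2
import matplotlib.pyplot as plt

# combined_image is the np.hstack result built above; matplotlib expects RGB,
# while OpenCV stores images as BGR, so convert before plotting.
plt.imshow(cv2.cvtColor(combined_image, cv2.COLOR_BGR2RGB))
plt.axis('off')
plt.show()
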
+
+# Save the combined image
+cv2.imwrite('combined_image.jpg', combined_image)
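
One caveat in the resize step above: cv2.resize(image2, (width2, desired_height)) forces image2 to image1's height while keeping its original width, which distorts image2 whenever the two heights differ. A hedged sketch of a proportional resize that preserves the aspect ratio before stacking (the file paths are the same placeholders used above):

import cv2
import numpy as np

image1 = cv2.imread('path_to_your_first_image.jpg')
image2 = cv2.imread('path_to_your_second_image.jpg')

# Scale image2 so its height matches image1 while preserving its aspect ratio.
desired_height = image1.shape[0]
scale = desired_height / image2.shape[0]
new_width2 = max(1, int(round(image2.shape[1] * scale)))
image2_resized = cv2.resize(image2, (new_width2, desired_height))

# image1 already has the desired height, so the two can be stacked directly.
combined_image = np.hstack((image1, image2_resized))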