# NOTE: the following lines are Hugging Face web-page chrome captured when the
# file was saved from the "raw" view; commented out so the module parses.
# pepecalero's picture
# Upload folder using huggingface_hub
# bab98a2 verified
# raw / history blame / 4.62 kB
import os
import time
import torch
import torch.nn.functional as F
from torchvision import transforms
from PIL import Image
import io
def read_paths_images(directory, dataset_size):
    """Return up to ``dataset_size`` paths of regular files inside ``directory``.

    Parameters
    ----------
    directory : str
        Directory to scan; subdirectories are skipped (only regular files pass).
    dataset_size : int
        Maximum number of paths to return. Note os.listdir() order is
        arbitrary, so which files survive the truncation is platform-dependent
        (same as the original behavior).

    Returns
    -------
    list[str]
        Full paths to at most ``dataset_size`` files.
    """
    # Bug fix: the original built result paths with plain string concatenation
    # (`directory + f`) while the isfile() filter used os.path.join(). When
    # `directory` lacked a trailing separator the filter passed but the
    # returned paths were wrong. Join consistently on both sides.
    paths = [
        os.path.join(directory, f)
        for f in os.listdir(directory)
        if os.path.isfile(os.path.join(directory, f))
    ]
    return paths[:dataset_size]
def load_images(urls):
    """Read every file named in ``urls`` and return their raw contents.

    Parameters
    ----------
    urls : iterable of str
        Local file paths to read (despite the name, these are filesystem
        paths, not network URLs).

    Returns
    -------
    list[bytes]
        Raw, undecoded bytes of each file, in input order.
    """
    def _read_bytes(path):
        # Binary mode: callers expect the encoded image bytes untouched.
        with open(path, 'rb') as handle:
            return handle.read()

    return [_read_bytes(path) for path in urls]
def transform_images(images_data):
    """Decode raw image bytes, resample each image to 224x224, and normalize.

    Parameters: images_data -- iterable of encoded image byte strings
    (anything PIL can open; each is converted to RGB).
    Returns: a stacked float tensor of shape (N, 3, 224, 224).
    Raises: RuntimeError from torch.stack if images_data is empty.
    """
    # Define the transformations
    composed_transforms = transforms.Compose([
        transforms.ToTensor(),
        # Identity affine (torch.eye(2, 3)) sampled over a fixed
        # [1, 3, 224, 224] output grid: grid_sample then bilinearly
        # resamples the input to 224x224. The bare `True` positional arg
        # is affine_grid's align_corners, matching the grid_sample call.
        # NOTE(review): this is an unusual way to resize; transforms.Resize
        # is the conventional equivalent -- confirm sampling semantics
        # (align_corners, reflection padding) before swapping it in.
        transforms.Lambda(lambda x: F.grid_sample(x.unsqueeze(0), torch.nn.functional.affine_grid(torch.eye(2, 3, dtype=torch.float32).unsqueeze(0), [1, 3, 224, 224], True), mode='bilinear', padding_mode='reflection', align_corners=True)),
        # Drop the temporary batch dimension that grid_sample required.
        transforms.Lambda(lambda x: x.squeeze(0)),
        # Per-channel normalization constants (the widely used ImageNet
        # mean/std values).
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    tensors = []
    for image_data in images_data:
        # Decode from the in-memory byte string; force 3-channel RGB so the
        # normalization above always sees three channels.
        image=Image.open(io.BytesIO(image_data)).convert('RGB')
        transformed_img = composed_transforms(image)
        tensors.append(transformed_img)
    stacked_tensors = torch.stack(tensors)
    return stacked_tensors
def transform(images_data):
    """Preprocess raw image bytes into a normalized batch tensor.

    Thin timing wrapper around transform_images(); the elapsed time is
    measured but the report is currently commented out.
    """
    t0 = time.time()
    batch = transform_images(images_data)
    t1 = time.time()
    # print("Transformation time: ", t1-t0)
    return batch
def inference(model, normalized_batch):
    """Run one model over a preprocessed batch and decode binary predictions.

    Parameters
    ----------
    model : torch.nn.Module
        Classifier emitting two logits per sample (index 0 -> 'off',
        index 1 -> 'on').
    normalized_batch : torch.Tensor
        Preprocessed input batch.

    Returns
    -------
    list[dict]
        One {'prob': float, 'label': str} entry per sample.
        NOTE(review): 'prob' is always the softmax probability of class 0
        ('off'), even when the predicted label is 'on' -- confirm intended.
    """
    model.eval()
    start = time.time()
    with torch.no_grad():
        logits = model(normalized_batch)
    end = time.time()
    # print("Inference time: ", end - start)
    probs = torch.softmax(logits, dim=1).numpy()
    predicted_classes = probs.argmax(axis=1)
    return [
        {'prob': float(row[0]), 'label': 'off' if cls == 0 else 'on'}
        for row, cls in zip(probs, predicted_classes)
    ]
def inference_two_models(model1, model2, normalized_batch):
    """Run a two-stage model pipeline (model1's output feeds model2).

    Parameters
    ----------
    model1, model2 : torch.nn.Module
        Pipeline stages; model2 must emit two logits per sample
        (index 0 -> 'off', index 1 -> 'on').
    normalized_batch : torch.Tensor
        Preprocessed input batch.

    Returns
    -------
    list[dict]
        One {'prob': float, 'label': str} entry per sample.
        NOTE(review): 'prob' is always the class-0 ('off') softmax
        probability regardless of the predicted label -- confirm intended.
    """
    model1.eval()
    model2.eval()
    start = time.time()
    with torch.no_grad():
        logits = model2(model1(normalized_batch))
    end = time.time()
    # print("Inference time: ", end - start)
    probs = torch.softmax(logits, dim=1).numpy()
    predicted_classes = probs.argmax(axis=1)
    return [
        {'prob': float(row[0]), 'label': 'off' if cls == 0 else 'on'}
        for row, cls in zip(probs, predicted_classes)
    ]
def inference_three_models(model1, model2, model3, normalized_batch):
    """Run a three-stage model pipeline (each stage feeds the next).

    Parameters
    ----------
    model1, model2, model3 : torch.nn.Module
        Pipeline stages; model3 must emit two logits per sample
        (index 0 -> 'off', index 1 -> 'on').
    normalized_batch : torch.Tensor
        Preprocessed input batch.

    Returns
    -------
    list[dict]
        One {'prob': float, 'label': str} entry per sample.
        NOTE(review): 'prob' is always the class-0 ('off') softmax
        probability regardless of the predicted label -- confirm intended.
    """
    model1.eval()
    model2.eval()
    model3.eval()
    start = time.time()
    with torch.no_grad():
        logits = model3(model2(model1(normalized_batch)))
    end = time.time()
    # print("Inference time: ", end - start)
    probs = torch.softmax(logits, dim=1).numpy()
    predicted_classes = probs.argmax(axis=1)
    return [
        {'prob': float(row[0]), 'label': 'off' if cls == 0 else 'on'}
        for row, cls in zip(probs, predicted_classes)
    ]
def inference_mult_models(models, normalized_batch):
    """Run an arbitrary-length model pipeline, timing each stage separately.

    Parameters
    ----------
    models : sequence of torch.nn.Module
        Stages applied in order; the last stage must emit two logits per
        sample (index 0 -> 'off', index 1 -> 'on').
    normalized_batch : torch.Tensor
        Preprocessed input batch.

    Returns
    -------
    (list[dict], list[float])
        Per-sample {'prob': float, 'label': str} results, and the
        wall-clock seconds spent in each stage (same order as ``models``).
        NOTE(review): 'prob' is always the class-0 ('off') softmax
        probability regardless of the predicted label -- confirm intended.
    """
    for stage in models:
        stage.eval()
    per_stage_seconds = []
    activations = normalized_batch
    with torch.no_grad():
        for stage in models:
            t0 = time.time()
            activations = stage(activations)
            per_stage_seconds.append(time.time() - t0)
    probs = torch.softmax(activations, dim=1).numpy()
    predicted_classes = probs.argmax(axis=1)
    results = [
        {'prob': float(row[0]), 'label': 'off' if cls == 0 else 'on'}
        for row, cls in zip(probs, predicted_classes)
    ]
    # print("Inference time: ", sum(per_stage_seconds))
    return results, per_stage_seconds