Delete testRunningModel.py
Browse files- testRunningModel.py +0 -97
testRunningModel.py
DELETED
|
@@ -1,97 +0,0 @@
|
|
| 1 |
-
# Imports, regrouped per convention: stdlib / third-party / local.
import os
import random
import subprocess
import sys

import gradio as gr
import numpy as np
import PIL
import PIL.Image
import torch
import torch.nn as nn  # added: `nn.Linear` is used in load_model but was never imported
from diffusers import DiffusionPipeline
from huggingface_hub import hf_hub_download  # added: used in load_model but was never imported
from torchvision import models, transforms  # added: used below but were never imported

import face_detection

# Historical environment-setup notes (kept for reference):
# os.system("ls -a")
# print("1")
# os.system("pip install ./dlib-19.24.99-cp36-cp36m-linux_x86_64.whl")
# print("2")
# os.system("pip install dlib-19.24.99-cp36-cp36m-linux_x86_64.whl")
# os.system("pip install face-recognition")
# import face_detection
# import spaces #[uncomment to use ZeroGPU]
| 24 |
-
# # Function to display the uploaded image
|
| 25 |
-
# def process_image(image : PIL.Image.Image):
|
| 26 |
-
# outputs = face_detection.getCroppedImages(image)
|
| 27 |
-
# # do AI stuff here
|
| 28 |
-
# return gr.Image(outputs[0])
|
| 29 |
-
|
| 30 |
-
# Hugging Face Hub repo hosting the trained classifier, and the weight file
# inside that repo. NOTE(review): despite its name, `weight_file_id` holds a
# filename (passed as `filename=` below), not a Hub file id.
model_repo_id = "CSSE416-final-project/faceRecogModel"
weight_file_id = "matTemp.bin"
-
# 1. Load the model from the Hugging Face Hub.
def load_model(repo_id):
    """Download classifier weights from the Hub and build the model.

    Args:
        repo_id: Hugging Face repo id that holds the weight file.

    Returns:
        A ResNet-18 with a 100-way classification head, fine-tuned weights
        loaded, set to evaluation mode.
    """
    # Bug fix: the original ignored `repo_id` and always read the global
    # `model_repo_id`. Use the parameter (identical behavior at the one
    # call site, which passes model_repo_id).
    weights_path = hf_hub_download(repo_id=repo_id, filename=weight_file_id)

    # ResNet-18 backbone. The ImageNet weights loaded by pretrained=True are
    # fully overwritten by load_state_dict below, so the flag should not
    # matter here — TODO(review): confirm the checkpoint is complete.
    model = models.resnet18(pretrained=True)
    num_ftrs = model.fc.in_features
    # Replace the final layer with a 100-way head.
    # TODO(review): confirm 100 matches the number of celebrity classes.
    model.fc = nn.Linear(num_ftrs, 100)

    # Load the fine-tuned weights on CPU (Spaces CPU runtime).
    state_dict = torch.load(weights_path, map_location=torch.device("cpu"))
    model.load_state_dict(state_dict)
    model.eval()  # inference only: disable dropout/batch-norm updates
    return model
| 50 |
-
|
| 51 |
-
|
| 52 |
-
# 2. Load the model once at import time (module-level side effect: this
# downloads weights from the Hub when the module is imported).
model = load_model(model_repo_id)


# 3. Define how to transform the image: PIL image -> float tensor.
# NOTE(review): this assignment rebinds the name `transforms`, shadowing the
# torchvision `transforms` module from here on. Also no resize/normalize is
# applied — confirm this matches the preprocessing used during training.
transforms = transforms.Compose(
    [
        transforms.ToTensor()
    ])
|
| 63 |
-
# 4. Preprocess the uploaded image and classify each detected face.
def process_image_str(groupImageFilePath: str):
    """Detect faces in the image at the given path and classify the first one.

    Args:
        groupImageFilePath: Path to an uploaded image file (Gradio passes a
            filepath because the input component uses type='filepath').

    Returns:
        The predicted class index (int) for the first detected face, or an
        explanatory string when no face is found. (The original raised
        IndexError via list.pop(0) on an empty list in that case.)
    """
    groupImage = PIL.Image.open(groupImageFilePath)
    locations, images = face_detection.getCroppedImages(groupImage)

    outputLabels = []
    for image in images:
        # PIL image -> CHW float tensor, with a leading batch dimension of 1.
        input_tensor = transforms(image).unsqueeze(0)

        # Classify without tracking gradients (inference only).
        with torch.no_grad():
            outputs_t = model(input_tensor)
            _, pred_t = torch.max(outputs_t, dim=1)
            outputLabels.append(pred_t.item())

    # Robustness fix: guard the no-face case instead of crashing on pop(0).
    if not outputLabels:
        return "No face detected"

    # return gr.Image(image)  # (previous image output, kept for reference)
    # As before, only the first face's label is returned, even though all
    # faces are classified above.
    return outputLabels.pop(0)
| 82 |
-
|
| 83 |
-
|
| 84 |
-
# 5. Create the Gradio interface.
interface = gr.Interface(
    fn=process_image_str,  # Function to process the image
    inputs=gr.Image(type='filepath'),  # Upload input: callback receives a file path, not an array
    # outputs=gr.Image(),  # Display output (previous image output, kept for reference)
    outputs='text',  # predicted label rendered as text
    allow_flagging='never',  # NOTE(review): deprecated in Gradio 4.x (replaced by flagging_mode) — confirm installed version
    title="Celebrity Face Detector",
    description="Upload a picture of a celebrity or group of celebrities to identify them"
)
| 94 |
-
|
| 95 |
-
# 6. Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    interface.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|