# Gradio demo: classify an uploaded image using CLIP image features
# fed into a trained downstream classifier.
# Third-party dependencies.
from PIL import Image
import gradio as gr
import joblib
import numpy as np
import torch
from transformers import CLIPProcessor, CLIPModel

# Local configuration (device string and path to the trained classifier).
from config import DEVICE, MODEL_SAVE_PATH
# Load models
# Frozen CLIP backbone used purely as an image feature extractor, moved to the
# configured device, plus the matching preprocessor for pixel normalization.
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(DEVICE)
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# Trained downstream classifier (scikit-learn-style, per `.predict` usage below)
# and the label encoder that maps integer predictions back to class names.
classifier = joblib.load(MODEL_SAVE_PATH)
# NOTE(review): path is hard-coded relative to the CWD, unlike MODEL_SAVE_PATH
# which comes from config — confirm the encoder file sits next to this script.
label_encoder = joblib.load("label_encoder.joblib")
def classify_image(image):
    """Classify a single image with CLIP features + the trained classifier.

    Args:
        image: A ``PIL.Image`` as delivered by the Gradio ``Image(type="pil")``
            component.

    Returns:
        str: ``"Predicted Class: <label>"`` for the text output component.
    """
    # CLIP expects 3-channel input; uploads may be RGBA, palette, or grayscale.
    image = image.convert("RGB")
    inputs = clip_processor(images=image, return_tensors="pt").to(DEVICE)
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        image_features = clip_model.get_image_features(**inputs)
        # L2-normalize the embedding; presumably the classifier was trained on
        # unit-norm CLIP features — verify against the training pipeline.
        image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)
    # scikit-learn classifiers operate on CPU numpy arrays.
    features = image_features.cpu().numpy()
    pred = classifier.predict(features)
    # Map the integer class index back to its human-readable label.
    label = label_encoder.inverse_transform(pred)[0]
    return f"Predicted Class: {label}"
# Build the web UI, then start the local server.  Naming the interface `demo`
# follows the common Gradio/HF Spaces convention.
demo = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="CLIP Model Classifier",
    description="Upload an image. The model will classify it using CLIP + a custom classifier.",
)
demo.launch()