# Hugging Face Space: CLIP-based image classifier (Gradio app)
import gradio as gr
import joblib
import numpy as np
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

from config import DEVICE, MODEL_SAVE_PATH
# --- Load models once at import time so the Space answers requests immediately ---
# CLIP backbone + processor produce image embeddings on DEVICE (from config).
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(DEVICE)
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# Downstream classifier trained on CLIP embeddings, and the label encoder
# that maps its integer predictions back to class names.
# NOTE(review): "label_encoder.joblib" is a relative path — assumes the app
# runs from the repo root; MODEL_SAVE_PATH comes from config.
classifier = joblib.load(MODEL_SAVE_PATH)
label_encoder = joblib.load("label_encoder.joblib")
def classify_image(image):
    """Classify one image with CLIP features + the trained downstream classifier.

    Args:
        image: PIL image handed over by Gradio (``type="pil"``).

    Returns:
        Human-readable prediction string, e.g. ``"Predicted Class: cat"``.
    """
    # CLIP expects 3-channel input; Gradio may deliver RGBA or grayscale.
    image = image.convert("RGB")
    inputs = clip_processor(images=image, return_tensors="pt").to(DEVICE)
    # Inference only — no autograd graph needed.
    with torch.no_grad():
        image_features = clip_model.get_image_features(**inputs)
        # L2-normalize so the embedding matches the representation the
        # classifier was (presumably) trained on — TODO confirm vs training code.
        image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)
    # scikit-learn-style classifier operates on CPU numpy arrays.
    features = image_features.cpu().numpy()
    pred = classifier.predict(features)
    label = label_encoder.inverse_transform(pred)[0]
    return f"Predicted Class: {label}"
# Build and launch the web UI. Kept at module top level (no __main__ guard)
# because Hugging Face Spaces imports/executes this file to start the app.
gr.Interface(
    fn=classify_image,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="CLIP Model Classifier",
    description="Upload an image. The model will classify it using CLIP + a custom classifier.",
).launch()