"""Gradio app: multi-model skin condition analyzer.

Runs one uploaded face/skin image through several Hugging Face image
classifiers (skin type, acne, wrinkles) and reports each model's predicted
label with its softmax confidence.
"""

import gradio as gr
import torch
from PIL import Image
from torchvision import transforms
from transformers import AutoModelForImageClassification

# Model registry: each entry maps a Hugging Face checkpoint to its output
# labels and the key used in the JSON result.
# NOTE(review): label order is assumed to match each model's logit order
# (index 0 -> first label) — verify against each checkpoint's
# config.id2label before trusting the reported label.
MODEL_CONFIGS = [
    {
        "name": "anismizi/skin-type-classifier",
        "labels": ["dry", "oily"],
        "key": "oil_vs_dry",
    },
    {
        "name": "imfarzanansari/skintelligent-acne",
        "labels": ["no_acne", "acne"],
        "key": "acne",
    },
    {
        "name": "imfarzanansari/skintelligent-wrinkles",
        "labels": ["no_wrinkles", "wrinkles"],
        "key": "wrinkles",
    },
]

# Load all models once at startup (downloads weights on first run) and put
# them in eval mode so dropout/batch-norm behave deterministically.
MODELS = []
for config in MODEL_CONFIGS:
    model = AutoModelForImageClassification.from_pretrained(config["name"])
    model.eval()
    MODELS.append(model)

# Common ImageNet-style preprocessing shared by all models.
# NOTE(review): assumes every checkpoint expects 224x224 ImageNet-normalized
# input — confirm against each model's image processor config.
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])


def analyze_skin(image: Image.Image) -> dict:
    """Classify a skin image with every registered model.

    Args:
        image: PIL image from the Gradio input (may be ``None`` when the
            user submits without uploading anything).

    Returns:
        Mapping of each model's ``key`` to ``{"label": ..., "confidence":
        "NN.NN%"}``, or ``{"error": ...}`` when no image was provided.
    """
    # Gradio passes None when the user hits Submit with no image; the
    # original code crashed here with AttributeError.
    if image is None:
        return {"error": "Please upload an image."}

    image = image.convert("RGB")
    input_batch = preprocess(image).unsqueeze(0)  # add batch dimension

    results = {}
    with torch.no_grad():
        for model, config in zip(MODELS, MODEL_CONFIGS):
            logits = model(input_batch).logits
            probs = torch.softmax(logits, dim=1)
            confidence, pred_idx = torch.max(probs, dim=1)
            results[config["key"]] = {
                "label": config["labels"][pred_idx.item()],
                "confidence": f"{confidence.item():.2%}",
            }
    return results


iface = gr.Interface(
    fn=analyze_skin,
    inputs=gr.Image(type="pil"),
    outputs=gr.JSON(label="Skin Analysis Results"),
    title="Comprehensive Skin Condition Analyzer",
    # Fixed: the original description claimed "redness" classification,
    # but no redness model is registered in MODEL_CONFIGS.
    description=(
        "Classifies skin image for oily/dry, acne, and wrinkles "
        "using multiple models."
    ),
)

if __name__ == "__main__":
    iface.launch()