Spaces:
Sleeping
Sleeping
# Third-party dependencies: Gradio for the UI, Hugging Face CLIP for
# image/text matching, Pillow for image handling, PyTorch as the backend.
import gradio as gr
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

# Pre-trained CLIP checkpoint shared by the image and text encoders.
model_name = "openai/clip-vit-base-patch32"
model = CLIPModel.from_pretrained(model_name)
processor = CLIPProcessor.from_pretrained(model_name)
def _persona(age, gender, interests, income, psychographics, behaviors):
    """Assemble one persona record with the standard field names."""
    return {
        "Age": age,
        "Gender": gender,
        "Interests": interests,
        "Income Level": income,
        "Psychographics": psychographics,
        "Behavioral Traits": behaviors,
    }


# Marketing personas keyed by the interest category CLIP will match against.
predefined_personas = {
    "Fashion": _persona(
        "18-30 years",
        "Likely Female",
        ["Fashion", "Trendy Clothing", "Social Media"],
        "Medium",
        ["Fashion-conscious", "Socially active", "Trend-seeker"],
        ["Follows fashion influencers", "Buys seasonal items", "Active on Instagram"],
    ),
    "Technology": _persona(
        "20-40 years",
        "Likely Male",
        ["Technology", "Gadgets", "Software"],
        "High",
        ["Tech-savvy", "Innovative", "Early adopter"],
        ["Follows tech blogs", "Interested in product launches", "Buys online frequently"],
    ),
    "Sports": _persona(
        "15-40 years",
        "Likely Male",
        ["Sports", "Fitness", "Outdoor Activities"],
        "Medium",
        ["Active lifestyle", "Health-conscious", "Competitive"],
        ["Watches sports events", "Goes to the gym", "Buys athletic gear"],
    ),
    "Travel": _persona(
        "25-45 years",
        "Likely Female or Male",
        ["Travel", "Adventure", "Culture"],
        "Medium to High",
        ["Experience-seeker", "Open-minded", "Curious"],
        ["Follows travel blogs", "Likes outdoor activities", "Prefers unique experiences"],
    ),
    "Luxury": _persona(
        "30-50 years",
        "Likely Female or Male",
        ["Luxury Goods", "High-end Fashion", "Fine Dining"],
        "High",
        ["Status-conscious", "Exclusive-taste", "Prestige-oriented"],
        ["Buys luxury brands", "Frequent traveler", "Invests in high-end products"],
    ),
    "Fitness": _persona(
        "20-40 years",
        "Likely Female or Male",
        ["Fitness", "Healthy Living", "Nutrition"],
        "Medium",
        ["Health-conscious", "Goal-oriented", "Disciplined"],
        ["Goes to the gym regularly", "Buys fitness equipment", "Follows nutrition plans"],
    ),
    "Education": _persona(
        "18-35 years",
        "Likely Female or Male",
        ["Education", "Learning", "Career Development"],
        "Medium",
        ["Growth-oriented", "Knowledge-seeker", "Curious"],
        ["Follows online courses", "Interested in higher education", "Learns new skills"],
    ),
    "Entertainment": _persona(
        "15-35 years",
        "Likely Female or Male",
        ["Movies", "Music", "Video Games"],
        "Medium",
        ["Fun-seeking", "Relaxed", "Entertainment-driven"],
        ["Watches Netflix", "Plays video games", "Follows pop culture trends"],
    ),
}

# Category names, in insertion order, compared against the image by CLIP.
interest_categories = list(predefined_personas)
def get_most_similar_persona(image):
    """Return the predefined persona whose interest category best matches the image.

    Embeds the image and one text prompt per interest category with CLIP,
    L2-normalizes both embeddings so the dot product equals cosine
    similarity, and returns the persona of the highest-scoring category.

    Args:
        image: A PIL image (anything ``CLIPProcessor`` accepts as an image).

    Returns:
        dict: The matching persona record from ``predefined_personas``.
    """
    # One natural-language prompt per interest category for the text encoder.
    category_descriptions = [
        f"A photo that represents {category.lower()}." for category in interest_categories
    ]
    # Run the entire forward pass without autograd: the original wrapped only
    # the image branch, so the text encoder tracked gradients needlessly at
    # inference time (extra memory and compute for an unused graph).
    with torch.no_grad():
        image_inputs = processor(images=image, return_tensors="pt")
        image_features = model.get_image_features(**image_inputs)
        text_inputs = processor(text=category_descriptions, return_tensors="pt", padding=True)
        text_features = model.get_text_features(**text_inputs)
        # Normalize so similarity is cosine similarity.
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)
        similarities = torch.matmul(image_features, text_features.T)
    # Index of the best-matching category across all text prompts.
    best_idx = similarities.argmax().item()
    return predefined_personas[interest_categories[best_idx]]
def persona_analysis(image):
    """Gradio handler: render the persona inferred from an uploaded image.

    Each persona field becomes one "key: value" line of the returned text.
    """
    persona = get_most_similar_persona(image)
    lines = [f"{field}: {detail}" for field, detail in persona.items()]
    return "\n".join(lines)
# Wire the persona generator into a one-input, one-output web UI:
# a PIL image upload in, the persona as plain text out.
iface = gr.Interface(
    persona_analysis,
    gr.Image(type="pil"),
    gr.Textbox(),
    title="Marketing Persona Generator",
    description="Upload an image to generate a marketing persona based on the ad subject.",
)

# Start the local Gradio server.
iface.launch()