Spaces:
Runtime error
Runtime error
Updated style
Browse files
app.py
CHANGED
|
@@ -6,12 +6,15 @@ import gradio as gr
|
|
| 6 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 7 |
model, preprocess = clip.load("ViT-B/32", device=device)
|
| 8 |
|
|
|
|
| 9 |
def allure(image, gender):
|
| 10 |
image = Image.fromarray(image.astype("uint8"), "RGB")
|
| 11 |
gender = gender.lower()
|
| 12 |
image = preprocess(image).unsqueeze(0).to(device)
|
| 13 |
-
positive_terms = [f'a hot {gender}',
|
| 14 |
-
|
|
|
|
|
|
|
| 15 |
|
| 16 |
pairs = list(zip(positive_terms, negative_terms))
|
| 17 |
|
|
@@ -24,7 +27,7 @@ def allure(image, gender):
|
|
| 24 |
return probs[0]
|
| 25 |
|
| 26 |
probs = [evaluate(pair) for pair in pairs]
|
| 27 |
-
|
| 28 |
positive_probs = [prob[0] for prob in probs]
|
| 29 |
negative_probs = [prob[1] for prob in probs]
|
| 30 |
|
|
@@ -38,7 +41,16 @@ def allure(image, gender):
|
|
| 38 |
composite = round(composite, 2)
|
| 39 |
return composite, hotness_score, beauty_score, attractiveness_score
|
| 40 |
|
| 41 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 42 |
fn=allure,
|
| 43 |
inputs=[
|
| 44 |
gr.Image(label="Image"),
|
|
@@ -56,11 +68,18 @@ iface = gr.Interface(
|
|
| 56 |
gr.Textbox(label="Beauty (%)"),
|
| 57 |
gr.Textbox(label="Allure (%)"),
|
| 58 |
],
|
| 59 |
-
examples
|
| 60 |
['Mansib_01_x2048.png', 'Man'],
|
| 61 |
['Mansib_02_x2048.png', 'Man']
|
| 62 |
],
|
| 63 |
title="Attractiveness Evaluator (powered by OpenAI CLIP)",
|
| 64 |
-
description="A simple attractiveness evaluation app using OpenAI's CLIP model.
|
| 65 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 66 |
iface.launch()
|
|
|
|
# Pick the inference device: GPU when CUDA is available, CPU otherwise.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load the ViT-B/32 CLIP checkpoint together with its matching image
# preprocessing pipeline, placed on the selected device.
model, preprocess = clip.load("ViT-B/32", device=device)
|
| 8 |
|
| 9 |
+
|
| 10 |
def allure(image, gender):
|
| 11 |
image = Image.fromarray(image.astype("uint8"), "RGB")
|
| 12 |
gender = gender.lower()
|
| 13 |
image = preprocess(image).unsqueeze(0).to(device)
|
| 14 |
+
positive_terms = [f'a hot {gender}',
|
| 15 |
+
f'a beautiful {gender}', f'an alluring {gender}']
|
| 16 |
+
negative_terms = [f'a gross {gender}',
|
| 17 |
+
f'an ugly {gender}', f'a hideous {gender}']
|
| 18 |
|
| 19 |
pairs = list(zip(positive_terms, negative_terms))
|
| 20 |
|
|
|
|
| 27 |
return probs[0]
|
| 28 |
|
| 29 |
probs = [evaluate(pair) for pair in pairs]
|
| 30 |
+
|
| 31 |
positive_probs = [prob[0] for prob in probs]
|
| 32 |
negative_probs = [prob[1] for prob in probs]
|
| 33 |
|
|
|
|
| 41 |
composite = round(composite, 2)
|
| 42 |
return composite, hotness_score, beauty_score, attractiveness_score
|
| 43 |
|
| 44 |
+
|
| 45 |
+
with gr.Interface(
|
| 46 |
+
theme=gr.themes.Soft(
|
| 47 |
+
font=[gr.themes.GoogleFont("Quicksand"),
|
| 48 |
+
"ui-sans-serif", "sans-serif"],
|
| 49 |
+
font_mono=[gr.themes.GoogleFont("IBM Plex Mono"),
|
| 50 |
+
"ui-monospace", "monospace"],
|
| 51 |
+
primary_hue="cyan",
|
| 52 |
+
secondary_hue="cyan",
|
| 53 |
+
radius_size="lg"),
|
| 54 |
fn=allure,
|
| 55 |
inputs=[
|
| 56 |
gr.Image(label="Image"),
|
|
|
|
| 68 |
gr.Textbox(label="Beauty (%)"),
|
| 69 |
gr.Textbox(label="Allure (%)"),
|
| 70 |
],
|
| 71 |
+
examples=[
|
| 72 |
['Mansib_01_x2048.png', 'Man'],
|
| 73 |
['Mansib_02_x2048.png', 'Man']
|
| 74 |
],
|
| 75 |
title="Attractiveness Evaluator (powered by OpenAI CLIP)",
|
| 76 |
+
description="A simple attractiveness evaluation app using OpenAI's CLIP model.",
|
| 77 |
+
) as iface:
|
| 78 |
+
with gr.Accordion("How does it work?"):
|
| 79 |
+
gr.Markdown(
|
| 80 |
+
"""The input image is passed to OpenAI's CLIP image captioning model and evaluated for how much it conforms to the model's idea of hotness, beauty, and attractiveness.
|
| 81 |
+
These values are then combined to produce a composite score on a scale of 0 to 100.
|
| 82 |
+
# ⚠️ WARNING: This is meant solely for educational use.""")
|
| 83 |
+
|
| 84 |
+
iface.queue() # Add `api_open = False` to disable direct API access.
|
| 85 |
iface.launch()
|