Spaces:
Runtime error
Froze Gradio package
- .history/app_20230528143707.py +0 -61
- .history/app_20230528145539.py +0 -66
- .history/app_20230528145615.py +0 -66
- app.py +2 -1
- requirements.txt +1 -1
.history/app_20230528143707.py DELETED
@@ -1,61 +0,0 @@
-import torch
-import clip
-from PIL import Image
-import gradio as gr
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model, preprocess = clip.load("ViT-B/32", device=device)
-
-def allure(image, gender):
-    image = Image.fromarray(image.astype("uint8"), "RGB")
-    gender = gender.lower()
-    image = preprocess(image).unsqueeze(0).to(device)
-    positive_terms = [f'a hot {gender}', f'a beautiful {gender}', f'an alluring {gender}']
-    negative_terms = [f'a gross {gender}', f'an ugly {gender}', f'a hideous {gender}']
-
-    pairs = list(zip(positive_terms, negative_terms))
-
-    def evaluate(terms):
-        text = clip.tokenize(terms).to(device)
-
-        with torch.no_grad():
-            logits_per_image, logits_per_text = model(image, text)
-            probs = logits_per_image.softmax(dim=-1).cpu().numpy()
-        return probs[0]
-
-    probs = [evaluate(pair) for pair in pairs]
-
-    positive_probs = [prob[0] for prob in probs]
-    negative_probs = [prob[1] for prob in probs]
-
-    hotness_score = round((probs[0][0] - probs[0][1] + 1) * 50, 2)
-    beauty_score = round((probs[1][0] - probs[1][1] + 1) * 50, 2)
-    attractiveness_score = round((probs[2][0] - probs[2][1] + 1) * 50, 2)
-
-    hot_score = sum(positive_probs)/len(positive_probs)
-    ugly_score = sum(negative_probs)/len(negative_probs)
-    composite = ((hot_score - ugly_score)+1) * 50
-    composite = round(composite, 2)
-    return composite, hotness_score, beauty_score, attractiveness_score
-
-iface = gr.Interface(
-    fn=allure,
-    inputs=[
-        gr.inputs.Image(label="Image"),
-        gr.inputs.Dropdown(
-            [
-                'Person', 'Man', 'Woman'
-            ],
-            default='Person',
-        )
-    ],
-    outputs=[
-        gr.Textbox(label="Composite (%)"),
-        gr.Textbox(label="Hotness (%)"),
-        gr.Textbox(label="Beauty (%)"),
-        gr.Textbox(label="Allure (%)"),
-    ],
-    title="Attractiveness Evaluator (using OpenAI CLIP)",
-    description="A simple attractiveness evaluation app using OpenAI's CLIP model. \nHow it works: \nThe input image is passed to OpenAI's CLIP image captioning model and evaluated for how much it conforms to the model's idea of hotness, beauty, and attractiveness. These values are then combined to produce a composite score on a scale of 0 to 100.\nNote: This is meant solely for educational use.",
-)
-iface.launch()
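For reference, every revision in this commit relies on the same pairwise scoring trick: each positive/negative prompt pair is softmaxed against the image, and the probability gap is mapped from [-1, 1] onto a 0-100 scale. A minimal, standalone sketch of that arithmetic (the probability values below are made up for illustration, not real CLIP output):

# Each (positive, negative) prompt pair is softmaxed together, so
# p_pos + p_neg == 1 and p_pos - p_neg lies in [-1, 1]; adding 1 and
# scaling by 50 maps that gap onto [0, 100].
def pair_score(p_pos: float, p_neg: float) -> float:
    return round((p_pos - p_neg + 1) * 50, 2)

print(pair_score(0.9, 0.1))  # 90.0 -> image matches the positive prompt
print(pair_score(0.5, 0.5))  # 50.0 -> neutral
print(pair_score(0.1, 0.9))  # 10.0 -> image matches the negative prompt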
.history/app_20230528145539.py DELETED
@@ -1,66 +0,0 @@
-import torch
-import clip
-from PIL import Image
-import gradio as gr
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model, preprocess = clip.load("ViT-B/32", device=device)
-
-def allure(image, gender):
-    image = Image.fromarray(image.astype("uint8"), "RGB")
-    gender = gender.lower()
-    image = preprocess(image).unsqueeze(0).to(device)
-    positive_terms = [f'a hot {gender}', f'a beautiful {gender}', f'an alluring {gender}']
-    negative_terms = [f'a gross {gender}', f'an ugly {gender}', f'a hideous {gender}']
-
-    pairs = list(zip(positive_terms, negative_terms))
-
-    def evaluate(terms):
-        text = clip.tokenize(terms).to(device)
-
-        with torch.no_grad():
-            logits_per_image, logits_per_text = model(image, text)
-            probs = logits_per_image.softmax(dim=-1).cpu().numpy()
-        return probs[0]
-
-    probs = [evaluate(pair) for pair in pairs]
-
-    positive_probs = [prob[0] for prob in probs]
-    negative_probs = [prob[1] for prob in probs]
-
-    hotness_score = round((probs[0][0] - probs[0][1] + 1) * 50, 2)
-    beauty_score = round((probs[1][0] - probs[1][1] + 1) * 50, 2)
-    attractiveness_score = round((probs[2][0] - probs[2][1] + 1) * 50, 2)
-
-    hot_score = sum(positive_probs)/len(positive_probs)
-    ugly_score = sum(negative_probs)/len(negative_probs)
-    composite = ((hot_score - ugly_score)+1) * 50
-    composite = round(composite, 2)
-    return composite, hotness_score, beauty_score, attractiveness_score
-
-iface = gr.Interface(
-    fn=allure,
-    inputs=[
-        gr.inputs.Image(label="Image"),
-        gr.inputs.Dropdown(
-            [
-                'Person', 'Man', 'Woman'
-            ],
-            default='Person',
-            label="Gender"
-        )
-    ],
-    outputs=[
-        gr.Textbox(label="Composite (%)"),
-        gr.Textbox(label="Hotness (%)"),
-        gr.Textbox(label="Beauty (%)"),
-        gr.Textbox(label="Allure (%)"),
-    ],
-    examples = [
-        ['Mansib_01_x2048.png', 'Man'],
-        ['Mansib_02_x2048.png', 'Man']
-    ],
-    title="Attractiveness Evaluator (using OpenAI CLIP)",
-    description="A simple attractiveness evaluation app using OpenAI's CLIP model. \nHow it works: \nThe input image is passed to OpenAI's CLIP image captioning model and evaluated for how much it conforms to the model's idea of hotness, beauty, and attractiveness. These values are then combined to produce a composite score on a scale of 0 to 100.\nNote: This is meant solely for educational use.",
-)
-iface.launch()
.history/app_20230528145615.py DELETED
@@ -1,66 +0,0 @@
-import torch
-import clip
-from PIL import Image
-import gradio as gr
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model, preprocess = clip.load("ViT-B/32", device=device)
-
-def allure(image, gender):
-    image = Image.fromarray(image.astype("uint8"), "RGB")
-    gender = gender.lower()
-    image = preprocess(image).unsqueeze(0).to(device)
-    positive_terms = [f'a hot {gender}', f'a beautiful {gender}', f'an alluring {gender}']
-    negative_terms = [f'a gross {gender}', f'an ugly {gender}', f'a hideous {gender}']
-
-    pairs = list(zip(positive_terms, negative_terms))
-
-    def evaluate(terms):
-        text = clip.tokenize(terms).to(device)
-
-        with torch.no_grad():
-            logits_per_image, logits_per_text = model(image, text)
-            probs = logits_per_image.softmax(dim=-1).cpu().numpy()
-        return probs[0]
-
-    probs = [evaluate(pair) for pair in pairs]
-
-    positive_probs = [prob[0] for prob in probs]
-    negative_probs = [prob[1] for prob in probs]
-
-    hotness_score = round((probs[0][0] - probs[0][1] + 1) * 50, 2)
-    beauty_score = round((probs[1][0] - probs[1][1] + 1) * 50, 2)
-    attractiveness_score = round((probs[2][0] - probs[2][1] + 1) * 50, 2)
-
-    hot_score = sum(positive_probs)/len(positive_probs)
-    ugly_score = sum(negative_probs)/len(negative_probs)
-    composite = ((hot_score - ugly_score)+1) * 50
-    composite = round(composite, 2)
-    return composite, hotness_score, beauty_score, attractiveness_score
-
-iface = gr.Interface(
-    fn=allure,
-    inputs=[
-        gr.inputs.Image(label="Image"),
-        gr.inputs.Dropdown(
-            [
-                'Person', 'Man', 'Woman'
-            ],
-            default='Person',
-            label="Gender"
-        )
-    ],
-    outputs=[
-        gr.Textbox(label="Composite (%)"),
-        gr.Textbox(label="Hotness (%)"),
-        gr.Textbox(label="Beauty (%)"),
-        gr.Textbox(label="Allure (%)"),
-    ],
-    examples = [
-        ['Mansib_01_x2048.png', 'Man'],
-        ['Mansib_02_x2048.png', 'Man']
-    ],
-    title="Attractiveness Evaluator (powered by OpenAI CLIP)",
-    description="A simple attractiveness evaluation app using OpenAI's CLIP model. \nHow it works: \nThe input image is passed to OpenAI's CLIP image captioning model and evaluated for how much it conforms to the model's idea of hotness, beauty, and attractiveness. These values are then combined to produce a composite score on a scale of 0 to 100.\nNote: This is meant solely for educational use.",
-)
-iface.launch()
app.py CHANGED
@@ -2,6 +2,7 @@ import torch
 import clip
 from PIL import Image
 import gradio as gr
+import datetime
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 model, preprocess = clip.load("ViT-B/32", device=device)
@@ -117,7 +118,7 @@ with gr.Interface(
         ['Mansib_02_x2048.png', 'Man']
     ],
     title="Attractiveness Evaluator (powered by OpenAI CLIP)",
-    description="A simple attractiveness evaluation app using OpenAI's CLIP model.",
+    description=f"""A simple attractiveness evaluation app using the latest, current (newest stable version as of {datetime.datetime().now().strftime('%A, %b %d %Y %I:%M:%S%p')}) version of OpenAI's CLIP model.""",
 ) as iface:
     with gr.Accordion("How does it work?"):
         gr.Markdown(
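Worth flagging: the added description calls datetime.datetime().now(), but datetime.datetime cannot be instantiated without arguments, so this line raises a TypeError the moment the module is imported; that most likely explains the Runtime error badge at the top of this page. A minimal sketch of what was presumably intended (calling now() on the class itself):

import datetime

# datetime.datetime() with no arguments raises
#   TypeError: function missing required argument 'year' (pos 1)
# The classmethod now() is the intended call:
timestamp = datetime.datetime.now().strftime('%A, %b %d %Y %I:%M:%S%p')
print(timestamp)  # e.g. "Sunday, May 28 2023 02:56:15PM"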
requirements.txt CHANGED
@@ -1,4 +1,4 @@
-gradio==3.
+gradio==3.33.1
 torch
 git+https://github.com/openai/CLIP.git
 Pillow
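The pin matches the commit message: freezing Gradio at 3.33.1 keeps the 3.x-era gr.inputs.* aliases used in app.py importable, since later Gradio releases dropped them. A quick, purely illustrative sanity check after installing the pinned requirements:

# Confirm the frozen version is what actually got installed
# (illustrative check, not part of the app itself).
import gradio

assert gradio.__version__ == "3.33.1", gradio.__version__
print("gradio", gradio.__version__)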