Mansib commited on
Commit
afad819
·
1 Parent(s): b43460b

Added examples

Browse files
.gitattributes CHANGED
@@ -32,3 +32,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ *.png filter=lfs diff=lfs merge=lfs -text
36
+ *.jpg filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .history
.history/app_20230528143707.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import clip
3
+ from PIL import Image
4
+ import gradio as gr
5
+
6
# Run CLIP on the GPU when one is available; fall back to CPU otherwise.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load the ViT-B/32 CLIP checkpoint together with its matching image
# preprocessing transform (resize/crop/normalize expected by the model).
model, preprocess = clip.load("ViT-B/32", device=device)
8
+
9
def allure(image, gender):
    """Score an image for perceived attractiveness via CLIP prompt pairs.

    Each positive prompt ("a hot ...") is paired with a negative prompt
    ("a gross ...") and the image is scored by softmax over the pair's
    image-text logits, so each pair yields probabilities summing to 1.

    Args:
        image: RGB image as a numpy array (H, W, 3), as supplied by gradio.
        gender: One of 'Person', 'Man', 'Woman' (case-insensitive); it is
            interpolated into the text prompts.

    Returns:
        Tuple (composite, hotness, beauty, allure) of floats rounded to
        two decimals, each on a 0-100 scale.
    """
    pil_image = Image.fromarray(image.astype("uint8"), "RGB")
    gender = gender.lower()
    # Preprocess once; the same tensor is reused for every prompt pair.
    image_tensor = preprocess(pil_image).unsqueeze(0).to(device)

    positive_terms = [f'a hot {gender}', f'a beautiful {gender}', f'an alluring {gender}']
    negative_terms = [f'a gross {gender}', f'an ugly {gender}', f'a hideous {gender}']
    pairs = list(zip(positive_terms, negative_terms))

    def evaluate(terms):
        # Softmax over the (positive, negative) pair: index 0 is the
        # positive-prompt probability, index 1 the negative's.
        text = clip.tokenize(terms).to(device)
        with torch.no_grad():
            logits_per_image, _ = model(image_tensor, text)  # text-side logits unused
            probs = logits_per_image.softmax(dim=-1).cpu().numpy()
        return probs[0]

    probs = [evaluate(pair) for pair in pairs]

    positive_probs = [prob[0] for prob in probs]
    negative_probs = [prob[1] for prob in probs]

    # The probability difference lies in [-1, 1]; shift/scale maps it to 0-100.
    hotness_score = round((probs[0][0] - probs[0][1] + 1) * 50, 2)
    beauty_score = round((probs[1][0] - probs[1][1] + 1) * 50, 2)
    attractiveness_score = round((probs[2][0] - probs[2][1] + 1) * 50, 2)

    # Composite: mean positive probability minus mean negative probability,
    # rescaled onto the same 0-100 range as the individual scores.
    hot_score = sum(positive_probs) / len(positive_probs)
    ugly_score = sum(negative_probs) / len(negative_probs)
    composite = round((hot_score - ugly_score + 1) * 50, 2)
    return composite, hotness_score, beauty_score, attractiveness_score
40
+
41
# Gradio UI: an image plus a gender dropdown in, four score textboxes out.
# NOTE(review): gr.inputs.* is the legacy gradio API (pre-3.x style);
# newer gradio versions expect gr.Image / gr.Dropdown — confirm the pinned
# gradio version before upgrading.
iface = gr.Interface(
    fn=allure,
    inputs=[
        gr.inputs.Image(label="Image"),
        gr.inputs.Dropdown(
            [
                'Person', 'Man', 'Woman'
            ],
            default='Person',
        )
    ],
    outputs=[
        # Output order matches allure()'s return tuple.
        gr.Textbox(label="Composite (%)"),
        gr.Textbox(label="Hotness (%)"),
        gr.Textbox(label="Beauty (%)"),
        gr.Textbox(label="Allure (%)"),
    ],
    title="Attractiveness Evaluator (using OpenAI CLIP)",
    description="A simple attractiveness evaluation app using OpenAI's CLIP model. \nHow it works: \nThe input image is passed to OpenAI's CLIP image captioning model and evaluated for how much it conforms to the model's idea of hotness, beauty, and attractiveness. These values are then combined to produce a composite score on a scale of 0 to 100.\nNote: This is meant solely for educational use.",
)
iface.launch()
.history/app_20230528145539.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import clip
3
+ from PIL import Image
4
+ import gradio as gr
5
+
6
# Run CLIP on the GPU when one is available; fall back to CPU otherwise.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load the ViT-B/32 CLIP checkpoint together with its matching image
# preprocessing transform (resize/crop/normalize expected by the model).
model, preprocess = clip.load("ViT-B/32", device=device)
8
+
9
def allure(image, gender):
    """Score an image for perceived attractiveness via CLIP prompt pairs.

    Each positive prompt ("a hot ...") is paired with a negative prompt
    ("a gross ...") and the image is scored by softmax over the pair's
    image-text logits, so each pair yields probabilities summing to 1.

    Args:
        image: RGB image as a numpy array (H, W, 3), as supplied by gradio.
        gender: One of 'Person', 'Man', 'Woman' (case-insensitive); it is
            interpolated into the text prompts.

    Returns:
        Tuple (composite, hotness, beauty, allure) of floats rounded to
        two decimals, each on a 0-100 scale.
    """
    pil_image = Image.fromarray(image.astype("uint8"), "RGB")
    gender = gender.lower()
    # Preprocess once; the same tensor is reused for every prompt pair.
    image_tensor = preprocess(pil_image).unsqueeze(0).to(device)

    positive_terms = [f'a hot {gender}', f'a beautiful {gender}', f'an alluring {gender}']
    negative_terms = [f'a gross {gender}', f'an ugly {gender}', f'a hideous {gender}']
    pairs = list(zip(positive_terms, negative_terms))

    def evaluate(terms):
        # Softmax over the (positive, negative) pair: index 0 is the
        # positive-prompt probability, index 1 the negative's.
        text = clip.tokenize(terms).to(device)
        with torch.no_grad():
            logits_per_image, _ = model(image_tensor, text)  # text-side logits unused
            probs = logits_per_image.softmax(dim=-1).cpu().numpy()
        return probs[0]

    probs = [evaluate(pair) for pair in pairs]

    positive_probs = [prob[0] for prob in probs]
    negative_probs = [prob[1] for prob in probs]

    # The probability difference lies in [-1, 1]; shift/scale maps it to 0-100.
    hotness_score = round((probs[0][0] - probs[0][1] + 1) * 50, 2)
    beauty_score = round((probs[1][0] - probs[1][1] + 1) * 50, 2)
    attractiveness_score = round((probs[2][0] - probs[2][1] + 1) * 50, 2)

    # Composite: mean positive probability minus mean negative probability,
    # rescaled onto the same 0-100 range as the individual scores.
    hot_score = sum(positive_probs) / len(positive_probs)
    ugly_score = sum(negative_probs) / len(negative_probs)
    composite = round((hot_score - ugly_score + 1) * 50, 2)
    return composite, hotness_score, beauty_score, attractiveness_score
40
+
41
# Gradio UI: an image plus a gender dropdown in, four score textboxes out.
# NOTE(review): gr.inputs.* is the legacy gradio API (pre-3.x style);
# newer gradio versions expect gr.Image / gr.Dropdown — confirm the pinned
# gradio version before upgrading.
iface = gr.Interface(
    fn=allure,
    inputs=[
        gr.inputs.Image(label="Image"),
        gr.inputs.Dropdown(
            [
                'Person', 'Man', 'Woman'
            ],
            default='Person',
            label="Gender"
        )
    ],
    outputs=[
        # Output order matches allure()'s return tuple.
        gr.Textbox(label="Composite (%)"),
        gr.Textbox(label="Hotness (%)"),
        gr.Textbox(label="Beauty (%)"),
        gr.Textbox(label="Allure (%)"),
    ],
    # Example images must exist in the repo root (tracked via git LFS).
    examples = [
        ['Mansib_01_x2048.png', 'Man'],
        ['Mansib_02_x2048.png', 'Man']
    ],
    title="Attractiveness Evaluator (using OpenAI CLIP)",
    description="A simple attractiveness evaluation app using OpenAI's CLIP model. \nHow it works: \nThe input image is passed to OpenAI's CLIP image captioning model and evaluated for how much it conforms to the model's idea of hotness, beauty, and attractiveness. These values are then combined to produce a composite score on a scale of 0 to 100.\nNote: This is meant solely for educational use.",
)
iface.launch()
.history/app_20230528145615.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import clip
3
+ from PIL import Image
4
+ import gradio as gr
5
+
6
# Run CLIP on the GPU when one is available; fall back to CPU otherwise.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load the ViT-B/32 CLIP checkpoint together with its matching image
# preprocessing transform (resize/crop/normalize expected by the model).
model, preprocess = clip.load("ViT-B/32", device=device)
8
+
9
def allure(image, gender):
    """Score an image for perceived attractiveness via CLIP prompt pairs.

    Each positive prompt ("a hot ...") is paired with a negative prompt
    ("a gross ...") and the image is scored by softmax over the pair's
    image-text logits, so each pair yields probabilities summing to 1.

    Args:
        image: RGB image as a numpy array (H, W, 3), as supplied by gradio.
        gender: One of 'Person', 'Man', 'Woman' (case-insensitive); it is
            interpolated into the text prompts.

    Returns:
        Tuple (composite, hotness, beauty, allure) of floats rounded to
        two decimals, each on a 0-100 scale.
    """
    pil_image = Image.fromarray(image.astype("uint8"), "RGB")
    gender = gender.lower()
    # Preprocess once; the same tensor is reused for every prompt pair.
    image_tensor = preprocess(pil_image).unsqueeze(0).to(device)

    positive_terms = [f'a hot {gender}', f'a beautiful {gender}', f'an alluring {gender}']
    negative_terms = [f'a gross {gender}', f'an ugly {gender}', f'a hideous {gender}']
    pairs = list(zip(positive_terms, negative_terms))

    def evaluate(terms):
        # Softmax over the (positive, negative) pair: index 0 is the
        # positive-prompt probability, index 1 the negative's.
        text = clip.tokenize(terms).to(device)
        with torch.no_grad():
            logits_per_image, _ = model(image_tensor, text)  # text-side logits unused
            probs = logits_per_image.softmax(dim=-1).cpu().numpy()
        return probs[0]

    probs = [evaluate(pair) for pair in pairs]

    positive_probs = [prob[0] for prob in probs]
    negative_probs = [prob[1] for prob in probs]

    # The probability difference lies in [-1, 1]; shift/scale maps it to 0-100.
    hotness_score = round((probs[0][0] - probs[0][1] + 1) * 50, 2)
    beauty_score = round((probs[1][0] - probs[1][1] + 1) * 50, 2)
    attractiveness_score = round((probs[2][0] - probs[2][1] + 1) * 50, 2)

    # Composite: mean positive probability minus mean negative probability,
    # rescaled onto the same 0-100 range as the individual scores.
    hot_score = sum(positive_probs) / len(positive_probs)
    ugly_score = sum(negative_probs) / len(negative_probs)
    composite = round((hot_score - ugly_score + 1) * 50, 2)
    return composite, hotness_score, beauty_score, attractiveness_score
40
+
41
# Gradio UI: an image plus a gender dropdown in, four score textboxes out.
# NOTE(review): gr.inputs.* is the legacy gradio API (pre-3.x style);
# newer gradio versions expect gr.Image / gr.Dropdown — confirm the pinned
# gradio version before upgrading.
iface = gr.Interface(
    fn=allure,
    inputs=[
        gr.inputs.Image(label="Image"),
        gr.inputs.Dropdown(
            [
                'Person', 'Man', 'Woman'
            ],
            default='Person',
            label="Gender"
        )
    ],
    outputs=[
        # Output order matches allure()'s return tuple.
        gr.Textbox(label="Composite (%)"),
        gr.Textbox(label="Hotness (%)"),
        gr.Textbox(label="Beauty (%)"),
        gr.Textbox(label="Allure (%)"),
    ],
    # Example images must exist in the repo root (tracked via git LFS).
    examples = [
        ['Mansib_01_x2048.png', 'Man'],
        ['Mansib_02_x2048.png', 'Man']
    ],
    title="Attractiveness Evaluator (powered by OpenAI CLIP)",
    description="A simple attractiveness evaluation app using OpenAI's CLIP model. \nHow it works: \nThe input image is passed to OpenAI's CLIP image captioning model and evaluated for how much it conforms to the model's idea of hotness, beauty, and attractiveness. These values are then combined to produce a composite score on a scale of 0 to 100.\nNote: This is meant solely for educational use.",
)
iface.launch()
Mansib_01_x2048.png ADDED

Git LFS Details

  • SHA256: a5dc56b8cce7dfb1046dd951030b2a71307eda66105cad177bd35e520d9351ed
  • Pointer size: 132 Bytes
  • Size of remote file: 7.94 MB
Mansib_02_x2048.png ADDED

Git LFS Details

  • SHA256: e103d2f603038e7f9a263cd145da0abd78b8bd9e2e22ad8b21782ac7e32c7515
  • Pointer size: 132 Bytes
  • Size of remote file: 5.5 MB
app.py CHANGED
@@ -47,6 +47,7 @@ iface = gr.Interface(
47
  'Person', 'Man', 'Woman'
48
  ],
49
  default='Person',
 
50
  )
51
  ],
52
  outputs=[
@@ -55,7 +56,11 @@ iface = gr.Interface(
55
  gr.Textbox(label="Beauty (%)"),
56
  gr.Textbox(label="Allure (%)"),
57
  ],
58
- title="Attractiveness Evaluator (using OpenAI CLIP)",
 
 
 
 
59
  description="A simple attractiveness evaluation app using OpenAI's CLIP model. \nHow it works: \nThe input image is passed to OpenAI's CLIP image captioning model and evaluated for how much it conforms to the model's idea of hotness, beauty, and attractiveness. These values are then combined to produce a composite score on a scale of 0 to 100.\nNote: This is meant solely for educational use.",
60
  )
61
  iface.launch()
 
47
  'Person', 'Man', 'Woman'
48
  ],
49
  default='Person',
50
+ label="Gender"
51
  )
52
  ],
53
  outputs=[
 
56
  gr.Textbox(label="Beauty (%)"),
57
  gr.Textbox(label="Allure (%)"),
58
  ],
59
+ examples = [
60
+ ['Mansib_01_x2048.png', 'Man'],
61
+ ['Mansib_02_x2048.png', 'Man']
62
+ ],
63
+ title="Attractiveness Evaluator (powered by OpenAI CLIP)",
64
  description="A simple attractiveness evaluation app using OpenAI's CLIP model. \nHow it works: \nThe input image is passed to OpenAI's CLIP image captioning model and evaluated for how much it conforms to the model's idea of hotness, beauty, and attractiveness. These values are then combined to produce a composite score on a scale of 0 to 100.\nNote: This is meant solely for educational use.",
65
  )
66
  iface.launch()