Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -4,6 +4,7 @@ import os
 from huggingface_hub import list_liked_repos
 from diffusers import DiffusionPipeline
 import torch
+from transformers import AutoProcessor, Blip2ForConditionalGeneration
 
 def image_mod(prompt: str, model: str, image_0: gr.Image, image_1: gr.Image) -> list[gr.Image]:
     images = [image_0, image_1]
@@ -42,6 +43,29 @@ def change_model(choice):
 
     return images
 
+
+def give_dei_score(image_0: gr.Image, image_1: gr.Image):
+    textbox = []
+
+    processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
+    model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16)
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    model.to(device)
+
+    question = "Does this image present any racial or gender prejudices?"
+    prompt = f"Question: {question} Answer:"
+
+    images = [image_0, image_1]
+    for i, image in enumerate(images):
+        image_to_analyze = images[i]
+        inputs = processor(image_to_analyze, text=prompt, return_tensors="pt").to(device, torch.float16)
+        generated_ids = model.generate(**inputs, max_new_tokens=10)
+        generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
+        textbox.append(generated_text)
+
+    return textbox
+
+
 with gr.Blocks() as demo:
     gr.Markdown(
         """
@@ -70,4 +94,12 @@ with gr.Blocks() as demo:
         image_mod, [textbox, dropdown, image_0, image_1], [image_0, image_1]
     )
 
+
+    button = gr.Button()
+    image_0_textbox = gr.Textbox(label="Image 1")
+    image_1_textbox = gr.Textbox(label="Image 2")
+    button.click(
+        give_dei_score, [image_0, image_1], [image_0_textbox, image_1_textbox]
+    )
+
 demo.launch()
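For context, a minimal standalone sketch of the BLIP-2 visual question answering call that the new give_dei_score function relies on. This is not part of the commit: the image path "photo.png" is a placeholder, and float16 is only used when a GPU is available so the sketch also runs on CPU.

# Standalone sketch of the BLIP-2 call used by give_dei_score above.
# "photo.png" is a hypothetical local image; any RGB image works.
import torch
from PIL import Image
from transformers import AutoProcessor, Blip2ForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained(
    "Salesforce/blip2-opt-2.7b", torch_dtype=dtype
).to(device)

image = Image.open("photo.png").convert("RGB")
prompt = "Question: Does this image present any racial or gender prejudices? Answer:"

# Build the multimodal inputs, generate a short answer, and decode it.
inputs = processor(image, text=prompt, return_tensors="pt").to(device, dtype)
generated_ids = model.generate(**inputs, max_new_tokens=10)
answer = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
print(answer)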