Spaces: Running on Zero

Commit f7a6c5f · Parent: c9bc111
Adding port from museum

Files changed:
- model/fetch_museum_results/__init__.py +58 -0
- model/fetch_museum_results/imagen_museum/__init__.py +129 -0
- model/model_manager.py +88 -1
- serve/gradio_web.py +40 -1
- serve/gradio_web_image_editing.py +55 -0
- serve/vote_utils.py +363 -16
model/fetch_museum_results/__init__.py
ADDED
@@ -0,0 +1,58 @@
+from .imagen_museum import TASK_DICT, DOMAIN
+from .imagen_museum import fetch_indexes
+import random
+
+ARENA_TO_IG_MUSEUM = {"LCM(v1.5/XL)": "LCM",
+                      "PlayGroundV2.5": "PlayGroundV2_5"}
+
+def draw2_from_imagen_museum(task, model_name1, model_name2):
+    task_name = TASK_DICT[task]
+    model_name1 = ARENA_TO_IG_MUSEUM[model_name1] if model_name1 in ARENA_TO_IG_MUSEUM else model_name1
+    model_name2 = ARENA_TO_IG_MUSEUM[model_name2] if model_name2 in ARENA_TO_IG_MUSEUM else model_name2
+
+    domain = DOMAIN
+    baselink = domain + task_name
+
+    matched_results = fetch_indexes(baselink)
+    r = random.Random()
+    uid, value = r.choice(list(matched_results.items()))
+    image_link_1 = baselink + "/" + model_name1 + "/" + uid
+    image_link_2 = baselink + "/" + model_name2 + "/" + uid
+
+    if task == "t2i":  # Image Gen
+        prompt = value['prompt']
+        return [[image_link_1, image_link_2], [prompt]]
+    if task == "tie":  # Image Edit
+        instruction = value['instruction']
+        input_caption = value['source_global_caption']
+        output_caption = value['target_global_caption']
+        source_image_link = baselink + "/" + "input" + "/" + uid
+        return [[source_image_link, image_link_1, image_link_2], [input_caption, output_caption, instruction]]
+    else:
+        raise ValueError("Task not supported")
+
+def draw_from_imagen_museum(task, model_name):
+    task_name = TASK_DICT[task]
+    model_name = ARENA_TO_IG_MUSEUM[model_name] if model_name in ARENA_TO_IG_MUSEUM else model_name
+
+    domain = DOMAIN
+    baselink = domain + task_name
+
+    matched_results = fetch_indexes(baselink)
+    r = random.Random()
+    uid, value = r.choice(list(matched_results.items()))
+    model = model_name
+    image_link = baselink + "/" + model + "/" + uid
+    print(image_link)
+
+    if task == "t2i":  # Image Gen
+        prompt = value['prompt']
+        return [image_link, prompt]
+    if task == "tie":  # Image Edit
+        instruction = value['instruction']
+        input_caption = value['source_global_caption']
+        output_caption = value['target_global_caption']
+        source_image_link = baselink + "/" + "input" + "/" + uid
+        return [[source_image_link, image_link], [input_caption, output_caption, instruction]]
+    else:
+        raise ValueError("Task not supported")
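
Taken together, these two helpers are the sampling interface this commit adds: pick one random uid from a Museum task page and return pre-rendered image links plus the matching prompt metadata. A minimal usage sketch, assuming the package layout above and whatever samples the Museum site currently hosts:

    from model.fetch_museum_results import draw_from_imagen_museum, draw2_from_imagen_museum

    # Single model, text-guided generation: returns [image_link, prompt]
    image_link, prompt = draw_from_imagen_museum("t2i", "SDXL")

    # Two models sharing one sampled uid: returns [[link_1, link_2], [prompt]]
    links, prompts = draw2_from_imagen_museum("t2i", "SDXL", "OpenJourney")

    # Text-guided editing adds the source image link and three captions:
    # [[source_link, link_1, link_2], [input_caption, output_caption, instruction]]
    links, captions = draw2_from_imagen_museum("tie", "MagicBrush", "CycleDiffusion")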
model/fetch_museum_results/imagen_museum/__init__.py
ADDED
@@ -0,0 +1,129 @@
+import csv
+import requests
+from io import StringIO
+from typing import Union, Optional, Tuple
+from PIL import Image
+import random
+
+__version__ = "0.0.1_GenAI_Arena"
+
+DOMAIN = "https://chromaica.github.io/Museum/"
+
+TASK_DICT = {
+    "t2i": "ImagenHub_Text-Guided_IG",
+    "tie": "ImagenHub_Text-Guided_IE",
+    "mie": "ImagenHub_Control-Guided_IG",
+    "cig": "ImagenHub_Control-Guided_IE",
+    "msdig": "ImagenHub_Multi-Concept_IC",
+    "sdig": "ImagenHub_Subject-Driven_IG",
+    "sdie": "ImagenHub_Subject-Driven_IE"
+}
+
+t2i_models = [
+    "SD",
+    "SDXL",
+    "OpenJourney",
+    "DeepFloydIF",
+    "DALLE2"
+]
+
+mie_models = [
+    "Glide",
+    "SDInpaint",
+    "BlendedDiffusion",
+    "SDXLInpaint"
+]
+
+tie_models = [
+    "DiffEdit",
+    "MagicBrush",
+    "InstructPix2Pix",
+    "Prompt2prompt",
+    "Text2Live",
+    "SDEdit",
+    "CycleDiffusion",
+    "Pix2PixZero"
+]
+
+sdig_models = [
+    "DreamBooth",
+    "DreamBoothLora",
+    "TextualInversion",
+    "BLIPDiffusion_Gen"
+]
+
+sdie_models = [
+    "PhotoSwap",
+    "DreamEdit",
+    "BLIPDiffusion_Edit"
+]
+
+msdig_models = [
+    "DreamBooth",
+    "CustomDiffusion",
+    "TextualInversion"
+]
+
+cig_models = [
+    "ControlNet",
+    "UniControl"
+]
+
+def fetch_csv_keys(url):
+    """
+    Fetches a CSV file from a given URL and parses it into a list of keys,
+    ignoring the header line.
+    """
+    response = requests.get(url)
+    response.raise_for_status()  # Ensure we notice bad responses
+
+    # Use StringIO to turn the fetched text data into a file-like object
+    csv_file = StringIO(response.text)
+
+    # Create a CSV reader
+    csv_reader = csv.reader(csv_file)
+
+    # Skip the header
+    next(csv_reader, None)
+
+    # Return the list of keys
+    return [row[0] for row in csv_reader if row]
+
+def fetch_json_data(url):
+    """
+    Fetches JSON data from a given URL.
+    """
+    response = requests.get(url)
+    response.raise_for_status()
+    return response.json()
+
+def fetch_data_and_match(csv_url, json_url):
+    """
+    Fetches a list of keys from a CSV and then fetches JSON data and matches the keys to the JSON.
+    """
+    # Fetch keys from CSV
+    keys = fetch_csv_keys(csv_url)
+
+    # Fetch JSON data
+    json_data = fetch_json_data(json_url)
+
+    # Extract relevant data using keys
+    matched_data = {key: json_data.get(key) for key in keys if key in json_data}
+
+    return matched_data
+
+def fetch_indexes(baselink):
+    matched_results = fetch_data_and_match(baselink+"/dataset_lookup.csv", baselink+"/dataset_lookup.json")
+    return matched_results
+
+if __name__ == "__main__":
+    domain = "https://chromaica.github.io/Museum/"
+    baselink = domain + "ImagenHub_Text-Guided_IE"
+    matched_results = fetch_indexes(baselink)
+    for uid, value in matched_results.items():
+        print(uid)
+        model = "CycleDiffusion"
+        image_link = baselink + "/" + model + "/" + uid
+        print(image_link)
+        instruction = value['instruction']
+        print(instruction)
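
The fetch helpers assume a simple publishing convention: each task directory exposes a dataset_lookup.csv whose first column lists sample uids, and a dataset_lookup.json mapping each uid to its metadata; fetch_data_and_match keeps only uids present in both. A short sketch for the text-guided generation task, complementing the __main__ demo above (the 'prompt' field is the one read by draw_from_imagen_museum for t2i):

    from model.fetch_museum_results.imagen_museum import DOMAIN, TASK_DICT, fetch_indexes

    baselink = DOMAIN + TASK_DICT["t2i"]
    matched = fetch_indexes(baselink)        # {uid: metadata_dict}
    uid, meta = next(iter(matched.items()))
    print(baselink + "/SDXL/" + uid)         # link to one model's pre-rendered output
    print(meta["prompt"])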
model/model_manager.py
CHANGED
@@ -6,6 +6,7 @@ import io, base64, json
 import spaces
 from PIL import Image
 from .models import IMAGE_GENERATION_MODELS, IMAGE_EDITION_MODELS, VIDEO_GENERATION_MODELS, load_pipeline
+from .fetch_museum_results import draw_from_imagen_museum, draw2_from_imagen_museum
 
 class ModelManager:
     def __init__(self):
@@ -33,6 +34,15 @@ class ModelManager:
         result = pipe(prompt=prompt)
         return result
 
+    def generate_image_ig_museum(self, model_name):
+        model_name = model_name.split('_')[1]
+        result_list = draw_from_imagen_museum("t2i", model_name)
+        image_link = result_list[0]
+        prompt = result_list[1]
+
+        return image_link, prompt
+
+
     def generate_image_ig_parallel_anony(self, prompt, model_A, model_B):
         if model_A == "" and model_B == "":
             model_names = random.sample([model for model in self.model_ig_list], 2)
@@ -45,6 +55,21 @@ class ModelManager:
         results = [future.result() for future in futures]
         return results[0], results[1], model_names[0], model_names[1]
 
+    def generate_image_ig_museum_parallel_anony(self, model_A, model_B):
+        if model_A == "" and model_B == "":
+            model_names = random.sample([model for model in self.model_ig_list], 2)
+        else:
+            model_names = [model_A, model_B]
+
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            model_1 = model_names[0].split('_')[1]
+            model_2 = model_names[1].split('_')[1]
+            result_list = draw2_from_imagen_museum("t2i", model_1, model_2)
+            image_links = result_list[0]
+            prompt_list = result_list[1]
+
+        return image_links[0], image_links[1], model_names[0], model_names[1], prompt_list[0]
+
     def generate_image_ig_parallel(self, prompt, model_A, model_B):
         model_names = [model_A, model_B]
         with concurrent.futures.ThreadPoolExecutor() as executor:
@@ -53,12 +78,31 @@ class ModelManager:
         results = [future.result() for future in futures]
         return results[0], results[1]
 
+    def generate_image_ig_museum_parallel(self, model_A, model_B):
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            model_1 = model_A.split('_')[1]
+            model_2 = model_B.split('_')[1]
+            result_list = draw2_from_imagen_museum("t2i", model_1, model_2)
+            image_links = result_list[0]
+            prompt_list = result_list[1]
+            return image_links[0], image_links[1], prompt_list[0]
+
+
     @spaces.GPU(duration=200)
     def generate_image_ie(self, textbox_source, textbox_target, textbox_instruct, source_image, model_name):
         pipe = self.load_model_pipe(model_name)
         result = pipe(src_image = source_image, src_prompt = textbox_source, target_prompt = textbox_target, instruct_prompt = textbox_instruct)
         return result
 
+    def generate_image_ie_museum(self, model_name):
+        model_name = model_name.split('_')[1]
+        result_list = draw_from_imagen_museum("tie", model_name)
+        image_links = result_list[0]
+        prompt_list = result_list[1]
+        # image_links = [src, model]
+        # prompt_list = [source_caption, target_caption, instruction]
+        return image_links[0], image_links[1], prompt_list[0], prompt_list[1], prompt_list[2]
+
     def generate_image_ie_parallel(self, textbox_source, textbox_target, textbox_instruct, source_image, model_A, model_B):
         model_names = [model_A, model_B]
         with concurrent.futures.ThreadPoolExecutor() as executor:
@@ -68,6 +112,18 @@ class ModelManager:
         results = [future.result() for future in futures]
         return results[0], results[1]
 
+    def generate_image_ie_museum_parallel(self, model_A, model_B):
+        model_names = [model_A, model_B]
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            model_1 = model_names[0].split('_')[1]
+            model_2 = model_names[1].split('_')[1]
+            result_list = draw2_from_imagen_museum("tie", model_1, model_2)
+            image_links = result_list[0]
+            prompt_list = result_list[1]
+        # image_links = [src, model_A, model_B]
+        # prompt_list = [source_caption, target_caption, instruction]
+        return image_links[0], image_links[1], image_links[2], prompt_list[0], prompt_list[1], prompt_list[2]
+
     def generate_image_ie_parallel_anony(self, textbox_source, textbox_target, textbox_instruct, source_image, model_A, model_B):
         if model_A == "" and model_B == "":
             model_names = random.sample([model for model in self.model_ie_list], 2)
@@ -78,6 +134,21 @@ class ModelManager:
         results = [future.result() for future in futures]
         return results[0], results[1], model_names[0], model_names[1]
 
+    def generate_image_ie_museum_parallel_anony(self, model_A, model_B):
+        if model_A == "" and model_B == "":
+            model_names = random.sample([model for model in self.model_ie_list], 2)
+        else:
+            model_names = [model_A, model_B]
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            model_1 = model_names[0].split('_')[1]
+            model_2 = model_names[1].split('_')[1]
+            result_list = draw2_from_imagen_museum("tie", model_1, model_2)
+            image_links = result_list[0]
+            prompt_list = result_list[1]
+        # image_links = [src, model_A, model_B]
+        # prompt_list = [source_caption, target_caption, instruction]
+        return image_links[0], image_links[1], image_links[2], prompt_list[0], prompt_list[1], prompt_list[2], model_names[0], model_names[1]
+
     @spaces.GPU(duration=120)
     def generate_video_vg(self, prompt, model_name):
         pipe = self.load_model_pipe(model_name)
@@ -89,6 +160,9 @@ class ModelManager:
         result = pipe(prompt=prompt)
         return result
 
+    def generate_video_vg_museum(self, model_name):
+        raise NotImplementedError
+
     def generate_video_vg_parallel_anony(self, prompt, model_A, model_B):
         if model_A == "" and model_B == "":
             model_names = random.sample([model for model in self.model_vg_list], 2)
@@ -101,10 +175,23 @@ class ModelManager:
         results = [future.result() for future in futures]
         return results[0], results[1], model_names[0], model_names[1]
 
+    def generate_video_vg_museum_parallel_anony(self, model_A, model_B):
+        if model_A == "" and model_B == "":
+            model_names = random.sample([model for model in self.model_vg_list], 2)
+        else:
+            model_names = [model_A, model_B]
+
+        raise NotImplementedError
+
     def generate_video_vg_parallel(self, prompt, model_A, model_B):
         model_names = [model_A, model_B]
         with concurrent.futures.ThreadPoolExecutor() as executor:
             futures = [executor.submit(self.generate_video_vg, prompt, model) if model.startswith("videogenhub")
                        else executor.submit(self.generate_video_vg_api, prompt, model) for model in model_names]
         results = [future.result() for future in futures]
-        return results[0], results[1]
+        return results[0], results[1]
+
+    def generate_video_vg_museum_parallel(self, model_A, model_B):
+        model_names = [model_A, model_B]
+
+        raise NotImplementedError
serve/gradio_web.py
CHANGED
@@ -9,8 +9,11 @@ from .vote_utils import (
     bothbad_vote_last_response_igm as bothbad_vote_last_response,
     share_click_igm as share_click,
     generate_ig,
+    generate_ig_museum,
     generate_igm,
+    generate_igm_museum,
     generate_igm_annoy,
+    generate_igm_annoy_museum,
     share_js
 )
 from functools import partial
@@ -37,6 +40,7 @@ Find out who is the 🥇conditional image generation models! More models are goi
     state0 = gr.State()
     state1 = gr.State()
     gen_func = partial(generate_igm_annoy, models.generate_image_ig_parallel_anony)
+    gen_func_random = partial(generate_igm_annoy_museum, models.generate_image_ig_museum_parallel_anony)
 
     gr.Markdown(notice_markdown, elem_id="notice_markdown")
 
@@ -78,6 +82,7 @@ Find out who is the 🥇conditional image generation models! More models are goi
                 elem_id="input_box",
             )
             send_btn = gr.Button(value="Send", variant="primary", scale=0)
+            draw_btn = gr.Button(value="🎲 Random sample", variant="primary", scale=0)
 
     with gr.Row():
         clear_btn = gr.Button(value="🎲 New Round", interactive=False)
@@ -117,6 +122,16 @@ Find out who is the 🥇conditional image generation models! More models are goi
         inputs=None,
         outputs=btn_list
     )
+    draw_btn.click(
+        gen_func_random,
+        inputs=[state0, state1, model_selector_left, model_selector_right],
+        outputs=[state0, state1, chatbot_left, chatbot_right, textbox, model_selector_left, model_selector_right],
+        api_name="draw_btn_annony"
+    ).then(
+        enable_buttons_side_by_side,
+        inputs=None,
+        outputs=btn_list
+    )
 
     clear_btn.click(
         clear_history_side_by_side_anony,
@@ -189,6 +204,7 @@ def build_side_by_side_ui_named(models):
     state1 = gr.State()
     anony = False
     gen_func = partial(generate_igm, models.generate_image_ig_parallel)
+    gen_func_random = partial(generate_igm_museum, models.generate_image_ig_museum_parallel)
     gr.Markdown(notice_markdown, elem_id="notice_markdown")
 
     with gr.Group(elem_id="share-region-named"):
@@ -240,6 +256,7 @@ def build_side_by_side_ui_named(models):
                 elem_id="input_box"
             )
             send_btn = gr.Button(value="Send", variant="primary", scale=0)
+            draw_btn = gr.Button(value="🎲 Random sample", variant="primary", scale=0)
 
     with gr.Row():
         clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
@@ -282,6 +299,16 @@ def build_side_by_side_ui_named(models):
         inputs=None,
         outputs=btn_list
     )
+    draw_btn.click(
+        gen_func_random,
+        inputs=[state0, state1, model_selector_left, model_selector_right],
+        outputs=[state0, state1, chatbot_left, chatbot_right, textbox],
+        api_name="draw_side_by_side"
+    ).then(
+        enable_buttons_side_by_side,
+        inputs=None,
+        outputs=btn_list
+    )
     regenerate_btn.click(
         gen_func,
         inputs=[state0, state1, textbox, model_selector_left, model_selector_right],
@@ -350,6 +377,7 @@ def build_single_model_ui(models, add_promotion_links=False):
 
     state = gr.State()
     gen_func = partial(generate_ig, models.generate_image_ig)
+    gen_func_random = partial(generate_ig_museum, models.generate_image_ig_museum)
     gr.Markdown(notice_markdown, elem_id="notice_markdown")
 
     model_list = models.model_ig_list
@@ -379,6 +407,7 @@ def build_single_model_ui(models, add_promotion_links=False):
         )
 
         send_btn = gr.Button(value="Send", variant="primary", scale=0)
+        draw_btn = gr.Button(value="🎲 Random sample", variant="primary", scale=0)
 
     with gr.Row():
         chatbot = gr.Image()
@@ -431,7 +460,17 @@ def build_single_model_ui(models, add_promotion_links=False):
         inputs=None,
         outputs=btn_list
     )
-
+    draw_btn.click(
+        gen_func_random,
+        inputs=[state, model_selector],
+        outputs=[state, chatbot, textbox],
+        api_name="draw_btn_single",
+        show_progress = "full"
+    ).success(
+        enable_buttons,
+        inputs=None,
+        outputs=btn_list
+    )
     upvote_btn.click(
         upvote_last_response,
         inputs=[state, model_selector],
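
One wiring constraint to keep in mind: each draw_btn.click outputs list must line up, position for position, with the tuple yielded by the paired generator in serve/vote_utils.py. A schematic of the anonymous battle case (component names as wired above):

    # yielded by generate_igm_annoy_museum        -> bound output component
    # state0, state1                              -> state0, state1
    # generated_image0, generated_image1          -> chatbot_left, chatbot_right
    # text                                        -> textbox
    # gr.Markdown("### Model A: ..."), ("... B")  -> model_selector_left, model_selector_right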
serve/gradio_web_image_editing.py
CHANGED
@@ -9,8 +9,11 @@ from .vote_utils import (
     bothbad_vote_last_response_iem as bothbad_vote_last_response,
     share_click_iem as share_click,
     generate_ie,
+    generate_ie_museum,
     generate_iem,
+    generate_iem_museum,
     generate_iem_annoy,
+    generate_iem_annoy_museum,
     share_js
 )
 from functools import partial
@@ -40,6 +43,7 @@ Find out who is the 🥇conditional image edition models!
 
     state0, state1 = gr.State(), gr.State()
     gen_func = partial(generate_iem_annoy, models.generate_image_ie_parallel_anony)
+    gen_func_random = partial(generate_iem_annoy_museum, models.generate_image_ie_museum_parallel_anony)
     gr.Markdown(notice_markdown, elem_id="notice_markdown")
 
     with gr.Group(elem_id="share-region-anony"):
@@ -93,6 +97,7 @@ Find out who is the 🥇conditional image edition models!
     with gr.Row():
         source_image = gr.Image(type="pil")
         send_btn = gr.Button(value="Send", variant="primary", scale=0)
+        draw_btn = gr.Button(value="🎲 Random sample", variant="primary", scale=0)
 
     with gr.Row() as button_row:
         clear_btn = gr.Button(value="🎲 New Round", interactive=False)
@@ -138,6 +143,23 @@ Find out who is the 🥇conditional image edition models!
         inputs=None,
         outputs=btn_list
     )
+    draw_btn.click(
+        gen_func_random,
+        inputs=[
+            state0,
+            state1,
+            model_selector_left,
+            model_selector_right
+        ],
+        outputs=[state0, state1, chatbot_left, chatbot_right,
+                 source_image, textbox_source, textbox_target, textbox_instruct,
+                 model_selector_left, model_selector_right],
+        api_name="draw_btn_side_by_side_anony"
+    ).then(
+        enable_buttons_side_by_side,
+        inputs=None,
+        outputs=btn_list
+    )
     regenerate_btn.click(
         gen_func,
         inputs=[
@@ -219,6 +241,7 @@ def build_side_by_side_ui_named_ie(models):
 
     state0, state1 = gr.State(), gr.State()
     gen_func = partial(generate_iem, models.generate_image_ie_parallel)
+    gen_func_random = partial(generate_iem_museum, models.generate_image_ie_museum_parallel)
     gr.Markdown(notice_markdown, elem_id="notice_markdown")
 
     with gr.Group(elem_id="share-region-named"):
@@ -283,6 +306,7 @@ def build_side_by_side_ui_named_ie(models):
     with gr.Row():
         source_image = gr.Image(type="pil")
         send_btn = gr.Button(value="Send", variant="primary", scale=0)
+        draw_btn = gr.Button(value="🎲 Random sample", variant="primary", scale=0)
 
     with gr.Row() as button_row:
         clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
@@ -334,6 +358,23 @@ def build_side_by_side_ui_named_ie(models):
         inputs=None,
         outputs=btn_list
     )
+    draw_btn.click(
+        gen_func_random,
+        inputs=[
+            state0,
+            state1,
+            model_selector_left,
+            model_selector_right
+        ],
+        outputs=[state0, state1, chatbot_left, chatbot_right,
+                 source_image, textbox_source, textbox_target, textbox_instruct
+                 ],
+        api_name="draw_btn_side_by_side"
+    ).then(
+        enable_buttons_side_by_side,
+        inputs=None,
+        outputs=btn_list
+    )
     regenerate_btn.click(
         gen_func,
         inputs=[
@@ -409,6 +450,7 @@ def build_single_model_ui_ie(models, add_promotion_links=False):
 
     state = gr.State()
     gen_func = partial(generate_ie, models.generate_image_ie)
+    gen_func_random = partial(generate_ie_museum, models.generate_image_ie_museum)
     model_list = models.model_ie_list
 
     with gr.Row(elem_id="model_selector_row"):
@@ -448,6 +490,7 @@ def build_single_model_ui_ie(models, add_promotion_links=False):
     with gr.Row():
         source_image = gr.Image(type="pil")
         send_btn = gr.Button(value="Send", variant="primary", scale=0)
+        draw_btn = gr.Button(value="🎲 Random sample", variant="primary", scale=0)
 
     with gr.Row():
         chatbot = gr.Image()
@@ -493,7 +536,19 @@ def build_single_model_ui_ie(models, add_promotion_links=False):
         inputs=None,
         outputs=btn_list
     )
+    draw_btn.click(
+        gen_func_random,
+        inputs=[state, model_selector],
+        outputs=[state, chatbot, source_image, textbox_source, textbox_target, textbox_instruct],
+        api_name="send_btn_single",
+        show_progress = "full"
+    ).then(
+        enable_buttons,
+        inputs=None,
+        outputs=btn_list
+    )
 
+
     vote_btns = [upvote_btn, downvote_btn, flag_btn]
     model_inputs = [textbox_source, textbox_instruct, source_image, textbox_target]
     upvote_btn.click(
serve/vote_utils.py
CHANGED
|
@@ -9,6 +9,7 @@ from .utils import *
|
|
| 9 |
from .log_utils import build_logger
|
| 10 |
from .constants import IMAGE_DIR, VIDEO_DIR
|
| 11 |
import imageio
|
|
|
|
| 12 |
|
| 13 |
ig_logger = build_logger("gradio_web_server_image_generation", "gr_web_image_generation.log") # ig = image generation, loggers for single model direct chat
|
| 14 |
igm_logger = build_logger("gradio_web_server_image_generation_multi", "gr_web_image_generation_multi.log") # igm = image generation multi, loggers for side-by-side and battle
|
|
@@ -17,6 +18,13 @@ iem_logger = build_logger("gradio_web_server_image_editing_multi", "gr_web_image
|
|
| 17 |
vg_logger = build_logger("gradio_web_server_video_generation", "gr_web_video_generation.log") # vg = video generation, loggers for single model direct chat
|
| 18 |
vgm_logger = build_logger("gradio_web_server_video_generation_multi", "gr_web_video_generation_multi.log") # vgm = video generation multi, loggers for side-by-side and battle
|
| 19 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
def vote_last_response_ig(state, vote_type, model_selector, request: gr.Request):
|
| 21 |
with open(get_conv_log_filename(), "a") as fout:
|
| 22 |
data = {
|
|
@@ -30,7 +38,7 @@ def vote_last_response_ig(state, vote_type, model_selector, request: gr.Request)
|
|
| 30 |
append_json_item_on_log_server(data, get_conv_log_filename())
|
| 31 |
output_file = f'{IMAGE_DIR}/generation/{state.conv_id}.jpg'
|
| 32 |
with open(output_file, 'w') as f:
|
| 33 |
-
state.output
|
| 34 |
save_image_file_on_log_server(output_file)
|
| 35 |
|
| 36 |
def vote_last_response_igm(states, vote_type, model_selectors, request: gr.Request):
|
|
@@ -47,7 +55,7 @@ def vote_last_response_igm(states, vote_type, model_selectors, request: gr.Reque
|
|
| 47 |
for state in states:
|
| 48 |
output_file = f'{IMAGE_DIR}/generation/{state.conv_id}.jpg'
|
| 49 |
with open(output_file, 'w') as f:
|
| 50 |
-
state.output
|
| 51 |
save_image_file_on_log_server(output_file)
|
| 52 |
|
| 53 |
def vote_last_response_ie(state, vote_type, model_selector, request: gr.Request):
|
|
@@ -64,9 +72,9 @@ def vote_last_response_ie(state, vote_type, model_selector, request: gr.Request)
|
|
| 64 |
output_file = f'{IMAGE_DIR}/edition/{state.conv_id}.jpg'
|
| 65 |
source_file = f'{IMAGE_DIR}/edition/{state.conv_id}_source.jpg'
|
| 66 |
with open(output_file, 'w') as f:
|
| 67 |
-
state.output
|
| 68 |
with open(source_file, 'w') as sf:
|
| 69 |
-
state.source_image
|
| 70 |
save_image_file_on_log_server(output_file)
|
| 71 |
save_image_file_on_log_server(source_file)
|
| 72 |
|
|
@@ -85,9 +93,9 @@ def vote_last_response_iem(states, vote_type, model_selectors, request: gr.Reque
|
|
| 85 |
output_file = f'{IMAGE_DIR}/edition/{state.conv_id}.jpg'
|
| 86 |
source_file = f'{IMAGE_DIR}/edition/{state.conv_id}_source.jpg'
|
| 87 |
with open(output_file, 'w') as f:
|
| 88 |
-
state.output
|
| 89 |
with open(source_file, 'w') as sf:
|
| 90 |
-
state.source_image
|
| 91 |
save_image_file_on_log_server(output_file)
|
| 92 |
save_image_file_on_log_server(source_file)
|
| 93 |
|
|
@@ -522,9 +530,47 @@ def generate_ig(gen_func, state, text, model_name, request: gr.Request):
|
|
| 522 |
output_file = f'{IMAGE_DIR}/generation/{state.conv_id}.jpg'
|
| 523 |
os.makedirs(os.path.dirname(output_file), exist_ok=True)
|
| 524 |
with open(output_file, 'w') as f:
|
| 525 |
-
state.output
|
| 526 |
save_image_file_on_log_server(output_file)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 527 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 528 |
def generate_igm(gen_func, state0, state1, text, model_name0, model_name1, request: gr.Request):
|
| 529 |
if not text:
|
| 530 |
raise gr.Warning("Prompt cannot be empty.")
|
|
@@ -585,9 +631,71 @@ def generate_igm(gen_func, state0, state1, text, model_name0, model_name1, reque
|
|
| 585 |
output_file = f'{IMAGE_DIR}/generation/{state.conv_id}.jpg'
|
| 586 |
os.makedirs(os.path.dirname(output_file), exist_ok=True)
|
| 587 |
with open(output_file, 'w') as f:
|
| 588 |
-
state.output
|
| 589 |
save_image_file_on_log_server(output_file)
|
| 590 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 591 |
def generate_igm_annoy(gen_func, state0, state1, text, model_name0, model_name1, request: gr.Request):
|
| 592 |
if not text:
|
| 593 |
raise gr.Warning("Prompt cannot be empty.")
|
|
@@ -644,9 +752,65 @@ def generate_igm_annoy(gen_func, state0, state1, text, model_name0, model_name1,
|
|
| 644 |
output_file = f'{IMAGE_DIR}/generation/{state.conv_id}.jpg'
|
| 645 |
os.makedirs(os.path.dirname(output_file), exist_ok=True)
|
| 646 |
with open(output_file, 'w') as f:
|
| 647 |
-
state.output
|
| 648 |
save_image_file_on_log_server(output_file)
|
| 649 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 650 |
|
| 651 |
def generate_ie(gen_func, state, source_text, target_text, instruct_text, source_image, model_name, request: gr.Request):
|
| 652 |
if not source_text:
|
|
@@ -694,13 +858,59 @@ def generate_ie(gen_func, state, source_text, target_text, instruct_text, source
|
|
| 694 |
src_img_file = f'{IMAGE_DIR}/edition/{state.conv_id}_src.jpg'
|
| 695 |
os.makedirs(os.path.dirname(src_img_file), exist_ok=True)
|
| 696 |
with open(src_img_file, 'w') as f:
|
| 697 |
-
state.source_image
|
| 698 |
output_file = f'{IMAGE_DIR}/edition/{state.conv_id}_out.jpg'
|
| 699 |
with open(output_file, 'w') as f:
|
| 700 |
-
state.output
|
| 701 |
save_image_file_on_log_server(src_img_file)
|
| 702 |
save_image_file_on_log_server(output_file)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 703 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 704 |
def generate_iem(gen_func, state0, state1, source_text, target_text, instruct_text, source_image, model_name0, model_name1, request: gr.Request):
|
| 705 |
if not source_text:
|
| 706 |
raise gr.Warning("Source prompt cannot be empty.")
|
|
@@ -772,10 +982,80 @@ def generate_iem(gen_func, state0, state1, source_text, target_text, instruct_te
|
|
| 772 |
src_img_file = f'{IMAGE_DIR}/edition/{state.conv_id}_src.jpg'
|
| 773 |
os.makedirs(os.path.dirname(src_img_file), exist_ok=True)
|
| 774 |
with open(src_img_file, 'w') as f:
|
| 775 |
-
state.source_image
|
| 776 |
output_file = f'{IMAGE_DIR}/edition/{state.conv_id}_out.jpg'
|
| 777 |
with open(output_file, 'w') as f:
|
| 778 |
-
state.output
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 779 |
save_image_file_on_log_server(src_img_file)
|
| 780 |
save_image_file_on_log_server(output_file)
|
| 781 |
|
|
@@ -848,10 +1128,77 @@ def generate_iem_annoy(gen_func, state0, state1, source_text, target_text, instr
|
|
| 848 |
src_img_file = f'{IMAGE_DIR}/edition/{state.conv_id}_src.jpg'
|
| 849 |
os.makedirs(os.path.dirname(src_img_file), exist_ok=True)
|
| 850 |
with open(src_img_file, 'w') as f:
|
| 851 |
-
state.source_image
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 852 |
output_file = f'{IMAGE_DIR}/edition/{state.conv_id}_out.jpg'
|
| 853 |
with open(output_file, 'w') as f:
|
| 854 |
-
state.output
|
| 855 |
save_image_file_on_log_server(src_img_file)
|
| 856 |
save_image_file_on_log_server(output_file)
|
| 857 |
|
|
|
|
| 9 |
from .log_utils import build_logger
|
| 10 |
from .constants import IMAGE_DIR, VIDEO_DIR
|
| 11 |
import imageio
|
| 12 |
+
from diffusers.utils import load_image
|
| 13 |
|
| 14 |
ig_logger = build_logger("gradio_web_server_image_generation", "gr_web_image_generation.log") # ig = image generation, loggers for single model direct chat
|
| 15 |
igm_logger = build_logger("gradio_web_server_image_generation_multi", "gr_web_image_generation_multi.log") # igm = image generation multi, loggers for side-by-side and battle
|
|
|
|
| 18 |
vg_logger = build_logger("gradio_web_server_video_generation", "gr_web_video_generation.log") # vg = video generation, loggers for single model direct chat
|
| 19 |
vgm_logger = build_logger("gradio_web_server_video_generation_multi", "gr_web_video_generation_multi.log") # vgm = video generation multi, loggers for side-by-side and battle
|
| 20 |
|
| 21 |
+
def save_any_image(image_file, file_path):
|
| 22 |
+
if isinstance(image_file, str):
|
| 23 |
+
image = load_image(image_file)
|
| 24 |
+
image.save(file_path, 'JPEG')
|
| 25 |
+
else:
|
| 26 |
+
image_file.save(file_path, 'JPEG')
|
| 27 |
+
|
| 28 |
def vote_last_response_ig(state, vote_type, model_selector, request: gr.Request):
|
| 29 |
with open(get_conv_log_filename(), "a") as fout:
|
| 30 |
data = {
|
|
|
|
| 38 |
append_json_item_on_log_server(data, get_conv_log_filename())
|
| 39 |
output_file = f'{IMAGE_DIR}/generation/{state.conv_id}.jpg'
|
| 40 |
with open(output_file, 'w') as f:
|
| 41 |
+
save_any_image(state.output, f)
|
| 42 |
save_image_file_on_log_server(output_file)
|
| 43 |
|
| 44 |
def vote_last_response_igm(states, vote_type, model_selectors, request: gr.Request):
|
|
|
|
| 55 |
for state in states:
|
| 56 |
output_file = f'{IMAGE_DIR}/generation/{state.conv_id}.jpg'
|
| 57 |
with open(output_file, 'w') as f:
|
| 58 |
+
save_any_image(state.output, f)
|
| 59 |
save_image_file_on_log_server(output_file)
|
| 60 |
|
| 61 |
def vote_last_response_ie(state, vote_type, model_selector, request: gr.Request):
|
|
|
|
| 72 |
output_file = f'{IMAGE_DIR}/edition/{state.conv_id}.jpg'
|
| 73 |
source_file = f'{IMAGE_DIR}/edition/{state.conv_id}_source.jpg'
|
| 74 |
with open(output_file, 'w') as f:
|
| 75 |
+
save_any_image(state.output, f)
|
| 76 |
with open(source_file, 'w') as sf:
|
| 77 |
+
save_any_image(state.source_image, sf)
|
| 78 |
save_image_file_on_log_server(output_file)
|
| 79 |
save_image_file_on_log_server(source_file)
|
| 80 |
|
|
|
|
| 93 |
output_file = f'{IMAGE_DIR}/edition/{state.conv_id}.jpg'
|
| 94 |
source_file = f'{IMAGE_DIR}/edition/{state.conv_id}_source.jpg'
|
| 95 |
with open(output_file, 'w') as f:
|
| 96 |
+
save_any_image(state.output, f)
|
| 97 |
with open(source_file, 'w') as sf:
|
| 98 |
+
save_any_image(state.source_image, sf)
|
| 99 |
save_image_file_on_log_server(output_file)
|
| 100 |
save_image_file_on_log_server(source_file)
|
| 101 |
|
|
|
|
| 530 |
output_file = f'{IMAGE_DIR}/generation/{state.conv_id}.jpg'
|
| 531 |
os.makedirs(os.path.dirname(output_file), exist_ok=True)
|
| 532 |
with open(output_file, 'w') as f:
|
| 533 |
+
save_any_image(state.output, f)
|
| 534 |
save_image_file_on_log_server(output_file)
|
| 535 |
+
|
| 536 |
+
def generate_ig_museum(gen_func, state, model_name, request: gr.Request):
|
| 537 |
+
if not model_name:
|
| 538 |
+
raise gr.Warning("Model name cannot be empty.")
|
| 539 |
+
if state is None:
|
| 540 |
+
state = ImageStateIG(model_name)
|
| 541 |
+
ip = get_ip(request)
|
| 542 |
+
ig_logger.info(f"generate. ip: {ip}")
|
| 543 |
+
start_tstamp = time.time()
|
| 544 |
+
generated_image, text = gen_func(model_name)
|
| 545 |
+
state.prompt = text
|
| 546 |
+
state.output = generated_image
|
| 547 |
+
state.model_name = model_name
|
| 548 |
+
|
| 549 |
+
yield state, generated_image, text
|
| 550 |
+
|
| 551 |
+
finish_tstamp = time.time()
|
| 552 |
+
# logger.info(f"===output===: {output}")
|
| 553 |
+
|
| 554 |
+
with open(get_conv_log_filename(), "a") as fout:
|
| 555 |
+
data = {
|
| 556 |
+
"tstamp": round(finish_tstamp, 4),
|
| 557 |
+
"type": "chat",
|
| 558 |
+
"model": model_name,
|
| 559 |
+
"gen_params": {},
|
| 560 |
+
"start": round(start_tstamp, 4),
|
| 561 |
+
"finish": round(finish_tstamp, 4),
|
| 562 |
+
"state": state.dict(),
|
| 563 |
+
"ip": get_ip(request),
|
| 564 |
+
}
|
| 565 |
+
fout.write(json.dumps(data) + "\n")
|
| 566 |
+
append_json_item_on_log_server(data, get_conv_log_filename())
|
| 567 |
|
| 568 |
+
output_file = f'{IMAGE_DIR}/generation/{state.conv_id}.jpg'
|
| 569 |
+
os.makedirs(os.path.dirname(output_file), exist_ok=True)
|
| 570 |
+
with open(output_file, 'w') as f:
|
| 571 |
+
save_any_image(state.output, f)
|
| 572 |
+
save_image_file_on_log_server(output_file)
|
| 573 |
+
|
| 574 |
def generate_igm(gen_func, state0, state1, text, model_name0, model_name1, request: gr.Request):
|
| 575 |
if not text:
|
| 576 |
raise gr.Warning("Prompt cannot be empty.")
|
|
|
|
| 631 |
output_file = f'{IMAGE_DIR}/generation/{state.conv_id}.jpg'
|
| 632 |
os.makedirs(os.path.dirname(output_file), exist_ok=True)
|
| 633 |
with open(output_file, 'w') as f:
|
| 634 |
+
save_any_image(state.output, f)
|
| 635 |
save_image_file_on_log_server(output_file)
|
| 636 |
+
|
| 637 |
+
def generate_igm_museum(gen_func, state0, state1, model_name0, model_name1, request: gr.Request):
|
| 638 |
+
if not model_name0:
|
| 639 |
+
raise gr.Warning("Model name A cannot be empty.")
|
| 640 |
+
if not model_name1:
|
| 641 |
+
raise gr.Warning("Model name B cannot be empty.")
|
| 642 |
+
if state0 is None:
|
| 643 |
+
state0 = ImageStateIG(model_name0)
|
| 644 |
+
if state1 is None:
|
| 645 |
+
state1 = ImageStateIG(model_name1)
|
| 646 |
+
ip = get_ip(request)
|
| 647 |
+
igm_logger.info(f"generate. ip: {ip}")
|
| 648 |
+
start_tstamp = time.time()
|
| 649 |
+
# Remove ### Model (A|B): from model name
|
| 650 |
+
model_name0 = re.sub(r"### Model A: ", "", model_name0)
|
| 651 |
+
model_name1 = re.sub(r"### Model B: ", "", model_name1)
|
| 652 |
+
generated_image0, generated_image1, text = gen_func(model_name0, model_name1)
|
| 653 |
+
state0.prompt = text
|
| 654 |
+
state1.prompt = text
|
| 655 |
+
state0.output = generated_image0
|
| 656 |
+
state1.output = generated_image1
|
| 657 |
+
state0.model_name = model_name0
|
| 658 |
+
state1.model_name = model_name1
|
| 659 |
+
|
| 660 |
+
yield state0, state1, generated_image0, generated_image1, text
|
| 661 |
+
|
| 662 |
+
finish_tstamp = time.time()
|
| 663 |
+
# logger.info(f"===output===: {output}")
|
| 664 |
+
|
| 665 |
+
with open(get_conv_log_filename(), "a") as fout:
|
| 666 |
+
data = {
|
| 667 |
+
"tstamp": round(finish_tstamp, 4),
|
| 668 |
+
"type": "chat",
|
| 669 |
+
"model": model_name0,
|
| 670 |
+
"gen_params": {},
|
| 671 |
+
"start": round(start_tstamp, 4),
|
| 672 |
+
"finish": round(finish_tstamp, 4),
|
| 673 |
+
"state": state0.dict(),
|
| 674 |
+
"ip": get_ip(request),
|
| 675 |
+
}
|
| 676 |
+
fout.write(json.dumps(data) + "\n")
|
| 677 |
+
append_json_item_on_log_server(data, get_conv_log_filename())
|
| 678 |
+
data = {
|
| 679 |
+
"tstamp": round(finish_tstamp, 4),
|
| 680 |
+
"type": "chat",
|
| 681 |
+
"model": model_name1,
|
| 682 |
+
"gen_params": {},
|
| 683 |
+
"start": round(start_tstamp, 4),
|
| 684 |
+
"finish": round(finish_tstamp, 4),
|
| 685 |
+
"state": state1.dict(),
|
| 686 |
+
"ip": get_ip(request),
|
| 687 |
+
}
|
| 688 |
+
fout.write(json.dumps(data) + "\n")
|
| 689 |
+
append_json_item_on_log_server(data, get_conv_log_filename())
|
| 690 |
+
|
| 691 |
+
for i, state in enumerate([state0, state1]):
|
| 692 |
+
output_file = f'{IMAGE_DIR}/generation/{state.conv_id}.jpg'
|
| 693 |
+
os.makedirs(os.path.dirname(output_file), exist_ok=True)
|
| 694 |
+
with open(output_file, 'w') as f:
|
| 695 |
+
save_any_image(state.output, f)
|
| 696 |
+
save_image_file_on_log_server(output_file)
|
| 697 |
+
|
| 698 |
+
|
| 699 |
def generate_igm_annoy(gen_func, state0, state1, text, model_name0, model_name1, request: gr.Request):
|
| 700 |
if not text:
|
| 701 |
raise gr.Warning("Prompt cannot be empty.")
|
|
|
|
| 752 |
output_file = f'{IMAGE_DIR}/generation/{state.conv_id}.jpg'
|
| 753 |
os.makedirs(os.path.dirname(output_file), exist_ok=True)
|
| 754 |
with open(output_file, 'w') as f:
|
| 755 |
+
save_any_image(state.output, f)
|
| 756 |
save_image_file_on_log_server(output_file)
|
| 757 |
|
| 758 |
+
def generate_igm_annoy_museum(gen_func, state0, state1, model_name0, model_name1, request: gr.Request):
|
| 759 |
+
if state0 is None:
|
| 760 |
+
state0 = ImageStateIG(model_name0)
|
| 761 |
+
if state1 is None:
|
| 762 |
+
state1 = ImageStateIG(model_name1)
|
| 763 |
+
ip = get_ip(request)
|
| 764 |
+
igm_logger.info(f"generate. ip: {ip}")
|
| 765 |
+
start_tstamp = time.time()
|
| 766 |
+
model_name0 = re.sub(r"### Model A: ", "", model_name0)
|
| 767 |
+
model_name1 = re.sub(r"### Model B: ", "", model_name1)
|
| 768 |
+
generated_image0, generated_image1, model_name0, model_name1, text = gen_func(model_name0, model_name1)
|
| 769 |
+
state0.prompt = text
|
| 770 |
+
state1.prompt = text
|
| 771 |
+
state0.output = generated_image0
|
| 772 |
+
state1.output = generated_image1
|
| 773 |
+
state0.model_name = model_name0
|
| 774 |
+
state1.model_name = model_name1
|
| 775 |
+
|
| 776 |
+
yield state0, state1, generated_image0, generated_image1, text,\
|
| 777 |
+
gr.Markdown(f"### Model A: {model_name0}"), gr.Markdown(f"### Model B: {model_name1}")
|
| 778 |
+
|
| 779 |
+
finish_tstamp = time.time()
|
| 780 |
+
# logger.info(f"===output===: {output}")
|
| 781 |
+
|
| 782 |
+
with open(get_conv_log_filename(), "a") as fout:
|
| 783 |
+
data = {
|
| 784 |
+
"tstamp": round(finish_tstamp, 4),
|
| 785 |
+
"type": "chat",
|
| 786 |
+
"model": model_name0,
|
| 787 |
+
"gen_params": {},
|
| 788 |
+
"start": round(start_tstamp, 4),
|
| 789 |
+
"finish": round(finish_tstamp, 4),
|
| 790 |
+
"state": state0.dict(),
|
| 791 |
+
"ip": get_ip(request),
|
| 792 |
+
}
|
| 793 |
+
fout.write(json.dumps(data) + "\n")
|
| 794 |
+
append_json_item_on_log_server(data, get_conv_log_filename())
|
| 795 |
+
data = {
|
| 796 |
+
"tstamp": round(finish_tstamp, 4),
|
| 797 |
+
"type": "chat",
|
| 798 |
+
"model": model_name1,
|
| 799 |
+
"gen_params": {},
|
| 800 |
+
"start": round(start_tstamp, 4),
|
| 801 |
+
"finish": round(finish_tstamp, 4),
|
| 802 |
+
"state": state1.dict(),
|
| 803 |
+
"ip": get_ip(request),
|
| 804 |
+
}
|
| 805 |
+
fout.write(json.dumps(data) + "\n")
|
| 806 |
+
append_json_item_on_log_server(data, get_conv_log_filename())
|
| 807 |
+
|
| 808 |
+
for i, state in enumerate([state0, state1]):
|
| 809 |
+
output_file = f'{IMAGE_DIR}/generation/{state.conv_id}.jpg'
|
| 810 |
+
os.makedirs(os.path.dirname(output_file), exist_ok=True)
|
| 811 |
+
with open(output_file, 'w') as f:
|
| 812 |
+
save_any_image(state.output, f)
|
| 813 |
+
save_image_file_on_log_server(output_file)
|
| 814 |
|
| 815 |
def generate_ie(gen_func, state, source_text, target_text, instruct_text, source_image, model_name, request: gr.Request):
|
| 816 |
if not source_text:
|
|
|
|
| 858 |
src_img_file = f'{IMAGE_DIR}/edition/{state.conv_id}_src.jpg'
|
| 859 |
os.makedirs(os.path.dirname(src_img_file), exist_ok=True)
|
| 860 |
with open(src_img_file, 'w') as f:
|
| 861 |
+
save_any_image(state.source_image, f)
|
| 862 |
output_file = f'{IMAGE_DIR}/edition/{state.conv_id}_out.jpg'
|
| 863 |
with open(output_file, 'w') as f:
|
| 864 |
+
save_any_image(state.output, f)
|
| 865 |
save_image_file_on_log_server(src_img_file)
|
| 866 |
save_image_file_on_log_server(output_file)
|
| 867 |
+
|
+def generate_ie_museum(gen_func, state, model_name, request: gr.Request):
+    if not model_name:
+        raise gr.Warning("Model name cannot be empty.")
+    if state is None:
+        state = ImageStateIE(model_name)
+    ip = get_ip(request)
+    ig_logger.info(f"generate. ip: {ip}")
+    start_tstamp = time.time()
+    source_image, generated_image, source_text, target_text, instruct_text = gen_func(model_name)
+    state.source_prompt = source_text
+    state.target_prompt = target_text
+    state.instruct_prompt = instruct_text
+    state.source_image = source_image
+    state.output = generated_image
+    state.model_name = model_name
+
+    yield state, generated_image, source_image, source_text, target_text, instruct_text
+
+    finish_tstamp = time.time()
+    # logger.info(f"===output===: {output}")
+
+    with open(get_conv_log_filename(), "a") as fout:
+        data = {
+            "tstamp": round(finish_tstamp, 4),
+            "type": "chat",
+            "model": model_name,
+            "gen_params": {},
+            "start": round(start_tstamp, 4),
+            "finish": round(finish_tstamp, 4),
+            "state": state.dict(),
+            "ip": get_ip(request),
+        }
+        fout.write(json.dumps(data) + "\n")
+        append_json_item_on_log_server(data, get_conv_log_filename())

+    src_img_file = f'{IMAGE_DIR}/edition/{state.conv_id}_src.jpg'
+    os.makedirs(os.path.dirname(src_img_file), exist_ok=True)
+    with open(src_img_file, 'w') as f:
+        save_any_image(state.source_image, f)
+    output_file = f'{IMAGE_DIR}/edition/{state.conv_id}_out.jpg'
+    with open(output_file, 'w') as f:
+        save_any_image(state.output, f)
+    save_image_file_on_log_server(src_img_file)
+    save_image_file_on_log_server(output_file)
+
+
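generate_ie_museum performs no inference: gen_func is expected to hand back a pre-computed ImagenMuseum sample, i.e. the source image, the model's edited output, and the three prompts. Given the "tie" return shape of draw_from_imagen_museum added in this commit, the adapter bound as gen_func plausibly looks like this (hypothetical sketch; the real binding lives in model/model_manager.py):

from functools import partial
from model.fetch_museum_results import draw_from_imagen_museum

def fetch_ie_museum_sample(model_name):
    # draw_from_imagen_museum("tie", model) returns
    # [[source_link, output_link], [input_caption, target_caption, instruction]]
    links, prompts = draw_from_imagen_museum("tie", model_name)
    source_image, generated_image = links
    source_text, target_text, instruct_text = prompts
    return source_image, generated_image, source_text, target_text, instruct_text

# Hypothetical wiring of the Gradio handler:
ie_museum_handler = partial(generate_ie_museum, fetch_ie_museum_sample)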
def generate_iem(gen_func, state0, state1, source_text, target_text, instruct_text, source_image, model_name0, model_name1, request: gr.Request):
    if not source_text:
        raise gr.Warning("Source prompt cannot be empty.")
… (unchanged lines 917–981 omitted) …
        src_img_file = f'{IMAGE_DIR}/edition/{state.conv_id}_src.jpg'
        os.makedirs(os.path.dirname(src_img_file), exist_ok=True)
        with open(src_img_file, 'w') as f:
+            save_any_image(state.source_image, f)
        output_file = f'{IMAGE_DIR}/edition/{state.conv_id}_out.jpg'
        with open(output_file, 'w') as f:
+            save_any_image(state.output, f)
+        save_image_file_on_log_server(src_img_file)
+        save_image_file_on_log_server(output_file)
+
+def generate_iem_museum(gen_func, state0, state1, model_name0, model_name1, request: gr.Request):
+    if not model_name0:
+        raise gr.Warning("Model name A cannot be empty.")
+    if not model_name1:
+        raise gr.Warning("Model name B cannot be empty.")
+    if state0 is None:
+        state0 = ImageStateIE(model_name0)
+    if state1 is None:
+        state1 = ImageStateIE(model_name1)
+    ip = get_ip(request)
+    igm_logger.info(f"generate. ip: {ip}")
+    start_tstamp = time.time()
+    model_name0 = re.sub(r"### Model A: ", "", model_name0)
+    model_name1 = re.sub(r"### Model B: ", "", model_name1)
+    source_image, generated_image0, generated_image1, source_text, target_text, instruct_text = gen_func(model_name0, model_name1)
+    state0.source_prompt = source_text
+    state0.target_prompt = target_text
+    state0.instruct_prompt = instruct_text
+    state0.source_image = source_image
+    state0.output = generated_image0
+    state0.model_name = model_name0
+    state1.source_prompt = source_text
+    state1.target_prompt = target_text
+    state1.instruct_prompt = instruct_text
+    state1.source_image = source_image
+    state1.output = generated_image1
+    state1.model_name = model_name1
+
+    yield state0, state1, generated_image0, generated_image1, source_image, source_text, target_text, instruct_text
+
+    finish_tstamp = time.time()
+    # logger.info(f"===output===: {output}")
+
+    with open(get_conv_log_filename(), "a") as fout:
+        data = {
+            "tstamp": round(finish_tstamp, 4),
+            "type": "chat",
+            "model": model_name0,
+            "gen_params": {},
+            "start": round(start_tstamp, 4),
+            "finish": round(finish_tstamp, 4),
+            "state": state0.dict(),
+            "ip": get_ip(request),
+        }
+        fout.write(json.dumps(data) + "\n")
+        append_json_item_on_log_server(data, get_conv_log_filename())
+        data = {
+            "tstamp": round(finish_tstamp, 4),
+            "type": "chat",
+            "model": model_name1,
+            "gen_params": {},
+            "start": round(start_tstamp, 4),
+            "finish": round(finish_tstamp, 4),
+            "state": state1.dict(),
+            "ip": get_ip(request),
+        }
+        fout.write(json.dumps(data) + "\n")
+        append_json_item_on_log_server(data, get_conv_log_filename())
+
+    for i, state in enumerate([state0, state1]):
+        src_img_file = f'{IMAGE_DIR}/edition/{state.conv_id}_src.jpg'
+        os.makedirs(os.path.dirname(src_img_file), exist_ok=True)
+        with open(src_img_file, 'w') as f:
+            save_any_image(state.source_image, f)
+        output_file = f'{IMAGE_DIR}/edition/{state.conv_id}_out.jpg'
+        with open(output_file, 'w') as f:
+            save_any_image(state.output, f)
        save_image_file_on_log_server(src_img_file)
        save_image_file_on_log_server(output_file)

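The two re.sub calls strip the Markdown headings that the side-by-side UI embeds in the model selectors before the names are used as lookup keys. The patterns are plain literals, so str.removeprefix (Python 3.9+) would do the same job a little more directly:

import re

assert re.sub(r"### Model A: ", "", "### Model A: PlayGroundV2.5") == "PlayGroundV2.5"
assert "### Model A: PlayGroundV2.5".removeprefix("### Model A: ") == "PlayGroundV2.5"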
… (unchanged lines 1062–1127 omitted) …
        src_img_file = f'{IMAGE_DIR}/edition/{state.conv_id}_src.jpg'
        os.makedirs(os.path.dirname(src_img_file), exist_ok=True)
        with open(src_img_file, 'w') as f:
+            save_any_image(state.source_image, f)
+        output_file = f'{IMAGE_DIR}/edition/{state.conv_id}_out.jpg'
+        with open(output_file, 'w') as f:
+            save_any_image(state.output, f)
+        save_image_file_on_log_server(src_img_file)
+        save_image_file_on_log_server(output_file)
+
+def generate_iem_annoy_museum(gen_func, state0, state1, model_name0, model_name1, request: gr.Request):
+    if state0 is None:
+        state0 = ImageStateIE(model_name0)
+    if state1 is None:
+        state1 = ImageStateIE(model_name1)
+    ip = get_ip(request)
+    igm_logger.info(f"generate. ip: {ip}")
+    start_tstamp = time.time()
+    model_name0 = ""
+    model_name1 = ""
+    source_image, generated_image0, generated_image1, source_text, target_text, instruct_text, model_name0, model_name1 = gen_func(model_name0, model_name1)
+    state0.source_prompt = source_text
+    state0.target_prompt = target_text
+    state0.instruct_prompt = instruct_text
+    state0.source_image = source_image
+    state0.output = generated_image0
+    state0.model_name = model_name0
+    state1.source_prompt = source_text
+    state1.target_prompt = target_text
+    state1.instruct_prompt = instruct_text
+    state1.source_image = source_image
+    state1.output = generated_image1
+    state1.model_name = model_name1
+
+    yield state0, state1, generated_image0, generated_image1, source_image, source_text, target_text, instruct_text, \
+        gr.Markdown(f"### Model A: {model_name0}", visible=False), gr.Markdown(f"### Model B: {model_name1}", visible=False)
+
+    finish_tstamp = time.time()
+    # logger.info(f"===output===: {output}")
+
+    with open(get_conv_log_filename(), "a") as fout:
+        data = {
+            "tstamp": round(finish_tstamp, 4),
+            "type": "chat",
+            "model": model_name0,
+            "gen_params": {},
+            "start": round(start_tstamp, 4),
+            "finish": round(finish_tstamp, 4),
+            "state": state0.dict(),
+            "ip": get_ip(request),
+        }
+        fout.write(json.dumps(data) + "\n")
+        append_json_item_on_log_server(data, get_conv_log_filename())
+        data = {
+            "tstamp": round(finish_tstamp, 4),
+            "type": "chat",
+            "model": model_name1,
+            "gen_params": {},
+            "start": round(start_tstamp, 4),
+            "finish": round(finish_tstamp, 4),
+            "state": state1.dict(),
+            "ip": get_ip(request),
+        }
+        fout.write(json.dumps(data) + "\n")
+        append_json_item_on_log_server(data, get_conv_log_filename())
+
+    for i, state in enumerate([state0, state1]):
+        src_img_file = f'{IMAGE_DIR}/edition/{state.conv_id}_src.jpg'
+        os.makedirs(os.path.dirname(src_img_file), exist_ok=True)
+        with open(src_img_file, 'w') as f:
+            save_any_image(state.source_image, f)
        output_file = f'{IMAGE_DIR}/edition/{state.conv_id}_out.jpg'
        with open(output_file, 'w') as f:
+            save_any_image(state.output, f)
        save_image_file_on_log_server(src_img_file)
        save_image_file_on_log_server(output_file)

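generate_iem_annoy_museum is the anonymous-battle variant: it blanks both model names, lets gen_func pick the pair (note the function returns the chosen names alongside the images), and yields the "### Model A/B" headings as hidden Markdown components so identities are only revealed after a vote. A minimal sketch of that reveal step, assuming a handler wired up in serve/gradio_web_image_editing.py (illustrative stand-in, not the actual callback):

import gradio as gr

def reveal_models(md_a: str, md_b: str):
    # Flip the hidden Markdown headings yielded above to visible
    # once the user has cast a vote.
    return gr.Markdown(md_a, visible=True), gr.Markdown(md_b, visible=True)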