import os
import datetime
import threading
import time
import json
import toml
import gradio as gr
import shutil
from huggingface_hub import HfApi, snapshot_download
import pandas as pd
from gradio.processing_utils import save_bytes_to_cache
from gradio.utils import get_upload_folder
from pnginfo import read_info_from_image, send_paras
from images_history import img_history_ui
from director_tools import director_ui, send_outputs
from tagger import tagger_ui
from utils import set_token, generate_novelai_image, image_from_bytes, get_remain_anlas, calculate_cost, vibe_encode, vibe_to_json, base642image
# Client-side settings (default prompts, negative prompts, save path, ...)
# read from the [client] table of config.toml.
client_config = toml.load("config.toml")['client']
# Rolling per-day generation counter; reset by get_count() at date rollover.
today_count = 0
# Date string the counter belongs to, e.g. '2024-01-31'.
today = datetime.date.today().strftime('%Y-%m-%d')
# Hugging Face client used to archive generated images to a dataset repo.
api = HfApi()
def get_count():
    """Return today's generation count, resetting it when the date rolls over.

    Compares the current date against the module-level ``today`` marker;
    on a new day both the marker and ``today_count`` are reset.
    """
    global today_count, today
    current_day = datetime.date.today().strftime('%Y-%m-%d')
    if current_day != today:
        # Day changed since the last call: start counting from zero again.
        today, today_count = current_day, 0
    return today_count
def change_schedule(sampler):
    """Pick the noise schedule that matches the chosen sampler.

    ddim_v3 only supports the 'native' schedule (locked), k_dpmpp_2m
    defaults to 'exponential', and every other sampler defaults to
    'karras' with the dropdown left editable.
    """
    special_cases = {
        "ddim_v3": ("native", False),
        "k_dpmpp_2m": ("exponential", True),
    }
    schedule, editable = special_cases.get(sampler, ("karras", True))
    return gr.Dropdown(value=schedule, interactive=editable)
def draw_position(x, y):
    """Render a 5x5 position grid as HTML, marking cell (column x, row y).

    Builds a flat 25-character string with a single black square at the
    1-based (x, y) position, splits it into five rows and joins them with
    ``<br/>`` so the result displays as a grid inside a gr.HTML component.
    (The original literals had their HTML tags stripped in transit; they
    are reconstructed here as ``<br/>`` line breaks.)
    """
    flat = ('⬜' * (5 * (y - 1) + x - 1) + '⬛').ljust(25, '⬜')
    rows = [flat[i:i + 5] for i in range(0, 25, 5)]
    return '示例图<br/>' + '<br/>'.join(rows) + '<br/>'
def dynparas(*args):
    """Collect the values of a dynamically rendered set of inputs into a list."""
    return [*args]
def control_ui():
    """Build the left-hand generation control panel.

    Returns:
        gen_btn, stop_btn: the generate / cancel buttons.
        paras: ordered list of parameter components matching generate()'s
            positional signature.
        others: auxiliary components [save, rand_seed, reuse_seed,
            reuse_img_i2i, reuse_img_inp, vibe_tab, i2i_tab, rerender].
    """
    model = gr.Dropdown(choices=["nai-diffusion-3", "nai-diffusion-4-curated-preview", "nai-diffusion-furry-3", "nai-diffusion-4-full", "nai-diffusion-4-5-curated", "nai-diffusion-4-5-full"], value="nai-diffusion-4-5-full", label="模型")
    prompt = gr.TextArea(elem_id='txt2img_prompt', label="提示词", lines=3)
    quality_tags = gr.TextArea(
        elem_id='txt2img_qua_prompt', label="质量词", lines=1,
        value=client_config['default_quality'],
    )
    neg_prompt = gr.TextArea(
        elem_id='txt2img_neg_prompt', label="负面词", lines=1,
        value=client_config['default_neg'],
    )
    # js hook: presumably re-runs the tag-autocomplete setup when a prompt
    # box gains focus — TODO confirm against the bundled javascript.
    prompt.focus(fn=None, inputs=None, js="() => {run();}")
    quality_tags.focus(fn=None, inputs=None, js="() => {run();}")
    neg_prompt.focus(fn=None, inputs=None, js="() => {run();}")
    with gr.Column():
        with gr.Accordion("参数设置", open=False):
            with gr.Row():
                sampler = gr.Dropdown(
                    choices=[
                        "k_euler", "k_euler_ancestral", "k_dpmpp_2s_ancestral", "k_dpmpp_2m_sde",
                        "k_dpmpp_2m", "k_dpmpp_sde", "ddim_v3"
                    ],
                    value="k_euler",
                    label="采样器",
                    interactive=True
                )
                scale = gr.Slider(label="CFG Scale", value=5.0, minimum=0, maximum=10, step=0.1)
                steps = gr.Slider(label="步数", value=28, minimum=1, maximum=50, step=1)
            with gr.Row():
                # -1 means "random seed" (see generate()/rand_seed wiring).
                seed = gr.Number(label="种子", value=-1, step=1, maximum=2**32-1, minimum=-1, scale=3)
                rand_seed = gr.Button('🎲️', scale=1)
                reuse_seed = gr.Button('♻️', scale=1)
            with gr.Row():
                presets = gr.Dropdown(choices=['1024x1024', '832x1216', '1216x832', '768x1344', '1344x768'], value='1024x1024', label='预设比例', scale=1)
                width = gr.Slider(label="宽度", value=1024, minimum=64, maximum=2048, step=64, scale=3)
                height = gr.Slider(label="高度", value=1024, minimum=64, maximum=2048, step=64, scale=3)
            # Parse the preset "WxH" string into the two sliders.
            presets.change(lambda s:tuple(map(int, s.split('x'))), inputs=presets, outputs=[width, height])
        with gr.Accordion('风格迁移', open=False) as vibe_tab:
            str_norm = gr.Checkbox(label='参考强度归一化', value=True)
            ref_images = gr.Gallery(label="上传单(多)张图片", format="png", value=None, interactive=True, type="pil", show_share_button=False)
            # Per-image "information extracted" values and per-reference
            # strengths live in State because the matching sliders are
            # destroyed/recreated by the @gr.render block below.
            info_extracts = gr.State([])
            ref_strs = gr.State([])
            encode = gr.Button('生成vibe文件(仅nai4,每张图消耗2点)')
            vibe_files = gr.Files(label='nai4 vibe文件', value=None, file_types=['.naiv4vibe'])
            vibe_thumbs = gr.Gallery(label='vibes预览图', interactive=False, visible=False)
            encode.click(vibe_transfer, inputs=[model, ref_images, info_extracts, vibe_files], outputs=[ref_images, info_extracts, vibe_files], concurrency_limit=1)
            vibe_files.change(display_vibe_thumbs, inputs=vibe_files, outputs=vibe_thumbs)

        @gr.render(inputs=[ref_images, vibe_files, info_extracts, ref_strs], triggers=[ref_images.change, vibe_files.change])
        def multiple_vibes(images, vibes, extracts, strengths):
            # Re-render one extract/strength slider pair per uploaded image
            # and one strength slider per .naiv4vibe file; strengths holds
            # image strengths first, then vibe-file strengths.
            exts = []
            strs = []
            if images != None:
                for i, _ in enumerate(images):
                    with gr.Row():
                        extract = gr.Slider(label=f"图片{i + 1} 参考信息提取", elem_id=f"ref_info_{i + 1}", value=extracts[i] if i < len(extracts) else 1, minimum=0, maximum=1, step=0.1, interactive=True)
                        # NOTE(review): this guard checks i < len(extracts)
                        # rather than i < len(strengths) — works only while
                        # both lists are kept in lockstep; confirm intended.
                        strength = gr.Slider(label=f"图片{i + 1} 参考强度", elem_id=f"ref_strength_{i + 1}", value=strengths[i] if i < len(extracts) else 0.6, minimum=0, maximum=1, step=0.1, interactive=True)
                        exts.append(extract)
                        strs.append(strength)
            if vibes != None:
                for i, _ in enumerate(vibes):
                    with gr.Row():
                        strength = gr.Slider(label=f"Vibe{i + 1} 参考强度", elem_id=f"vibe_strength_{i + 1}", value=strengths[len(extracts) + i] if len(extracts) + i < len(strengths) else 0.6, minimum=0, maximum=1, step=0.1, interactive=True)
                        strs.append(strength)
            # Any slider movement writes the full value list back to State.
            for e in exts:
                e.change(fn=dynparas, inputs=exts, outputs=info_extracts)
            for s in strs:
                s.change(fn=dynparas, inputs=strs, outputs=ref_strs)

        def change_values(items, base, default, prefix=False):
            # Resize the State list `base` to track the number of `items`,
            # filling new slots with `default`. When `prefix` is a list,
            # `base` is treated as [per-image values (len(prefix))] +
            # [per-vibe values]; only the image portion is resized and the
            # trailing vibe portion is preserved.
            if prefix:
                postfix = base[len(prefix):] if len(prefix) < len(base) else []
                if items is None:
                    return postfix
                elif len(items) <= len(prefix):
                    return base[:len(items)] + postfix
                else:
                    return base[:len(prefix)] + [default] * (len(items) - len(prefix)) + postfix
            else:
                if items is None:
                    return []
                elif len(items) <= len(base):
                    return base[:len(items)]
                else:
                    return base + [default] * (len(items) - len(base))
        ref_images.change(lambda i, e, s: (change_values(i, e, 1), change_values(i, s, 0.6, e)), inputs=[ref_images, info_extracts, ref_strs], outputs=[info_extracts, ref_strs])
        # NOTE(review): here `base` is info_extracts and `default` is the
        # ref_strs *list* — if more vibe files than extracts exist, the list
        # itself is used as a fill value; looks suspicious, verify intent.
        vibe_files.change(change_values, inputs=[vibe_files, info_extracts, ref_strs], outputs=ref_strs)
        with gr.Accordion('附加输入', open=False, elem_id="i2i_tab") as i2i_tab:
            with gr.Tab('图生图', elem_id="i2i_block") as i2i:
                i2i_image = gr.Image(label="上传图片", value=None, sources=["upload", "clipboard", "webcam"], interactive=True, type="pil", show_share_button=False)
                i2i_str = gr.Slider(label='去噪强度', value=0.7, minimum=0, maximum=0.99, step=0.01)
                i2i_noise = gr.Slider(label='噪声', value=0, minimum=0, maximum=1, step=0.1)
                reuse_img_i2i = gr.Button(value='使用上一次生成的图片')
            with gr.Tab('局部重绘', elem_id="inp_block") as inp:
                overlay = gr.Checkbox(label='覆盖原图', value=True)
                inp_img = gr.ImageMask(label="上传图片", value=None, sources=["upload", "clipboard", "webcam"], interactive=True, type="pil", eraser=False, transforms=None, brush=gr.Brush(colors=['#FFFFFF'], color_mode='fixed'), layers=False, show_share_button=False, show_download_button=False)
                inp_str = gr.Slider(label="重绘强度", value=1, minimum=0.01, maximum=1, step=0.01)
                reuse_img_inp = gr.Button(value='使用上一次生成的图片')
            # Hidden radio tracking which tab (img2img vs inpaint) is active.
            selection = gr.Radio(choices=['i2i', 'inp'], value='i2i', visible=False)
        with gr.Accordion("角色控制(仅nai4有效)", open=False, elem_id="v4chars_tab") as chars:
            with gr.Row():
                num_chars = gr.Number(value=0, label="角色数量", minimum=0, maximum=6)
                auto_pos = gr.Checkbox(value=True, label="AI自动决定位置")
            # Up to 6 characters; values kept in State across re-renders.
            char_prompts = gr.State(['']*6)
            char_ucs = gr.State(['']*6)
            char_coords_x = gr.State([3]*6)
            char_coords_y = gr.State([3]*6)
            # Hidden flag external code can toggle to force a re-render.
            rerender = gr.Checkbox(value=False, visible=False)

            @gr.render(inputs=[num_chars, auto_pos, char_prompts, char_ucs, char_coords_x, char_coords_y], triggers=[num_chars.change, auto_pos.change, rerender.change])
            def characterPrompts(num, is_auto, char_p, char_np, cds_x, cds_y):
                prompts = []
                neg_prompts = []
                coords_x = []
                coords_y = []
                for i in range(num):
                    with gr.Row():
                        with gr.Column():
                            p = gr.TextArea(lines=2, value=char_p[i], elem_id=f"txt2img_char{i + 1}_p", label=f"角色{i + 1}提示词")
                            np = gr.TextArea(lines=2, value=char_np[i], elem_id=f"txt2img_char{i + 1}_n", label=f"角色{i + 1}负面提示词")
                            p.focus(fn=None, inputs=None, js="() => {run();}")
                            np.focus(fn=None, inputs=None, js="() => {run();}")
                        with gr.Row():
                            # NOTE(review): called as (cds_y[i], cds_x[i]) here but the
                            # change handlers below pass (coord_x, coord_y) — the argument
                            # order looks swapped in one of the two places; confirm.
                            pos_visual = gr.HTML(value=draw_position(cds_y[i], cds_x[i]))
                            with gr.Column():
                                coord_y = gr.Number(value=cds_y[i], label="行", minimum=1, maximum=5, interactive=not is_auto)
                                coord_x = gr.Number(value=cds_x[i], label="列", minimum=1, maximum=5, interactive=not is_auto)
                            coord_x.change(draw_position, inputs=[coord_x, coord_y], outputs=pos_visual)
                            coord_y.change(draw_position, inputs=[coord_x, coord_y], outputs=pos_visual)
                    prompts.append(p)
                    neg_prompts.append(np)
                    coords_x.append(coord_x)
                    coords_y.append(coord_y)
                for i in range(num):
                    # index=i binds the loop variable at definition time
                    # (avoids the late-binding closure pitfall).
                    def change_one(orignal, new_one, index=i):
                        orignal[index] = new_one
                        return orignal
                    prompts[i].input(fn=change_one, inputs=[char_prompts, prompts[i]], outputs=char_prompts)
                    neg_prompts[i].input(fn=change_one, inputs=[char_ucs, neg_prompts[i]], outputs=char_ucs)
                    coords_x[i].input(fn=change_one, inputs=[char_coords_x, coords_x[i]], outputs=char_coords_x)
                    coords_y[i].input(fn=change_one, inputs=[char_coords_y, coords_y[i]], outputs=char_coords_y)
            # Pop the accordion open whenever its controls change.
            num_chars.change(lambda: gr.Accordion(open=True), inputs=None, outputs=chars)
            auto_pos.change(lambda: gr.Accordion(open=True), inputs=None, outputs=chars)
        with gr.Accordion('角色参考(仅nai4.5有效)', open=False, elem_id="v4.5char_tab") as v45char_tab:
            chr_image = gr.Image(label="上传图片", value=None, sources=["upload", "clipboard", "webcam"], interactive=True, type="pil", show_share_button=False)
            fidelity = gr.Slider(label='参考度', value=1, minimum=0, maximum=1, step=0.05)
            style_aware = gr.Checkbox(label='参考风格', value=True)
        with gr.Accordion('高级选项', open=False):
            scheduler = gr.Dropdown(
                choices=[
                    "native", "karras", "exponential", "polyexponential"
                ],
                value="karras",
                label="Scheduler",
                interactive=True
            )
            with gr.Row():
                smea = gr.Checkbox(False, label="SMEA")
                dyn = gr.Checkbox(False, label="SMEA DYN")
                variety = gr.Checkbox(False, label="Variety+")
                legacy = gr.Checkbox(False, label="Legacy Prompt (nai4)")
            with gr.Row():
                dyn_threshold = gr.Checkbox(False, label="Decrisp")
                cfg_rescale = gr.Slider(0, 1, 0, step=0.01, label="CFG rescale")
        save = gr.Checkbox(value=True, label='云端保存图片')
        gen_btn = gr.Button(value="生成", variant="primary")
        stop_btn = gr.Button(value="取消", variant="stop", visible=False)
    # Cross-component wiring that lives outside the layout containers.
    sampler.change(change_schedule, sampler, scheduler)
    rand_seed.click(fn=lambda: -1, inputs=None, outputs=seed)
    i2i.select(lambda: 'i2i', inputs=None, outputs=selection)
    inp.select(lambda: 'inp', inputs=None, outputs=selection)
    # paras order must match generate()'s positional signature exactly.
    return gen_btn, stop_btn, \
        [model, prompt, quality_tags, neg_prompt, seed, scale, width, height, steps, sampler, scheduler, smea, dyn, dyn_threshold, cfg_rescale, variety, \
        ref_images, info_extracts, ref_strs, vibe_files, str_norm, i2i_image, i2i_str, i2i_noise, overlay, inp_img, inp_str, selection, \
        num_chars, auto_pos, char_prompts, char_ucs, char_coords_x, char_coords_y, legacy, chr_image, fidelity, style_aware], \
        [save, rand_seed, reuse_seed, reuse_img_i2i, reuse_img_inp, vibe_tab, i2i_tab, rerender]
def generate(model, prompt, quality_tags, neg_prompt, seed, scale, width, height, steps, sampler, scheduler, smea, dyn, dyn_threshold, cfg_rescale, variety, ref_images, info_extracts, ref_strs, vibe_files, str_norm, i2i_image, i2i_str, i2i_noise, overlay, inp_img, inp_str, selection, num_chars, auto_pos, char_prompts, char_ucs, char_coords_x, char_coords_y, legacy, chr_image, fidelity, style_aware):
    """Run one NovelAI generation and return (image, payload).

    Appends the quality tags to the prompt, trims the per-character lists
    down to the active character count, and bumps the daily counter on
    success. On failure (non-bytes response) the preview is cleared and the
    raw payload is surfaced as the generation info.
    """
    global today_count
    set_token(os.environ.get('token'))
    full_prompt = f"{prompt}, {quality_tags}"
    img_data, payload = generate_novelai_image(
        model, full_prompt, neg_prompt, seed, scale,
        width, height, steps, sampler, scheduler,
        smea, dyn, dyn_threshold, cfg_rescale, variety, ref_images, info_extracts, ref_strs, vibe_files, str_norm,
        i2i_image, i2i_str, i2i_noise, overlay, inp_img, inp_str, selection,
        auto_pos, char_prompts[:num_chars], char_ucs[:num_chars], char_coords_x[:num_chars], char_coords_y[:num_chars], legacy,
        chr_image, fidelity, style_aware
    )
    if isinstance(img_data, bytes):
        today_count = get_count() + 1
        return image_from_bytes(img_data), payload
    # The API returned an error object instead of image bytes.
    return gr.Image(value=None), payload
def preview_ui():
    """Build the preview panel: output image, hidden send-to-editor button
    and the generation-info JSON viewer."""
    with gr.Blocks(css='#preview_image { height: 100%;}'):
        image = gr.Image(format='png', elem_id='preview_image', interactive=False, type='filepath', show_share_button=False)
        send_dtool = gr.Button(value="发送到定向修图", visible=False)
        # The send button only appears once there is an image to send.
        image.change(lambda i: gr.Button(visible=i is not None), inputs=image, outputs=send_dtool)
        info = gr.JSON(value={}, label="生成信息")
    return image, info, send_dtool
def rename_save_img(path, payload, save):
    """Rename the cached output image to '<count>-<seed>.png' and optionally
    archive a copy locally and to the Hugging Face dataset repo.

    Args:
        path: Filepath of the freshly generated image in gradio's cache,
            or None when generation failed.
        payload: Generation payload; the seed is read from
            payload['parameters']['seed'].
        save: When True, copy the renamed file into the dated save folder
            and upload it to the "P01yH3dr0n/naimages" dataset.

    Returns:
        The renamed path, or None when there is no image.
    """
    if path is None:
        return None
    filename = str(today_count).rjust(5, '0') + '-' + str(payload['parameters']['seed']) + '.png'
    # Join the new name onto the directory instead of str.replace on the
    # whole path: replace() would also rewrite a directory component that
    # happened to contain the original basename.
    renamed_path = os.path.join(os.path.dirname(path), filename)
    if os.path.exists(renamed_path):
        # Already renamed (the .then() step uses trigger_mode="once" but the
        # image.change can re-fire with the renamed path) — nothing to do.
        return renamed_path
    os.replace(path, renamed_path)
    if save:
        save_path = client_config['save_path']
        # Local name to avoid shadowing the module-level `today` marker.
        day = datetime.date.today().strftime('%Y-%m-%d')
        today_path = os.path.join(save_path, day)
        os.makedirs(today_path, exist_ok=True)
        file_path = os.path.join(today_path, filename)
        shutil.copy(renamed_path, file_path)
        api.upload_file(path_or_fileobj=file_path, path_in_repo=file_path, repo_id="P01yH3dr0n/naimages", repo_type="dataset", token=os.environ.get("hf_token"))
    return renamed_path
def vibe_transfer(model, ref_images, info_extracts, vibe_files):
    """Encode each reference image into a .naiv4vibe cache file (nai4 only).

    Non-nai4 models or an empty gallery pass through unchanged. On success
    the gallery and extract list are cleared and the new vibe files are
    appended to any existing ones.
    """
    if ref_images is None or not model.startswith('nai-diffusion-4'):
        return ref_images, info_extracts, vibe_files
    orig, vibes = vibe_encode(model, ref_images, info_extracts)
    new_files = []
    for i, item in enumerate(ref_images):
        # Gallery entries are (image, caption) tuples; item[0] is the image.
        data, name = vibe_to_json(model, item[0], info_extracts[i], orig[i], vibes[i])
        cached = save_bytes_to_cache(data.encode('utf-8'), name + '.naiv4vibe', get_upload_folder())
        new_files.append(cached)
    existing = [] if vibe_files is None else vibe_files
    return None, [], existing + new_files
def display_vibe_thumbs(vibe_files):
    """Show the base64 thumbnail embedded in each .naiv4vibe file, or hide
    the preview gallery when no files are present."""
    if vibe_files is None:
        return gr.Gallery(value=None, visible=False)
    thumbs = []
    for file_path in vibe_files:
        with open(file_path) as f:
            # Strip the data-URL prefix before decoding the JPEG payload.
            b64 = json.load(f)['thumbnail'].replace('data:image/jpeg;base64,', '')
        thumbs.append(base642image(b64))
    return gr.Gallery(value=thumbs, visible=True)
def update_btn_cost(model, w, h, s, sm, dyn, ref_imgs, i2i_img, i2i_str, inp_img, inp_str, selection, chr_img):
    """Estimate the Anlas cost of the next generation and display it on the
    generate button.

    Picks the img2img / inpaint / txt2img cost formula based on the active
    tab, then adds 2 points per vibe reference (nai4) and 5 points for a
    character reference image (nai4.5).
    """
    if selection == 'i2i' and i2i_img is not None:
        cost = calculate_cost(w, h, s, False, False, i2i_str)
    elif selection == 'inp' and inp_img is not None and inp_img['background'] is not None:
        # Guard inp_img itself: the ImageMask value stays None until the
        # user uploads something, and None['background'] would raise.
        cost = calculate_cost(w, h, s, False, False, inp_str if model.startswith('nai-diffusion-4') else 1)
    else:
        cost = calculate_cost(w, h, s, sm, dyn)
    # ref_imgs is the info_extracts State list — one entry per vibe reference.
    if len(ref_imgs) and model.startswith('nai-diffusion-4'):
        cost += len(ref_imgs) * 2
    if chr_img is not None and model.startswith('nai-diffusion-4-5'):
        cost += 5
    return gr.Button(value=f"生成(预计消耗{cost}点数)")
def main_ui():
    """Assemble the main text-to-image tab and wire the generation pipeline."""
    with gr.Blocks():
        with gr.Row(variant="panel"):
            with gr.Column():
                gen_btn, stop_btn, paras, others = control_ui()
            with gr.Column():
                image, info, send_dtool = preview_ui()
        # Components whose value affects the cost estimate, in
        # update_btn_cost's positional order: model, width, height, steps,
        # smea, dyn, info_extracts, i2i_image, i2i_str, inp_img, inp_str,
        # selection, chr_image.
        cost_list = [paras[0]] + paras[6:9] + paras[11:13] + [paras[17]] + paras[21:23] + paras[25:28] + [paras[35]]
        for component in cost_list:
            component.change(update_btn_cost, inputs=cost_list, outputs=gen_btn)
        # Generation chain: hide generate / show cancel -> auto-encode any
        # pending vibe images -> generate -> rename (and optionally save)
        # the output -> restore the buttons.
        gen = gen_btn.click(lambda: (gr.Button(visible=False), gr.Button(visible=True)), inputs=None, outputs=[gen_btn, stop_btn]).then(
            vibe_transfer, inputs=[paras[0], paras[16], paras[17], paras[19]], outputs=[paras[16], paras[17], paras[19]]).then(
            generate, inputs=paras, outputs=[image, info], concurrency_limit=1, concurrency_id="generate").then(
            rename_save_img, inputs=[image, info, others[0]], outputs=image, trigger_mode="once").then(
            lambda: (gr.Button(visible=True), gr.Button(visible=False)), inputs=None, outputs=[gen_btn, stop_btn])
        # others[2]=reuse_seed: copy the last payload's seed back into the
        # seed box (keep the current value when no payload exists yet).
        others[2].click(lambda o, s: o if len(s) == 0 else s['parameters']['seed'], inputs=[paras[4], info], outputs=paras[4])
        # others[3]/others[4]: reuse the last output as img2img / inpaint input.
        others[3].click(lambda i: i, inputs=image, outputs=paras[21])
        others[4].click(lambda i: gr.ImageEditor(value=i), inputs=image, outputs=paras[25])
        # Cancel: restore the buttons and abort the running generation chain.
        stop_btn.click(lambda: (gr.Button(visible=True), gr.Button(visible=False)), inputs=None, outputs=[gen_btn, stop_btn], cancels=[gen])
    return image, paras, others, send_dtool
def util_ui():
    """Build the PNG-info tab: upload an image, view its embedded generation
    metadata, and optionally send the parameters back to txt2img."""
    with gr.Blocks():
        with gr.Row(equal_height=False):
            with gr.Column(variant='panel'):
                image = gr.Image(label="上传图片", image_mode="RGBA", sources=["upload"], interactive=True, type="pil")
            with gr.Column(variant='panel'):
                info = gr.HTML('')
                items = gr.JSON(value={}, visible=False)
                png2main = gr.Button('参数发送到文生图', visible=False)
        # The send button is only shown when metadata was actually parsed.
        items.change(lambda i: gr.Button(visible=len(i) > 0), inputs=items, outputs=png2main)
    return png2main, items, info, image
def load_javascript():
head = ''
for f in sorted(os.listdir('./tagcomplete/javascript')):
head += f'\n'
share = gr.routes.templates.TemplateResponse
def template_response(*args, **kwargs):
res = share(*args, **kwargs)
res.body = res.body.replace(b'