Spaces:
Paused
Paused
Commit ·
e774b98
0
Parent(s):
Duplicate from pikto/prodia
Browse filesCo-authored-by: pikto kenn <pikto@users.noreply.huggingface.co>
- .gitattributes +35 -0
- README.md +14 -0
- app.py +304 -0
- back-app.py +104 -0
- config.json +17 -0
- cutter.py +98 -0
- flipper.py +31 -0
- play.py +82 -0
- pob +63 -0
- requirements.txt +4 -0
- theme_dropdown.py +57 -0
- themes/theme_schema@0.0.1.json +1 -0
- transform.py +13 -0
- utils.py +6 -0
.gitattributes
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Prodia
|
| 3 |
+
emoji: 🔥
|
| 4 |
+
colorFrom: pink
|
| 5 |
+
colorTo: blue
|
| 6 |
+
sdk: gradio
|
| 7 |
+
sdk_version: 3.39.0
|
| 8 |
+
app_file: app.py
|
| 9 |
+
pinned: false
|
| 10 |
+
license: apache-2.0
|
| 11 |
+
duplicated_from: pikto/prodia
|
| 12 |
+
---
|
| 13 |
+
|
| 14 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
|
@@ -0,0 +1,304 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import gradio as gr
|
| 3 |
+
import ast
|
| 4 |
+
import requests
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
from rembg import new_session
|
| 8 |
+
from cutter import remove, make_label
|
| 9 |
+
from utils import *
|
| 10 |
+
|
| 11 |
+
API_URL_INITIAL = "https://ysharma-playground-ai-exploration.hf.space/run/initial_dataframe"
|
| 12 |
+
API_URL_NEXT10 = "https://ysharma-playground-ai-exploration.hf.space/run/next_10_rows"
|
| 13 |
+
|
| 14 |
+
from theme_dropdown import create_theme_dropdown # noqa: F401
|
| 15 |
+
|
| 16 |
+
dropdown, js = create_theme_dropdown()
|
| 17 |
+
|
| 18 |
+
# Text-to-image models exposed in the "T-to-I" tab; `url` is the Hugging Face
# model id, loaded below via gr.Interface.load(f"models/{url}").
models = [
    {"name": "Stable Diffusion 2", "url": "stabilityai/stable-diffusion-2-1"},
    {"name": "stability AI", "url": "stabilityai/stable-diffusion-2-1-base"},
    {"name": "Compressed-S-D", "url": "nota-ai/bk-sdm-small"},
    {"name": "Future Diffusion", "url": "nitrosocke/Future-Diffusion"},
    {"name": "JWST Deep Space Diffusion", "url": "dallinmackay/JWST-Deep-Space-diffusion"},
    {"name": "Robo Diffusion 3 Base", "url": "nousr/robo-diffusion-2-base"},
    {"name": "Robo Diffusion", "url": "nousr/robo-diffusion"},
    {"name": "Tron Legacy Diffusion", "url": "dallinmackay/Tron-Legacy-diffusion"},
]
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
#### REM-BG
|
| 31 |
+
|
| 32 |
+
# Display name -> rembg session/model identifier for the background-removal tab.
remove_bg_models = {
    "TracerUniversalB7": "TracerUniversalB7",
    "U2NET": "u2net",
    "U2NET Human Seg": "u2net_human_seg",
    "U2NET Cloth Seg": "u2net_cloth_seg"
}

# NOTE(review): `keys` presumably comes from `from utils import *` and returns
# the dict's keys as a list suitable for gr.Dropdown choices — confirm in utils.py.
model_choices = keys(remove_bg_models)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def predict(image, session, smoot, matting, bg_color):
    """Remove the background from *image* with the selected rembg model.

    Args:
        image: input PIL image from the gr.Image component.
        session: display name selecting an entry of ``remove_bg_models``.
        smoot: whether to smooth (post-process) the predicted mask.
        matting: ``(fg_threshold, bg_threshold, erode_size)`` alpha-matting tuple.
        bg_color: ``False`` to keep transparency, ``True`` for the dominant
            background color, or a color string for an explicit background.

    Returns:
        ``(cutout, mask)`` on success, or ``(error-label image, None)`` when
        alpha matting fails with a ValueError.
    """
    # Bind the rembg session under its own name instead of shadowing the
    # incoming model-choice string, which made the code confusing to read.
    rembg_session = new_session(remove_bg_models[session])

    try:
        return remove(rembg_session, image, smoot, matting, bg_color)
    except ValueError as err:
        logging.error(err)
        return make_label(str(err)), None
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def change_show_mask(chk_state):
    # Show or hide the mask preview image to mirror the "Show Mask" checkbox.
    return gr.Image.update(visible=chk_state)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def change_include_matting(chk_state):
    # Toggle the matting-slider box and reset the matting state tuple and all
    # three sliders back to zero.
    return gr.Box.update(visible=chk_state), (0, 0, 0), 0, 0, 0
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def change_foreground_threshold(fg_value, value):
    """Return the matting state with its foreground threshold replaced."""
    _, bg_threshold, erode_size = value
    return fg_value, bg_threshold, erode_size
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def change_background_threshold(bg_value, value):
    """Return the matting state with its background threshold replaced."""
    fg_threshold, _, erode_size = value
    return fg_threshold, bg_value, erode_size
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def change_erode_size(erode_value, value):
    """Return the matting state with its erode size replaced."""
    fg_threshold, bg_threshold, _ = value
    return fg_threshold, bg_threshold, erode_value
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def set_dominant_color(chk_state):
    # When "use dominant color" is ticked, hide (and clear) the manual picker;
    # the boolean itself becomes the new color state.
    return chk_state, gr.ColorPicker.update(value=False, visible=not chk_state)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def change_picker_color(picker, dominant):
    """Prefer the dominant-color flag over the picked color when it is set."""
    return dominant if dominant else picker
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def change_background_mode(chk_state):
    # Reveal the color picker and the dominant-color checkbox together,
    # resetting both values when the mode is toggled.
    return gr.ColorPicker.update(value=False, visible=chk_state), \
           gr.Checkbox.update(value=False, visible=chk_state)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
###########
|
| 93 |
+
|
| 94 |
+
# Prompt-extender Space used to expand a short idea into a full prompt.
text_gen = gr.Interface.load("spaces/daspartho/prompt-extend")

current_model = models[0]

# Pre-load one gr.Interface per model so send_it() can dispatch by dropdown
# index. NOTE(review): this issues network requests at import time.
models2 = []
for model in models:
    model_url = f"models/{model['url']}"
    loaded_model = gr.Interface.load(model_url, live=True, preprocess=True)
    models2.append(loaded_model)
|
| 103 |
+
|
| 104 |
+
def text_it(inputs, text_gen=text_gen):
    # Default-arg binding captures the module-level prompt-extender Interface
    # at definition time; calling it runs the remote Space.
    return text_gen(inputs)
|
| 106 |
+
|
| 107 |
+
def flip_text(x):
    """Return the input sequence reversed."""
    reversed_value = x[::-1]
    return reversed_value
|
| 109 |
+
|
| 110 |
+
def send_it(inputs, model_choice):
    # model_choice is the dropdown index (the Dropdown uses type="index");
    # dispatch the prompt to the matching pre-loaded model Interface.
    proc = models2[model_choice]
    return proc(inputs)
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def flip_image(x):
    """Mirror the image horizontally (left-to-right)."""
    mirrored = np.fliplr(x)
    return mirrored
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def set_model(current_model_index):
    # Track the dropdown selection in module state; the returned update writes
    # the chosen model's name back to the bound output components.
    global current_model
    current_model = models[current_model_index]
    return gr.update(value=f"{current_model['name']}")
|
| 123 |
+
|
| 124 |
+
#define inference function
|
| 125 |
+
#First: Get initial images for the grid display
|
| 126 |
+
def get_initial_images():
    # Fetch the first page of the image grid from the remote Gradio Space API.
    # NOTE(review): this is invoked at module import below, so importing the
    # app performs a blocking network call.
    response = requests.post(API_URL_INITIAL, json={
        "data": []
    }).json()
    #data = response["data"][0]['data'][0][0][:-1]
    response_dict = response['data'][0]
    return response_dict #, [resp[0][:-1] for resp in response["data"][0]["data"]]
|
| 133 |
+
|
| 134 |
+
#Second: Process response dictionary to get imges as hyperlinked image tags
|
| 135 |
+
def process_response(response_dict):
    """Strip the trailing character from each image-tag row in the payload."""
    tags = []
    for row in response_dict["data"]:
        tags.append(row[0][:-1])
    return tags
|
| 137 |
+
|
| 138 |
+
# Fetch the first page at import time and pre-render it as an HTML grid.
# NOTE(review): network request during module import — consider lazy loading.
response_dict = get_initial_images()
initial = process_response(response_dict)
initial_imgs = '<div style="display: grid; grid-template-columns: repeat(3, 1fr); grid-template-rows: repeat(3, 1fr); grid-gap: 0; background-color: #fff; padding: 20px; box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);">\n' + "\n".join(initial[:-1])
|
| 141 |
+
|
| 142 |
+
#Third: Load more images for the grid
|
| 143 |
+
def get_next10_images(response_dict, row_count):
    """Fetch the next 10 rows of images from the remote Space API.

    Args:
        response_dict: dict describing the current dataframe state, or its
            string repr (the state round-trips through a hidden gr.Textbox).
        row_count: current row offset; advanced by 10 per call.

    Returns:
        ``(updated response_dict, new row_count, HTML grid of the next images)``
    """
    row_count = int(row_count)
    # The state comes back from the hidden textbox as a string; rebuild the
    # dict with literal_eval (safe: parses literals only, never executes code).
    if not isinstance(response_dict, dict):
        response_dict = ast.literal_eval(response_dict)
    response = requests.post(API_URL_NEXT10, json={
        "data": [response_dict, row_count]  # len(initial)-1
    }).json()
    row_count += 10
    response_dict = response['data'][0]
    next_set = [resp[0][:-1] for resp in response_dict["data"]]
    next_set_images = '<div style="display: grid; grid-template-columns: repeat(3, 1fr); grid-template-rows: repeat(3, 1fr); grid-gap: 0; background-color: #fff; padding: 20px; box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); ">\n' + "\n".join(next_set[:-1])
    return response_dict, row_count, next_set_images
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
# Top-level Blocks app: three tabs — text-to-image, the PlaygroundAI image
# grid, and the background-removal tool — wired to the handlers defined above.
with gr.Blocks(theme='pikto/theme@>=0.0.1,<0.0.3') as pan:
    gr.Markdown("AI CONTENT TOOLS.")

    with gr.Tab("T-to-I"):

        ##model = ("stabilityai/stable-diffusion-2-1")
        model_name1 = gr.Dropdown(
            label="Choose Model",
            choices=[m["name"] for m in models],
            type="index",
            value=current_model["name"],
            interactive=True,
        )
        input_text = gr.Textbox(label="Prompt idea",)

        ## run = gr.Button("Generate Images")
        with gr.Row():
            see_prompts = gr.Button("Generate Prompts")
            run = gr.Button("Generate Images", variant="primary")

        with gr.Row():
            magic1 = gr.Textbox(label="Generated Prompt", lines=2)
            output1 = gr.Image(label="")

        with gr.Row():
            magic2 = gr.Textbox(label="Generated Prompt", lines=2)
            output2 = gr.Image(label="")

        # Each click fans out to two prompt/image pairs.
        run.click(send_it, inputs=[magic1, model_name1], outputs=[output1])
        run.click(send_it, inputs=[magic2, model_name1], outputs=[output2])
        see_prompts.click(text_it, inputs=[input_text], outputs=[magic1])
        see_prompts.click(text_it, inputs=[input_text], outputs=[magic2])

        model_name1.change(set_model, inputs=model_name1, outputs=[output1, output2,])

    with gr.Tab("AI Library"):
        #Using Gradio Demos as API - This is Hot!
        #get_next10_images(response_dict=response_dict, row_count=9)
        #position: fixed; top: 0; left: 0; width: 100%; background-color: #fff; padding: 20px; box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);

        #Defining the Blocks layout
        # with gr.Blocks(css = """#img_search img {width: 100%; height: 100%; object-fit: cover;}""") as demo:
        gr.HTML(value="top of page", elem_id="top",visible=False)
        gr.HTML("""<div style="text-align: center; max-width: 700px; margin: 0 auto;">
              <div
                style="
                  display: inline-flex;
                  align-items: center;
                  gap: 0.8rem;
                  font-size: 1.75rem;
                "
              >
                <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
                  Using Gradio API - 2 </h1><br></div>
                <div><h4 style="font-weight: 500; margin-bottom: 7px; margin-top: 5px;">
                Stream <a href="https://github.com/playgroundai/liked_images" target="_blank">PlaygroundAI Images</a> ina beautiful grid</h4><br>
              </div>""")
        # NOTE(review): nested tab reuses the "AI Library" label — confirm intended.
        with gr.Tab("AI Library"):
            #with gr.Tab(): #(elem_id = "col-container"):
            #gr.Column(): #(elem_id = "col-container"):
            b1 = gr.Button("Load More Images").style(full_width=False)
            # Hidden state round-trip: the response dict travels through this
            # textbox as a string (re-parsed in get_next10_images).
            df = gr.Textbox(visible=False,elem_id='dataframe', value=response_dict)
            row_count = gr.Number(visible=False, value=19 )
            img_search = gr.HTML(label = 'Images from PlaygroundAI dataset', elem_id="img_search",
                                 value=initial_imgs ) #initial[:-1] )

            b1.click(get_next10_images, [df, row_count], [df, row_count, img_search], api_name = "load_playgroundai_images" )

    ########################## REM-BG
    with gr.Tab("Rem_BG"):

        color_state = gr.State(value=False)
        matting_state = gr.State(value=(0, 0, 0))
        gr.HTML("<center><h1>Remove Background Tool</h1></center>")

        with gr.Row(equal_height=False):
            with gr.Column():
                input_img = gr.Image(type="pil", label="Input image")
                drp_models = gr.Dropdown(choices=model_choices, label="Model Segment", value="TracerUniversalB7")

                with gr.Row():
                    chk_include_matting = gr.Checkbox(label="Matting", value=False)
                    chk_smoot_mask = gr.Checkbox(label="Smoot Mask", value=False)
                    chk_show_mask = gr.Checkbox(label="Show Mask", value=False)
                with gr.Box(visible=False) as slider_matting:
                    slr_fg_threshold = gr.Slider(0, 300, value=270, step=1, label="Alpha matting foreground threshold")
                    slr_bg_threshold = gr.Slider(0, 50, value=20, step=1, label="Alpha matting background threshold")
                    slr_erode_size = gr.Slider(0, 20, value=11, step=1, label="Alpha matting erode size")
                with gr.Box():
                    with gr.Row():
                        chk_change_color = gr.Checkbox(label="Change background color", value=False)
                        pkr_color = gr.ColorPicker(label="Pick a new color", visible=False)
                        chk_dominant = gr.Checkbox(label="Use dominant color", value=False, visible=False)

                #######################
                ############################
                #############################
                run_btn = gr.Button(value="Remove background", variant="primary")

            with gr.Column():
                output_img = gr.Image(type="pil", label="Image Result")
                mask_img = gr.Image(type="pil", label="Image Mask", visible=False)
                gr.ClearButton(components=[input_img, output_img, mask_img])

        # Keep the matting_state tuple in sync with the three sliders.
        chk_include_matting.change(change_include_matting, inputs=[chk_include_matting],
                                   outputs=[slider_matting, matting_state,
                                            slr_fg_threshold, slr_bg_threshold, slr_erode_size])

        slr_bg_threshold.change(change_background_threshold, inputs=[slr_bg_threshold, matting_state],
                                outputs=[matting_state])

        slr_fg_threshold.change(change_foreground_threshold, inputs=[slr_fg_threshold, matting_state],
                                outputs=[matting_state])

        slr_erode_size.change(change_erode_size, inputs=[slr_erode_size, matting_state],
                              outputs=[matting_state])

        chk_show_mask.change(change_show_mask, inputs=[chk_show_mask], outputs=[mask_img])

        chk_change_color.change(change_background_mode, inputs=[chk_change_color],
                                outputs=[pkr_color, chk_dominant])

        pkr_color.change(change_picker_color, inputs=[pkr_color, chk_dominant], outputs=[color_state])

        chk_dominant.change(set_dominant_color, inputs=[chk_dominant], outputs=[color_state, pkr_color])

        run_btn.click(predict, inputs=[input_img, drp_models, chk_smoot_mask, matting_state, color_state],
                      outputs=[output_img, mask_img])


    # text_input = gr.Textbox() ## Diffuser
    # image_output = gr.Image()
    # image_button = gr.Button("Flip")


    # text_button.click(flip_text, inputs=text_input, outputs=text_output)
    # image_button.click(flip_image, inputs=image_input, outputs=image_output)
pan.queue(concurrency_count=200)
pan.launch(inline=True, show_api=True, max_threads=400)
|
back-app.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import gradio as gr
|
| 3 |
+
import ast
|
| 4 |
+
import requests
|
| 5 |
+
|
| 6 |
+
from theme_dropdown import create_theme_dropdown # noqa: F401
|
| 7 |
+
|
| 8 |
+
dropdown, js = create_theme_dropdown()
|
| 9 |
+
|
| 10 |
+
# Text-to-image models for the "T-to-I" tab (duplicated from app.py);
# `url` is the Hugging Face model id loaded as f"models/{url}".
models = [
    {"name": "Stable Diffusion 2", "url": "stabilityai/stable-diffusion-2-1"},
    {"name": "stability AI", "url": "stabilityai/stable-diffusion-2-1-base"},
    {"name": "Compressed-S-D", "url": "nota-ai/bk-sdm-small"},
    {"name": "Future Diffusion", "url": "nitrosocke/Future-Diffusion"},
    {"name": "JWST Deep Space Diffusion", "url": "dallinmackay/JWST-Deep-Space-diffusion"},
    {"name": "Robo Diffusion 3 Base", "url": "nousr/robo-diffusion-2-base"},
    {"name": "Robo Diffusion", "url": "nousr/robo-diffusion"},
    {"name": "Tron Legacy Diffusion", "url": "dallinmackay/Tron-Legacy-diffusion"},
]

# Prompt-extender Space used to expand a short idea into a full prompt.
text_gen = gr.Interface.load("spaces/daspartho/prompt-extend")

current_model = models[0]

# Pre-load one gr.Interface per model so send_it() can dispatch by index.
# NOTE(review): issues network requests at import time.
models2 = []
for model in models:
    model_url = f"models/{model['url']}"
    loaded_model = gr.Interface.load(model_url, live=True, preprocess=True)
    models2.append(loaded_model)
|
| 30 |
+
|
| 31 |
+
def text_it(inputs, text_gen=text_gen):
    # Default-arg binding captures the module-level prompt-extender Interface.
    return text_gen(inputs)
|
| 33 |
+
|
| 34 |
+
def flip_text(x):
    """Return the given sequence reversed."""
    flipped = x[::-1]
    return flipped
|
| 36 |
+
|
| 37 |
+
def send_it(inputs, model_choice):
    # model_choice is the dropdown index (type="index"); dispatch to the
    # matching pre-loaded model Interface.
    proc = models2[model_choice]
    return proc(inputs)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def flip_image(x):
    # Mirror the image array left-to-right.
    return np.fliplr(x)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def set_model(current_model_index):
    # Track the dropdown selection in module state; the returned update writes
    # the chosen model's name back to the bound components.
    global current_model
    current_model = models[current_model_index]
    return gr.update(value=f"{current_model['name']}")
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
# Back-app variant of the Blocks layout: the T-to-I tab plus a stubbed
# "Diffuser" tab whose button is not yet wired (click handlers commented out).
with gr.Blocks(theme='pikto/theme@>=0.0.1,<0.0.3') as pan:
    gr.Markdown("AI CONTENT TOOLS.")

    with gr.Tab("T-to-I"):

        ##model = ("stabilityai/stable-diffusion-2-1")
        model_name1 = gr.Dropdown(
            label="Choose Model",
            choices=[m["name"] for m in models],
            type="index",
            value=current_model["name"],
            interactive=True,
        )
        input_text = gr.Textbox(label="Prompt idea",)

        ## run = gr.Button("Generate Images")
        with gr.Row():
            see_prompts = gr.Button("Generate Prompts")
            run = gr.Button("Generate Images", variant="primary")

        with gr.Row():
            magic1 = gr.Textbox(label="Generated Prompt", lines=2)
            output1 = gr.Image(label="")

        with gr.Row():
            magic2 = gr.Textbox(label="Generated Prompt", lines=2)
            output2 = gr.Image(label="")

        # Each click fans out to two prompt/image pairs.
        run.click(send_it, inputs=[magic1, model_name1], outputs=[output1])
        run.click(send_it, inputs=[magic2, model_name1], outputs=[output2])
        see_prompts.click(text_it, inputs=[input_text], outputs=[magic1])
        see_prompts.click(text_it, inputs=[input_text], outputs=[magic2])

        model_name1.change(set_model, inputs=model_name1, outputs=[output1, output2,])

    #with gr.Tab("Flip Image"):
    #Using Gradio Demos as API - This is Hot!


    with gr.Tab("Diffuser"):
        with gr.Row():
            text_input = gr.Textbox() ## Diffuser
            image_output = gr.Image()
        image_button = gr.Button("Flip")


    # text_button.click(flip_text, inputs=text_input, outputs=text_output)
    # image_button.click(flip_image, inputs=image_input, outputs=image_output)
pan.queue(concurrency_count=200)
pan.launch(inline=True, show_api=True, max_threads=400)
|
config.json
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// NOTE(review): this "config.json" entry is actually a JavaScript snippet
// (Prodia-style job polling example), not JSON — confirm the filename.
// createJob/getJob are not defined here; presumably provided by an SDK.
let job = await createJob({
  prompt: "puppies in a cloud, 4k",
});

console.log("Job Created! Waiting...");

// Poll every 250 ms until the job reaches a terminal state.
while (job.status !== "succeeded" && job.status !== "failed") {
  await new Promise((resolve) => setTimeout(resolve, 250));

  job = await getJob(job.job);
}

if(job.status !== "succeeded") {
  throw new Error("Job failed!");
}

console.log("Generation completed!", job.imageUrl);
|
cutter.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import PIL
|
| 2 |
+
import numpy as np
|
| 3 |
+
from PIL import Image, ImageColor, ImageDraw
|
| 4 |
+
from PIL.Image import Image as PILImage
|
| 5 |
+
from pymatting.alpha.estimate_alpha_cf import estimate_alpha_cf
|
| 6 |
+
from pymatting.foreground.estimate_foreground_ml import estimate_foreground_ml
|
| 7 |
+
from pymatting.util.util import stack_images
|
| 8 |
+
from rembg.bg import post_process, naive_cutout, apply_background_color
|
| 9 |
+
from scipy.ndimage import binary_erosion
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def alpha_matting_cutout(img: PILImage, trimap: np.ndarray) -> PILImage:
    """Cut out the foreground of *img* via closed-form alpha matting.

    The image and trimap are normalized to [0, 1], an alpha channel is
    estimated from the trimap, the foreground is recovered, and the result
    is re-quantized to an 8-bit RGBA PIL image.
    """
    if img.mode in ("RGBA", "CMYK"):
        img = img.convert("RGB")

    pixels = np.asarray(img)

    pixels_norm = pixels / 255.0
    trimap_norm = trimap / 255.0

    alpha = estimate_alpha_cf(pixels_norm, trimap_norm)
    foreground = estimate_foreground_ml(pixels_norm, alpha)
    composite = stack_images(foreground, alpha)

    composite = np.clip(composite * 255, 0, 255).astype(np.uint8)
    return Image.fromarray(composite)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def generate_trimap(
    mask: PILImage,
    foreground_threshold: int,
    background_threshold: int,
    erode_structure_size: int,
) -> np.ndarray:
    """Build a 0/128/255 trimap from a soft mask.

    Pixels above *foreground_threshold* become definite foreground (255),
    pixels below *background_threshold* definite background (0), and both
    regions are eroded by a square kernel of *erode_structure_size* so the
    remaining band stays unknown (128) for alpha matting.
    """
    mask_arr = np.asarray(mask)

    fg_region = mask_arr > foreground_threshold
    bg_region = mask_arr < background_threshold

    kernel = None
    if erode_structure_size > 0:
        kernel = np.ones(
            (erode_structure_size, erode_structure_size), dtype=np.uint8
        )

    # border_value=1 on the background pass keeps image borders background.
    fg_region = binary_erosion(fg_region, structure=kernel)
    bg_region = binary_erosion(bg_region, structure=kernel, border_value=1)

    trimap = np.full(mask_arr.shape, dtype=np.uint8, fill_value=128)
    trimap[fg_region] = 255
    trimap[bg_region] = 0

    return trimap
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def get_background_dominant_color(img: PILImage, mask: PILImage) -> tuple:
    """Estimate the dominant background color of *img*.

    The mask is inverted so only background pixels keep alpha, then the image
    is resized to a single pixel, which averages the visible area.
    """
    background = img.copy()
    inverted_mask = PIL.ImageOps.invert(mask)
    background.putalpha(inverted_mask)
    # Collapsing to 1x1 averages all (alpha-weighted) pixels.
    r, g, b, _ = background.resize((1, 1)).getpixel((0, 0))
    return r, g, b, 255
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def remove(session, img: PILImage, smoot: bool, matting: tuple, color) -> (PILImage, PILImage):
    """Remove the background of *img* using *session*'s predicted mask.

    Args:
        session: rembg session exposing ``predict(img) -> [mask]``.
        img: input PIL image.
        smoot: if True, post-process (smooth) the predicted mask.
        matting: ``(fg_threshold, bg_threshold, erode_size)``; any positive
            value switches to alpha matting via a generated trimap.
        color: ``True`` for dominant background color, a color string for an
            explicit background, anything else keeps transparency.

    Returns:
        ``(cutout, mask)`` as PIL images.

    Raises:
        ValueError: propagated from alpha matting on a degenerate trimap
            (the caller renders it as an error label).
    """
    mask = session.predict(img)[0]

    if smoot:
        mask = PIL.Image.fromarray(post_process(np.array(mask)))

    fg_t, bg_t, erode = matting

    if fg_t > 0 or bg_t > 0 or erode > 0:
        mask = generate_trimap(mask, *matting)
        # The original wrapped this in `except ValueError as err: raise err`,
        # which is a no-op; let the exception propagate naturally.
        cutout = alpha_matting_cutout(img, mask)
        mask = PIL.Image.fromarray(mask)
    else:
        cutout = naive_cutout(img, mask)

    if color is True:
        color = get_background_dominant_color(img, mask)
        cutout = apply_background_color(cutout, color)
    elif isinstance(color, str):
        r, g, b = ImageColor.getcolor(color, "RGB")
        cutout = apply_background_color(cutout, (r, g, b, 255))

    return cutout, mask
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def make_label(text, width=600, height=200, color="black") -> PILImage:
    # Render *text* centered on a solid-color image (used as an error banner).
    image = Image.new("RGB", (width, height), color)
    draw = ImageDraw.Draw(image)
    # NOTE(review): ImageDraw.textsize was removed in Pillow 10 — confirm the
    # pinned Pillow version or migrate to textbbox/textlength.
    text_width, text_height = draw.textsize(text)
    draw.text(((width-text_width)/2, height/2), text)
    return image
|
flipper.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import gradio as gr
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def flip_text(x):
    """Return *x* in reverse order."""
    result = x[::-1]
    return result
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def flip_image(x):
    """Mirror the image array left-to-right."""
    flipped = np.fliplr(x)
    return flipped
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Minimal two-tab demo wiring the flip_text/flip_image handlers above.
with gr.Blocks() as demo:
    gr.Markdown("Flip text or image files using this demo.")
    with gr.Tab("Flip Text"):
        text_input = gr.Textbox()
        text_output = gr.Textbox()
        text_button = gr.Button("Flip")
    with gr.Tab("Flip Image"):
        with gr.Row():
            image_input = gr.Image()
            image_output = gr.Image()
        image_button = gr.Button("Flip")

    with gr.Accordion("Open for More!"):
        gr.Markdown("Look at me...")

    text_button.click(flip_text, inputs=text_input, outputs=text_output)
    image_button.click(flip_image, inputs=image_input, outputs=image_output)

demo.launch()
|
play.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import ast
|
| 2 |
+
import requests
|
| 3 |
+
|
| 4 |
+
#Using Gradio Demos as API - This is Hot!
|
| 5 |
+
API_URL_INITIAL = "https://ysharma-playground-ai-exploration.hf.space/run/initial_dataframe"
|
| 6 |
+
API_URL_NEXT10 = "https://ysharma-playground-ai-exploration.hf.space/run/next_10_rows"
|
| 7 |
+
|
| 8 |
+
#define inference function
|
| 9 |
+
#First: Get initial images for the grid display
|
| 10 |
+
def get_initial_images():
    # Fetch the first page of the image grid from the remote Gradio Space API.
    # NOTE(review): invoked at module import below — blocking network call.
    response = requests.post(API_URL_INITIAL, json={
        "data": []
    }).json()
    #data = response["data"][0]['data'][0][0][:-1]
    response_dict = response['data'][0]
    return response_dict #, [resp[0][:-1] for resp in response["data"][0]["data"]]
|
| 17 |
+
|
| 18 |
+
#Second: Process response dictionary to get imges as hyperlinked image tags
|
| 19 |
+
def process_response(response_dict):
    """Strip the trailing character from each image-tag row in the payload."""
    cleaned = []
    for entry in response_dict["data"]:
        cleaned.append(entry[0][:-1])
    return cleaned
|
| 21 |
+
|
| 22 |
+
# Fetch the first page at import time and pre-render it as an HTML grid.
# NOTE(review): network request during module import.
response_dict = get_initial_images()
initial = process_response(response_dict)
initial_imgs = '<div style="display: grid; grid-template-columns: repeat(3, 1fr); grid-template-rows: repeat(3, 1fr); grid-gap: 0; background-color: #fff; padding: 20px; box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);">\n' + "\n".join(initial[:-1])
|
| 25 |
+
|
| 26 |
+
#Third: Load more images for the grid
|
| 27 |
+
def get_next10_images(response_dict, row_count):
    """Fetch the next 10 rows of images from the remote Space API.

    Args:
        response_dict: dict describing the current dataframe state, or its
            string repr (it round-trips through a hidden gr.Textbox).
        row_count: current row offset; advanced by 10 per call.

    Returns:
        ``(updated response_dict, new row_count, HTML grid of the next images)``
    """
    row_count = int(row_count)
    # The state arrives as a string from the hidden textbox; rebuild it with
    # literal_eval (safe: parses literals only, never executes code).
    if not isinstance(response_dict, dict):
        response_dict = ast.literal_eval(response_dict)
    response = requests.post(API_URL_NEXT10, json={
        "data": [response_dict, row_count]  # len(initial)-1
    }).json()
    row_count += 10
    response_dict = response['data'][0]
    next_set = [resp[0][:-1] for resp in response_dict["data"]]
    next_set_images = '<div style="display: grid; grid-template-columns: repeat(3, 1fr); grid-template-rows: repeat(3, 1fr); grid-gap: 0; background-color: #fff; padding: 20px; box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); ">\n' + "\n".join(next_set[:-1])
    return response_dict, row_count, next_set_images
|
| 43 |
+
|
| 44 |
+
#get_next10_images(response_dict=response_dict, row_count=9)
|
| 45 |
+
#position: fixed; top: 0; left: 0; width: 100%; background-color: #fff; padding: 20px; box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);
|
| 46 |
+
|
# Defining the Blocks layout.
# NOTE(review): `demo.launch()` is not called in the visible portion of this
# file — confirm the Space entry point launches this app.
with gr.Blocks(css = """#img_search img {width: 100%; height: 100%; object-fit: cover;}""") as demo:
    # Hidden anchor used as a scroll target for "top of page".
    gr.HTML(value="top of page", elem_id="top", visible=False)
    # Page header / title banner.
    gr.HTML("""<div style="text-align: center; max-width: 700px; margin: 0 auto;">
              <div
                style="
                  display: inline-flex;
                  align-items: center;
                  gap: 0.8rem;
                  font-size: 1.75rem;
                "
              >
              <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
              Using Gradio Demos as API - 2 </h1><br></div>
              <div><h4 style="font-weight: 500; margin-bottom: 7px; margin-top: 5px;">
              Stream <a href="https://github.com/playgroundai/liked_images" target="_blank">PlaygroundAI Images</a> ina beautiful grid</h4><br>
              </div>""")
    # Collapsible explanation of the demo's API usage.
    with gr.Accordion(label="Details about the working:", open=False, elem_id='accordion'):
        gr.HTML("""
        <p style="margin-bottom: 10px; font-size: 90%"><br>
        ▶️Do you see the "view api" link located in the footer of this application?
        By clicking on this link, a page will open which provides documentation on the REST API that developers can use to query the Interface function / Block events.<br>
        ▶️In this demo, I am making such an API request to the <a href="https://huggingface.co/spaces/ysharma/Playground_AI_Exploration" target="_blank">Playground_AI_Exploration</a> Space.<br>
        ▶️I am exposing an API endpoint of this Gradio app as well. This can easily be done by one line of code, just set the api_name parameter of the event listener.
        </p></div>""")

    with gr.Column():  #(elem_id = "col-container"):
        # Button that pages in the next 10 images.
        b1 = gr.Button("Load More Images").style(full_width=False)
        # Hidden state: the API's response dict, round-tripped as text.
        df = gr.Textbox(visible=False, elem_id='dataframe', value=response_dict)
        # Hidden state: current row offset into the dataset.
        row_count = gr.Number(visible=False, value=19)
        # The image grid itself, seeded with the initial batch.
        img_search = gr.HTML(label='Images from PlaygroundAI dataset', elem_id="img_search",
                             value=initial_imgs)  #initial[:-1] )

    gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/Stream_PlaygroundAI_Images?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a></center>
            </p></div>''')
    # api_name exposes this click event as a named REST endpoint on the Space.
    b1.click(get_next10_images, [df, row_count], [df, row_count, img_search], api_name="load_playgroundai_images")
pob
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
import requests
import json  # NOTE(review): imported but unused in this file

# Prodia REST endpoint for submitting generation jobs.
url = "https://api.prodia.com/v1/job"

# NOTE(review): this API key is committed to the repository in plaintext —
# it is exposed and should be rotated, then loaded from an environment
# variable or Space secret instead of being hardcoded here.
headers = {
    "accept": "application/json",
    "content-type": "application/json",
    "X-Prodia-Key": "69e66898-010d-4cd1-9e22-090f73ad007b"
}
# Model registry: display name -> checkpoint identifier string.
models = [
    {"name": "Timeless", "url": "timeless-1.0.ckpt 1.0.ckpt [7c4971d4]"},
    {"name": "Dreamlike-diffusion-2.0.", "url": "dreamlike-diffusion-2.0.safetensors [fdcf65e7]"},
    {"name": "Deliberate_v2", "url": "deliberate_v2.safetensors [10ec4b29]"},
    {"name": "Anything-v4.5-pruned", "url": "anything-v4.5-pruned.ckpt [65745d25]"},
]

# Default selection shown in the dropdown.
current_model = models[0]

# Load each entry as a live Gradio interface.
# NOTE(review): "models/<checkpoint file>" is not a valid Hub repo id, so
# gr.Interface.load will likely fail for these entries — confirm the
# intended model sources.
models2 = []
for model in models:
    model_url = f"models/{model['url']}"
    loaded_model = gr.Interface.load(model_url, live=True, preprocess=True)
    models2.append(loaded_model)
def text_it(inputs, text_gen=None):
    """Run a prompt-generator callable on *inputs* and return its output.

    The original default (``text_gen=text_gen``) referenced an undefined
    name, which raised NameError the moment this module was imported. The
    generator is now an explicit argument with a ``None`` sentinel.

    Raises:
        ValueError: if no generator callable is supplied.
    """
    if text_gen is None:
        raise ValueError("text_it requires a text_gen callable")
    return text_gen(inputs)
def set_model(current_model_index):
    """Make the model at *current_model_index* the active one and refresh the label."""
    global current_model
    current_model = models[current_model_index]
    return gr.update(value=current_model["name"])
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def send_it(inputs, model_choice):
|
| 38 |
+
proc = models2[model_choice]
|
| 39 |
+
return proc(inputs)
|
# UI layout for the Prodia demo.
with gr.Blocks() as myface:
    # Placeholder header; no content was supplied in the original commit.
    gr.HTML(
    )

    with gr.Row():
        with gr.Row():
            # Fixed label typo: "Iput Prompt" -> "Input Prompt".
            input_text = gr.Textbox(label="Input Prompt", placeholder="", lines=1)
            # Model selection dropdown (type="index" yields an int into models/models2).
            model_name1 = gr.Dropdown(
                label="Choose Model",
                choices=[m["name"] for m in models],
                type="index",
                value=current_model["name"],
                interactive=True,
            )
        with gr.Row():
            see_prompts = gr.Button("Generate Prompts")
            run = gr.Button("Generate Images", variant="primary")
            # NOTE(review): neither button is wired to a handler (no .click events)
            # and the dropdown never calls set_model — confirm intended wiring.

if __name__ == "__main__":
    # Fixed: the Blocks context is named `myface`; `demo` was undefined here
    # and launching raised NameError.
    myface.launch()
requirements.txt
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
rembg~=2.0.47
|
| 2 |
+
pillow~=9.5.0
|
| 3 |
+
pymatting
|
| 4 |
+
opencv-python-headless
|
theme_dropdown.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import pathlib
|
| 3 |
+
|
| 4 |
+
from gradio.themes.utils import ThemeAsset
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def create_theme_dropdown():
|
| 8 |
+
import gradio as gr
|
| 9 |
+
|
| 10 |
+
asset_path = pathlib.Path(__file__).parent / "themes"
|
| 11 |
+
themes = []
|
| 12 |
+
for theme_asset in os.listdir(str(asset_path)):
|
| 13 |
+
themes.append(
|
| 14 |
+
(ThemeAsset(theme_asset), gr.Theme.load(str(asset_path / theme_asset)))
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
def make_else_if(theme_asset):
|
| 18 |
+
return f"""
|
| 19 |
+
else if (theme == '{str(theme_asset[0].version)}') {{
|
| 20 |
+
var theme_css = `{theme_asset[1]._get_theme_css()}`
|
| 21 |
+
}}"""
|
| 22 |
+
|
| 23 |
+
head, tail = themes[0], themes[1:]
|
| 24 |
+
if_statement = f"""
|
| 25 |
+
if (theme == "{str(head[0].version)}") {{
|
| 26 |
+
var theme_css = `{head[1]._get_theme_css()}`
|
| 27 |
+
}} {" ".join(make_else_if(t) for t in tail)}
|
| 28 |
+
"""
|
| 29 |
+
|
| 30 |
+
latest_to_oldest = sorted([t[0] for t in themes], key=lambda asset: asset.version)[
|
| 31 |
+
::-1
|
| 32 |
+
]
|
| 33 |
+
latest_to_oldest = [str(t.version) for t in latest_to_oldest]
|
| 34 |
+
|
| 35 |
+
component = gr.Dropdown(
|
| 36 |
+
choices=latest_to_oldest,
|
| 37 |
+
value=latest_to_oldest[0],
|
| 38 |
+
render=False,
|
| 39 |
+
label="Select Version",
|
| 40 |
+
).style(container=False)
|
| 41 |
+
|
| 42 |
+
return (
|
| 43 |
+
component,
|
| 44 |
+
f"""
|
| 45 |
+
(theme) => {{
|
| 46 |
+
if (!document.querySelector('.theme-css')) {{
|
| 47 |
+
var theme_elem = document.createElement('style');
|
| 48 |
+
theme_elem.classList.add('theme-css');
|
| 49 |
+
document.head.appendChild(theme_elem);
|
| 50 |
+
}} else {{
|
| 51 |
+
var theme_elem = document.querySelector('.theme-css');
|
| 52 |
+
}}
|
| 53 |
+
{if_statement}
|
| 54 |
+
theme_elem.innerHTML = theme_css;
|
| 55 |
+
}}
|
| 56 |
+
""",
|
| 57 |
+
)
|
themes/theme_schema@0.0.1.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"theme": {"_font": [{"__gradio_font__": true, "name": "Poppins", "class": "google"}, {"__gradio_font__": true, "name": "Source Sans Pro", "class": "google"}, {"__gradio_font__": true, "name": "system-ui", "class": "font"}, {"__gradio_font__": true, "name": "sans-system-ui", "class": "font"}], "_font_mono": [{"__gradio_font__": true, "name": "DM Mono", "class": "google"}, {"__gradio_font__": true, "name": "ui-monospace", "class": "font"}, {"__gradio_font__": true, "name": "Consolas", "class": "font"}, {"__gradio_font__": true, "name": "monospace", "class": "font"}], "_stylesheets": ["https://fonts.googleapis.com/css2?family=Poppins:wght@400;600&display=swap", "https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap", "https://fonts.googleapis.com/css2?family=DM+Mono:wght@400;600&display=swap"], "background_fill_primary": "white", "background_fill_primary_dark": "*neutral_950", "background_fill_secondary": "*neutral_50", "background_fill_secondary_dark": "*neutral_900", "block_background_fill": "*background_fill_primary", "block_background_fill_dark": "*neutral_800", "block_border_color": "*border_color_primary", "block_border_color_dark": "*border_color_primary", "block_border_width": "1px", "block_info_text_color": "*body_text_color_subdued", "block_info_text_color_dark": "*body_text_color_subdued", "block_info_text_size": "*text_sm", "block_info_text_weight": "400", "block_label_background_fill": "*background_fill_primary", "block_label_background_fill_dark": "*background_fill_secondary", "block_label_border_color": "*border_color_primary", "block_label_border_color_dark": "*border_color_primary", "block_label_border_width": "1px", "block_label_margin": "0", "block_label_padding": "*spacing_sm *spacing_lg", "block_label_radius": "calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px) 0", "block_label_right_radius": "0 calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px)", "block_label_text_color": "*neutral_500", "block_label_text_color_dark": 
"*neutral_200", "block_label_text_size": "*text_sm", "block_label_text_weight": "400", "block_padding": "*spacing_xl calc(*spacing_xl + 2px)", "block_radius": "*radius_lg", "block_shadow": "none", "block_title_background_fill": "none", "block_title_border_color": "none", "block_title_border_width": "0px", "block_title_padding": "0", "block_title_radius": "none", "block_title_text_color": "*neutral_500", "block_title_text_color_dark": "*neutral_200", "block_title_text_size": "*text_md", "block_title_text_weight": "400", "body_background_fill": "*background_fill_primary", "body_background_fill_dark": "*background_fill_primary", "body_text_color": "*neutral_800", "body_text_color_dark": "*neutral_100", "body_text_color_subdued": "*neutral_400", "body_text_color_subdued_dark": "*neutral_400", "body_text_size": "*text_md", "body_text_weight": "400", "border_color_accent": "*primary_300", "border_color_accent_dark": "*neutral_600", "border_color_primary": "*neutral_200", "border_color_primary_dark": "*neutral_700", "button_border_width": "*input_border_width", "button_border_width_dark": "*input_border_width", "button_cancel_background_fill": "*button_secondary_background_fill", "button_cancel_background_fill_dark": "*button_secondary_background_fill", "button_cancel_background_fill_hover": "*button_cancel_background_fill", "button_cancel_background_fill_hover_dark": "*button_cancel_background_fill", "button_cancel_border_color": "*button_secondary_border_color", "button_cancel_border_color_dark": "*button_secondary_border_color", "button_cancel_border_color_hover": "*button_cancel_border_color", "button_cancel_border_color_hover_dark": "*button_cancel_border_color", "button_cancel_text_color": "*button_secondary_text_color", "button_cancel_text_color_dark": "*button_secondary_text_color", "button_cancel_text_color_hover": "*button_cancel_text_color", "button_cancel_text_color_hover_dark": "*button_cancel_text_color", "button_large_padding": "*spacing_lg calc(2 * 
*spacing_lg)", "button_large_radius": "*radius_lg", "button_large_text_size": "*text_lg", "button_large_text_weight": "600", "button_primary_background_fill": "*primary_200", "button_primary_background_fill_dark": "*primary_700", "button_primary_background_fill_hover": "*button_primary_background_fill", "button_primary_background_fill_hover_dark": "*button_primary_background_fill", "button_primary_border_color": "*primary_200", "button_primary_border_color_dark": "*primary_600", "button_primary_border_color_hover": "*button_primary_border_color", "button_primary_border_color_hover_dark": "*button_primary_border_color", "button_primary_text_color": "*primary_600", "button_primary_text_color_dark": "white", "button_primary_text_color_hover": "*button_primary_text_color", "button_primary_text_color_hover_dark": "*button_primary_text_color", "button_secondary_background_fill": "*neutral_200", "button_secondary_background_fill_dark": "*neutral_600", "button_secondary_background_fill_hover": "*button_secondary_background_fill", "button_secondary_background_fill_hover_dark": "*button_secondary_background_fill", "button_secondary_border_color": "*neutral_200", "button_secondary_border_color_dark": "*neutral_600", "button_secondary_border_color_hover": "*button_secondary_border_color", "button_secondary_border_color_hover_dark": "*button_secondary_border_color", "button_secondary_text_color": "*neutral_700", "button_secondary_text_color_dark": "white", "button_secondary_text_color_hover": "*button_secondary_text_color", "button_secondary_text_color_hover_dark": "*button_secondary_text_color", "button_shadow": "none", "button_shadow_active": "none", "button_shadow_hover": "none", "button_small_padding": "*spacing_sm calc(2 * *spacing_sm)", "button_small_radius": "*radius_lg", "button_small_text_size": "*text_md", "button_small_text_weight": "400", "button_transition": "background-color 0.2s ease", "checkbox_background_color": "*background_fill_primary", 
"checkbox_background_color_dark": "*neutral_800", "checkbox_background_color_focus": "*checkbox_background_color", "checkbox_background_color_focus_dark": "*checkbox_background_color", "checkbox_background_color_hover": "*checkbox_background_color", "checkbox_background_color_hover_dark": "*checkbox_background_color", "checkbox_background_color_selected": "*secondary_600", "checkbox_background_color_selected_dark": "*secondary_600", "checkbox_border_color": "*neutral_300", "checkbox_border_color_dark": "*neutral_700", "checkbox_border_color_focus": "*secondary_500", "checkbox_border_color_focus_dark": "*secondary_500", "checkbox_border_color_hover": "*neutral_300", "checkbox_border_color_hover_dark": "*neutral_600", "checkbox_border_color_selected": "*secondary_600", "checkbox_border_color_selected_dark": "*secondary_600", "checkbox_border_radius": "*radius_sm", "checkbox_border_width": "*input_border_width", "checkbox_border_width_dark": "*input_border_width", "checkbox_check": "url(\"data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e\")", "checkbox_label_background_fill": "*button_secondary_background_fill", "checkbox_label_background_fill_dark": "*button_secondary_background_fill", "checkbox_label_background_fill_hover": "*button_secondary_background_fill_hover", "checkbox_label_background_fill_hover_dark": "*button_secondary_background_fill_hover", "checkbox_label_background_fill_selected": "*checkbox_label_background_fill", "checkbox_label_background_fill_selected_dark": "*checkbox_label_background_fill", "checkbox_label_border_color": "*border_color_primary", "checkbox_label_border_color_dark": "*border_color_primary", "checkbox_label_border_color_hover": "*checkbox_label_border_color", "checkbox_label_border_color_hover_dark": "*checkbox_label_border_color", 
"checkbox_label_border_width": "*input_border_width", "checkbox_label_border_width_dark": "*input_border_width", "checkbox_label_gap": "*spacing_lg", "checkbox_label_padding": "*spacing_md calc(2 * *spacing_md)", "checkbox_label_shadow": "none", "checkbox_label_text_color": "*body_text_color", "checkbox_label_text_color_dark": "*body_text_color", "checkbox_label_text_color_selected": "*checkbox_label_text_color", "checkbox_label_text_color_selected_dark": "*checkbox_label_text_color", "checkbox_label_text_size": "*text_md", "checkbox_label_text_weight": "400", "checkbox_shadow": "*input_shadow", "color_accent": "*primary_500", "color_accent_soft": "*primary_50", "color_accent_soft_dark": "*neutral_700", "container_radius": "*radius_lg", "embed_radius": "*radius_lg", "error_background_fill": "#fee2e2", "error_background_fill_dark": "*background_fill_primary", "error_border_color": "#fecaca", "error_border_color_dark": "*border_color_primary", "error_border_width": "1px", "error_text_color": "#ef4444", "error_text_color_dark": "#ef4444", "font": "'Poppins', 'Source Sans Pro', 'system-ui', 'sans-system-ui'", "font_mono": "'DM Mono', 'ui-monospace', 'Consolas', monospace", "form_gap_width": "0px", "input_background_fill": "*neutral_100", "input_background_fill_dark": "*neutral_700", "input_background_fill_focus": "*secondary_500", "input_background_fill_focus_dark": "*secondary_600", "input_background_fill_hover": "*input_background_fill", "input_background_fill_hover_dark": "*input_background_fill", "input_border_color": "*border_color_primary", "input_border_color_dark": "*border_color_primary", "input_border_color_focus": "*secondary_300", "input_border_color_focus_dark": "*neutral_700", "input_border_color_hover": "*input_border_color", "input_border_color_hover_dark": "*input_border_color", "input_border_width": "0px", "input_padding": "*spacing_xl", "input_placeholder_color": "*neutral_400", "input_placeholder_color_dark": "*neutral_500", "input_radius": 
"*radius_lg", "input_shadow": "none", "input_shadow_focus": "*input_shadow", "input_text_size": "*text_md", "input_text_weight": "400", "layout_gap": "*spacing_xxl", "link_text_color": "*secondary_600", "link_text_color_active": "*secondary_600", "link_text_color_active_dark": "*secondary_500", "link_text_color_dark": "*secondary_500", "link_text_color_hover": "*secondary_700", "link_text_color_hover_dark": "*secondary_400", "link_text_color_visited": "*secondary_500", "link_text_color_visited_dark": "*secondary_600", "loader_color": "*color_accent", "name": "base", "neutral_100": "#f4f4f5", "neutral_200": "#e4e4e7", "neutral_300": "#d4d4d8", "neutral_400": "#a1a1aa", "neutral_50": "#fafafa", "neutral_500": "#71717a", "neutral_600": "#52525b", "neutral_700": "#3f3f46", "neutral_800": "#27272a", "neutral_900": "#18181b", "neutral_950": "#0f0f11", "panel_background_fill": "*background_fill_secondary", "panel_background_fill_dark": "*background_fill_secondary", "panel_border_color": "*border_color_primary", "panel_border_color_dark": "*border_color_primary", "panel_border_width": "0", "primary_100": "#d6def5", "primary_200": "#adbeeb", "primary_300": "#819adf", "primary_400": "#5879d5", "primary_50": "#ebeffa", "primary_500": "#3e5ac8", "primary_600": "#28489f", "primary_700": "#112344", "primary_800": "#0b172d", "primary_900": "#060d18", "primary_950": "#03060c", "prose_header_text_weight": "600", "prose_text_size": "*text_md", "prose_text_weight": "400", "radio_circle": "url(\"data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e\")", "radius_lg": "12px", "radius_md": "8px", "radius_sm": "6px", "radius_xl": "16px", "radius_xs": "4px", "radius_xxl": "24px", "radius_xxs": "2px", "secondary_100": "#f3f4f6", "secondary_200": "#e5e7eb", "secondary_300": "#d1d5db", "secondary_400": "#9ca3af", "secondary_50": "#f9fafb", "secondary_500": "#6b7280", "secondary_600": "#4b5563", 
"secondary_700": "#374151", "secondary_800": "#1f2937", "secondary_900": "#111827", "secondary_950": "#0b0f19", "section_header_text_size": "*text_md", "section_header_text_weight": "400", "shadow_drop": "rgba(0,0,0,0.05) 0px 1px 2px 0px", "shadow_drop_lg": "0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1)", "shadow_inset": "rgba(0,0,0,0.05) 0px 2px 4px 0px inset", "shadow_spread": "3px", "shadow_spread_dark": "1px", "slider_color": "auto", "spacing_lg": "10px", "spacing_md": "8px", "spacing_sm": "6px", "spacing_xl": "14px", "spacing_xs": "4px", "spacing_xxl": "28px", "spacing_xxs": "2px", "stat_background_fill": "*primary_300", "stat_background_fill_dark": "*primary_500", "table_border_color": "*neutral_300", "table_border_color_dark": "*neutral_700", "table_even_background_fill": "white", "table_even_background_fill_dark": "*neutral_950", "table_odd_background_fill": "*neutral_50", "table_odd_background_fill_dark": "*neutral_900", "table_radius": "*radius_lg", "table_row_focus": "*color_accent_soft", "table_row_focus_dark": "*color_accent_soft", "text_lg": "16px", "text_md": "14px", "text_sm": "12px", "text_xl": "22px", "text_xs": "10px", "text_xxl": "26px", "text_xxs": "9px"}, "version": "0.0.1"}
|
transform.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import requests

# Prodia image-transform endpoint.
url = "https://api.prodia.com/v1/transform"

# NOTE(review): this API key is committed to the repository in plaintext —
# it is exposed in the git history and should be rotated, then read from an
# environment variable instead of being hardcoded.
headers = {
    "accept": "application/json",
    "content-type": "application/json",
    "X-Prodia-Key": "69e66898-010d-4cd1-9e22-090f73ad007b"
}

# Fire the request and dump the raw response body.
# NOTE(review): no JSON payload is sent even though content-type is
# application/json — presumably the endpoint expects a body; confirm.
response = requests.post(url, headers=headers)

print(response.text)
utils.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def keys(dictionary: dict) -> list:
    """Return the dictionary's keys as a list (insertion order preserved).

    Iterating the dict yields its keys directly; the original comprehension
    walked ``.items()`` and discarded every value.
    """
    return list(dictionary)
def split_numbers(numbers: str) -> list:
    """Parse a comma-separated string of integers into a list of ints.

    Empty or whitespace-only segments are skipped, so ``""`` returns ``[]``
    and ``"1,,2"`` returns ``[1, 2]`` — the original raised ValueError on
    both because ``"".split(",")`` yields ``[""]`` and ``int("")`` fails.
    """
    return [int(part) for part in numbers.split(",") if part.strip()]