Update app-BACKUP-LAST.py

app-BACKUP-LAST.py  +298 −40  CHANGED
@@ -91,47 +91,237 @@ def load_json_from_text_or_file(json_text: str, file_obj) -> typing.Tuple[typing
     except Exception as e:
         return None, f"❌ Error: {str(e)}"
 
-def create_sample_workflow():
+def create_sample_workflow(example_type="basic"):
     """Create a sample workflow."""
-    … (old sample body not rendered in this view)
+
+    if example_type == "basic":
+        # Basic example: a simple Q&A flow
+        return {
+            "nodes": [
+                {
+                    "id": "input_1",
+                    "type": "ChatInput",
+                    "position": {"x": 100, "y": 200},
+                    "data": {
+                        "label": "User Question",
+                        "template": {
+                            "input_value": {"value": "What is the capital of Korea?"}
+                        }
+                    }
+                },
+                {
+                    "id": "llm_1",
+                    "type": "llmNode",
+                    "position": {"x": 400, "y": 200},
+                    "data": {
+                        "label": "AI Processing",
+                        "template": {
+                            "provider": {"value": "OpenAI"},
+                            "model": {"value": "gpt-4.1-mini"},
+                            "temperature": {"value": 0.7},
+                            "system_prompt": {"value": "You are a helpful assistant."}
+                        }
+                    }
+                },
+                {
+                    "id": "output_1",
+                    "type": "ChatOutput",
+                    "position": {"x": 700, "y": 200},
+                    "data": {"label": "Answer"}
+                }
+            ],
+            "edges": [
+                {"id": "e1", "source": "input_1", "target": "llm_1"},
+                {"id": "e2", "source": "llm_1", "target": "output_1"}
+            ]
+        }
+
+    elif example_type == "vidraft":
+        # VIDraft example (Korean sample prompt)
+        return {
+            "nodes": [
+                {
+                    "id": "input_1",
+                    "type": "ChatInput",
+                    "position": {"x": 100, "y": 200},
+                    "data": {
+                        "label": "User Input",
+                        "template": {
+                            "input_value": {"value": "AI와 머신러닝의 차이점을 설명해주세요."}
+                        }
+                    }
+                },
+                {
+                    "id": "llm_1",
+                    "type": "llmNode",
+                    "position": {"x": 400, "y": 200},
+                    "data": {
+                        "label": "VIDraft AI (Gemma)",
+                        "template": {
+                            "provider": {"value": "VIDraft"},
+                            "model": {"value": "Gemma-3-r1984-27B"},
+                            "temperature": {"value": 0.8},
+                            "system_prompt": {"value": "당신은 전문적이고 친절한 AI 교육자입니다. 복잡한 개념을 쉽게 설명해주세요."}
+                        }
+                    }
+                },
+                {
+                    "id": "output_1",
+                    "type": "ChatOutput",
+                    "position": {"x": 700, "y": 200},
+                    "data": {"label": "AI Explanation"}
+                }
+            ],
+            "edges": [
+                {"id": "e1", "source": "input_1", "target": "llm_1"},
+                {"id": "e2", "source": "llm_1", "target": "output_1"}
+            ]
+        }
+
+    elif example_type == "multi_input":
+        # Multi-input example
+        return {
+            "nodes": [
+                {
+                    "id": "name_input",
+                    "type": "textInput",
+                    "position": {"x": 100, "y": 100},
+                    "data": {
+                        "label": "Your Name",
+                        "template": {
+                            "input_value": {"value": "John"}
+                        }
+                    }
+                },
+                {
+                    "id": "topic_input",
+                    "type": "textInput",
+                    "position": {"x": 100, "y": 250},
+                    "data": {
+                        "label": "Topic",
+                        "template": {
+                            "input_value": {"value": "Python programming"}
+                        }
+                    }
+                },
+                {
+                    "id": "level_input",
+                    "type": "textInput",
+                    "position": {"x": 100, "y": 400},
+                    "data": {
+                        "label": "Skill Level",
+                        "template": {
+                            "input_value": {"value": "beginner"}
+                        }
+                    }
+                },
+                {
+                    "id": "combiner",
+                    "type": "textNode",
+                    "position": {"x": 350, "y": 250},
+                    "data": {
+                        "label": "Combine Inputs",
+                        "template": {
+                            "text": {"value": "Create a personalized learning plan"}
+                        }
+                    }
+                },
+                {
+                    "id": "llm_1",
+                    "type": "llmNode",
+                    "position": {"x": 600, "y": 250},
+                    "data": {
+                        "label": "Generate Learning Plan",
+                        "template": {
+                            "provider": {"value": "OpenAI"},
+                            "model": {"value": "gpt-4.1-mini"},
+                            "temperature": {"value": 0.7},
+                            "system_prompt": {"value": "You are an expert educational consultant. Create personalized learning plans based on the user's name, topic of interest, and skill level."}
+                        }
+                    }
+                },
+                {
+                    "id": "output_1",
+                    "type": "ChatOutput",
+                    "position": {"x": 900, "y": 250},
+                    "data": {"label": "Your Learning Plan"}
+                }
+            ],
+            "edges": [
+                {"id": "e1", "source": "name_input", "target": "combiner"},
+                {"id": "e2", "source": "topic_input", "target": "combiner"},
+                {"id": "e3", "source": "level_input", "target": "combiner"},
+                {"id": "e4", "source": "combiner", "target": "llm_1"},
+                {"id": "e5", "source": "llm_1", "target": "output_1"}
+            ]
+        }
+
+    elif example_type == "chain":
+        # Chain-processing example
+        return {
+            "nodes": [
+                {
+                    "id": "input_1",
+                    "type": "ChatInput",
+                    "position": {"x": 50, "y": 200},
+                    "data": {
+                        "label": "Original Text",
+                        "template": {
+                            "input_value": {"value": "The quick brown fox jumps over the lazy dog."}
+                        }
+                    }
+                },
+                {
+                    "id": "translator",
+                    "type": "llmNode",
+                    "position": {"x": 300, "y": 200},
+                    "data": {
+                        "label": "Translate to Korean",
+                        "template": {
+                            "provider": {"value": "VIDraft"},
+                            "model": {"value": "Gemma-3-r1984-27B"},
+                            "temperature": {"value": 0.3},
+                            "system_prompt": {"value": "You are a professional translator. Translate the given English text to Korean accurately."}
+                        }
+                    }
+                },
+                {
+                    "id": "analyzer",
+                    "type": "llmNode",
+                    "position": {"x": 600, "y": 200},
+                    "data": {
+                        "label": "Analyze Translation",
+                        "template": {
+                            "provider": {"value": "OpenAI"},
+                            "model": {"value": "gpt-4.1-mini"},
+                            "temperature": {"value": 0.5},
+                            "system_prompt": {"value": "You are a linguistic expert. Analyze the Korean translation and explain its nuances and cultural context."}
+                        }
+                    }
+                },
+                {
+                    "id": "output_translation",
+                    "type": "ChatOutput",
+                    "position": {"x": 450, "y": 350},
+                    "data": {"label": "Korean Translation"}
+                },
+                {
+                    "id": "output_analysis",
+                    "type": "ChatOutput",
+                    "position": {"x": 900, "y": 200},
+                    "data": {"label": "Translation Analysis"}
+                }
+            ],
+            "edges": [
+                {"id": "e1", "source": "input_1", "target": "translator"},
+                {"id": "e2", "source": "translator", "target": "analyzer"},
+                {"id": "e3", "source": "translator", "target": "output_translation"},
+                {"id": "e4", "source": "analyzer", "target": "output_analysis"}
+            ]
+        }
+
+    # Default is "basic"
+    return create_sample_workflow("basic")
 
 # Actual workflow execution function for running the UI
 def execute_workflow_simple(workflow_data: dict, input_values: dict) -> dict:
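A quick way to sanity-check the four sample graphs this function now returns. This loop is an illustrative sketch, not part of the commit, and assumes only `create_sample_workflow` as defined above:

```python
import json

# Each sample type should come back as a self-contained node/edge graph.
for kind in ("basic", "vidraft", "multi_input", "chain"):
    wf = create_sample_workflow(kind)
    node_ids = {n["id"] for n in wf["nodes"]}
    # Every edge must reference declared node ids.
    assert all(e["source"] in node_ids and e["target"] in node_ids
               for e in wf["edges"])
    # Plain JSON, so it can round-trip through the import/export textbox.
    print(kind, json.dumps(wf, ensure_ascii=False)[:60], "...")
```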
@@ -181,6 +371,26 @@ def execute_workflow_simple(workflow_data: dict, input_values: dict) -> dict:
             default_value = template.get("input_value", {}).get("value", "")
             results[node_id] = default_value
 
+        elif node_type == "textNode":
+            # A text node combines all of its connected inputs
+            template = node_data.get("template", {})
+            base_text = template.get("text", {}).get("value", "")
+
+            # Collect the connected inputs
+            connected_inputs = []
+            for edge in edges:
+                if edge.get("target") == node_id:
+                    source_id = edge.get("source")
+                    if source_id in results:
+                        connected_inputs.append(f"{source_id}: {results[source_id]}")
+
+            # Build the combined text
+            if connected_inputs:
+                combined_text = f"{base_text}\n\nInputs:\n" + "\n".join(connected_inputs)
+                results[node_id] = combined_text
+            else:
+                results[node_id] = base_text
+
         elif node_type in ["llmNode", "OpenAIModel", "ChatModel"]:
             # LLM node handling
             template = node_data.get("template", {})
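Hand-tracing the new `textNode` branch against the `multi_input` sample: once the three text inputs have resolved into `results`, the combiner's value is its base text plus one `source: value` line per incoming edge. A standalone sketch of that combination rule, using the sample defaults:

```python
# Upstream results for the multi_input sample's three text inputs.
results = {
    "name_input": "John",
    "topic_input": "Python programming",
    "level_input": "beginner",
}
base_text = "Create a personalized learning plan"

# Same rule as the textNode branch above (ordering follows edges e1..e3).
connected_inputs = [f"{src}: {val}" for src, val in results.items()]
combined_text = f"{base_text}\n\nInputs:\n" + "\n".join(connected_inputs)

print(combined_text)
# Create a personalized learning plan
#
# Inputs:
# name_input: John
# topic_input: Python programming
# level_input: beginner
```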
@@ -346,6 +556,10 @@ CSS = """
   text-align:center;color:#64748b;font-size:14px;
   margin-top:8px;font-style:italic;
 }
+.sample-buttons{
+  display:grid;grid-template-columns:1fr 1fr;gap:0.5rem;
+  margin-top:0.5rem;
+}
 """
 
 # -------------------------------------------------------------------
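The `.sample-buttons` rule defines a two-column grid. For a class declared in the `CSS` string to take effect, some container has to opt in via `elem_classes`; the sketch below is assumed usage for illustration (the commit itself lays the buttons out with two `gr.Row()` blocks further down):

```python
import gradio as gr

CSS = """
.sample-buttons{
  display:grid;grid-template-columns:1fr 1fr;gap:0.5rem;
  margin-top:0.5rem;
}
"""

with gr.Blocks(css=CSS) as demo:
    # elem_classes attaches the CSS class to this container's DOM node.
    with gr.Column(elem_classes=["sample-buttons"]):
        gr.Button("🎯 Basic Q&A")
        gr.Button("🤖 VIDraft")
        gr.Button("📝 Multi-Input")
        gr.Button("🔗 Chain")

demo.launch()
```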
@@ -381,6 +595,12 @@ with gr.Blocks(title="🐭 MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
     - OpenAI: gpt-4.1-mini (fixed)
     - VIDraft: Gemma-3-r1984-27B (model ID: dep89a2fld32mcm)
 
+    **Sample Workflows:**
+    - Basic Q&A: Simple question-answer flow
+    - VIDraft: Korean language example with Gemma model
+    - Multi-Input: Combine multiple inputs for personalized output
+    - Chain: Sequential processing with multiple outputs
+
     *Note: Without API keys, the UI will simulate AI responses.*
     """)
 
@@ -419,7 +639,15 @@ with gr.Blocks(title="🐭 MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
                 type="filepath"
             )
             btn_load = gr.Button("📥 Load Workflow", variant="primary", size="lg")
-
+
+            # Sample buttons
+            gr.Markdown("**Sample Workflows:**")
+            with gr.Row():
+                btn_sample_basic = gr.Button("🎯 Basic Q&A", variant="secondary", scale=1)
+                btn_sample_vidraft = gr.Button("🤖 VIDraft", variant="secondary", scale=1)
+            with gr.Row():
+                btn_sample_multi = gr.Button("📝 Multi-Input", variant="secondary", scale=1)
+                btn_sample_chain = gr.Button("🔗 Chain", variant="secondary", scale=1)
 
             # Status
             status_text = gr.Textbox(
@@ -472,6 +700,9 @@ with gr.Blocks(title="🐭 MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
             input_nodes.append(node)
         elif node_type in ["ChatOutput", "textOutput", "Output"]:
             output_nodes.append(node)
+        elif node_type == "textNode":
+            # textNode is an intermediate processing node; it is not shown in the UI
+            pass
 
     # Create input components
     input_components = {}
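For reference, the UI pass now distinguishes three node roles; the helper below restates the classification in isolation. The input-type list is an assumption inferred from the sample graphs (only the output list and `textNode` are visible in this hunk):

```python
def classify_node(node_type: str) -> str:
    # Input types assumed from the samples (ChatInput, textInput).
    if node_type in ["ChatInput", "textInput", "Input"]:
        return "input"        # gets an input widget in the UI
    if node_type in ["ChatOutput", "textOutput", "Output"]:
        return "output"       # gets an output panel in the UI
    if node_type == "textNode":
        return "intermediate" # resolved only during execution
    return "other"
```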
@@ -618,9 +849,36 @@ with gr.Blocks(title="🐭 MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
         outputs=trigger_update
     )
 
-    # Load
-
-        fn=lambda: (create_sample_workflow(), "✅
+    # Load samples
+    btn_sample_basic.click(
+        fn=lambda: (create_sample_workflow("basic"), "✅ Basic Q&A sample loaded", export_pretty(create_sample_workflow("basic"))),
+        outputs=[loaded_data, status_text, import_json_text]
+    ).then(
+        fn=lambda current_trigger: not current_trigger,
+        inputs=trigger_update,
+        outputs=trigger_update
+    )
+
+    btn_sample_vidraft.click(
+        fn=lambda: (create_sample_workflow("vidraft"), "✅ VIDraft sample loaded", export_pretty(create_sample_workflow("vidraft"))),
+        outputs=[loaded_data, status_text, import_json_text]
+    ).then(
+        fn=lambda current_trigger: not current_trigger,
+        inputs=trigger_update,
+        outputs=trigger_update
+    )
+
+    btn_sample_multi.click(
+        fn=lambda: (create_sample_workflow("multi_input"), "✅ Multi-input sample loaded", export_pretty(create_sample_workflow("multi_input"))),
+        outputs=[loaded_data, status_text, import_json_text]
+    ).then(
+        fn=lambda current_trigger: not current_trigger,
+        inputs=trigger_update,
+        outputs=trigger_update
+    )
+
+    btn_sample_chain.click(
+        fn=lambda: (create_sample_workflow("chain"), "✅ Chain processing sample loaded", export_pretty(create_sample_workflow("chain"))),
         outputs=[loaded_data, status_text, import_json_text]
     ).then(
         fn=lambda current_trigger: not current_trigger,