Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -104,7 +104,7 @@ def create_sample_workflow(example_type="basic"):
|
|
| 104 |
"""μν μν¬νλ‘μ° μμ±"""
|
| 105 |
|
| 106 |
if example_type == "basic":
|
| 107 |
-
# κΈ°λ³Έ μμ : κ°λ¨ν Q&A
|
| 108 |
return {
|
| 109 |
"nodes": [
|
| 110 |
{
|
|
@@ -125,8 +125,8 @@ def create_sample_workflow(example_type="basic"):
|
|
| 125 |
"data": {
|
| 126 |
"label": "AI Processing",
|
| 127 |
"template": {
|
| 128 |
-
"provider": {"value": "
|
| 129 |
-
"model": {"value": "
|
| 130 |
"temperature": {"value": 0.7},
|
| 131 |
"system_prompt": {"value": "You are a helpful assistant."}
|
| 132 |
}
|
|
@@ -242,8 +242,8 @@ def create_sample_workflow(example_type="basic"):
|
|
| 242 |
"data": {
|
| 243 |
"label": "Generate Learning Plan",
|
| 244 |
"template": {
|
| 245 |
-
"provider": {"value": "
|
| 246 |
-
"model": {"value": "
|
| 247 |
"temperature": {"value": 0.7},
|
| 248 |
"system_prompt": {"value": "You are an expert educational consultant. Create personalized learning plans based on the user's name, topic of interest, and skill level."}
|
| 249 |
}
|
|
@@ -396,8 +396,10 @@ def execute_workflow(*input_values):
|
|
| 396 |
results[node_id] = base_text
|
| 397 |
|
| 398 |
elif node_type in ["llmNode", "OpenAIModel", "ChatModel"]:
|
| 399 |
-
# Get provider and model
|
| 400 |
-
provider = template.get("provider", {{}}).get("value", "
|
|
|
|
|
|
|
| 401 |
temperature = template.get("temperature", {{}}).get("value", 0.7)
|
| 402 |
system_prompt = template.get("system_prompt", {{}}).get("value", "")
|
| 403 |
|
|
@@ -473,7 +475,7 @@ def execute_workflow(*input_values):
|
|
| 473 |
elif provider == "VIDraft":
|
| 474 |
results[node_id] = "[VIDraft API key not found. Please set FRIENDLI_TOKEN in Space secrets]"
|
| 475 |
else:
|
| 476 |
-
results[node_id] = f"[
|
| 477 |
|
| 478 |
elif node_type in ["ChatOutput", "textOutput", "Output"]:
|
| 479 |
# Get connected result
|
|
@@ -497,14 +499,29 @@ with gr.Blocks(title="{app_name}", theme=gr.themes.Soft()) as demo:
|
|
| 497 |
vidraft_token = os.getenv("FRIENDLI_TOKEN")
|
| 498 |
openai_key = os.getenv("OPENAI_API_KEY")
|
| 499 |
|
| 500 |
-
|
| 501 |
-
|
| 502 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 503 |
|
| 504 |
-
|
| 505 |
-
|
| 506 |
-
|
| 507 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 508 |
|
| 509 |
# Extract nodes
|
| 510 |
nodes = WORKFLOW_DATA.get("nodes", [])
|
|
@@ -559,12 +576,15 @@ requests
|
|
| 559 |
|
| 560 |
def deploy_to_huggingface(workflow_data: dict, app_name: str, app_description: str,
|
| 561 |
hf_token: str, space_name: str, is_private: bool = False,
|
| 562 |
-
|
| 563 |
-
"""Deploy workflow to Hugging Face Space"""
|
| 564 |
|
| 565 |
if not HF_HUB_AVAILABLE:
|
| 566 |
return {"success": False, "error": "huggingface-hub library not installed"}
|
| 567 |
|
|
|
|
|
|
|
|
|
|
| 568 |
try:
|
| 569 |
# Initialize HF API
|
| 570 |
api = HfApi(token=hf_token)
|
|
@@ -578,11 +598,32 @@ def deploy_to_huggingface(workflow_data: dict, app_name: str, app_description: s
|
|
| 578 |
exist_ok=True
|
| 579 |
)
|
| 580 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 581 |
# Generate files
|
| 582 |
app_code = generate_standalone_app(workflow_data, app_name, app_description)
|
| 583 |
requirements = generate_requirements_txt()
|
| 584 |
|
| 585 |
# README with API setup instructions
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 586 |
readme = f"""---
|
| 587 |
title: {app_name}
|
| 588 |
emoji: π
|
|
@@ -598,25 +639,17 @@ pinned: false
|
|
| 598 |
|
| 599 |
{app_description}
|
| 600 |
|
| 601 |
-
## π API Configuration
|
| 602 |
|
| 603 |
-
|
| 604 |
|
| 605 |
-
|
| 606 |
-
- **Secret Name**: `FRIENDLI_TOKEN`
|
| 607 |
-
- **Secret Value**: Your VIDraft API token (starts with `flp_`)
|
| 608 |
-
{f"- **Status**: β
Already configured" if include_friendli_token and friendli_token else "- **Status**: β Needs to be added manually"}
|
| 609 |
|
| 610 |
-
|
| 611 |
-
- **Secret Name**: `OPENAI_API_KEY`
|
| 612 |
-
- **Secret Value**: Your OpenAI API key (starts with `sk-`)
|
| 613 |
-
- **Status**: β Needs to be added manually
|
| 614 |
|
| 615 |
-
##
|
| 616 |
-
|
| 617 |
-
|
| 618 |
-
3. Add the required secrets
|
| 619 |
-
4. Restart your Space
|
| 620 |
|
| 621 |
---
|
| 622 |
Generated by MOUSE Workflow
|
|
@@ -644,17 +677,22 @@ Generated by MOUSE Workflow
|
|
| 644 |
repo_type="space"
|
| 645 |
)
|
| 646 |
|
| 647 |
-
# Add
|
| 648 |
-
|
| 649 |
-
|
| 650 |
-
|
| 651 |
-
|
| 652 |
-
|
| 653 |
-
|
| 654 |
-
|
| 655 |
-
|
| 656 |
-
|
| 657 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 658 |
|
| 659 |
space_url = f"https://huggingface.co/spaces/{repo_id.repo_id}"
|
| 660 |
|
|
@@ -662,7 +700,9 @@ Generated by MOUSE Workflow
|
|
| 662 |
"success": True,
|
| 663 |
"space_url": space_url,
|
| 664 |
"message": f"Successfully deployed to {space_url}",
|
| 665 |
-
"
|
|
|
|
|
|
|
| 666 |
}
|
| 667 |
|
| 668 |
except Exception as e:
|
|
@@ -745,11 +785,11 @@ def execute_workflow_simple(workflow_data: dict, input_values: dict) -> dict:
|
|
| 745 |
|
| 746 |
# νλ‘λ°μ΄λ μ 보 μΆμΆ - VIDraft λλ OpenAIλ§ νμ©
|
| 747 |
provider_info = template.get("provider", {})
|
| 748 |
-
provider = provider_info.get("value", "
|
| 749 |
|
| 750 |
-
# providerκ° VIDraft λλ OpenAIκ° μλ κ²½μ°
|
| 751 |
if provider not in ["VIDraft", "OpenAI"]:
|
| 752 |
-
provider = "
|
| 753 |
|
| 754 |
# λͺ¨λΈ μ 보 μΆμΆ
|
| 755 |
if provider == "OpenAI":
|
|
@@ -759,7 +799,7 @@ def execute_workflow_simple(workflow_data: dict, input_values: dict) -> dict:
|
|
| 759 |
# VIDraftλ Gemma-3-r1984-27Bλ‘ κ³ μ
|
| 760 |
model = "Gemma-3-r1984-27B"
|
| 761 |
else:
|
| 762 |
-
model = "
|
| 763 |
|
| 764 |
# μ¨λ μ 보 μΆμΆ
|
| 765 |
temp_info = template.get("temperature", {})
|
|
@@ -913,6 +953,25 @@ CSS = """
|
|
| 913 |
padding:24px;border-radius:12px;margin:24px 0;
|
| 914 |
border:1px solid #fbbf24;
|
| 915 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 916 |
"""
|
| 917 |
|
| 918 |
# -------------------------------------------------------------------
|
|
@@ -925,10 +984,11 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 925 |
gr.Markdown("**Visual Workflow Builder with Interactive UI Execution**")
|
| 926 |
gr.HTML('<p class="powered-by">@Powered by VIDraft & Huggingface gradio</p>')
|
| 927 |
|
| 928 |
-
|
| 929 |
"""
|
| 930 |
<div class="component-description">
|
| 931 |
-
<p style="font-size:16px;margin:0;">Build sophisticated workflows visually β’ Import/Export JSON β’ Generate interactive UI for end-users</p>
|
|
|
|
| 932 |
</div>
|
| 933 |
"""
|
| 934 |
)
|
|
@@ -950,34 +1010,56 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 950 |
- VIDraft: Gemma-3-r1984-27B (model ID: dep89a2fld32mcm)
|
| 951 |
|
| 952 |
**Sample Workflows:**
|
| 953 |
-
- Basic Q&A: Simple question-answer flow
|
| 954 |
- VIDraft: Korean language example with Gemma model
|
| 955 |
-
- Multi-Input: Combine multiple inputs for personalized output
|
| 956 |
-
- Chain: Sequential processing with multiple outputs
|
| 957 |
|
| 958 |
-
|
| 959 |
""")
|
| 960 |
|
| 961 |
# State for storing workflow data
|
| 962 |
loaded_data = gr.State(None)
|
| 963 |
trigger_update = gr.State(False)
|
|
|
|
| 964 |
|
| 965 |
# βββ Dynamic Workflow Container βββ
|
| 966 |
with gr.Column(elem_classes=["workflow-container"]):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 967 |
@gr.render(inputs=[loaded_data, trigger_update])
|
| 968 |
def render_workflow(data, trigger):
|
| 969 |
"""λμ μΌλ‘ WorkflowBuilder λ λλ§"""
|
| 970 |
workflow_value = data if data else {"nodes": [], "edges": []}
|
| 971 |
|
| 972 |
-
|
| 973 |
-
label="
|
| 974 |
-
info="Drag
|
| 975 |
value=workflow_value,
|
| 976 |
elem_id="main_workflow"
|
| 977 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 978 |
|
| 979 |
# βββ Import Section βββ
|
| 980 |
with gr.Accordion("π₯ Import Workflow", open=True):
|
|
|
|
| 981 |
with gr.Row():
|
| 982 |
with gr.Column(scale=2):
|
| 983 |
import_json_text = gr.Code(
|
|
@@ -1012,30 +1094,39 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1012 |
)
|
| 1013 |
|
| 1014 |
# βββ Export Section βββ
|
| 1015 |
-
gr.Markdown("## πΎ Export")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1016 |
|
| 1017 |
with gr.Row():
|
| 1018 |
with gr.Column(scale=3):
|
| 1019 |
export_preview = gr.Code(
|
| 1020 |
language="json",
|
| 1021 |
-
label="Current Workflow JSON",
|
| 1022 |
-
lines=8
|
|
|
|
| 1023 |
)
|
|
|
|
| 1024 |
with gr.Column(scale=1):
|
| 1025 |
-
btn_preview = gr.Button("
|
| 1026 |
btn_download = gr.DownloadButton(
|
| 1027 |
"πΎ Download JSON",
|
| 1028 |
size="lg",
|
|
|
|
| 1029 |
visible=True
|
| 1030 |
)
|
| 1031 |
|
| 1032 |
# βββ Deploy Section βββ
|
| 1033 |
with gr.Accordion("π Deploy to Hugging Face Space", open=False, elem_classes=["deploy-section"]):
|
| 1034 |
gr.Markdown("""
|
| 1035 |
-
Deploy your workflow as an independent Hugging Face Space app.
|
| 1036 |
-
|
| 1037 |
""")
|
| 1038 |
|
|
|
|
|
|
|
| 1039 |
with gr.Row():
|
| 1040 |
with gr.Column(scale=2):
|
| 1041 |
deploy_name = gr.Textbox(
|
|
@@ -1063,22 +1154,26 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1063 |
info="Get your token from huggingface.co/settings/tokens"
|
| 1064 |
)
|
| 1065 |
|
|
|
|
|
|
|
|
|
|
| 1066 |
# FRIENDLI_TOKEN μ€μ
|
| 1067 |
-
|
| 1068 |
-
|
| 1069 |
-
|
| 1070 |
-
|
| 1071 |
-
|
| 1072 |
-
|
| 1073 |
-
|
| 1074 |
-
|
| 1075 |
-
|
| 1076 |
-
|
| 1077 |
-
|
| 1078 |
-
|
| 1079 |
-
|
| 1080 |
-
|
| 1081 |
-
|
|
|
|
| 1082 |
|
| 1083 |
deploy_private = gr.Checkbox(
|
| 1084 |
label="Make Space Private",
|
|
@@ -1098,19 +1193,12 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1098 |
lines=20
|
| 1099 |
)
|
| 1100 |
|
| 1101 |
-
# Toggle FRIENDLI token input visibility
|
| 1102 |
-
include_friendli.change(
|
| 1103 |
-
fn=lambda x: gr.update(visible=x),
|
| 1104 |
-
inputs=include_friendli,
|
| 1105 |
-
outputs=friendli_token_input
|
| 1106 |
-
)
|
| 1107 |
-
|
| 1108 |
# βββ UI Execution Section βββ
|
| 1109 |
with gr.Column(elem_classes=["ui-execution-section"]):
|
| 1110 |
gr.Markdown("## π UI Execution")
|
| 1111 |
-
gr.Markdown("
|
| 1112 |
|
| 1113 |
-
btn_execute_ui = gr.Button("βΆοΈ Generate & Run UI", variant="primary", size="lg")
|
| 1114 |
|
| 1115 |
# UI execution state
|
| 1116 |
ui_workflow_data = gr.State(None)
|
|
@@ -1215,6 +1303,10 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1215 |
if not vidraft_token and not openai_key:
|
| 1216 |
log += "\nβ οΈ No API keys found. Results will be simulated.\n"
|
| 1217 |
log += "To get real AI responses, set API keys in environment variables.\n"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1218 |
|
| 1219 |
log += "\n--- Processing Nodes ---\n"
|
| 1220 |
|
|
@@ -1258,14 +1350,15 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1258 |
def load_workflow(json_text, file_obj):
|
| 1259 |
data, status = load_json_from_text_or_file(json_text, file_obj)
|
| 1260 |
if data:
|
| 1261 |
-
|
|
|
|
| 1262 |
else:
|
| 1263 |
-
return None, status, gr.update()
|
| 1264 |
|
| 1265 |
btn_load.click(
|
| 1266 |
fn=load_workflow,
|
| 1267 |
inputs=[import_json_text, file_upload],
|
| 1268 |
-
outputs=[loaded_data, status_text, import_json_text]
|
| 1269 |
).then(
|
| 1270 |
fn=lambda current_trigger: not current_trigger,
|
| 1271 |
inputs=trigger_update,
|
|
@@ -1276,7 +1369,7 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1276 |
file_upload.change(
|
| 1277 |
fn=load_workflow,
|
| 1278 |
inputs=[import_json_text, file_upload],
|
| 1279 |
-
outputs=[loaded_data, status_text, import_json_text]
|
| 1280 |
).then(
|
| 1281 |
fn=lambda current_trigger: not current_trigger,
|
| 1282 |
inputs=trigger_update,
|
|
@@ -1285,8 +1378,8 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1285 |
|
| 1286 |
# Load samples
|
| 1287 |
btn_sample_basic.click(
|
| 1288 |
-
fn=lambda: (create_sample_workflow("basic"), "β
Basic Q&A sample loaded", export_pretty(create_sample_workflow("basic"))),
|
| 1289 |
-
outputs=[loaded_data, status_text, import_json_text]
|
| 1290 |
).then(
|
| 1291 |
fn=lambda current_trigger: not current_trigger,
|
| 1292 |
inputs=trigger_update,
|
|
@@ -1294,8 +1387,8 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1294 |
)
|
| 1295 |
|
| 1296 |
btn_sample_vidraft.click(
|
| 1297 |
-
fn=lambda: (create_sample_workflow("vidraft"), "β
VIDraft sample loaded", export_pretty(create_sample_workflow("vidraft"))),
|
| 1298 |
-
outputs=[loaded_data, status_text, import_json_text]
|
| 1299 |
).then(
|
| 1300 |
fn=lambda current_trigger: not current_trigger,
|
| 1301 |
inputs=trigger_update,
|
|
@@ -1303,8 +1396,8 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1303 |
)
|
| 1304 |
|
| 1305 |
btn_sample_multi.click(
|
| 1306 |
-
fn=lambda: (create_sample_workflow("multi_input"), "β
Multi-input sample loaded", export_pretty(create_sample_workflow("multi_input"))),
|
| 1307 |
-
outputs=[loaded_data, status_text, import_json_text]
|
| 1308 |
).then(
|
| 1309 |
fn=lambda current_trigger: not current_trigger,
|
| 1310 |
inputs=trigger_update,
|
|
@@ -1312,47 +1405,65 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1312 |
)
|
| 1313 |
|
| 1314 |
btn_sample_chain.click(
|
| 1315 |
-
fn=lambda: (create_sample_workflow("chain"), "β
Chain processing sample loaded", export_pretty(create_sample_workflow("chain"))),
|
| 1316 |
-
outputs=[loaded_data, status_text, import_json_text]
|
| 1317 |
).then(
|
| 1318 |
fn=lambda current_trigger: not current_trigger,
|
| 1319 |
inputs=trigger_update,
|
| 1320 |
outputs=trigger_update
|
| 1321 |
)
|
| 1322 |
|
| 1323 |
-
# Preview current workflow
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1324 |
btn_preview.click(
|
| 1325 |
-
fn=
|
| 1326 |
inputs=loaded_data,
|
| 1327 |
-
outputs=export_preview
|
| 1328 |
)
|
| 1329 |
|
| 1330 |
-
# Download workflow
|
| 1331 |
-
def prepare_download(data):
|
| 1332 |
-
"""λ€μ΄λ‘λλ₯Ό μν νμΌ μ€λΉ"""
|
| 1333 |
-
if not data:
|
| 1334 |
-
return None
|
| 1335 |
-
return export_file(data)
|
| 1336 |
|
| 1337 |
-
#
|
| 1338 |
-
|
| 1339 |
-
|
| 1340 |
-
|
| 1341 |
-
|
| 1342 |
-
|
|
|
|
| 1343 |
|
| 1344 |
-
# Generate UI execution
|
| 1345 |
btn_execute_ui.click(
|
| 1346 |
-
fn=
|
| 1347 |
inputs=loaded_data,
|
| 1348 |
outputs=ui_workflow_data
|
| 1349 |
)
|
| 1350 |
|
| 1351 |
# Auto-update export preview when workflow changes
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1352 |
loaded_data.change(
|
| 1353 |
-
fn=
|
| 1354 |
inputs=loaded_data,
|
| 1355 |
-
outputs=export_preview
|
| 1356 |
)
|
| 1357 |
|
| 1358 |
# βββ Deploy Event Handlers βββ
|
|
@@ -1360,13 +1471,16 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1360 |
# Preview generated code
|
| 1361 |
def preview_generated_code(workflow_data, app_name, app_description):
|
| 1362 |
if not workflow_data:
|
| 1363 |
-
return "No workflow loaded"
|
|
|
|
|
|
|
|
|
|
| 1364 |
|
| 1365 |
try:
|
| 1366 |
code = generate_standalone_app(workflow_data, app_name, app_description)
|
| 1367 |
return code
|
| 1368 |
except Exception as e:
|
| 1369 |
-
return f"Error generating code
|
| 1370 |
|
| 1371 |
# Update preview when inputs change
|
| 1372 |
deploy_name.change(
|
|
@@ -1381,6 +1495,7 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1381 |
outputs=generated_code_preview
|
| 1382 |
)
|
| 1383 |
|
|
|
|
| 1384 |
loaded_data.change(
|
| 1385 |
fn=preview_generated_code,
|
| 1386 |
inputs=[loaded_data, deploy_name, deploy_description],
|
|
@@ -1389,9 +1504,12 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1389 |
|
| 1390 |
# Deploy handler
|
| 1391 |
def handle_deploy(workflow_data, app_name, app_description, hf_token, space_name,
|
| 1392 |
-
|
| 1393 |
if not workflow_data:
|
| 1394 |
-
return "β No workflow loaded. Please load a workflow first."
|
|
|
|
|
|
|
|
|
|
| 1395 |
|
| 1396 |
if not hf_token:
|
| 1397 |
return "β Hugging Face token is required. Get yours at huggingface.co/settings/tokens"
|
|
@@ -1410,9 +1528,20 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1410 |
# Show deploying status
|
| 1411 |
yield "π Deploying to Hugging Face Space..."
|
| 1412 |
|
| 1413 |
-
#
|
| 1414 |
-
|
|
|
|
|
|
|
|
|
|
| 1415 |
friendli_token = os.getenv("FRIENDLI_TOKEN", "")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1416 |
|
| 1417 |
# Deploy
|
| 1418 |
result = deploy_to_huggingface(
|
|
@@ -1422,16 +1551,27 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1422 |
hf_token=hf_token,
|
| 1423 |
space_name=space_name,
|
| 1424 |
is_private=is_private,
|
| 1425 |
-
|
| 1426 |
-
friendli_token=friendli_token
|
| 1427 |
)
|
| 1428 |
|
| 1429 |
if result["success"]:
|
| 1430 |
-
|
| 1431 |
-
|
| 1432 |
-
|
| 1433 |
-
|
| 1434 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1435 |
|
| 1436 |
yield f"""β
**Deployment Successful!**
|
| 1437 |
|
|
@@ -1439,17 +1579,14 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1439 |
[{result['space_url']}]({result['space_url']})
|
| 1440 |
|
| 1441 |
β±οΈ The Space will be ready in a few minutes. Building usually takes 2-5 minutes.
|
| 1442 |
-
{secret_msg}
|
| 1443 |
|
| 1444 |
-
|
| 1445 |
-
|
| 1446 |
-
|
| 1447 |
-
|
| 1448 |
-
4. You can edit the code directly on Hugging Face if needed
|
| 1449 |
|
| 1450 |
-
|
| 1451 |
-
|
| 1452 |
-
- OPENAI_API_KEY (for OpenAI): β Add manually in Space settings if needed
|
| 1453 |
|
| 1454 |
π **Space Management:**
|
| 1455 |
- To update secrets: Go to Space settings β Repository secrets
|
|
@@ -1462,7 +1599,7 @@ with gr.Blocks(title="π MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
|
|
| 1462 |
btn_deploy.click(
|
| 1463 |
fn=handle_deploy,
|
| 1464 |
inputs=[loaded_data, deploy_name, deploy_description, deploy_token, deploy_space_name,
|
| 1465 |
-
|
| 1466 |
outputs=deploy_status
|
| 1467 |
)
|
| 1468 |
|
|
|
|
| 104 |
"""μν μν¬νλ‘μ° μμ±"""
|
| 105 |
|
| 106 |
if example_type == "basic":
|
| 107 |
+
# κΈ°λ³Έ μμ : κ°λ¨ν Q&A - VIDraft μ¬μ©
|
| 108 |
return {
|
| 109 |
"nodes": [
|
| 110 |
{
|
|
|
|
| 125 |
"data": {
|
| 126 |
"label": "AI Processing",
|
| 127 |
"template": {
|
| 128 |
+
"provider": {"value": "VIDraft"}, # κΈ°λ³Έκ°μ VIDraftλ‘ λ³κ²½
|
| 129 |
+
"model": {"value": "Gemma-3-r1984-27B"},
|
| 130 |
"temperature": {"value": 0.7},
|
| 131 |
"system_prompt": {"value": "You are a helpful assistant."}
|
| 132 |
}
|
|
|
|
| 242 |
"data": {
|
| 243 |
"label": "Generate Learning Plan",
|
| 244 |
"template": {
|
| 245 |
+
"provider": {"value": "VIDraft"}, # κΈ°λ³Έκ°μ VIDraftλ‘ λ³κ²½
|
| 246 |
+
"model": {"value": "Gemma-3-r1984-27B"},
|
| 247 |
"temperature": {"value": 0.7},
|
| 248 |
"system_prompt": {"value": "You are an expert educational consultant. Create personalized learning plans based on the user's name, topic of interest, and skill level."}
|
| 249 |
}
|
|
|
|
| 396 |
results[node_id] = base_text
|
| 397 |
|
| 398 |
elif node_type in ["llmNode", "OpenAIModel", "ChatModel"]:
|
| 399 |
+
# Get provider and model - VIDraft as default
|
| 400 |
+
provider = template.get("provider", {{}}).get("value", "VIDraft")
|
| 401 |
+
if provider not in ["VIDraft", "OpenAI"]:
|
| 402 |
+
provider = "VIDraft" # Default to VIDraft
|
| 403 |
temperature = template.get("temperature", {{}}).get("value", 0.7)
|
| 404 |
system_prompt = template.get("system_prompt", {{}}).get("value", "")
|
| 405 |
|
|
|
|
| 475 |
elif provider == "VIDraft":
|
| 476 |
results[node_id] = "[VIDraft API key not found. Please set FRIENDLI_TOKEN in Space secrets]"
|
| 477 |
else:
|
| 478 |
+
results[node_id] = f"[No API key found for {provider}. Using simulated response: {input_text[:50]}...]"
|
| 479 |
|
| 480 |
elif node_type in ["ChatOutput", "textOutput", "Output"]:
|
| 481 |
# Get connected result
|
|
|
|
| 499 |
vidraft_token = os.getenv("FRIENDLI_TOKEN")
|
| 500 |
openai_key = os.getenv("OPENAI_API_KEY")
|
| 501 |
|
| 502 |
+
with gr.Accordion("π API Status", open=False):
|
| 503 |
+
if vidraft_token:
|
| 504 |
+
gr.Markdown("β
**VIDraft API**: Connected (Gemma-3-r1984-27B)")
|
| 505 |
+
else:
|
| 506 |
+
gr.Markdown("β **VIDraft API**: Not configured")
|
| 507 |
+
|
| 508 |
+
if openai_key:
|
| 509 |
+
gr.Markdown("β
**OpenAI API**: Connected (gpt-4.1-mini)")
|
| 510 |
+
else:
|
| 511 |
+
gr.Markdown("β οΈ **OpenAI API**: Not configured (optional)")
|
| 512 |
|
| 513 |
+
if not vidraft_token:
|
| 514 |
+
gr.Markdown("""
|
| 515 |
+
**β οΈ Important**: Please add FRIENDLI_TOKEN to Space secrets for the app to work properly.
|
| 516 |
+
|
| 517 |
+
Go to: Space settings β Repository secrets β Add secret
|
| 518 |
+
""")
|
| 519 |
+
elif not openai_key:
|
| 520 |
+
gr.Markdown("""
|
| 521 |
+
**π‘ Tip**: The app will work with VIDraft alone. Add OPENAI_API_KEY if you need OpenAI features.
|
| 522 |
+
""")
|
| 523 |
+
else:
|
| 524 |
+
gr.Markdown("**β¨ All APIs configured! Your app is fully functional.**")
|
| 525 |
|
| 526 |
# Extract nodes
|
| 527 |
nodes = WORKFLOW_DATA.get("nodes", [])
|
|
|
|
| 576 |
|
| 577 |
def deploy_to_huggingface(workflow_data: dict, app_name: str, app_description: str,
|
| 578 |
hf_token: str, space_name: str, is_private: bool = False,
|
| 579 |
+
api_keys: dict = None) -> dict:
|
| 580 |
+
"""Deploy workflow to Hugging Face Space with API keys"""
|
| 581 |
|
| 582 |
if not HF_HUB_AVAILABLE:
|
| 583 |
return {"success": False, "error": "huggingface-hub library not installed"}
|
| 584 |
|
| 585 |
+
if api_keys is None:
|
| 586 |
+
api_keys = {}
|
| 587 |
+
|
| 588 |
try:
|
| 589 |
# Initialize HF API
|
| 590 |
api = HfApi(token=hf_token)
|
|
|
|
| 598 |
exist_ok=True
|
| 599 |
)
|
| 600 |
|
| 601 |
+
# Detect which providers are used in the workflow
|
| 602 |
+
providers_used = set()
|
| 603 |
+
nodes = workflow_data.get("nodes", [])
|
| 604 |
+
for node in nodes:
|
| 605 |
+
if node.get("type") in ["llmNode", "OpenAIModel", "ChatModel"]:
|
| 606 |
+
template = node.get("data", {}).get("template", {})
|
| 607 |
+
provider = template.get("provider", {}).get("value", "")
|
| 608 |
+
if provider:
|
| 609 |
+
providers_used.add(provider)
|
| 610 |
+
|
| 611 |
# Generate files
|
| 612 |
app_code = generate_standalone_app(workflow_data, app_name, app_description)
|
| 613 |
requirements = generate_requirements_txt()
|
| 614 |
|
| 615 |
# README with API setup instructions
|
| 616 |
+
api_status = []
|
| 617 |
+
if "FRIENDLI_TOKEN" in api_keys and api_keys["FRIENDLI_TOKEN"]:
|
| 618 |
+
api_status.append("- **FRIENDLI_TOKEN**: β
Will be configured automatically")
|
| 619 |
+
else:
|
| 620 |
+
api_status.append("- **FRIENDLI_TOKEN**: β οΈ Not provided (VIDraft won't work)")
|
| 621 |
+
|
| 622 |
+
if "OPENAI_API_KEY" in api_keys and api_keys["OPENAI_API_KEY"]:
|
| 623 |
+
api_status.append("- **OPENAI_API_KEY**: β
Will be configured automatically")
|
| 624 |
+
elif "OpenAI" in providers_used:
|
| 625 |
+
api_status.append("- **OPENAI_API_KEY**: β Required but not provided")
|
| 626 |
+
|
| 627 |
readme = f"""---
|
| 628 |
title: {app_name}
|
| 629 |
emoji: π
|
|
|
|
| 639 |
|
| 640 |
{app_description}
|
| 641 |
|
| 642 |
+
## π API Configuration Status
|
| 643 |
|
| 644 |
+
{chr(10).join(api_status)}
|
| 645 |
|
| 646 |
+
## π Providers Used in This Workflow
|
|
|
|
|
|
|
|
|
|
| 647 |
|
| 648 |
+
{', '.join(providers_used) if providers_used else 'No LLM providers detected'}
|
|
|
|
|
|
|
|
|
|
| 649 |
|
| 650 |
+
## π Default Configuration
|
| 651 |
+
|
| 652 |
+
This app is configured to use **VIDraft (Gemma-3-r1984-27B)** as the default LLM provider for optimal performance.
|
|
|
|
|
|
|
| 653 |
|
| 654 |
---
|
| 655 |
Generated by MOUSE Workflow
|
|
|
|
| 677 |
repo_type="space"
|
| 678 |
)
|
| 679 |
|
| 680 |
+
# Add all provided API keys as secrets
|
| 681 |
+
added_secrets = []
|
| 682 |
+
failed_secrets = []
|
| 683 |
+
|
| 684 |
+
for key_name, key_value in api_keys.items():
|
| 685 |
+
if key_value: # Only add non-empty keys
|
| 686 |
+
try:
|
| 687 |
+
api.add_space_secret(
|
| 688 |
+
repo_id=repo_id.repo_id,
|
| 689 |
+
key=key_name,
|
| 690 |
+
value=key_value
|
| 691 |
+
)
|
| 692 |
+
added_secrets.append(key_name)
|
| 693 |
+
except Exception as e:
|
| 694 |
+
failed_secrets.append(f"{key_name}: {str(e)}")
|
| 695 |
+
print(f"Warning: Could not add {key_name} secret: {e}")
|
| 696 |
|
| 697 |
space_url = f"https://huggingface.co/spaces/{repo_id.repo_id}"
|
| 698 |
|
|
|
|
| 700 |
"success": True,
|
| 701 |
"space_url": space_url,
|
| 702 |
"message": f"Successfully deployed to {space_url}",
|
| 703 |
+
"added_secrets": added_secrets,
|
| 704 |
+
"failed_secrets": failed_secrets,
|
| 705 |
+
"providers_used": list(providers_used)
|
| 706 |
}
|
| 707 |
|
| 708 |
except Exception as e:
|
|
|
|
| 785 |
|
| 786 |
# νλ‘λ°μ΄λ μ 보 μΆμΆ - VIDraft λλ OpenAIλ§ νμ©
|
| 787 |
provider_info = template.get("provider", {})
|
| 788 |
+
provider = provider_info.get("value", "VIDraft") if isinstance(provider_info, dict) else "VIDraft" # κΈ°λ³Έκ° VIDraft
|
| 789 |
|
| 790 |
+
# providerκ° VIDraft λλ OpenAIκ° μλ κ²½μ° VIDraftλ‘ κΈ°λ³Έ μ€μ
|
| 791 |
if provider not in ["VIDraft", "OpenAI"]:
|
| 792 |
+
provider = "VIDraft"
|
| 793 |
|
| 794 |
# λͺ¨λΈ μ 보 μΆμΆ
|
| 795 |
if provider == "OpenAI":
|
|
|
|
| 799 |
# VIDraftλ Gemma-3-r1984-27Bλ‘ κ³ μ
|
| 800 |
model = "Gemma-3-r1984-27B"
|
| 801 |
else:
|
| 802 |
+
model = "Gemma-3-r1984-27B" # κΈ°λ³Έκ° VIDraft λͺ¨λΈ
|
| 803 |
|
| 804 |
# μ¨λ μ 보 μΆμΆ
|
| 805 |
temp_info = template.get("temperature", {})
|
|
|
|
| 953 |
padding:24px;border-radius:12px;margin:24px 0;
|
| 954 |
border:1px solid #fbbf24;
|
| 955 |
}
|
| 956 |
+
.save-indicator{
|
| 957 |
+
text-align:right;
|
| 958 |
+
font-size:14px;
|
| 959 |
+
color:#16a34a;
|
| 960 |
+
padding:8px 16px;
|
| 961 |
+
background:#f0fdf4;
|
| 962 |
+
border-radius:20px;
|
| 963 |
+
display:inline-block;
|
| 964 |
+
margin-left:auto;
|
| 965 |
+
}
|
| 966 |
+
.workflow-info{
|
| 967 |
+
font-size:14px;
|
| 968 |
+
color:#475569;
|
| 969 |
+
background:#f8fafc;
|
| 970 |
+
padding:8px 16px;
|
| 971 |
+
border-radius:8px;
|
| 972 |
+
display:inline-block;
|
| 973 |
+
margin-bottom:16px;
|
| 974 |
+
}
|
| 975 |
"""
|
| 976 |
|
| 977 |
# -------------------------------------------------------------------
|
|
|
|
| 984 |
gr.Markdown("**Visual Workflow Builder with Interactive UI Execution**")
|
| 985 |
gr.HTML('<p class="powered-by">@Powered by VIDraft & Huggingface gradio</p>')
|
| 986 |
|
| 987 |
+
gr.HTML(
|
| 988 |
"""
|
| 989 |
<div class="component-description">
|
| 990 |
+
<p style="font-size:16px;margin:0;">Build sophisticated workflows visually β’ Import/Export JSON β’ Generate interactive UI for end-users β’ Default LLM: VIDraft (Gemma-3-r1984-27B)</p>
|
| 991 |
+
<p style="font-size:14px;margin-top:8px;color:#64748b;">π‘ Tip: Your workflow is automatically saved as you make changes. The JSON preview updates in real-time!</p>
|
| 992 |
</div>
|
| 993 |
"""
|
| 994 |
)
|
|
|
|
| 1010 |
- VIDraft: Gemma-3-r1984-27B (model ID: dep89a2fld32mcm)
|
| 1011 |
|
| 1012 |
**Sample Workflows:**
|
| 1013 |
+
- Basic Q&A: Simple question-answer flow (VIDraft)
|
| 1014 |
- VIDraft: Korean language example with Gemma model
|
| 1015 |
+
- Multi-Input: Combine multiple inputs for personalized output (VIDraft)
|
| 1016 |
+
- Chain: Sequential processing with multiple outputs (VIDraft + OpenAI)
|
| 1017 |
|
| 1018 |
+
**Note**: All examples prioritize VIDraft for optimal performance. Friendli API token will be automatically configured during deployment.
|
| 1019 |
""")
|
| 1020 |
|
| 1021 |
# State for storing workflow data
|
| 1022 |
loaded_data = gr.State(None)
|
| 1023 |
trigger_update = gr.State(False)
|
| 1024 |
+
save_status = gr.State("Ready")
|
| 1025 |
|
| 1026 |
# βββ Dynamic Workflow Container βββ
|
| 1027 |
with gr.Column(elem_classes=["workflow-container"]):
|
| 1028 |
+
# Auto-save status indicator
|
| 1029 |
+
with gr.Row():
|
| 1030 |
+
gr.Markdown("### π¨ Visual Workflow Designer")
|
| 1031 |
+
save_indicator = gr.Markdown("πΎ Auto-save: Ready", elem_classes=["save-indicator"])
|
| 1032 |
+
|
| 1033 |
@gr.render(inputs=[loaded_data, trigger_update])
|
| 1034 |
def render_workflow(data, trigger):
|
| 1035 |
"""λμ μΌλ‘ WorkflowBuilder λ λλ§"""
|
| 1036 |
workflow_value = data if data else {"nodes": [], "edges": []}
|
| 1037 |
|
| 1038 |
+
wb = WorkflowBuilder(
|
| 1039 |
+
label="",
|
| 1040 |
+
info="Drag nodes β Connect edges β Edit properties β Changes auto-save!",
|
| 1041 |
value=workflow_value,
|
| 1042 |
elem_id="main_workflow"
|
| 1043 |
)
|
| 1044 |
+
|
| 1045 |
+
# WorkflowBuilder λ³κ²½μ¬νμ μλμΌλ‘ loaded_dataμ μ μ₯
|
| 1046 |
+
def update_workflow_data(workflow_data):
|
| 1047 |
+
"""μν¬νοΏ½οΏ½οΏ½μ° λ°μ΄ν° μ
λ°μ΄νΈ λ° μν νμ"""
|
| 1048 |
+
import time
|
| 1049 |
+
# μ¦μ μ μ₯ μν νμ
|
| 1050 |
+
return workflow_data, f"πΎ Auto-save: Saved β ({time.strftime('%H:%M:%S')})"
|
| 1051 |
+
|
| 1052 |
+
wb.change(
|
| 1053 |
+
fn=update_workflow_data,
|
| 1054 |
+
inputs=wb,
|
| 1055 |
+
outputs=[loaded_data, save_indicator]
|
| 1056 |
+
)
|
| 1057 |
+
|
| 1058 |
+
return wb
|
| 1059 |
|
| 1060 |
# βββ Import Section βββ
|
| 1061 |
with gr.Accordion("π₯ Import Workflow", open=True):
|
| 1062 |
+
gr.Markdown("*Load an existing workflow from JSON or start with a sample template*")
|
| 1063 |
with gr.Row():
|
| 1064 |
with gr.Column(scale=2):
|
| 1065 |
import_json_text = gr.Code(
|
|
|
|
| 1094 |
)
|
| 1095 |
|
| 1096 |
# βββ Export Section βββ
|
| 1097 |
+
gr.Markdown("## πΎ Export / Live Preview")
|
| 1098 |
+
gr.Markdown("*Your workflow is automatically saved. The JSON below shows your current workflow in real-time.*")
|
| 1099 |
+
|
| 1100 |
+
# Workflow info display
|
| 1101 |
+
workflow_info = gr.Markdown("π Empty workflow", elem_classes=["workflow-info"])
|
| 1102 |
|
| 1103 |
with gr.Row():
|
| 1104 |
with gr.Column(scale=3):
|
| 1105 |
export_preview = gr.Code(
|
| 1106 |
language="json",
|
| 1107 |
+
label="Current Workflow JSON (Live Preview)",
|
| 1108 |
+
lines=8,
|
| 1109 |
+
interactive=False
|
| 1110 |
)
|
| 1111 |
+
gr.Markdown("*π‘ This JSON updates automatically as you modify the workflow above*")
|
| 1112 |
with gr.Column(scale=1):
|
| 1113 |
+
btn_preview = gr.Button("π Force Refresh", size="lg", variant="secondary")
|
| 1114 |
btn_download = gr.DownloadButton(
|
| 1115 |
"πΎ Download JSON",
|
| 1116 |
size="lg",
|
| 1117 |
+
variant="primary",
|
| 1118 |
visible=True
|
| 1119 |
)
|
| 1120 |
|
| 1121 |
# βββ Deploy Section βββ
|
| 1122 |
with gr.Accordion("π Deploy to Hugging Face Space", open=False, elem_classes=["deploy-section"]):
|
| 1123 |
gr.Markdown("""
|
| 1124 |
+
Deploy your **current workflow** as an independent Hugging Face Space app.
|
| 1125 |
+
The workflow shown in the JSON preview above will be deployed exactly as is.
|
| 1126 |
""")
|
| 1127 |
|
| 1128 |
+
gr.Markdown("*β οΈ Make sure to save/finalize your workflow design before deploying!*")
|
| 1129 |
+
|
| 1130 |
with gr.Row():
|
| 1131 |
with gr.Column(scale=2):
|
| 1132 |
deploy_name = gr.Textbox(
|
|
|
|
| 1154 |
info="Get your token from huggingface.co/settings/tokens"
|
| 1155 |
)
|
| 1156 |
|
| 1157 |
+
# API Keys μ€μ μΉμ
|
| 1158 |
+
gr.Markdown("### π API Keys Configuration")
|
| 1159 |
+
|
| 1160 |
# FRIENDLI_TOKEN μ€μ
|
| 1161 |
+
friendli_token_input = gr.Textbox(
|
| 1162 |
+
label="FRIENDLI_TOKEN (VIDraft/Gemma)",
|
| 1163 |
+
type="password",
|
| 1164 |
+
placeholder="flp_...",
|
| 1165 |
+
value=os.getenv("FRIENDLI_TOKEN", ""),
|
| 1166 |
+
info="Required for VIDraft. Will be added as secret."
|
| 1167 |
+
)
|
| 1168 |
+
|
| 1169 |
+
# OpenAI API Key μ€μ
|
| 1170 |
+
openai_token_input = gr.Textbox(
|
| 1171 |
+
label="OPENAI_API_KEY (Optional)",
|
| 1172 |
+
type="password",
|
| 1173 |
+
placeholder="sk-...",
|
| 1174 |
+
value=os.getenv("OPENAI_API_KEY", ""),
|
| 1175 |
+
info="Optional. Leave empty if not using OpenAI."
|
| 1176 |
+
)
|
| 1177 |
|
| 1178 |
deploy_private = gr.Checkbox(
|
| 1179 |
label="Make Space Private",
|
|
|
|
| 1193 |
lines=20
|
| 1194 |
)
|
| 1195 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1196 |
# βββ UI Execution Section βββ
|
| 1197 |
with gr.Column(elem_classes=["ui-execution-section"]):
|
| 1198 |
gr.Markdown("## π UI Execution")
|
| 1199 |
+
gr.Markdown("Test your workflow instantly! Click below to generate and run the UI from your current workflow design.")
|
| 1200 |
|
| 1201 |
+
btn_execute_ui = gr.Button("βΆοΈ Generate & Run UI from Current Workflow", variant="primary", size="lg")
|
| 1202 |
|
| 1203 |
# UI execution state
|
| 1204 |
ui_workflow_data = gr.State(None)
|
|
|
|
| 1303 |
if not vidraft_token and not openai_key:
|
| 1304 |
log += "\nβ οΈ No API keys found. Results will be simulated.\n"
|
| 1305 |
log += "To get real AI responses, set API keys in environment variables.\n"
|
| 1306 |
+
log += "Minimum requirement: FRIENDLI_TOKEN for VIDraft\n"
|
| 1307 |
+
elif vidraft_token and not openai_key:
|
| 1308 |
+
log += "\nβ
VIDraft API connected - Basic functionality available\n"
|
| 1309 |
+
log += "π‘ Add OPENAI_API_KEY for full functionality\n"
|
| 1310 |
|
| 1311 |
log += "\n--- Processing Nodes ---\n"
|
| 1312 |
|
|
|
|
| 1350 |
def load_workflow(json_text, file_obj):
    """Load a workflow from pasted JSON or an uploaded file.

    Returns (data, status, preview_text, save_indicator). On failure the
    preview and indicator components are left untouched via gr.update().
    """
    data, status = load_json_from_text_or_file(json_text, file_obj)
    if not data:
        return None, status, gr.update(), gr.update()
    # On a successful load, refresh the preview pane automatically.
    preview = export_pretty(data) if file_obj else json_text
    return data, status, preview, "πΎ Auto-save: Loaded β"
|
| 1357 |
|
| 1358 |
btn_load.click(
|
| 1359 |
fn=load_workflow,
|
| 1360 |
inputs=[import_json_text, file_upload],
|
| 1361 |
+
outputs=[loaded_data, status_text, import_json_text, save_indicator]
|
| 1362 |
).then(
|
| 1363 |
fn=lambda current_trigger: not current_trigger,
|
| 1364 |
inputs=trigger_update,
|
|
|
|
| 1369 |
file_upload.change(
|
| 1370 |
fn=load_workflow,
|
| 1371 |
inputs=[import_json_text, file_upload],
|
| 1372 |
+
outputs=[loaded_data, status_text, import_json_text, save_indicator]
|
| 1373 |
).then(
|
| 1374 |
fn=lambda current_trigger: not current_trigger,
|
| 1375 |
inputs=trigger_update,
|
|
|
|
| 1378 |
|
| 1379 |
# Load samples
|
| 1380 |
btn_sample_basic.click(
|
| 1381 |
+
fn=lambda: (create_sample_workflow("basic"), "β
Basic Q&A sample loaded", export_pretty(create_sample_workflow("basic")), "πΎ Auto-save: Sample loaded β"),
|
| 1382 |
+
outputs=[loaded_data, status_text, import_json_text, save_indicator]
|
| 1383 |
).then(
|
| 1384 |
fn=lambda current_trigger: not current_trigger,
|
| 1385 |
inputs=trigger_update,
|
|
|
|
| 1387 |
)
|
| 1388 |
|
| 1389 |
btn_sample_vidraft.click(
|
| 1390 |
+
fn=lambda: (create_sample_workflow("vidraft"), "β
VIDraft sample loaded", export_pretty(create_sample_workflow("vidraft")), "πΎ Auto-save: Sample loaded β"),
|
| 1391 |
+
outputs=[loaded_data, status_text, import_json_text, save_indicator]
|
| 1392 |
).then(
|
| 1393 |
fn=lambda current_trigger: not current_trigger,
|
| 1394 |
inputs=trigger_update,
|
|
|
|
| 1396 |
)
|
| 1397 |
|
| 1398 |
btn_sample_multi.click(
|
| 1399 |
+
fn=lambda: (create_sample_workflow("multi_input"), "β
Multi-input sample loaded", export_pretty(create_sample_workflow("multi_input")), "πΎ Auto-save: Sample loaded β"),
|
| 1400 |
+
outputs=[loaded_data, status_text, import_json_text, save_indicator]
|
| 1401 |
).then(
|
| 1402 |
fn=lambda current_trigger: not current_trigger,
|
| 1403 |
inputs=trigger_update,
|
|
|
|
| 1405 |
)
|
| 1406 |
|
| 1407 |
btn_sample_chain.click(
|
| 1408 |
+
fn=lambda: (create_sample_workflow("chain"), "β
Chain processing sample loaded", export_pretty(create_sample_workflow("chain")), "πΎ Auto-save: Sample loaded β"),
|
| 1409 |
+
outputs=[loaded_data, status_text, import_json_text, save_indicator]
|
| 1410 |
).then(
|
| 1411 |
fn=lambda current_trigger: not current_trigger,
|
| 1412 |
inputs=trigger_update,
|
| 1413 |
outputs=trigger_update
|
| 1414 |
)
|
| 1415 |
|
| 1416 |
+
# Preview current workflow - force refresh
|
| 1417 |
+
def force_refresh_preview(current_data):
    """Force-refresh the JSON preview from the current workflow data.

    Returns (preview_text, save_indicator, workflow_summary).
    """
    if not current_data:
        return "No workflow data available", "πΎ Auto-save: No data", "π Empty workflow"
    nodes = current_data.get("nodes", [])
    edges = current_data.get("edges", [])
    summary = f"π Workflow contains {len(nodes)} nodes and {len(edges)} edges"
    return export_pretty(current_data), "πΎ Auto-save: Refreshed β", summary
|
| 1425 |
+
|
| 1426 |
btn_preview.click(
|
| 1427 |
+
fn=force_refresh_preview,
|
| 1428 |
inputs=loaded_data,
|
| 1429 |
+
outputs=[export_preview, save_indicator, workflow_info]
|
| 1430 |
)
|
| 1431 |
|
| 1432 |
+
# Download workflow is already handled in loaded_data.change
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1433 |
|
| 1434 |
+
# Generate UI execution - uses the current workflow
|
| 1435 |
+
def prepare_ui_execution(current_data):
    """Hand the current workflow to the UI runner.

    Passes the data through unchanged when it contains at least one node;
    otherwise shows a warning toast and returns None so nothing executes.
    """
    has_nodes = bool(current_data) and bool(current_data.get("nodes"))
    if has_nodes:
        return current_data
    gr.Warning("Please create a workflow first!")
    return None
|
| 1441 |
|
|
|
|
| 1442 |
btn_execute_ui.click(
|
| 1443 |
+
fn=prepare_ui_execution,
|
| 1444 |
inputs=loaded_data,
|
| 1445 |
outputs=ui_workflow_data
|
| 1446 |
)
|
| 1447 |
|
| 1448 |
# Auto-update export preview when workflow changes
|
| 1449 |
+
def update_preview_and_download(data):
    """Refresh the preview, download file, and summary when the workflow changes.

    Returns (preview_text, download_file_or_None, workflow_summary).
    """
    if not data:
        return "No workflow data", None, "π Empty workflow"
    n_nodes = len(data.get("nodes", []))
    n_edges = len(data.get("edges", []))
    summary = f"π Workflow contains {n_nodes} nodes and {n_edges} edges"
    return export_pretty(data), export_file(data), summary
|
| 1459 |
+
|
| 1460 |
+
# Status display for workflow info
|
| 1461 |
+
workflow_info = gr.Markdown("π Empty workflow", elem_classes=["workflow-info"])
|
| 1462 |
+
|
| 1463 |
loaded_data.change(
|
| 1464 |
+
fn=update_preview_and_download,
|
| 1465 |
inputs=loaded_data,
|
| 1466 |
+
outputs=[export_preview, btn_download, workflow_info]
|
| 1467 |
)
|
| 1468 |
|
| 1469 |
# βββ Deploy Event Handlers βββ
|
|
|
|
| 1471 |
# Preview generated code
|
| 1472 |
def preview_generated_code(workflow_data, app_name, app_description):
    """Show the standalone-app code that would be deployed for this workflow.

    Returns placeholder comments when no workflow (or an empty one) is
    loaded, and an error comment when code generation fails.
    """
    nodes = (workflow_data or {}).get("nodes")
    if workflow_data and nodes:
        try:
            return generate_standalone_app(workflow_data, app_name, app_description)
        except Exception as e:
            return f"# Error generating code\n# {str(e)}"
    if workflow_data:
        return "# Empty workflow\n# Add some nodes to see the generated code"
    return "# No workflow loaded\n# Create or load a workflow first"
|
| 1484 |
|
| 1485 |
# Update preview when inputs change
|
| 1486 |
deploy_name.change(
|
|
|
|
| 1495 |
outputs=generated_code_preview
|
| 1496 |
)
|
| 1497 |
|
| 1498 |
+
# Update preview when workflow changes too
|
| 1499 |
loaded_data.change(
|
| 1500 |
fn=preview_generated_code,
|
| 1501 |
inputs=[loaded_data, deploy_name, deploy_description],
|
|
|
|
| 1504 |
|
| 1505 |
# Deploy handler
|
| 1506 |
def handle_deploy(workflow_data, app_name, app_description, hf_token, space_name,
|
| 1507 |
+
friendli_token, openai_token, is_private):
|
| 1508 |
if not workflow_data:
|
| 1509 |
+
return "β No workflow loaded. Please create or load a workflow first."
|
| 1510 |
+
|
| 1511 |
+
if not workflow_data.get("nodes"):
|
| 1512 |
+
return "β Empty workflow. Please add some nodes to your workflow."
|
| 1513 |
|
| 1514 |
if not hf_token:
|
| 1515 |
return "β Hugging Face token is required. Get yours at huggingface.co/settings/tokens"
|
|
|
|
| 1528 |
# Show deploying status
|
| 1529 |
yield "π Deploying to Hugging Face Space..."
|
| 1530 |
|
| 1531 |
+
# Prepare API keys
|
| 1532 |
+
api_keys = {}
|
| 1533 |
+
|
| 1534 |
+
# Always include FRIENDLI_TOKEN (even if empty)
|
| 1535 |
+
if not friendli_token:
|
| 1536 |
friendli_token = os.getenv("FRIENDLI_TOKEN", "")
|
| 1537 |
+
if friendli_token:
|
| 1538 |
+
api_keys["FRIENDLI_TOKEN"] = friendli_token
|
| 1539 |
+
|
| 1540 |
+
# Include OpenAI key if provided
|
| 1541 |
+
if not openai_token:
|
| 1542 |
+
openai_token = os.getenv("OPENAI_API_KEY", "")
|
| 1543 |
+
if openai_token:
|
| 1544 |
+
api_keys["OPENAI_API_KEY"] = openai_token
|
| 1545 |
|
| 1546 |
# Deploy
|
| 1547 |
result = deploy_to_huggingface(
|
|
|
|
| 1551 |
hf_token=hf_token,
|
| 1552 |
space_name=space_name,
|
| 1553 |
is_private=is_private,
|
| 1554 |
+
api_keys=api_keys
|
|
|
|
| 1555 |
)
|
| 1556 |
|
| 1557 |
if result["success"]:
|
| 1558 |
+
# Build secrets status message
|
| 1559 |
+
secrets_msg = "\n\n**π API Keys Status:**"
|
| 1560 |
+
|
| 1561 |
+
if result.get("added_secrets"):
|
| 1562 |
+
for secret in result["added_secrets"]:
|
| 1563 |
+
secrets_msg += f"\n- {secret}: β
Successfully added"
|
| 1564 |
+
|
| 1565 |
+
if result.get("failed_secrets"):
|
| 1566 |
+
for failure in result["failed_secrets"]:
|
| 1567 |
+
secrets_msg += f"\n- {failure}: β Failed to add"
|
| 1568 |
+
|
| 1569 |
+
# Check for missing required keys
|
| 1570 |
+
providers = result.get("providers_used", [])
|
| 1571 |
+
if "VIDraft" in providers and "FRIENDLI_TOKEN" not in result.get("added_secrets", []):
|
| 1572 |
+
secrets_msg += "\n- FRIENDLI_TOKEN: β οΈ Required for VIDraft but not provided"
|
| 1573 |
+
if "OpenAI" in providers and "OPENAI_API_KEY" not in result.get("added_secrets", []):
|
| 1574 |
+
secrets_msg += "\n- OPENAI_API_KEY: β οΈ Required for OpenAI but not provided"
|
| 1575 |
|
| 1576 |
yield f"""β
**Deployment Successful!**
|
| 1577 |
|
|
|
|
| 1579 |
[{result['space_url']}]({result['space_url']})
|
| 1580 |
|
| 1581 |
β±οΈ The Space will be ready in a few minutes. Building usually takes 2-5 minutes.
|
|
|
|
| 1582 |
|
| 1583 |
+
{secrets_msg}
|
| 1584 |
+
|
| 1585 |
+
π **Providers Detected in Workflow:**
|
| 1586 |
+
{', '.join(result.get('providers_used', [])) if result.get('providers_used') else 'No LLM providers detected'}
|
|
|
|
| 1587 |
|
| 1588 |
+
π **Default Configuration:**
|
| 1589 |
+
The app is configured to prioritize VIDraft (Gemma-3-r1984-27B) for optimal performance.
|
|
|
|
| 1590 |
|
| 1591 |
π **Space Management:**
|
| 1592 |
- To update secrets: Go to Space settings β Repository secrets
|
|
|
|
| 1599 |
btn_deploy.click(
|
| 1600 |
fn=handle_deploy,
|
| 1601 |
inputs=[loaded_data, deploy_name, deploy_description, deploy_token, deploy_space_name,
|
| 1602 |
+
friendli_token_input, openai_token_input, deploy_private],
|
| 1603 |
outputs=deploy_status
|
| 1604 |
)
|
| 1605 |
|