Update app.py
app.py CHANGED
@@ -34,6 +34,13 @@ except ImportError:
     REQUESTS_AVAILABLE = False
     print("Requests library not available. Install with: pip install requests")
 
+try:
+    from huggingface_hub import HfApi, create_repo, upload_file
+    HF_HUB_AVAILABLE = True
+except ImportError:
+    HF_HUB_AVAILABLE = False
+    print("Huggingface Hub not available. Install with: pip install huggingface-hub")
+
 # -------------------------------------------------------------------
 # 🛠️ Helper functions
 # -------------------------------------------------------------------
@@ -323,6 +330,287 @@ def create_sample_workflow(example_type="basic"):
     # Default is "basic"
     return create_sample_workflow("basic")
 
+# Standalone app generator for deployment
+def generate_standalone_app(workflow_data: dict, app_name: str, app_description: str) -> str:
+    """Convert a workflow into a standalone Gradio app"""
+
+    app_code = f'''"""
+{app_name}
+{app_description}
+Generated by MOUSE Workflow
+"""
+
+import os
+import json
+import gradio as gr
+import requests
+
+# Workflow configuration
+WORKFLOW_DATA = {json.dumps(workflow_data, indent=2)}
+
+def execute_workflow(*input_values):
+    """Execute the workflow with given inputs"""
+
+    # API keys from environment
+    vidraft_token = os.getenv("FRIENDLI_TOKEN")
+    openai_key = os.getenv("OPENAI_API_KEY")
+
+    nodes = WORKFLOW_DATA.get("nodes", [])
+    edges = WORKFLOW_DATA.get("edges", [])
+
+    results = {{}}
+
+    # Get input nodes
+    input_nodes = [n for n in nodes if n.get("type") in ["ChatInput", "textInput", "Input", "numberInput"]]
+
+    # Map inputs to node IDs
+    for i, node in enumerate(input_nodes):
+        if i < len(input_values):
+            results[node["id"]] = input_values[i]
+
+    # Process nodes
+    for node in nodes:
+        node_id = node.get("id")
+        node_type = node.get("type", "")
+        node_data = node.get("data", {{}})
+        template = node_data.get("template", {{}})
+
+        if node_type == "textNode":
+            # Combine connected inputs
+            base_text = template.get("text", {{}}).get("value", "")
+            connected_inputs = []
+
+            for edge in edges:
+                if edge.get("target") == node_id:
+                    source_id = edge.get("source")
+                    if source_id in results:
+                        connected_inputs.append(f"{{source_id}}: {{results[source_id]}}")
+
+            if connected_inputs:
+                results[node_id] = f"{{base_text}}\\n\\nInputs:\\n" + "\\n".join(connected_inputs)
+            else:
+                results[node_id] = base_text
+
+        elif node_type in ["llmNode", "OpenAIModel", "ChatModel"]:
+            # Get provider and model
+            provider = template.get("provider", {{}}).get("value", "OpenAI")
+            temperature = template.get("temperature", {{}}).get("value", 0.7)
+            system_prompt = template.get("system_prompt", {{}}).get("value", "")
+
+            # Get input text
+            input_text = ""
+            for edge in edges:
+                if edge.get("target") == node_id:
+                    source_id = edge.get("source")
+                    if source_id in results:
+                        input_text = results[source_id]
+                        break
+
+            # Call API
+            if provider == "OpenAI" and openai_key:
+                try:
+                    from openai import OpenAI
+                    client = OpenAI(api_key=openai_key)
+
+                    messages = []
+                    if system_prompt:
+                        messages.append({{"role": "system", "content": system_prompt}})
+                    messages.append({{"role": "user", "content": input_text}})
+
+                    response = client.chat.completions.create(
+                        model="gpt-4.1-mini",
+                        messages=messages,
+                        temperature=temperature,
+                        max_tokens=1000
+                    )
+
+                    results[node_id] = response.choices[0].message.content
+                except Exception as e:
+                    results[node_id] = f"[OpenAI Error: {{str(e)}}]"
+
+            elif provider == "VIDraft" and vidraft_token:
+                try:
+                    headers = {{
+                        "Authorization": f"Bearer {{vidraft_token}}",
+                        "Content-Type": "application/json"
+                    }}
+
+                    messages = []
+                    if system_prompt:
+                        messages.append({{"role": "system", "content": system_prompt}})
+                    messages.append({{"role": "user", "content": input_text}})
+
+                    payload = {{
+                        "model": "dep89a2fld32mcm",
+                        "messages": messages,
+                        "max_tokens": 16384,
+                        "temperature": temperature,
+                        "top_p": 0.8,
+                        "stream": False
+                    }}
+
+                    response = requests.post(
+                        "https://api.friendli.ai/dedicated/v1/chat/completions",
+                        headers=headers,
+                        json=payload,
+                        timeout=30
+                    )
+
+                    if response.status_code == 200:
+                        results[node_id] = response.json()["choices"][0]["message"]["content"]
+                    else:
+                        results[node_id] = f"[VIDraft Error: {{response.status_code}}]"
+                except Exception as e:
+                    results[node_id] = f"[VIDraft Error: {{str(e)}}]"
+            else:
+                results[node_id] = f"[Simulated Response: {{input_text[:50]}}...]"
+
+        elif node_type in ["ChatOutput", "textOutput", "Output"]:
+            # Get connected result
+            for edge in edges:
+                if edge.get("target") == node_id:
+                    source_id = edge.get("source")
+                    if source_id in results:
+                        results[node_id] = results[source_id]
+                        break
+
+    # Return outputs
+    output_nodes = [n for n in nodes if n.get("type") in ["ChatOutput", "textOutput", "Output"]]
+    return [results.get(n["id"], "") for n in output_nodes]
+
+# Build UI
+with gr.Blocks(title="{app_name}", theme=gr.themes.Soft()) as demo:
+    gr.Markdown("# {app_name}")
+    gr.Markdown("{app_description}")
+
+    # Extract nodes
+    nodes = WORKFLOW_DATA.get("nodes", [])
+    input_nodes = [n for n in nodes if n.get("type") in ["ChatInput", "textInput", "Input", "numberInput"]]
+    output_nodes = [n for n in nodes if n.get("type") in ["ChatOutput", "textOutput", "Output"]]
+
+    # Create inputs
+    inputs = []
+    if input_nodes:
+        gr.Markdown("### 📥 Inputs")
+        for node in input_nodes:
+            label = node.get("data", {{}}).get("label", node.get("id"))
+            template = node.get("data", {{}}).get("template", {{}})
+            default_value = template.get("input_value", {{}}).get("value", "")
+
+            if node.get("type") == "numberInput":
+                inp = gr.Number(label=label, value=float(default_value) if default_value else 0)
+            else:
+                inp = gr.Textbox(label=label, value=default_value, lines=2)
+            inputs.append(inp)
+
+    # Execute button
+    btn = gr.Button("🚀 Execute Workflow", variant="primary")
+
+    # Create outputs
+    outputs = []
+    if output_nodes:
+        gr.Markdown("### 📤 Outputs")
+        for node in output_nodes:
+            label = node.get("data", {{}}).get("label", node.get("id"))
+            out = gr.Textbox(label=label, interactive=False, lines=3)
+            outputs.append(out)
+
+    # Connect
+    btn.click(fn=execute_workflow, inputs=inputs, outputs=outputs)
+
+    gr.Markdown("---")
+    gr.Markdown("*Powered by MOUSE Workflow*")
+
+if __name__ == "__main__":
+    demo.launch()
+'''
+
+    return app_code
+
+def generate_requirements_txt() -> str:
+    """Generate requirements.txt for the standalone app"""
+    return """gradio>=4.0.0
+openai>=1.0.0
+requests>=2.31.0
+"""
+
+def deploy_to_huggingface(workflow_data: dict, app_name: str, app_description: str,
+                          hf_token: str, space_name: str, is_private: bool = False) -> dict:
+    """Deploy workflow to Hugging Face Space"""
+
+    if not HF_HUB_AVAILABLE:
+        return {"success": False, "error": "huggingface-hub library not installed"}
+
+    try:
+        # Initialize HF API
+        api = HfApi(token=hf_token)
+
+        # Create repository
+        repo_id = api.create_repo(
+            repo_id=space_name,
+            repo_type="space",
+            space_sdk="gradio",
+            private=is_private,
+            exist_ok=True
+        )
+
+        # Generate files
+        app_code = generate_standalone_app(workflow_data, app_name, app_description)
+        requirements = generate_requirements_txt()
+        readme = f"""---
+title: {app_name}
+emoji: 🚀
+colorFrom: blue
+colorTo: green
+sdk: gradio
+sdk_version: 4.0.0
+app_file: app.py
+pinned: false
+---
+
+# {app_name}
+
+{app_description}
+
+Generated by MOUSE Workflow
+"""
+
+        # Upload files
+        api.upload_file(
+            path_or_fileobj=app_code.encode(),
+            path_in_repo="app.py",
+            repo_id=repo_id.repo_id,
+            repo_type="space"
+        )
+
+        api.upload_file(
+            path_or_fileobj=requirements.encode(),
+            path_in_repo="requirements.txt",
+            repo_id=repo_id.repo_id,
+            repo_type="space"
+        )
+
+        api.upload_file(
+            path_or_fileobj=readme.encode(),
+            path_in_repo="README.md",
+            repo_id=repo_id.repo_id,
+            repo_type="space"
+        )
+
+        space_url = f"https://huggingface.co/spaces/{repo_id.repo_id}"
+
+        return {
+            "success": True,
+            "space_url": space_url,
+            "message": f"Successfully deployed to {space_url}"
+        }
+
+    except Exception as e:
+        return {
+            "success": False,
+            "error": str(e)
+        }
+
 # Actual workflow execution function for running from the UI
 def execute_workflow_simple(workflow_data: dict, input_values: dict) -> dict:
     """Actually execute the workflow"""
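
For reference, a minimal usage sketch (not part of this commit) showing how the two new entry points fit together. The toy workflow below mirrors the node/edge shape that the generated execute_workflow reads; "HF_TOKEN" and "your-username/mouse-demo" are placeholders, and deployment requires a write-scoped Hugging Face token.

# Hypothetical usage sketch: all names below are illustrative, not from the commit.
import os

toy_workflow = {
    "nodes": [
        {"id": "input-1", "type": "ChatInput",
         "data": {"label": "Question",
                  "template": {"input_value": {"value": "Hello!"}}}},
        {"id": "llm-1", "type": "llmNode",
         "data": {"template": {"provider": {"value": "OpenAI"},
                               "temperature": {"value": 0.7},
                               "system_prompt": {"value": "Be concise."}}}},
        {"id": "output-1", "type": "ChatOutput",
         "data": {"label": "Answer"}},
    ],
    "edges": [
        {"source": "input-1", "target": "llm-1"},
        {"source": "llm-1", "target": "output-1"},
    ],
}

# Inspect the generated standalone app source locally...
print(generate_standalone_app(toy_workflow, "Demo", "Toy MOUSE workflow")[:300])

# ...or push it to a Space in one call and report the result.
result = deploy_to_huggingface(
    toy_workflow, "Demo", "Toy MOUSE workflow",
    hf_token=os.environ["HF_TOKEN"],
    space_name="your-username/mouse-demo",
    is_private=False,
)
print(result["space_url"] if result["success"] else result["error"])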