openfree committed on
Commit
7829828
·
verified ·
1 Parent(s): 0767a47

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -1750
app.py CHANGED
@@ -1,1761 +1,35 @@
1
- import pathlib, sys
2
- LOCAL_BACKEND = pathlib.Path(__file__).parent / "src" / "backend"
3
- if str(LOCAL_BACKEND) not in sys.path:
4
- sys.path.insert(0, str(LOCAL_BACKEND))
5
- # --------------------------------------------------
6
- from gradio_workflowbuilder import WorkflowBuilder
7
-
8
- import os, json, typing, tempfile, traceback
9
- import gradio as gr
10
-
11
- # Optional imports for LLM APIs -------------------------------------------------
12
- try:
13
- from openai import OpenAI
14
- OPENAI_AVAILABLE = True
15
- except ImportError:
16
- OPENAI_AVAILABLE = False
17
- print("OpenAI library not available. Install with: pip install openai")
18
-
19
- ANTHROPIC_AVAILABLE = False # anthropic ๋ถ€๋ถ„ ์ƒ๋žต
20
-
21
- try:
22
- import requests
23
- REQUESTS_AVAILABLE = True
24
- except ImportError:
25
- REQUESTS_AVAILABLE = False
26
- print("Requests library not available. Install with: pip install requests")
27
-
28
- try:
29
- from huggingface_hub import HfApi, create_repo, upload_file
30
- HF_HUB_AVAILABLE = True
31
- except ImportError:
32
- HF_HUB_AVAILABLE = False
33
- print("Huggingface Hub not available. Install with: pip install huggingface-hub")
34
-
35
# -----------------------------------------------------------------------------
# Patch WorkflowBuilder -> add support for a `custom_palette` keyword argument.
from gradio_workflowbuilder import WorkflowBuilder as _WB

if not getattr(_WB, "_patched_for_custom_palette", False):
    # Keep a reference to the library's original initializer so the patched
    # version can delegate to it.
    _orig_init = _WB.__init__

    def _patched_init(self, *args, custom_palette=None, **kwargs):
        """Initializer wrapper: runs the original __init__, then merges any
        `custom_palette` entries into the component's palette list."""
        _orig_init(self, *args, **kwargs)
        if custom_palette:
            # Some library versions may not define `palette`; create it lazily.
            if not hasattr(self, "palette"):
                self.palette = []
            self.palette.extend(custom_palette)
        # Always record what was passed (empty list when nothing was given).
        self._custom_palette = custom_palette or []

    _WB.__init__ = _patched_init
    # Guard flag so a re-import of this module does not double-patch the class.
    _WB._patched_for_custom_palette = True
# -----------------------------------------------------------------------------

import uuid
56
# ---------- Declare the palette directly in code ------------------------------
# Sidebar palette definition consumed by the (patched) WorkflowBuilder:
# one "Best AI" category containing a preconfigured LLM node and a text node.
BEST_AI_PALETTE = [
    {
        "category": "Best AI",
        "items": [
            {
                "type": "llmNode",
                "label": "AI Processing",
                "template": {
                    "provider": {"value": "VIDraft"},
                    "model": {"value": "Gemma-3-r1984-27B"},
                    "temperature": {"value": 0.7},
                    "system_prompt": {"value": "You are a helpful assistant."}
                }
            },
            {
                "type": "textNode",
                "label": "Markdown โฌ‡๏ธ",
                "template": {
                    "text": {"value": "### Write any markdown here"}
                }
            },
        ],
    },
]

# Keep the same lowercase variable name for compatibility with other code.
best_ai_palette = BEST_AI_PALETTE
# -----------------------------------------------------------------------------
85
-
86
- # util โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
87
- def add_node_to_canvas(current_wf: dict | None,
88
- node_template: dict,
89
- at=(100, 100)) -> dict:
90
- fld = (current_wf or {"nodes": [], "edges": []}).copy()
91
- fld.setdefault("nodes", []).append(
92
- {
93
- "id": f"custom_{uuid.uuid4().hex[:8]}",
94
- "type": node_template["type"],
95
- "position": {"x": at[0], "y": at[1]},
96
- "data": {
97
- "label": node_template["label"],
98
- "template": node_template["template"],
99
- },
100
- }
101
- )
102
- return fld
103
- # -----------------------------------------------------------------------------
104
-
105
# Full node template for an LLM processing node: field metadata (display name,
# type, options) plus defaults. Fields with "is_handle": True are exposed as
# connection points on the canvas.
AI_PROCESSING_TEMPLATE = {
    "type": "llmNode",
    "label": "AI Processing",
    "template": {
        "provider": {"display_name": "Provider", "type": "options",
                     "options": ["VIDraft", "OpenAI"], "value": "VIDraft"},
        "model": {"display_name": "Model", "type": "string",
                  "value": "Gemma-3-r1984-27B"},
        "temperature": {"display_name": "Temperature", "type": "number",
                        "value": 0.7},
        "system_prompt": {"display_name": "System Prompt", "type": "string",
                          "value": "You are a helpful assistant."},
        "user_prompt": {"display_name": "User Prompt", "type": "string",
                        "value": "", "is_handle": True},
        "response": {"display_name": "Response", "type": "string",
                     "value": "", "is_handle": True}
    }
}

# Template for a free-form markdown/text node with a single connectable field.
MARKDOWN_TEMPLATE = {
    "type": "textNode",
    "label": "Markdown โฌ‡๏ธ",
    "template": {
        "text": {"display_name": "Markdown", "type": "string",
                 "value": "### Write any markdown here", "is_handle": True}
    }
}
132
-
133
- # โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
134
- # โฌ‡๏ธ ์ดํ•˜ **๋ชจ๋“  ์ฝ”๋“œ**(์ƒ˜ํ”Œ์›Œํฌํ”Œ๋กœ์šฐ/Gradio UI/๋ฐฐํฌ ๋กœ์ง ๋“ฑ)๋Š”
135
- # ๊ธฐ์กด app.py ์™€ ๋™์ผํ•ฉ๋‹ˆ๋‹ค. ํŒ”๋ ˆํŠธ ๊ด€๋ จ ๋ถ€๋ถ„ ์™ธ์—๋Š” ์ˆ˜์ •-์‚ญ์ œ-๋ˆ„๋ฝ ์—†์Œ!
136
- # โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€
137
-
138
- # -------------------------------------------------------------------
139
- # ๐Ÿ› ๏ธ ํ—ฌํผ ํ•จ์ˆ˜๋“ค
140
- # -------------------------------------------------------------------
141
def export_pretty(data: typing.Dict[str, typing.Any]) -> str:
    """Render a workflow dict as human-readable JSON.

    Returns a placeholder message when *data* is empty or None.
    """
    if not data:
        return "No workflow to export"
    return json.dumps(data, indent=2, ensure_ascii=False)
143
-
144
def export_file(data: typing.Dict[str, typing.Any]) -> typing.Optional[str]:
    """Write the workflow dict to a temporary JSON file.

    Returns the file path on success, or None when *data* is empty or
    the write fails (the error is printed, not raised).
    """
    if not data:
        return None

    try:
        handle, file_path = tempfile.mkstemp(suffix=".json", prefix="workflow_", text=True)
        with os.fdopen(handle, "w", encoding="utf-8") as out:
            json.dump(data, out, ensure_ascii=False, indent=2)
    except Exception as exc:
        print(f"Error exporting file: {exc}")
        return None
    return file_path
158
-
159
def load_json_from_text_or_file(json_text: str, file_obj) -> typing.Tuple[typing.Optional[typing.Dict[str, typing.Any]], str]:
    """Load a workflow dict from raw JSON text or an uploaded file.

    Args:
        json_text: JSON string; ignored when *file_obj* is given.
        file_obj: Optional uploaded file object (must expose ``.name``);
            takes precedence over *json_text*.

    Returns:
        ``(data, message)`` — *data* is the parsed dict (with "nodes"/"edges"
        keys guaranteed) or None on any failure; *message* describes the
        outcome. (FIX: annotation now reflects that *data* may be None.)
    """
    # A file, when provided, takes precedence over the text box.
    if file_obj is not None:
        try:
            with open(file_obj.name, "r", encoding="utf-8") as f:
                json_text = f.read()
        except Exception as e:
            return None, f"โŒ Error reading file: {str(e)}"

    # Nothing to parse.
    if not json_text or json_text.strip() == "":
        return None, "No JSON data provided"

    try:
        data = json.loads(json_text.strip())

        # A workflow must be a JSON object, not a list/scalar.
        if not isinstance(data, dict):
            return None, "Invalid format: not a dictionary"

        # Guarantee the required top-level keys exist.
        data.setdefault('nodes', [])
        data.setdefault('edges', [])

        nodes_count = len(data.get('nodes', []))
        edges_count = len(data.get('edges', []))

        return data, f"โœ… Loaded: {nodes_count} nodes, {edges_count} edges"

    except json.JSONDecodeError as e:
        return None, f"โŒ JSON parsing error: {str(e)}"
    except Exception as e:
        return None, f"โŒ Error: {str(e)}"
196
-
197
def create_sample_workflow(example_type="basic"):
    """Create a sample workflow dict ("nodes" + "edges") for the given type.

    Supported *example_type* values: "basic" (simple Q&A), "vidraft"
    (VIDraft/Gemma demo), "multi_input" (three inputs combined), "chain"
    (translation then analysis). Any other value falls back to "basic".
    """

    if example_type == "basic":
        # Basic example: simple Q&A using VIDraft as the provider.
        return {
            "nodes": [

                {  # Best-AI node placed on the canvas by default
                    "id": "best_ai_default",
                    "type": "llmNode",
                    "position": {"x": 80, "y": 40},
                    "data": {
                        "label": "AI Processing",
                        "template": {
                            "provider": {"value": "VIDraft"},
                            "model": {"value": "Gemma-3-r1984-27B"},
                            "temperature": {"value": 0.7},
                            "system_prompt": {"value": "You are a helpful assistant."}
                        }
                    }
                },

                {
                    "id": "input_1",
                    "type": "ChatInput",
                    "position": {"x": 100, "y": 200},
                    "data": {
                        "label": "User Question",
                        "template": {
                            "input_value": {"value": "What is the capital of Korea?"}
                        }
                    }
                },
                {
                    "id": "llm_1",
                    "type": "llmNode",
                    "position": {"x": 400, "y": 200},
                    "data": {
                        "label": "AI Processing",
                        "template": {
                            "provider": {"value": "VIDraft"},  # default changed to VIDraft
                            "model": {"value": "Gemma-3-r1984-27B"},
                            "temperature": {"value": 0.7},
                            "system_prompt": {"value": "You are a helpful assistant."}
                        }
                    }
                },
                {
                    "id": "output_1",
                    "type": "ChatOutput",
                    "position": {"x": 700, "y": 200},
                    "data": {"label": "Answer"}
                }
            ],
            "edges": [
                {"id": "e1", "source": "input_1", "target": "llm_1"},
                {"id": "e2", "source": "llm_1", "target": "output_1"}
            ]
        }

    elif example_type == "vidraft":
        # VIDraft example: single LLM hop with a tutoring system prompt.
        return {
            "nodes": [
                {
                    "id": "input_1",
                    "type": "ChatInput",
                    "position": {"x": 100, "y": 200},
                    "data": {
                        "label": "User Input",
                        "template": {
                            "input_value": {"value": "AI์™€ ๋จธ์‹ ๋Ÿฌ๋‹์˜ ์ฐจ์ด์ ์„ ์„ค๋ช…ํ•ด์ฃผ์„ธ์š”."}
                        }
                    }
                },
                {
                    "id": "llm_1",
                    "type": "llmNode",
                    "position": {"x": 400, "y": 200},
                    "data": {
                        "label": "VIDraft AI (Gemma)",
                        "template": {
                            "provider": {"value": "VIDraft"},
                            "model": {"value": "Gemma-3-r1984-27B"},
                            "temperature": {"value": 0.8},
                            "system_prompt": {"value": "๋‹น์‹ ์€ ์ „๋ฌธ์ ์ด๊ณ  ์นœ์ ˆํ•œ AI ๊ต์œก์ž์ž…๋‹ˆ๋‹ค. ๋ณต์žกํ•œ ๊ฐœ๋…์„ ์‰ฝ๊ฒŒ ์„ค๋ช…ํ•ด์ฃผ์„ธ์š”."}
                        }
                    }
                },
                {
                    "id": "output_1",
                    "type": "ChatOutput",
                    "position": {"x": 700, "y": 200},
                    "data": {"label": "AI Explanation"}
                }
            ],
            "edges": [
                {"id": "e1", "source": "input_1", "target": "llm_1"},
                {"id": "e2", "source": "llm_1", "target": "output_1"}
            ]
        }

    elif example_type == "multi_input":
        # Multi-input example: three text inputs fan into a combiner node,
        # then one LLM generates a plan.
        return {
            "nodes": [
                {
                    "id": "name_input",
                    "type": "textInput",
                    "position": {"x": 100, "y": 100},
                    "data": {
                        "label": "Your Name",
                        "template": {
                            "input_value": {"value": "John"}
                        }
                    }
                },
                {
                    "id": "topic_input",
                    "type": "textInput",
                    "position": {"x": 100, "y": 250},
                    "data": {
                        "label": "Topic",
                        "template": {
                            "input_value": {"value": "Python programming"}
                        }
                    }
                },
                {
                    "id": "level_input",
                    "type": "textInput",
                    "position": {"x": 100, "y": 400},
                    "data": {
                        "label": "Skill Level",
                        "template": {
                            "input_value": {"value": "beginner"}
                        }
                    }
                },
                {
                    "id": "combiner",
                    "type": "textNode",
                    "position": {"x": 350, "y": 250},
                    "data": {
                        "label": "Combine Inputs",
                        "template": {
                            "text": {"value": "Create a personalized learning plan"}
                        }
                    }
                },
                {
                    "id": "llm_1",
                    "type": "llmNode",
                    "position": {"x": 600, "y": 250},
                    "data": {
                        "label": "Generate Learning Plan",
                        "template": {
                            "provider": {"value": "VIDraft"},  # default changed to VIDraft
                            "model": {"value": "Gemma-3-r1984-27B"},
                            "temperature": {"value": 0.7},
                            "system_prompt": {"value": "You are an expert educational consultant. Create personalized learning plans based on the user's name, topic of interest, and skill level."}
                        }
                    }
                },
                {
                    "id": "output_1",
                    "type": "ChatOutput",
                    "position": {"x": 900, "y": 250},
                    "data": {"label": "Your Learning Plan"}
                }
            ],
            "edges": [
                {"id": "e1", "source": "name_input", "target": "combiner"},
                {"id": "e2", "source": "topic_input", "target": "combiner"},
                {"id": "e3", "source": "level_input", "target": "combiner"},
                {"id": "e4", "source": "combiner", "target": "llm_1"},
                {"id": "e5", "source": "llm_1", "target": "output_1"}
            ]
        }

    elif example_type == "chain":
        # Chain example: translate with VIDraft, then analyze the translation
        # with OpenAI; two separate output nodes tap intermediate results.
        return {
            "nodes": [
                {
                    "id": "input_1",
                    "type": "ChatInput",
                    "position": {"x": 50, "y": 200},
                    "data": {
                        "label": "Original Text",
                        "template": {
                            "input_value": {"value": "The quick brown fox jumps over the lazy dog."}
                        }
                    }
                },
                {
                    "id": "translator",
                    "type": "llmNode",
                    "position": {"x": 300, "y": 200},
                    "data": {
                        "label": "Translate to Korean",
                        "template": {
                            "provider": {"value": "VIDraft"},
                            "model": {"value": "Gemma-3-r1984-27B"},
                            "temperature": {"value": 0.3},
                            "system_prompt": {"value": "You are a professional translator. Translate the given English text to Korean accurately."}
                        }
                    }
                },
                {
                    "id": "analyzer",
                    "type": "llmNode",
                    "position": {"x": 600, "y": 200},
                    "data": {
                        "label": "Analyze Translation",
                        "template": {
                            "provider": {"value": "OpenAI"},
                            "model": {"value": "gpt-4.1-mini"},
                            "temperature": {"value": 0.5},
                            "system_prompt": {"value": "You are a linguistic expert. Analyze the Korean translation and explain its nuances and cultural context."}
                        }
                    }
                },
                {
                    "id": "output_translation",
                    "type": "ChatOutput",
                    "position": {"x": 450, "y": 350},
                    "data": {"label": "Korean Translation"}
                },
                {
                    "id": "output_analysis",
                    "type": "ChatOutput",
                    "position": {"x": 900, "y": 200},
                    "data": {"label": "Translation Analysis"}
                }
            ],
            "edges": [
                {"id": "e1", "source": "input_1", "target": "translator"},
                {"id": "e2", "source": "translator", "target": "analyzer"},
                {"id": "e3", "source": "translator", "target": "output_translation"},
                {"id": "e4", "source": "analyzer", "target": "output_analysis"}
            ]
        }

    # Unknown type: fall back to the basic example.
    return create_sample_workflow("basic")
446
-
447
- # ๋ฐฐํฌ๋ฅผ ์œ„ํ•œ ๋…๋ฆฝ ์•ฑ ์ƒ์„ฑ ํ•จ์ˆ˜
448
def generate_standalone_app(workflow_data: dict, app_name: str, app_description: str) -> str:
    """Convert a workflow into the source code of a standalone Gradio app.

    Args:
        workflow_data: Workflow dict ("nodes"/"edges") to embed in the app.
        app_name: Title interpolated into the generated UI.
        app_description: Description interpolated into the generated UI.

    Returns:
        Complete Python source for an app.py that replays the workflow.
        NOTE: inside the f-string template below, ``{{`` / ``}}`` are escaped
        braces that become literal ``{``/``}`` in the generated file; single
        braces are interpolated here, at generation time.
    """

    # Embed the workflow as a JSON literal in the generated source.
    workflow_json = json.dumps(workflow_data, indent=2)

    app_code = f'''"""
{app_name}
{app_description}
Generated by MOUSE Workflow
"""

import os
import json
import gradio as gr
import requests

# Workflow configuration
WORKFLOW_DATA = {workflow_json}

def execute_workflow(*input_values):
    """Execute the workflow with given inputs"""

    # API keys from environment
    vidraft_token = os.getenv("FRIENDLI_TOKEN")
    openai_key = os.getenv("OPENAI_API_KEY")

    nodes = WORKFLOW_DATA.get("nodes", [])
    edges = WORKFLOW_DATA.get("edges", [])

    results = {{}}

    # Get input nodes
    input_nodes = [n for n in nodes if n.get("type") in ["ChatInput", "textInput", "Input", "numberInput"]]

    # Map inputs to node IDs
    for i, node in enumerate(input_nodes):
        if i < len(input_values):
            results[node["id"]] = input_values[i]

    # Process nodes
    for node in nodes:
        node_id = node.get("id")
        node_type = node.get("type", "")
        node_data = node.get("data", {{}})
        template = node_data.get("template", {{}})

        if node_type == "textNode":
            # Combine connected inputs
            base_text = template.get("text", {{}}).get("value", "")
            connected_inputs = []

            for edge in edges:
                if edge.get("target") == node_id:
                    source_id = edge.get("source")
                    if source_id in results:
                        connected_inputs.append(f"{{source_id}}: {{results[source_id]}}")

            if connected_inputs:
                results[node_id] = f"{{base_text}}\\n\\nInputs:\\n" + "\\n".join(connected_inputs)
            else:
                results[node_id] = base_text

        elif node_type in ["llmNode", "OpenAIModel", "ChatModel"]:
            # Get provider and model - VIDraft as default
            provider = template.get("provider", {{}}).get("value", "VIDraft")
            if provider not in ["VIDraft", "OpenAI"]:
                provider = "VIDraft"  # Default to VIDraft
            temperature = template.get("temperature", {{}}).get("value", 0.7)
            system_prompt = template.get("system_prompt", {{}}).get("value", "")

            # Get input text
            input_text = ""
            for edge in edges:
                if edge.get("target") == node_id:
                    source_id = edge.get("source")
                    if source_id in results:
                        input_text = results[source_id]
                        break

            # Call API
            if provider == "OpenAI" and openai_key:
                try:
                    from openai import OpenAI
                    client = OpenAI(api_key=openai_key)

                    messages = []
                    if system_prompt:
                        messages.append({{"role": "system", "content": system_prompt}})
                    messages.append({{"role": "user", "content": input_text}})

                    response = client.chat.completions.create(
                        model="gpt-4.1-mini",
                        messages=messages,
                        temperature=temperature,
                        max_tokens=1000
                    )

                    results[node_id] = response.choices[0].message.content
                except Exception as e:
                    results[node_id] = f"[OpenAI Error: {{str(e)}}]"

            elif provider == "VIDraft" and vidraft_token:
                try:
                    headers = {{
                        "Authorization": f"Bearer {{vidraft_token}}",
                        "Content-Type": "application/json"
                    }}

                    messages = []
                    if system_prompt:
                        messages.append({{"role": "system", "content": system_prompt}})
                    messages.append({{"role": "user", "content": input_text}})

                    payload = {{
                        "model": "dep89a2fld32mcm",
                        "messages": messages,
                        "max_tokens": 16384,
                        "temperature": temperature,
                        "top_p": 0.8,
                        "stream": False
                    }}

                    response = requests.post(
                        "https://api.friendli.ai/dedicated/v1/chat/completions",
                        headers=headers,
                        json=payload,
                        timeout=30
                    )

                    if response.status_code == 200:
                        results[node_id] = response.json()["choices"][0]["message"]["content"]
                    else:
                        results[node_id] = f"[VIDraft Error: {{response.status_code}}]"
                except Exception as e:
                    results[node_id] = f"[VIDraft Error: {{str(e)}}]"
            else:
                # Show which API key is missing
                if provider == "OpenAI":
                    results[node_id] = "[OpenAI API key not found. Please set OPENAI_API_KEY in Space secrets]"
                elif provider == "VIDraft":
                    results[node_id] = "[VIDraft API key not found. Please set FRIENDLI_TOKEN in Space secrets]"
                else:
                    results[node_id] = f"[No API key found for {{provider}}. Using simulated response: {{input_text[:50]}}...]"

        elif node_type in ["ChatOutput", "textOutput", "Output"]:
            # Get connected result
            for edge in edges:
                if edge.get("target") == node_id:
                    source_id = edge.get("source")
                    if source_id in results:
                        results[node_id] = results[source_id]
                        break

    # Return outputs
    output_nodes = [n for n in nodes if n.get("type") in ["ChatOutput", "textOutput", "Output"]]
    return [results.get(n["id"], "") for n in output_nodes]

# Build UI
with gr.Blocks(title="{app_name}", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# {app_name}")
    gr.Markdown("{app_description}")

    # API Status Check
    vidraft_token = os.getenv("FRIENDLI_TOKEN")
    openai_key = os.getenv("OPENAI_API_KEY")

    with gr.Accordion("๐Ÿ”‘ API Status", open=False):
        if vidraft_token:
            gr.Markdown("โœ… **VIDraft API**: Connected (Gemma-3-r1984-27B)")
        else:
            gr.Markdown("โŒ **VIDraft API**: Not configured")

        if openai_key:
            gr.Markdown("โœ… **OpenAI API**: Connected (gpt-4.1-mini)")
        else:
            gr.Markdown("โš ๏ธ **OpenAI API**: Not configured (optional)")

        if not vidraft_token:
            gr.Markdown("""
            **โš ๏ธ Important**: Please add FRIENDLI_TOKEN to Space secrets for the app to work properly.

            Go to: Space settings โ†’ Repository secrets โ†’ Add secret
            """)
        elif not openai_key:
            gr.Markdown("""
            **๐Ÿ’ก Tip**: The app will work with VIDraft alone. Add OPENAI_API_KEY if you need OpenAI features.
            """)
        else:
            gr.Markdown("**โœจ All APIs configured! Your app is fully functional.**")

    # Extract nodes
    nodes = WORKFLOW_DATA.get("nodes", [])
    input_nodes = [n for n in nodes if n.get("type") in ["ChatInput", "textInput", "Input", "numberInput"]]
    output_nodes = [n for n in nodes if n.get("type") in ["ChatOutput", "textOutput", "Output"]]

    # Create inputs
    inputs = []
    if input_nodes:
        gr.Markdown("### ๐Ÿ“ฅ Inputs")
        for node in input_nodes:
            label = node.get("data", {{}}).get("label", node.get("id"))
            template = node.get("data", {{}}).get("template", {{}})
            default_value = template.get("input_value", {{}}).get("value", "")

            if node.get("type") == "numberInput":
                inp = gr.Number(label=label, value=float(default_value) if default_value else 0)
            else:
                inp = gr.Textbox(label=label, value=default_value, lines=2)
            inputs.append(inp)

    # Execute button
    btn = gr.Button("๐Ÿš€ Execute Workflow", variant="primary")

    # Create outputs
    outputs = []
    if output_nodes:
        gr.Markdown("### ๐Ÿ“ค Outputs")
        for node in output_nodes:
            label = node.get("data", {{}}).get("label", node.get("id"))
            out = gr.Textbox(label=label, interactive=False, lines=3)
            outputs.append(out)

    # Connect
    btn.click(fn=execute_workflow, inputs=inputs, outputs=outputs)

    gr.Markdown("---")
    gr.Markdown("*Powered by MOUSE Workflow*")

if __name__ == "__main__":
    demo.launch()
'''

    return app_code
682
-
683
def generate_requirements_txt() -> str:
    """Return the requirements.txt content for the generated standalone app."""
    packages = ("gradio==5.34.2", "openai", "requests")
    return "\n".join(packages) + "\n"
689
-
690
def deploy_to_huggingface(workflow_data: dict, app_name: str, app_description: str,
                          hf_token: str, space_name: str, is_private: bool = False,
                          api_keys: dict = None) -> dict:
    """Deploy the workflow as a Gradio app to a Hugging Face Space.

    Creates (or reuses) the Space, uploads app.py / requirements.txt /
    README.md, and registers any provided API keys as Space secrets.

    Args:
        workflow_data: Workflow dict to embed in the generated app.
        app_name: Space title / generated-app heading.
        app_description: Description shown in the generated app and README.
        hf_token: Hugging Face write token used for all API calls.
        space_name: Target repo id for the Space.
        is_private: Whether the Space is created private.
        api_keys: Optional mapping of secret name -> value (e.g.
            "FRIENDLI_TOKEN", "OPENAI_API_KEY"); empty values are skipped.

    Returns:
        On success: {"success": True, "space_url", "message",
        "added_secrets", "failed_secrets", "providers_used"}.
        On failure: {"success": False, "error": <message>}.
    """

    if not HF_HUB_AVAILABLE:
        return {"success": False, "error": "huggingface-hub library not installed"}

    # Avoid a mutable default argument.
    if api_keys is None:
        api_keys = {}

    try:
        # Initialize HF API client with the caller's token.
        api = HfApi(token=hf_token)

        # Create the Space repository (idempotent thanks to exist_ok=True).
        repo_id = api.create_repo(
            repo_id=space_name,
            repo_type="space",
            space_sdk="gradio",
            private=is_private,
            exist_ok=True
        )

        # Detect which LLM providers the workflow actually uses, so the
        # README can report which secrets matter.
        providers_used = set()
        nodes = workflow_data.get("nodes", [])
        for node in nodes:
            if node.get("type") in ["llmNode", "OpenAIModel", "ChatModel"]:
                template = node.get("data", {}).get("template", {})
                provider = template.get("provider", {}).get("value", "")
                if provider:
                    providers_used.add(provider)

        # Generate the files to upload.
        app_code = generate_standalone_app(workflow_data, app_name, app_description)
        requirements = generate_requirements_txt()

        # README with API setup instructions.
        api_status = []
        if "FRIENDLI_TOKEN" in api_keys and api_keys["FRIENDLI_TOKEN"]:
            api_status.append("- **FRIENDLI_TOKEN**: โœ… Will be configured automatically")
        else:
            api_status.append("- **FRIENDLI_TOKEN**: โš ๏ธ Not provided (VIDraft won't work)")

        if "OPENAI_API_KEY" in api_keys and api_keys["OPENAI_API_KEY"]:
            api_status.append("- **OPENAI_API_KEY**: โœ… Will be configured automatically")
        elif "OpenAI" in providers_used:
            api_status.append("- **OPENAI_API_KEY**: โŒ Required but not provided")

        readme = f"""---
title: {app_name}
emoji: ๐Ÿญ
colorFrom: blue
colorTo: green
sdk: gradio
sdk_version: 5.34.2
app_file: app.py
pinned: false
---

# {app_name}

{app_description}

## ๐Ÿ”‘ API Configuration Status

{chr(10).join(api_status)}

## ๐Ÿ“ Providers Used in This Workflow

{', '.join(providers_used) if providers_used else 'No LLM providers detected'}

## ๐Ÿš€ Default Configuration

This app is configured to use **VIDraft (Gemma-3-r1984-27B)** as the default LLM provider for optimal performance.

---
Generated by MOUSE Workflow
"""

        # Upload the three files to the Space.
        api.upload_file(
            path_or_fileobj=app_code.encode(),
            path_in_repo="app.py",
            repo_id=repo_id.repo_id,
            repo_type="space"
        )

        api.upload_file(
            path_or_fileobj=requirements.encode(),
            path_in_repo="requirements.txt",
            repo_id=repo_id.repo_id,
            repo_type="space"
        )

        api.upload_file(
            path_or_fileobj=readme.encode(),
            path_in_repo="README.md",
            repo_id=repo_id.repo_id,
            repo_type="space"
        )

        # Add all provided API keys as Space secrets; failures are collected
        # rather than aborting the deployment.
        added_secrets = []
        failed_secrets = []

        for key_name, key_value in api_keys.items():
            if key_value:  # only add non-empty keys
                try:
                    api.add_space_secret(
                        repo_id=repo_id.repo_id,
                        key=key_name,
                        value=key_value
                    )
                    added_secrets.append(key_name)
                except Exception as e:
                    failed_secrets.append(f"{key_name}: {str(e)}")
                    print(f"Warning: Could not add {key_name} secret: {e}")

        space_url = f"https://huggingface.co/spaces/{repo_id.repo_id}"

        return {
            "success": True,
            "space_url": space_url,
            "message": f"Successfully deployed to {space_url}",
            "added_secrets": added_secrets,
            "failed_secrets": failed_secrets,
            "providers_used": list(providers_used)
        }

    except Exception as e:
        # Surface any HF API / network error to the caller as a result dict.
        return {
            "success": False,
            "error": str(e)
        }
826
-
827
- # UI ์‹คํ–‰์„ ์œ„ํ•œ ์‹ค์ œ ์›Œํฌํ”Œ๋กœ์šฐ ์‹คํ–‰ ํ•จ์ˆ˜
828
def execute_workflow_simple(workflow_data: dict, input_values: dict) -> dict:
    """Actually execute a workflow and return per-node results.

    Nodes are processed in list order (no topological sort), so a node only
    sees results of sources that appear earlier in the list.

    Args:
        workflow_data: Workflow dict with "nodes" and "edges".
        input_values: Mapping of input-node id -> value supplied by the UI;
            missing entries fall back to the node's template default.

    Returns:
        Dict mapping node id -> produced value (or an "[... Error ...]"
        string when a provider call or node processing fails).
    """
    import traceback

    # API keys from the environment.
    vidraft_token = os.getenv("FRIENDLI_TOKEN")  # VIDraft/Friendli token
    openai_key = os.getenv("OPENAI_API_KEY")
    # anthropic_key = os.getenv("ANTHROPIC_API_KEY")  # Anthropic support disabled

    # Check whether the OpenAI client library is importable.
    try:
        from openai import OpenAI
        openai_available = True
    except ImportError:
        openai_available = False
        print("OpenAI library not available")

    # Anthropic support is intentionally disabled in this build.
    anthropic_available = False

    results = {}
    nodes = workflow_data.get("nodes", [])
    edges = workflow_data.get("edges", [])

    # Process nodes in their listed order.
    for node in nodes:
        node_id = node.get("id")
        node_type = node.get("type", "")
        node_data = node.get("data", {})

        try:
            if node_type in ["ChatInput", "textInput", "Input"]:
                # Prefer the value supplied from the UI.
                if node_id in input_values:
                    results[node_id] = input_values[node_id]
                else:
                    # Fall back to the node's template default.
                    template = node_data.get("template", {})
                    default_value = template.get("input_value", {}).get("value", "")
                    results[node_id] = default_value

            elif node_type == "textNode":
                # A text node concatenates all connected upstream results.
                template = node_data.get("template", {})
                base_text = template.get("text", {}).get("value", "")

                # Collect results of every edge pointing at this node.
                connected_inputs = []
                for edge in edges:
                    if edge.get("target") == node_id:
                        source_id = edge.get("source")
                        if source_id in results:
                            connected_inputs.append(f"{source_id}: {results[source_id]}")

                # Build the combined text (or just the base text).
                if connected_inputs:
                    combined_text = f"{base_text}\n\nInputs:\n" + "\n".join(connected_inputs)
                    results[node_id] = combined_text
                else:
                    results[node_id] = base_text

            elif node_type in ["llmNode", "OpenAIModel", "ChatModel"]:
                # LLM node processing.
                template = node_data.get("template", {})

                # Extract the provider — only VIDraft or OpenAI are allowed.
                provider_info = template.get("provider", {})
                provider = provider_info.get("value", "VIDraft") if isinstance(provider_info, dict) else "VIDraft"  # default VIDraft

                # Anything else falls back to VIDraft.
                if provider not in ["VIDraft", "OpenAI"]:
                    provider = "VIDraft"

                # Model selection is fixed per provider (note: `model` is not
                # actually used below — the API calls hard-code their model ids).
                if provider == "OpenAI":
                    model = "gpt-4.1-mini"
                elif provider == "VIDraft":
                    model = "Gemma-3-r1984-27B"
                else:
                    model = "Gemma-3-r1984-27B"  # default VIDraft model

                # Temperature (guard against non-dict template entries).
                temp_info = template.get("temperature", {})
                temperature = temp_info.get("value", 0.7) if isinstance(temp_info, dict) else 0.7

                # System prompt.
                prompt_info = template.get("system_prompt", {})
                system_prompt = prompt_info.get("value", "") if isinstance(prompt_info, dict) else ""

                # Find the input text: first already-computed upstream result.
                input_text = ""
                for edge in edges:
                    if edge.get("target") == node_id:
                        source_id = edge.get("source")
                        if source_id in results:
                            input_text = results[source_id]
                            break

                # Real API call.
                if provider == "OpenAI" and openai_key and openai_available:
                    try:
                        client = OpenAI(api_key=openai_key)

                        messages = []
                        if system_prompt:
                            messages.append({"role": "system", "content": system_prompt})
                        messages.append({"role": "user", "content": input_text})

                        response = client.chat.completions.create(
                            model="gpt-4.1-mini",  # fixed model name
                            messages=messages,
                            temperature=temperature,
                            max_tokens=1000
                        )

                        results[node_id] = response.choices[0].message.content

                    except Exception as e:
                        results[node_id] = f"[OpenAI Error: {str(e)}]"

                # (Anthropic branch removed — support disabled above.)

                elif provider == "VIDraft" and vidraft_token:
                    try:
                        import requests

                        headers = {
                            "Authorization": f"Bearer {vidraft_token}",
                            "Content-Type": "application/json"
                        }

                        # Build the chat messages.
                        messages = []
                        if system_prompt:
                            messages.append({"role": "system", "content": system_prompt})
                        messages.append({"role": "user", "content": input_text})

                        payload = {
                            "model": "dep89a2fld32mcm",  # VIDraft model id
                            "messages": messages,
                            "max_tokens": 16384,
                            "temperature": temperature,
                            "top_p": 0.8,
                            "stream": False  # synchronous execution
                        }

                        # VIDraft API endpoint.
                        response = requests.post(
                            "https://api.friendli.ai/dedicated/v1/chat/completions",
                            headers=headers,
                            json=payload,
                            timeout=30
                        )

                        if response.status_code == 200:
                            response_json = response.json()
                            results[node_id] = response_json["choices"][0]["message"]["content"]
                        else:
                            results[node_id] = f"[VIDraft API Error: {response.status_code} - {response.text}]"

                    except Exception as e:
                        results[node_id] = f"[VIDraft Error: {str(e)}]"

                else:
                    # No API key available: return a simulated response.
                    results[node_id] = f"[Simulated {provider} Response to: {input_text[:50]}...]"

            elif node_type in ["ChatOutput", "textOutput", "Output"]:
                # Output nodes copy the result of their first resolved source.
                for edge in edges:
                    if edge.get("target") == node_id:
                        source_id = edge.get("source")
                        if source_id in results:
                            results[node_id] = results[source_id]
                            break

        except Exception as e:
            # A failing node records its error but does not stop the run.
            results[node_id] = f"[Node Error: {str(e)}]"
            print(f"Error processing node {node_id}: {traceback.format_exc()}")

    return results
1033
-
1034
- # -------------------------------------------------------------------
1035
- # ๐ŸŽจ CSS
1036
- # -------------------------------------------------------------------
1037
- CSS = """
1038
- .main-container{max-width:1600px;margin:0 auto;}
1039
- .workflow-section{margin-bottom:2rem;min-height:500px;}
1040
- .button-row{display:flex;gap:1rem;justify-content:center;margin:1rem 0;}
1041
- .status-box{
1042
- padding:10px;border-radius:5px;margin-top:10px;
1043
- background:#f0f9ff;border:1px solid #3b82f6;color:#1e40af;
1044
- }
1045
- .component-description{
1046
- padding:24px;background:linear-gradient(135deg,#f8fafc 0%,#e2e8f0 100%);
1047
- border-left:4px solid #3b82f6;border-radius:12px;
1048
- box-shadow:0 2px 8px rgba(0,0,0,.05);margin:16px 0;
1049
- }
1050
- .workflow-container{position:relative;}
1051
- .ui-execution-section{
1052
- background:linear-gradient(135deg,#f0fdf4 0%,#dcfce7 100%);
1053
- padding:24px;border-radius:12px;margin:24px 0;
1054
- border:1px solid #86efac;
1055
- }
1056
- .powered-by{
1057
- text-align:center;color:#64748b;font-size:14px;
1058
- margin-top:8px;font-style:italic;
1059
- }
1060
- .sample-buttons{
1061
- display:grid;grid-template-columns:1fr 1fr;gap:0.5rem;
1062
- margin-top:0.5rem;
1063
- }
1064
- .deploy-section{
1065
- background:linear-gradient(135deg,#fef3c7 0%,#fde68a 100%);
1066
- padding:24px;border-radius:12px;margin:24px 0;
1067
- border:1px solid #fbbf24;
1068
- }
1069
- .save-indicator{
1070
- text-align:right;
1071
- font-size:14px;
1072
- color:#16a34a;
1073
- padding:8px 16px;
1074
- background:#f0fdf4;
1075
- border-radius:20px;
1076
- display:inline-block;
1077
- margin-left:auto;
1078
- }
1079
- .workflow-info{
1080
- font-size:14px;
1081
- color:#475569;
1082
- background:#f8fafc;
1083
- padding:8px 16px;
1084
- border-radius:8px;
1085
- display:inline-block;
1086
- margin-bottom:16px;
1087
- }
1088
- """
1089
-
1090
- # -------------------------------------------------------------------
1091
- # ๐Ÿ–ฅ๏ธ Gradio ์•ฑ
1092
- # -------------------------------------------------------------------
1093
- with gr.Blocks(title="๐Ÿญ MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as demo:
1094
-
1095
- with gr.Column(elem_classes=["main-container"]):
1096
- gr.Markdown("# ๐Ÿญ MOUSE Workflow")
1097
- gr.Markdown("**Visual Workflow Builder with Interactive UI Execution**")
1098
- gr.HTML('<p class="powered-by">@Powered by VIDraft & Huggingface gradio</p>')
1099
-
1100
- html_content = """<div class="component-description">
1101
- <p style="font-size:16px;margin:0;">Build sophisticated workflows visually โ€ข Import/Export JSON โ€ข Generate interactive UI for end-users โ€ข Default LLM: VIDraft (Gemma-3-r1984-27B)</p>
1102
- <p style="font-size:14px;margin-top:8px;color:#64748b;">๐Ÿ’ก Tip: Your workflow is automatically saved as you make changes. The JSON preview updates in real-time!</p>
1103
- </div>"""
1104
- gr.HTML(html_content)
1105
-
1106
- # API Status Display
1107
- with gr.Accordion("๐Ÿ”Œ API Status", open=False):
1108
- gr.Markdown(f"""
1109
- **Available APIs:**
1110
- - FRIENDLI_TOKEN (VIDraft): {'โœ… Connected' if os.getenv("FRIENDLI_TOKEN") else 'โŒ Not found'}
1111
- - OPENAI_API_KEY: {'โœ… Connected' if os.getenv("OPENAI_API_KEY") else 'โŒ Not found'}
1112
-
1113
- **Libraries:**
1114
- - OpenAI: {'โœ… Installed' if OPENAI_AVAILABLE else 'โŒ Not installed'}
1115
- - Requests: {'โœ… Installed' if REQUESTS_AVAILABLE else 'โŒ Not installed'}
1116
- - Hugging Face Hub: {'โœ… Installed' if HF_HUB_AVAILABLE else 'โŒ Not installed (needed for deployment)'}
1117
-
1118
- **Available Models:**
1119
- - OpenAI: gpt-4.1-mini (fixed)
1120
- - VIDraft: Gemma-3-r1984-27B (model ID: dep89a2fld32mcm)
1121
-
1122
- **Sample Workflows:**
1123
- - Basic Q&A: Simple question-answer flow (VIDraft)
1124
- - VIDraft: Korean language example with Gemma model
1125
- - Multi-Input: Combine multiple inputs for personalized output (VIDraft)
1126
- - Chain: Sequential processing with multiple outputs (VIDraft + OpenAI)
1127
-
1128
- **Note**: All examples prioritize VIDraft for optimal performance. Friendli API token will be automatically configured during deployment.
1129
- """)
1130
-
1131
- # State for storing workflow data
1132
- loaded_data = gr.State(None)
1133
- trigger_update = gr.State(False)
1134
- save_status = gr.State("Ready")
1135
-
1136
-
1137
- # โ”€โ”€โ”€ Dynamic Workflow Container โ”€
1138
- # โ”€โ”€โ”€ Dynamic Workflow Container โ”€โ”€โ”€
1139
- with gr.Column(elem_classes=["workflow-container"]):
1140
-
1141
- # Auto-save status indicator
1142
- with gr.Row():
1143
- gr.Markdown("### ๐ŸŽจ Visual Workflow Designer")
1144
- save_indicator = gr.Markdown("๐Ÿ’พ Auto-save: Ready",
1145
- elem_classes=["save-indicator"])
1146
-
1147
- @gr.render(inputs=[loaded_data, trigger_update])
1148
- def render_workflow(data, trigger):
1149
- """
1150
- ๋™์ ์œผ๋กœ WorkflowBuilder ๋ Œ๋”๋ง + 10์ดˆ ์ž๋™ ์ €์žฅ
1151
- """
1152
- workflow_value = data if data else {"nodes": [], "edges": []}
1153
-
1154
- # โ–ผ ์šฐ๋ฆฌ๊ฐ€ ๋ณด์—ฌ์ฃผ๊ณ  ์‹ถ์€ ์ƒˆ ํŒ”๋ ˆํŠธ
1155
- best_ai_palette = [
1156
- {
1157
- "category": "Best AI",
1158
- "items": [
1159
- {
1160
- "type": "llmNode",
1161
- "label": "AI Processing",
1162
- "template": {
1163
- "provider": {"value": "VIDraft"},
1164
- "model": {"value": "Gemma-3-r1984-27B"},
1165
- "temperature": {"value": 0.7},
1166
- "system_prompt":{"value": "You are a helpful assistant."}
1167
- }
1168
- },
1169
- {
1170
- "type": "textNode",
1171
- "label": "Markdown โฌ‡๏ธ",
1172
- "template": {
1173
- "text": {"value": "### Write any markdown here"}
1174
- }
1175
- }
1176
- ]
1177
- }
1178
- ]
1179
-
1180
- # ๐Ÿšฉ ์—ฌ๊ธฐ! custom_palette ์ธ์ž๋ฅผ ๋‹ค์‹œ ๋„ฃ์–ด ์ค€๋‹ค
1181
- wb = WorkflowBuilder(
1182
- label = "",
1183
- info = "Drag nodes โ†’ Connect edges โ†’ Edit properties โ†’ Auto-save!",
1184
- value = workflow_value,
1185
- elem_id = "main_workflow",
1186
- custom_palette = best_ai_palette # โ† ํ•„์ˆ˜
1187
- )
1188
-
1189
- # ---------- ์ €์žฅ ๋กœ์ง ----------
1190
- def periodic_save(workflow_data):
1191
- import json, time, copy
1192
- if isinstance(workflow_data, str):
1193
- try:
1194
- workflow_data = json.loads(workflow_data)
1195
- except json.JSONDecodeError:
1196
- workflow_data = {"nodes": [], "edges": []}
1197
- ts = time.strftime("%H:%M:%S")
1198
- return copy.deepcopy(workflow_data), f"๐Ÿ’พ Auto-save: Saved โœ“ ({ts})"
1199
-
1200
- wb.change(fn=periodic_save, inputs=[wb], outputs=[loaded_data, save_indicator])
1201
-
1202
- auto_timer = gr.Timer(10)
1203
- auto_timer.tick(fn=periodic_save, inputs=[wb], outputs=[loaded_data, save_indicator])
1204
-
1205
- return wb
1206
-
1207
-
1208
-
1209
-
1210
-
1211
-
1212
-
1213
- # โ”€โ”€โ”€ Import Section โ”€โ”€โ”€
1214
- with gr.Accordion("๐Ÿ“ฅ Import Workflow", open=True):
1215
- gr.Markdown("*Load an existing workflow from JSON or start with a sample template*")
1216
- with gr.Row():
1217
- with gr.Column(scale=2):
1218
- import_json_text = gr.Code(
1219
- language="json",
1220
- label="Paste JSON here",
1221
- lines=8,
1222
- value='{\n "nodes": [],\n "edges": []\n}'
1223
- )
1224
- with gr.Column(scale=1):
1225
- file_upload = gr.File(
1226
- label="Or upload JSON file",
1227
- file_types=[".json"],
1228
- type="filepath"
1229
- )
1230
- btn_load = gr.Button("๐Ÿ“ฅ Load Workflow", variant="primary", size="lg")
1231
-
1232
- # Sample buttons
1233
- gr.Markdown("**Sample Workflows:**")
1234
- with gr.Row():
1235
- btn_sample_basic = gr.Button("๐ŸŽฏ Basic Q&A", variant="secondary", scale=1)
1236
- btn_sample_vidraft = gr.Button("๐Ÿค– VIDraft", variant="secondary", scale=1)
1237
- with gr.Row():
1238
- btn_sample_multi = gr.Button("๐Ÿ“ Multi-Input", variant="secondary", scale=1)
1239
- btn_sample_chain = gr.Button("๐Ÿ”— Chain", variant="secondary", scale=1)
1240
-
1241
- # Status
1242
- status_text = gr.Textbox(
1243
- label="Status",
1244
- value="Ready",
1245
- elem_classes=["status-box"],
1246
- interactive=False
1247
- )
1248
-
1249
- # โ”€โ”€โ”€ Export Section โ”€โ”€โ”€
1250
- gr.Markdown("## ๐Ÿ’พ Export / Live Preview")
1251
- gr.Markdown("*Your workflow is automatically saved. The JSON below shows your current workflow in real-time.*")
1252
-
1253
- # Workflow info display
1254
- workflow_info = gr.Markdown("๐Ÿ“Š Empty workflow", elem_classes=["workflow-info"])
1255
-
1256
- with gr.Row():
1257
- with gr.Column(scale=3):
1258
- export_preview = gr.Code(
1259
- language="json",
1260
- label="Current Workflow JSON (Live Preview)",
1261
- lines=8,
1262
- interactive=False
1263
- )
1264
- gr.Markdown("*๐Ÿ’ก This JSON updates automatically as you modify the workflow above*")
1265
- with gr.Column(scale=1):
1266
- btn_preview = gr.Button("๐Ÿ”„ Force Refresh", size="lg", variant="secondary")
1267
- btn_download = gr.DownloadButton(
1268
- "๐Ÿ’พ Download JSON",
1269
- size="lg",
1270
- variant="primary",
1271
- visible=True
1272
- )
1273
-
1274
- # โ”€โ”€โ”€ Deploy Section โ”€โ”€โ”€
1275
- with gr.Accordion("๐Ÿš€ Deploy to Hugging Face Space", open=False, elem_classes=["deploy-section"]):
1276
- gr.Markdown("""
1277
- Deploy your **current workflow** as an independent Hugging Face Space app.
1278
- The workflow shown in the JSON preview above will be deployed exactly as is.
1279
- """)
1280
-
1281
- gr.Markdown("*โš ๏ธ Make sure to save/finalize your workflow design before deploying!*")
1282
-
1283
- with gr.Row():
1284
- with gr.Column(scale=2):
1285
- deploy_name = gr.Textbox(
1286
- label="App Name",
1287
- placeholder="My Awesome Workflow App",
1288
- value="My Workflow App"
1289
- )
1290
- deploy_description = gr.Textbox(
1291
- label="App Description",
1292
- placeholder="Describe what your workflow does...",
1293
- lines=3,
1294
- value="A workflow application created with MOUSE Workflow builder."
1295
- )
1296
- deploy_space_name = gr.Textbox(
1297
- label="Space Name (your-username/space-name)",
1298
- placeholder="username/my-workflow-app",
1299
- info="This will be the URL of your Space"
1300
- )
1301
-
1302
- with gr.Column(scale=1):
1303
- deploy_token = gr.Textbox(
1304
- label="Hugging Face Token",
1305
- type="password",
1306
- placeholder="hf_...",
1307
- info="Get your token from huggingface.co/settings/tokens"
1308
- )
1309
-
1310
- # API Keys ์„ค์ • ์„น์…˜
1311
- gr.Markdown("### ๐Ÿ”‘ API Keys Configuration")
1312
-
1313
- # FRIENDLI_TOKEN ์„ค์ •
1314
- friendli_token_input = gr.Textbox(
1315
- label="FRIENDLI_TOKEN (VIDraft/Gemma)",
1316
- type="password",
1317
- placeholder="flp_...",
1318
- value=os.getenv("FRIENDLI_TOKEN", ""),
1319
- info="Required for VIDraft. Will be added as secret."
1320
- )
1321
-
1322
- # OpenAI API Key ์„ค์ •
1323
- openai_token_input = gr.Textbox(
1324
- label="OPENAI_API_KEY (Optional)",
1325
- type="password",
1326
- placeholder="sk-...",
1327
- value=os.getenv("OPENAI_API_KEY", ""),
1328
- info="Optional. Leave empty if not using OpenAI."
1329
- )
1330
-
1331
- deploy_private = gr.Checkbox(
1332
- label="Make Space Private",
1333
- value=False
1334
- )
1335
-
1336
- btn_deploy = gr.Button("๐Ÿš€ Deploy to HF Space", variant="primary", size="lg")
1337
-
1338
- # Deploy status
1339
- deploy_status = gr.Markdown("")
1340
-
1341
- # Preview generated code
1342
- with gr.Accordion("๐Ÿ“„ Preview Generated Code", open=False):
1343
- generated_code_preview = gr.Code(
1344
- language="python",
1345
- label="app.py (This will be deployed)",
1346
- lines=20
1347
- )
1348
-
1349
- # โ”€โ”€โ”€ UI Execution Section โ”€โ”€โ”€
1350
- with gr.Column(elem_classes=["ui-execution-section"]):
1351
- gr.Markdown("## ๐Ÿš€ UI Execution")
1352
- gr.Markdown("Test your workflow instantly! Click below to generate and run the UI from your current workflow design.")
1353
-
1354
- btn_execute_ui = gr.Button("โ–ถ๏ธ Generate & Run UI from Current Workflow", variant="primary", size="lg")
1355
-
1356
- # UI execution state
1357
- ui_workflow_data = gr.State(None)
1358
-
1359
- # Dynamic UI container
1360
- @gr.render(inputs=[ui_workflow_data])
1361
- def render_execution_ui(workflow_data):
1362
- if not workflow_data or not workflow_data.get("nodes"):
1363
- gr.Markdown("*Load a workflow first, then click 'Generate & Run UI'*")
1364
- return
1365
-
1366
- gr.Markdown("### ๐Ÿ“‹ Generated UI")
1367
-
1368
- # Extract input and output nodes
1369
- input_nodes = []
1370
- output_nodes = []
1371
-
1372
- for node in workflow_data.get("nodes", []):
1373
- node_type = node.get("type", "")
1374
- if node_type in ["ChatInput", "textInput", "Input", "numberInput"]:
1375
- input_nodes.append(node)
1376
- elif node_type in ["ChatOutput", "textOutput", "Output"]:
1377
- output_nodes.append(node)
1378
- elif node_type == "textNode":
1379
- # textNode๋Š” ์ค‘๊ฐ„ ์ฒ˜๋ฆฌ ๋…ธ๋“œ๋กœ, UI์—๋Š” ํ‘œ์‹œํ•˜์ง€ ์•Š์Œ
1380
- pass
1381
-
1382
- # Create input components
1383
- input_components = {}
1384
-
1385
- if input_nodes:
1386
- gr.Markdown("#### ๐Ÿ“ฅ Inputs")
1387
- for node in input_nodes:
1388
- node_id = node.get("id")
1389
- label = node.get("data", {}).get("label", node_id)
1390
- node_type = node.get("type")
1391
-
1392
- # Get default value
1393
- template = node.get("data", {}).get("template", {})
1394
- default_value = template.get("input_value", {}).get("value", "")
1395
-
1396
- if node_type == "numberInput":
1397
- input_components[node_id] = gr.Number(
1398
- label=label,
1399
- value=float(default_value) if default_value else 0
1400
- )
1401
- else:
1402
- input_components[node_id] = gr.Textbox(
1403
- label=label,
1404
- value=default_value,
1405
- lines=2,
1406
- placeholder="Enter your input..."
1407
- )
1408
-
1409
- # Execute button
1410
- execute_btn = gr.Button("๐ŸŽฏ Execute", variant="primary")
1411
-
1412
- # Create output components
1413
- output_components = {}
1414
-
1415
- if output_nodes:
1416
- gr.Markdown("#### ๐Ÿ“ค Outputs")
1417
- for node in output_nodes:
1418
- node_id = node.get("id")
1419
- label = node.get("data", {}).get("label", node_id)
1420
-
1421
- output_components[node_id] = gr.Textbox(
1422
- label=label,
1423
- interactive=False,
1424
- lines=3
1425
- )
1426
-
1427
- # Execution log
1428
- gr.Markdown("#### ๐Ÿ“Š Execution Log")
1429
- log_output = gr.Textbox(
1430
- label="Log",
1431
- interactive=False,
1432
- lines=5
1433
- )
1434
-
1435
- # Define execution handler
1436
- def execute_ui_workflow(*input_values):
1437
- # Create input dictionary
1438
- inputs_dict = {}
1439
- input_keys = list(input_components.keys())
1440
- for i, key in enumerate(input_keys):
1441
- if i < len(input_values):
1442
- inputs_dict[key] = input_values[i]
1443
-
1444
- # Check API status
1445
- log = "=== Workflow Execution Started ===\n"
1446
- log += f"Inputs provided: {len(inputs_dict)}\n"
1447
-
1448
- # API ์ƒํƒœ ํ™•์ธ
1449
- vidraft_token = os.getenv("FRIENDLI_TOKEN")
1450
- openai_key = os.getenv("OPENAI_API_KEY")
1451
-
1452
- log += "\nAPI Status:\n"
1453
- log += f"- FRIENDLI_TOKEN (VIDraft): {'โœ… Found' if vidraft_token else 'โŒ Not found'}\n"
1454
- log += f"- OPENAI_API_KEY: {'โœ… Found' if openai_key else 'โŒ Not found'}\n"
1455
-
1456
- if not vidraft_token and not openai_key:
1457
- log += "\nโš ๏ธ No API keys found. Results will be simulated.\n"
1458
- log += "To get real AI responses, set API keys in environment variables.\n"
1459
- log += "Minimum requirement: FRIENDLI_TOKEN for VIDraft\n"
1460
- elif vidraft_token and not openai_key:
1461
- log += "\nโœ… VIDraft API connected - Basic functionality available\n"
1462
- log += "๐Ÿ’ก Add OPENAI_API_KEY for full functionality\n"
1463
-
1464
- log += "\n--- Processing Nodes ---\n"
1465
-
1466
- try:
1467
- results = execute_workflow_simple(workflow_data, inputs_dict)
1468
-
1469
- # Prepare outputs
1470
- output_values = []
1471
- for node_id in output_components.keys():
1472
- value = results.get(node_id, "No output")
1473
- output_values.append(value)
1474
-
1475
- # Log ๊ธธ์ด ์ œํ•œ
1476
- display_value = value[:100] + "..." if len(str(value)) > 100 else value
1477
- log += f"\nOutput [{node_id}]: {display_value}\n"
1478
-
1479
- log += "\n=== Execution Completed Successfully! ===\n"
1480
- output_values.append(log)
1481
-
1482
- return output_values
1483
-
1484
- except Exception as e:
1485
- error_msg = f"โŒ Error: {str(e)}"
1486
- log += f"\n{error_msg}\n"
1487
- log += "=== Execution Failed ===\n"
1488
- return [error_msg] * len(output_components) + [log]
1489
-
1490
- # Connect execution
1491
- all_inputs = list(input_components.values())
1492
- all_outputs = list(output_components.values()) + [log_output]
1493
-
1494
- execute_btn.click(
1495
- fn=execute_ui_workflow,
1496
- inputs=all_inputs,
1497
- outputs=all_outputs
1498
- )
1499
-
1500
- # โ”€โ”€โ”€ Event Handlers โ”€โ”€โ”€
1501
-
1502
- # Load workflow (from text or file)
1503
- def load_workflow(json_text, file_obj):
1504
- data, status = load_json_from_text_or_file(json_text, file_obj)
1505
- if data:
1506
- # ๋กœ๋“œ ์„ฑ๊ณต์‹œ ์ž๋™์œผ๋กœ ๋ฏธ๋ฆฌ๋ณด๊ธฐ ์—…๋ฐ์ดํŠธ
1507
- return data, status, json_text if not file_obj else export_pretty(data), "๐Ÿ’พ Auto-save: Loaded โœ“"
1508
- else:
1509
- return None, status, gr.update(), gr.update()
1510
-
1511
- btn_load.click(
1512
- fn=load_workflow,
1513
- inputs=[import_json_text, file_upload],
1514
- outputs=[loaded_data, status_text, import_json_text, save_indicator]
1515
- ).then(
1516
- fn=lambda current_trigger: not current_trigger,
1517
- inputs=trigger_update,
1518
- outputs=trigger_update
1519
- )
1520
-
1521
- # Auto-load when file is uploaded
1522
- file_upload.change(
1523
- fn=load_workflow,
1524
- inputs=[import_json_text, file_upload],
1525
- outputs=[loaded_data, status_text, import_json_text, save_indicator]
1526
- ).then(
1527
- fn=lambda current_trigger: not current_trigger,
1528
- inputs=trigger_update,
1529
- outputs=trigger_update
1530
- )
1531
-
1532
- # Load samples
1533
- btn_sample_basic.click(
1534
- fn=lambda: (create_sample_workflow("basic"), "โœ… Basic Q&A sample loaded", export_pretty(create_sample_workflow("basic")), "๐Ÿ’พ Auto-save: Sample loaded โœ“"),
1535
- outputs=[loaded_data, status_text, import_json_text, save_indicator]
1536
- ).then(
1537
- fn=lambda current_trigger: not current_trigger,
1538
- inputs=trigger_update,
1539
- outputs=trigger_update
1540
- )
1541
-
1542
- btn_sample_vidraft.click(
1543
- fn=lambda: (create_sample_workflow("vidraft"), "โœ… VIDraft sample loaded", export_pretty(create_sample_workflow("vidraft")), "๐Ÿ’พ Auto-save: Sample loaded โœ“"),
1544
- outputs=[loaded_data, status_text, import_json_text, save_indicator]
1545
- ).then(
1546
- fn=lambda current_trigger: not current_trigger,
1547
- inputs=trigger_update,
1548
- outputs=trigger_update
1549
- )
1550
-
1551
- btn_sample_multi.click(
1552
- fn=lambda: (create_sample_workflow("multi_input"), "โœ… Multi-input sample loaded", export_pretty(create_sample_workflow("multi_input")), "๐Ÿ’พ Auto-save: Sample loaded โœ“"),
1553
- outputs=[loaded_data, status_text, import_json_text, save_indicator]
1554
- ).then(
1555
- fn=lambda current_trigger: not current_trigger,
1556
- inputs=trigger_update,
1557
- outputs=trigger_update
1558
- )
1559
-
1560
- btn_sample_chain.click(
1561
- fn=lambda: (create_sample_workflow("chain"), "โœ… Chain processing sample loaded", export_pretty(create_sample_workflow("chain")), "๐Ÿ’พ Auto-save: Sample loaded โœ“"),
1562
- outputs=[loaded_data, status_text, import_json_text, save_indicator]
1563
- ).then(
1564
- fn=lambda current_trigger: not current_trigger,
1565
- inputs=trigger_update,
1566
- outputs=trigger_update
1567
- )
1568
-
1569
- # Preview current workflow - ๊ฐ•์ œ ์ƒˆ๋กœ๊ณ ์นจ
1570
- def force_refresh_preview(current_data):
1571
- """ํ˜„์žฌ ์›Œํฌํ”Œ๋กœ์šฐ ๋ฐ์ดํ„ฐ๋ฅผ ๊ฐ•์ œ๋กœ ์ƒˆ๋กœ๊ณ ์นจ"""
1572
- if current_data:
1573
- node_count = len(current_data.get("nodes", []))
1574
- edge_count = len(current_data.get("edges", []))
1575
- info = f"๐Ÿ“Š Workflow contains {node_count} nodes and {edge_count} edges"
1576
- return export_pretty(current_data), "๐Ÿ’พ Auto-save: Refreshed โœ“", info
1577
- return "No workflow data available", "๐Ÿ’พ Auto-save: No data", "๐Ÿ“Š Empty workflow"
1578
-
1579
- btn_preview.click(
1580
- fn=force_refresh_preview,
1581
- inputs=loaded_data,
1582
- outputs=[export_preview, save_indicator, workflow_info]
1583
- )
1584
-
1585
- # Download workflow๋Š” ์ด๋ฏธ loaded_data.change์—์„œ ์ฒ˜๋ฆฌ๋จ
1586
-
1587
- # Auto-update export preview when workflow changes
1588
- def update_preview_and_download(data):
1589
- """์›Œํฌํ”Œ๋กœ์šฐ ๋ณ€๊ฒฝ์‹œ ๋ฏธ๋ฆฌ๋ณด๊ธฐ์™€ ๋‹ค์šด๋กœ๋“œ ์—…๋ฐ์ดํŠธ"""
1590
- if data:
1591
- preview = export_pretty(data)
1592
- download_file = export_file(data)
1593
- node_count = len(data.get("nodes", []))
1594
- edge_count = len(data.get("edges", []))
1595
- status = f"๐Ÿ“Š Workflow contains {node_count} nodes and {edge_count} edges"
1596
- return preview, download_file, status
1597
- return "No workflow data", None, "๐Ÿ“Š Empty workflow"
1598
-
1599
- loaded_data.change(
1600
- fn=update_preview_and_download,
1601
- inputs=loaded_data,
1602
- outputs=[export_preview, btn_download, workflow_info]
1603
- )
1604
-
1605
- # Generate UI execution - ํ˜„์žฌ ์›Œํฌํ”Œ๋กœ์šฐ ์‚ฌ์šฉ
1606
- def prepare_ui_execution(current_data):
1607
- """ํ˜„์žฌ ์›Œํฌํ”Œ๋กœ์šฐ๋ฅผ UI ์‹คํ–‰์šฉ์œผ๋กœ ์ค€๋น„"""
1608
- if not current_data or not current_data.get("nodes"):
1609
- gr.Warning("Please create a workflow first!")
1610
- return None
1611
- return current_data
1612
-
1613
- btn_execute_ui.click(
1614
- fn=prepare_ui_execution,
1615
- inputs=loaded_data,
1616
- outputs=ui_workflow_data
1617
- )
1618
-
1619
- # โ”€โ”€โ”€ Deploy Event Handlers โ”€โ”€โ”€
1620
-
1621
- # Preview generated code
1622
- def preview_generated_code(workflow_data, app_name, app_description):
1623
- if not workflow_data:
1624
- return "# No workflow loaded\n# Create or load a workflow first"
1625
-
1626
- if not workflow_data.get("nodes"):
1627
- return "# Empty workflow\n# Add some nodes to see the generated code"
1628
-
1629
- try:
1630
- code = generate_standalone_app(workflow_data, app_name, app_description)
1631
- return code
1632
- except Exception as e:
1633
- return f"# Error generating code\n# {str(e)}"
1634
-
1635
- # Update preview when inputs change
1636
- deploy_name.change(
1637
- fn=preview_generated_code,
1638
- inputs=[loaded_data, deploy_name, deploy_description],
1639
- outputs=generated_code_preview
1640
- )
1641
-
1642
- deploy_description.change(
1643
- fn=preview_generated_code,
1644
- inputs=[loaded_data, deploy_name, deploy_description],
1645
- outputs=generated_code_preview
1646
- )
1647
-
1648
- # Update preview when workflow changes too
1649
- loaded_data.change(
1650
- fn=preview_generated_code,
1651
- inputs=[loaded_data, deploy_name, deploy_description],
1652
- outputs=generated_code_preview
1653
- )
1654
-
1655
- # Deploy handler
1656
- def handle_deploy(workflow_data, app_name, app_description, hf_token, space_name,
1657
- friendli_token, openai_token, is_private):
1658
- if not workflow_data:
1659
- return "โŒ No workflow loaded. Please create or load a workflow first."
1660
-
1661
- if not workflow_data.get("nodes"):
1662
- return "โŒ Empty workflow. Please add some nodes to your workflow."
1663
-
1664
- if not hf_token:
1665
- return "โŒ Hugging Face token is required. Get yours at huggingface.co/settings/tokens"
1666
-
1667
- if not space_name:
1668
- return "โŒ Space name is required. Format: username/space-name"
1669
-
1670
- # Validate space name format
1671
- if "/" not in space_name:
1672
- return "โŒ Invalid space name format. Use: username/space-name"
1673
-
1674
- # Check if huggingface-hub is available
1675
- if not HF_HUB_AVAILABLE:
1676
- return "โŒ huggingface-hub library not installed. Install with: pip install huggingface-hub"
1677
-
1678
- # Show deploying status
1679
- yield "๐Ÿ”„ Deploying to Hugging Face Space..."
1680
-
1681
- # Prepare API keys
1682
- api_keys = {}
1683
-
1684
- # Always include FRIENDLI_TOKEN (even if empty)
1685
- if not friendli_token:
1686
- friendli_token = os.getenv("FRIENDLI_TOKEN", "")
1687
- if friendli_token:
1688
- api_keys["FRIENDLI_TOKEN"] = friendli_token
1689
-
1690
- # Include OpenAI key if provided
1691
- if not openai_token:
1692
- openai_token = os.getenv("OPENAI_API_KEY", "")
1693
- if openai_token:
1694
- api_keys["OPENAI_API_KEY"] = openai_token
1695
-
1696
- # Deploy
1697
- result = deploy_to_huggingface(
1698
- workflow_data=workflow_data,
1699
- app_name=app_name,
1700
- app_description=app_description,
1701
- hf_token=hf_token,
1702
- space_name=space_name,
1703
- is_private=is_private,
1704
- api_keys=api_keys
1705
- )
1706
-
1707
- if result["success"]:
1708
- # Build secrets status message
1709
- secrets_msg = "\n\n**๐Ÿ”‘ API Keys Status:**"
1710
-
1711
- if result.get("added_secrets"):
1712
- for secret in result["added_secrets"]:
1713
- secrets_msg += f"\n- {secret}: โœ… Successfully added"
1714
-
1715
- if result.get("failed_secrets"):
1716
- for failure in result["failed_secrets"]:
1717
- secrets_msg += f"\n- {failure}: โŒ Failed to add"
1718
-
1719
- # Check for missing required keys
1720
- providers = result.get("providers_used", [])
1721
- if "VIDraft" in providers and "FRIENDLI_TOKEN" not in result.get("added_secrets", []):
1722
- secrets_msg += "\n- FRIENDLI_TOKEN: โš ๏ธ Required for VIDraft but not provided"
1723
- if "OpenAI" in providers and "OPENAI_API_KEY" not in result.get("added_secrets", []):
1724
- secrets_msg += "\n- OPENAI_API_KEY: โš ๏ธ Required for OpenAI but not provided"
1725
-
1726
- yield f"""โœ… **Deployment Successful!**
1727
-
1728
- ๐ŸŽ‰ Your workflow has been deployed to:
1729
- [{result['space_url']}]({result['space_url']})
1730
-
1731
- โฑ๏ธ The Space will be ready in a few minutes. Building usually takes 2-5 minutes.
1732
-
1733
- {secrets_msg}
1734
-
1735
- ๐Ÿ“ **Providers Detected in Workflow:**
1736
- {', '.join(result.get('providers_used', [])) if result.get('providers_used') else 'No LLM providers detected'}
1737
-
1738
- ๐Ÿš€ **Default Configuration:**
1739
- The app is configured to prioritize VIDraft (Gemma-3-r1984-27B) for optimal performance.
1740
-
1741
- ๐Ÿ“š **Space Management:**
1742
- - To update secrets: Go to Space settings โ†’ Repository secrets
1743
- - To restart Space: Go to Space settings โ†’ Factory reboot
1744
- - To make changes: Edit files directly in the Space repository
1745
- """
1746
- else:
1747
- yield f"โŒ **Deployment Failed**\n\nError: {result['error']}"
1748
-
1749
- btn_deploy.click(
1750
- fn=handle_deploy,
1751
- inputs=[loaded_data, deploy_name, deploy_description, deploy_token, deploy_space_name,
1752
- friendli_token_input, openai_token_input, deploy_private],
1753
- outputs=deploy_status
1754
- )
1755
-
1756
 
1757
- # -------------------------------------------------------------------
1758
- # ๐Ÿš€ ์‹คํ–‰
1759
- # -------------------------------------------------------------------
1760
  if __name__ == "__main__":
1761
- demo.launch(server_name="0.0.0.0", show_error=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
+ import sys
3
+ import streamlit as st
4
+ from tempfile import NamedTemporaryFile
 
 
 
5
 
6
def main():
    """Load the application source from the MAIN_CODE secret and execute it.

    The real app code is stored in the Space's ``MAIN_CODE`` secret so it
    never appears in the public repository.  The code is written to a
    temporary ``.py`` file only so that ``compile()`` can attribute a real
    filename, which makes tracebacks readable.
    """
    try:
        # Full application source injected via a Space secret.
        code = os.environ.get("MAIN_CODE")

        if not code:
            st.error("⚠️ The application code wasn't found in secrets. Please add the MAIN_CODE secret.")
            return

        # Create a temporary Python file so error tracebacks reference it.
        with NamedTemporaryFile(suffix='.py', delete=False, mode='w') as tmp:
            tmp.write(code)
            tmp_path = tmp.name

        try:
            # NOTE(security): exec of operator-supplied code is intentional
            # here (the secret is set by the Space owner) — never route
            # untrusted input through MAIN_CODE.
            exec(compile(code, tmp_path, 'exec'), globals())
        finally:
            # Clean up even when the executed code raises; the original
            # version leaked the temp file on any exec failure.
            try:
                os.unlink(tmp_path)
            except OSError:
                # Best-effort cleanup: the file may already be gone.
                pass

    except Exception as e:
        st.error(f"⚠️ Error loading or executing the application: {str(e)}")
        import traceback
        st.code(traceback.format_exc())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
 
 
 
34
# Script entry point: run the launcher only when executed directly.
if __name__ == "__main__":
    main()