fantaxy committed on
Commit
8ab24a9
·
verified ·
1 Parent(s): 794a971

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +223 -42
app.py CHANGED
@@ -6,10 +6,32 @@ MOUSE Workflow - Visual Workflow Builder with UI Execution
6
  โœ“ Auto-generate UI from workflow for end-user execution
7
  """
8
 
9
- import os, json, typing, tempfile
10
  import gradio as gr
11
  from gradio_workflowbuilder import WorkflowBuilder
12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  # -------------------------------------------------------------------
14
  # ๐Ÿ› ๏ธ ํ—ฌํผ ํ•จ์ˆ˜๋“ค
15
  # -------------------------------------------------------------------
@@ -108,50 +130,172 @@ def create_sample_workflow():
108
  ]
109
  }
110
 
111
- # UI ์‹คํ–‰์„ ์œ„ํ•œ ๊ฐ„๋‹จํ•œ ์‹คํ–‰ ํ•จ์ˆ˜
112
  def execute_workflow_simple(workflow_data: dict, input_values: dict) -> dict:
113
- """์›Œํฌํ”Œ๋กœ์šฐ ์‹คํ–‰ ์‹œ๋ฎฌ๋ ˆ์ด์…˜"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114
  results = {}
 
 
115
 
116
- # ์ž…๋ ฅ ๋…ธ๋“œ ์ฒ˜๋ฆฌ
117
- for node in workflow_data.get("nodes", []):
118
  node_id = node.get("id")
119
  node_type = node.get("type", "")
 
120
 
121
- if node_type in ["ChatInput", "textInput", "Input"]:
122
- # UI์—์„œ ์ œ๊ณต๋œ ์ž…๋ ฅ๊ฐ’ ์‚ฌ์šฉ
123
- if node_id in input_values:
124
- results[node_id] = input_values[node_id]
125
- else:
126
- # ๊ธฐ๋ณธ๊ฐ’ ์‚ฌ์šฉ
127
- template = node.get("data", {}).get("template", {})
128
- default_value = template.get("input_value", {}).get("value", "")
129
- results[node_id] = default_value
130
-
131
- elif node_type == "llmNode":
132
- # LLM ์ฒ˜๋ฆฌ ์‹œ๋ฎฌ๋ ˆ์ด์…˜
133
- # ์‹ค์ œ๋กœ๋Š” ์—ฌ๊ธฐ์„œ API ํ˜ธ์ถœ์„ ํ•˜๊ฒ ์ง€๋งŒ, ์ง€๊ธˆ์€ ์‹œ๋ฎฌ๋ ˆ์ด์…˜
134
- input_text = ""
135
 
136
- # ์—ฐ๊ฒฐ๋œ ์ž…๋ ฅ ์ฐพ๊ธฐ
137
- for edge in workflow_data.get("edges", []):
138
- if edge.get("target") == node_id:
139
- source_id = edge.get("source")
140
- if source_id in results:
141
- input_text = results[source_id]
142
- break
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
143
 
144
- # ๊ฐ„๋‹จํ•œ ์‘๋‹ต ์ƒ์„ฑ
145
- results[node_id] = f"[AI Response to: {input_text}]"
146
-
147
- elif node_type in ["ChatOutput", "textOutput", "Output"]:
148
- # ์ถœ๋ ฅ ๋…ธ๋“œ๋Š” ์—ฐ๊ฒฐ๋œ ๋…ธ๋“œ์˜ ๊ฒฐ๊ณผ๋ฅผ ๊ฐ€์ ธ์˜ด
149
- for edge in workflow_data.get("edges", []):
150
- if edge.get("target") == node_id:
151
- source_id = edge.get("source")
152
- if source_id in results:
153
- results[node_id] = results[source_id]
154
- break
 
155
 
156
  return results
157
 
@@ -201,6 +345,22 @@ with gr.Blocks(title="๐Ÿญ MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
201
  """
202
  )
203
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204
  # State for storing workflow data
205
  loaded_data = gr.State(None)
206
  trigger_update = gr.State(False)
@@ -352,8 +512,25 @@ with gr.Blocks(title="๐Ÿญ MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
352
  if i < len(input_values):
353
  inputs_dict[key] = input_values[i]
354
 
355
- # Execute workflow
356
- log = f"Executing workflow with {len(inputs_dict)} inputs...\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
357
 
358
  try:
359
  results = execute_workflow_simple(workflow_data, inputs_dict)
@@ -363,16 +540,20 @@ with gr.Blocks(title="๐Ÿญ MOUSE Workflow", theme=gr.themes.Soft(), css=CSS) as
363
  for node_id in output_components.keys():
364
  value = results.get(node_id, "No output")
365
  output_values.append(value)
366
- log += f"Output {node_id}: {value}\n"
 
 
 
367
 
368
- log += "\nโœ… Execution completed!"
369
  output_values.append(log)
370
 
371
  return output_values
372
 
373
  except Exception as e:
374
  error_msg = f"โŒ Error: {str(e)}"
375
- log += error_msg
 
376
  return [error_msg] * len(output_components) + [log]
377
 
378
  # Connect execution
 
6
  โœ“ Auto-generate UI from workflow for end-user execution
7
  """
8
 
9
+ import os, json, typing, tempfile, traceback
10
  import gradio as gr
11
  from gradio_workflowbuilder import WorkflowBuilder
12
 
13
+ # Optional imports for LLM APIs
14
+ try:
15
+ from openai import OpenAI
16
+ OPENAI_AVAILABLE = True
17
+ except ImportError:
18
+ OPENAI_AVAILABLE = False
19
+ print("OpenAI library not available. Install with: pip install openai")
20
+
21
+ try:
22
+ import anthropic
23
+ ANTHROPIC_AVAILABLE = True
24
+ except ImportError:
25
+ ANTHROPIC_AVAILABLE = False
26
+ print("Anthropic library not available. Install with: pip install anthropic")
27
+
28
+ try:
29
+ import requests
30
+ REQUESTS_AVAILABLE = True
31
+ except ImportError:
32
+ REQUESTS_AVAILABLE = False
33
+ print("Requests library not available. Install with: pip install requests")
34
+
35
  # -------------------------------------------------------------------
36
  # ๐Ÿ› ๏ธ ํ—ฌํผ ํ•จ์ˆ˜๋“ค
37
  # -------------------------------------------------------------------
 
130
  ]
131
  }
132
 
133
+ # UI ์‹คํ–‰์„ ์œ„ํ•œ ์‹ค์ œ ์›Œํฌํ”Œ๋กœ์šฐ ์‹คํ–‰ ํ•จ์ˆ˜
134
  def execute_workflow_simple(workflow_data: dict, input_values: dict) -> dict:
135
+ """์›Œํฌํ”Œ๋กœ์šฐ ์‹ค์ œ ์‹คํ–‰"""
136
+ import traceback
137
+
138
+ # API ํ‚ค ํ™•์ธ
139
+ friendli_token = os.getenv("FRIENDLI_TOKEN")
140
+ openai_key = os.getenv("OPENAI_API_KEY")
141
+ anthropic_key = os.getenv("ANTHROPIC_API_KEY")
142
+
143
+ # OpenAI ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ ํ™•์ธ
144
+ try:
145
+ from openai import OpenAI
146
+ openai_available = True
147
+ except ImportError:
148
+ openai_available = False
149
+ print("OpenAI library not available")
150
+
151
+ # Anthropic ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ ํ™•์ธ
152
+ try:
153
+ import anthropic
154
+ anthropic_available = True
155
+ except ImportError:
156
+ anthropic_available = False
157
+ print("Anthropic library not available")
158
+
159
  results = {}
160
+ nodes = workflow_data.get("nodes", [])
161
+ edges = workflow_data.get("edges", [])
162
 
163
+ # ๋…ธ๋“œ๋ฅผ ์ˆœ์„œ๋Œ€๋กœ ์ฒ˜๋ฆฌ
164
+ for node in nodes:
165
  node_id = node.get("id")
166
  node_type = node.get("type", "")
167
+ node_data = node.get("data", {})
168
 
169
+ try:
170
+ if node_type in ["ChatInput", "textInput", "Input"]:
171
+ # UI์—์„œ ์ œ๊ณต๋œ ์ž…๋ ฅ๊ฐ’ ์‚ฌ์šฉ
172
+ if node_id in input_values:
173
+ results[node_id] = input_values[node_id]
174
+ else:
175
+ # ๊ธฐ๋ณธ๊ฐ’ ์‚ฌ์šฉ
176
+ template = node_data.get("template", {})
177
+ default_value = template.get("input_value", {}).get("value", "")
178
+ results[node_id] = default_value
 
 
 
 
179
 
180
+ elif node_type in ["llmNode", "OpenAIModel", "ChatModel"]:
181
+ # LLM ๋…ธ๋“œ ์ฒ˜๋ฆฌ
182
+ template = node_data.get("template", {})
183
+
184
+ # ๋ชจ๋ธ ์ •๋ณด ์ถ”์ถœ
185
+ model_info = template.get("model", {})
186
+ model = model_info.get("value", "gpt-3.5-turbo") if isinstance(model_info, dict) else "gpt-3.5-turbo"
187
+
188
+ # ์˜จ๋„ ์ •๋ณด ์ถ”์ถœ
189
+ temp_info = template.get("temperature", {})
190
+ temperature = temp_info.get("value", 0.7) if isinstance(temp_info, dict) else 0.7
191
+
192
+ # ์‹œ์Šคํ…œ ํ”„๋กฌํ”„ํŠธ ์ถ”์ถœ
193
+ prompt_info = template.get("system_prompt", {})
194
+ system_prompt = prompt_info.get("value", "") if isinstance(prompt_info, dict) else ""
195
+
196
+ # ํ”„๋กœ๋ฐ”์ด๋” ์ •๋ณด ์ถ”์ถœ
197
+ provider_info = template.get("provider", {})
198
+ provider = provider_info.get("value", "OpenAI") if isinstance(provider_info, dict) else "OpenAI"
199
+
200
+ # ์ž…๋ ฅ ํ…์ŠคํŠธ ์ฐพ๊ธฐ
201
+ input_text = ""
202
+ for edge in edges:
203
+ if edge.get("target") == node_id:
204
+ source_id = edge.get("source")
205
+ if source_id in results:
206
+ input_text = results[source_id]
207
+ break
208
+
209
+ # ์‹ค์ œ API ํ˜ธ์ถœ
210
+ if provider == "OpenAI" and openai_key and openai_available:
211
+ try:
212
+ client = OpenAI(api_key=openai_key)
213
+
214
+ messages = []
215
+ if system_prompt:
216
+ messages.append({"role": "system", "content": system_prompt})
217
+ messages.append({"role": "user", "content": input_text})
218
+
219
+ response = client.chat.completions.create(
220
+ model=model,
221
+ messages=messages,
222
+ temperature=temperature,
223
+ max_tokens=1000
224
+ )
225
+
226
+ results[node_id] = response.choices[0].message.content
227
+
228
+ except Exception as e:
229
+ results[node_id] = f"[OpenAI Error: {str(e)}]"
230
+
231
+ elif provider == "Anthropic" and anthropic_key and anthropic_available:
232
+ try:
233
+ client = anthropic.Anthropic(api_key=anthropic_key)
234
+
235
+ message = client.messages.create(
236
+ model="claude-3-haiku-20240307",
237
+ max_tokens=1000,
238
+ temperature=temperature,
239
+ system=system_prompt if system_prompt else None,
240
+ messages=[{"role": "user", "content": input_text}]
241
+ )
242
+
243
+ results[node_id] = message.content[0].text
244
+
245
+ except Exception as e:
246
+ results[node_id] = f"[Anthropic Error: {str(e)}]"
247
+
248
+ elif provider == "Friendly" and friendli_token:
249
+ try:
250
+ import requests
251
+
252
+ headers = {
253
+ "Authorization": f"Bearer {friendli_token}",
254
+ "Content-Type": "application/json"
255
+ }
256
+
257
+ payload = {
258
+ "model": "dep89a2fld32mcm",
259
+ "messages": [
260
+ {"role": "system", "content": system_prompt} if system_prompt else {"role": "system", "content": "You are a helpful assistant."},
261
+ {"role": "user", "content": input_text}
262
+ ],
263
+ "max_tokens": 1000,
264
+ "temperature": temperature
265
+ }
266
+
267
+ response = requests.post(
268
+ "https://api.friendli.ai/dedicated/v1/chat/completions",
269
+ headers=headers,
270
+ json=payload,
271
+ timeout=30
272
+ )
273
+
274
+ if response.status_code == 200:
275
+ response_json = response.json()
276
+ results[node_id] = response_json["choices"][0]["message"]["content"]
277
+ else:
278
+ results[node_id] = f"[Friendly API Error: {response.status_code}]"
279
+
280
+ except Exception as e:
281
+ results[node_id] = f"[Friendly Error: {str(e)}]"
282
+
283
+ else:
284
+ # API ํ‚ค๊ฐ€ ์—†๋Š” ๊ฒฝ์šฐ ์‹œ๋ฎฌ๋ ˆ์ด์…˜
285
+ results[node_id] = f"[Simulated AI Response to: {input_text[:50]}...]"
286
 
287
+ elif node_type in ["ChatOutput", "textOutput", "Output"]:
288
+ # ์ถœ๋ ฅ ๋…ธ๋“œ๋Š” ์—ฐ๊ฒฐ๋œ ๋…ธ๋“œ์˜ ๊ฒฐ๊ณผ๋ฅผ ๊ฐ€์ ธ์˜ด
289
+ for edge in edges:
290
+ if edge.get("target") == node_id:
291
+ source_id = edge.get("source")
292
+ if source_id in results:
293
+ results[node_id] = results[source_id]
294
+ break
295
+
296
+ except Exception as e:
297
+ results[node_id] = f"[Node Error: {str(e)}]"
298
+ print(f"Error processing node {node_id}: {traceback.format_exc()}")
299
 
300
  return results
301
 
 
345
  """
346
  )
347
 
348
+ # API Status Display
349
+ with gr.Accordion("๐Ÿ”Œ API Status", open=False):
350
+ gr.Markdown(f"""
351
+ **Available APIs:**
352
+ - FRIENDLI_TOKEN: {'โœ… Connected' if os.getenv("FRIENDLI_TOKEN") else 'โŒ Not found'}
353
+ - OPENAI_API_KEY: {'โœ… Connected' if os.getenv("OPENAI_API_KEY") else 'โŒ Not found'}
354
+ - ANTHROPIC_API_KEY: {'โœ… Connected' if os.getenv("ANTHROPIC_API_KEY") else 'โŒ Not found'}
355
+
356
+ **Libraries:**
357
+ - OpenAI: {'โœ… Installed' if OPENAI_AVAILABLE else 'โŒ Not installed'}
358
+ - Anthropic: {'โœ… Installed' if ANTHROPIC_AVAILABLE else 'โŒ Not installed'}
359
+ - Requests: {'โœ… Installed' if REQUESTS_AVAILABLE else 'โŒ Not installed'}
360
+
361
+ *Note: Without API keys, the UI will simulate AI responses.*
362
+ """)
363
+
364
  # State for storing workflow data
365
  loaded_data = gr.State(None)
366
  trigger_update = gr.State(False)
 
512
  if i < len(input_values):
513
  inputs_dict[key] = input_values[i]
514
 
515
+ # Check API status
516
+ log = "=== Workflow Execution Started ===\n"
517
+ log += f"Inputs provided: {len(inputs_dict)}\n"
518
+
519
+ # API ์ƒํƒœ ํ™•์ธ
520
+ friendli_token = os.getenv("FRIENDLI_TOKEN")
521
+ openai_key = os.getenv("OPENAI_API_KEY")
522
+ anthropic_key = os.getenv("ANTHROPIC_API_KEY")
523
+
524
+ log += "\nAPI Status:\n"
525
+ log += f"- FRIENDLI_TOKEN: {'โœ… Found' if friendli_token else 'โŒ Not found'}\n"
526
+ log += f"- OPENAI_API_KEY: {'โœ… Found' if openai_key else 'โŒ Not found'}\n"
527
+ log += f"- ANTHROPIC_API_KEY: {'โœ… Found' if anthropic_key else 'โŒ Not found'}\n"
528
+
529
+ if not friendli_token and not openai_key and not anthropic_key:
530
+ log += "\nโš ๏ธ No API keys found. Results will be simulated.\n"
531
+ log += "To get real AI responses, set API keys in environment variables.\n"
532
+
533
+ log += "\n--- Processing Nodes ---\n"
534
 
535
  try:
536
  results = execute_workflow_simple(workflow_data, inputs_dict)
 
540
  for node_id in output_components.keys():
541
  value = results.get(node_id, "No output")
542
  output_values.append(value)
543
+
544
+ # Log ๊ธธ์ด ์ œํ•œ
545
+ display_value = value[:100] + "..." if len(str(value)) > 100 else value
546
+ log += f"\nOutput [{node_id}]: {display_value}\n"
547
 
548
+ log += "\n=== Execution Completed Successfully! ===\n"
549
  output_values.append(log)
550
 
551
  return output_values
552
 
553
  except Exception as e:
554
  error_msg = f"โŒ Error: {str(e)}"
555
+ log += f"\n{error_msg}\n"
556
+ log += "=== Execution Failed ===\n"
557
  return [error_msg] * len(output_components) + [log]
558
 
559
  # Connect execution