shaheerawan3 commited on
Commit
e30b15f
·
verified ·
1 Parent(s): 8b06363

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +230 -14
app.py CHANGED
@@ -23,10 +23,20 @@ CHATS = {"Main Chat": []}
23
  CURRENT_CHAT = "Main Chat"
24
 
25
  # System prompt and generation config
26
- SYSTEM_PROMPT = "You are a helpful AI assistant based on the Mistral-7B-Instruct model. You specialize in creating structured JSON data for automation workflows like n8n. When asked, format JSON properly with correct indentation and structure."
 
 
 
 
 
 
 
 
 
 
27
  GENERATE_CONFIG = {
28
- "max_new_tokens": 512,
29
- "temperature": 0.7,
30
  "top_p": 0.95,
31
  "top_k": 50,
32
  "repetition_penalty": 1.1,
@@ -77,7 +87,10 @@ def generate_response(prompt, chat_history, progress=gr.Progress()):
77
  global MODEL, TOKENIZER, PIPE, CHATS, CURRENT_CHAT, SYSTEM_PROMPT, GENERATE_CONFIG, FILE_DATA, ANALYZED_DATA
78
 
79
  if not MODEL_LOADED:
80
- return "Model is still loading. Please wait a moment before sending messages."
 
 
 
81
 
82
  try:
83
  # Use the current chat's history
@@ -95,6 +108,10 @@ def generate_response(prompt, chat_history, progress=gr.Progress()):
95
  if msg["role"] != "system": # Skip system messages in the history
96
  conversation.append({"role": msg["role"], "content": msg["content"]})
97
 
 
 
 
 
98
  # Handle file-related queries by including context
99
  if ANALYZED_DATA is not None and any(keyword in prompt.lower()
100
  for keyword in ["file", "data", "analyze", "show", "tell me about", "json"]):
@@ -125,6 +142,10 @@ def generate_response(prompt, chat_history, progress=gr.Progress()):
125
  enhanced_prompt = f"{prompt}\n\nContext about the file: {file_context}"
126
  else:
127
  enhanced_prompt = prompt
 
 
 
 
128
 
129
  # Add current prompt
130
  conversation.append({"role": "user", "content": enhanced_prompt})
@@ -167,15 +188,31 @@ def generate_response(prompt, chat_history, progress=gr.Progress()):
167
  "content": generated_text
168
  })
169
 
170
- # Update the chat history for the Gradio component
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
171
  chat_history.append((prompt, generated_text))
172
 
173
  return chat_history
174
 
175
  except Exception as e:
176
  error_message = f"Error generating response: {str(e)}"
177
- chat_history.append((prompt, error_message))
178
- return chat_history
179
 
180
  # Function to create a new chat
181
  def create_new_chat(chat_name):
@@ -284,6 +321,118 @@ def analyze_uploaded_file(file):
284
  except Exception as e:
285
  return f"Error analyzing file: {str(e)}"
286
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
287
  # Function to update system prompt
288
  def update_system_prompt(new_prompt):
289
  global SYSTEM_PROMPT
@@ -374,15 +523,17 @@ def create_gradio_interface():
374
  .chat-message-user {background-color: #e0f7fa; padding: 12px; border-radius: 8px; margin-bottom: 8px}
375
  .chat-message-bot {background-color: #f1f8e9; padding: 12px; border-radius: 8px; margin-bottom: 8px}
376
  .file-info {border: 1px solid #ddd; padding: 15px; border-radius: 5px; margin-top: 10px}
 
377
  """
378
 
379
  # Setup tabs for different functionalities
380
  with gr.Blocks(css=css) as app:
381
- gr.Markdown("# 🤖 Advanced Mistral-7B-Instruct Chatbot")
382
 
383
  with gr.Tab("Chat"):
384
  with gr.Row():
385
  with gr.Column(scale=3):
 
386
  chatbot = gr.Chatbot(
387
  [],
388
  elem_id="chatbot",
@@ -464,27 +615,86 @@ def create_gradio_interface():
464
  )
465
  update_params_btn = gr.Button("Update Parameters", variant="secondary")
466
 
467
- with gr.Tab("File Analysis"):
468
  with gr.Row():
469
  with gr.Column(scale=1):
470
  file_upload = gr.File(label="Upload a file to analyze")
471
  analyze_btn = gr.Button("Analyze File", variant="primary")
 
472
 
473
  with gr.Column(scale=2):
474
- file_analysis_output = gr.Markdown(label="File Analysis Results")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
475
 
476
  # Set up event handlers
477
  send_btn.click(
478
  generate_response,
479
  inputs=[msg, chatbot],
480
- outputs=[chatbot],
481
  api_name="chat"
482
  )
483
 
484
  msg.submit(
485
  generate_response,
486
  inputs=[msg, chatbot],
487
- outputs=[chatbot],
488
  api_name=False
489
  )
490
 
@@ -519,6 +729,12 @@ def create_gradio_interface():
519
  api_name=False
520
  )
521
 
 
 
 
 
 
 
522
  chat_selector.change(
523
  select_chat,
524
  inputs=chat_selector,
@@ -543,8 +759,8 @@ def create_gradio_interface():
543
  api_name="clear_chat"
544
  )
545
 
546
- # Refresh example messages every few seconds
547
- chat_selector.change(lambda: [], outputs=chatbot)
548
 
549
  return app
550
 
 
23
  CURRENT_CHAT = "Main Chat"
24
 
25
  # System prompt and generation config
26
+ SYSTEM_PROMPT = """You are a helpful AI assistant based on the Mistral-7B-Instruct model.
27
+ You specialize in creating structured JSON data for automation workflows like n8n.
28
+ When asked to create JSON for n8n workflows:
29
+ 1. Structure the data in valid JSON format with proper nesting
30
+ 2. Include all necessary fields and properties for nodes
31
+ 3. Format with correct indentation and structure
32
+ 4. Use proper n8n node syntax and follow their data structure requirements
33
+ 5. Always validate the JSON before returning it
34
+
35
+ For JSON workflow nodes, be attentive to detail and include all necessary fields."""
36
+
37
  GENERATE_CONFIG = {
38
+ "max_new_tokens": 1024, # Increased for complex JSON responses
39
+ "temperature": 0.5, # Slightly lower for more precise JSON
40
  "top_p": 0.95,
41
  "top_k": 50,
42
  "repetition_penalty": 1.1,
 
87
  global MODEL, TOKENIZER, PIPE, CHATS, CURRENT_CHAT, SYSTEM_PROMPT, GENERATE_CONFIG, FILE_DATA, ANALYZED_DATA
88
 
89
  if not MODEL_LOADED:
90
+ if MODEL_LOADING:
91
+ return chat_history + [("Your message", "Model is still loading. Please wait a moment before sending messages.")]
92
+ else:
93
+ return chat_history + [("Your message", "Model not loaded. Please click 'Load Mistral-7B Model' first.")]
94
 
95
  try:
96
  # Use the current chat's history
 
108
  if msg["role"] != "system": # Skip system messages in the history
109
  conversation.append({"role": msg["role"], "content": msg["content"]})
110
 
111
+ # Check if JSON formatting is specifically requested
112
+ is_json_request = any(keyword in prompt.lower()
113
+ for keyword in ["json", "n8n", "workflow", "automation", "format"])
114
+
115
  # Handle file-related queries by including context
116
  if ANALYZED_DATA is not None and any(keyword in prompt.lower()
117
  for keyword in ["file", "data", "analyze", "show", "tell me about", "json"]):
 
142
  enhanced_prompt = f"{prompt}\n\nContext about the file: {file_context}"
143
  else:
144
  enhanced_prompt = prompt
145
+
146
+ # If this is a JSON request, add special instructions
147
+ if is_json_request:
148
+ enhanced_prompt += "\n\nPlease generate a valid, properly formatted JSON response suitable for n8n workflows. Include all necessary fields and ensure correct formatting. The JSON should be valid and ready to be imported directly into n8n."
149
 
150
  # Add current prompt
151
  conversation.append({"role": "user", "content": enhanced_prompt})
 
188
  "content": generated_text
189
  })
190
 
191
+ # Validate and format JSON if it appears to be a JSON response
192
+ if is_json_request and "```json" in generated_text:
193
+ try:
194
+ # Try to extract JSON from code blocks
195
+ json_match = re.search(r'```json\s*([\s\S]*?)\s*```', generated_text)
196
+ if json_match:
197
+ json_string = json_match.group(1)
198
+ # Parse and re-stringify to ensure proper formatting
199
+ parsed_json = json.loads(json_string)
200
+ formatted_json = json.dumps(parsed_json, indent=2)
201
+
202
+ # Replace the original JSON with the properly formatted one
203
+ generated_text = generated_text.replace(json_match.group(0), f"```json\n{formatted_json}\n```")
204
+ except json.JSONDecodeError:
205
+ # If JSON parsing fails, we keep the original response
206
+ pass
207
+
208
+ # Update the chat history for the Gradio component (fix the tuple format)
209
  chat_history.append((prompt, generated_text))
210
 
211
  return chat_history
212
 
213
  except Exception as e:
214
  error_message = f"Error generating response: {str(e)}"
215
+ return chat_history + [(prompt, error_message)]
 
216
 
217
  # Function to create a new chat
218
  def create_new_chat(chat_name):
 
321
  except Exception as e:
322
  return f"Error analyzing file: {str(e)}"
323
 
324
# Function to convert analyzed file data into an importable n8n workflow JSON
def convert_to_n8n_json():
    """Convert the globally stored ANALYZED_DATA into an n8n workflow JSON string.

    Reads the module-level ``ANALYZED_DATA`` dict (set by the file-analysis
    step; assumed to carry a ``'type'`` key of 'csv'/'excel'/'json'/'text'
    and a ``'data'`` payload — confirm against analyze_uploaded_file).

    Returns:
        str: A pretty-printed JSON string describing a single-node n8n
        workflow whose Code node returns the analyzed data, or a
        human-readable error message when no file has been analyzed,
        the file type is unsupported, or conversion fails.
    """
    global ANALYZED_DATA

    if ANALYZED_DATA is None:
        return "No file has been analyzed yet. Please upload a file first."

    try:
        file_type = ANALYZED_DATA['type']

        if file_type in ('csv', 'excel'):
            # Tabular data: export the DataFrame rows as a list of records
            # and embed them in the Code node's JavaScript.
            records = ANALYZED_DATA['data'].to_dict(orient='records')
            return _build_n8n_workflow_json(
                "Generated Data Workflow",
                f"return {json.dumps(records, indent=2)};",
            )
        elif file_type == 'json':
            # Already JSON-shaped: embed the parsed data directly.
            return _build_n8n_workflow_json(
                "JSON Workflow",
                f"return {json.dumps(ANALYZED_DATA['data'], indent=2)};",
            )
        elif file_type == 'text':
            # Plain text: wrap it in an object with a single "text" field.
            return _build_n8n_workflow_json(
                "Text Processing Workflow",
                f"return {{ text: {json.dumps(ANALYZED_DATA['data'])} }};",
            )
        else:
            return "Cannot convert this file type to n8n JSON format."

    except Exception as e:
        return f"Error generating n8n JSON: {str(e)}"

def _build_n8n_workflow_json(workflow_name, js_code):
    """Wrap *js_code* in a minimal single-node n8n workflow and serialize it.

    The three file-type branches previously duplicated this ~25-line template
    verbatim; it is factored out so the workflow structure is defined once.

    Args:
        workflow_name (str): Value for the workflow's "name" field.
        js_code (str): JavaScript body for the n8n Code node's jsCode param.

    Returns:
        str: The workflow as indented JSON, ready to import into n8n.
    """
    workflow = {
        "name": workflow_name,
        "nodes": [
            {
                "parameters": {
                    "jsCode": js_code
                },
                "id": "1",
                "name": "Code",
                "type": "n8n-nodes-base.code",
                "typeVersion": 1,
                "position": [
                    250,
                    300
                ]
            }
        ],
        "connections": {},
        "active": False,
        "settings": {},
        "version": 1,
        "meta": {
            "instanceId": "GENERATED"
        }
    }
    return json.dumps(workflow, indent=2)
435
+
436
  # Function to update system prompt
437
  def update_system_prompt(new_prompt):
438
  global SYSTEM_PROMPT
 
523
  .chat-message-user {background-color: #e0f7fa; padding: 12px; border-radius: 8px; margin-bottom: 8px}
524
  .chat-message-bot {background-color: #f1f8e9; padding: 12px; border-radius: 8px; margin-bottom: 8px}
525
  .file-info {border: 1px solid #ddd; padding: 15px; border-radius: 5px; margin-top: 10px}
526
+ .json-output {font-family: monospace; white-space: pre; overflow-x: auto; background-color: #f5f5f5; padding: 15px; border-radius: 5px;}
527
  """
528
 
529
  # Setup tabs for different functionalities
530
  with gr.Blocks(css=css) as app:
531
+ gr.Markdown("# 🤖 Advanced Mistral-7B-Instruct Chatbot for n8n JSON Generation")
532
 
533
  with gr.Tab("Chat"):
534
  with gr.Row():
535
  with gr.Column(scale=3):
536
+ # Initialize with empty list to fix the tuple format error
537
  chatbot = gr.Chatbot(
538
  [],
539
  elem_id="chatbot",
 
615
  )
616
  update_params_btn = gr.Button("Update Parameters", variant="secondary")
617
 
618
+ with gr.Tab("File Analysis & JSON Conversion"):
619
  with gr.Row():
620
  with gr.Column(scale=1):
621
  file_upload = gr.File(label="Upload a file to analyze")
622
  analyze_btn = gr.Button("Analyze File", variant="primary")
623
+ convert_json_btn = gr.Button("Convert to n8n JSON", variant="primary")
624
 
625
  with gr.Column(scale=2):
626
+ with gr.Tabs():
627
+ with gr.TabItem("File Analysis"):
628
+ file_analysis_output = gr.Markdown(label="File Analysis Results")
629
+ with gr.TabItem("n8n JSON Output"):
630
+ n8n_json_output = gr.Code(
631
+ language="json",
632
+ label="n8n Compatible JSON",
633
+ lines=20
634
+ )
635
+
636
+ with gr.Tab("JSON Formatting Guide"):
637
+ gr.Markdown("""
638
+ # n8n JSON Formatting Guide
639
+
640
+ This tab provides guidance on creating well-structured JSON for n8n workflows.
641
+
642
+ ## Basic n8n Workflow Structure
643
+
644
+ ```json
645
+ {
646
+ "name": "My Workflow",
647
+ "nodes": [
648
+ {
649
+ "parameters": { /* Node-specific parameters */ },
650
+ "id": "1",
651
+ "name": "Start Node",
652
+ "type": "n8n-nodes-base.some-node-type",
653
+ "typeVersion": 1,
654
+ "position": [250, 300]
655
+ }
656
+ // Additional nodes...
657
+ ],
658
+ "connections": {
659
+ "Start Node": {
660
+ "main": [
661
+ [
662
+ {
663
+ "node": "Second Node",
664
+ "type": "main",
665
+ "index": 0
666
+ }
667
+ ]
668
+ ]
669
+ }
670
+ // Additional connections...
671
+ }
672
+ }
673
+ ```
674
+
675
+ ## Tips for Creating n8n-Compatible JSON
676
+
677
+ 1. Ensure all JSON keys and values are properly quoted
678
+ 2. Use proper nesting for workflow components
679
+ 3. Define unique IDs for each node
680
+ 4. Properly define connections between nodes
681
+ 5. Include all required parameters for each node type
682
+
683
+ Use the chat interface to ask for specific n8n node configurations or workflow patterns.
684
+ """)
685
 
686
  # Set up event handlers
687
  send_btn.click(
688
  generate_response,
689
  inputs=[msg, chatbot],
690
+ outputs=chatbot,
691
  api_name="chat"
692
  )
693
 
694
  msg.submit(
695
  generate_response,
696
  inputs=[msg, chatbot],
697
+ outputs=chatbot,
698
  api_name=False
699
  )
700
 
 
729
  api_name=False
730
  )
731
 
732
+ convert_json_btn.click(
733
+ convert_to_n8n_json,
734
+ outputs=n8n_json_output,
735
+ api_name="convert_to_n8n"
736
+ )
737
+
738
  chat_selector.change(
739
  select_chat,
740
  inputs=chat_selector,
 
759
  api_name="clear_chat"
760
  )
761
 
762
+ # Initialize empty chatbot
763
+ chatbot.value = []
764
 
765
  return app
766