resumesearch committed on
Commit
451eed0
·
verified ·
1 Parent(s): 290299d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +85 -25
app.py CHANGED
@@ -472,15 +472,31 @@ with gr.Blocks(css=custom_css, title="Ultimate LLM Prompt Builder", theme=gr.the
472
 
473
  with gr.Column(scale=1):
474
  gr.Markdown("### 🧠 Cognitive Instructions")
475
- cognitive_instructions = []
476
- for category, instructions in ADVANCED_CATEGORIES.items():
477
- with gr.Accordion(f"πŸ“Œ {category}", open=False):
478
- checkbox_group = gr.CheckboxGroup(
479
- choices=instructions,
480
- label=category,
481
- value=[]
482
- )
483
- cognitive_instructions.append(checkbox_group)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
484
 
485
  # Style & Filtering Tab
486
  with gr.TabItem("🎨 Style & Filtering"):
@@ -573,6 +589,8 @@ with gr.Blocks(css=custom_css, title="Ultimate LLM Prompt Builder", theme=gr.the
573
  clear_chat_btn = gr.Button("πŸ—‘οΈ Clear", variant="secondary")
574
  save_prompt_btn = gr.Button("πŸ’Ύ Save Prompt", variant="secondary")
575
 
 
 
576
  chatbot = gr.Chatbot(
577
  label="πŸ€– AI Assistant",
578
  type="messages",
@@ -624,10 +642,14 @@ with gr.Blocks(css=custom_css, title="Ultimate LLM Prompt Builder", theme=gr.the
624
  lines=8,
625
  interactive=False
626
  )
 
 
 
 
 
627
 
628
  # State management
629
  chat_history = gr.State([])
630
- current_template_vars = gr.State({})
631
 
632
  # Helper functions
633
  def update_template_info(category, template_name):
@@ -686,7 +708,7 @@ with gr.Blocks(css=custom_css, title="Ultimate LLM Prompt Builder", theme=gr.the
686
 
687
  return "", "", {}
688
 
689
- def create_final_prompt(system, main_prompt, cognitive_selected, examples, tone, length, format_pref, role_persona, custom_system):
690
  components = []
691
 
692
  # System prompt
@@ -699,7 +721,7 @@ with gr.Blocks(css=custom_css, title="Ultimate LLM Prompt Builder", theme=gr.the
699
 
700
  # Add cognitive instructions
701
  all_selected = []
702
- for selected_list in cognitive_selected:
703
  if selected_list:
704
  all_selected.extend(selected_list)
705
 
@@ -733,13 +755,13 @@ with gr.Blocks(css=custom_css, title="Ultimate LLM Prompt Builder", theme=gr.the
733
 
734
  return "\n\n".join(components)
735
 
736
- def process_chat_message(message, history, model, temp, system, cognitive_selected, tone, length, format_pref, role, custom_sys, banned_w, banned_p, *examples):
737
  if not message.strip():
738
  return history, history, "", {}, ""
739
 
740
  # Create final prompt
741
  final_prompt = create_final_prompt(
742
- system, message, cognitive_selected, examples,
743
  tone, length, format_pref, role, custom_sys
744
  )
745
 
@@ -796,9 +818,12 @@ with gr.Blocks(css=custom_css, title="Ultimate LLM Prompt Builder", theme=gr.the
796
 
797
  return response_a, response_b
798
 
799
- def save_current_prompt(prompt_name, final_prompt, model, stats):
800
- if not prompt_name or not final_prompt:
801
- return "❌ Please provide a name and ensure there's a prompt to save"
 
 
 
802
 
803
  prompt_data = {
804
  "prompt": final_prompt,
@@ -808,14 +833,23 @@ with gr.Blocks(css=custom_css, title="Ultimate LLM Prompt Builder", theme=gr.the
808
 
809
  state.save_prompt(prompt_name, prompt_data)
810
 
811
- # Update dropdown choices
812
- choices = list(state.saved_prompts.keys())
813
- return f"βœ… Saved prompt '{prompt_name}'", gr.update(choices=choices)
814
 
815
  def load_saved_prompt(prompt_name):
816
  if prompt_name in state.saved_prompts:
817
  return state.saved_prompts[prompt_name]
818
  return {}
 
 
 
 
 
 
 
 
 
 
 
819
 
820
  def refresh_analytics():
821
  analytics = calculate_analytics()
@@ -893,8 +927,8 @@ with gr.Blocks(css=custom_css, title="Ultimate LLM Prompt Builder", theme=gr.the
893
  process_chat_message,
894
  inputs=[
895
  chat_input, chat_history, selected_model, temperature,
896
- custom_system, cognitive_instructions, tone_style, length_preference,
897
- format_preference, role_persona, custom_system, banned_words, banned_patterns,
898
  example_1_input, example_1_output, example_2_input, example_2_output
899
  ],
900
  outputs=[chatbot, chat_history, final_prompt_preview, prompt_stats, optimization_display]
@@ -905,6 +939,12 @@ with gr.Blocks(css=custom_css, title="Ultimate LLM Prompt Builder", theme=gr.the
905
  outputs=[chatbot, chat_history]
906
  )
907
 
 
 
 
 
 
 
908
  # A/B Testing
909
  test_btn.click(
910
  run_ab_test,
@@ -924,10 +964,30 @@ with gr.Blocks(css=custom_css, title="Ultimate LLM Prompt Builder", theme=gr.the
924
  outputs=[export_data]
925
  )
926
 
927
- # Auto-refresh analytics on load
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
928
  demo.load(
929
- refresh_analytics,
930
- outputs=[analytics_display, analytics_json]
931
  )
932
 
933
  if __name__ == "__main__":
 
472
 
473
  with gr.Column(scale=1):
474
  gr.Markdown("### 🧠 Cognitive Instructions")
475
+ # Create individual components instead of a list
476
+ with gr.Accordion("πŸ“Œ Cognitive Techniques", open=False):
477
+ cognitive_techniques = gr.CheckboxGroup(
478
+ choices=ADVANCED_CATEGORIES["Cognitive Techniques"],
479
+ label="Cognitive Techniques",
480
+ value=[]
481
+ )
482
+ with gr.Accordion("πŸ“Œ Output Structure", open=False):
483
+ output_structure = gr.CheckboxGroup(
484
+ choices=ADVANCED_CATEGORIES["Output Structure"],
485
+ label="Output Structure",
486
+ value=[]
487
+ )
488
+ with gr.Accordion("πŸ“Œ Quality Assurance", open=False):
489
+ quality_assurance = gr.CheckboxGroup(
490
+ choices=ADVANCED_CATEGORIES["Quality Assurance"],
491
+ label="Quality Assurance",
492
+ value=[]
493
+ )
494
+ with gr.Accordion("πŸ“Œ Audience Adaptation", open=False):
495
+ audience_adaptation = gr.CheckboxGroup(
496
+ choices=ADVANCED_CATEGORIES["Audience Adaptation"],
497
+ label="Audience Adaptation",
498
+ value=[]
499
+ )
500
 
501
  # Style & Filtering Tab
502
  with gr.TabItem("🎨 Style & Filtering"):
 
589
  clear_chat_btn = gr.Button("πŸ—‘οΈ Clear", variant="secondary")
590
  save_prompt_btn = gr.Button("πŸ’Ύ Save Prompt", variant="secondary")
591
 
592
+ save_status = gr.Textbox(label="πŸ’Ύ Save Status", visible=False, interactive=False)
593
+
594
  chatbot = gr.Chatbot(
595
  label="πŸ€– AI Assistant",
596
  type="messages",
 
642
  lines=8,
643
  interactive=False
644
  )
645
+
646
+ import_status = gr.Textbox(
647
+ label="πŸ“₯ Import Status",
648
+ interactive=False
649
+ )
650
 
651
  # State management
652
  chat_history = gr.State([])
 
653
 
654
  # Helper functions
655
  def update_template_info(category, template_name):
 
708
 
709
  return "", "", {}
710
 
711
+ def create_final_prompt(system, main_prompt, cognitive_tech, output_struct, quality_assur, audience_adapt, examples, tone, length, format_pref, role_persona, custom_system):
712
  components = []
713
 
714
  # System prompt
 
721
 
722
  # Add cognitive instructions
723
  all_selected = []
724
+ for selected_list in [cognitive_tech, output_struct, quality_assur, audience_adapt]:
725
  if selected_list:
726
  all_selected.extend(selected_list)
727
 
 
755
 
756
  return "\n\n".join(components)
757
 
758
+ def process_chat_message(message, history, model, temp, system, cognitive_tech, output_struct, quality_assur, audience_adapt, tone, length, format_pref, role, custom_sys, banned_w, banned_p, *examples):
759
  if not message.strip():
760
  return history, history, "", {}, ""
761
 
762
  # Create final prompt
763
  final_prompt = create_final_prompt(
764
+ system, message, cognitive_tech, output_struct, quality_assur, audience_adapt, examples,
765
  tone, length, format_pref, role, custom_sys
766
  )
767
 
 
818
 
819
  return response_a, response_b
820
 
821
+ def save_current_prompt(final_prompt, model, stats):
822
+ # Simple prompt saving - in a real app you'd want a name input
823
+ prompt_name = f"Prompt_{len(state.saved_prompts)+1}_{datetime.now().strftime('%H%M')}"
824
+
825
+ if not final_prompt:
826
+ return gr.update(value="❌ No prompt to save", visible=True)
827
 
828
  prompt_data = {
829
  "prompt": final_prompt,
 
833
 
834
  state.save_prompt(prompt_name, prompt_data)
835
 
836
+ return gr.update(value=f"βœ… Saved as '{prompt_name}'", visible=True)
 
 
837
 
838
  def load_saved_prompt(prompt_name):
839
  if prompt_name in state.saved_prompts:
840
  return state.saved_prompts[prompt_name]
841
  return {}
842
+
843
+ def delete_saved_prompt(prompt_name):
844
+ if prompt_name in state.saved_prompts:
845
+ del state.saved_prompts[prompt_name]
846
+ choices = list(state.saved_prompts.keys())
847
+ return gr.update(choices=choices, value=None), {}
848
+ return gr.update(), {}
849
+
850
+ def update_saved_prompts_dropdown():
851
+ choices = list(state.saved_prompts.keys())
852
+ return gr.update(choices=choices)
853
 
854
  def refresh_analytics():
855
  analytics = calculate_analytics()
 
927
  process_chat_message,
928
  inputs=[
929
  chat_input, chat_history, selected_model, temperature,
930
+ custom_system, cognitive_techniques, output_structure, quality_assurance, audience_adaptation,
931
+ tone_style, length_preference, format_preference, role_persona, custom_system, banned_words, banned_patterns,
932
  example_1_input, example_1_output, example_2_input, example_2_output
933
  ],
934
  outputs=[chatbot, chat_history, final_prompt_preview, prompt_stats, optimization_display]
 
939
  outputs=[chatbot, chat_history]
940
  )
941
 
942
+ save_prompt_btn.click(
943
+ save_current_prompt,
944
+ inputs=[final_prompt_preview, selected_model, prompt_stats],
945
+ outputs=[save_status]
946
+ )
947
+
948
  # A/B Testing
949
  test_btn.click(
950
  run_ab_test,
 
964
  outputs=[export_data]
965
  )
966
 
967
+ # Load and delete saved prompts
968
+ load_prompt_btn.click(
969
+ load_saved_prompt,
970
+ inputs=[saved_prompts_list],
971
+ outputs=[saved_prompt_display]
972
+ )
973
+
974
+ delete_prompt_btn.click(
975
+ delete_saved_prompt,
976
+ inputs=[saved_prompts_list],
977
+ outputs=[saved_prompts_list, saved_prompt_display]
978
+ )
979
+
980
+ # Import functionality
981
+ import_file.upload(
982
+ import_data,
983
+ inputs=[import_file],
984
+ outputs=[import_status]
985
+ )
986
+
987
+ # Auto-refresh analytics on load and update saved prompts
988
  demo.load(
989
+ lambda: (refresh_analytics()[0], refresh_analytics()[1], gr.update(choices=list(state.saved_prompts.keys()))),
990
+ outputs=[analytics_display, analytics_json, saved_prompts_list]
991
  )
992
 
993
  if __name__ == "__main__":