# ==================================================================================================
# SYNAPSE v0.9 (Codename: "Prometheus")
# The Dynamic Prompting Environment
#
# PHILOSOPHY:
# We don't generate prompts. We architect instructions. We build workflows.
# This is not a tool, it is a laboratory for command and control of generative AI.
# ==================================================================================================
import gradio as gr
import uuid
import json


# --------------------------------------------------------------------------------------------------
# I. CORE BLUEPRINTS & CONFIGURATION
# The DNA of the Synapse Environment. Defines the capabilities of each Node.
# --------------------------------------------------------------------------------------------------
class NodeBlueprint:
    """Defines the structure and capabilities of a functional node in the Synapse chain."""

    # Static registry of node types. Each entry declares:
    #   name / description / icon -- display metadata,
    #   inputs -- ordered UI field specs: 'label' plus optional 'placeholder',
    #             'lines' (textbox height), 'choices' (renders a dropdown), 'value',
    #   refinement_actions -- action keys offered by the Refinement Workbench.
    NODE_TEMPLATES = {
        "strategic_comm": {
            "name": "Strategic Communicator",
            "description": "For crafting complex, high-stakes communication (emails, memos, announcements).",
            "icon": "📨",
            "inputs": [
                {"label": "Audience Profile",
                 "placeholder": "E.g., 'C-Suite Executives, skeptical but data-driven'"},
                {"label": "Primary Objective",
                 "placeholder": "E.g., 'Secure funding for Project Phoenix'"},
                {"label": "Key Information Payloads (one per line)", "lines": 4,
                 "placeholder": "- Q2 results exceeded projections by 15%.\n- The project has a 6-month ROI timeline.\n- We have a lead engineer ready to start."},
                {"label": "Desired Tone & Voice",
                 "choices": ["Assertive & Confident", "Empathetic & Collaborative",
                             "Urgent & Action-Oriented", "Formal & Deferential"],
                 "value": "Assertive & Confident"},
            ],
            "refinement_actions": ["make_more_concise", "increase_persuasion",
                                   "add_data_focus", "soften_tone"],
        },
        "creative_ideation": {
            "name": "Creative Ideation Engine",
            "description": "For brainstorming and generating novel concepts from a single spark.",
            "icon": "💡",
            "inputs": [
                {"label": "Core Concept or Theme",
                 "placeholder": "E.g., 'The future of decentralized cities'"},
                {"label": "Output Format",
                 "choices": ["5 Blog Post Titles", "3 YouTube Video Concepts",
                             "10 Tweet-sized Ideas", "A one-paragraph story starter"],
                 "value": "5 Blog Post Titles"},
                {"label": "Creative Constraint",
                 "placeholder": "E.g., 'Must be optimistic in tone', 'Avoid using tech jargon'"},
            ],
            "refinement_actions": ["make_more_provocative", "explore_contrarian_angle",
                                   "target_a_niche_audience", "simplify_for_beginners"],
        },
        "technical_architect": {
            "name": "Technical Architect",
            "description": "For designing code logic, solving bugs, and explaining complex systems.",
            "icon": "⚙️",
            "inputs": [
                {"label": "Programming Language / Tech Stack", "value": "Python"},
                {"label": "Core Task or Problem", "lines": 3,
                 "placeholder": "E.g., 'Design a Python function to asynchronously fetch data from multiple APIs and aggregate the results.'"},
                {"label": "Key Constraints or Requirements",
                 "placeholder": "E.g., 'Must use the asyncio library, must include error handling for failed requests.'"},
            ],
            "refinement_actions": ["add_step_by_step_explanation", "generate_code_comments",
                                   "write_testing_plan", "optimize_for_performance"],
        },
    }


# --------------------------------------------------------------------------------------------------
# II. THE SYNAPSE ENGINE
# The core logic that powers the creation and refinement of prompt chains.
# --------------------------------------------------------------------------------------------------
class SynapseEngine:
    """The intelligent engine that translates user intent into architected prompt instructions."""

    def _render_prompt_anatomy(self, prompt_text: str) -> str:
        """Visualizes the prompt's structure for display in the chain view.

        This is a simplified visualization. A more complex version could use
        regex to tag parts of the prompt.
        """
        # NOTE(review): the surrounding HTML markup appears to have been
        # stripped from this literal at some point; visible text preserved.
        return f"""
🔬 Ver Anatomía del Prompt
{prompt_text}
"""

    def craft_node_output(self, node_id: str, *args) -> dict:
        """Craft a single, structured node dict for the chain.

        Args:
            node_id: Key into ``NodeBlueprint.NODE_TEMPLATES``.
            *args: One user-supplied value per template input field, in the
                order the template declares them.

        Returns:
            A node dict with keys ``id``, ``node_type``, ``icon``, ``prompt``,
            ``explanation``, ``refinements_applied``; or ``None`` when the
            node_id is unknown, too few values were supplied, or any required
            value is blank.
        """
        template = NodeBlueprint.NODE_TEMPLATES.get(node_id)
        if not template:
            return None

        fields = template['inputs']
        # FIX: check arity explicitly and pair values with fields via zip().
        # The original indexed ``args[i]`` while validating *all* of args,
        # which could raise IndexError on short input and rejected valid nodes
        # whenever unrelated trailing values happened to be blank.
        if len(args) < len(fields):
            return None
        values = args[:len(fields)]
        if not all(str(value).strip() for value in values):
            return None

        prompt = f"## PROMPT DIRECTIVE: ACT AS {template['name'].upper()} ##\n\n"
        prompt += "**MISSION CONTEXT:**\n"
        for field, value in zip(fields, values):
            prompt += f"- {field['label']}: {value}\n"
        prompt += "\n**PRIMARY EXECUTION DIRECTIVE:**\nBased on the context, generate the specified output, adhering strictly to the defined format and constraints. The output must be of expert-level quality, demonstrating deep domain knowledge and strategic insight."

        explanation = f"This node establishes a high-level **Expert Persona** ('{template['name']}') and provides a structured **Mission Context**. This forces the AI to move beyond generic responses and adopt a specific, professional mindset, leading to vastly superior results."

        return {
            "id": str(uuid.uuid4()),
            "node_type": template['name'],
            "icon": template['icon'],
            "prompt": prompt,
            "explanation": explanation,
            "refinements_applied": [],
        }

    def refine_node_prompt(self, prompt: str, action: str) -> str:
        """Append the refinement directive for ``action`` to ``prompt``.

        Unknown actions fall back to a generic directive built from the action
        name. An already-present directive is never appended twice, so the
        operation is idempotent per action.
        """
        refinement_map = {
            "make_more_concise": "\n\n**REFINEMENT DIRECTIVE:** Review the entire response and condense it by 25-40% without losing critical information. Prioritize clarity and impact.",
            "increase_persuasion": "\n\n**REFINEMENT DIRECTIVE:** Re-evaluate the prompt's output to maximize its persuasive power. Employ rhetorical devices, strengthen the call-to-action, and appeal directly to the audience's motivations.",
            "add_data_focus": "\n\n**REFINEMENT DIRECTIVE:** Enhance the output by integrating real or hypothetical quantitative data and metrics to support all claims. The tone should be analytical and evidence-based.",
            "soften_tone": "\n\n**REFINEMENT DIRECTIVE:** Adjust the language to be more collaborative, empathetic, and less confrontational. Use inclusive phrasing and acknowledge potential alternative viewpoints.",
            "make_more_provocative": "\n\n**REFINEMENT DIRECTIVE:** Inject a bold, contrarian, or highly provocative element into the output. Challenge a common assumption. The goal is to spark debate and capture attention.",
            "add_step_by_step_explanation": "\n\n**REFINEMENT DIRECTIVE:** Append a detailed, step-by-step explanation to the primary output. Assume the reader is intelligent but a novice in this specific domain. Use analogies.",
        }
        # Placeholder directive for actions without a bespoke instruction.
        refinement_instruction = refinement_map.get(
            action,
            f"\n\n**REFINEMENT DIRECTIVE:** Apply the concept of '{action.replace('_', ' ')}' to the output.",
        )
        # Avoid duplicating refinement instructions.
        if refinement_instruction in prompt:
            return prompt
        return prompt + refinement_instruction
# --------------------------------------------------------------------------------------------------
class SynapseUI:
    """Builds and manages the Gradio user interface for the Synapse Environment."""

    def __init__(self):
        self.engine = SynapseEngine()
        # Dark palette exposed as CSS variables so the theme below can
        # reference the same colors.
        self.css = """
        :root {
            --syn-c-primary: #00A9FF;
            --syn-c-secondary: #89CFF3;
            --syn-c-background: #0d1117;
            --syn-c-surface: #161b22;
            --syn-c-text: #c9d1d9;
        }
        body, .gradio-container { background-color: var(--syn-c-background); color: var(--syn-c-text); font-family: 'Inter', sans-serif; }
        #chain_display { border: 1px solid #30363d; border-radius: 8px; padding: 1em; min-height: 400px; background-color: var(--syn-c-surface); }
        .node-card { border: 1px solid #30363d; border-radius: 12px; padding: 1em; margin-bottom: 1em; background-color: rgba(0, 169, 255, 0.05); }
        .node-header { display: flex; align-items: center; font-size: 1.5em; font-weight: 600; color: var(--syn-c-secondary); }
        .node-icon { font-size: 1.5em; margin-right: 0.5em; }
        button { border-radius: 8px !important; }
        """
        self.theme = gr.themes.Base(
            primary_hue="blue",
            secondary_hue="cyan",
            font="'Inter', sans-serif",
        ).set(
            body_background_fill="var(--syn-c-background)",
            block_background_fill="var(--syn-c-surface)",
            block_border_width="0px",
            body_text_color="var(--syn-c-text)",
            button_primary_background_fill="var(--syn-c-primary)",
            button_primary_text_color="#FFFFFF",
            button_secondary_background_fill="#30363d",
            button_secondary_text_color="#FFFFFF",
        )

    def _render_chain_display(self, chain_data: list) -> str:
        """Renders the current prompt chain as a rich Markdown/HTML string."""
        # NOTE(review): the HTML markup in these literals appears to have been
        # stripped at some point; the visible text content is preserved as-is.
        if not chain_data:
            return """
Your Prompt Chain is empty.

Select a Node Type from the Workshop to begin.
"""
        html = ""
        for i, node in enumerate(chain_data):
            refinements_str = ", ".join(
                r.replace("_", " ").title() for r in node['refinements_applied']
            )
            refinements_html = f"""
Refinements: {refinements_str}
""" if refinements_str else ""
            html += f"""
{node['icon']}Node {i + 1}: {node['node_type']}

// Rationale //
{node['explanation']}

{refinements_html} {self.engine._render_prompt_anatomy(node['prompt'])}
"""
            if i < len(chain_data) - 1:
                # Visual divider between consecutive node cards.
                html += "\n"
        return html

    def launch(self):
        """Builds and launches the full Gradio application."""
        with gr.Blocks(theme=self.theme, css=self.css, title="SYNAPSE") as app:
            # STATE MANAGEMENT (per browser session)
            chain_state = gr.State([])       # list of node dicts built so far
            active_node_id = gr.State(None)  # template key currently selected

            # HEADER
            gr.HTML("""
S Y N A P S E

The Dynamic Prompting Environment
""")

            with gr.Row():
                # LEFT COLUMN: THE WORKSHOP (INPUTS)
                with gr.Column(scale=1):
                    gr.Markdown("### 1. The Workshop")
                    node_selector = gr.Dropdown(
                        label="Select Node Type to Add",
                        choices=[(d['name'], k) for k, d in NodeBlueprint.NODE_TEMPLATES.items()],
                        value=None,
                    )

                    # Create input groups for all node types, initially hidden.
                    input_groups = {}
                    for node_id, template in NodeBlueprint.NODE_TEMPLATES.items():
                        with gr.Group(visible=False) as group:
                            gr.Markdown(f"#### {template['icon']} {template['name']}")
                            gr.Markdown(f"""
{template['description']}
""")
                            inputs = []
                            for field in template['inputs']:
                                if 'choices' in field:
                                    inputs.append(gr.Dropdown(
                                        label=field['label'],
                                        choices=field['choices'],
                                        value=field.get('value'),
                                    ))
                                else:
                                    inputs.append(gr.Textbox(
                                        label=field['label'],
                                        placeholder=field.get('placeholder', ''),
                                        lines=field.get('lines', 1),
                                    ))
                        # Stash the components on the Group so the click wiring
                        # below can flatten them in template order.
                        group.inputs = inputs
                        input_groups[node_id] = group

                    add_node_button = gr.Button("Add Node to Chain", variant="primary", visible=False)

                    # REFINEMENT WORKBENCH (dynamically populated)
                    with gr.Group(visible=False) as refinement_group:
                        gr.Markdown("### 3. Refinement Workbench")
                        gr.Markdown("""
Mutate the **last node** in the chain.
""")
                        refinement_buttons = []
                        # FIX: sort the union of actions so the button order is
                        # deterministic across runs (raw set iteration order is
                        # arbitrary).
                        all_actions = sorted({
                            a for t in NodeBlueprint.NODE_TEMPLATES.values()
                            for a in t['refinement_actions']
                        })
                        for action in all_actions:
                            btn = gr.Button(action.replace("_", " ").title(), visible=False)
                            refinement_buttons.append((action, btn))

                    clear_chain_button = gr.Button("Clear Entire Chain", variant="secondary")

                # RIGHT COLUMN: THE CHAIN DISPLAY
                with gr.Column(scale=2):
                    gr.Markdown("### 2. The Prompt Chain")
                    chain_display = gr.HTML(self._render_chain_display([]), elem_id="chain_display")

            # Flattened list of every input component (template order), plus
            # per-template offsets so handlers can recover the slice that
            # belongs to the currently selected node type.
            flat_inputs = [inp for grp in input_groups.values() for inp in grp.inputs]
            input_offsets = {}
            offset = 0
            for key, template in NodeBlueprint.NODE_TEMPLATES.items():
                input_offsets[key] = (offset, offset + len(template['inputs']))
                offset += len(template['inputs'])

            # EVENT HANDLING LOGIC
            def on_node_select(node_id):
                # FIX: propagate the selection through the outputs dict.
                # The original mutated ``active_node_id.value``, which changes
                # the component's default rather than the per-session state,
                # so add_node_to_chain saw a stale node_id.
                updates = {
                    group: gr.update(visible=(key == node_id))
                    for key, group in input_groups.items()
                }
                updates[add_node_button] = gr.update(visible=bool(node_id))
                updates[active_node_id] = node_id
                return updates

            def add_node_to_chain(chain_data, node_id, *all_values):
                # FIX: ``all_values`` holds the values of *every* group's
                # inputs; slice out the ones belonging to the active template.
                # The original forwarded the whole flattened list, so the
                # positional arguments misaligned for any template after the
                # first, and the blank hidden fields failed validation.
                start, end = input_offsets.get(node_id, (0, 0))
                node_output = self.engine.craft_node_output(node_id, *all_values[start:end])
                if node_output:
                    chain_data.append(node_output)
                # Show only the refinement actions the last node type supports.
                last_node_actions = set(
                    NodeBlueprint.NODE_TEMPLATES.get(node_id, {}).get('refinement_actions', [])
                )
                refinement_updates = {
                    btn: gr.update(visible=action in last_node_actions)
                    for action, btn in refinement_buttons
                }
                return {
                    chain_state: chain_data,
                    chain_display: self._render_chain_display(chain_data),
                    # FIX: reveal the workbench only once the chain has a node.
                    refinement_group: gr.update(visible=bool(chain_data)),
                    **refinement_updates,
                }

            def apply_refinement(chain_data, action):
                if not chain_data:
                    return chain_data, self._render_chain_display(chain_data)
                last_node = chain_data[-1]
                # Avoid applying the same refinement twice.
                if action not in last_node["refinements_applied"]:
                    last_node["prompt"] = self.engine.refine_node_prompt(
                        last_node["prompt"], action
                    )
                    last_node["refinements_applied"].append(action)
                return chain_data, self._render_chain_display(chain_data)

            # Connect events to functions.
            node_selector.change(
                on_node_select,
                inputs=node_selector,
                outputs=[active_node_id, add_node_button] + list(input_groups.values()),
            )
            add_node_button.click(
                add_node_to_chain,
                inputs=[chain_state, active_node_id] + flat_inputs,
                outputs=[chain_state, chain_display, refinement_group]
                        + [b for _, b in refinement_buttons],
            )
            for action, btn in refinement_buttons:
                btn.click(
                    apply_refinement,
                    inputs=[chain_state, gr.State(action)],
                    outputs=[chain_state, chain_display],
                )
            clear_chain_button.click(
                lambda: ([], self._render_chain_display([]), gr.update(visible=False)),
                None,
                [chain_state, chain_display, refinement_group],
            )

            # FOOTER
            gr.Markdown("""
SYNAPSE v0.9 // Forging the future of human-AI interaction. Let's connect.
""")

        app.queue().launch(show_error=True)


if __name__ == "__main__":
    ui = SynapseUI()
    ui.launch()