Spaces:
Running
Running
import json
import os
import sys
from typing import Any, Dict, List, Optional

import gradio as gr

# Add the repository root to the path to allow absolute 'src.xxx' imports
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))

from src.mcp_server import tools
from src.mcp_server.playground import get_playground_ui_handlers
from src.core.builder.proposal_generator import proposal_generator
# --- Model configuration ---
# Simplified, code-capable models shared by every provider entry below.
COMMON_MODELS = [
    "openai/gpt-oss-120b",
    "moonshotai/Kimi-K2-Instruct-0905"
]
# Maps an inference-provider id to the model choices shown in the UI.
# All listed providers currently share COMMON_MODELS; the "default" entry
# is the fallback used when a provider id is not present in this dict.
PROVIDER_MODELS = {
    "together": COMMON_MODELS,
    "hyperbolic": COMMON_MODELS,
    "None": COMMON_MODELS,
    # Fallback for the other providers
    "default": COMMON_MODELS
}
# --- Wrappers for the Gradio UI (exposed as MCP tools) ---
# These wrappers provide a friendly UI while also exposing the functions via
# MCP under explicit names that agents can discover.
def step_1_initialisation_and_proposal(project_name, description, model_id, provider_id):
    """
    STEP 1: Starts a new tool project and uses AI to propose draft code.
    Call this AFTER `step_0...`. It initializes the project and sets the optional HF_TOKEN.
    This is the entry point for creating a new MCP tool. It returns a draft_id and a code proposal based on the description.
    Args:
        project_name: The technical name of the tool (e.g., 'weather-fetcher').
        description: A natural language description of what the tool should do, or a raw Swagger/OpenAPI JSON specification.
        model_id: The LLM model to use for code generation (default : 'moonshotai/Kimi-K2-Instruct-0905').
        provider_id: The inference provider to use. Options: 'together', 'hyperbolic'.
    """
    # 1. Project initialization ('adhoc' type by default)
    init_result = tools.init_project(project_name, description, type="adhoc")
    draft_id = init_result.get("draft_id", "")

    # 2. AI proposal generation
    gr.Info("AI code generation in progress...")
    print(f"🤖 Generating proposal for: {project_name} (Model: {model_id}, Provider: {provider_id})...")
    proposal = proposal_generator.generate_from_description(project_name, description, model=model_id, provider=provider_id)
    gr.Info("Proposal generated! Please validate in the next tab.")

    # 3. Return the data that updates the UI.
    # Use .get() for every proposal key: the LLM response may omit any of
    # them, and a missing key must not crash the callback (previously only
    # 'requirements' and 'output_component' were guarded against this).
    reqs = proposal.get("requirements", [])
    out_comp = proposal.get("output_component", "text")
    # Serialize complex objects to JSON strings for the Code components
    inputs_str = json.dumps(proposal.get("inputs", {}), indent=2)
    reqs_str = json.dumps(reqs, indent=2)
    return (
        init_result,                        # out_init (JSON)
        draft_id,                           # draft_id_logic (Textbox)
        proposal.get("python_code", ""),    # python_code (Code)
        inputs_str,                         # inputs_dict (Code)
        proposal.get("output_desc", ""),    # output_desc (Textbox)
        reqs_str,                           # requirements_box (Code)
        out_comp                            # output_component_ui (Dropdown)
    )
def step_2_logic_definition(draft_id: str, python_code: str, inputs: Any, output_desc: str, requirements: List[str], output_component: str = "text"):
    """
    STEP 2: Validates and saves the tool code.
    Call this AFTER `step_1...`. It saves the Python implementation into the draft before deployment.
    Args:
        draft_id: The unique ID of the project draft (returned by Step 1).
        python_code: The complete Python source code for the tool function.
        inputs: A dictionary (or JSON string) describing the input parameters (e.g. {"city": "Name of the city"}).
        output_desc: A description of what the tool returns.
        requirements: A list of Python dependencies (pip packages) required by the code (e.g. ["requests", "pandas"]).
        output_component: The type of Gradio component for output (text, image, audio, etc.).
    """
    # From the UI `inputs` arrives as a dict; from the MCP API it may also be
    # a JSON string. tools.define_logic accepts both representations, so it
    # is forwarded unchanged.
    print(f"DEBUG [step_2_logic_definition]: inputs type={type(inputs)}")
    result = tools.define_logic(draft_id, python_code, inputs, output_desc, requirements, output_component)
    if "error" in result:
        gr.Info(f"Error: {result['error']}")
    else:
        gr.Info("Code validated and saved! Ready to deploy.")
    return result
def _derive_mcp_info(space_url: str):
    """Derive ``(mcp_url, tool_name)`` from a Hugging Face Space URL.

    For 'https://huggingface.co/spaces/<user>/<space>' the MCP URL uses the
    direct host form 'https://<user>-<space>.hf.space/gradio_api/mcp/' (the
    endpoint mcp-remote expects). Any other shape falls back to returning the
    URL unchanged with the generic tool name 'my-tool'.
    """
    mcp_url = space_url
    tool_name = "my-tool"
    try:
        if "huggingface.co/spaces/" in space_url:
            parts = space_url.split("huggingface.co/spaces/")
            if len(parts) > 1:
                path = parts[1].strip("/")
                if "/" in path:
                    user, space = path.split("/", 1)
                    tool_name = space
                    # Direct url format: https://user-space.hf.space
                    # Note: mcp-remote uses the /gradio_api/mcp/ endpoint
                    mcp_url = f"https://{user}-{space}.hf.space/gradio_api/mcp/"
    except Exception:
        # Best-effort parsing: keep the defaults on any malformed URL
        pass
    return mcp_url, tool_name


def step_3_deployment(draft_id):
    """
    STEP 3: Deploys the tool to a Hugging Face Space.
    Call this AFTER `step_2...`. It creates a new public Space (or updates it) with the tool's code.
    It requires the draft_id that you got from previous steps.
    When success, it gives some info to give to the user to help him add the resulting MCP server in his clients.
    Args:
        draft_id: The unique ID of the project draft (from Step 1).
    """
    gr.Info("Deployment in progress... This may take a few minutes.")
    # Simplification: Always public, always new (overwrite/create), space name = project name
    result = tools.deploy_to_space(draft_id, visibility="public", space_target="new", target_space_name=None)
    status_msg = ""
    space_url_val = ""
    mcp_url_val = ""
    claude_config_val = ""
    if "error" not in result:
        space_url_val = result.get('url', '')
        gr.Info(f"Deployment successful! URL: {space_url_val}")
        status_msg = "### 🚀 Deployment successful!"
        # Build the MCP endpoint URL and tool name from the Space URL
        mcp_url_val, tool_name = _derive_mcp_info(space_url_val)
        # Claude Desktop configuration snippet for the freshly deployed server
        config_dict = {
            "mcpServers": {
                tool_name: {
                    "command": "npx",
                    "args": [
                        "mcp-remote",
                        mcp_url_val,
                        "--transport",
                        "streamable-http"
                    ]
                }
            }
        }
        claude_config_val = json.dumps(config_dict, indent=2)
        # Proactive message for the calling agent
        result["instructions_for_agent"] = (
            f"Deployment successful! Please inform the user that the tool is deployed on Hugging Face Spaces: {space_url_val} "
            f"and available via MCP: {mcp_url_val}. "
            "IMPORTANT: Tell the user it takes about 30 seconds for the Space to start up and become operational."
        )
    else:
        gr.Info(f"Deployment failed: {result.get('error')}")
        status_msg = f"### ❌ Deployment failed\n\nError: {result.get('error')}"
    # Returns, in order: raw JSON result (out_deploy), markdown status,
    # Space URL, MCP URL, Claude config code.
    return json.dumps(result, indent=2), status_msg, space_url_val, mcp_url_val, claude_config_val
# Fetch the playground UI handlers (tool-list reload + chat response),
# wired to the Tab 4 components further below.
reload_tools_handler, chat_response_handler = get_playground_ui_handlers()
# --- MCP tool exposure (pure API) ---
# These functions are exposed directly to LLMs via MCP, in addition to the UI.
def step_0_configuration(hf_user: Optional[str] = None, hf_token: Optional[str] = None, default_space: Optional[str] = None):
    """
    STEP 0: Configures the MCEPTION server environment.
    This step is needed to set up the Hugging Face environment.
    After that follow steps 1, 2, 3 in this order and keep track of the draft_id that you will receive.
    The process for each tool finishes when step 3 is a success.
    You have to follow all the steps for a tool before handling the next tool.
    Args:
        hf_user: The Hugging Face username or organization (namespace) where Spaces will be deployed.
        hf_token: (IF OVERRIDE EXPLICITLY NEEDED) The Hugging Face Write Token. If not provided here, it must be set in the server's environment variables.
        default_space: The name of the default toolbox Space to use if no specific target is provided during deployment (e.g. 'my-tools').
    """
    # Note: In a real app with multi-user, this should be session-scoped or persistent.
    # Here we set env vars for the current process; a None/empty argument
    # leaves the corresponding environment variable untouched.
    if hf_user:
        os.environ["HF_USER"] = hf_user
    if hf_token:
        os.environ["HF_TOKEN"] = hf_token
    if default_space:
        os.environ["DEFAULT_SPACE"] = default_space
    return {
        "status": "success",
        "message": f"Configuration updated. User: {os.environ.get('HF_USER')}, Space: {os.environ.get('DEFAULT_SPACE')}"
    }
def expert_step1_propose_implementation(project_name: str, description: str):
    """
    [Expert Tool - Step 1] Generates a Python implementation proposal without initializing a UI draft.
    Use this tool if you are an AI agent wanting to generate code from a spec before deciding to create a draft.
    Args:
        project_name: Name of the intended tool.
        description: The tool description or Swagger/OpenAPI specification.
    """
    # Thin pass-through: all the generation logic lives in the tools module.
    proposal = tools.propose_implementation(project_name, description)
    return proposal
def expert_step2_define_logic(draft_id: str, python_code: str, inputs_json: str, output_desc: str, requirements_json: str = "[]"):
    """
    [Expert Tool - Step 2] Defines the logic for a tool using JSON strings for complex arguments.
    Use this tool instead of `step_2_logic_definition` to avoid schema validation issues with complex nested JSON inputs.
    Args:
        draft_id: The draft ID returned by init.
        python_code: The complete Python code.
        inputs_json: A JSON string representing the inputs dictionary (e.g. '{"arg": "desc"}').
        output_desc: Description of the output.
        requirements_json: A JSON string representing the list of requirements (e.g. '["requests"]').
    """
    # json is already imported at module level; the previous function-local
    # re-import shadowed it for no benefit and was removed.
    try:
        inputs = json.loads(inputs_json)
    except (ValueError, TypeError):
        # Fallback: the MCP client may already send a dict, or an invalid
        # string; forward it as-is (tools.define_logic handles both forms).
        inputs = inputs_json
    try:
        requirements = json.loads(requirements_json) if requirements_json else []
    except (ValueError, TypeError):
        # A non-JSON, non-empty string is treated as a single requirement.
        requirements = [requirements_json] if requirements_json else []
    return tools.define_logic(draft_id, python_code, inputs, output_desc, requirements)
def util_delete_tool(space_name: str, tool_name: str):
    """
    [Utility Tool] Deletes an existing tool from a deployed Space.
    Use this to clean up test tools or remove deprecated ones.
    Args:
        space_name: Name of the Space (e.g. 'my-toolbox' or 'user/my-toolbox').
        tool_name: Name of the tool to delete (e.g. 'strawberry_counter').
    """
    # Direct delegation to the tools module, which owns the deletion logic.
    outcome = tools.delete_tool(space_name, tool_name)
    return outcome
def util_get_tool_code(space_name: str, tool_name: str):
    """
    [Utility Tool] Retrieves the source code of an existing tool from a deployed Space.
    Use this to inspect or improve an existing tool.
    Args:
        space_name: Name of the Space.
        tool_name: Name of the tool.
    """
    # Direct delegation to the tools module, which performs the lookup.
    source = tools.get_tool_code(space_name, tool_name)
    return source
# --- UI construction ---
with gr.Blocks(title="MCePtion") as demo:
    # Compute the header image URL dynamically to avoid CORS issues on HF Spaces
    _space_id = os.environ.get("SPACE_ID")
    if _space_id:
        # On a Space: absolute link to the raw file
        _header_image_url = f"https://huggingface.co/spaces/{_space_id}/resolve/main/assets/images/header_bg.jpeg"
    else:
        # Locally: local link served by Gradio
        _header_image_url = "/file=assets/images/header_bg.jpeg"
    # Top banner (image cropped to ~40% height, focused on the top part)
    gr.HTML(f"""
    <div style="width: 100%; overflow: hidden; margin-bottom: 20px;">
        <img src="{_header_image_url}" style="width: 100%; height: 260px; object-fit: cover; object-position: top; display: block; border-radius: 8px;" alt="MCePtion Header">
    </div>
    """)
    gr.Markdown("# 🏭 MCEPTION is the MCP of your MCPs")
    gr.Markdown("This server allows you to create and deploy other MCP servers on Hugging Face Spaces.")
    with gr.Tab("0. Setup & How-to"):
        gr.Markdown("## Global Configuration")
        # Default-user resolution.
        # Priority: HF_USER > SPACE_AUTHOR_NAME > SPACE_ID namespace > empty
        _default_user = os.environ.get("HF_USER")
        if not _default_user:
            _default_user = os.environ.get("SPACE_AUTHOR_NAME")
        if not _default_user and os.environ.get("SPACE_ID"):
            try:
                # SPACE_ID is '<user>/<space>'; keep the namespace part
                _default_user = os.environ.get("SPACE_ID").split("/")[0]
            except:
                pass
        hf_user_profile = gr.Textbox(
            label="HF User Profile / Namespace",
            value=_default_user or "",
            placeholder="e.g. alihmaou",
            info="Your default Hugging Face username or organization."
        )
        default_mcp_space_name = gr.Textbox(
            label="Default Toolbox Name",
            value=os.environ.get("DEFAULT_SPACE", "mymcpserver"),
            placeholder="e.g. mymcpserver",
            info="Default Space (Toolbox) name for additions (will be concatenated with user)."
        )
        hf_token_input = gr.Textbox(
            label="HF Write Token (Optional override)",
            type="password",
            placeholder="hf_...",
            info="Deployment token. If empty, uses the server's HF_TOKEN environment variable."
        )
        # Button to apply config (simple update of global variables/env for the session)
        btn_save_config = gr.Button("Save Configuration")
| def save_config_ui(user: str, space: str, token: str): | |
| if user: os.environ["HF_USER"] = user | |
| if space: os.environ["DEFAULT_SPACE"] = space | |
| if token: os.environ["HF_TOKEN"] = token | |
| gr.Info("Configuration saved!") | |
| return f"Configuration saved! User: {user}, Default Space: {space}" | |
        config_status = gr.Markdown("")
        btn_save_config.click(save_config_ui, inputs=[hf_user_profile, default_mcp_space_name, hf_token_input], outputs=config_status)
        gr.Markdown("## How to use this MCePtion server?")
        with gr.Row():
            # NOTE(review): gr.Column's first positional parameter is `scale`
            # (an int) — passing "User Guide" here looks unintended; confirm.
            with gr.Column("User Guide"):
                gr.Markdown("""
                ## Human Interface User Guide
                ### 1. Tool Creation
                * Go to tab **1. Initialization**.
                * Provide a name and describe what you want (or paste a Swagger).
                * Click on "Initialize & Generate".
                ### 2. Code Validation
                * Go to tab **2. Logic Definition**.
                * Check the generated Python code and dependencies.
                * Click on "Validate Code" to validate.
                ### 3. Deployment
                * Go to tab **3. Deployment**.
                * Choose "New" to create a new Space or "Existing" to add to a Toolbox.
                * Click on "Deploy".
                ### 4. Test
                * Use the **4. Playground** tab to test your new tool after initialization (approx. 1 minute).
                """)
            with gr.Column():
                # Compute the display URLs of THIS server (Space or local run)
                _c_space_id = os.environ.get("SPACE_ID", None)
                _c_space_host = os.environ.get("SPACE_HOST", "localhost:7860")
                if _c_space_id:
                    _c_space_url = f"https://huggingface.co/spaces/{_c_space_id}"
                    _c_mcp_url = f"https://{_c_space_host}/gradio_api/mcp/"
                    _c_server_name = _c_space_id.split("/")[-1] if "/" in _c_space_id else _c_space_id
                else:
                    _c_space_url = "http://localhost:7860"
                    _c_mcp_url = "http://localhost:7860/gradio_api/mcp/"
                    _c_server_name = "metamcp-local"
                # Ready-to-paste Claude Desktop config for this server itself
                _c_claude_config = {
                    "mcpServers": {
                        _c_server_name: {
                            "command": "npx",
                            "args": [
                                "mcp-remote",
                                _c_mcp_url,
                                "--transport",
                                "streamable-http"
                            ]
                        }
                    }
                }
                _c_claude_config_str = json.dumps(_c_claude_config, indent=2)
                gr.Markdown("""## MCP Integration Settings""")
                gr.Code(label="URL of this space :", value=_c_space_url, language=None, interactive=False, lines=1)
                gr.Code(label="URL of MCP endpoint :", value=_c_mcp_url, language=None, interactive=False, lines=1)
                gr.Code(label="Claude Desktop Configuration", value=_c_claude_config_str, language="json", interactive=False)
    with gr.Tab("1. Initialization"):
        gr.Markdown("Start by initializing a new project.")
        project_name = gr.Textbox(label="e.g. Project Name (e.g. strawberry-counter, city-weather)...")
        project_desc = gr.Textbox(
            label="Tool Description or Specification (Swagger/OpenAPI JSON)",
            lines=10,
            placeholder="Describe what the tool should do, or paste the content of a swagger.json file here to generate an API client automatically."
        )
        with gr.Accordion("AI Settings (Advanced)", open=False):
            provider_id = gr.Dropdown(
                label="Inference Provider",
                choices=["sambanova", "together", "None", "hyperbolic", "fal-ai", "replicate", "novita", "nebius", "cerebras", "fireworks", "groq"],
                value="together",
                info="Select a specific provider."
            )
            model_id = gr.Dropdown(
                label="LLM Model",
                value="moonshotai/Kimi-K2-Instruct-0905",
                choices=COMMON_MODELS,
                allow_custom_value=True,
                info="Choose a code-optimized model or type a new one."
            )
        # Dynamic model update
| def update_models(provider: str): | |
| models = PROVIDER_MODELS.get(provider, PROVIDER_MODELS["default"]) | |
| return gr.update(choices=models, value=models[0] if models else "") | |
        provider_id.change(update_models, inputs=[provider_id], outputs=[model_id])
        btn_init = gr.Button("Initialize Project & Propose Code (AI)")
        out_init = gr.JSON(label="Result (Copy the draft_id)")
    with gr.Tab("2. Logic Definition"):
        gr.Markdown("Verify and refine the Python code and interface of your tool.")
        # Display draft_id as read-only to ensure propagation
        draft_id_logic = gr.Textbox(label="Draft ID", interactive=False)
        with gr.Row():
            # Left column: code editor
            with gr.Column(scale=2):
                python_code = gr.Code(language="python", label="Python Code (e.g. def count_r(word): ...)")
            # Right column: requirements, inputs, outputs
            with gr.Column(scale=1):
                # 1. Requirements
                requirements_box = gr.Code(language="json", label="Requirements (JSON List)", value='[]')
                # 2. Inputs
                inputs_dict = gr.Code(language="json", label="Inputs (JSON)", value='{"word": "text"}')
                # 3. Outputs
                output_desc = gr.Textbox(label="Output Description")
                output_component_ui = gr.Dropdown(
                    label="Output Type (Gradio Component)",
                    choices=["text", "image", "audio", "video", "html", "json", "file"],
                    value="text",
                    interactive=True
                )
        btn_logic = gr.Button("Validate Code")
        out_logic = gr.JSON(label="Result")
        btn_logic.click(
            step_2_logic_definition,
            inputs=[draft_id_logic, python_code, inputs_dict, output_desc, requirements_box, output_component_ui],
            outputs=out_logic,
            api_name="step_2_logic_definition"
        )
    with gr.Tab("3. Deployment"):
        gr.Markdown("Deploy your tool to Hugging Face Spaces.")
        with gr.Row():
            draft_id_deploy = gr.Textbox(label="Draft ID")
            # Simplification: no other inputs needed
        # Deployment plan summary (computed dynamically from the draft_id)
        deployment_summary = gr.Markdown("Waiting for Draft ID...")
| def update_deployment_summary(draft_id: str): | |
| if not draft_id: | |
| return "Waiting..." | |
| # Simplified logic mirroring tools.deploy_to_space | |
| default_space = os.environ.get("DEFAULT_SPACE") | |
| target = default_space if default_space else "New Space (Project Name)" | |
| mode = "ADD (Toolbox)" if default_space else "CREATE (New Space)" | |
| return f""" | |
| ### 📋 Deployment Summary | |
| * **Mode:** {mode} | |
| * **Target:** `{target}` | |
| * **Visibility:** Public | |
| If you use a `DEFAULT_SPACE`, the tool will be added to your existing toolbox without overwriting other tools. | |
| Otherwise, a new dedicated Space will be created. | |
| """ | |
        btn_deploy = gr.Button("Deploy to Spaces", variant="primary")
        out_status = gr.Markdown("")
        with gr.Row():
            # Using gr.Code because gr.Textbox(show_copy_button=True) is not supported in this Gradio version
            out_space_url = gr.Code(language=None, label="Hugging Face Space URL", interactive=False, lines=1)
            out_mcp_url = gr.Code(language=None, label="MCP Endpoint URL", interactive=False, lines=1)
        out_claude_config = gr.Code(language="json", label="Claude Desktop Configuration (add to claude_desktop_config.json)")
        with gr.Accordion("JSON Details (Debug)", open=False):
            out_deploy = gr.Code(language="json", label="Raw Result")
        # Refresh the deployment summary whenever the draft_id changes
        draft_id_deploy.change(update_deployment_summary, inputs=[draft_id_deploy], outputs=[deployment_summary])
| # Fonction pour extraire l'URL MCP directe et préremplir le playground | |
| def auto_fill_playground(mcp_url_val: str): | |
| if not mcp_url_val: | |
| return gr.update() | |
| return mcp_url_val | |
    # Global event wiring (done once all components are defined)
    # 1. Init -> auto-fill tab 2 (Logic) and copy the draft ID to tab 3 (Deploy)
    btn_init.click(
        step_1_initialisation_and_proposal,
        inputs=[project_name, project_desc, model_id, provider_id],
        outputs=[out_init, draft_id_logic, python_code, inputs_dict, output_desc, requirements_box, output_component_ui],
        api_name="step_1_initialisation_and_proposal"
    ).then(
        # Identity hop: mirrors draft_id_logic into the deployment tab's box
        fn=lambda x: x,
        inputs=[draft_id_logic],
        outputs=[draft_id_deploy]
    )
    with gr.Tab("4. Test & Playground (Smolagents)"):
        gr.Markdown("Immediately test your deployed MCP server.")
        with gr.Column():
            mcp_url_input = gr.Textbox(
                label="MCP Server URL",
                placeholder="e.g. https://your-user-your-space.hf.space/gradio_api/mcp/sse",
                scale=3
            )
            btn_reload = gr.Button("🔄 Load Tools", scale=1)
            status_msg = gr.Markdown("")
            # Table adapted for tool display (wrap=True)
            tool_table = gr.DataFrame(
                headers=["Tool name", "Description", "Params"],
                label="Detected Tools",
                wrap=True,
                interactive=False
            )
        gr.Markdown("""
        ### ⚙️ Smolagents Configuration
        To use this tool with smolagents in your code:
        ```python
        from smolagents import MCPClient
        # Direct HTTP Mode (recommended)
        client = MCPClient(url="SERVER_URL", structured_output=False)
        ```
        """)
        gr.Markdown("### 🤖 Chat with your MCP Agent")
        # Chat handler comes from src.mcp_server.playground (see top of file)
        chatbot = gr.ChatInterface(
            fn=chat_response_handler
        )
        btn_reload.click(
            fn=reload_tools_handler,
            inputs=[mcp_url_input],
            outputs=[tool_table, status_msg]
        )
    with gr.Tab("README"):
        # Load README.md at build time and render it in the tab
        readme_content = ""
        try:
            with open("README.md", "r", encoding="utf-8") as f:
                readme_content = f.read()
            # Remove Hugging Face YAML frontmatter if present
            if readme_content.startswith("---"):
                try:
                    # Find the end of the frontmatter (second '---')
                    # We start searching from index 3 to skip the first '---'
                    end_index = readme_content.find("---", 3)
                    if end_index != -1:
                        # Slice content after the second '---' and strip leading whitespace
                        readme_content = readme_content[end_index + 3:].lstrip()
                except Exception:
                    pass
        except Exception as e:
            readme_content = f"Unable to load README.md: {str(e)}"
        # Row container to align the 3 columns horizontally
        with gr.Row():
            # 1. Empty left column (1 part)
            # min_width=0 lets the column shrink when needed
            with gr.Column(scale=1, min_width=0):
                pass
            # 2. Central column with the content (3 parts)
            with gr.Column(scale=3):
                gr.Markdown(readme_content)
            # 3. Empty right column (1 part)
            with gr.Column(scale=1, min_width=0):
                pass
    with gr.Tab("EXAMPLES"):
        # Load the examples document (note the .MD extension on disk)
        example_content = ""
        try:
            with open("assets/Examples.MD", "r", encoding="utf-8") as f:
                example_content = f.read()
        except Exception as e:
            example_content = f"Unable to load Examples.md: {str(e)}"
        # Row container to align the 3 columns horizontally
        with gr.Row():
            # 1. Empty left column (1 part)
            # min_width=0 lets the column shrink when needed
            with gr.Column(scale=1, min_width=0):
                pass
            # 2. Central column with the content (3 parts)
            with gr.Column(scale=3):
                gr.Markdown(example_content)
            # 3. Empty right column (1 part)
            with gr.Column(scale=1, min_width=0):
                pass
    # Deferred deployment wiring (needs mcp_url_input, defined in Tab 4)
    btn_deploy.click(
        step_3_deployment,
        inputs=[draft_id_deploy],
        outputs=[out_deploy, out_status, out_space_url, out_mcp_url, out_claude_config],
        api_name="step_3_deployment"
    ).then(
        # After a successful deploy, prefill the playground with the MCP URL
        fn=auto_fill_playground,
        inputs=[out_mcp_url],
        outputs=[mcp_url_input]
    )
    # Explicit exposure of tools for MCP agents without a UI.
    # This lets ChatGPT/Claude call these functions directly.
    # Note: UI-bound functions are already exposed, but these are cleaner for an API.
    # Gradio automatically exposes functions used in the interface, but specific API endpoints can be added.
    # However, with mcp_server=True, Gradio exposes everything that is triggered.
    # To make sure 'propose_implementation' is available, we add it via an invisible component.
    # In the current Gradio MCP version, only event-bound functions are exposed,
    # hence this invisible "API Box" accordion exposing the expert/util tools.
    with gr.Accordion("API Tools (Invisible)", visible=False):
        api_input_name = gr.Textbox()
        api_input_desc = gr.Textbox()
        api_output = gr.JSON()
        # Configuration tool
        api_conf_user = gr.Textbox()
        api_conf_token = gr.Textbox()
        api_conf_space = gr.Textbox()
        btn_api_conf = gr.Button("Configure API")
        btn_api_conf.click(
            step_0_configuration,
            inputs=[api_conf_user, api_conf_token, api_conf_space],
            outputs=[api_output],
            api_name="step_0_configuration"
        )
        btn_api_propose = gr.Button("Propose Implementation API")
        btn_api_propose.click(
            expert_step1_propose_implementation,
            inputs=[api_input_name, api_input_desc],
            outputs=[api_output],
            api_name="expert_step1_propose_implementation"  # Tool name shown to the LLM
        )
        # Exposure of mcp_define_logic
        api_draft_id = gr.Textbox()
        api_code = gr.Textbox()
        api_inputs_json = gr.Textbox()
        api_out_desc = gr.Textbox()
        api_reqs_json = gr.Textbox()
        btn_api_define = gr.Button("Define Logic API")
        btn_api_define.click(
            expert_step2_define_logic,
            inputs=[api_draft_id, api_code, api_inputs_json, api_out_desc, api_reqs_json],
            outputs=[api_output],
            api_name="expert_step2_define_logic"
        )
        # Utils
        api_util_space = gr.Textbox()
        api_util_tool = gr.Textbox()
        btn_util_delete = gr.Button("Delete Tool API")
        btn_util_delete.click(
            util_delete_tool,
            inputs=[api_util_space, api_util_tool],
            outputs=[api_output],
            api_name="util_delete_tool"
        )
        btn_util_get = gr.Button("Get Tool Code API")
        btn_util_get.click(
            util_get_tool_code,
            inputs=[api_util_space, api_util_tool],
            outputs=[api_output],
            api_name="util_get_tool_code"
        )
# --- MCP Resources and Prompts definitions ---
# Only define these when the installed Gradio exposes `gr.mcp`.
# NOTE(review): the functions are defined but never decorated/registered here —
# presumably placeholders for gr.mcp resource/prompt decorators; confirm.
if hasattr(gr, "mcp"):
    def list_active_drafts() -> str:
        """Returns a list of currently active project drafts."""
        # Note: In a real app, this would query the session manager
        return "Active Drafts: [draft_id_1, draft_id_2]"
| def help_create_tool(topic: str = "general") -> str: | |
| """ | |
| Provides a prompt template to help users create a new tool. | |
| Args: | |
| topic: The topic of the tool (e.g. 'data', 'fun', 'utility') | |
| """ | |
| return f"I want to create a new MCP tool related to {topic}. Can you guide me through the initialization, logic definition, and deployment steps using the available tools?" | |
# Entry point
if __name__ == "__main__":
    # Launch with mcp_server=True so the event-bound functions above are
    # exposed to LLM clients as MCP tools; 0.0.0.0 binds for Space hosting.
    demo.launch(server_name="0.0.0.0", server_port=7860, mcp_server=True, show_error=True)