import os
import json
import re

from huggingface_hub import InferenceClient


class ProposalGenerator:
    def __init__(self):
        self.token = os.environ.get("HF_TOKEN")
        # Default client
        self.client = InferenceClient(token=self.token)
    def generate_from_description(self, project_name: str, description: str, model: str = "Qwen/Qwen2.5-Coder-32B-Instruct", provider: str = None):
        """
        Generates a code and configuration proposal from a description.
        Uses chat_completion for better compatibility.
        """
        # Dynamic client configuration if necessary (e.g. provider change).
        # Note: InferenceClient is lightweight, so we can instantiate it on demand or use the existing one.
        # If a provider is specified, use it; otherwise let HF choose.
        # The "None" string coming from the UI must be converted to the None type.
        if provider == "None" or provider == "":
            provider = None

        print(f"🤖 LLM Call with Model: {model}, Provider: {provider}")

        # Use the current environment variable if available (supports UI updates), otherwise fall back to the token from __init__.
        current_token = os.environ.get("HF_TOKEN", self.token)
        client = InferenceClient(model=model, token=current_token, provider=provider)
        # print(self.token)  # Avoid printing the token in logs
        messages = [
            {
                "role": "system",
                "content": """You are an expert Python developer creating a tool for an MCP server via Gradio.
Your goal is to generate production-ready Python code that is fully typed and documented.
You MUST return ONLY a valid JSON object."""
            },
            {
                "role": "user",
                "content": f"""Create a tool named '{project_name}' that does the following: {description}

Requirements:
1. The function MUST have a clear and descriptive docstring (Google style preferred) explaining what it does, its arguments, and its return value. This docstring will be used as the tool description for the LLM.
2. The function arguments MUST be fully typed (e.g. `word: str`, `count: int`).
3. The function return type MUST be specified (e.g. `-> str`).
4. The function name should match '{project_name}' (normalized to Python snake_case).
5. If the code requires external libraries (like `requests`, `pandas`, `numpy`), list them.
6. If the function calls an external API that needs a token, accept the token as an argument and document it in the docstring.

Return ONLY a valid JSON object with the following structure:
{{
    "python_code": "def function_name(arg1: type) -> type:\\n \\"\\"\\"Docstring here...\\"\\"\\"\\n ...",
    "inputs": {{ "arg1": "Description for UI label" }},
    "output_desc": "Description for UI label of the output",
    "output_component": "text",  # Choose from: text, image, audio, video, json, html, file
    "requirements": ["requests", "pandas"]
}}

Make sure the python_code is a valid, complete, standalone Python function with all necessary imports inside (e.g. `import requests` inside the function, or at top level if compatible).
If the user provides an API Specification (Swagger/OpenAPI), generate a client function that implements the main operation described.
Do not use markdown formatting (no ```json). Just the raw JSON string.
"""
            }
        ]
        try:
            response = client.chat_completion(
                messages,
                max_tokens=4096,  # Increased to avoid truncation
                temperature=0.2,
                stream=False
            )

            # Content extraction
            content = response.choices[0].message.content.strip()

            # Robust JSON extraction via regex: grab from the first { to the last }
            match = re.search(r'\{.*\}', content, re.DOTALL)
            if match:
                json_content = match.group(0)
                return json.loads(json_content)

            # Fallback: direct parsing attempt if the regex fails (e.g. list or other format)
            return json.loads(content)

        except Exception as e:
            print(f"Error generating proposal: {e}")
            import traceback
            traceback.print_exc()
            # Fallback in case of error
            return {
                "python_code": f"# Error generating code: {str(e)}\n# Try changing the Inference Provider or Model.\ndef {project_name.replace('-', '_')}():\n    return 'Error'",
                "inputs": {},
                "output_desc": "Error fallback"
            }
# Module-level singleton
proposal_generator = ProposalGenerator()
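
# Example usage (illustrative sketch): assumes HF_TOKEN is set in the environment
# and that the default model is reachable through the Inference API.
# The project name and description below are hypothetical.
if __name__ == "__main__":
    proposal = proposal_generator.generate_from_description(
        project_name="word-counter",
        description="Count the number of words in a given text and return the count.",
    )
    print(json.dumps(proposal, indent=2))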