# MCPollinations MCP Server (Hugging Face Space)
# NOTE: the original header here was Hugging Face UI residue ("Spaces: Sleeping"),
# not source code; replaced with a comment so the file stays valid Python.
# --- Module setup -------------------------------------------------------------
# Flask app plus helpers for an HTTP-transport MCP (Model Context Protocol)
# server that proxies Pollinations AI image/text generation.
from flask import Flask, jsonify, request, Response
import json
import requests
import os
from datetime import datetime
import uuid
from urllib.parse import quote

app = Flask(__name__)

# MCP Server Info (HF Style)
# Reported to clients in the "serverInfo" field of initialize-style responses.
SERVER_INFO = {
    "name": "MCPollinations",
    "version": "1.0.0",
    "description": "MCP Server for AI Image and Text Generation via Pollinations"
}
# MCP Tools Definition (Official Format)
# Each entry follows the MCP tool shape: "name", "description", and a JSON
# Schema "inputSchema" describing accepted arguments. The defaults listed here
# mirror the fallbacks used by handle_image_generation / handle_text_generation.
MCP_TOOLS = [
    {
        "name": "generate_image",
        "description": "Generate images from text prompts using Pollinations AI",
        "inputSchema": {
            "type": "object",
            "properties": {
                "prompt": {
                    "type": "string",
                    "description": "Text description of the image to generate"
                },
                "model": {
                    "type": "string",
                    "description": "Model to use (flux, gptimage, etc.)",
                    "enum": ["flux", "gptimage", "playground", "stable-diffusion"],
                    "default": "flux"
                },
                "width": {
                    "type": "integer",
                    "description": "Image width in pixels",
                    "minimum": 256,
                    "maximum": 2048,
                    "default": 1024
                },
                "height": {
                    "type": "integer",
                    "description": "Image height in pixels",
                    "minimum": 256,
                    "maximum": 2048,
                    "default": 1024
                }
            },
            "required": ["prompt"]
        }
    },
    {
        "name": "generate_text",
        "description": "Generate text responses using Pollinations AI",
        "inputSchema": {
            "type": "object",
            "properties": {
                "prompt": {
                    "type": "string",
                    "description": "Text prompt for generation"
                },
                "model": {
                    "type": "string",
                    "description": "Text model to use",
                    "enum": ["openai", "claude", "gemini"],
                    "default": "openai"
                },
                "max_tokens": {
                    "type": "integer",
                    "description": "Maximum tokens to generate",
                    "minimum": 1,
                    "maximum": 4000,
                    "default": 1000
                }
            },
            "required": ["prompt"]
        }
    }
]
def is_browser_request(request):
    """Heuristically decide whether *request* comes from a web browser.

    Browsers advertise ``text/html`` in their Accept header and almost
    universally include ``mozilla`` in their User-Agent; either signal
    is treated as "browser".
    """
    headers = request.headers
    wants_html = 'text/html' in headers.get('Accept', '').lower()
    mozilla_ua = 'mozilla' in headers.get('User-Agent', '').lower()
    return wants_html or mozilla_ua
def create_mcp_response(id=None, result=None, error=None):
    """Assemble a JSON-RPC 2.0 response envelope.

    ``"jsonrpc": "2.0"`` is always present; ``id``, ``result`` and ``error``
    are included only when they are not None, so callers get a minimal,
    spec-shaped payload.
    """
    payload = {"jsonrpc": "2.0"}
    optional_fields = (("id", id), ("result", result), ("error", error))
    payload.update(
        {key: value for key, value in optional_fields if value is not None}
    )
    return payload
@app.route('/', methods=['GET', 'POST'])  # NOTE(review): decorator reconstructed — without it Flask never serves this view; confirm path/methods
def root():
    """Main MCP endpoint (HF Style).

    Browsers (detected by Accept/User-Agent sniffing) get a human-readable
    HTML status page; MCP clients get a JSON-RPC initialize-style result
    carrying the protocol version, capabilities and server info.
    """
    if is_browser_request(request):
        # Return user-friendly page for browsers
        return """
<!DOCTYPE html>
<html>
<head>
    <title>MCPollinations MCP Server</title>
    <meta charset="utf-8">
    <style>
        body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif;
               margin: 0; padding: 40px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
               color: white; min-height: 100vh; }
        .container { max-width: 800px; margin: 0 auto; background: rgba(255,255,255,0.1);
                     padding: 40px; border-radius: 20px; backdrop-filter: blur(10px); }
        h1 { font-size: 3em; margin-bottom: 20px; text-align: center; }
        .badge { background: rgba(255,255,255,0.2); padding: 8px 16px; border-radius: 20px;
                 display: inline-block; margin: 5px; }
        .endpoint { background: rgba(255,255,255,0.1); padding: 20px; margin: 15px 0;
                    border-radius: 10px; border-left: 4px solid #00d4aa; }
        .code { background: rgba(0,0,0,0.3); padding: 15px; border-radius: 8px;
                font-family: 'Monaco', 'Courier New', monospace; margin: 10px 0; }
        .status { color: #00d4aa; font-weight: bold; font-size: 1.2em; }
    </style>
</head>
<body>
    <div class="container">
        <h1>🤖 MCPollinations MCP Server</h1>
        <p class="status">✅ Server Status: Running</p>
        <p>Official Model Context Protocol server for AI image and text generation via Pollinations AI.</p>
        <h2>🛠️ Available Tools</h2>
        <div class="endpoint">
            <strong>generate_image</strong> - Generate images from text prompts
            <div class="badge">Models: flux, gptimage, playground</div>
        </div>
        <div class="endpoint">
            <strong>generate_text</strong> - Generate text using AI models
            <div class="badge">Models: openai, claude, gemini</div>
        </div>
        <h2>🔗 MCP Client Setup</h2>
        <p><strong>Server URL:</strong></p>
        <div class="code">https://huggingface.co/spaces/legends810/mcp-server</div>
        <h2>📚 Integration Examples</h2>
        <div class="endpoint">
            <strong>Claude Desktop</strong>
            <div class="code">{
  "mcpServers": {
    "mcpollinations": {
      "command": "npx",
      "args": ["mcp-remote", "https://huggingface.co/spaces/legends810/mcp-server"]
    }
  }
}</div>
        </div>
        <div class="endpoint">
            <strong>n8n MCP Client</strong>
            <div class="code">URL: https://huggingface.co/spaces/legends810/mcp-server
Transport: HTTP</div>
        </div>
        <h2>🚀 Quick Test</h2>
        <div class="code">curl -X POST https://huggingface.co/spaces/legends810/mcp-server/call \\
  -H "Content-Type: application/json" \\
  -d '{
    "jsonrpc": "2.0",
    "method": "tools/call",
    "params": {
      "name": "generate_image",
      "arguments": {"prompt": "beautiful sunset"}
    },
    "id": 1
  }'</div>
    </div>
</body>
</html>
"""
    else:
        # Return MCP protocol response for clients
        return jsonify(create_mcp_response(
            result={
                "protocolVersion": "2024-11-05",
                "capabilities": {
                    "tools": {"listChanged": True},
                    "logging": {}
                },
                "serverInfo": SERVER_INFO
            }
        ))
@app.route('/initialize', methods=['POST'])  # NOTE(review): decorator reconstructed — confirm path against real MCP clients
def initialize():
    """MCP Initialize endpoint.

    Echoes the client's JSON-RPC request id and returns the 2024-11-05
    protocol version, server capabilities and server info.
    """
    try:
        # get_json() raises/returns None on a non-JSON body; silent=True plus
        # the `or {}` fallback turns that into an id-less success response
        # instead of an AttributeError-driven 500.
        data = request.get_json(silent=True) or {}
        request_id = data.get('id')
        return jsonify(create_mcp_response(
            id=request_id,
            result={
                "protocolVersion": "2024-11-05",
                "capabilities": {
                    "tools": {"listChanged": True},
                    "logging": {}
                },
                "serverInfo": SERVER_INFO
            }
        ))
    except Exception as e:
        # -32603 is the JSON-RPC "internal error" code.
        return jsonify(create_mcp_response(
            error={"code": -32603, "message": f"Internal error: {str(e)}"}
        )), 500
@app.route('/tools/list', methods=['POST'])  # NOTE(review): decorator reconstructed — confirm path against real MCP clients
def tools_list():
    """MCP Tools List endpoint.

    Returns the static MCP_TOOLS catalogue, echoing the request id.
    """
    try:
        # Tolerate missing/non-JSON bodies (see initialize()): respond with
        # an id-less envelope rather than crashing into the 500 path.
        data = request.get_json(silent=True) or {}
        request_id = data.get('id')
        return jsonify(create_mcp_response(
            id=request_id,
            result={"tools": MCP_TOOLS}
        ))
    except Exception as e:
        # -32603 is the JSON-RPC "internal error" code.
        return jsonify(create_mcp_response(
            error={"code": -32603, "message": f"Internal error: {str(e)}"}
        )), 500
@app.route('/call', methods=['POST'])  # NOTE(review): path taken from the landing page's own curl example ("…/call") — confirm
def tools_call():
    """MCP Tools Call endpoint (main functionality).

    Dispatches a JSON-RPC "tools/call" request to the matching handler
    (generate_image / generate_text) and wraps the handler's content in a
    JSON-RPC envelope. Unknown tool names get a -32601 error with HTTP 400.
    """
    try:
        # Guard against missing/non-JSON bodies (see initialize()).
        data = request.get_json(silent=True) or {}
        request_id = data.get('id')
        params = data.get('params', {})
        tool_name = params.get('name')
        arguments = params.get('arguments', {})

        if tool_name == 'generate_image':
            result = handle_image_generation(arguments)
        elif tool_name == 'generate_text':
            result = handle_text_generation(arguments)
        else:
            # -32601: JSON-RPC "method not found".
            return jsonify(create_mcp_response(
                id=request_id,
                error={"code": -32601, "message": f"Unknown tool: {tool_name}"}
            )), 400

        return jsonify(create_mcp_response(
            id=request_id,
            result=result
        ))
    except Exception as e:
        return jsonify(create_mcp_response(
            error={"code": -32603, "message": f"Tool execution failed: {str(e)}"}
        )), 500
def handle_image_generation(arguments):
    """Build a Pollinations image URL for the given tool-call arguments.

    Args:
        arguments: dict of tool-call arguments; recognises "prompt",
            "model", "width" and "height" (defaults: flux, 1024x1024).

    Returns:
        MCP-style result dict with a "content" list: an image item whose
        "data" field carries the generated-image URL, plus a text summary.
    """
    prompt = arguments.get('prompt', 'beautiful landscape')
    model = arguments.get('model', 'flux')
    width = arguments.get('width', 1024)
    height = arguments.get('height', 1024)

    # Only non-default options become query parameters, keeping URLs short.
    params = []
    if width != 1024:
        params.append(f"width={width}")
    if height != 1024:
        params.append(f"height={height}")
    if model != 'flux':
        params.append(f"model={model}")

    # safe='' so a '/' inside the prompt is percent-encoded rather than
    # splitting the URL path (quote() treats '/' as safe by default).
    image_url = f"https://image.pollinations.ai/{quote(prompt, safe='')}"
    if params:
        image_url += "?" + "&".join(params)

    return {
        "content": [
            {
                # NOTE(review): MCP image content conventionally carries
                # base64 data in "data"; this server passes the URL through
                # instead — confirm downstream clients accept that.
                "type": "image",
                "data": image_url,
                "mimeType": "image/png"
            },
            {
                "type": "text",
                "text": f"Generated image: {prompt}\nModel: {model}\nDimensions: {width}x{height}\nURL: {image_url}"
            }
        ]
    }
def handle_text_generation(arguments):
    """Generate text via the Pollinations text API.

    Args:
        arguments: dict of tool-call arguments; recognises "prompt",
            "model" and "max_tokens" (defaults: openai, 1000).

    Returns:
        MCP-style result dict with a single text content item. On any
        fetch failure a deterministic placeholder string is returned
        instead (original best-effort behaviour preserved).
    """
    prompt = arguments.get('prompt', 'Hello world')
    model = arguments.get('model', 'openai')
    max_tokens = arguments.get('max_tokens', 1000)

    # Call Pollinations text API
    try:
        text_url = f"https://text.pollinations.ai/{quote(prompt)}?model={model}"
        response = requests.get(text_url, timeout=30)
        # NOTE(review): this truncates by *characters*, not tokens — the
        # "max_tokens" name oversells what it does.
        generated_text = response.text[:max_tokens]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any request/parse error still falls
        # back to a canned reply.
        generated_text = f"Generated response for: {prompt} (using {model} model)"

    return {
        "content": [
            {
                "type": "text",
                "text": generated_text
            }
        ]
    }
# Legacy endpoints for backward compatibility
@app.route('/status')  # NOTE(review): decorator reconstructed for this legacy endpoint — confirm path
def status():
    """Legacy status endpoint: server name/version plus a local timestamp."""
    return jsonify({
        "status": "running",
        "server": SERVER_INFO["name"],
        "version": SERVER_INFO["version"],
        "mcp_protocol": "2024-11-05",
        # Naive local time — presumably fine for a human-readable heartbeat.
        "timestamp": datetime.now().isoformat()
    })
@app.route('/health')  # NOTE(review): decorator reconstructed for this legacy endpoint — confirm path
def health():
    """Legacy liveness probe: constant healthy payload."""
    return jsonify({"status": "healthy"})
# Entry point: bind to all interfaces on $PORT (7860 is the HF Spaces default).
if __name__ == '__main__':
    port = int(os.environ.get('PORT', 7860))
    # debug=False for production; threaded=True so slow Pollinations fetches
    # don't serialize concurrent requests.
    app.run(host='0.0.0.0', port=port, debug=False, threaded=True)