borodache committed on
Commit
975e67a
Β·
verified Β·
1 Parent(s): 0674ddc

Upload 4 files

Browse files
Files changed (3) hide show
  1. agent-config.py +209 -0
  2. agent.json +11 -9
  3. requirements.txt +3 -0
agent-config.py ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import asyncio
3
+ from typing import Any
4
+ from textblob import TextBlob
5
+ import gradio as gr
6
+ from mcp import Tool
7
+ from mcp.server import Server
8
+ from mcp.types import TextContent
9
+
10
+ # Try to import SSE server
11
+ try:
12
+ from mcp.server.sse import SseServerTransport
13
+ from starlette.applications import Starlette
14
+ from starlette.routing import Route, Mount
15
+ from starlette.responses import Response, JSONResponse
16
+ from starlette.middleware.cors import CORSMiddleware
17
+ import uvicorn
18
+ SSE_AVAILABLE = True
19
+ except ImportError:
20
+ SSE_AVAILABLE = False
21
+ print("Warning: SSE dependencies not available.")
22
+
23
# Shared sentiment analysis function
def sentiment_analysis(text: str) -> dict:
    """Analyze the sentiment of *text* with TextBlob.

    Returns a dict with ``polarity`` (-1..1), ``subjectivity`` (0..1) and an
    ``assessment`` label.  Blank input yields neutral scores plus an
    ``error`` entry.
    """
    # Guard clause: blank / whitespace-only input never reaches TextBlob.
    if not (text and text.strip()):
        return {
            "error": "No text provided",
            "polarity": 0,
            "subjectivity": 0,
            "assessment": "neutral"
        }

    scores = TextBlob(text).sentiment
    polarity = scores.polarity
    if polarity > 0:
        label = "positive"
    elif polarity < 0:
        label = "negative"
    else:
        label = "neutral"

    return {
        "polarity": round(polarity, 2),
        "subjectivity": round(scores.subjectivity, 2),
        "assessment": label
    }
43
+
44
# Gradio wrapper
def sentiment_analysis_gradio(text: str) -> str:
    """Gradio-facing wrapper: analyze *text* and pretty-print the result as JSON."""
    return json.dumps(sentiment_analysis(text), indent=2)
49
+
50
# Create MCP server
mcp_server = Server("sentiment-analysis")

@mcp_server.list_tools()
async def handle_list_tools() -> list[Tool]:
    """Advertise the single sentiment-analysis tool to MCP clients."""
    # JSON schema for the tool's single required argument.
    input_schema = {
        "type": "object",
        "properties": {
            "text": {
                "type": "string",
                "description": "The text to analyze for sentiment"
            }
        },
        "required": ["text"]
    }
    return [
        Tool(
            name="analyze_sentiment",
            description=(
                "Analyze the sentiment of text using TextBlob. Returns "
                "polarity (-1 to 1), subjectivity (0 to 1), and assessment "
                "(positive/negative/neutral)."
            ),
            inputSchema=input_schema
        )
    ]
72
+
73
@mcp_server.call_tool()
async def handle_call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
    """Dispatch an MCP tool invocation and wrap the result as text content."""
    if name != "analyze_sentiment":
        # Unrecognized tool: report it rather than raising.
        return [TextContent(type="text", text=f"Unknown tool: {name}")]

    payload = sentiment_analysis(arguments.get("text", ""))
    return [TextContent(type="text", text=json.dumps(payload, indent=2))]
82
+
83
# Create Gradio interface with API access enabled
_EXAMPLE_TEXTS = [
    ["I absolutely love this product! It's amazing and works perfectly."],
    ["This is the worst experience I've ever had. Terrible service."],
    ["The weather today is cloudy with a chance of rain."],
]

gradio_interface = gr.Interface(
    fn=sentiment_analysis_gradio,
    inputs=gr.Textbox(placeholder="Enter text to analyze...", label="Input Text", lines=5),
    outputs=gr.Textbox(label="Sentiment Analysis Result (JSON)", lines=10),
    title="Text Sentiment Analysis",
    description="Analyze sentiment using TextBlob. MCP server available at http://localhost:8000/sse",
    examples=_EXAMPLE_TEXTS,
    api_name="analyze",  # expose the endpoint to API callers
)
104
+
105
if SSE_AVAILABLE:
    # One shared transport for every connection.  The path given here is the
    # endpoint clients are told to POST their messages to, so it must match
    # the Mount below.  The original code built a fresh transport per request
    # and answered POST /messages with static text, which meant client
    # messages could never reach the live MCP session.
    sse_transport = SseServerTransport("/messages/")

    async def handle_sse(request):
        """Accept an SSE connection and run the MCP server over it."""
        async with sse_transport.connect_sse(
            request.scope,
            request.receive,
            request._send  # NOTE(review): private Starlette attr, but this is the documented MCP SSE pattern
        ) as (read_stream, write_stream):
            await mcp_server.run(
                read_stream,
                write_stream,
                mcp_server.create_initialization_options()
            )

        return Response()

    async def handle_health(request):
        """Health check endpoint."""
        return JSONResponse({"status": "healthy", "service": "sentiment-analysis-mcp"})

    # Create Starlette app for MCP.
    starlette_app = Starlette(
        routes=[
            Route("/sse", endpoint=handle_sse),
            # Delegate client POSTs to the transport so they are routed into
            # the active SSE session (this is what Mount is imported for).
            Mount("/messages/", app=sse_transport.handle_post_message),
            Route("/health", endpoint=handle_health, methods=["GET"]),
        ]
    )

    # Add CORS middleware so browser-based MCP clients can connect.
    starlette_app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    def run_mcp_http_server():
        """Run MCP server over HTTP (blocking; intended for a background thread)."""
        print("Starting MCP server on http://localhost:8000")
        uvicorn.run(starlette_app, host="0.0.0.0", port=8000, log_level="warning")

    def main():
        """Run both servers: MCP over HTTP in a daemon thread, then Gradio in front."""
        import threading
        import time

        print("=" * 70)
        print("🚀 Starting Combined Sentiment Analysis Server")
        print("=" * 70)
        print("📊 Gradio UI: http://localhost:7860")
        print("🔌 MCP Server: http://localhost:8000/sse")
        print("❤️ Health Check: http://localhost:8000/health")
        print("📡 Gradio API: http://localhost:7860/api/analyze")
        print("=" * 70)
        print("\nConfiguration for agent.json:")
        print(json.dumps({
            "servers": [{
                "type": "sse",
                "url": "http://localhost:8000/sse"
            }]
        }, indent=2))
        print("=" * 70)

        # Start MCP server in a daemon thread so it dies with the process.
        mcp_thread = threading.Thread(target=run_mcp_http_server, daemon=True)
        mcp_thread.start()

        # Give MCP server time to bind its port before announcing Gradio.
        time.sleep(2)

        # Start Gradio (blocking).
        gradio_interface.launch(
            server_name="0.0.0.0",
            server_port=7860,
            share=False,
            show_error=True
        )

else:
    # Fallback: Gradio only if SSE not available
    def main():
        """Run only the Gradio UI when the MCP/SSE dependencies are missing."""
        print("=" * 70)
        print("⚠️ SSE dependencies not installed. Running Gradio only.")
        print("To enable MCP over HTTP, install: pip install sse-starlette uvicorn")
        print("=" * 70)
        print("📊 Gradio UI: http://localhost:7860")
        print("=" * 70)

        gradio_interface.launch(
            server_name="0.0.0.0",
            server_port=7860,
            share=False
        )
207
+
208
# Script entry point.
if __name__ == "__main__":
    main()
agent.json CHANGED
@@ -1,11 +1,13 @@
1
  {
2
- "model": "Qwen/Qwen3-32B",
3
- "endpointUrl": "https://borodache-sentiment-analysis-mcp.huggingface.co/v1/",
4
- "servers": [
5
- {
6
- "type": "stdio",
7
- "command": "npx",
8
- "args": ["http://localhost:7860"]
9
- }
10
- ]
 
 
11
  }
 
1
  {
2
+ "model": "Qwen/Qwen3-32B",
3
+ "endpointUrl": "https://borodache-sentiment-analysis-mcp.huggingface.co/v1/",
4
+ "servers":
5
+ [
6
+ {
7
+ "type": "sse",
8
+ "url": "http://localhost:8000/sse",
9
+ "name": "sentiment-analysis",
10
+ "description": "Sentiment analysis using TextBlob"
11
+ }
12
+ ]
13
  }
requirements.txt CHANGED
@@ -2,3 +2,6 @@ textblob
2
  mcp
3
  gradio
4
  starlette
 
 
 
 
2
  mcp
3
  gradio
4
  starlette
5
+ sse-starlette
6
+ uvicorn