Alibrown commited on
Commit
36aad6f
Β·
verified Β·
1 Parent(s): d33822e

Delete app

Browse files
Files changed (10) hide show
  1. app/.pyfun +0 -313
  2. app/__init__.py +0 -1
  3. app/app.py +0 -224
  4. app/config.py +0 -293
  5. app/db_sync.py +0 -1
  6. app/mcp.py +0 -379
  7. app/models.py +0 -1
  8. app/polymarket.py +0 -611
  9. app/provider.py +0 -1
  10. app/tools.py +0 -1
app/.pyfun DELETED
@@ -1,313 +0,0 @@
1
- # =============================================================================
2
- # .pyfun β€” PyFundaments App Configuration
3
- # Single source of truth for app/* modules (provider, models, tools, hub)
4
- # Part of: Universal MCP Hub on PyFundaments
5
- # =============================================================================
6
- # RULES:
7
- # - All values in double quotes "value"
8
- # - NO secrets here! Keys stay in .env β†’ only ENV-VAR NAMES referenced here
9
- # - Comment-out unused sections with # β†’ keep structure, parsers need it!
10
- # - DO NOT DELETE headers or [X_END] β†’ parsers rely on these markers
11
- # - Empty/unused values: "" β†’ never leave bare =
12
- # =============================================================================
13
- # TIERS:
14
- # LAZY: fill [HUB] + one [LLM_PROVIDER.*] only β†’ works
15
- # NORMAL: + [SEARCH_PROVIDER.*] + [MODELS.*] β†’ works better
16
- # PRODUCTIVE: + [TOOLS] + [FALLBACK] + [HUB_LIMITS] β†’ full power
17
- # =============================================================================
18
- # DO NOT DELETE β€” file identifier used by all parsers
19
- [PYFUN_FILE = .pyfun]
20
-
21
- # =============================================================================
22
- # HUB β€” Core identity & transport config
23
- # =============================================================================
24
- [HUB]
25
- HUB_NAME = "Universal MCP Hub"
26
- HUB_VERSION = "1.0.0"
27
- HUB_DESCRIPTION = "Universal MCP Hub built on PyFundaments"
28
-
29
- # Transport: stdio (local/Claude Desktop) | sse (HuggingFace/Remote)
30
- # Override via ENV: MCP_TRANSPORT
31
- HUB_TRANSPORT = "stdio"
32
- HUB_HOST = "0.0.0.0"
33
- HUB_PORT = "7860"
34
-
35
- # App mode: mcp | app
36
- # Override via ENV: APP_MODE
37
- HUB_MODE = "mcp"
38
-
39
- # HuggingFace Space URL (used as HTTP-Referer for some APIs)
40
- HUB_SPACE_URL = ""
41
- [HUB_END]
42
-
43
- # =============================================================================
44
- # HUB_LIMITS β€” Request & retry behavior
45
- # =============================================================================
46
- [HUB_LIMITS]
47
- MAX_PARALLEL_REQUESTS = "5"
48
- RETRY_COUNT = "3"
49
- RETRY_DELAY_SEC = "2"
50
- REQUEST_TIMEOUT_SEC = "60"
51
- SEARCH_TIMEOUT_SEC = "30"
52
- [HUB_LIMITS_END]
53
-
54
- # =============================================================================
55
- # PROVIDERS β€” All external API providers
56
- # Secrets stay in .env! Only ENV-VAR NAMES are referenced here.
57
- # =============================================================================
58
- [PROVIDERS]
59
-
60
- # ── LLM Providers ─────────────────────────────────────────────────────────────
61
- [LLM_PROVIDERS]
62
-
63
- [LLM_PROVIDER.anthropic]
64
- active = "true"
65
- base_url = "https://api.anthropic.com/v1"
66
- env_key = "ANTHROPIC_API_KEY" # β†’ .env: ANTHROPIC_API_KEY=sk-ant-...
67
- api_version_header = "2023-06-01" # anthropic-version header
68
- default_model = "claude-haiku-4-5-20251001"
69
- models = "claude-opus-4-6, claude-sonnet-4-6, claude-haiku-4-5-20251001"
70
- fallback_to = "openrouter" # if this provider fails β†’ try next
71
- [LLM_PROVIDER.anthropic_END]
72
-
73
- [LLM_PROVIDER.gemini]
74
- active = "true"
75
- base_url = "https://generativelanguage.googleapis.com/v1beta"
76
- env_key = "GEMINI_API_KEY" # β†’ .env: GEMINI_API_KEY=...
77
- default_model = "gemini-2.0-flash"
78
- models = "gemini-2.0-flash, gemini-1.5-pro, gemini-1.5-flash"
79
- fallback_to = "openrouter"
80
- [LLM_PROVIDER.gemini_END]
81
-
82
- [LLM_PROVIDER.openrouter]
83
- active = "true"
84
- base_url = "https://openrouter.ai/api/v1"
85
- env_key = "OPENROUTER_API_KEY" # β†’ .env: OPENROUTER_API_KEY=sk-or-...
86
- default_model = "mistralai/mistral-7b-instruct"
87
- models = "openai/gpt-4o, meta-llama/llama-3-8b-instruct, mistralai/mistral-7b-instruct"
88
- fallback_to = "" # last in chain, no further fallback
89
- [LLM_PROVIDER.openrouter_END]
90
-
91
- [LLM_PROVIDER.huggingface]
92
- active = "true"
93
- base_url = "https://api-inference.huggingface.co/models"
94
- env_key = "HF_TOKEN" # β†’ .env: HF_TOKEN=hf_...
95
- default_model = "mistralai/Mistral-7B-Instruct-v0.3"
96
- models = "mistralai/Mistral-7B-Instruct-v0.3, meta-llama/Llama-3.3-70B-Instruct"
97
- fallback_to = ""
98
- [LLM_PROVIDER.huggingface_END]
99
-
100
- # ── Add more LLM providers below ──────────────────────────────────────────
101
- # [LLM_PROVIDER.mistral]
102
- # active = "false"
103
- # base_url = "https://api.mistral.ai/v1"
104
- # env_key = "MISTRAL_API_KEY"
105
- # default_model = "mistral-large-latest"
106
- # models = "mistral-large-latest, mistral-small-latest"
107
- # fallback_to = ""
108
- # [LLM_PROVIDER.mistral_END]
109
-
110
- # [LLM_PROVIDER.openai]
111
- # active = "false"
112
- # base_url = "https://api.openai.com/v1"
113
- # env_key = "OPENAI_API_KEY"
114
- # default_model = "gpt-4o"
115
- # models = "gpt-4o, gpt-4o-mini, gpt-3.5-turbo"
116
- # fallback_to = ""
117
- # [LLM_PROVIDER.openai_END]
118
-
119
- [LLM_PROVIDERS_END]
120
-
121
- # ── Search Providers ───────────────────────────────────────────────────────────
122
- [SEARCH_PROVIDERS]
123
-
124
- [SEARCH_PROVIDER.brave]
125
- active = "true"
126
- base_url = "https://api.search.brave.com/res/v1/web/search"
127
- env_key = "BRAVE_API_KEY" # β†’ .env: BRAVE_API_KEY=BSA...
128
- default_results = "5"
129
- max_results = "20"
130
- fallback_to = "tavily"
131
- [SEARCH_PROVIDER.brave_END]
132
-
133
- [SEARCH_PROVIDER.tavily]
134
- active = "true"
135
- base_url = "https://api.tavily.com/search"
136
- env_key = "TAVILY_API_KEY" # β†’ .env: TAVILY_API_KEY=tvly-...
137
- default_results = "5"
138
- max_results = "10"
139
- include_answer = "true" # AI-synthesized answer
140
- fallback_to = ""
141
- [SEARCH_PROVIDER.tavily_END]
142
-
143
- # ── Add more search providers below ───────────────────────────────────────
144
- # [SEARCH_PROVIDER.serper]
145
- # active = "false"
146
- # base_url = "https://google.serper.dev/search"
147
- # env_key = "SERPER_API_KEY"
148
- # fallback_to = ""
149
- # [SEARCH_PROVIDER.serper_END]
150
-
151
- [SEARCH_PROVIDERS_END]
152
-
153
- # ── Web / Action Providers (Webhooks, Bots, Social) ───────────────────────────
154
- # [WEB_PROVIDERS]
155
-
156
- # [WEB_PROVIDER.discord]
157
- # active = "false"
158
- # base_url = "https://discord.com/api/v10"
159
- # env_key = "BOT_TOKEN"
160
- # [WEB_PROVIDER.discord_END]
161
-
162
- # [WEB_PROVIDER.github]
163
- # active = "false"
164
- # base_url = "https://api.github.com"
165
- # env_key = "GITHUB_TOKEN"
166
- # [WEB_PROVIDER.github_END]
167
-
168
- # [WEB_PROVIDERS_END]
169
-
170
- [PROVIDERS_END]
171
-
172
- # =============================================================================
173
- # MODELS β€” Token & rate limits per model
174
- # Parser builds: MODELS[provider][model_name] β†’ limits dict
175
- # =============================================================================
176
- [MODELS]
177
-
178
- [MODEL.claude-opus-4-6]
179
- provider = "anthropic"
180
- context_tokens = "200000"
181
- max_output_tokens = "32000"
182
- requests_per_min = "5"
183
- requests_per_day = "300"
184
- cost_input_per_1k = "0.015" # USD β€” update as pricing changes
185
- cost_output_per_1k = "0.075"
186
- capabilities = "text, code, analysis, vision"
187
- [MODEL.claude-opus-4-6_END]
188
-
189
- [MODEL.claude-sonnet-4-6]
190
- provider = "anthropic"
191
- context_tokens = "200000"
192
- max_output_tokens = "16000"
193
- requests_per_min = "50"
194
- requests_per_day = "1000"
195
- cost_input_per_1k = "0.003"
196
- cost_output_per_1k = "0.015"
197
- capabilities = "text, code, analysis, vision"
198
- [MODEL.claude-sonnet-4-6_END]
199
-
200
- [MODEL.claude-haiku-4-5-20251001]
201
- provider = "anthropic"
202
- context_tokens = "200000"
203
- max_output_tokens = "8000"
204
- requests_per_min = "50"
205
- requests_per_day = "2000"
206
- cost_input_per_1k = "0.00025"
207
- cost_output_per_1k = "0.00125"
208
- capabilities = "text, code, fast"
209
- [MODEL.claude-haiku-4-5-20251001_END]
210
-
211
- [MODEL.gemini-2.0-flash]
212
- provider = "gemini"
213
- context_tokens = "1000000"
214
- max_output_tokens = "8192"
215
- requests_per_min = "15"
216
- requests_per_day = "1500"
217
- cost_input_per_1k = "0.00010"
218
- cost_output_per_1k = "0.00040"
219
- capabilities = "text, code, vision, audio"
220
- [MODEL.gemini-2.0-flash_END]
221
-
222
- [MODEL.gemini-1.5-pro]
223
- provider = "gemini"
224
- context_tokens = "2000000"
225
- max_output_tokens = "8192"
226
- requests_per_min = "2"
227
- requests_per_day = "50"
228
- cost_input_per_1k = "0.00125"
229
- cost_output_per_1k = "0.00500"
230
- capabilities = "text, code, vision, audio, long-context"
231
- [MODEL.gemini-1.5-pro_END]
232
-
233
- [MODEL.mistral-7b-instruct]
234
- provider = "openrouter"
235
- context_tokens = "32000"
236
- max_output_tokens = "4096"
237
- requests_per_min = "60"
238
- requests_per_day = "10000"
239
- cost_input_per_1k = "0.00006"
240
- cost_output_per_1k = "0.00006"
241
- capabilities = "text, code, fast, cheap"
242
- [MODEL.mistral-7b-instruct_END]
243
-
244
- [MODELS_END]
245
-
246
- # =============================================================================
247
- # TOOLS β€” Tool definitions + provider mapping
248
- # Tools are registered in mcp.py only if their provider ENV key exists!
249
- # =============================================================================
250
- [TOOLS]
251
-
252
- [TOOL.llm_complete]
253
- active = "true"
254
- description = "Send prompt to any configured LLM provider"
255
- provider_type = "llm"
256
- default_provider = "anthropic"
257
- timeout_sec = "60"
258
- [TOOL.llm_complete_END]
259
-
260
- [TOOL.web_search]
261
- active = "true"
262
- description = "Search the web via configured search provider"
263
- provider_type = "search"
264
- default_provider = "brave"
265
- timeout_sec = "30"
266
- [TOOL.web_search_END]
267
-
268
- [TOOL.db_query]
269
- active = "true"
270
- description = "Execute SELECT queries on connected database (read-only)"
271
- provider_type = "db"
272
- readonly = "true"
273
- timeout_sec = "10"
274
- [TOOL.db_query_END]
275
-
276
- # ── Future tools ──────────────────────────────────────────────────────────
277
- # [TOOL.image_gen]
278
- # active = "false"
279
- # description = "Generate images via configured provider"
280
- # provider_type = "image"
281
- # default_provider = ""
282
- # timeout_sec = "120"
283
- # [TOOL.image_gen_END]
284
-
285
- # [TOOL.code_exec]
286
- # active = "false"
287
- # description = "Execute sandboxed code snippets"
288
- # provider_type = "sandbox"
289
- # timeout_sec = "30"
290
- # [TOOL.code_exec_END]
291
-
292
- [TOOLS_END]
293
-
294
- # =============================================================================
295
- # DB_SYNC β€” Internal SQLite config for app/* IPC
296
- # This is NOT the cloud DB β€” that lives in .env β†’ DATABASE_URL
297
- # =============================================================================
298
- [DB_SYNC]
299
- SQLITE_PATH = "app/.hub_state.db" # internal state, never commit!
300
- SYNC_INTERVAL_SEC = "30" # how often to flush to SQLite
301
- MAX_CACHE_ENTRIES = "1000"
302
- [DB_SYNC_END]
303
-
304
- # =============================================================================
305
- # DEBUG β€” app/* debug behavior (fundaments debug stays in .env)
306
- # =============================================================================
307
- [DEBUG]
308
- DEBUG = "ON" # ON | OFF
309
- DEBUG_LEVEL = "FULL" # FULL | WARN | ERROR
310
- LOG_FILE = "hub_debug.log"
311
- LOG_REQUESTS = "true" # log every provider request
312
- LOG_RESPONSES = "false" # careful: may log sensitive data!
313
- [DEBUG_END]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app/__init__.py DELETED
@@ -1 +0,0 @@
1
-
 
 
app/app.py DELETED
@@ -1,224 +0,0 @@
1
- # =============================================================================
2
- # app/app.py
3
- # Universal MCP Hub (Sandboxed) - based on PyFundaments Architecture
4
- # Copyright 2026 - Volkan KΓΌcΓΌkbudak
5
- # Apache License V. 2 + ESOL 1.1
6
- # Repo: https://github.com/VolkanSah/Universal-MCP-Hub-sandboxed
7
- # =============================================================================
8
- # ARCHITECTURE NOTE:
9
- # This file is the Orchestrator of the sandboxed app/* layer.
10
- # It is ONLY started by main.py (the "Guardian").
11
- # All fundament services are injected via the `fundaments` dictionary.
12
- # Direct execution is blocked by design.
13
- #
14
- # SANDBOX RULES:
15
- # - fundaments dict is ONLY unpacked inside start_application()
16
- # - fundaments are NEVER stored globally or passed to other app/* modules
17
- # - app/* modules read their own config from app/.pyfun
18
- # - app/* internal state/IPC uses app/db_sync.py (SQLite) β€” NOT postgresql.py
19
- # - Secrets stay in .env β†’ Guardian reads them β†’ never touched by app/*
20
- # =============================================================================
21
-
22
- from quart import Quart, request, jsonify # async Flask β€” ASGI compatible
23
- import logging
24
- from hypercorn.asyncio import serve # ASGI server β€” async native, replaces waitress
25
- from hypercorn.config import Config # hypercorn config
26
- import threading # for future tools that need own threads
27
- import requests # sync HTTP for future tool workers
28
- import time
29
- from datetime import datetime
30
- import asyncio
31
- from typing import Dict, Any, Optional
32
-
33
- # =============================================================================
34
- # Import app/* modules β€” MINIMAL BUILD (uncomment when module is ready)
35
- # Each module reads its own config from app/.pyfun independently.
36
- # NO fundaments passed into these modules!
37
- # =============================================================================
38
- from . import mcp # MCP transport layer (SSE via Quart route)
39
- from . import config as app_config # app/.pyfun parser β€” used only in app/*
40
- # from . import providers # API provider registry β€” reads app/.pyfun
41
- # from . import models # Model config + token/rate limits β€” reads app/.pyfun
42
- # from . import tools # MCP tool definitions + provider mapping β€” reads app/.pyfun
43
- # from . import db_sync # Internal SQLite IPC β€” app/* state & communication
44
- # # db_sync β‰  postgresql.py! Cloud DB is Guardian-only.
45
-
46
- # Future modules (uncomment when ready):
47
- # from . import discord_api # Discord bot integration
48
- # from . import hf_hooks # HuggingFace Space hooks
49
- # from . import git_hooks # GitHub/GitLab webhook handler
50
- # from . import web_api # Generic REST API handler
51
-
52
- # =============================================================================
53
- # Loggers β€” one per module for clean log filtering
54
- # =============================================================================
55
- logger = logging.getLogger('application')
56
- logger_mcp = logging.getLogger('mcp')
57
- logger_config = logging.getLogger('config')
58
- # logger_tools = logging.getLogger('tools')
59
- # logger_providers = logging.getLogger('providers')
60
- # logger_models = logging.getLogger('models')
61
- # logger_db_sync = logging.getLogger('db_sync')
62
-
63
- # =============================================================================
64
- # Quart app instance
65
- # =============================================================================
66
- app = Quart(__name__)
67
- START_TIME = datetime.utcnow()
68
-
69
- # =============================================================================
70
- # Quart Routes
71
- # =============================================================================
72
-
73
- @app.route("/", methods=["GET"])
74
- async def health_check():
75
- """
76
- Health check endpoint.
77
- Used by HuggingFace Spaces and monitoring systems to verify the app is running.
78
- """
79
- uptime = datetime.utcnow() - START_TIME
80
- return jsonify({
81
- "status": "running",
82
- "service": "Universal MCP Hub",
83
- "uptime_seconds": int(uptime.total_seconds()),
84
- })
85
-
86
-
87
- @app.route("/api", methods=["POST"])
88
- async def api_endpoint():
89
- """
90
- Generic REST API endpoint for direct tool invocation.
91
- Accepts JSON: { "tool": "tool_name", "params": { ... } }
92
- Auth and validation handled by tools layer.
93
- """
94
- # TODO: implement tool dispatch via tools.invoke()
95
- data = await request.get_json()
96
- return jsonify({"status": "not_implemented", "received": data}), 501
97
-
98
-
99
- @app.route("/crypto", methods=["POST"])
100
- async def crypto_endpoint():
101
- """
102
- Encrypted API endpoint.
103
- Encryption handled by app/* layer β€” no direct fundaments access here.
104
- """
105
- # TODO: implement via app/* encryption wrapper
106
- data = await request.get_json()
107
- return jsonify({"status": "not_implemented"}), 501
108
-
109
-
110
- @app.route("/mcp", methods=["GET", "POST"])
111
- async def mcp_endpoint():
112
- """
113
- MCP SSE Transport endpoint β€” routed through Quart/hypercorn.
114
- All MCP traffic passes through here β€” enables interception, logging,
115
- auth checks, rate limiting, payload transformation before reaching MCP.
116
- """
117
- return await mcp.handle_request(request)
118
-
119
-
120
- # Future routes (uncomment when ready):
121
- # @app.route("/discord", methods=["POST"])
122
- # async def discord_interactions():
123
- # """Discord interactions endpoint β€” signature verification via discord_api module."""
124
- # pass
125
-
126
- # @app.route("/webhook/hf", methods=["POST"])
127
- # async def hf_webhook():
128
- # """HuggingFace Space event hooks."""
129
- # pass
130
-
131
- # @app.route("/webhook/git", methods=["POST"])
132
- # async def git_webhook():
133
- # """GitHub / GitLab webhook handler."""
134
- # pass
135
-
136
-
137
-
138
-
139
-
140
- # =============================================================================
141
- # Main entry point β€” called exclusively by Guardian (main.py)
142
- # =============================================================================
143
- async def start_application(fundaments: Dict[str, Any]) -> None:
144
- """
145
- Main entry point for the sandboxed app layer.
146
- Called exclusively by main.py after all fundament services are initialized.
147
-
148
- Args:
149
- fundaments: Dictionary of initialized services from Guardian (main.py).
150
- Services are unpacked here and NEVER stored globally or
151
- passed into other app/* modules.
152
- """
153
- logger.info("Application starting...")
154
-
155
- # =========================================================================
156
- # Unpack fundaments β€” ONLY here, NEVER elsewhere in app/*
157
- # These are the 6 fundament services from fundaments/*
158
- # =========================================================================
159
- config_service = fundaments["config"] # fundaments/config_handler.py
160
- db_service = fundaments["db"] # fundaments/postgresql.py β€” None if not configured
161
- encryption_service = fundaments["encryption"] # fundaments/encryption.py β€” None if keys not set
162
- access_control_service = fundaments["access_control"] # fundaments/access_control.py β€” None if no DB
163
- user_handler_service = fundaments["user_handler"] # fundaments/user_handler.py β€” None if no DB
164
- security_service = fundaments["security"] # fundaments/security.py β€” None if deps missing
165
-
166
- # --- Log active fundament services ---
167
- if encryption_service:
168
- logger.info("Encryption service active.")
169
-
170
- if user_handler_service and security_service:
171
- logger.info("Auth services active (user_handler + security).")
172
-
173
- if access_control_service and security_service:
174
- logger.info("Access control active.")
175
-
176
- if db_service and not user_handler_service:
177
- logger.info("Database-only mode active (e.g. ML pipeline).")
178
-
179
- if not db_service:
180
- logger.info("Database-free mode active (e.g. Discord bot, API client).")
181
-
182
- # =========================================================================
183
- # Initialize app/* internal services β€” MINIMAL BUILD
184
- # Uncomment each line when the module is ready!
185
- # =========================================================================
186
- # db_sync.initialize() # SQLite IPC store for app/* β€” unrelated to postgresql.py
187
- # providers.initialize() # reads app/.pyfun [LLM_PROVIDERS] [SEARCH_PROVIDERS]
188
- # models.initialize() # reads app/.pyfun [MODELS]
189
- # tools.initialize() # reads app/.pyfun [TOOLS]
190
-
191
- # --- Initialize MCP (registers tools, prepares SSE handler) ---
192
- await mcp.initialize()
193
-
194
- # --- Read PORT from app/.pyfun [HUB] ---
195
- port = int(app_config.get_hub().get("HUB_PORT", "7860"))
196
-
197
- # --- Configure hypercorn ---
198
- config = Config()
199
- config.bind = [f"0.0.0.0:{port}"]
200
-
201
- logger.info(f"Starting hypercorn on port {port}...")
202
- logger.info("All services running.")
203
-
204
- # --- Run hypercorn β€” blocks until shutdown ---
205
- await serve(app, config)
206
-
207
-
208
- # =============================================================================
209
- # Direct execution guard
210
- # =============================================================================
211
- if __name__ == '__main__':
212
- print("WARNING: Running app.py directly. Fundament modules might not be correctly initialized.")
213
- print("Please run 'python main.py' instead for proper initialization.")
214
-
215
- test_fundaments = {
216
- "config": None,
217
- "db": None,
218
- "encryption": None,
219
- "access_control": None,
220
- "user_handler": None,
221
- "security": None,
222
- }
223
-
224
- asyncio.run(start_application(test_fundaments))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app/config.py DELETED
@@ -1,293 +0,0 @@
1
- # =============================================================================
2
- # app/config.py
3
- # .pyfun parser for app/* modules
4
- # Universal MCP Hub (Sandboxed) - based on PyFundaments Architecture
5
- # Copyright 2026 - Volkan KΓΌcΓΌkbudak
6
- # Apache License V. 2 + ESOL 1.1
7
- # =============================================================================
8
- # USAGE in any app/* module:
9
- # from . import config
10
- # cfg = config.get()
11
- # providers = cfg["LLM_PROVIDERS"]
12
- # =============================================================================
13
- # USAGE
14
- # in providers.py
15
- # from . import config
16
-
17
- # active = config.get_active_llm_providers()
18
- # β†’ { "anthropic": { "base_url": "...", "env_key": "ANTHROPIC_API_KEY", ... }, ... }
19
- # =============================================================================
20
- # in models.py
21
- # from . import config
22
-
23
- # anthropic_models = config.get_models_for_provider("anthropic")
24
- # =============================================================================
25
- # in tools.py
26
- # from . import config
27
-
28
- # active_tools = config.get_active_tools()
29
- # =============================================================================
30
- import os
31
- import logging
32
- from typing import Dict, Any, Optional
33
-
34
- logger = logging.getLogger('app.config')
35
-
36
- # Path to .pyfun β€” lives in app/ next to this file
37
- PYFUN_PATH = os.path.join(os.path.dirname(__file__), ".pyfun")
38
-
39
- # Internal cache β€” loaded once at first get()
40
- _cache: Optional[Dict[str, Any]] = None
41
-
42
-
43
- def _parse_value(value: str) -> str:
44
- """Strip quotes and inline comments from a value."""
45
- value = value.strip()
46
- # Remove inline comment
47
- if " #" in value:
48
- value = value[:value.index(" #")].strip()
49
- # Strip surrounding quotes
50
- if value.startswith('"') and value.endswith('"'):
51
- value = value[1:-1]
52
- return value
53
-
54
-
55
- def _parse() -> Dict[str, Any]:
56
- """
57
- Parses the app/.pyfun file into a nested dictionary.
58
-
59
- Structure:
60
- [SECTION]
61
- [SUBSECTION]
62
- [BLOCK.name]
63
- key = "value"
64
- [BLOCK.name_END]
65
- [SUBSECTION_END]
66
- [SECTION_END]
67
-
68
- Returns nested dict:
69
- {
70
- "HUB": { "HUB_NAME": "...", ... },
71
- "LLM_PROVIDERS": {
72
- "anthropic": { "active": "true", "base_url": "...", ... },
73
- "gemini": { ... },
74
- },
75
- "MODELS": {
76
- "claude-opus-4-6": { "provider": "anthropic", ... },
77
- },
78
- ...
79
- }
80
- """
81
- if not os.path.isfile(PYFUN_PATH):
82
- logger.critical(f".pyfun not found at: {PYFUN_PATH}")
83
- raise FileNotFoundError(f".pyfun not found at: {PYFUN_PATH}")
84
-
85
- result: Dict[str, Any] = {}
86
-
87
- # Parser state
88
- section: Optional[str] = None # e.g. "HUB", "PROVIDERS"
89
- subsection: Optional[str] = None # e.g. "LLM_PROVIDERS"
90
- block_type: Optional[str] = None # e.g. "LLM_PROVIDER", "MODEL", "TOOL"
91
- block_name: Optional[str] = None # e.g. "anthropic", "claude-opus-4-6"
92
-
93
- with open(PYFUN_PATH, "r", encoding="utf-8") as f:
94
- for raw_line in f:
95
- line = raw_line.strip()
96
-
97
- # Skip empty lines and full-line comments
98
- if not line or line.startswith("#"):
99
- continue
100
-
101
- # Skip file identifier
102
- if line.startswith("[PYFUN_FILE"):
103
- continue
104
-
105
- # --- Block END markers (most specific first) ---
106
- if line.endswith("_END]") and "." in line:
107
- # e.g. [LLM_PROVIDER.anthropic_END] or [MODEL.claude-opus-4-6_END]
108
- block_type = None
109
- block_name = None
110
- continue
111
-
112
- if line.endswith("_END]") and not "." in line:
113
- # e.g. [LLM_PROVIDERS_END], [HUB_END], [MODELS_END]
114
- inner = line[1:-1].replace("_END", "")
115
- if subsection and inner == subsection:
116
- subsection = None
117
- elif section and inner == section:
118
- section = None
119
- continue
120
-
121
- # --- Block START markers ---
122
- if line.startswith("[") and line.endswith("]"):
123
- inner = line[1:-1]
124
-
125
- # Named block: [LLM_PROVIDER.anthropic] or [MODEL.claude-opus-4-6]
126
- if "." in inner:
127
- parts = inner.split(".", 1)
128
- block_type = parts[0] # e.g. LLM_PROVIDER, MODEL, TOOL
129
- block_name = parts[1] # e.g. anthropic, claude-opus-4-6
130
-
131
- # Determine which top-level key to store under
132
- if block_type == "LLM_PROVIDER":
133
- result.setdefault("LLM_PROVIDERS", {})
134
- result["LLM_PROVIDERS"].setdefault(block_name, {})
135
- elif block_type == "SEARCH_PROVIDER":
136
- result.setdefault("SEARCH_PROVIDERS", {})
137
- result["SEARCH_PROVIDERS"].setdefault(block_name, {})
138
- elif block_type == "WEB_PROVIDER":
139
- result.setdefault("WEB_PROVIDERS", {})
140
- result["WEB_PROVIDERS"].setdefault(block_name, {})
141
- elif block_type == "MODEL":
142
- result.setdefault("MODELS", {})
143
- result["MODELS"].setdefault(block_name, {})
144
- elif block_type == "TOOL":
145
- result.setdefault("TOOLS", {})
146
- result["TOOLS"].setdefault(block_name, {})
147
- continue
148
-
149
- # Subsection: [LLM_PROVIDERS], [SEARCH_PROVIDERS] etc.
150
- if section and not subsection:
151
- subsection = inner
152
- result.setdefault(inner, {})
153
- continue
154
-
155
- # Top-level section: [HUB], [PROVIDERS], [MODELS] etc.
156
- section = inner
157
- result.setdefault(inner, {})
158
- continue
159
-
160
- # --- Key = Value ---
161
- if "=" in line:
162
- key, _, val = line.partition("=")
163
- key = key.strip()
164
- val = _parse_value(val)
165
-
166
- # Strip provider prefix from key (e.g. "anthropic.base_url" β†’ "base_url")
167
- if block_name and key.startswith(f"{block_name}."):
168
- key = key[len(block_name) + 1:]
169
-
170
- # Store in correct location
171
- if block_type and block_name:
172
- if block_type == "LLM_PROVIDER":
173
- result["LLM_PROVIDERS"][block_name][key] = val
174
- elif block_type == "SEARCH_PROVIDER":
175
- result["SEARCH_PROVIDERS"][block_name][key] = val
176
- elif block_type == "WEB_PROVIDER":
177
- result["WEB_PROVIDERS"][block_name][key] = val
178
- elif block_type == "MODEL":
179
- result["MODELS"][block_name][key] = val
180
- elif block_type == "TOOL":
181
- result["TOOLS"][block_name][key] = val
182
- elif section:
183
- result[section][key] = val
184
-
185
- logger.info(f".pyfun loaded. Sections: {list(result.keys())}")
186
- return result
187
-
188
-
189
- def load() -> Dict[str, Any]:
190
- """Force (re)load of .pyfun β€” clears cache."""
191
- global _cache
192
- _cache = _parse()
193
- return _cache
194
-
195
-
196
- def get() -> Dict[str, Any]:
197
- """
198
- Returns parsed .pyfun config as nested dict.
199
- Loads and caches on first call β€” subsequent calls return cache.
200
- """
201
- global _cache
202
- if _cache is None:
203
- _cache = _parse()
204
- return _cache
205
-
206
-
207
- def get_section(section: str) -> Dict[str, Any]:
208
- """
209
- Returns a specific top-level section.
210
- Returns empty dict if section not found.
211
- """
212
- return get().get(section, {})
213
-
214
-
215
- def get_llm_providers() -> Dict[str, Any]:
216
- """Returns all LLM providers (active and inactive)."""
217
- return get().get("LLM_PROVIDERS", {})
218
-
219
-
220
- def get_active_llm_providers() -> Dict[str, Any]:
221
- """Returns only LLM providers where active = 'true'."""
222
- return {
223
- name: cfg
224
- for name, cfg in get_llm_providers().items()
225
- if cfg.get("active", "false").lower() == "true"
226
- }
227
-
228
-
229
- def get_search_providers() -> Dict[str, Any]:
230
- """Returns all search providers."""
231
- return get().get("SEARCH_PROVIDERS", {})
232
-
233
-
234
- def get_active_search_providers() -> Dict[str, Any]:
235
- """Returns only search providers where active = 'true'."""
236
- return {
237
- name: cfg
238
- for name, cfg in get_search_providers().items()
239
- if cfg.get("active", "false").lower() == "true"
240
- }
241
-
242
-
243
- def get_models() -> Dict[str, Any]:
244
- """Returns all model definitions."""
245
- return get().get("MODELS", {})
246
-
247
-
248
- def get_models_for_provider(provider_name: str) -> Dict[str, Any]:
249
- """Returns all models for a specific provider."""
250
- return {
251
- name: cfg
252
- for name, cfg in get_models().items()
253
- if cfg.get("provider", "") == provider_name
254
- }
255
-
256
-
257
- def get_tools() -> Dict[str, Any]:
258
- """Returns all tool definitions."""
259
- return get().get("TOOLS", {})
260
-
261
-
262
- def get_active_tools() -> Dict[str, Any]:
263
- """Returns only tools where active = 'true'."""
264
- return {
265
- name: cfg
266
- for name, cfg in get_tools().items()
267
- if cfg.get("active", "false").lower() == "true"
268
- }
269
-
270
-
271
- def get_hub() -> Dict[str, Any]:
272
- """Returns [HUB] section."""
273
- return get_section("HUB")
274
-
275
-
276
- def get_limits() -> Dict[str, Any]:
277
- """Returns [HUB_LIMITS] section."""
278
- return get_section("HUB_LIMITS")
279
-
280
-
281
- def get_db_sync() -> Dict[str, Any]:
282
- """Returns [DB_SYNC] section."""
283
- return get_section("DB_SYNC")
284
-
285
-
286
- def get_debug() -> Dict[str, Any]:
287
- """Returns [DEBUG] section."""
288
- return get_section("DEBUG")
289
-
290
-
291
- def is_debug() -> bool:
292
- """Returns True if DEBUG = 'ON' in .pyfun."""
293
- return get_debug().get("DEBUG", "OFF").upper() == "ON"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app/db_sync.py DELETED
@@ -1 +0,0 @@
1
-
 
 
app/mcp.py DELETED
@@ -1,379 +0,0 @@
1
- # =============================================================================
2
- # app/mcp.py
3
- # Universal MCP Hub (Sandboxed) - based on PyFundaments Architecture
4
- # Copyright 2026 - Volkan KΓΌcΓΌkbudak
5
- # Apache License V. 2 + ESOL 1.1
6
- # Repo: https://github.com/VolkanSah/Universal-MCP-Hub-sandboxed
7
- # =============================================================================
8
- # ARCHITECTURE NOTE:
9
- # This file lives exclusively in app/ and is ONLY started by app/app.py.
10
- # NO direct access to fundaments/*, .env, or Guardian (main.py).
11
- # All config comes from app/.pyfun via app/config.py.
12
- #
13
- # MCP SSE transport runs through Quart/hypercorn via /mcp route.
14
- # All MCP traffic can be intercepted, logged, and transformed in app.py
15
- # before reaching the MCP handler β€” this is by design.
16
- #
17
- # TOOL REGISTRATION PRINCIPLE:
18
- # Tools are only registered if their required ENV key exists.
19
- # No key = no tool = no crash. Server always starts, just with fewer tools.
20
- # ENV key NAMES come from app/.pyfun β€” values are never touched here.
21
- # =============================================================================
22
-
23
- import asyncio
24
- import logging
25
- import os
26
- from typing import Dict, Any
27
-
28
- from . import config as app_config # reads app/.pyfun β€” only config source for app/*
29
- # from . import polymarket
30
-
31
- logger = logging.getLogger('mcp')
32
-
33
- # Global MCP instance β€” initialized once via initialize()
34
- _mcp = None
35
-
36
-
37
- async def initialize() -> None:
38
- """
39
- Initializes the MCP instance and registers all tools.
40
- Called once by app/app.py during startup.
41
- No fundaments passed in β€” sandboxed.
42
- """
43
- global _mcp
44
-
45
- logger.info("MCP Hub initializing...")
46
-
47
- hub_cfg = app_config.get_hub()
48
-
49
- try:
50
- from mcp.server.fastmcp import FastMCP
51
- except ImportError:
52
- logger.critical("FastMCP not installed. Run: pip install mcp")
53
- raise
54
-
55
- _mcp = FastMCP(
56
- name=hub_cfg.get("HUB_NAME", "Universal MCP Hub"),
57
- instructions=(
58
- f"{hub_cfg.get('HUB_DESCRIPTION', 'Universal MCP Hub on PyFundaments')} "
59
- "Use list_active_tools to see what is currently available."
60
- )
61
- )
62
-
63
- # --- Register tools ---
64
- _register_llm_tools(_mcp)
65
- _register_search_tools(_mcp)
66
- # _register_db_tools(_mcp) # uncomment when db_sync is ready
67
- _register_system_tools(_mcp)
68
- _register_polymarket_tools(_mcp)
69
-
70
- logger.info("MCP Hub initialized.")
71
-
72
-
73
- async def handle_request(request) -> None:
74
- """
75
- Handles incoming MCP SSE requests routed through Quart /mcp endpoint.
76
- This is the interceptor point β€” add auth, logging, rate limiting here.
77
- """
78
- if _mcp is None:
79
- logger.error("MCP not initialized β€” call initialize() first.")
80
- from quart import jsonify
81
- return jsonify({"error": "MCP not initialized"}), 503
82
-
83
- # --- Interceptor hooks (add as needed) ---
84
- # logger.debug(f"MCP request: {request.method} {request.path}")
85
- # await _check_auth(request)
86
- # await _rate_limit(request)
87
- # await _log_payload(request)
88
-
89
- # --- Forward to FastMCP SSE handler ---
90
- return await _mcp.handle_sse(request)
91
-
92
-
93
- # =============================================================================
94
- # Tool registration helpers
95
- # =============================================================================
96
-
97
- def _register_llm_tools(mcp) -> None:
98
- """Register LLM tools based on active providers in app/.pyfun + ENV key check."""
99
- active = app_config.get_active_llm_providers()
100
-
101
- for name, cfg in active.items():
102
- env_key = cfg.get("env_key", "")
103
- if not env_key or not os.getenv(env_key):
104
- logger.info(f"LLM provider '{name}' skipped β€” ENV key '{env_key}' not set.")
105
- continue
106
-
107
- if name == "anthropic":
108
- import httpx
109
- _key = os.getenv(env_key)
110
- _api_ver = cfg.get("api_version_header", "2023-06-01")
111
- _base_url = cfg.get("base_url", "https://api.anthropic.com/v1")
112
- _def_model = cfg.get("default_model", "claude-haiku-4-5-20251001")
113
-
114
- @mcp.tool()
115
- async def anthropic_complete(
116
- prompt: str,
117
- model: str = _def_model,
118
- max_tokens: int = 1024
119
- ) -> str:
120
- """Send a prompt to Anthropic Claude."""
121
- async with httpx.AsyncClient() as client:
122
- r = await client.post(
123
- f"{_base_url}/messages",
124
- headers={
125
- "x-api-key": _key,
126
- "anthropic-version": _api_ver,
127
- "content-type": "application/json"
128
- },
129
- json={
130
- "model": model,
131
- "max_tokens": max_tokens,
132
- "messages": [{"role": "user", "content": prompt}]
133
- },
134
- timeout=60.0
135
- )
136
- r.raise_for_status()
137
- return r.json()["content"][0]["text"]
138
-
139
- logger.info(f"Tool registered: anthropic_complete (model: {_def_model})")
140
-
141
- elif name == "gemini":
142
- import httpx
143
- _key = os.getenv(env_key)
144
- _base_url = cfg.get("base_url", "https://generativelanguage.googleapis.com/v1beta")
145
- _def_model = cfg.get("default_model", "gemini-2.0-flash")
146
-
147
- @mcp.tool()
148
- async def gemini_complete(
149
- prompt: str,
150
- model: str = _def_model,
151
- max_tokens: int = 1024
152
- ) -> str:
153
- """Send a prompt to Google Gemini."""
154
- async with httpx.AsyncClient() as client:
155
- r = await client.post(
156
- f"{_base_url}/models/{model}:generateContent",
157
- params={"key": _key},
158
- json={
159
- "contents": [{"parts": [{"text": prompt}]}],
160
- "generationConfig": {"maxOutputTokens": max_tokens}
161
- },
162
- timeout=60.0
163
- )
164
- r.raise_for_status()
165
- return r.json()["candidates"][0]["content"]["parts"][0]["text"]
166
-
167
- logger.info(f"Tool registered: gemini_complete (model: {_def_model})")
168
-
169
- elif name == "openrouter":
170
- import httpx
171
- _key = os.getenv(env_key)
172
- _base_url = cfg.get("base_url", "https://openrouter.ai/api/v1")
173
- _def_model = cfg.get("default_model", "mistralai/mistral-7b-instruct")
174
- _referer = os.getenv("APP_URL", "https://huggingface.co")
175
-
176
- @mcp.tool()
177
- async def openrouter_complete(
178
- prompt: str,
179
- model: str = _def_model,
180
- max_tokens: int = 1024
181
- ) -> str:
182
- """Send a prompt via OpenRouter (100+ models)."""
183
- async with httpx.AsyncClient() as client:
184
- r = await client.post(
185
- f"{_base_url}/chat/completions",
186
- headers={
187
- "Authorization": f"Bearer {_key}",
188
- "HTTP-Referer": _referer,
189
- "content-type": "application/json"
190
- },
191
- json={
192
- "model": model,
193
- "max_tokens": max_tokens,
194
- "messages": [{"role": "user", "content": prompt}]
195
- },
196
- timeout=60.0
197
- )
198
- r.raise_for_status()
199
- return r.json()["choices"][0]["message"]["content"]
200
-
201
- logger.info(f"Tool registered: openrouter_complete (model: {_def_model})")
202
-
203
- elif name == "huggingface":
204
- import httpx
205
- _key = os.getenv(env_key)
206
- _base_url = cfg.get("base_url", "https://api-inference.huggingface.co/models")
207
- _def_model = cfg.get("default_model", "mistralai/Mistral-7B-Instruct-v0.3")
208
-
209
- @mcp.tool()
210
- async def hf_inference(
211
- prompt: str,
212
- model: str = _def_model,
213
- max_tokens: int = 512
214
- ) -> str:
215
- """Send a prompt to HuggingFace Inference API."""
216
- async with httpx.AsyncClient() as client:
217
- r = await client.post(
218
- f"{_base_url}/{model}/v1/chat/completions",
219
- headers={
220
- "Authorization": f"Bearer {_key}",
221
- "content-type": "application/json"
222
- },
223
- json={
224
- "model": model,
225
- "max_tokens": max_tokens,
226
- "messages": [{"role": "user", "content": prompt}]
227
- },
228
- timeout=120.0
229
- )
230
- r.raise_for_status()
231
- return r.json()["choices"][0]["message"]["content"]
232
-
233
- logger.info(f"Tool registered: hf_inference (model: {_def_model})")
234
-
235
- else:
236
- logger.info(f"LLM provider '{name}' has no tool handler yet β€” skipped.")
237
-
238
-
239
- def _register_search_tools(mcp) -> None:
240
- """Register search tools based on active providers in app/.pyfun + ENV key check."""
241
- active = app_config.get_active_search_providers()
242
-
243
- for name, cfg in active.items():
244
- env_key = cfg.get("env_key", "")
245
- if not env_key or not os.getenv(env_key):
246
- logger.info(f"Search provider '{name}' skipped β€” ENV key '{env_key}' not set.")
247
- continue
248
-
249
- if name == "brave":
250
- import httpx
251
- _key = os.getenv(env_key)
252
- _base_url = cfg.get("base_url", "https://api.search.brave.com/res/v1/web/search")
253
- _def_results = int(cfg.get("default_results", "5"))
254
- _max_results = int(cfg.get("max_results", "20"))
255
-
256
- @mcp.tool()
257
- async def brave_search(query: str, count: int = _def_results) -> str:
258
- """Search the web via Brave Search API."""
259
- async with httpx.AsyncClient() as client:
260
- r = await client.get(
261
- _base_url,
262
- headers={
263
- "Accept": "application/json",
264
- "X-Subscription-Token": _key
265
- },
266
- params={"q": query, "count": min(count, _max_results)},
267
- timeout=30.0
268
- )
269
- r.raise_for_status()
270
- results = r.json().get("web", {}).get("results", [])
271
- if not results:
272
- return "No results found."
273
- return "\n\n".join([
274
- f"{i}. {res.get('title', '')}\n {res.get('url', '')}\n {res.get('description', '')}"
275
- for i, res in enumerate(results, 1)
276
- ])
277
-
278
- logger.info("Tool registered: brave_search")
279
-
280
- elif name == "tavily":
281
- import httpx
282
- _key = os.getenv(env_key)
283
- _base_url = cfg.get("base_url", "https://api.tavily.com/search")
284
- _def_results = int(cfg.get("default_results", "5"))
285
- _incl_answer = cfg.get("include_answer", "true").lower() == "true"
286
-
287
- @mcp.tool()
288
- async def tavily_search(query: str, max_results: int = _def_results) -> str:
289
- """AI-optimized web search via Tavily."""
290
- async with httpx.AsyncClient() as client:
291
- r = await client.post(
292
- _base_url,
293
- json={
294
- "api_key": _key,
295
- "query": query,
296
- "max_results": max_results,
297
- "include_answer": _incl_answer
298
- },
299
- timeout=30.0
300
- )
301
- r.raise_for_status()
302
- data = r.json()
303
- parts = []
304
- if data.get("answer"):
305
- parts.append(f"Summary: {data['answer']}")
306
- for res in data.get("results", []):
307
- parts.append(
308
- f"- {res['title']}\n {res['url']}\n {res.get('content', '')[:200]}..."
309
- )
310
- return "\n\n".join(parts)
311
-
312
- logger.info("Tool registered: tavily_search")
313
-
314
- else:
315
- logger.info(f"Search provider '{name}' has no tool handler yet β€” skipped.")
316
-
317
-
318
- def _register_system_tools(mcp) -> None:
319
- """System tools β€” always registered, no ENV key required."""
320
-
321
- @mcp.tool()
322
- def list_active_tools() -> Dict[str, Any]:
323
- """Show active providers and configured integrations (key names only, never values)."""
324
- llm = app_config.get_active_llm_providers()
325
- search = app_config.get_active_search_providers()
326
- hub = app_config.get_hub()
327
- return {
328
- "hub": hub.get("HUB_NAME", "Universal MCP Hub"),
329
- "version": hub.get("HUB_VERSION", ""),
330
- "active_llm_providers": [n for n, c in llm.items() if os.getenv(c.get("env_key", ""))],
331
- "active_search_providers":[n for n, c in search.items() if os.getenv(c.get("env_key", ""))],
332
- }
333
- logger.info("Tool registered: list_active_tools")
334
-
335
- @mcp.tool()
336
- def health_check() -> Dict[str, str]:
337
- """Health check for monitoring and HuggingFace Spaces."""
338
- return {"status": "ok", "service": "Universal MCP Hub"}
339
- logger.info("Tool registered: health_check")
340
-
341
-
342
-
343
- # 3. Neue Funktion β€” analog zu _register_search_tools():
344
- def _register_polymarket_tools(mcp) -> None:
345
- """Polymarket tools β€” no ENV key needed, Gamma API is public."""
346
-
347
- @mcp.tool()
348
- async def get_markets(category: str = None, limit: int = 20) -> list:
349
- """Get active prediction markets, optional category filter."""
350
- return await polymarket.get_markets(category=category, limit=limit)
351
-
352
- @mcp.tool()
353
- async def trending_markets(limit: int = 10) -> list:
354
- """Get top trending markets by trading volume."""
355
- return await polymarket.trending_markets(limit=limit)
356
-
357
- @mcp.tool()
358
- async def analyze_market(market_id: str) -> dict:
359
- """LLM analysis of a single market. Fallback if no LLM key set."""
360
- return await polymarket.analyze_market(market_id)
361
-
362
- @mcp.tool()
363
- async def summary_report(category: str = None) -> dict:
364
- """Summary report for a category or all markets."""
365
- return await polymarket.summary_report(category=category)
366
-
367
- @mcp.tool()
368
- async def polymarket_cache_info() -> dict:
369
- """Cache status, available categories, LLM availability."""
370
- return await polymarket.get_cache_info()
371
-
372
- logger.info("Tools registered: polymarket (5 tools)")
373
-
374
-
375
- # =============================================================================
376
- # Direct execution guard
377
- # =============================================================================
378
- if __name__ == '__main__':
379
- print("WARNING: Run via main.py, not directly.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app/models.py DELETED
@@ -1 +0,0 @@
1
-
 
 
app/polymarket.py DELETED
@@ -1,611 +0,0 @@
1
- # =============================================================================
2
- # app/polymarket.py
3
- # Polymarket Analysis Tool β€” Single File Module
4
- # Part of: Universal MCP Hub (Sandboxed) - based on PyFundaments Architecture
5
- # Copyright 2026 - Volkan KΓΌcΓΌkbudak
6
- # Apache License V. 2 + ESOL 1.1
7
- # Repo: https://github.com/VolkanSah/Universal-MCP-Hub-sandboxed
8
- # =============================================================================
9
- # ARCHITECTURE NOTE:
10
- # This file lives exclusively in app/ and is ONLY registered by app/mcp.py.
11
- # NO direct access to fundaments/*, .env, or Guardian (main.py).
12
- # All config comes from app/.pyfun via app/config.py.
13
- #
14
- # LAZY INIT PRINCIPLE:
15
- # initialize() is called on first tool use β€” NOT during startup.
16
- # This keeps startup fast and avoids crashes if Gamma API is unreachable.
17
- #
18
- # SANDBOX RULES:
19
- # - SQLite cache is app/* internal state β€” NOT postgresql.py (Guardian-only)
20
- # - LLM calls are optional β€” gracefully skipped if no provider key is set
21
- # - No global state leaks outside this file
22
- # - Read-Only access to Polymarket β€” no transactions, no auth needed
23
- # =============================================================================
24
-
25
- import asyncio
26
- import aiosqlite
27
- import aiohttp
28
- import logging
29
- import json
30
- import os
31
- from datetime import datetime, timezone
32
- from typing import Optional
33
-
34
- logger = logging.getLogger("polymarket")
35
-
36
- # =============================================================================
37
- # Constants
38
- # =============================================================================
39
-
40
- GAMMA_API = "https://gamma-api.polymarket.com"
41
- CACHE_DB = "SQLITE_PATH"
42
- FETCH_INTERVAL = 300 # seconds β€” 5 min between API pulls
43
- MARKET_LIMIT = 100 # max markets per fetch (rate limit friendly)
44
- PRICE_DECIMALS = 2 # rounding for probability display
45
-
46
- # Gamma API uses its own tag/category system.
47
- # We map their tags to our simplified categories for filtering.
48
- CATEGORY_MAP = {
49
- "politics": ["politics", "elections", "government", "trump", "us-politics"],
50
- "crypto": ["crypto", "bitcoin", "ethereum", "defi", "web3"],
51
- "economics": ["economics", "inflation", "fed", "interest-rates", "finance"],
52
- "energy": ["energy", "oil", "gas", "renewables", "climate"],
53
- "tech": ["technology", "ai", "spacex", "elon-musk", "science"],
54
- "sports": ["sports", "football", "soccer", "nba", "nfl", "esports"],
55
- "world": ["world", "geopolitics", "war", "nato", "china", "russia"],
56
- }
57
-
58
- # =============================================================================
59
- # Internal State β€” lazy init guard
60
- # =============================================================================
61
-
62
- _initialized = False
63
- _scheduler_task = None # asyncio background task handle
64
-
65
- # =============================================================================
66
- # SECTION 1 β€” Init (Lazy)
67
- # =============================================================================
68
-
69
- async def initialize() -> None:
70
- """
71
- Lazy initializer β€” called on first tool use.
72
- Sets up SQLite cache and starts background scheduler.
73
- Idempotent β€” safe to call multiple times.
74
- """
75
- global _initialized, _scheduler_task
76
-
77
- if _initialized:
78
- return
79
-
80
- logger.info("Polymarket module initializing (lazy)...")
81
-
82
- await _init_cache()
83
-
84
- # Start background scheduler as asyncio task
85
- if _scheduler_task is None or _scheduler_task.done():
86
- _scheduler_task = asyncio.create_task(_scheduler_loop())
87
- logger.info(f"Scheduler started β€” fetching every {FETCH_INTERVAL}s.")
88
-
89
- _initialized = True
90
- logger.info("Polymarket module ready.")
91
-
92
-
93
- # =============================================================================
94
- # SECTION 2 β€” SQLite Cache (app/* internal, NOT postgresql.py!)
95
- # =============================================================================
96
-
97
- async def _init_cache() -> None:
98
- """Create SQLite tables if they don't exist."""
99
- async with aiosqlite.connect(CACHE_DB) as db:
100
- await db.execute("""
101
- CREATE TABLE IF NOT EXISTS markets (
102
- id TEXT PRIMARY KEY,
103
- slug TEXT,
104
- question TEXT,
105
- category TEXT,
106
- probability REAL,
107
- volume REAL,
108
- liquidity REAL,
109
- end_date TEXT,
110
- active INTEGER,
111
- data TEXT,
112
- fetched_at TEXT
113
- )
114
- """)
115
- await db.execute("""
116
- CREATE INDEX IF NOT EXISTS idx_category ON markets(category);
117
- """)
118
- await db.execute("""
119
- CREATE INDEX IF NOT EXISTS idx_active ON markets(active);
120
- """)
121
- await db.commit()
122
- logger.info("Cache initialized.")
123
-
124
-
125
- async def _store_markets(markets: list) -> None:
126
- """Upsert markets into SQLite cache."""
127
- if not markets:
128
- return
129
-
130
- now = datetime.now(timezone.utc).isoformat()
131
-
132
- async with aiosqlite.connect(CACHE_DB) as db:
133
- for m in markets:
134
- category = _categorize_market(m)
135
- prob = _extract_probability(m)
136
-
137
- await db.execute("""
138
- INSERT OR REPLACE INTO markets
139
- (id, slug, question, category, probability, volume, liquidity, end_date, active, data, fetched_at)
140
- VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
141
- """, (
142
- str(m.get("id", "")),
143
- m.get("slug", ""),
144
- m.get("question", ""),
145
- category,
146
- prob,
147
- float(m.get("volume", 0) or 0),
148
- float(m.get("liquidity", 0) or 0),
149
- m.get("endDate", ""),
150
- 1 if m.get("active", False) else 0,
151
- json.dumps(m),
152
- now,
153
- ))
154
- await db.commit()
155
-
156
- logger.info(f"Cached {len(markets)} markets.")
157
-
158
-
159
- async def _get_cached_markets(
160
- category: Optional[str] = None,
161
- active_only: bool = True,
162
- limit: int = 50
163
- ) -> list:
164
- """Retrieve markets from SQLite cache with optional filters."""
165
- async with aiosqlite.connect(CACHE_DB) as db:
166
- db.row_factory = aiosqlite.Row
167
-
168
- conditions = []
169
- params = []
170
-
171
- if active_only:
172
- conditions.append("active = 1")
173
-
174
- if category and category.lower() in CATEGORY_MAP:
175
- conditions.append("category = ?")
176
- params.append(category.lower())
177
-
178
- where = f"WHERE {' AND '.join(conditions)}" if conditions else ""
179
-
180
- params.append(limit)
181
- cursor = await db.execute(
182
- f"SELECT * FROM markets {where} ORDER BY volume DESC LIMIT ?",
183
- params
184
- )
185
- rows = await cursor.fetchall()
186
- return [dict(r) for r in rows]
187
-
188
-
189
- async def _get_market_by_id(market_id: str) -> Optional[dict]:
190
- """Retrieve a single market by ID from cache."""
191
- async with aiosqlite.connect(CACHE_DB) as db:
192
- db.row_factory = aiosqlite.Row
193
- cursor = await db.execute(
194
- "SELECT * FROM markets WHERE id = ?", (market_id,)
195
- )
196
- row = await cursor.fetchone()
197
- return dict(row) if row else None
198
-
199
-
200
- async def _get_cache_stats() -> dict:
201
- """Return basic cache statistics."""
202
- async with aiosqlite.connect(CACHE_DB) as db:
203
- cursor = await db.execute("SELECT COUNT(*) FROM markets WHERE active = 1")
204
- active = (await cursor.fetchone())[0]
205
- cursor = await db.execute("SELECT MAX(fetched_at) FROM markets")
206
- last = (await cursor.fetchone())[0]
207
- return {"active_markets": active, "last_fetch": last}
208
-
209
-
210
- # =============================================================================
211
- # SECTION 3 β€” Fetcher (Gamma API β€” Read Only, no auth)
212
- # =============================================================================
213
-
214
- async def _fetch_from_api(limit: int = MARKET_LIMIT) -> list:
215
- """
216
- Fetch active markets from Polymarket Gamma API.
217
- Read-Only β€” no auth, no transactions.
218
- """
219
- params = {
220
- "active": "true",
221
- "archived": "false",
222
- "closed": "false",
223
- "limit": limit,
224
- "order": "volume",
225
- "ascending":"false",
226
- }
227
-
228
- async with aiohttp.ClientSession() as session:
229
- async with session.get(
230
- f"{GAMMA_API}/markets",
231
- params=params,
232
- timeout=aiohttp.ClientTimeout(total=30)
233
- ) as resp:
234
- resp.raise_for_status()
235
- data = await resp.json()
236
- logger.info(f"Fetched {len(data)} markets from Gamma API.")
237
- return data
238
-
239
-
240
- async def _scheduler_loop() -> None:
241
- """
242
- Background task β€” polls Gamma API every FETCH_INTERVAL seconds.
243
- Runs indefinitely inside asyncio event loop.
244
- """
245
- logger.info("Scheduler loop started.")
246
-
247
- while True:
248
- try:
249
- markets = await _fetch_from_api()
250
- await _store_markets(markets)
251
- except aiohttp.ClientError as e:
252
- logger.warning(f"Gamma API fetch failed (network): {e}")
253
- except Exception as e:
254
- logger.error(f"Scheduler error: {e}")
255
-
256
- await asyncio.sleep(FETCH_INTERVAL)
257
-
258
-
259
- # =============================================================================
260
- # SECTION 4 β€” Filter & Categorization
261
- # =============================================================================
262
-
263
- def _categorize_market(market: dict) -> str:
264
- """
265
- Map a Polymarket market to our simplified category system.
266
- Uses tags array from Gamma API response.
267
- Falls back to 'other' if no match found.
268
- """
269
- tags = []
270
-
271
- # Gamma API returns tags as list of dicts or strings
272
- for t in market.get("tags", []):
273
- if isinstance(t, dict):
274
- tags.append(t.get("slug", "").lower())
275
- tags.append(t.get("label", "").lower())
276
- elif isinstance(t, str):
277
- tags.append(t.lower())
278
-
279
- # Also check question text for keywords
280
- question = market.get("question", "").lower()
281
-
282
- for category, keywords in CATEGORY_MAP.items():
283
- for kw in keywords:
284
- if kw in tags or kw in question:
285
- return category
286
-
287
- return "other"
288
-
289
-
290
- def _extract_probability(market: dict) -> float:
291
- """
292
- Extract YES probability from market data.
293
- Polymarket stores outcome prices as probability (0.0 - 1.0).
294
- Returns probability as percentage (0-100).
295
- """
296
- try:
297
- # outcomePrices is a JSON string like '["0.73", "0.27"]'
298
- prices = market.get("outcomePrices")
299
- if isinstance(prices, str):
300
- prices = json.loads(prices)
301
- if prices and len(prices) > 0:
302
- return round(float(prices[0]) * 100, PRICE_DECIMALS)
303
- except (ValueError, TypeError, json.JSONDecodeError):
304
- pass
305
- return 0.0
306
-
307
-
308
- def _format_market_simple(market: dict) -> dict:
309
- """
310
- Format a market for human-readable output.
311
- Used by all public tools for consistent output.
312
- """
313
- prob = market.get("probability", 0.0)
314
-
315
- # Simple plain-language probability label
316
- if prob >= 80:
317
- sentiment = "sehr wahrscheinlich"
318
- elif prob >= 60:
319
- sentiment = "wahrscheinlich"
320
- elif prob >= 40:
321
- sentiment = "ungewiss"
322
- elif prob >= 20:
323
- sentiment = "unwahrscheinlich"
324
- else:
325
- sentiment = "sehr unwahrscheinlich"
326
-
327
- return {
328
- "id": market.get("id"),
329
- "question": market.get("question"),
330
- "category": market.get("category"),
331
- "probability": f"{prob}%",
332
- "sentiment": sentiment,
333
- "volume_usd": f"${market.get('volume', 0):,.0f}",
334
- "liquidity": f"${market.get('liquidity', 0):,.0f}",
335
- "end_date": market.get("end_date", ""),
336
- "slug": market.get("slug", ""),
337
- "url": f"https://polymarket.com/event/{market.get('slug', '')}",
338
- }
339
-
340
-
341
- # =============================================================================
342
- # SECTION 5 β€” LLM Adapter (Optional β€” graceful fallback if no key)
343
- # =============================================================================
344
-
345
- async def _llm_analyze(prompt: str) -> Optional[str]:
346
- """
347
- Send prompt to available LLM provider.
348
- Checks for API keys in order: Anthropic β†’ OpenRouter β†’ HuggingFace.
349
- Returns None if no provider is available β€” caller handles fallback.
350
- """
351
- # --- Anthropic Claude ---
352
- anthropic_key = os.getenv("ANTHROPIC_API_KEY")
353
- if anthropic_key:
354
- try:
355
- async with aiohttp.ClientSession() as session:
356
- async with session.post(
357
- "https://api.anthropic.com/v1/messages",
358
- headers={
359
- "x-api-key": anthropic_key,
360
- "anthropic-version": "2023-06-01",
361
- "content-type": "application/json",
362
- },
363
- json={
364
- "model": "claude-haiku-4-5-20251001",
365
- "max_tokens": 512,
366
- "messages": [{"role": "user", "content": prompt}],
367
- },
368
- timeout=aiohttp.ClientTimeout(total=30)
369
- ) as resp:
370
- resp.raise_for_status()
371
- data = await resp.json()
372
- return data["content"][0]["text"]
373
- except Exception as e:
374
- logger.warning(f"Anthropic LLM call failed: {e}")
375
-
376
- # --- OpenRouter fallback ---
377
- openrouter_key = os.getenv("OPENROUTER_API_KEY")
378
- if openrouter_key:
379
- try:
380
- async with aiohttp.ClientSession() as session:
381
- async with session.post(
382
- "https://openrouter.ai/api/v1/chat/completions",
383
- headers={
384
- "Authorization": f"Bearer {openrouter_key}",
385
- "content-type": "application/json",
386
- },
387
- json={
388
- "model": "mistralai/mistral-7b-instruct",
389
- "max_tokens": 512,
390
- "messages": [{"role": "user", "content": prompt}],
391
- },
392
- timeout=aiohttp.ClientTimeout(total=30)
393
- ) as resp:
394
- resp.raise_for_status()
395
- data = await resp.json()
396
- return data["choices"][0]["message"]["content"]
397
- except Exception as e:
398
- logger.warning(f"OpenRouter LLM call failed: {e}")
399
-
400
- # --- HuggingFace fallback ---
401
- hf_key = os.getenv("HF_API_KEY")
402
- if hf_key:
403
- try:
404
- model = "mistralai/Mistral-7B-Instruct-v0.3"
405
- async with aiohttp.ClientSession() as session:
406
- async with session.post(
407
- f"https://api-inference.huggingface.co/models/{model}/v1/chat/completions",
408
- headers={
409
- "Authorization": f"Bearer {hf_key}",
410
- "content-type": "application/json",
411
- },
412
- json={
413
- "model": model,
414
- "max_tokens": 512,
415
- "messages": [{"role": "user", "content": prompt}],
416
- },
417
- timeout=aiohttp.ClientTimeout(total=60)
418
- ) as resp:
419
- resp.raise_for_status()
420
- data = await resp.json()
421
- return data["choices"][0]["message"]["content"]
422
- except Exception as e:
423
- logger.warning(f"HuggingFace LLM call failed: {e}")
424
-
425
- logger.info("No LLM provider available β€” returning None.")
426
- return None
427
-
428
-
429
- # =============================================================================
430
- # SECTION 6 β€” Public Tools (registered by mcp.py)
431
- # =============================================================================
432
-
433
- async def get_markets(
434
- category: Optional[str] = None,
435
- limit: int = 20
436
- ) -> list:
437
- """
438
- MCP Tool: Get active prediction markets from cache.
439
-
440
- Args:
441
- category: Filter by category. Options: politics, crypto, economics,
442
- energy, tech, sports, world, other. None = all categories.
443
- limit: Max number of markets to return (default 20, max 100).
444
-
445
- Returns:
446
- List of formatted market dicts with human-readable probability.
447
- """
448
- await initialize()
449
-
450
- limit = min(limit, 100)
451
- markets = await _get_cached_markets(category=category, limit=limit)
452
-
453
- if not markets:
454
- return [{"info": "No markets in cache yet. Try again in 30 seconds."}]
455
-
456
- return [_format_market_simple(m) for m in markets]
457
-
458
-
459
- async def trending_markets(limit: int = 10) -> list:
460
- """
461
- MCP Tool: Get top trending markets by trading volume.
462
-
463
- Args:
464
- limit: Number of trending markets to return (default 10).
465
-
466
- Returns:
467
- List of top markets sorted by volume descending.
468
- """
469
- await initialize()
470
-
471
- markets = await _get_cached_markets(active_only=True, limit=limit)
472
-
473
- if not markets:
474
- return [{"info": "No markets in cache yet. Try again in 30 seconds."}]
475
-
476
- return [_format_market_simple(m) for m in markets]
477
-
478
-
479
async def analyze_market(market_id: str) -> dict:
    """MCP Tool: Get LLM analysis of a single prediction market.

    Falls back to a structured data summary if no LLM key is configured.

    Args:
        market_id: Polymarket market ID from get_markets() results.

    Returns:
        Dict with formatted market data plus an "analysis" entry
        (LLM-generated text, or a structured fallback sentence).
    """
    await initialize()

    market = await _get_market_by_id(market_id)
    if not market:
        return {"error": f"Market '{market_id}' not found in cache."}

    formatted = _format_market_simple(market)

    # Build the (German) analysis prompt for the LLM.
    # Note: umlauts/arrows were mojibake-corrupted ("prΓ€zise") — restored to UTF-8.
    prompt = (
        f"Analysiere diesen Prediction Market kurz und präzise:\n\n"
        f"Frage: {market.get('question')}\n"
        f"Wahrscheinlichkeit YES: {formatted['probability']}\n"
        f"Handelsvolumen: {formatted['volume_usd']}\n"
        f"Kategorie: {market.get('category')}\n"
        f"Läuft bis: {market.get('end_date', 'unbekannt')}\n\n"
        f"Erkläre in 2-3 Sätzen was der Markt aussagt und was das für "
        f"Alltagsentscheidungen bedeuten könnte. Keine Finanzberatung."
    )

    analysis = await _llm_analyze(prompt)

    if analysis:
        formatted["analysis"] = analysis
    else:
        # Structured fallback — works without any LLM provider key.
        prob = market.get("probability", 0)
        formatted["analysis"] = (
            f"Der Markt bewertet '{market.get('question')}' mit "
            f"{prob}% Wahrscheinlichkeit. "
            f"Für eine KI-Analyse bitte LLM Provider API Key konfigurieren."
        )

    return formatted
525
-
526
-
527
async def summary_report(category: Optional[str] = None) -> dict:
    """MCP Tool: Generate a summary report for a category or all markets.

    Uses LLM if available, falls back to structured statistics.

    Args:
        category: Category to summarize. None = all active markets.

    Returns:
        Dict with statistics and optional LLM narrative summary ("narrative"
        key is present only when an LLM provider is configured).
    """
    await initialize()

    markets = await _get_cached_markets(category=category, limit=50)

    if not markets:
        return {"error": "No markets in cache yet. Try again in 30 seconds."}

    # --- Build statistics (always available, no LLM needed) ---
    # Use an explicit None check: a market at 0% is a legitimate value and
    # must count toward the average (truthiness would silently drop it).
    probs = [m["probability"] for m in markets if m.get("probability") is not None]
    avg_prob = round(sum(probs) / len(probs), 1) if probs else 0
    # `or 0` also guards against a key that is present but set to None.
    total_vol = sum(m.get("volume") or 0 for m in markets)

    # Top 3 by volume (assumes the cache is volume-sorted — TODO confirm).
    top3 = [_format_market_simple(m) for m in markets[:3]]

    stats = {
        "category": category or "all",
        "market_count": len(markets),
        "avg_probability": f"{avg_prob}%",
        "total_volume": f"${total_vol:,.0f}",
        "top_markets": top3,
        "generated_at": datetime.now(timezone.utc).isoformat(),
    }

    # --- LLM narrative (optional) ---
    market_list = "\n".join(
        f"- {m.get('question')} ({m.get('probability', 0)}% YES, "
        f"Vol: ${m.get('volume') or 0:,.0f})"
        for m in markets[:10]
    )

    # German prompt; mojibake ("fΓΌr", "SΓ€tze") restored to proper UTF-8.
    prompt = (
        f"Erstelle eine kurze Zusammenfassung (3-4 Sätze) der aktuellen "
        f"Prediction Market Lage{' für ' + category if category else ''}:\n\n"
        f"{market_list}\n\n"
        f"Was sind die auffälligsten Trends? Sachlich, keine Finanzberatung."
    )

    narrative = await _llm_analyze(prompt)
    if narrative:
        stats["narrative"] = narrative

    return stats
580
-
581
-
582
async def get_cache_info() -> dict:
    """MCP Tool: Get cache status and available categories.

    Useful for debugging and monitoring.

    Returns:
        Dict with cache stats, fetch interval, category list, the Gamma API
        endpoint, and whether any LLM provider key is configured.
    """
    await initialize()

    info = dict(await _get_cache_stats())
    info["fetch_interval_seconds"] = FETCH_INTERVAL
    info["available_categories"] = [*CATEGORY_MAP.keys(), "other"]
    info["gamma_api"] = GAMMA_API
    # An LLM backend counts as available if any supported provider key is set.
    info["llm_available"] = any(
        os.getenv(var)
        for var in ("ANTHROPIC_API_KEY", "OPENROUTER_API_KEY", "HF_API_KEY")
    )
    return info
605
-
606
-
607
# =============================================================================
# Direct execution guard
# =============================================================================
if __name__ == "__main__":
    # This module is wired up by the app entry chain; warn on direct runs.
    # (Arrows were mojibake-corrupted as "β†’" — restored to proper UTF-8.)
    print("WARNING: Run via main.py → app.py → mcp.py, not directly.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app/provider.py DELETED
@@ -1 +0,0 @@
1
-
 
 
app/tools.py DELETED
@@ -1 +0,0 @@
1
-