Spaces:
Sleeping
Sleeping
Upload 10 files
Browse files- app.py +522 -0
- crypto_news_scraper.py +48 -0
- crypto_news_scraper_server.py +30 -0
- hl_indicators.py +530 -0
- hl_indicators_server.py +325 -0
- hype_accounts.py +63 -0
- hype_accounts_server.py +59 -0
- memories.json +0 -0
- memory_utils.py +39 -0
- requirements.txt +11 -0
app.py
ADDED
|
@@ -0,0 +1,522 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app.py
|
| 2 |
+
import os
|
| 3 |
+
import re
|
| 4 |
+
import asyncio
|
| 5 |
+
from datetime import datetime
|
| 6 |
+
from zoneinfo import ZoneInfo
|
| 7 |
+
|
| 8 |
+
import gradio as gr
|
| 9 |
+
import pandas as pd
|
| 10 |
+
|
| 11 |
+
from dotenv import load_dotenv
|
| 12 |
+
from agents import Agent, Runner, trace, Tool
|
| 13 |
+
from agents.mcp import MCPServerStdio
|
| 14 |
+
|
| 15 |
+
# Your local helper modules
|
| 16 |
+
import hype_accounts_server
|
| 17 |
+
from memory_utils import load_memories, save_memory, load_memories_df
|
| 18 |
+
|
| 19 |
+
load_dotenv(override=True)
|
| 20 |
+
|
| 21 |
+
# === Time / Locale ===
SGT = ZoneInfo("Asia/Singapore")

def now_sgt():
    """Return the current wall-clock time as a timezone-aware datetime in SGT."""
    return datetime.now(tz=SGT)
+
# === MCP Server Factories ===
|
| 27 |
+
def make_hyperliquid_trader_mcp_servers():
    """Spawn the stdio MCP server that exposes Hyperliquid account/trading actions.

    Credentials are forwarded to the subprocess via its environment.
    Returns a single-element list so callers can treat it uniformly.
    """
    credentials = {
        "HYPERLIQUID_API_KEY": os.getenv("HYPERLIQUID_API_KEY"),
        "HYPERLIQUID_PRIVATE_KEY": os.getenv("HYPERLIQUID_PRIVATE_KEY"),
        "HYPERLIQUID_ACCOUNT_ADDRESS": os.getenv("HYPERLIQUID_ACCOUNT_ADDRESS"),
    }
    params = {
        "command": "python3",
        "args": ["-u", "hype_accounts_server.py"],
        "env": credentials,
    }
    server = MCPServerStdio(params, client_session_timeout_seconds=30)
    return [server]
| 38 |
+
def make_crypto_news_mcp_servers():
    """Spawn the scraper-based news MCP server (avoids paid news-API plan limits)."""
    params = {"command": "python3", "args": ["-u", "crypto_news_scraper_server.py"]}
    return [MCPServerStdio(params, client_session_timeout_seconds=30)]
|
| 45 |
+
def make_technical_analyst_mcp_servers():
    """Spawn the stdio MCP server that computes technical indicators on Hyperliquid data."""
    params = {"command": "python3", "args": ["-u", "hl_indicators_server.py"]}
    return [MCPServerStdio(params, client_session_timeout_seconds=30)]
| 51 |
+
# === Utils for MCP lifecycle ===
|
| 52 |
+
async def connect_all(servers):
    """Open each MCP server connection, one at a time, preserving order."""
    for server in servers:
        await server.connect()
|
| 56 |
+
async def close_all(servers):
    """Best-effort shutdown: close every server, ignoring individual failures.

    One failing close must not prevent the remaining servers from closing.
    """
    for server in servers:
        try:
            await server.close()
        except Exception:
            # Swallow deliberately; caller runs this in `finally` blocks.
            pass
|
| 63 |
+
# === Agent Builders ===
|
| 64 |
+
async def build_news_tool(news_servers) -> Tool:
    """Wrap a news-research agent as a callable Tool backed by the news MCP servers."""
    prompt_parts = [
        "You are a cryptocurrency researcher. You can search and summarise the most relevant, ",
        "recent crypto news. If the user asks about a specific coin (e.g., HYPE, BTC, ETH, XRP), ",
        "focus on that. Otherwise, highlight notable events and potential long/short opportunities. ",
        f"Current datetime (SGT): {now_sgt():%Y-%m-%d %H:%M:%S}.",
    ]
    researcher = Agent(
        name="Crypto news researcher",
        model="gpt-4.1-mini",
        instructions="".join(prompt_parts),
        mcp_servers=news_servers,
    )
    return researcher.as_tool(
        tool_name="crypto_news_researcher",
        tool_description="Research crypto news and opportunities for a coin or broad scan.",
    )
|
| 82 |
+
async def build_ta_tool(ta_servers) -> Tool:
    """Wrap a technical-analysis agent as a callable Tool backed by the TA MCP servers."""
    prompt_parts = [
        "You are a cryptocurrency perpetuals technical trading researcher.\n",
        "Default interval: 1h; default lookback: 36.\n",
        "Indicators: EMA(20,200), MACD(12,26,9), StochRSI(14,14,3,3), ADL, Volume.\n",
        "Given a coin/interval/lookback, compute indicator state, infer trend, and propose entries, ",
        "exits, and stop-loss/take-profit with reasoning.\n",
        f"Current datetime (SGT): {now_sgt():%Y-%m-%d %H:%M:%S}.",
    ]
    researcher = Agent(
        name="Crypto technical researcher",
        model="gpt-4.1-mini",
        instructions="".join(prompt_parts),
        mcp_servers=ta_servers,
    )
    return researcher.as_tool(
        tool_name="crypto_technical_researcher",
        tool_description="Run TA (EMA, MACD, StochRSI, ADL, Volume).",
    )
|
| 102 |
+
async def build_trader(hyper_servers, tools: list[Tool]) -> Agent:
    """Build the trading agent: Hyperliquid MCP servers for execution plus
    research tools (news/TA), primed with recent memories and account state.

    Args:
        hyper_servers: MCP servers that expose Hyperliquid trading actions.
        tools: Research tools (news, TA) attached to the agent.

    Returns:
        A configured Agent whose instructions embed the latest memories
        and a snapshot of the account.
    """
    # Pull short memory + balances so the agent can context-switch well
    past_memories = load_memories(5)
    memory_text = "\n".join(past_memories) if past_memories else "No prior memories."
    try:
        account_details = await hype_accounts_server.get_account_details()
    except Exception as e:
        # Non-fatal: the agent still works, it just sees a placeholder note.
        account_details = f"(Could not fetch account details: {e})"

    instructions = f"""
You are a cryptocurrency perpetuals trader that can:
- Query account balances/positions (via MCP servers on Hyperliquid).
- Do market/news research and TA using attached tools.
- Place long/short orders when the setup has clear edge. Transaction cost: 0.04%.
- If signals are unclear, do NOT trade.

Recent notes:
{memory_text}

Account state:
{account_details}

General rules:
- Prefer confluence: trend + momentum + volume/ADL agreement.
- Always suggest stop-loss and take-profit levels.
- Keep risk per trade modest. Avoid overtrading.
"""
    trader = Agent(
        name="crypto_trader",
        instructions=instructions,
        tools=tools,
        mcp_servers=hyper_servers,  # these expose trading actions
        model="gpt-4.1-mini",
    )
    return trader
|
| 138 |
+
# === Intent Routing ===
|
| 139 |
+
# User-facing help text shown for "help"/"commands" and in the fallback reply.
COMMAND_HELP = """\
You can ask in natural language, e.g.:
• "Balance" / "portfolio" — show Hyperliquid balances/positions
• "News on BTC and ETH" — market research
• "TA HYPE 1h lookback 48" — technical analysis
• "Long HYPE 500 at market, SL 2% TP 4%" — execute trade
• "Short BTC 0.01 at 68000, SL 69000 TP 66000" — limit order example
• "Summarize opportunities today" — broad scan (news + TA)
"""

# TA command: group(1)=coin, group(2)=optional interval like "1h"/"4H"/"1d",
# group(3)=optional integer lookback.
RE_TA = re.compile(r"\bTA\s+([A-Za-z0-9_\-]+)(?:\s+(\d+[mhHdD]))?(?:\s+lookback\s+(\d+))?", re.IGNORECASE)
# Trade commands: group(1)=coin, group(2)=size, group(3)="market"/"mkt" or a limit
# price, group(4)=stop-loss (price or "N%"), group(5)=take-profit (price or "N%").
RE_LONG = re.compile(r"\bLONG\s+([A-Za-z0-9_\-]+)\s+([\d.]+)(?:\s+at\s+(market|mkt|[\d.]+))?(?:.*?\bSL\s+([\d.%]+))?(?:.*?\bTP\s+([\d.%]+))?", re.IGNORECASE)
RE_SHORT = re.compile(r"\bSHORT\s+([A-Za-z0-9_\-]+)\s+([\d.]+)(?:\s+at\s+(market|mkt|[\d.]+))?(?:.*?\bSL\s+([\d.%]+))?(?:.*?\bTP\s+([\d.%]+))?", re.IGNORECASE)
|
| 153 |
+
def pct_or_price(s):
    """Parse a stop-loss/take-profit token into a tagged dict.

    Args:
        s: Raw token captured by the trade regexes, e.g. "2%" or "68000".
           May be None or empty when the user omitted SL/TP.

    Returns:
        {"type": "percent", "value": float} for "N%" input,
        {"type": "price", "value": float} for a plain number,
        or None when the token is missing or unparseable.
    """
    if not s:
        return None
    s = s.strip().lower()
    if s.endswith("%"):
        # Percent form: drop the trailing "%" and parse the number.
        try:
            return {"type": "percent", "value": float(s[:-1])}
        except ValueError:
            # Narrowed from a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; only a malformed number applies here.
            return None
    try:
        return {"type": "price", "value": float(s)}
    except ValueError:
        return None
|
| 167 |
+
# === Core Chatbot Handler ===
|
| 168 |
+
async def handle_message(message: str, history: list[tuple[str, str]]):
    """
    Routes user intent to: balance, news, TA, or trade execution.
    Returns markdown text.

    Intent precedence (first match wins): help -> balance -> TA -> trade ->
    news -> opportunity summary -> fallback help. MCP servers are created
    per-request and always closed in `finally`.
    """
    text = (message or "").strip()
    ts = now_sgt().strftime("%Y-%m-%d %H:%M:%S %Z")

    # Quick help
    if text.lower() in {"help", "/help", "commands"}:
        return f"### Commands\n{COMMAND_HELP}"

    # 1) Balance / portfolio
    if re.search(r"\b(balance|portfolio|positions?)\b", text, re.IGNORECASE):
        try:
            # Calls the helper module directly — no agent round-trip needed.
            acct = await hype_accounts_server.get_account_details()
            save_memory(f"[{now_sgt():%Y-%m-%d %H:%M:%S %Z}] User checked balance.")
            return format_account_for_chat(acct)
        except Exception as e:
            return f"❌ Error fetching account details: `{e}`"

    # 2) TA intent
    m = RE_TA.search(text)
    if m:
        coin = m.group(1).upper()
        interval = (m.group(2) or "1h").lower()
        lookback = int(m.group(3) or 36)

        news_servers = []  # not needed here
        ta_servers = []
        try:
            ta_servers = make_technical_analyst_mcp_servers()
            await connect_all(ta_servers)

            ta_tool = await build_ta_tool(ta_servers)

            # Build a "TA-only" agent so we don't touch trading MPC here
            researcher = Agent(
                name="crypto_ta_agent",
                instructions=f"Focus on TA for {coin} at interval {interval}, lookback {lookback}. Output indicator values and strategy.",
                tools=[ta_tool],
                model="gpt-4.1-mini",
            )

            prompt = f"Run TA for {coin} on {interval}, lookback {lookback}. Return indicators and actionable plan."
            with trace("crypto_ta"):
                result = await Runner.run(researcher, prompt, max_turns=12)

            save_memory(f"[{ts}] TA {coin} {interval} lookback {lookback}")
            return f"### 🔬 TA — {coin} ({interval}, lookback {lookback})\n\n{result.final_output}"
        except Exception as e:
            return f"❌ TA error: `{e}`"
        finally:
            # Always release the subprocess even on error/early return.
            await close_all(ta_servers)

    # 3) Trade intent (LONG / SHORT)
    mm = RE_LONG.search(text) or RE_SHORT.search(text)
    if mm:
        is_long = bool(RE_LONG.search(text))
        side = "LONG" if is_long else "SHORT"
        coin = mm.group(1).upper()
        qty = float(mm.group(2))
        at = mm.group(3)  # "market"/"mkt" or price
        sl_raw = mm.group(4)
        tp_raw = mm.group(5)
        sl = pct_or_price(sl_raw)
        tp = pct_or_price(tp_raw)

        price_desc = "market" if (at is None or str(at).lower() in {"market", "mkt"}) else at
        order_desc = f"{side} {coin} {qty} at {price_desc}"
        if sl: order_desc += f", SL {sl_raw}"
        if tp: order_desc += f", TP {tp_raw}"

        hyper_servers = []
        news_servers = []
        ta_servers = []
        try:
            # Tools available to the *trader*: news + TA
            news_servers = make_crypto_news_mcp_servers()
            ta_servers = make_technical_analyst_mcp_servers()
            hyper_servers = make_hyperliquid_trader_mcp_servers()

            # Connect the three server groups concurrently.
            await asyncio.gather(
                connect_all(news_servers),
                connect_all(ta_servers),
                connect_all(hyper_servers),
            )

            news_tool = await build_news_tool(news_servers)
            ta_tool = await build_ta_tool(ta_servers)

            trader = await build_trader(hyper_servers, [news_tool, ta_tool])

            # Natural-language trade instruction to the trader agent.
            trade_prompt = f"""
User requested: {order_desc}.
If safe and reasonable given risk rules, place the order via Hyperliquid MCP.
- If price specified (numeric), treat as limit; otherwise market.
- Always include stop-loss and take-profit (convert % to prices).
- Confirm the exact order(s) you placed and rationale in the output.
"""
            with trace("trade_execution"):
                result = await Runner.run(trader, trade_prompt, max_turns=20)

            save_memory(f"[{ts}] Executed: {order_desc}")
            return f"### 🧾 Execution — {order_desc}\n\n{result.final_output}"
        except Exception as e:
            return f"❌ Trade execution error: `{e}`"
        finally:
            await asyncio.gather(
                close_all(news_servers),
                close_all(ta_servers),
                close_all(hyper_servers),
            )

    # 4) News intent (e.g., "news on BTC", "what's happening to HYPE")
    if re.search(r"\b(news|headline|what's happening|what is happening|happening)\b", text, re.IGNORECASE):
        # Try to pick coins mentioned
        coins = re.findall(r"\b([A-Z]{2,6})\b", text.upper())
        coins = [c for c in coins if c not in {"NEWS", "HELP"}]
        topic = ", ".join(coins) if coins else "broad market"
        news_servers = []
        try:
            news_servers = make_crypto_news_mcp_servers()
            await connect_all(news_servers)
            news_tool = await build_news_tool(news_servers)

            researcher = Agent(
                name="crypto_news_agent",
                instructions=f"Focus news on: {topic}. Be concise and actionable.",
                tools=[news_tool],
                model="gpt-4.1-mini",
            )
            prompt = f"Summarize the most relevant crypto news for {topic}. Include potential trade angles."
            with trace("crypto_news"):
                result = await Runner.run(researcher, prompt, max_turns=12)

            save_memory(f"[{ts}] News requested: {topic}")
            return f"### 🗞️ News — {topic}\n\n{result.final_output}"
        except Exception as e:
            return f"❌ News error: `{e}`"
        finally:
            await close_all(news_servers)

    # 5) Summary scan (news + TA picks)
    if re.search(r"\b(opportunit|ideas|setup|summary|today)\b", text, re.IGNORECASE):
        hyper_servers = []
        news_servers = []
        ta_servers = []
        try:
            news_servers = make_crypto_news_mcp_servers()
            ta_servers = make_technical_analyst_mcp_servers()
            hyper_servers = make_hyperliquid_trader_mcp_servers()
            await asyncio.gather(
                connect_all(news_servers),
                connect_all(ta_servers),
                connect_all(hyper_servers),
            )
            news_tool = await build_news_tool(news_servers)
            ta_tool = await build_ta_tool(ta_servers)
            trader = await build_trader(hyper_servers, [news_tool, ta_tool])

            prompt = (
                "Step 1: Broad news scan for major catalysts.\n"
                "Step 2: Pick 3–5 coins with potential edges; run compact TA summary (1h, lookback 36).\n"
                "Step 3: Recommend 1–2 best setups with entry, SL, TP and rationale. Do NOT place orders."
            )
            with trace("daily_opportunities"):
                result = await Runner.run(trader, prompt, max_turns=24)

            save_memory(f"[{ts}] Opportunity summary requested.")
            return f"### 📌 Opportunities — {ts}\n\n{result.final_output}"
        except Exception as e:
            return f"❌ Summary error: `{e}`"
        finally:
            await asyncio.gather(
                close_all(news_servers),
                close_all(ta_servers),
                close_all(hyper_servers),
            )

    # Fallback: clarify + brief help
    return (
        "I can help with balance, news, TA, and trade execution.\n\n"
        + COMMAND_HELP
    )
|
| 355 |
+
# ---------- Pretty printing for account/positions ----------
|
| 356 |
+
|
| 357 |
+
from math import isnan
|
| 358 |
+
|
| 359 |
+
def _fnum(x, decimals=2):
|
| 360 |
+
try:
|
| 361 |
+
v = float(x)
|
| 362 |
+
return f"{v:,.{decimals}f}"
|
| 363 |
+
except Exception:
|
| 364 |
+
return str(x)
|
| 365 |
+
|
| 366 |
+
def _fpct(x, decimals=2):
|
| 367 |
+
try:
|
| 368 |
+
v = float(x) * 100 # input is ROE like 0.0036 -> 0.36%
|
| 369 |
+
sign = "🟢" if v > 0 else ("🔴" if v < 0 else "⚪️")
|
| 370 |
+
return f"{sign} {v:.{decimals}f}%"
|
| 371 |
+
except Exception:
|
| 372 |
+
return "—"
|
| 373 |
+
|
| 374 |
+
def _pnl(x, decimals=2):
|
| 375 |
+
try:
|
| 376 |
+
v = float(x)
|
| 377 |
+
sign = "🟢" if v > 0 else ("🔴" if v < 0 else "⚪️")
|
| 378 |
+
return f"{sign} ${abs(v):,.{decimals}f}"
|
| 379 |
+
except Exception:
|
| 380 |
+
return "—"
|
| 381 |
+
|
| 382 |
+
def _side_and_abs_size(szi):
|
| 383 |
+
try:
|
| 384 |
+
v = float(szi)
|
| 385 |
+
side = "LONG" if v > 0 else ("SHORT" if v < 0 else "FLAT")
|
| 386 |
+
return side, abs(v)
|
| 387 |
+
except Exception:
|
| 388 |
+
return "—", szi
|
| 389 |
+
|
| 390 |
+
def format_account_for_chat(acct: dict) -> str:
|
| 391 |
+
"""
|
| 392 |
+
Converts the get_account_details() dict into a nice Markdown summary.
|
| 393 |
+
"""
|
| 394 |
+
if not isinstance(acct, dict):
|
| 395 |
+
return f"```\n{acct}\n```"
|
| 396 |
+
|
| 397 |
+
holdings = acct.get("holdings", []) or []
|
| 398 |
+
cash = acct.get("cash_balance", "0")
|
| 399 |
+
realized_pnl = acct.get("profit_and_loss", None)
|
| 400 |
+
|
| 401 |
+
# Totals
|
| 402 |
+
total_pos_value = 0.0
|
| 403 |
+
total_margin_used = 0.0
|
| 404 |
+
total_upnl = 0.0
|
| 405 |
+
|
| 406 |
+
rows_md = []
|
| 407 |
+
for h in holdings:
|
| 408 |
+
pos = h.get("position", {})
|
| 409 |
+
coin = pos.get("coin", "—")
|
| 410 |
+
szi = pos.get("szi", 0)
|
| 411 |
+
side, abs_size = _side_and_abs_size(szi)
|
| 412 |
+
entry = pos.get("entryPx", "—")
|
| 413 |
+
pval = pos.get("positionValue", 0)
|
| 414 |
+
u = pos.get("unrealizedPnl", 0)
|
| 415 |
+
roe = pos.get("returnOnEquity", 0)
|
| 416 |
+
lev = pos.get("leverage", {})
|
| 417 |
+
lev_str = f"{lev.get('type','—')}×{lev.get('value','—')}"
|
| 418 |
+
liq = pos.get("liquidationPx", None)
|
| 419 |
+
m_used = pos.get("marginUsed", 0)
|
| 420 |
+
fund = pos.get("cumFunding", {}).get("sinceOpen", None)
|
| 421 |
+
|
| 422 |
+
# Totals
|
| 423 |
+
try: total_pos_value += float(pval)
|
| 424 |
+
except: pass
|
| 425 |
+
try: total_margin_used += float(m_used)
|
| 426 |
+
except: pass
|
| 427 |
+
try: total_upnl += float(u)
|
| 428 |
+
except: pass
|
| 429 |
+
|
| 430 |
+
rows_md.append(
|
| 431 |
+
f"| {coin} | {side} | {_fnum(abs_size, 6)} | ${_fnum(entry, 2)} | ${_fnum(pval, 2)} | {_pnl(u, 2)} | {_fpct(roe, 2)} | {lev_str} | {('—' if liq in (None, 'None') else '$'+_fnum(liq, 2))} | ${_fnum(m_used, 2)} | {('—' if fund in (None, 'None') else _fnum(fund, 6))} |"
|
| 432 |
+
)
|
| 433 |
+
|
| 434 |
+
header = (
|
| 435 |
+
"### 📊 Account / Positions\n"
|
| 436 |
+
f"- **Cash balance:** ${_fnum(cash, 2)}\n"
|
| 437 |
+
f"- **Total pos. value:** ${_fnum(total_pos_value, 2)}\n"
|
| 438 |
+
f"- **Unrealized PnL:** {_pnl(total_upnl, 2)}\n"
|
| 439 |
+
f"- **Margin used (total):** ${_fnum(total_margin_used, 2)}\n"
|
| 440 |
+
)
|
| 441 |
+
if realized_pnl is not None:
|
| 442 |
+
header += f"- **Realized PnL (session/period):** {_pnl(realized_pnl, 2)}\n"
|
| 443 |
+
|
| 444 |
+
table_head = (
|
| 445 |
+
"\n| Coin | Side | Size | Entry Px | Pos. Value | uPnL | ROE | Leverage | Liq Px | Margin Used | Funding (since open) |\n"
|
| 446 |
+
"|---|---:|---:|---:|---:|---:|---:|---:|---:|---:|---:|\n"
|
| 447 |
+
)
|
| 448 |
+
table_body = "\n".join(rows_md) if rows_md else "_No open positions_"
|
| 449 |
+
|
| 450 |
+
return header + table_head + table_body
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
# === Gradio UI ===
|
| 454 |
+
# Gradio Blocks UI: chat front-end for handle_message plus quick-action buttons
# and a collapsible view of recent memory entries.
with gr.Blocks(fill_height=True) as demo:
    gr.Markdown("# 🤖 Crypto Trading Copilot")
    gr.Markdown(
        f"Local time: **{now_sgt():%Y-%m-%d %H:%M:%S %Z}** \n"
        "[OpenAI Traces](https://platform.openai.com/logs?api=traces) · "
        "[Hyperliquid](https://app.hyperliquid.xyz/trade)"
    )

    # Quick-action buttons mirror the most common chat commands.
    with gr.Row():
        quick1 = gr.Button("📊 Balance")
        quick2 = gr.Button("🗞️ News: BTC, ETH")
        quick3 = gr.Button("🔬 TA: HYPE 1h")
        quick4 = gr.Button("🧾 Long HYPE 500 @ market (SL 2% TP 4%)")

    # type="messages": history is a list of {"role", "content"} dicts.
    chatbot = gr.Chatbot(height=480, type="messages", show_copy_button=True)
    user_in = gr.Textbox(placeholder="Try: TA HYPE 1h lookback 48 • News on BTC • Long HYPE 500 at market, SL 2% TP 4%", scale=1)
    send_btn = gr.Button("Send", variant="primary")

    with gr.Accordion("Memory (last 10)", open=False):
        mem_table = gr.Dataframe(value=load_memories_df(10), interactive=False, wrap=True, show_label=False)

    # Chat handler: returns (updated history, cleared textbox, refreshed memory table).
    # NOTE(review): `chat_state + [...]` assumes chat_state is a list (the Chatbot's
    # initial value); a None state would raise — confirm against Gradio behavior.
    async def _respond(user_msg, chat_state):
        bot_md = await handle_message(user_msg, chat_state or [])
        # Log short memory line
        save_memory(f"[{now_sgt():%Y-%m-%d %H:%M}] {user_msg[:80]}")
        # Update display memory table
        latest_mem = load_memories_df(10)
        return chat_state + [{"role":"user","content":user_msg},{"role":"assistant","content":bot_md}], "", latest_mem

    send_btn.click(_respond, inputs=[user_in, chatbot], outputs=[chatbot, user_in, mem_table])
    user_in.submit(_respond, inputs=[user_in, chatbot], outputs=[chatbot, user_in, mem_table])

    # Quick actions: each sends a canned message through the same pipeline.
    async def _qa_balance(chat_state):
        msg = "balance"
        bot_md = await handle_message(msg, chat_state or [])
        save_memory(f"[{now_sgt():%Y-%m-%d %H:%M}] Quick: balance")
        latest_mem = load_memories_df(10)
        return chat_state + [{"role":"user","content":msg},{"role":"assistant","content":bot_md}], latest_mem

    async def _qa_news(chat_state):
        msg = "news on BTC and ETH"
        bot_md = await handle_message(msg, chat_state or [])
        save_memory(f"[{now_sgt():%Y-%m-%d %H:%M}] Quick: news BTC ETH")
        latest_mem = load_memories_df(10)
        return chat_state + [{"role":"user","content":msg},{"role":"assistant","content":bot_md}], latest_mem

    async def _qa_ta(chat_state):
        msg = "TA HYPE 1h lookback 48"
        bot_md = await handle_message(msg, chat_state or [])
        save_memory(f"[{now_sgt():%Y-%m-%d %H:%M}] Quick: TA HYPE")
        latest_mem = load_memories_df(10)
        return chat_state + [{"role":"user","content":msg},{"role":"assistant","content":bot_md}], latest_mem

    async def _qa_long(chat_state):
        msg = "Long HYPE 500 at market, SL 2% TP 4%"
        bot_md = await handle_message(msg, chat_state or [])
        save_memory(f"[{now_sgt():%Y-%m-%d %H:%M}] Quick: long HYPE")
        latest_mem = load_memories_df(10)
        return chat_state + [{"role":"user","content":msg},{"role":"assistant","content":bot_md}], latest_mem

    quick1.click(_qa_balance, inputs=[chatbot], outputs=[chatbot, mem_table])
    quick2.click(_qa_news, inputs=[chatbot], outputs=[chatbot, mem_table])
    quick3.click(_qa_ta, inputs=[chatbot], outputs=[chatbot, mem_table])
    quick4.click(_qa_long, inputs=[chatbot], outputs=[chatbot, mem_table])
| 520 |
+
if __name__ == "__main__":
    # Launch the Gradio app with request queueing enabled.
    # No deprecated args; queue() OK without concurrency_count
    demo.queue().launch()
|
crypto_news_scraper.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import feedparser
|
| 3 |
+
import html
|
| 4 |
+
import re
|
| 5 |
+
|
| 6 |
+
def scrape_crypto_news(urls: list = None, num_entries: int = 10) -> pd.DataFrame:
    """Scrape recent articles from crypto RSS feeds into a DataFrame.

    Args:
        urls: RSS feed URLs; defaults to Cointelegraph/CryptoPotato/CryptoNews.
        num_entries: Max entries to keep per feed (defaults to 10 when None).

    Returns:
        DataFrame with columns: title, link, published, description, source.
        Feeds that fail to download/parse are skipped with a warning instead
        of aborting the whole scrape.
    """
    if urls is None:
        urls = ["https://cointelegraph.com/rss","https://cryptopotato.com/feed/","https://cryptonews.com/news/feed/"]
    if num_entries is None:
        num_entries = 10

    # Hoisted out of the URL loop (was redefined on every iteration).
    def get_text(entry):
        # Prefer full content, then summary, then description.
        if "content" in entry and entry.content:
            for c in entry.content:
                if c.get("value"):
                    return c["value"]
        if entry.get("summary"):
            return entry["summary"]
        if entry.get("description"):
            return entry["description"]
        return ""

    news_dict = {}
    for url in urls:
        try:
            feed = feedparser.parse(url, request_headers={
                "User-Agent": "Mozilla/5.0 (CryptoNewsBot; +https://example.com)"
            })

            # Check if the feed was parsed correctly
            if getattr(feed, "bozo", 0):
                print("[warn] bozo_exception:", getattr(feed, "bozo_exception", None))

            for e in feed.entries[:num_entries]:
                title = e.get("title", "").strip()
                link = e.get("link", "")
                raw_html = get_text(e)
                # Strip HTML tags and collapse whitespace into plain text.
                plain = re.sub(r"<[^>]+>", " ", html.unescape(raw_html))
                plain = re.sub(r"\s+", " ", plain).strip()
                pub = e.get("published", "")
                # Keyed by title: a duplicate title across feeds keeps the last seen.
                news_dict[title] = {"link": link, "published": pub, "description": plain, "source": url}

        except Exception as exc:
            # Was a silent bare `except: pass`; keep best-effort semantics but
            # report which feed failed instead of hiding every error.
            print(f"[warn] failed to scrape {url}: {exc}")

    if not news_dict:
        # Stable schema even when every feed failed or returned nothing.
        return pd.DataFrame(columns=["title", "link", "published", "description", "source"])

    news_df = pd.DataFrame(news_dict).T
    news_df = news_df.reset_index().rename(columns={"index":"title"})
    return news_df
|
crypto_news_scraper_server.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from mcp.server.fastmcp import FastMCP
|
| 2 |
+
from crypto_news_scraper import scrape_crypto_news
|
| 3 |
+
import pandas as pd
|
| 4 |
+
from dotenv import load_dotenv
|
| 5 |
+
load_dotenv(override=True)
|
| 6 |
+
|
| 7 |
+
mcp = FastMCP("cryptopanic_news_server")
|
| 8 |
+
|
| 9 |
+
@mcp.tool()
async def get_crypto_news(urls: list = None, num_entries: int = 10) -> str:
    """Scrape and aggregate the latest cryptocurrency news articles from multiple RSS feeds.

    Args:
        urls: Optional list of RSS feed URLs to scrape.
            - Accepts a list of strings, each being a valid RSS feed URL.
            - Example: ["https://cointelegraph.com/rss", "https://cryptopotato.com/feed/"].
            - If set to None, defaults to:
                [
                    "https://cointelegraph.com/rss",
                    "https://cryptopotato.com/feed/",
                    "https://cryptonews.com/news/feed/"
                ].
        num_entries: The maximum number of news entries to retrieve per feed.
            - Defaults to 10.
            - Example: setting num_entries=5 retrieves the 5 most recent articles per source.
    """
    # BUG FIX: previously hard-coded scrape_crypto_news(urls=None, num_entries=10),
    # silently ignoring both caller-supplied arguments. Forward them instead.
    return scrape_crypto_news(urls=urls, num_entries=num_entries).to_string()
| 29 |
+
if __name__ == "__main__":
    # Serve the tool over stdio so an MCP client can spawn this as a subprocess.
    mcp.run(transport='stdio')
|
hl_indicators.py
ADDED
|
@@ -0,0 +1,530 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# hl_indicators.py
|
| 2 |
+
# pip install: hyperliquid-python-sdk pandas numpy
|
| 3 |
+
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
from typing import Dict, Any, List, Tuple, Literal, Iterable
|
| 6 |
+
import time
|
| 7 |
+
import numpy as np
|
| 8 |
+
import pandas as pd
|
| 9 |
+
|
| 10 |
+
from hyperliquid.info import Info
|
| 11 |
+
from hyperliquid.utils import constants
|
| 12 |
+
|
| 13 |
+
Interval = Literal["1m", "5m", "15m", "1h", "4h", "1d"]
|
| 14 |
+
|
| 15 |
+
_MS = {"1m": 60_000, "5m": 5*60_000, "15m": 15*60_000, "1h": 60*60_000, "4h": 4*60*60_000, "1d": 24*60*60_000}
|
| 16 |
+
|
| 17 |
+
def _now_ms() -> int:
|
| 18 |
+
return int(time.time() * 1000)
|
| 19 |
+
|
| 20 |
+
def _start_end_from_limit(interval: Interval, limit: int, end_ms: int | None = None) -> tuple[int, int]:
    """Derive a (start_ms, end_ms) window wide enough to hold `limit` candles.

    Two extra candle-widths of headroom are added so smoothing windows near
    the start of the range have data to draw on. When `end_ms` is falsy the
    window ends at the current time.
    """
    if not end_ms:
        end_ms = _now_ms()
    window = _MS[interval] * (limit + 2)
    return max(0, end_ms - window), end_ms
|
| 25 |
+
|
| 26 |
+
# ---------------- Data fetch via candles_snapshot ---------------- #
|
| 27 |
+
|
| 28 |
+
def fetch_candles(
    name: str,
    interval: Interval = "1h",
    limit: int = 600,
    testnet: bool = True,
    end_ms: int | None = None,
) -> pd.DataFrame:
    """
    Fetch OHLCV candles via Info.candles_snapshot(name, interval, startTime, endTime).
    Returns DataFrame with ['timestamp','open','high','low','close','volume'] sorted by time.

    Args:
        name: Coin symbol (e.g. "BTC").
        interval: Candle interval key; must exist in _MS.
        limit: Maximum number of candles returned (the time window fetched
            is slightly wider; excess rows are trimmed from the front).
        testnet: Use the Hyperliquid testnet endpoint when True, mainnet otherwise.
        end_ms: Optional end of the window in epoch ms; defaults to "now".

    Raises:
        ValueError: if the API returns no candles, or the payload is missing
            one of the required OHLCV columns.
    """
    # A fresh Info client is built per call; websocket support is skipped
    # since only the REST snapshot endpoint is used here.
    api_url = constants.TESTNET_API_URL if testnet else constants.MAINNET_API_URL
    info = Info(api_url, skip_ws=True)

    start_ms, end_ms = _start_end_from_limit(interval, limit, end_ms)
    raw = info.candles_snapshot(name, interval, start_ms, end_ms)
    if not raw:
        raise ValueError(f"No candles returned for {name} {interval}")

    # The API uses single-letter keys; map them to descriptive column names.
    df = pd.DataFrame(raw).rename(columns={
        "t": "timestamp", "o": "open", "h": "high", "l": "low", "c": "close", "v": "volume",
        "T": "close_time", "i": "interval", "s": "symbol", "n": "trades",
    })

    # Fail fast if the payload shape changed upstream.
    needed = ["timestamp", "open", "high", "low", "close", "volume"]
    for k in needed:
        if k not in df.columns:
            raise ValueError(f"Missing '{k}' in candles_snapshot payload. Got: {list(df.columns)}")

    # Values arrive as strings/ints; coerce so bad rows become NaN and can be dropped.
    df["timestamp"] = pd.to_datetime(df["timestamp"], unit="ms", errors="coerce")
    for k in ["open","high","low","close","volume"]:
        df[k] = pd.to_numeric(df[k], errors="coerce")

    df = df.dropna(subset=["timestamp","close"]).sort_values("timestamp").reset_index(drop=True)
    # Keep only the most recent `limit` rows (the fetch window is padded).
    if len(df) > limit:
        df = df.iloc[-limit:].reset_index(drop=True)
    return df
|
| 65 |
+
|
| 66 |
+
# ---------------- Base indicators ---------------- #
|
| 67 |
+
|
| 68 |
+
def ema(series: pd.Series, period: int) -> pd.Series:
    """Exponential moving average of `series` using span=`period`."""
    smoother = series.ewm(span=period, adjust=False)
    return smoother.mean()
|
| 70 |
+
|
| 71 |
+
def macd(series: pd.Series, fast: int = 12, slow: int = 26, signal: int = 9) -> Tuple[pd.Series, pd.Series, pd.Series]:
    """MACD line, signal line and histogram for `series`.

    Line = EMA(fast) - EMA(slow); signal = EMA(line, signal);
    histogram = line - signal.
    """
    line = ema(series, fast) - ema(series, slow)
    signal_line = ema(line, signal)
    histogram = line - signal_line
    return line, signal_line, histogram
|
| 77 |
+
|
| 78 |
+
def rsi(series: pd.Series, period: int = 14) -> pd.Series:
    """Wilder-style RSI (EWM smoothing with alpha = 1/period), range 0-100.

    Bug fix: the previous version filled the undefined division-by-zero
    case with 0, so a window with no losses at all reported RSI 0 — the
    opposite of the correct reading. Zero average loss with positive
    average gain now maps to 100; a fully flat window (no gains and no
    losses, including the warm-up bar) still maps to 0 as before.
    """
    delta = series.diff()
    gains = delta.clip(lower=0.0).fillna(0.0)
    losses = (-delta).clip(lower=0.0).fillna(0.0)
    avg_up = gains.ewm(alpha=1 / period, adjust=False).mean()
    avg_down = losses.ewm(alpha=1 / period, adjust=False).mean()
    rs = avg_up / avg_down.replace(0, np.nan)
    out = 100 - (100 / (1 + rs))
    # Where avg_down == 0 the ratio is undefined: all-gain -> 100, flat -> 0.
    out = out.where(avg_down > 0, np.where(avg_up > 0, 100.0, 0.0))
    return out
|
| 86 |
+
|
| 87 |
+
def stoch_rsi(series: pd.Series, rsi_length: int = 14, stoch_length: int = 14, k_smooth: int = 3, d_smooth: int = 3
              ) -> Tuple[pd.Series, pd.Series, pd.Series]:
    """Stochastic RSI plus smoothed %K and %D lines, all scaled to 0-100.

    The raw stochastic positions the RSI within its rolling min/max range;
    %K is the k-smoothed version and %D smooths %K again.
    """
    rsi_vals = rsi(series, rsi_length)
    window_min = rsi_vals.rolling(stoch_length).min()
    window_max = rsi_vals.rolling(stoch_length).max()
    raw = (rsi_vals - window_min) / (window_max - window_min)
    k_line = raw.rolling(k_smooth).mean() * 100.0
    d_line = k_line.rolling(d_smooth).mean()
    return raw * 100.0, k_line, d_line
|
| 95 |
+
|
| 96 |
+
# ---------------- Volume/volatility family ---------------- #
|
| 97 |
+
|
| 98 |
+
def adl(high: pd.Series, low: pd.Series, close: pd.Series, volume: pd.Series) -> pd.Series:
    """
    Chaikin Accumulation/Distribution Line.

    Money-flow multiplier = ((close - low) - (high - close)) / (high - low);
    bars with zero range contribute nothing. ADL is the running sum of
    multiplier * volume.
    """
    spread = (high - low).replace(0, np.nan)
    multiplier = (((close - low) - (high - close)) / spread).fillna(0.0)
    flow = multiplier * volume
    return flow.cumsum()
|
| 109 |
+
|
| 110 |
+
def obv(close: pd.Series, volume: pd.Series) -> pd.Series:
    """
    On-Balance Volume: running total of volume signed by the direction
    of the close-to-close move (flat moves contribute nothing).
    """
    direction = np.sign(close.diff()).fillna(0)
    signed_volume = volume * direction
    return signed_volume.cumsum()
|
| 116 |
+
|
| 117 |
+
def true_range(high: pd.Series, low: pd.Series, close: pd.Series) -> pd.Series:
    """Per-bar true range: the widest of (high-low), |high-prev close|, |low-prev close|."""
    prior_close = close.shift(1)
    candidates = pd.concat(
        [(high - low).abs(), (high - prior_close).abs(), (low - prior_close).abs()],
        axis=1,
    )
    return candidates.max(axis=1)
|
| 121 |
+
|
| 122 |
+
def atr(high: pd.Series, low: pd.Series, close: pd.Series, period: int = 14) -> pd.Series:
    """Average True Range via Wilder smoothing (EWM with alpha = 1/period)."""
    ranges = true_range(high, low, close)
    smoothed = ranges.ewm(alpha=1 / period, adjust=False)
    return smoothed.mean()
|
| 125 |
+
|
| 126 |
+
def di_adx(high: pd.Series, low: pd.Series, close: pd.Series, period: int = 14
           ) -> Tuple[pd.Series, pd.Series, pd.Series]:
    """Directional indicators +DI / -DI and the ADX trend-strength line.

    Only the larger (and positive) of the bar's up-move vs down-move counts
    as directional movement for that bar. DM and DX are smoothed with an
    EWM using alpha = 1/period (Wilder-style smoothing); DI values are
    normalized by ATR. NaNs from zero denominators are returned as 0.
    """
    up_move = high.diff()
    down_move = -low.diff()
    # A bar contributes +DM only when its up-move dominates, and vice versa.
    plus_dm = pd.Series(np.where((up_move > down_move) & (up_move > 0), up_move, 0.0), index=high.index)
    minus_dm = pd.Series(np.where((down_move > up_move) & (down_move > 0), down_move, 0.0), index=high.index)
    atr_series = atr(high, low, close, period)

    # Replace zero ATR with NaN to avoid division by zero; filled with 0 below.
    plus_di = 100 * (plus_dm.ewm(alpha=1/period, adjust=False).mean() / atr_series.replace(0, np.nan))
    minus_di = 100 * (minus_dm.ewm(alpha=1/period, adjust=False).mean() / atr_series.replace(0, np.nan))
    dx = (100 * (plus_di - minus_di).abs() / (plus_di + minus_di).replace(0, np.nan)).fillna(0)
    adx = dx.ewm(alpha=1/period, adjust=False).mean()
    return plus_di.fillna(0), minus_di.fillna(0), adx.fillna(0)
|
| 139 |
+
|
| 140 |
+
def bbands(series: pd.Series, period: int = 20, std_mult: float = 2.0
           ) -> Tuple[pd.Series, pd.Series, pd.Series, pd.Series, pd.Series]:
    """Bollinger Bands: (basis, upper, lower, %b, bandwidth).

    Basis is a rolling mean; the bands sit std_mult population standard
    deviations (ddof=0) away. %b locates the price within the bands and
    bandwidth is the band spread relative to the basis.
    """
    basis = series.rolling(period).mean()
    deviation = series.rolling(period).std(ddof=0)
    band = std_mult * deviation
    upper_band = basis + band
    lower_band = basis - band
    percent_b = (series - lower_band) / (upper_band - lower_band)
    width = (upper_band - lower_band) / basis
    return basis, upper_band, lower_band, percent_b, width
|
| 149 |
+
|
| 150 |
+
def mfi(high: pd.Series, low: pd.Series, close: pd.Series, volume: pd.Series, period: int = 14) -> pd.Series:
    """Money Flow Index over `period` bars, range 0-100.

    Bug fix: when a window contained no negative money flow the ratio was
    NaN and the old code filled it with 0, reporting maximum selling
    pressure for a purely rising market. Such windows now map to 100 when
    positive flow exists, and to 0 when there is no flow at all (flat
    prices or the warm-up rows), preserving the old behavior there.
    """
    tp = (high + low + close) / 3.0
    raw_flow = tp * volume
    pos_flow = pd.Series(np.where(tp > tp.shift(1), raw_flow, 0.0), index=tp.index).rolling(period).sum()
    neg_flow = pd.Series(np.where(tp < tp.shift(1), raw_flow, 0.0), index=tp.index).rolling(period).sum()
    money_ratio = pos_flow / neg_flow.replace(0, np.nan)
    out = 100 - (100 / (1 + money_ratio))
    # Undefined ratio (neg_flow == 0): all-buy window -> 100, no flow -> 0.
    out = out.where(neg_flow > 0, np.where(pos_flow > 0, 100.0, 0.0))
    return out
|
| 158 |
+
|
| 159 |
+
def vwap_cumulative(high: pd.Series, low: pd.Series, close: pd.Series, volume: pd.Series) -> pd.Series:
    """
    Cumulative VWAP over the full series: sum(TP*V)/sum(V) where TP=(H+L+C)/3.
    Resets only at the beginning (not each day).

    Leading NaNs (zero cumulative volume) are back-filled from the first
    valid value, and any fully-empty series falls back to 0.
    """
    tp = (high + low + close) / 3.0
    cum_v = volume.cumsum().replace(0, np.nan)
    cum_tp_v = (tp * volume).cumsum()
    # .bfill() replaces fillna(method="bfill"), which is deprecated in pandas 2.x.
    return (cum_tp_v / cum_v).bfill().fillna(0)
|
| 168 |
+
|
| 169 |
+
def vwap_daily(high: pd.Series, low: pd.Series, close: pd.Series, volume: pd.Series, timestamps: pd.Series) -> pd.Series:
    """
    Session VWAP that resets daily (by calendar date of 'timestamps').

    Within each date, VWAP = cumulative(TP*V) / cumulative(V) with
    TP=(H+L+C)/3. NaNs from zero cumulative volume are back-filled, with a
    final fallback to 0.
    """
    tp = (high + low + close) / 3.0
    dates = pd.to_datetime(timestamps).dt.date
    df = pd.DataFrame({"tp": tp, "v": volume, "date": dates})
    df["tpv"] = df["tp"] * df["v"]
    # Per-date running sums implement the daily reset.
    cum = df.groupby("date")[["tpv", "v"]].cumsum()
    vwap = (cum["tpv"] / cum["v"].replace(0, np.nan)).values
    # .bfill() replaces fillna(method="bfill"), which is deprecated in pandas 2.x.
    return pd.Series(vwap, index=high.index).bfill().fillna(0)
|
| 180 |
+
|
| 181 |
+
# ---------------- JSON helpers ---------------- #
|
| 182 |
+
|
| 183 |
+
def _pts(ts: pd.Series, vals: pd.Series) -> List[Dict[str, float]]:
|
| 184 |
+
out: List[Dict[str, float]] = []
|
| 185 |
+
for t, v in zip(ts, vals):
|
| 186 |
+
if pd.isna(t) or pd.isna(v):
|
| 187 |
+
continue
|
| 188 |
+
out.append({"t": int(pd.Timestamp(t).timestamp() * 1000), "v": float(v)})
|
| 189 |
+
return out
|
| 190 |
+
|
| 191 |
+
def _tail_pts(ts: pd.Series, vals: pd.Series, n: int) -> List[Dict[str, float]]:
    """Return only the last n timestamp/value points (safe if n > len)."""
    if n is None or n <= 0:
        # Non-positive / missing n means "no truncation".
        return _pts(ts, vals)
    # .iloc[-n:] already yields the whole series when it is shorter than n.
    return _pts(ts.iloc[-n:], vals.iloc[-n:])
|
| 198 |
+
|
| 199 |
+
# ---------------- MCP-friendly functions (per indicator) ---------------- #
|
| 200 |
+
|
| 201 |
+
def get_ema(
    name: str,
    periods: List[int] | None = None,
    interval: Interval = "1h",
    limit: int = 300,
    testnet: bool = False,
    output_tail: int = 30,  # NEW
) -> Dict[str, Any]:
    """Fetch candles for `name` and compute one EMA per requested period.

    Args:
        name: Coin symbol (e.g. "BTC").
        periods: EMA spans to compute; defaults to [20, 200] when None/empty.
        interval: Candle interval.
        limit: Number of candles fetched (should exceed the largest period).
        testnet: Fetch from Hyperliquid testnet instead of mainnet.
        output_tail: Only the last `output_tail` points of each series are
            returned, keeping the JSON payload small.

    Returns:
        Dict with per-period EMA point series keyed by str(period), the
        close series, and the latest close/EMA values under "last".
    """
    periods = periods or [20, 200]
    df = fetch_candles(name, interval, limit, testnet)
    res: Dict[str, Any] = {
        "coin": name,
        "interval": interval,
        "ema": {},
        "close": _tail_pts(df["timestamp"], df["close"], output_tail),  # sliced
        "last": {"close": float(df["close"].iloc[-1])},
    }
    for p in periods:
        e = ema(df["close"], p)
        res["ema"][str(p)] = _tail_pts(df["timestamp"], e, output_tail)  # sliced
        res["last"][f"ema_{p}"] = float(e.iloc[-1])
    return res
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def get_macd(
    name: str,
    fast: int = 12,
    slow: int = 26,
    signal: int = 9,
    interval: Interval = "1h",
    limit: int = 300,
    testnet: bool = False,
    output_tail: int = 30,  # NEW
) -> Dict[str, Any]:
    """Fetch candles for `name` and compute MACD line, signal and histogram.

    Args:
        name: Coin symbol.
        fast/slow/signal: EMA periods for the MACD computation.
        interval: Candle interval.
        limit: Number of candles fetched.
        testnet: Fetch from Hyperliquid testnet instead of mainnet.
        output_tail: Only the last `output_tail` points of each series are
            returned.

    Returns:
        Dict with the three MACD point series, the parameters used, and the
        latest values (including close) under "last".
    """
    df = fetch_candles(name, interval, limit, testnet)
    line, sig, hist = macd(df["close"], fast, slow, signal)
    return {
        "coin": name,
        "interval": interval,
        "params": {"fast": fast, "slow": slow, "signal": signal},
        "macd_line": _tail_pts(df["timestamp"], line, output_tail),  # sliced
        "signal": _tail_pts(df["timestamp"], sig, output_tail),  # sliced
        "histogram": _tail_pts(df["timestamp"], hist, output_tail),  # sliced
        "last": {
            "macd_line": float(line.iloc[-1]),
            "signal": float(sig.iloc[-1]),
            "histogram": float(hist.iloc[-1]),
            "close": float(df["close"].iloc[-1]),
        },
    }
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def get_stoch_rsi(
    name: str,
    rsi_length: int = 14,
    stoch_length: int = 14,
    k_smooth: int = 3,
    d_smooth: int = 3,
    interval: Interval = "1h",
    limit: int = 300,
    testnet: bool = False,
    output_tail: int = 30,  # NEW
) -> Dict[str, Any]:
    """Fetch candles for `name` and compute Stochastic RSI with %K and %D.

    Args:
        name: Coin symbol.
        rsi_length: RSI lookback feeding the stochastic.
        stoch_length: Rolling window for the stochastic min/max of RSI.
        k_smooth/d_smooth: Smoothing windows for %K and %D.
        interval: Candle interval.
        limit: Number of candles fetched.
        testnet: Fetch from Hyperliquid testnet instead of mainnet.
        output_tail: Only the last `output_tail` points of each series are
            returned.

    Returns:
        Dict with raw stoch-RSI, %K and %D point series, the parameters,
        and latest values (including close) under "last".
    """
    df = fetch_candles(name, interval, limit, testnet)
    stoch, k, d = stoch_rsi(df["close"], rsi_length, stoch_length, k_smooth, d_smooth)
    return {
        "coin": name,
        "interval": interval,
        "params": {
            "rsi_length": rsi_length,
            "stoch_length": stoch_length,
            "k_smooth": k_smooth,
            "d_smooth": d_smooth,
        },
        "stoch_rsi": _tail_pts(df["timestamp"], stoch, output_tail),  # sliced
        "%K": _tail_pts(df["timestamp"], k, output_tail),  # sliced
        "%D": _tail_pts(df["timestamp"], d, output_tail),  # sliced
        "last": {
            "stoch_rsi": float(stoch.iloc[-1]),
            "k": float(k.iloc[-1]),
            "d": float(d.iloc[-1]),
            "close": float(df["close"].iloc[-1]),
        },
    }
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
def get_adl(
    name: str,
    interval: Interval = "1h",
    limit: int = 300,
    testnet: bool = False,
    output_tail: int = 30,
) -> Dict[str, Any]:
    """Fetch candles for `name` and return the A/D line (last `output_tail` points)."""
    candles = fetch_candles(name, interval, limit, testnet)
    line = adl(candles["high"], candles["low"], candles["close"], candles["volume"])
    payload: Dict[str, Any] = {"coin": name, "interval": interval}
    payload["adl"] = _tail_pts(candles["timestamp"], line, output_tail)
    payload["last"] = {"adl": float(line.iloc[-1])}
    return payload
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def get_obv(
    name: str,
    interval: Interval = "1h",
    limit: int = 300,
    testnet: bool = False,
    output_tail: int = 30,
) -> Dict[str, Any]:
    """Fetch candles for `name` and return On-Balance Volume (last `output_tail` points)."""
    candles = fetch_candles(name, interval, limit, testnet)
    line = obv(candles["close"], candles["volume"])
    recent = _tail_pts(candles["timestamp"], line, output_tail)
    latest = float(line.iloc[-1])
    return {"coin": name, "interval": interval, "obv": recent, "last": {"obv": latest}}
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
def get_atr_adx(
    name: str,
    period: int = 14,
    interval: Interval = "1h",
    limit: int = 300,
    testnet: bool = False,
    output_tail: int = 30,  # NEW
) -> Dict[str, Any]:
    """Fetch candles for `name` and compute ATR plus the DI/ADX trend family.

    Args:
        name: Coin symbol.
        period: Smoothing period shared by ATR, +DI/-DI and ADX.
        interval: Candle interval.
        limit: Number of candles fetched.
        testnet: Fetch from Hyperliquid testnet instead of mainnet.
        output_tail: Only the last `output_tail` points of each series are
            returned.

    Returns:
        Dict with "+DI", "-DI", "ADX" and "ATR" point series, the period
        used, and the latest values under "last".
    """
    df = fetch_candles(name, interval, limit, testnet)
    plus_di, minus_di, adx_series = di_adx(df["high"], df["low"], df["close"], period)
    atr_series = atr(df["high"], df["low"], df["close"], period)
    return {
        "coin": name,
        "interval": interval,
        "params": {"period": period},
        "+DI": _tail_pts(df["timestamp"], plus_di, output_tail),  # sliced
        "-DI": _tail_pts(df["timestamp"], minus_di, output_tail),  # sliced
        "ADX": _tail_pts(df["timestamp"], adx_series, output_tail),# sliced
        "ATR": _tail_pts(df["timestamp"], atr_series, output_tail),# sliced
        "last": {
            "+DI": float(plus_di.iloc[-1]),
            "-DI": float(minus_di.iloc[-1]),
            "ADX": float(adx_series.iloc[-1]),
            "ATR": float(atr_series.iloc[-1]),
        },
    }
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
def get_bbands(
    name: str,
    period: int = 20,
    std_mult: float = 2.0,
    interval: Interval = "1h",
    limit: int = 300,
    testnet: bool = False,
    output_tail: int = 30,  # NEW
) -> Dict[str, Any]:
    """Fetch candles for `name` and compute Bollinger Bands.

    Args:
        name: Coin symbol.
        period: Rolling window for the basis (mean) and deviation.
        std_mult: Number of standard deviations for the band width.
        interval: Candle interval.
        limit: Number of candles fetched.
        testnet: Fetch from Hyperliquid testnet instead of mainnet.
        output_tail: Only the last `output_tail` points of each series are
            returned.

    Returns:
        Dict with basis/upper/lower/%b/bandwidth point series, the
        parameters used, and latest values under "last".
    """
    df = fetch_candles(name, interval, limit, testnet)
    ma, upper, lower, pct_b, bandwidth = bbands(df["close"], period, std_mult)
    return {
        "coin": name,
        "interval": interval,
        "params": {"period": period, "std_mult": std_mult},
        "basis": _tail_pts(df["timestamp"], ma, output_tail),  # sliced
        "upper": _tail_pts(df["timestamp"], upper, output_tail),  # sliced
        "lower": _tail_pts(df["timestamp"], lower, output_tail),  # sliced
        "%b": _tail_pts(df["timestamp"], pct_b, output_tail),  # sliced
        "bandwidth": _tail_pts(df["timestamp"], bandwidth, output_tail),  # sliced
        "last": {
            "basis": float(ma.iloc[-1]),
            "upper": float(upper.iloc[-1]),
            "lower": float(lower.iloc[-1]),
            "%b": float(pct_b.iloc[-1]),
            "bandwidth": float(bandwidth.iloc[-1]),
        },
    }
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def get_mfi(
    name: str,
    period: int = 14,
    interval: Interval = "1h",
    limit: int = 300,
    testnet: bool = False,
    output_tail: int = 30,
) -> Dict[str, Any]:
    """Fetch candles for `name` and return the Money Flow Index (last `output_tail` points)."""
    candles = fetch_candles(name, interval, limit, testnet)
    line = mfi(candles["high"], candles["low"], candles["close"], candles["volume"], period)
    return {
        "coin": name,
        "interval": interval,
        "params": {"period": period},
        "mfi": _tail_pts(candles["timestamp"], line, output_tail),
        "last": {"mfi": float(line.iloc[-1])},
    }
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
def get_vwap(
    name: str,
    daily_reset: bool = False,
    interval: Interval = "1h",
    limit: int = 300,
    testnet: bool = False,
    output_tail: int = 30,
) -> Dict[str, Any]:
    """Fetch candles for `name` and return VWAP.

    `daily_reset=True` uses the session VWAP that restarts each calendar
    day; otherwise the cumulative VWAP over the whole fetched range.
    """
    candles = fetch_candles(name, interval, limit, testnet)
    if daily_reset:
        line = vwap_daily(candles["high"], candles["low"], candles["close"], candles["volume"], candles["timestamp"])
    else:
        line = vwap_cumulative(candles["high"], candles["low"], candles["close"], candles["volume"])
    return {
        "coin": name,
        "interval": interval,
        "params": {"daily_reset": bool(daily_reset)},
        "vwap": _tail_pts(candles["timestamp"], line, output_tail),
        "last": {"vwap": float(line.iloc[-1])},
    }
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
def get_volume(
    name: str,
    interval: Interval = "1h",
    limit: int = 300,
    testnet: bool = False,
    output_tail: int = 30,
) -> Dict[str, Any]:
    """Fetch candles for `name` and return raw per-candle volume (last `output_tail` points)."""
    candles = fetch_candles(name, interval, limit, testnet)
    recent = _tail_pts(candles["timestamp"], candles["volume"], output_tail)
    latest = float(candles["volume"].iloc[-1])
    return {"coin": name, "interval": interval, "volume": recent, "last": {"volume": latest}}
|
| 435 |
+
|
| 436 |
+
|
| 437 |
+
def get_bundle(
    name: str,
    interval: Interval = "1h",
    limit: int = 300,
    testnet: bool = False,
    include: Iterable[str] = ("ema","macd","stoch_rsi","adl","obv","atr_adx","bbands","mfi","vwap","volume"),
    ema_periods: List[int] | None = None,
    macd_fast: int = 12, macd_slow: int = 26, macd_signal: int = 9,
    stoch_rsi_len: int = 14, stoch_len: int = 14, k_smooth: int = 3, d_smooth: int = 3,
    bb_period: int = 20, bb_std: float = 2.0,
    mfi_period: int = 14,
    vwap_daily_reset: bool = False,
    output_tail: int = 30,  # NEW
) -> Dict[str, Any]:
    """Compute several indicators from a single candle fetch.

    Fetches candles once and evaluates every indicator named in `include`,
    attaching each result (tail-sliced to `output_tail` points) under its
    own key. Per-indicator parameters mirror the standalone get_* helpers.

    Note: the "atr_adx" branch always uses the default period of 14 — no
    period parameter is exposed for it here.

    Returns:
        Dict keyed by indicator name plus "coin", "interval", the close
        series, and aggregated latest values under "last".
    """
    df = fetch_candles(name, interval, limit, testnet)
    out: Dict[str, Any] = {
        "coin": name,
        "interval": interval,
        "close": _tail_pts(df["timestamp"], df["close"], output_tail),  # sliced
        "last": {"close": float(df["close"].iloc[-1])},
    }

    if "ema" in include:
        ema_periods = ema_periods or [20, 200]
        out["ema"] = {}
        for p in ema_periods:
            e = ema(df["close"], p)
            out["ema"][str(p)] = _tail_pts(df["timestamp"], e, output_tail)  # sliced
            out["last"][f"ema_{p}"] = float(e.iloc[-1])

    if "macd" in include:
        line, sig, hist = macd(df["close"], macd_fast, macd_slow, macd_signal)
        out["macd"] = {
            "params": {"fast": macd_fast, "slow": macd_slow, "signal": macd_signal},
            "macd_line": _tail_pts(df["timestamp"], line, output_tail),  # sliced
            "signal": _tail_pts(df["timestamp"], sig, output_tail),  # sliced
            "histogram": _tail_pts(df["timestamp"], hist, output_tail),  # sliced
            "last": {"macd_line": float(line.iloc[-1]), "signal": float(sig.iloc[-1]), "histogram": float(hist.iloc[-1])},
        }

    if "stoch_rsi" in include:
        st, k, d = stoch_rsi(df["close"], stoch_rsi_len, stoch_len, k_smooth, d_smooth)
        out["stoch_rsi"] = {
            "params": {"rsi_length": stoch_rsi_len, "stoch_length": stoch_len, "k_smooth": k_smooth, "d_smooth": d_smooth},
            "stoch_rsi": _tail_pts(df["timestamp"], st, output_tail),  # sliced
            "%K": _tail_pts(df["timestamp"], k, output_tail),  # sliced
            "%D": _tail_pts(df["timestamp"], d, output_tail),  # sliced
            "last": {"stoch_rsi": float(st.iloc[-1]), "k": float(k.iloc[-1]), "d": float(d.iloc[-1])},
        }

    if "adl" in include:
        series = adl(df["high"], df["low"], df["close"], df["volume"])
        out["adl"] = {"series": _tail_pts(df["timestamp"], series, output_tail), "last": float(series.iloc[-1])}

    if "obv" in include:
        series = obv(df["close"], df["volume"])
        out["obv"] = {"series": _tail_pts(df["timestamp"], series, output_tail), "last": float(series.iloc[-1])}

    if "atr_adx" in include:
        # Uses the module defaults (period=14); see note in docstring.
        plus_di, minus_di, adx_series = di_adx(df["high"], df["low"], df["close"])
        atr_series = atr(df["high"], df["low"], df["close"])
        out["atr_adx"] = {
            "+DI": _tail_pts(df["timestamp"], plus_di, output_tail),  # sliced
            "-DI": _tail_pts(df["timestamp"], minus_di, output_tail),  # sliced
            "ADX": _tail_pts(df["timestamp"], adx_series, output_tail),  # sliced
            "ATR": _tail_pts(df["timestamp"], atr_series, output_tail),  # sliced
            "last": {"+DI": float(plus_di.iloc[-1]), "-DI": float(minus_di.iloc[-1]), "ADX": float(adx_series.iloc[-1]), "ATR": float(atr_series.iloc[-1])},
        }

    if "bbands" in include:
        ma, up, lo, pct_b, bw = bbands(df["close"], bb_period, bb_std)
        out["bbands"] = {
            "params": {"period": bb_period, "std_mult": bb_std},
            "basis": _tail_pts(df["timestamp"], ma, output_tail),  # sliced
            "upper": _tail_pts(df["timestamp"], up, output_tail),  # sliced
            "lower": _tail_pts(df["timestamp"], lo, output_tail),  # sliced
            "%b": _tail_pts(df["timestamp"], pct_b, output_tail),# sliced
            "bandwidth": _tail_pts(df["timestamp"], bw, output_tail),  # sliced
            "last": {"basis": float(ma.iloc[-1]), "upper": float(up.iloc[-1]), "lower": float(lo.iloc[-1]), "%b": float(pct_b.iloc[-1]), "bandwidth": float(bw.iloc[-1])},
        }

    if "mfi" in include:
        series = mfi(df["high"], df["low"], df["close"], df["volume"], mfi_period)
        out["mfi"] = {"params": {"period": mfi_period}, "series": _tail_pts(df["timestamp"], series, output_tail), "last": float(series.iloc[-1])}

    if "vwap" in include:
        series = vwap_daily(df["high"], df["low"], df["close"], df["volume"], df["timestamp"]) if vwap_daily_reset else \
                 vwap_cumulative(df["high"], df["low"], df["close"], df["volume"])
        out["vwap"] = {"params": {"daily_reset": bool(vwap_daily_reset)}, "series": _tail_pts(df["timestamp"], series, output_tail), "last": float(series.iloc[-1])}

    if "volume" in include:
        out["volume"] = {"series": _tail_pts(df["timestamp"], df["volume"], output_tail), "last": float(df["volume"].iloc[-1])}

    return out
|
hl_indicators_server.py
ADDED
|
@@ -0,0 +1,325 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# hl_indicators_server.py
|
| 2 |
+
"""
|
| 3 |
+
FastMCP server exposing Hyperliquid indicator tools.
|
| 4 |
+
|
| 5 |
+
This server provides a unified interface to compute common trading indicators
|
| 6 |
+
directly from Hyperliquid testnet market data via the `candles_snapshot` API.
|
| 7 |
+
|
| 8 |
+
Available tools:
|
| 9 |
+
- ema → Exponential Moving Average
|
| 10 |
+
- macd → Moving Average Convergence Divergence
|
| 11 |
+
- stoch_rsi → Stochastic RSI
|
| 12 |
+
- adl → Accumulation / Distribution Line
|
| 13 |
+
- obv → On-Balance Volume
|
| 14 |
+
- atr_adx → Average True Range / Directional Index / ADX
|
| 15 |
+
- bbands → Bollinger Bands
|
| 16 |
+
- mfi → Money Flow Index
|
| 17 |
+
- vwap → Volume-Weighted Average Price
|
| 18 |
+
- volume → Raw trading volume
|
| 19 |
+
- bundle → Compute multiple indicators in one call
|
| 20 |
+
|
| 21 |
+
Run:
|
| 22 |
+
python hl_indicators_server.py
|
| 23 |
+
"""
|
| 24 |
+
|
| 25 |
+
from __future__ import annotations
|
| 26 |
+
from typing import List, Optional, Literal, Dict, Any
|
| 27 |
+
|
| 28 |
+
from mcp.server.fastmcp import FastMCP
|
| 29 |
+
import hl_indicators as hi
|
| 30 |
+
|
| 31 |
+
Interval = Literal["1m", "5m", "15m", "1h", "4h", "1d"]
|
| 32 |
+
|
| 33 |
+
mcp = FastMCP("hl_indicators_server")
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# ------------------ Health check ------------------ #
|
| 37 |
+
|
| 38 |
+
@mcp.tool()
async def ping() -> str:
    """Liveness probe: confirms the MCP server is online by returning "pong"."""
    return "pong"
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# ------------------ Indicator tools ------------------ #
|
| 45 |
+
|
| 46 |
+
@mcp.tool()
async def ema(
    name: str,
    interval: Interval = "1h",
    periods: Optional[List[int]] = None,
    lookback: Optional[int] = None,
    limit: int = 600,
) -> Dict[str, Any]:
    """
    Compute Exponential Moving Averages (EMA).

    Args:
        name: Coin name (e.g. "BTC", "ETH", "HYPE").
        interval: Candle interval ("1m", "5m", "15m", "1h", "4h", "1d").
        periods: List of EMA window lengths (e.g. [20, 200]).
        lookback: Optional shorthand for a single EMA (e.g. 36); only used
            when `periods` is not provided.
        limit: Number of candles to fetch from the API.

    Notes:
        - `limit` controls how many data points are retrieved; it should be at
          least 2–3× the largest EMA period for accurate results.
        - Data is fetched from Hyperliquid mainnet (`testnet=False` below).

    Returns:
        A dictionary containing EMA series for each period and the most recent values.
    """
    if periods is None and lookback is not None:
        periods = [lookback]
    return hi.get_ema(name=name, periods=periods, interval=interval, limit=limit, testnet=False)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
@mcp.tool()
async def macd(
    name: str,
    interval: Interval = "1h",
    fast: int = 12,
    slow: int = 26,
    signal: int = 9,
    limit: int = 600,
) -> Dict[str, Any]:
    """
    Compute the Moving Average Convergence Divergence (MACD).

    Args:
        name: Coin name (e.g. "BTC").
        interval: Candle interval.
        fast: Period for the fast EMA (default: 12).
        slow: Period for the slow EMA (default: 26).
        signal: Period for the MACD signal line (default: 9).
        limit: Number of candles to fetch.

    Returns:
        A dictionary with MACD line, signal line, histogram, and last computed values.
    """
    params: Dict[str, Any] = {
        "name": name,
        "fast": fast,
        "slow": slow,
        "signal": signal,
        "interval": interval,
        "limit": limit,
        "testnet": False,  # always query mainnet market data
    }
    return hi.get_macd(**params)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
@mcp.tool()
async def stoch_rsi(
    name: str,
    interval: Interval = "1h",
    rsi_length: int = 14,
    stoch_length: int = 14,
    k_smooth: int = 3,
    d_smooth: int = 3,
    limit: int = 600,
) -> Dict[str, Any]:
    """
    Compute the Stochastic RSI oscillator (%K and %D).

    Args:
        name: Coin name.
        interval: Candle interval.
        rsi_length: Period for RSI computation (default: 14).
        stoch_length: Period for the Stochastic window (default: 14).
        k_smooth: Smoothing factor for %K (default: 3).
        d_smooth: Smoothing factor for %D (default: 3).
        limit: Number of candles to fetch.

    Returns:
        A dictionary containing %K, %D, and the raw StochRSI values.
    """
    params: Dict[str, Any] = {
        "name": name,
        "rsi_length": rsi_length,
        "stoch_length": stoch_length,
        "k_smooth": k_smooth,
        "d_smooth": d_smooth,
        "interval": interval,
        "limit": limit,
        "testnet": False,  # always query mainnet market data
    }
    return hi.get_stoch_rsi(**params)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
@mcp.tool()
async def adl(name: str, interval: Interval = "1h", limit: int = 600) -> Dict[str, Any]:
    """
    Compute the Accumulation/Distribution Line (ADL).

    Args:
        name: Coin name.
        interval: Candle interval.
        limit: Number of candles to fetch.

    Returns:
        A dictionary containing the ADL time series and the latest ADL value.
    """
    # Delegate to the indicator library; mainnet data (testnet=False).
    result = hi.get_adl(name=name, interval=interval, limit=limit, testnet=False)
    return result
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
@mcp.tool()
async def obv(name: str, interval: Interval = "1h", limit: int = 600) -> Dict[str, Any]:
    """
    Compute the On-Balance Volume (OBV).

    Args:
        name: Coin name.
        interval: Candle interval.
        limit: Number of candles to fetch.

    Returns:
        OBV values accumulated over time and the latest OBV.
    """
    # Delegate to the indicator library; mainnet data (testnet=False).
    result = hi.get_obv(name=name, interval=interval, limit=limit, testnet=False)
    return result
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
@mcp.tool()
async def atr_adx(name: str, interval: Interval = "1h", period: int = 14, limit: int = 600) -> Dict[str, Any]:
    """
    Compute volatility and directional indicators: ATR, +DI, -DI, and ADX.

    Args:
        name: Coin name.
        interval: Candle interval.
        period: Lookback for smoothing (default: 14).
        limit: Number of candles to fetch.

    Returns:
        A dictionary with ATR, +DI, -DI, and ADX values.
    """
    params: Dict[str, Any] = {
        "name": name,
        "period": period,
        "interval": interval,
        "limit": limit,
        "testnet": False,  # always query mainnet market data
    }
    return hi.get_atr_adx(**params)
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
@mcp.tool()
async def bbands(
    name: str,
    interval: Interval = "1h",
    period: int = 20,
    std_mult: float = 2.0,
    limit: int = 600,
) -> Dict[str, Any]:
    """
    Compute Bollinger Bands (basis, upper/lower bands, %b, bandwidth).

    Args:
        name: Coin name.
        interval: Candle interval.
        period: Window for SMA (default: 20).
        std_mult: Standard deviation multiplier (default: 2.0).
        limit: Number of candles to fetch.

    Returns:
        A dictionary with band series and the most recent band values.
    """
    params: Dict[str, Any] = {
        "name": name,
        "period": period,
        "std_mult": std_mult,
        "interval": interval,
        "limit": limit,
        "testnet": False,  # always query mainnet market data
    }
    return hi.get_bbands(**params)
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
@mcp.tool()
async def mfi(name: str, interval: Interval = "1h", period: int = 14, limit: int = 600) -> Dict[str, Any]:
    """
    Compute the Money Flow Index (MFI), a volume-weighted momentum oscillator.

    Args:
        name: Coin name.
        interval: Candle interval.
        period: Rolling window (default: 14).
        limit: Number of candles to fetch.

    Returns:
        A dictionary containing MFI series and the most recent value.
    """
    # Delegate to the indicator library; mainnet data (testnet=False).
    result = hi.get_mfi(name=name, period=period, interval=interval, limit=limit, testnet=False)
    return result
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
@mcp.tool()
async def vwap(name: str, interval: Interval = "1h", daily_reset: bool = False, limit: int = 600) -> Dict[str, Any]:
    """
    Compute the Volume-Weighted Average Price (VWAP).

    Args:
        name: Coin name.
        interval: Candle interval.
        daily_reset: If True, VWAP resets each trading day.
        limit: Number of candles to fetch.

    Returns:
        VWAP time series and the last computed VWAP value.
    """
    # Delegate to the indicator library; mainnet data (testnet=False).
    result = hi.get_vwap(name=name, daily_reset=daily_reset, interval=interval, limit=limit, testnet=False)
    return result
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
@mcp.tool()
async def volume(name: str, interval: Interval = "1h", limit: int = 600) -> Dict[str, Any]:
    """
    Retrieve the raw trading volume per candle.

    Args:
        name: Coin name.
        interval: Candle interval.
        limit: Number of candles to fetch.

    Returns:
        Volume values for each candle and the latest volume.
    """
    # Delegate to the indicator library; mainnet data (testnet=False).
    result = hi.get_volume(name=name, interval=interval, limit=limit, testnet=False)
    return result
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
@mcp.tool()
async def bundle(
    name: str,
    interval: Interval = "1h",
    limit: int = 600,
    include: Optional[List[str]] = None,
    ema_periods: Optional[List[int]] = None,
    macd_fast: int = 12,
    macd_slow: int = 26,
    macd_signal: int = 9,
    stoch_rsi_len: int = 14,
    stoch_len: int = 14,
    k_smooth: int = 3,
    d_smooth: int = 3,
    bb_period: int = 20,
    bb_std: float = 2.0,
    mfi_period: int = 14,
    vwap_daily_reset: bool = False,
) -> Dict[str, Any]:
    """
    Compute multiple indicators in a single request.

    Args:
        name: Coin name.
        interval: Candle interval.
        limit: Number of candles to fetch.
        include: List of indicators to include. Default includes all:
            ["ema","macd","stoch_rsi","adl","obv","atr_adx","bbands","mfi","vwap","volume"]
        ema_periods: EMA periods (default: [20, 200]).
        macd_fast / macd_slow / macd_signal: MACD configuration.
        stoch_rsi_len / stoch_len / k_smooth / d_smooth: StochRSI configuration.
        bb_period / bb_std: Bollinger Band configuration.
        mfi_period: Money Flow Index lookback.
        vwap_daily_reset: Whether VWAP resets daily.

    Returns:
        A combined dictionary with all requested indicators.
    """
    all_indicators = ("ema", "macd", "stoch_rsi", "adl", "obv", "atr_adx", "bbands", "mfi", "vwap", "volume")
    # NOTE: an empty `include` list (not just None) also falls back to all
    # indicators — this mirrors the original truthiness-based behavior.
    selected = include or all_indicators
    return hi.get_bundle(
        name=name,
        interval=interval,
        limit=limit,
        testnet=False,
        include=selected,
        ema_periods=ema_periods,
        macd_fast=macd_fast,
        macd_slow=macd_slow,
        macd_signal=macd_signal,
        stoch_rsi_len=stoch_rsi_len,
        stoch_len=stoch_len,
        k_smooth=k_smooth,
        d_smooth=d_smooth,
        bb_period=bb_period,
        bb_std=bb_std,
        mfi_period=mfi_period,
        vwap_daily_reset=vwap_daily_reset,
    )
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
# ------------------ Entry point ------------------ #

if __name__ == "__main__":
    # Serve the registered tools over stdio, the transport used by local
    # MCP clients that spawn this script as a subprocess.
    mcp.run(transport='stdio')
|
hype_accounts.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# uv pip install hyperliquid-python-sdk pandas numpy python-dotenv
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
import eth_account
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pandas as pd
|
| 7 |
+
from eth_account.signers.local import LocalAccount
|
| 8 |
+
|
| 9 |
+
from hyperliquid.exchange import Exchange
|
| 10 |
+
from hyperliquid.info import Info
|
| 11 |
+
from hyperliquid.utils import constants
|
| 12 |
+
|
| 13 |
+
from dotenv import load_dotenv
|
| 14 |
+
from datetime import datetime, timezone
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# Load environment variables from a local .env file; override=True lets the
# .env values take precedence over variables already set in the process.
load_dotenv(override=True)
|
| 19 |
+
|
| 20 |
+
# https://github.com/hyperliquid-dex/hyperliquid-python-sdk/blob/master/examples/example_utils.py
def setup(base_url=constants.MAINNET_API_URL, skip_ws=False, perp_dexs=None):
    """Create authenticated Hyperliquid clients from environment variables.

    Reads HYPERLIQUID_PRIVATE_KEY (the signing/API wallet key) and
    HYPERLIQUID_ACCOUNT_ADDRESS (the master account address; may be unset or
    empty, in which case the key's own address is used), then verifies that
    the account has some equity before returning.

    Args:
        base_url: Hyperliquid API endpoint (default: mainnet).
        skip_ws: If True, the Info client skips opening a websocket.
        perp_dexs: Optional perp-DEX list passed through to the SDK.

    Returns:
        Tuple of (address, info, exchange).

    Raises:
        Exception: If the account has zero perp accountValue and no spot balances.
    """
    account: LocalAccount = eth_account.Account.from_key(os.getenv("HYPERLIQUID_PRIVATE_KEY"))
    address = os.getenv("HYPERLIQUID_ACCOUNT_ADDRESS")  # config["account_address"]
    # Fall back to the signing key's address when the env var is unset (None)
    # or empty. The previous `address == ""` check missed the unset case and
    # would pass None to info.user_state() below.
    if not address:
        address = account.address
    print("Running with account address:", address)
    if address != account.address:
        print("Running with agent address:", account.address)
    info = Info(base_url, skip_ws, perp_dexs=perp_dexs)
    user_state = info.user_state(address)
    spot_user_state = info.spot_user_state(address)
    margin_summary = user_state["marginSummary"]
    # Refuse to continue with an empty account — typically a sign that the
    # configured address is the API wallet rather than the master account.
    if float(margin_summary["accountValue"]) == 0 and len(spot_user_state["balances"]) == 0:
        print("Not running the example because the provided account has no equity.")
        url = info.base_url.split(".", 1)[1]
        error_string = f"No accountValue:\nIf you think this is a mistake, make sure that {address} has a balance on {url}.\nIf address shown is your API wallet address, update the config to specify the address of your account, not the address of the API wallet."
        raise Exception(error_string)
    exchange = Exchange(account, base_url, account_address=address, perp_dexs=perp_dexs)
    return address, info, exchange
|
| 40 |
+
|
| 41 |
+
def long_short_shares_mkt_price(symbol, value, leverage, is_buy) -> dict:
    """Open a market perp position sized to a USD notional, then set leverage.

    Args:
        symbol: Coin symbol (e.g. "BTC", "HYPE").
        value: USD notional to deploy.
        leverage: Target leverage for the symbol (cross margin).
        is_buy: True opens a long, False opens a short.

    Returns:
        Dict with the SDK responses: {'mkt_order_result': ..., 'leverage_position': ...}.
        (The previous `-> str` annotation was wrong — a dict is returned.)
    """
    address, info, exchange = setup()
    # Use the latest 1m candle close as the price reference for sizing;
    # one timestamp serves as both start and end of the snapshot window
    # (the original computed the same value twice).
    now_ms = int(datetime.now(timezone.utc).timestamp() * 1000)
    close_price = float(info.candles_snapshot(symbol, "1m", now_ms, now_ms)[0]['c'])
    # Round the order size to the coin's allowed precision (szDecimals).
    coins_metadata_df = pd.DataFrame(info.meta()['universe'])
    rounding_decimals = coins_metadata_df[coins_metadata_df['name'] == symbol]['szDecimals'].values[0]
    quantity = np.round(float(value) / float(close_price), rounding_decimals)
    print(quantity)
    mkt_order_result = exchange.market_open(
        name=symbol,
        is_buy=is_buy,
        sz=quantity,
        px=None,
        slippage=0.01,  # accept up to 1% slippage on the market order
    )
    # NOTE(review): leverage is updated *after* the order is placed, so the
    # fill itself uses the account's previous leverage setting — confirm this
    # ordering is intended.
    is_cross = True
    user_state = info.user_state(address)
    for asset_position in user_state["assetPositions"]:
        if asset_position["position"]["coin"] == symbol:
            print(f"Current leverage for {symbol}:", json.dumps(asset_position["position"]["leverage"], indent=2))
    leverage_position = exchange.update_leverage(leverage, symbol, is_cross)
    return {'mkt_order_result': mkt_order_result, 'leverage_position': leverage_position}
|
hype_accounts_server.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from mcp.server.fastmcp import FastMCP
from hype_accounts import setup, long_short_shares_mkt_price

# MCP server exposing Hyperliquid account inspection and trading tools.
mcp = FastMCP("hype_accounts_server")
|
| 5 |
+
|
| 6 |
+
@mcp.tool()
async def get_account_details() -> dict:
    """Get cash balance, cryptocurrency holdings and PNL of hyperliquid account.

    Returns:
        Dict with keys "holdings" (asset positions, or the string
        "No holdings"), "cash_balance" (perp account value), and
        "profit_and_loss" (latest PNL history point).
    """
    account_details_dict = {
        "holdings": None,
        "cash_balance": None,
        "profit_and_loss": None
    }
    address, info, exchange = setup()
    user_state = info.user_state(address)
    # marginSummary.accountValue is the total perp account equity.
    account_details_dict["cash_balance"] = user_state['marginSummary']['accountValue']
    if user_state['assetPositions'] == []:
        account_details_dict["holdings"] = "No holdings"
    else:
        account_details_dict["holdings"] = user_state['assetPositions']
    # NOTE(review): [3][1] selects a fixed entry of the portfolio response and
    # [-1][1] the value of the most recent pnlHistory point — presumably the
    # all-time PNL series; confirm the indices against the actual API response
    # shape, as any change to its ordering would silently break this.
    account_details_dict["profit_and_loss"] = exchange.info.portfolio(address)[3][1]['pnlHistory'][-1][1]

    return account_details_dict
|
| 25 |
+
|
| 26 |
+
@mcp.tool()
async def long_perps_mkt_price(symbol: str, value: float, leverage: int) -> str:
    """Create a long position for a cryptocurrency perpetual contract at market price.

    Args:
        symbol: The symbol of the cryptocurrency (e.g., "HYPE","ETC","BTC","XRP").
        value: The USD value to long.
        leverage: The leverage to use for the position.
    """
    # is_buy=True opens the long side.
    result = long_short_shares_mkt_price(symbol, value, leverage, is_buy=True)
    return str(result)
|
| 36 |
+
|
| 37 |
+
@mcp.tool()
async def short_perps_mkt_price(symbol: str, value: float, leverage: int) -> str:
    """Create a short position for a cryptocurrency perpetual contract at market price.

    Args:
        symbol: The symbol of the cryptocurrency (e.g., "HYPE","ETC","BTC").
        value: The USD value to short.
        leverage: The leverage to use for the position.
    """
    # is_buy=False opens the short side.
    result = long_short_shares_mkt_price(symbol, value, leverage, is_buy=False)
    return str(result)
|
| 47 |
+
|
| 48 |
+
@mcp.tool()
async def close_perps_mkt_price(symbol: str) -> str:
    """Close the position for a cryptocurrency perpetual contract at market price.

    Args:
        symbol: The symbol of the cryptocurrency (e.g., "HYPE","ETC","BTC").
    """
    # Only the Exchange client is needed to close a position.
    exchange = setup()[2]
    return str(exchange.market_close(symbol))
|
| 57 |
+
|
| 58 |
+
if __name__ == "__main__":
    # Serve the account/trading tools over stdio for local MCP clients.
    mcp.run(transport='stdio')
|
memories.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
memory_utils.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# memory_utils.py
import json
import pandas as pd
from datetime import datetime
from pathlib import Path
from typing import Optional, List, Dict, Any


# JSONL file in the working directory; one {"timestamp", "summary"} object per line.
MEMORY_FILE = Path("memories.json")
|
| 10 |
+
|
| 11 |
+
def load_memories(n: int = 3) -> list[str]:
    """Return the summaries of the *n* most recent memories (oldest first).

    Reads the JSONL memory file, skipping blank lines; returns an empty
    list when the file does not exist yet.
    """
    if not MEMORY_FILE.exists():
        return []
    entries = []
    with open(MEMORY_FILE, "r", encoding="utf-8") as fh:
        for raw_line in fh:
            if raw_line.strip():
                entries.append(json.loads(raw_line))
    return [entry["summary"] for entry in entries[-n:]]
|
| 21 |
+
|
| 22 |
+
def save_memory(summary: str):
    """Append one memory summary to the JSONL file.

    Each record is written as a single JSON line:
    {"timestamp": "...", "summary": "..."}
    """
    record = {
        "timestamp": datetime.now().isoformat(),
        "summary": summary,
    }
    with open(MEMORY_FILE, "a", encoding="utf-8") as fh:
        fh.write(json.dumps(record) + "\n")
|
| 30 |
+
|
| 31 |
+
def load_memories_df(n: Optional[int] = None) -> pd.DataFrame:
    """
    Return recent memories as a pandas DataFrame (newest first).

    Args:
        n: If given (and non-zero), only the n most recent entries are returned.

    Returns:
        DataFrame with one row per memory record, newest first. Empty (with
        "timestamp"/"summary" columns) when the memory file does not exist.
    """
    # Use the shared MEMORY_FILE constant (the original hard-coded the path
    # string) and mirror load_memories() by tolerating a missing file instead
    # of raising FileNotFoundError.
    if not MEMORY_FILE.exists():
        return pd.DataFrame(columns=["timestamp", "summary"])
    with open(MEMORY_FILE, "r", encoding="utf-8") as f:
        lines = [json.loads(line) for line in f if line.strip()]
    df = pd.DataFrame(lines)

    # n falsy (None or 0) → return everything, preserving original semantics.
    return df.tail(n).iloc[::-1] if n else df.iloc[::-1]
|
requirements.txt
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio
|
| 2 |
+
pandas
|
| 3 |
+
numpy
|
| 4 |
+
python-dotenv
|
| 5 |
+
openai
|
| 6 |
+
openai-agents
|
| 7 |
+
fastmcp
|
| 8 |
+
hyperliquid-python-sdk
|
| 9 |
+
eth-account
|
| 10 |
+
feedparser
|
| 11 |
+
snscrape
|