Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import sys
|
| 3 |
+
import json
|
| 4 |
+
from typing import Optional, List
|
| 5 |
+
|
| 6 |
+
import pandas as pd
|
| 7 |
+
|
| 8 |
+
from config import DEFAULT_SYMBOLS, TOP_N_TOKENS, TIMEFRAME, CANDLE_LIMIT
|
| 9 |
+
from data_fetcher import fetch_multiple, fetch_instruments
|
| 10 |
+
from regime import detect_regime
|
| 11 |
+
from volume_analysis import analyze_volume
|
| 12 |
+
from risk_engine import evaluate_risk
|
| 13 |
+
from veto import apply_veto
|
| 14 |
+
from scorer import compute_structure_score, score_token, rank_tokens
|
| 15 |
+
|
| 16 |
+
# Route all log output to stdout (instead of logging's default stderr)
# with a timestamped "[LEVEL] name: message" line format.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    handlers=[logging.StreamHandler(sys.stdout)],
)
# Module-level logger shared by run() and the CLI entry point below.
logger = logging.getLogger("main")
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def analyze_symbol(symbol: str, df: pd.DataFrame, account_equity: float = 10000.0):
    """Run the full analysis pipeline for a single symbol.

    Detects the market regime and volume profile, applies the veto filter,
    scores the token, and sizes a position for the given equity. The result
    is one flat summary dict with the risk plan nested under "risk".
    """
    regime = detect_regime(df)
    volume = analyze_volume(df)
    structure = compute_structure_score(regime)
    is_vetoed, reason = apply_veto(regime, volume, structure)
    scores = score_token(regime, volume, is_vetoed)

    last_close = df["close"].iloc[-1]
    risk = evaluate_risk(
        df_last_close=last_close,
        atr=regime["atr"],
        atr_pct=regime["atr_pct"],
        regime_score=regime["regime_score"],
        vol_ratio=regime["vol_ratio"],
        account_equity=account_equity,
    )

    # Build the summary in the same key order callers (and JSON export)
    # observe: identity fields, diagnostics, veto, scores, then risk.
    summary = {
        "symbol": symbol,
        "close": round(last_close, 8),
        "trend": regime["trend"],
        "vol_ratio": round(regime["vol_ratio"], 3),
        "volatility_expanding": regime["volatility_expanding"],
        "volume_spike": volume["spike"],
        "volume_climax": volume["climax"],
        "breakout": volume["breakout"],
        "vetoed": is_vetoed,
        "veto_reason": reason,
    }
    for key in ("regime_score", "volume_score", "structure_score", "total_score"):
        summary[key] = scores[key]
    summary["risk"] = risk
    return summary
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def run(
    symbols: Optional[List[str]] = None,
    account_equity: float = 10000.0,
    fetch_live_instruments: bool = False,
    max_symbols: Optional[int] = None,
    top_n: int = TOP_N_TOKENS,
):
    """Fetch market data, analyze every symbol, and log a ranked report.

    Args:
        symbols: Explicit symbol list; when None, falls back to
            DEFAULT_SYMBOLS (unless fetch_live_instruments is set).
        account_equity: Account equity in USD used for position sizing.
        fetch_live_instruments: When True, pull the live USDT spot
            instrument list from OKX, falling back to DEFAULT_SYMBOLS
            if the fetch fails.
        max_symbols: Optional cap on how many symbols are fetched.
        top_n: Number of top-ranked setups to display in the log report.

    Returns:
        Dict with "all_results" (per-symbol analysis keyed by symbol),
        "ranked" (list of (symbol, data) pairs sorted by score), and
        "top_n" (the displayed top subset, in rank order).
    """
    if fetch_live_instruments:
        logger.info("Fetching live instrument list from OKX...")
        live_symbols = fetch_instruments("SPOT")
        if live_symbols:
            symbols = live_symbols
            logger.info(f"Found {len(symbols)} live USDT spot instruments")
        else:
            logger.warning("Failed to fetch instruments, using defaults")
            symbols = DEFAULT_SYMBOLS
    elif symbols is None:
        symbols = DEFAULT_SYMBOLS

    logger.info(f"Fetching OHLCV data for {len(symbols)} symbols...")
    ohlcv_map = fetch_multiple(symbols, timeframe=TIMEFRAME, limit=CANDLE_LIMIT, max_symbols=max_symbols)
    logger.info(f"Successfully fetched {len(ohlcv_map)} symbols")

    results = {}
    for sym, df in ohlcv_map.items():
        try:
            results[sym] = analyze_symbol(sym, df, account_equity=account_equity)
        except Exception as e:
            # One bad frame must not abort the whole scan; log and continue.
            logger.error(f"Error analyzing {sym}: {e}", exc_info=True)

    # dict() makes the defensive shallow copy the original built with a
    # redundant identity comprehension (ruff C416).
    ranked = rank_tokens(dict(results))

    logger.info("\n" + "=" * 80)
    logger.info(f"TOP {top_n} SETUPS RANKED BY TOTAL SCORE")
    logger.info("=" * 80)

    top_results = []
    for rank, (sym, data) in enumerate(ranked[:top_n], 1):
        veto_str = f" [VETOED: {data['veto_reason']}]" if data["vetoed"] else ""
        logger.info(
            f"#{rank:>3} {sym:<15} "
            f"Score={data['total_score']:.4f} "
            f"(R={data['regime_score']:.2f} V={data['volume_score']:.2f} S={data['structure_score']:.2f}) "
            f"Trend={data['trend']:<8} "
            f"VolRatio={data['vol_ratio']:.2f} "
            f"Spike={data['volume_spike']} "
            f"Breakout={data['breakout']:>2}"
            f"{veto_str}"
        )
        top_results.append(data)

    logger.info("=" * 80)

    # Highlight the single best non-vetoed setup with its full risk plan.
    non_vetoed = [(s, d) for s, d in ranked if not d["vetoed"]]
    if non_vetoed:
        best_sym, best_data = non_vetoed[0]
        logger.info(f"\nBEST SETUP: {best_sym}")
        risk = best_data["risk"]
        logger.info(f" Entry: {risk['entry_price']:.8f}")
        logger.info(f" Stop (Long): {risk['stop_price_long']:.8f}")
        logger.info(f" Target (Long): {risk['target_long']:.8f}")
        logger.info(f" ATR: {risk['atr']:.8f} ({risk['atr_pct']*100:.2f}%)")
        logger.info(f" Risk Fraction: {risk['risk_fraction']*100:.2f}%")
        logger.info(f" Position Size: ${risk['position_notional']:.2f} notional")

    return {
        "all_results": results,
        "ranked": ranked,
        "top_n": top_results,
    }
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
if __name__ == "__main__":
    import argparse

    # Command-line entry point: parse options, run the scan, and
    # optionally persist a JSON report.
    cli = argparse.ArgumentParser(description="OKX Trading Analysis Engine")
    cli.add_argument("--equity", type=float, default=10000.0, help="Account equity in USD")
    cli.add_argument("--top", type=int, default=TOP_N_TOKENS, help="Number of top setups to display")
    cli.add_argument("--max-symbols", type=int, default=None, help="Limit number of symbols analyzed")
    cli.add_argument("--live-instruments", action="store_true", help="Fetch live instrument list from OKX")
    cli.add_argument("--output", type=str, default=None, help="Save results to JSON file")
    opts = cli.parse_args()

    report = run(
        account_equity=opts.equity,
        fetch_live_instruments=opts.live_instruments,
        max_symbols=opts.max_symbols,
        top_n=opts.top,
    )

    if opts.output:
        # Drop each symbol's nested "risk" dict before serializing;
        # default=str stringifies any remaining non-JSON values.
        trimmed = {}
        for sym, row in report["all_results"].items():
            trimmed[sym] = {field: val for field, val in row.items() if field != "risk"}
        with open(opts.output, "w") as fh:
            json.dump(trimmed, fh, indent=2, default=str)
        logger.info(f"Results saved to {opts.output}")
|