Create MLCryptoForecasterAllAssetsTPSL_ParisTime_02.py
Improvement in predicted data:
=== BTCUSDT (4h) ===
Classification report for BTCUSDT:
              precision    recall  f1-score   support

          -1       0.65      0.54      0.59       287
           0       0.85      0.90      0.87       462
           1       0.91      0.93      0.92       726

    accuracy                           0.85      1475
   macro avg       0.80      0.79      0.80      1475
weighted avg       0.84      0.85      0.84      1475
Time: 2025-04-29 14:00:00, Price: 94846.5000, Prediction: Uptrend
UP Price Target: 100537.2900 (+6.0%)
DN Price Target: 90104.1750 (-5.0%)
Optimal UP TP/SL: +6.0% / -9.0%
Optimal DN TP/SL: +5.0% / -9.0%
Avg. Time to TP: 54.2 hours
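
As a quick sanity check on the output above: the price targets are just the optimized take-profit percentages applied to the last close, and the report's weighted average is the support-weighted mean of the per-class scores. A minimal sketch (figures copied from the log above; variable names are mine, and small differences are rounding in the displayed report):

price = 94846.50
print(price * (1 + 0.06))   # 100537.29  -> UP Price Target
print(price * (1 - 0.05))   # 90104.175  -> DN Price Target

f1, support = [0.59, 0.87, 0.92], [287, 462, 726]
print(sum(f * s for f, s in zip(f1, support)) / sum(support))  # ~0.84, the weighted avg f1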
MLCryptoForecasterAllAssetsTPSL_ParisTime_02.py (ADDED)
@@ -0,0 +1,227 @@
import os
import pandas as pd
import numpy as np
import argparse
from datetime import timedelta
from binance.client import Client
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
import ta
import pytz

# Parse command-line arguments for the timeframe
parser = argparse.ArgumentParser(description="Binance Trend Forecaster with adjustable timeframe")
parser.add_argument("--interval", type=str, default="4h",
                    choices=["1m", "3m", "5m", "15m", "30m", "1h", "4h", "1d"],
                    help="Time interval for klines (e.g. '1h', '4h', '1d')")
args = parser.parse_args()

# Map user-friendly intervals to Binance API constants
interval_map = {
    "1m": Client.KLINE_INTERVAL_1MINUTE,
    "3m": Client.KLINE_INTERVAL_3MINUTE,
    "5m": Client.KLINE_INTERVAL_5MINUTE,
    "15m": Client.KLINE_INTERVAL_15MINUTE,
    "30m": Client.KLINE_INTERVAL_30MINUTE,
    "1h": Client.KLINE_INTERVAL_1HOUR,
    "4h": Client.KLINE_INTERVAL_4HOUR,
    "1d": Client.KLINE_INTERVAL_1DAY
}
interval = interval_map[args.interval]

# Log a message to both the console and the results file
def log_results(message, filename="predictions_results.txt"):
    print(message)
    with open(filename, "a") as f:
        f.write(message + "\n")

# Convert a naive UTC timestamp to Europe/Paris local time
def convert_to_paris_time(utc_time):
    paris_tz = pytz.timezone('Europe/Paris')
    utc_time = utc_time.replace(tzinfo=pytz.utc)
    paris_time = utc_time.astimezone(paris_tz)
    return paris_time.strftime('%Y-%m-%d %H:%M:%S')

# Initialize Binance client (no API key needed for public market data)
client = Client()

# Settings
result_file = f"predictions_results_{args.interval}.txt"

# Delete the results file if it exists, for a fresh start
if os.path.exists(result_file):
    os.remove(result_file)

# Write the CSV header (log lines and CSV rows share this file)
with open(result_file, "w") as f:
    f.write("Asset,Time,Price,Prediction,UP_Price_Target,DN_Price_Target,UP_TP%,UP_SL%,DN_TP%,DN_SL%,Avg_Time_To_TP(h)\n")

# Get all actively trading USDT-quoted symbols
symbols = [s['symbol'] for s in client.get_exchange_info()['symbols']
           if s['status'] == 'TRADING' and s['quoteAsset'] == 'USDT']

# Grid-search take-profit / stop-loss levels for one side (1 = long, 0 = short)
def optimize_tp_sl(df, signals, side, pgrid, lgrid):
    """Return (tp, sl, avg_return) maximizing the average capped return
    over the 10 bars following each historical signal of the given side."""
    best = (0, 0, -np.inf)
    prices = df['close'].values
    idxs = np.where(signals == side)[0]
    for tp in pgrid:
        for sl in lgrid:
            rets = []
            for i in idxs:
                entry = prices[i]
                for j in range(i + 1, min(i + 11, len(prices))):
                    # Long return for side 1, short return otherwise
                    ret = (prices[j] - entry) / entry if side == 1 else (entry - prices[j]) / entry
                    if ret >= tp or ret <= -sl:
                        rets.append(np.sign(ret) * min(abs(ret), max(tp, sl)))
                        break
            if rets:
                avg_ret = np.mean(rets)
                if avg_ret > best[2]:
                    best = (tp, sl, avg_ret)
    return best

def calculate_time_to_threshold(df, threshold=0.01, lookahead_bars=24):
    """
    Calculate how long it takes to cross a price change threshold.
    Returns time in hours.
    """
    n = len(df)
    times = np.full(n, np.nan)
    minutes_per_bar = (df.index[1] - df.index[0]).total_seconds() / 60

    for i in range(n):
        entry = df['close'].iat[i]
        target = entry * (1 + threshold)  # For long positions

        for k in range(1, lookahead_bars + 1):
            j = i + k
            if j >= n:
                break
            if df['close'].iat[j] >= target:
                times[i] = k * minutes_per_bar / 60  # Convert to hours
                break

    return times

# Main loop: process each symbol
for symbol in symbols:
    try:
        log_results(f"\n=== {symbol} ({args.interval}) ===", result_file)

        # Load cached data and fetch only the new bars, or download the full history
        data_file = f"{symbol}_data_{args.interval}_full.csv"
        if os.path.exists(data_file):
            df = pd.read_csv(data_file, index_col=0, parse_dates=True)
            last_ts = df.index[-1]
            # Resume one bar after the last saved timestamp
            minutes_per_interval = {"1m": 1, "3m": 3, "5m": 5, "15m": 15,
                                    "30m": 30, "1h": 60, "4h": 240, "1d": 1440}
            start = (last_ts + timedelta(minutes=minutes_per_interval[args.interval])).strftime("%d %B %Y %H:%M:%S")
            new = client.get_historical_klines(symbol, interval, start)
            if new:
                new_df = pd.DataFrame(new, columns=[
                    'timestamp', 'open', 'high', 'low', 'close', 'volume',
                    'close_time', 'quote_av', 'trades', 'tb_base_av', 'tb_quote_av', 'ignore'
                ])
                new_df = new_df[['timestamp', 'open', 'high', 'low', 'close', 'volume']].astype(float)
                new_df['timestamp'] = pd.to_datetime(new_df['timestamp'], unit='ms')
                new_df.set_index('timestamp', inplace=True)
                # Deduplicate by timestamp, keeping the freshest copy of each bar
                df = pd.concat([df, new_df])
                df = df[~df.index.duplicated(keep='last')]
                df.to_csv(data_file)
        else:
            klines = client.get_historical_klines(symbol, interval, "01 December 2021")
            df = pd.DataFrame(klines, columns=[
                'timestamp', 'open', 'high', 'low', 'close', 'volume',
                'close_time', 'quote_av', 'trades', 'tb_base_av', 'tb_quote_av', 'ignore'
            ])
            df = df[['timestamp', 'open', 'high', 'low', 'close', 'volume']].astype(float)
            df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms')
            df.set_index('timestamp', inplace=True)
            df.to_csv(data_file)

        # Compute technical indicators
        df['rsi'] = ta.momentum.RSIIndicator(df['close'], window=14).rsi()
        df['macd'] = ta.trend.MACD(df['close']).macd()
        for s in [10, 20, 50, 100]:
            df[f'ema_{s}'] = df['close'].ewm(span=s).mean()
        for w in [10, 20, 50, 100]:
            df[f'sma_{w}'] = df['close'].rolling(window=w).mean()
        bb = ta.volatility.BollingerBands(df['close'], window=20, window_dev=2)
        df['bbw'] = (bb.bollinger_hband() - bb.bollinger_lband()) / bb.bollinger_mavg()
        df['atr'] = ta.volatility.AverageTrueRange(df['high'], df['low'], df['close'], window=14).average_true_range()
        df['adx'] = ta.trend.ADXIndicator(df['high'], df['low'], df['close'], window=14).adx()
        st = ta.momentum.StochasticOscillator(df['high'], df['low'], df['close'], window=14)
        df['st_k'] = st.stoch()
        df['st_d'] = st.stoch_signal()
        df['wr'] = ta.momentum.WilliamsRIndicator(df['high'], df['low'], df['close'], lbp=14).williams_r()
        df['cci'] = ta.trend.CCIIndicator(df['high'], df['low'], df['close'], window=20).cci()
        df['mom'] = df['close'] - df['close'].shift(10)
        ichi = ta.trend.IchimokuIndicator(df['high'], df['low'], window1=9, window2=26, window3=52)
        df['span_a'] = ichi.ichimoku_a()
        df['span_b'] = ichi.ichimoku_b()
        df.dropna(inplace=True)

        # Label signals from the Ichimoku cloud:
        # 1 = close above the cloud (uptrend), 0 = close below it (downtrend),
        # -1 = close inside the cloud (neutral)
        df['signal'] = np.select([
            (df['close'] > df['span_a']) & (df['close'] > df['span_b']),
            (df['close'] < df['span_a']) & (df['close'] < df['span_b'])
        ], [1, 0], default=-1)

        # Chronological train/test split (no shuffling, to avoid look-ahead)
        features = [c for c in df.columns if c not in ['open', 'high', 'low', 'close', 'volume', 'signal']]
        X, y = df[features], df['signal']
        Xtr, Xte, ytr, yte = train_test_split(X, y, test_size=0.2, shuffle=False)
        model = RandomForestClassifier(n_estimators=200, class_weight='balanced', random_state=42)
        model.fit(Xtr, ytr)
        ypr = model.predict(Xte)

        # Log classification report
        report = classification_report(yte, ypr, zero_division=0)
        log_results(f"Classification report for {symbol}:\n{report}", result_file)

        # Predict the trend of the latest bar
        latest_df = X.iloc[-1:]
        trend_label = model.predict(latest_df)[0]

        # Convert the timestamp to Paris time and fetch the latest price
        pred_time_utc = df.index[-1]
        pred_time = convert_to_paris_time(pred_time_utc)
        current_price = df['close'].iloc[-1]
        trend_str = {1: 'Uptrend', 0: 'Downtrend', -1: 'Neutral'}[trend_label]

        # Optimize TP/SL over a 1%-9% grid for each side, using the model's
        # historical signals
        hist_sign = model.predict(X)
        pgrid = np.arange(0.01, 0.1, 0.01)
        lgrid = np.arange(0.01, 0.1, 0.01)
        up_tp, up_sl, _ = optimize_tp_sl(df, hist_sign, 1, pgrid, lgrid)
        dn_tp, dn_sl, _ = optimize_tp_sl(df, hist_sign, 0, pgrid, lgrid)

        # Calculate predicted price targets
        predicted_up_price = current_price * (1 + up_tp)
        predicted_dn_price = current_price * (1 - dn_tp)

        # Estimate time to reach TP (long positions only); entries that never
        # hit the threshold within the lookahead window stay NaN
        time_to_tp = calculate_time_to_threshold(df, threshold=up_tp, lookahead_bars=24)
        avg_time_to_tp = np.nanmean(time_to_tp)  # Average in hours

        # Log results
        log_results(f"Time: {pred_time}, Price: {current_price:.4f}, Prediction: {trend_str}", result_file)
        log_results(f"UP Price Target: {predicted_up_price:.4f} (+{up_tp*100:.1f}%)", result_file)
        log_results(f"DN Price Target: {predicted_dn_price:.4f} (-{dn_tp*100:.1f}%)", result_file)
        log_results(f"Optimal UP TP/SL: +{up_tp*100:.1f}% / -{up_sl*100:.1f}%", result_file)
        log_results(f"Optimal DN TP/SL: +{dn_tp*100:.1f}% / -{dn_sl*100:.1f}%", result_file)
        log_results(f"Avg. Time to TP: {avg_time_to_tp:.1f} hours", result_file)

        # Append the machine-readable CSV line
        with open(result_file, "a") as f:
            f.write(f"{symbol},{pred_time},{current_price:.4f},{trend_str},"
                    f"{predicted_up_price:.4f},{predicted_dn_price:.4f},"
                    f"{up_tp*100:.1f},{up_sl*100:.1f},{dn_tp*100:.1f},{dn_sl*100:.1f},"
                    f"{avg_time_to_tp:.1f}\n")

    except Exception as e:
        log_results(f"Error processing {symbol}: {e}", result_file)

# End of processing
log_results("\nAll assets processed.", result_file)