Spaces:
Running
Running
Initial commit for new space
Browse files- Dockerfile +6 -0
- README.md +3 -3
- app.py +805 -0
- csv_data/2015-2017.csv +0 -0
- csv_data/2017-2019.csv +0 -0
- csv_data/2019-2020.csv +0 -0
- csv_data/2020-2022.csv +0 -0
- csv_data/2022-2024.csv +0 -0
- csv_data/2024-NOW.csv +0 -0
- requirements.txt +6 -0
Dockerfile
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Streamlit backtesting sandbox image (Hugging Face Spaces, Docker SDK).
FROM python:3.11-slim

WORKDIR /app

# Copy and install requirements before the app code so the dependency layer
# is cached and only rebuilt when requirements.txt changes.
COPY requirements.txt ./requirements.txt
RUN pip install --no-cache-dir --index-url https://pypi.org/simple/ -r requirements.txt

COPY . .

# Spaces route external traffic to port 7860; bind explicitly to all
# interfaces so the server is reachable from outside the container.
EXPOSE 7860
CMD ["streamlit", "run", "app.py", "--server.port=7860", "--server.address=0.0.0.0"]
|
README.md
CHANGED
|
@@ -1,8 +1,8 @@
|
|
| 1 |
---
|
| 2 |
-
title: Backtesting
|
| 3 |
-
emoji:
|
| 4 |
colorFrom: pink
|
| 5 |
-
colorTo:
|
| 6 |
sdk: docker
|
| 7 |
pinned: false
|
| 8 |
---
|
|
|
|
| 1 |
---
|
| 2 |
+
title: Backtesting Sandbox
|
| 3 |
+
emoji: π
|
| 4 |
colorFrom: pink
|
| 5 |
+
colorTo: blue
|
| 6 |
sdk: docker
|
| 7 |
pinned: false
|
| 8 |
---
|
app.py
ADDED
|
@@ -0,0 +1,805 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import os
|
| 4 |
+
import numpy as np
|
| 5 |
+
from datetime import date
|
| 6 |
+
import plotly.graph_objects as go
|
| 7 |
+
import itertools
|
| 8 |
+
import json
|
| 9 |
+
import pandas_ta as ta
|
| 10 |
+
from multiprocessing import Pool, cpu_count
|
| 11 |
+
from functools import partial
|
| 12 |
+
|
| 13 |
+
# --- 0. Settings Management Functions ---
# JSON files used to persist user choices between Streamlit sessions.
CONFIG_FILE = "config.json"            # default strategy parameters
VETO_CONFIG_FILE = "veto_config.json"  # saved veto-filter setup
TOP_SETUPS_FILE = "top_setups.json"    # optimiser's best setups, keyed by side
|
| 17 |
+
|
| 18 |
+
def save_settings(params_to_save):
    """Persist the given parameter dict to CONFIG_FILE and confirm in the sidebar."""
    with open(CONFIG_FILE, 'w') as config_handle:
        json.dump(params_to_save, config_handle, indent=4)
    st.sidebar.success("Settings saved as default!")
|
| 22 |
+
|
| 23 |
+
def load_settings():
    """Return strategy parameters: built-in defaults overlaid with any saved config.

    Keys absent from CONFIG_FILE keep their default values, so old config
    files remain usable when new parameters are introduced.
    """
    settings = {
        "large_ma_period": 50, "bband_period": 20, "bband_std_dev": 2.0,
        "long_entry_threshold_pct": 0.0, "long_exit_ma_threshold_pct": 0.0,
        "long_stop_loss_pct": 0.0, "long_delay_days": 0,
        "short_entry_threshold_pct": 0.0, "short_exit_ma_threshold_pct": 0.0,
        "short_stop_loss_pct": 0.0, "short_delay_days": 0,
        "confidence_threshold": 50
    }
    if os.path.exists(CONFIG_FILE):
        with open(CONFIG_FILE, 'r') as config_handle:
            settings.update(json.load(config_handle))
    return settings
|
| 31 |
+
|
| 32 |
+
def save_veto_setup(veto_setup):
    """Write the veto-filter setup to VETO_CONFIG_FILE and confirm in the sidebar."""
    with open(VETO_CONFIG_FILE, 'w') as out_handle:
        json.dump(veto_setup, out_handle, indent=4)
    st.sidebar.success("Veto filter saved as default!")
|
| 36 |
+
|
| 37 |
+
def load_veto_setup():
    """Return the saved veto-filter setup, or None when no file has been saved."""
    if not os.path.exists(VETO_CONFIG_FILE):
        return None
    with open(VETO_CONFIG_FILE, 'r') as config_handle:
        return json.load(config_handle)
|
| 42 |
+
|
| 43 |
+
def save_top_setups(results_df, side, num_setups=6):
    """Persist the top unique optimisation setups for one side to disk.

    Rows are ranked by score (ties broken by fewest enabled factors),
    de-duplicated on their performance metrics, and the best ``num_setups``
    are merged into the TOP_SETUPS_FILE JSON under the given ``side`` key.
    """
    working = results_df.copy()

    # Rows whose performance metrics all match are the same effective setup.
    deduplication_cols = [
        'Conf. Threshold', 'Avg Profit/Trade', 'Good/Bad Ratio',
        'Winning Tickers', 'Losing Tickers', 'Avg Entry Conf.',
        'Good Score', 'Bad Score', 'Norm. Score %', 'Total Trades'
    ]

    # Count enabled confidence factors per row; simpler setups win ties.
    working['FactorsOn'] = working[['RSI', 'Volatility', 'TREND', 'Volume']].apply(
        lambda row: (row == 'On').sum(), axis=1
    )
    sort_col = 'Good Score' if side in ['long', 'best'] else 'Bad Score'

    ranked = working.sort_values(by=[sort_col, 'FactorsOn'], ascending=[False, True])
    unique_ranked = ranked.drop_duplicates(subset=deduplication_cols, keep='first')
    top_setups = unique_ranked.head(num_setups).to_dict('records')

    # Merge into previously-saved sides instead of overwriting the whole file.
    if os.path.exists(TOP_SETUPS_FILE):
        with open(TOP_SETUPS_FILE, 'r') as in_handle:
            all_top_setups = json.load(in_handle)
    else:
        all_top_setups = {}

    all_top_setups[side] = top_setups

    with open(TOP_SETUPS_FILE, 'w') as out_handle:
        json.dump(all_top_setups, out_handle, indent=4)

    st.sidebar.success(f"Top {len(top_setups)} unique {side.title()} setups saved!")
|
| 75 |
+
|
| 76 |
+
def load_top_setups():
    """Return the saved top-setups dict, or None when none have been saved."""
    if not os.path.exists(TOP_SETUPS_FILE):
        return None
    with open(TOP_SETUPS_FILE, 'r') as in_handle:
        return json.load(in_handle)
|
| 81 |
+
|
| 82 |
+
# --- 1. Data Loading and Cleaning Functions ---
@st.cache_data
def load_all_data(folder_path):
    """Combine every CSV in ``folder_path`` into one date-indexed DataFrame.

    Returns ``(dataframe, status_message)``; on failure the first element is
    None. Rows with unparseable dates are dropped and, for duplicate dates,
    the last occurrence wins (later files override earlier ones).
    """
    all_files = [f for f in os.listdir(folder_path) if f.endswith('.csv')]
    if not all_files:
        st.error("No CSV files found in the 'csv_data' folder.")
        return None, None

    frames = []
    for file_name in all_files:
        try:
            frames.append(pd.read_csv(
                os.path.join(folder_path, file_name),
                header=0, index_col=0, dayfirst=True, parse_dates=True,
            ))
        except Exception as e:
            return None, f"Could not read or process {file_name}. Error: {e}"

    if not frames:
        return None, "No data could be loaded from the CSV files."

    master_df = pd.concat(frames)
    # Force a clean datetime index; anything unparseable becomes NaT and is dropped.
    master_df.index = pd.to_datetime(master_df.index, errors='coerce')
    master_df = master_df[master_df.index.notna()]
    if master_df.index.has_duplicates:
        master_df = master_df.loc[~master_df.index.duplicated(keep='last')]
    master_df.sort_index(inplace=True)
    return master_df, f"Successfully combined data from {len(all_files)} files."
|
| 112 |
+
|
| 113 |
+
def clean_data_and_report_outliers(df):
    """Blank out implausible one-day price moves (>100%) and report them.

    Columns containing ``_volume`` (case-insensitive) are skipped. Returns
    the (mutated in place) frame plus a list of
    ``{'Ticker', 'Outliers Removed'}`` entries for each affected column.
    """
    outlier_report = []
    for column in df.columns:
        if '_volume' in str(column).lower():
            continue  # volume series are not price data
        prices = pd.to_numeric(df[column], errors='coerce')
        abs_moves = prices.pct_change().abs()
        spike_dates = abs_moves[abs_moves > 1.0].index
        if not spike_dates.empty:
            outlier_report.append({'Ticker': column, 'Outliers Removed': len(spike_dates)})
            df.loc[spike_dates, column] = np.nan
    return df, outlier_report
|
| 124 |
+
|
| 125 |
+
def normalise_strategy_score(raw_score, benchmark_for_100_percent=0.25):
    """Map a raw strategy score onto a 0-100% scale.

    Non-positive scores collapse to 0.0; the benchmark value maps to 100%,
    and anything above it is capped at 100.
    """
    if raw_score <= 0:
        return 0.0
    scaled = (raw_score / benchmark_for_100_percent) * 100
    return min(scaled, 100.0)
|
| 128 |
+
|
| 129 |
+
# --- 2. Custom Backtesting Engine ---
def calculate_confidence_score(df, use_rsi, use_volatility, use_trend, use_volume, rsi_w, vol_w, trend_w, vol_w_val):
    """Blend the enabled factors into 0-100 long/short confidence series.

    Each active factor contributes a clipped 0-1 sub-score scaled by its
    weight; totals are normalised by the sum of active weights. With no
    active factor both sides return a constant 100 (i.e. no filtering).
    """
    long_total = pd.Series(0.0, index=df.index)
    short_total = pd.Series(0.0, index=df.index)
    active_weight = 0.0

    # RSI: oversold (<30) favours longs, overbought (>70) favours shorts.
    if use_rsi and 'RSI' in df.columns:
        active_weight += rsi_w
        long_total += ((30 - df['RSI']) / 30).clip(0, 1) * rsi_w
        short_total += ((df['RSI'] - 70) / 30).clip(0, 1) * rsi_w

    # Volatility: all-or-nothing bonus to both sides above 2.5% daily vol.
    if use_volatility and 'Volatility_p' in df.columns:
        active_weight += vol_w
        vol_bonus = (df['Volatility_p'] > 0.025).astype(float) * vol_w
        long_total += vol_bonus
        short_total += vol_bonus

    # Trend: signed distance of Close from the 200-day SMA, saturating at 10%.
    if use_trend and 'SMA_200' in df.columns:
        active_weight += trend_w
        trend_dist = (df['Close'] - df['SMA_200']) / df['SMA_200']
        long_total += (trend_dist / 0.10).clip(0, 1) * trend_w
        short_total += (-trend_dist / 0.10).clip(0, 1) * trend_w

    # Volume: ratio vs 50-day average, ramping linearly from 1.75x to 4x.
    if use_volume and 'Volume_Ratio' in df.columns:
        active_weight += vol_w_val
        volume_bonus = ((df['Volume_Ratio'] - 1.75) / 2.25).clip(0, 1) * vol_w_val
        long_total += volume_bonus
        short_total += volume_bonus

    if active_weight > 0:
        return (long_total / active_weight) * 100, (short_total / active_weight) * 100
    return pd.Series(100.0, index=df.index), pd.Series(100.0, index=df.index)
|
| 156 |
+
|
| 157 |
+
def run_backtest(data, params, use_rsi, use_volatility, use_trend, use_volume, rsi_weight, volatility_weight, trend_weight, volume_weight, veto_setup=None):
    """Run the Bollinger-band mean-reversion backtest on one ticker's data.

    Parameters
    ----------
    data : DataFrame with a 'Close' column (and optionally 'Volume').
    params : dict of strategy parameters (MA/band periods, entry/exit
        thresholds, stop losses, entry delays, confidence threshold).
    use_* / *_weight : which confidence factors are active and their weights.
    veto_setup : optional dict; when its own confidence score fires, entries
        are blocked (an inverse filter).

    Returns
    -------
    (long_pnl, short_pnl, avg_long_profit_per_trade, avg_short_profit_per_trade,
     enriched_df, (long_entry_log, long_exit_index, short_entry_log, short_exit_index),
     open_trades)
    """
    df = data.copy()
    # Coerce prices to numeric; zeros are treated as missing data.
    df['Close'] = pd.to_numeric(df['Close'], errors='coerce').replace(0, np.nan)
    df.dropna(subset=['Close'], inplace=True)
    # Not enough history for the indicators -> empty result tuple.
    if len(df) < params.get('large_ma_period', 200) or len(df) < params.get('bband_period', 20):
        return 0, 0, 0, 0, None, ([], [], [], []), []
    # --- Indicators ---
    df['large_ma'] = df['Close'].rolling(window=params['large_ma_period']).mean()
    # pandas_ta names the band columns BBL_<period>_<std>/BBU_<period>_<std>.
    bband = df.ta.bbands(length=params['bband_period'], std=params['bband_std_dev'], append=True)
    df['bband_lower'] = bband[f'BBL_{params["bband_period"]}_{params["bband_std_dev"]}']
    df['bband_upper'] = bband[f'BBU_{params["bband_period"]}_{params["bband_std_dev"]}']
    df.ta.rsi(length=14, append=True, col_names=('RSI',))
    df['Volatility_p'] = df['Close'].pct_change().rolling(window=14).std()
    df['SMA_200'] = df['Close'].rolling(window=200, min_periods=1).mean()
    if 'Volume' in df.columns:
        df['Volume'] = pd.to_numeric(df['Volume'], errors='coerce').fillna(0)
        df['Volume_MA50'] = df['Volume'].rolling(window=50, min_periods=1).mean()
        # Guard against division by a zero volume average.
        df['Volume_Ratio'] = (df['Volume'] / df['Volume_MA50']).replace([np.inf, -np.inf], np.nan).fillna(0)
    # --- Confidence scores and optional veto filter ---
    df['long_confidence_score'], df['short_confidence_score'] = calculate_confidence_score(df, use_rsi, use_volatility, use_trend, use_volume, rsi_weight, volatility_weight, trend_weight, volume_weight)
    if veto_setup:
        veto_weight = veto_setup.get('Weight', 1.0)
        # The veto reuses the scorer with its own factor toggles and a single weight.
        df['long_veto_score'], df['short_veto_score'] = calculate_confidence_score(df, veto_setup['RSI'], veto_setup['Volatility'], veto_setup['TREND'], veto_setup['Volume'], veto_weight, veto_weight, veto_weight, veto_weight)
    # --- Entry/exit triggers ---
    # Enter long below the lower band (minus threshold), short above the upper band.
    base_long_trigger = df['Close'] < (df['bband_lower'] * (1 - params['long_entry_threshold_pct']))
    base_short_trigger = df['Close'] > (df['bband_upper'] * (1 + params['short_entry_threshold_pct']))
    long_entry_trigger = base_long_trigger & (df['long_confidence_score'] >= params['confidence_threshold'])
    short_entry_trigger = base_short_trigger & (df['short_confidence_score'] >= params['confidence_threshold'])
    if veto_setup:
        # A firing veto score suppresses the entry signal on that day.
        long_veto_trigger = df['long_veto_score'] >= veto_setup['Conf. Threshold']
        short_veto_trigger = df['short_veto_score'] >= veto_setup['Conf. Threshold']
        long_entry_trigger &= ~long_veto_trigger
        short_entry_trigger &= ~short_veto_trigger
    # Exit when price reverts to the large MA (with threshold) or the opposite band.
    long_exit_trigger = (df['Close'] >= (df['large_ma'] * (1 + params['long_exit_ma_threshold_pct']))) | (df['Close'] >= df['bband_upper'])
    short_exit_trigger = (df['Close'] <= (df['large_ma'] * (1 - params['short_exit_ma_threshold_pct']))) | (df['Close'] <= df['bband_lower'])
    # --- Positions: mark entry/exit events, then forward-fill to hold positions.
    # NOTE: exit assignment runs after entry, so a same-day exit wins.
    df['long_signal'] = np.nan; df.loc[long_entry_trigger, 'long_signal'] = 1; df.loc[long_exit_trigger, 'long_signal'] = 0
    df['short_signal'] = np.nan; df.loc[short_entry_trigger, 'short_signal'] = -1; df.loc[short_exit_trigger, 'short_signal'] = 0
    df['long_position'] = df['long_signal'].ffill().fillna(0); df['short_position'] = df['short_signal'].ffill().fillna(0)
    # Optional delayed entry: shift the whole position series forward N days.
    if params['long_delay_days'] > 0: df['long_position'] = df['long_position'].shift(params['long_delay_days']).fillna(0)
    if params['short_delay_days'] > 0: df['short_position'] = df['short_position'].shift(params['short_delay_days']).fillna(0)
    # --- Stop losses ---
    if params['long_stop_loss_pct'] > 0:
        # Entry price is forward-filled so each open day compares to its own entry.
        long_entry_prices = df['Close'].where((df['long_position'] == 1) & (df['long_position'].shift(1) == 0)).ffill()
        long_sl_hit = (df['Close'] < (long_entry_prices * (1 - params['long_stop_loss_pct']))) & (df['long_position'] == 1)
        # NOTE: zeroes the position from the hit onward — no re-entry after a
        # stop-loss fires within this backtest window.
        for index in long_sl_hit[long_sl_hit].index: df.loc[index:, 'long_position'] = 0
    if params['short_stop_loss_pct'] > 0:
        short_entry_prices = df['Close'].where((df['short_position'] == -1) & (df['short_position'].shift(1) == 0)).ffill()
        short_sl_hit = (df['Close'] > (short_entry_prices * (1 + params['short_stop_loss_pct']))) & (df['short_position'] == -1)
        for index in short_sl_hit[short_sl_hit].index: df.loc[index:, 'short_position'] = 0
    # --- P&L: position is applied to the NEXT day's return (shift(1)). ---
    df['daily_return'] = df['Close'].pct_change()
    df['long_strategy_return'] = df['long_position'].shift(1) * df['daily_return']
    df['short_strategy_return'] = df['short_position'].shift(1) * df['daily_return']
    final_long_pnl = (1 + df['long_strategy_return']).prod(skipna=True) - 1
    final_short_pnl = (1 + df['short_strategy_return']).prod(skipna=True) - 1
    # --- Per-trade bookkeeping: transitions 0->1 / 1->0 (and -1 for shorts). ---
    long_entries = df[(df['long_position'] == 1) & (df['long_position'].shift(1) == 0)]
    long_exits = df[(df['long_position'] == 0) & (df['long_position'].shift(1) == 1)]
    short_entries = df[(df['short_position'] == -1) & (df['short_position'].shift(1) == 0)]
    short_exits = df[(df['short_position'] == 0) & (df['short_position'].shift(1) == -1)]
    # Pair each entry with the first subsequent exit to get per-trade profit.
    long_trade_profits = []
    for idx, row in long_entries.iterrows():
        future_exits = long_exits[long_exits.index > idx]
        if not future_exits.empty: long_trade_profits.append((future_exits.iloc[0]['Close'] / row['Close']) - 1)
    avg_long_profit_per_trade = np.mean(long_trade_profits) if long_trade_profits else 0
    short_trade_profits = []
    for idx, row in short_entries.iterrows():
        future_exits = short_exits[short_exits.index > idx]
        # Shorts profit when price falls, hence the sign flip.
        if not future_exits.empty: short_trade_profits.append(((future_exits.iloc[0]['Close'] / row['Close']) - 1) * -1)
    avg_short_profit_per_trade = np.mean(short_trade_profits) if short_trade_profits else 0
    # Entry logs carry the confidence score at the moment of entry (for charts).
    long_trades_log = [{'date': idx, 'price': row['Close'], 'confidence': row['long_confidence_score']} for idx, row in long_entries.iterrows()]
    short_trades_log = [{'date': idx, 'price': row['Close'], 'confidence': row['short_confidence_score']} for idx, row in short_entries.iterrows()]
    # --- Trades still open on the final bar, with unrealised P&L. ---
    open_trades = []
    if not df.empty:
        last_close = df['Close'].iloc[-1]
        if df['long_position'].iloc[-1] == 1 and not long_entries.empty:
            last_entry = long_entries.iloc[-1]
            pnl = (last_close / last_entry['Close']) - 1
            open_trades.append({'Side': 'Long', 'Date Open': last_entry.name, 'Start Confidence': last_entry['long_confidence_score'], 'Current % P/L': pnl})
        if df['short_position'].iloc[-1] == -1 and not short_entries.empty:
            last_entry = short_entries.iloc[-1]
            pnl = ((last_close / last_entry['Close']) - 1) * -1
            open_trades.append({'Side': 'Short', 'Date Open': last_entry.name, 'Start Confidence': last_entry['short_confidence_score'], 'Current % P/L': pnl})
    df.sort_index(inplace=True)
    return final_long_pnl, final_short_pnl, avg_long_profit_per_trade, avg_short_profit_per_trade, df, (long_trades_log, long_exits.index, short_trades_log, short_exits.index), open_trades
|
| 236 |
+
|
| 237 |
+
# --- 3. Charting and Display Functions ---
def generate_long_plot(df, trades, ticker):
    """Build a Plotly figure of price, MA and bands with long trade markers."""
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=df.index, y=df['Close'], mode='lines', name='Close Price', line=dict(color='blue')))
    fig.add_trace(go.Scatter(x=df.index, y=df['large_ma'], mode='lines', name='Large MA', line=dict(color='orange', dash='dash')))
    fig.add_trace(go.Scatter(x=df.index, y=df['bband_upper'], mode='lines', name='Upper Band', line=dict(color='gray', width=0.5)))
    fig.add_trace(go.Scatter(x=df.index, y=df['bband_lower'], mode='lines', name='Lower Band', line=dict(color='gray', width=0.5), fill='tonexty', fillcolor='rgba(211,211,211,0.2)'))

    long_entries_log, long_exits, _, _ = trades
    if long_entries_log:
        entry_dates = [t['date'] for t in long_entries_log]
        entry_prices = [t['price'] for t in long_entries_log]
        hover_labels = [f"Confidence: {t['confidence']:.0f}%" for t in long_entries_log]
        fig.add_trace(go.Scatter(x=entry_dates, y=entry_prices, mode='markers', name='Long Entry', marker=dict(color='green', symbol='triangle-up', size=12), text=hover_labels, hoverinfo='text'))
    if not long_exits.empty:
        fig.add_trace(go.Scatter(x=long_exits, y=df.loc[long_exits, 'Close'], mode='markers', name='Long Exit', marker=dict(color='darkgreen', symbol='x', size=8)))

    fig.update_layout(title=f'Long Trades for {ticker}', xaxis_title='Date', yaxis_title='Price', legend_title="Indicator")
    return fig
|
| 246 |
+
|
| 247 |
+
def generate_short_plot(df, trades, ticker):
    """Build a Plotly figure of price, MA and bands with short trade markers."""
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=df.index, y=df['Close'], mode='lines', name='Close Price', line=dict(color='blue')))
    fig.add_trace(go.Scatter(x=df.index, y=df['large_ma'], mode='lines', name='Large MA', line=dict(color='orange', dash='dash')))
    fig.add_trace(go.Scatter(x=df.index, y=df['bband_upper'], mode='lines', name='Upper Band', line=dict(color='gray', width=0.5)))
    fig.add_trace(go.Scatter(x=df.index, y=df['bband_lower'], mode='lines', name='Lower Band', line=dict(color='gray', width=0.5), fill='tonexty', fillcolor='rgba(211,211,211,0.2)'))

    _, _, short_entries_log, short_exits = trades
    if short_entries_log:
        entry_dates = [t['date'] for t in short_entries_log]
        entry_prices = [t['price'] for t in short_entries_log]
        hover_labels = [f"Confidence: {t['confidence']:.0f}%" for t in short_entries_log]
        fig.add_trace(go.Scatter(x=entry_dates, y=entry_prices, mode='markers', name='Short Entry', marker=dict(color='red', symbol='triangle-down', size=12), text=hover_labels, hoverinfo='text'))
    if not short_exits.empty:
        fig.add_trace(go.Scatter(x=short_exits, y=df.loc[short_exits, 'Close'], mode='markers', name='Short Exit', marker=dict(color='darkred', symbol='x', size=8)))

    fig.update_layout(title=f'Short Trades for {ticker}', xaxis_title='Date', yaxis_title='Price', legend_title="Indicator")
    return fig
|
| 255 |
+
|
| 256 |
+
def display_summary_analytics(summary_df):
    """Render side-by-side Long/Short performance summaries in Streamlit.

    Expects ``summary_df`` to carry, per ticker, the columns
    'Num {side} Trades', 'Avg {side} Profit per Trade',
    'Cumulative {side} P&L' and 'Avg {side} Confidence' for both sides.
    Only tickers with at least one trade on a side count toward that side.
    """
    st.subheader("Overall Strategy Performance")
    col1, col2 = st.columns(2)
    for side in ["Long", "Short"]:
        # Restrict to tickers that actually traded on this side.
        active_trades_df = summary_df[summary_df[f'Num {side} Trades'] > 0]
        container = col1 if side == "Long" else col2
        with container:
            st.subheader(f"{side} Trades")
            if not active_trades_df.empty:
                total_trades = active_trades_df[f'Num {side} Trades'].sum()
                # Trade-count-weighted average profit across tickers.
                avg_trade_profit = (active_trades_df[f'Avg {side} Profit per Trade'] * active_trades_df[f'Num {side} Trades']).sum() / total_trades if total_trades > 0 else 0
                avg_cumulative_profit = active_trades_df[f'Cumulative {side} P&L'].mean()
                avg_confidence = active_trades_df[f'Avg {side} Confidence'].mean()
                if pd.isna(avg_confidence): avg_confidence = 0
                good_tickers = (active_trades_df[f'Cumulative {side} P&L'] > 0).sum(); bad_tickers = (active_trades_df[f'Cumulative {side} P&L'] < 0).sum()
                # inf when no losers; raw score then collapses to 0.0 below.
                good_bad_ratio = good_tickers / bad_tickers if bad_tickers > 0 else float('inf')
                raw_strategy_score = avg_trade_profit * good_bad_ratio if np.isfinite(good_bad_ratio) else 0.0
                display_score = normalise_strategy_score(raw_strategy_score)
                st.metric("Strategy Score", f"{display_score:.2f}%"); st.metric("Avg Cumulative Profit (Active Tickers)", f"{avg_cumulative_profit:.2%}"); st.metric("Avg Profit per Trade (Active Tickers)", f"{avg_trade_profit:.2%}"); st.metric(f"Average Entry Confidence", f"{avg_confidence:.0f}%")
                st.text(f"Profitable Tickers: {good_tickers}")
                st.text(f"Losing Tickers: {bad_tickers}")
                st.text(f"Total Individual Trades: {int(total_trades)}")
                st.text(f"Good/Bad Ratio: {good_bad_ratio:.2f}")
            else: st.info("No trades found for this side with current settings.")
|
| 280 |
+
|
| 281 |
+
# --- 4. Optimisation Functions (Parallelised) ---
def run_single_parameter_test(params, master_df, optimise_for, tickers, date_range, power, confidence_settings):
    """Evaluate one parameter combination across tickers (worker for Pool).

    Runs the backtest for each ticker over ``date_range`` and aggregates a
    single fitness metric:
    ``(avg_profit_per_trade ** power) * (winning / losing tickers)`` when
    the average profit is positive, otherwise the (negative) average itself.
    Returns ``(metric, params)`` so the parent can track the best combo;
    the metric is ``-inf`` when no trades occurred at all.
    """
    total_profit_weighted_avg, total_trades, winning_tickers, losing_tickers = 0, 0, 0, 0
    use_rsi, use_vol, use_trend, use_volume = confidence_settings['toggles']
    rsi_w, vol_w, trend_w, volume_w = confidence_settings['weights']

    # Single-ticker mode passes a bare string; normalise to a list.
    if not isinstance(tickers, list): tickers = [tickers]
    for ticker in tickers:
        # Slice out this ticker's price (and volume, when present) columns.
        cols_to_use = [ticker]
        if f'{ticker}_Volume' in master_df.columns: cols_to_use.append(f'{ticker}_Volume')
        ticker_data = master_df.loc[date_range[0]:date_range[1], cols_to_use]
        rename_dict = {ticker: 'Close', f'{ticker}_Volume': 'Volume'}
        ticker_data = ticker_data.rename(columns=rename_dict)
        if not ticker_data.empty:
            long_pnl, short_pnl, avg_long_trade, avg_short_trade, _, trades, _ = run_backtest(
                ticker_data, params, use_rsi, use_vol, use_trend, use_volume, rsi_w, vol_w, trend_w, volume_w
            )
            # trades[0] is the long entry log, trades[2] the short entry log.
            if optimise_for == 'long': pnl, avg_trade_profit, num_trades = long_pnl, avg_long_trade, len(trades[0])
            else: pnl, avg_trade_profit, num_trades = short_pnl, avg_short_trade, len(trades[2])
            if num_trades > 0:
                total_trades += num_trades; total_profit_weighted_avg += avg_trade_profit * num_trades
                if pnl > 0: winning_tickers += 1
                elif pnl < 0: losing_tickers += 1
    current_metric = -np.inf
    if total_trades > 0:
        overall_avg_profit_per_trade = total_profit_weighted_avg / total_trades
        # Ratio edge cases: all winners -> inf, no winners or losers -> 0.
        if losing_tickers > 0: good_bad_ratio = winning_tickers / losing_tickers
        elif winning_tickers > 0: good_bad_ratio = np.inf
        else: good_bad_ratio = 0
        # ``power`` = 2 emphasises per-trade profit (squared weighting mode).
        if overall_avg_profit_per_trade > 0: current_metric = (overall_avg_profit_per_trade ** power) * good_bad_ratio
        else: current_metric = overall_avg_profit_per_trade
    return (current_metric, params)
|
| 313 |
+
|
| 314 |
+
def generate_and_run_optimisation(main_df, main_content_placeholder, optimise_for, use_squared_weighting):
    """Grid-search strategy parameters in parallel and report the best combo.

    Builds parameter ranges from the sidebar widgets stored in
    ``st.session_state`` (each dimension only varies when its ``opt_*_cb``
    checkbox is on, otherwise it is pinned to the saved default), fans the
    combinations out over a multiprocessing Pool, and stores the winning
    parameter dict in ``st.session_state.best_params``.
    """
    # Clear any stale result tables from a previous run.
    st.session_state.summary_df = None
    st.session_state.single_ticker_results = None
    st.session_state.confidence_results_df = None
    st.session_state.open_trades_df = None
    st.session_state.advisor_df = None

    with main_content_placeholder.container():
        defaults = st.session_state.widget_defaults
        # One range per tunable dimension; a single-element list when not optimised.
        ma_range = range(st.session_state.ma_start_num, st.session_state.ma_end_num + 1, st.session_state.ma_step_num) if st.session_state.opt_ma_cb else [defaults['large_ma_period']]
        bb_range = range(st.session_state.bb_start_num, st.session_state.bb_end_num + 1, st.session_state.bb_step_num) if st.session_state.opt_bb_cb else [defaults['bband_period']]
        # + 0.001 so np.arange includes the end value despite float rounding.
        std_range = np.arange(st.session_state.std_start_num, st.session_state.std_end_num + 0.001, st.session_state.std_step_num) if st.session_state.opt_std_cb else [defaults['bband_std_dev']]
        # Percent widgets are divided by 100 into fractional units.
        sl_range = np.arange(st.session_state.sl_start_num, st.session_state.sl_end_num + 0.001, st.session_state.sl_step_num) / 100 if st.session_state.opt_sl_cb else [defaults['long_stop_loss_pct']]
        delay_range = range(st.session_state.delay_start_num, st.session_state.delay_end_num + 1, st.session_state.delay_step_num) if st.session_state.opt_delay_cb else [defaults['long_delay_days']]
        entry_range = np.arange(st.session_state.entry_start_num, st.session_state.entry_end_num + 0.001, st.session_state.entry_step_num) / 100 if st.session_state.opt_entry_cb else [defaults['long_entry_threshold_pct']]
        exit_range = np.arange(st.session_state.exit_start_num, st.session_state.exit_end_num + 0.001, st.session_state.exit_step_num) / 100 if st.session_state.opt_exit_cb else [defaults['long_exit_ma_threshold_pct']]
        conf_range = range(st.session_state.conf_start_num, st.session_state.conf_end_num + 1, st.session_state.conf_step_num) if st.session_state.opt_conf_cb else [defaults['confidence_threshold']]
        # Full cartesian product; long and short share each tuned value.
        param_product = itertools.product(ma_range, bb_range, std_range, sl_range, delay_range, entry_range, exit_range, conf_range)
        param_combinations = [{ "large_ma_period": p[0], "bband_period": p[1], "bband_std_dev": p[2], "long_stop_loss_pct": p[3], "short_stop_loss_pct": p[3], "long_delay_days": p[4], "short_delay_days": p[4], "long_entry_threshold_pct": p[5], "short_entry_threshold_pct": p[5], "long_exit_ma_threshold_pct": p[6], "short_exit_ma_threshold_pct": p[6], "confidence_threshold": p[7] } for p in param_product]
        total_combinations = len(param_combinations)
        # Exactly one combination means nothing was selected for tuning.
        if total_combinations <= 1:
            st.warning("No optimisation parameters selected."); return

        confidence_settings = {
            'toggles': (st.session_state.use_rsi, st.session_state.use_vol, st.session_state.use_trend, st.session_state.use_volume),
            'weights': (st.session_state.rsi_w, st.session_state.vol_w, st.session_state.trend_w, st.session_state.volume_w)
        }

        num_cores = cpu_count()
        st.info(f"Starting {optimise_for.upper()} optimisation on {num_cores} cores... Testing {total_combinations} combinations.")
        # Either every price column or just the single selected ticker.
        tickers_to_run = [col for col in main_df.columns if '_volume' not in str(col).lower()] if st.session_state.run_mode == "Analyse Full List" else [st.session_state.ticker_select]
        date_range = (pd.Timestamp(st.session_state.start_date), pd.Timestamp(st.session_state.end_date))
        power = 2 if use_squared_weighting else 1
        best_metric, best_params = -np.inf, {}
        status_text = st.empty(); status_text.text("Optimisation starting...")
        progress_bar = st.progress(0)
        # Bind everything except the per-task params so Pool workers only
        # receive one combination each.
        worker_func = partial(run_single_parameter_test, master_df=main_df, optimise_for=optimise_for, tickers=tickers_to_run, date_range=date_range, power=power, confidence_settings=confidence_settings)
        with Pool(processes=num_cores) as pool:
            # imap_unordered streams results as they finish for live progress.
            iterator = pool.imap_unordered(worker_func, param_combinations)
            for i, (metric, params) in enumerate(iterator, 1):
                if metric > best_metric:
                    best_metric, best_params = metric, params
                    display_score = normalise_strategy_score(best_metric)
                    status_text.text(f"Testing... New Best Score: {display_score:.2f}%")
                progress_bar.progress(i / total_combinations, text=f"Optimising... {i}/{total_combinations} combinations complete.")
        status_text.empty()
        if best_params:
            display_score = normalise_strategy_score(best_metric)
            st.success(f"Optimisation Complete! Best Strategy Score: {display_score:.2f}%")
            st.subheader("Optimal Parameters Found"); st.json(best_params)
            st.session_state.best_params = best_params
        else:
            st.warning("Optimisation finished, but no profitable combinations were found.")
|
| 367 |
+
|
| 368 |
+
def run_single_confidence_test(task, base_params, master_df, date_range, tickers_to_run, optimise_for, factor_weights):
    """Evaluate one (factor-combo, threshold) candidate across all tickers.

    Designed to run inside a multiprocessing Pool: backtests every ticker with
    the given confidence-factor on/off combination and entry threshold, then
    aggregates trade statistics into a single result row (a plain dict).

    Args:
        task: (factor_combo, threshold, _) — combo is a 4-tuple of booleans
            (RSI, Volatility, Trend, Volume); the third element is unused.
        base_params: strategy parameters shared by every candidate; copied,
            never mutated.
        master_df: wide DataFrame of prices plus optional '<ticker>_Volume'
            columns.
        date_range: (start Timestamp, end Timestamp) slice to backtest.
        tickers_to_run: price column names to evaluate.
        optimise_for: 'long' scores long trades; anything else scores shorts.
        factor_weights: dict with 'rsi', 'vol', 'trend', 'volume' weights.

    Returns:
        dict of per-candidate aggregate metrics (one row of the results table).
    """
    factor_combo, conf_threshold, _unused = task
    use_rsi, use_volatility, use_trend, use_volume = factor_combo

    params = base_params.copy()
    params["confidence_threshold"] = conf_threshold

    # Running aggregates across all tickers for this candidate.
    weighted_profit_sum = 0
    trade_count = 0
    winners = 0
    losers = 0
    entry_confidences = []

    start, end = date_range
    for symbol in tickers_to_run:
        columns = [symbol]
        volume_col = f'{symbol}_Volume'
        if volume_col in master_df.columns:
            columns.append(volume_col)
        # rename ignores the volume label when that column was not selected.
        frame = master_df.loc[start:end, columns].rename(
            columns={symbol: 'Close', volume_col: 'Volume'})

        if frame.empty:
            continue

        long_pnl, short_pnl, avg_long, avg_short, _, trades, _ = run_backtest(
            frame, params, use_rsi, use_volatility, use_trend, use_volume,
            factor_weights['rsi'], factor_weights['vol'], factor_weights['trend'], factor_weights['volume']
        )

        # trades[0] holds closed long trades, trades[2] closed short trades.
        if optimise_for == 'long':
            pnl, avg_profit, log = long_pnl, avg_long, trades[0]
        else:
            pnl, avg_profit, log = short_pnl, avg_short, trades[2]

        n = len(log)
        if n == 0:
            continue

        trade_count += n
        # Weight each ticker's average by its trade count for a global mean.
        weighted_profit_sum += avg_profit * n
        if pnl > 0:
            winners += 1
        elif pnl < 0:
            losers += 1
        entry_confidences.extend(t['confidence'] for t in log)

    raw_score = badness_score = overall_avg_profit = good_bad_ratio = 0.0

    if trade_count > 0:
        overall_avg_profit = weighted_profit_sum / trade_count
        if losers > 0:
            good_bad_ratio = winners / losers
            raw_score = overall_avg_profit * good_bad_ratio
        elif winners > 0:
            # No losing tickers at all: ratio is formally infinite; scale the
            # score by a fixed factor of 100 instead of the ratio.
            good_bad_ratio = float('inf')
            raw_score = overall_avg_profit * 100
        if winners > 0 and overall_avg_profit < 0:
            # "Badness": losers dominating while the average trade loses money.
            badness_score = (losers / winners) * abs(overall_avg_profit)

    avg_entry_confidence = np.mean(entry_confidences) if entry_confidences else 0

    return {
        "RSI": use_rsi, "Volatility": use_volatility, "TREND": use_trend, "Volume": use_volume,
        "Conf. Threshold": conf_threshold, "Avg Profit/Trade": overall_avg_profit,
        "Good/Bad Ratio": good_bad_ratio, "Winning Tickers": winners, "Losing Tickers": losers,
        "Avg Entry Conf.": avg_entry_confidence, "Good Score": raw_score, "Bad Score": badness_score,
        "Norm. Score %": normalise_strategy_score(raw_score), "Total Trades": trade_count
    }
|
| 427 |
+
|
| 428 |
+
def run_confidence_optimisation(optimise_for, find_mode, master_df, main_content_placeholder, veto_factors):
    """Search confidence-factor combinations/thresholds in parallel and rank them.

    Fans `run_single_confidence_test` out over a process pool for every
    candidate (factor on/off combo, confidence threshold) task, then stores
    the ranked results table in `st.session_state.confidence_results_df`.

    Args:
        optimise_for: 'long' or 'short' — which trade side to score.
        find_mode: 'best' (scan every non-empty combo, rank by "Good Score")
            or 'worst' (test only `veto_factors`, rank by "Bad Score").
        master_df: wide price DataFrame plus optional '<ticker>_Volume' columns.
        main_content_placeholder: st.empty() slot the progress UI renders into.
        veto_factors: (use_rsi, use_vol, use_trend, use_volume) booleans;
            only consulted when find_mode == 'worst'.
    """
    # Clear every other result view so stale tables are not displayed.
    st.session_state.summary_df = None
    st.session_state.single_ticker_results = None
    st.session_state.open_trades_df = None
    st.session_state.best_params = None
    st.session_state.advisor_df = None

    with main_content_placeholder.container():
        num_cores = cpu_count()
        st.info(f"Starting to find **{find_mode.upper()}** {optimise_for.upper()} setups on {num_cores} CPU cores...")
        factors = ['RSI', 'Volatility', 'TREND', 'Volume']

        if find_mode == 'worst':
            # Veto search evaluates exactly the user-selected combination.
            # (Originally built by filtering the full 16-way cartesian product
            # down to the one matching tuple; constructing it directly is
            # equivalent and skips the pointless scan.)
            if not any(veto_factors):
                st.warning("Please select at least one factor for the Veto search."); return
            on_off_combos = [tuple(veto_factors)]
        else:
            # Every combination with at least one factor switched on.
            on_off_combos = [c for c in itertools.product([False, True], repeat=len(factors)) if any(c)]

        thresholds_to_test = [10, 25, 50, 85]
        # The trailing [1.0] is a placeholder third task element the worker ignores.
        tasks = list(itertools.product(on_off_combos, thresholds_to_test, [1.0]))
        total_tasks = len(tasks)

        base_params = { "large_ma_period": st.session_state.ma_period, "bband_period": st.session_state.bb_period, "bband_std_dev": st.session_state.bb_std, "long_entry_threshold_pct": st.session_state.long_entry / 100, "long_exit_ma_threshold_pct": st.session_state.long_exit / 100, "long_stop_loss_pct": st.session_state.long_sl / 100, "long_delay_days": st.session_state.long_delay, "short_entry_threshold_pct": st.session_state.short_entry / 100, "short_exit_ma_threshold_pct": st.session_state.short_exit / 100, "short_stop_loss_pct": st.session_state.short_sl / 100, "short_delay_days": st.session_state.short_delay, }
        tickers_to_run = sorted([col for col in master_df.columns if '_volume' not in str(col).lower()])
        date_range = (pd.Timestamp(st.session_state.start_date), pd.Timestamp(st.session_state.end_date))

        factor_weights = {
            "rsi": st.session_state.rsi_w, "vol": st.session_state.vol_w,
            "trend": st.session_state.trend_w, "volume": st.session_state.volume_w
        }

        worker_func = partial(run_single_confidence_test, base_params=base_params, master_df=master_df, date_range=date_range, tickers_to_run=tickers_to_run, optimise_for=optimise_for, factor_weights=factor_weights)

        results_list = []
        progress_bar = st.progress(0, text="Optimisation starting...")

        # imap_unordered lets the progress bar advance as soon as any worker finishes.
        with Pool(processes=num_cores) as pool:
            iterator = pool.imap_unordered(worker_func, tasks)
            for i, result in enumerate(iterator, 1):
                results_list.append(result)
                progress_bar.progress(i / total_tasks, text=f"Optimising... {i}/{total_tasks} combinations complete.")

        if results_list:
            results_df = pd.DataFrame(results_list)
            sort_col = "Good Score" if find_mode == 'best' else "Bad Score"
            results_df = results_df.sort_values(by=sort_col, ascending=False).reset_index(drop=True)

            # Render the boolean factor flags as "On"/"Off" for display.
            for factor in factors:
                results_df[factor] = results_df[factor].apply(lambda x: "On" if x else "Off")

            st.subheader(f"π Top {find_mode.title()} Confidence Setup Found ({optimise_for.title()} Trades)")
            best_setup = results_df.iloc[0]
            st.dataframe(best_setup)

            if find_mode == 'best':
                st.session_state.best_confidence_setup = best_setup.to_dict()
                save_top_setups(results_df, optimise_for)
            else:
                st.session_state.worst_confidence_setup = best_setup.to_dict()

            st.session_state.confidence_results_df = results_df
        else:
            st.warning("Confidence optimisation completed but no results were generated.")
            st.session_state.confidence_results_df = None
|
| 494 |
+
|
| 495 |
+
def generate_advisor_report(main_df, main_content_placeholder):
    """Scan all tickers with each saved top setup and collect open trades.

    Loads the top setups persisted by Section 5 (`load_top_setups`), re-runs
    the backtest for every (setup, ticker) pair over the FULL history of
    `main_df`, and stores any currently-open trades on the chosen side in
    `st.session_state.advisor_df` for the main display area to render.

    Args:
        main_df: wide price DataFrame plus optional '<ticker>_Volume' columns.
        main_content_placeholder: st.empty() slot the report UI renders into.
    """
    # Reset the other result views so only the advisor output is shown.
    st.session_state.summary_df = None
    st.session_state.single_ticker_results = None
    st.session_state.confidence_results_df = None
    st.session_state.open_trades_df = None
    st.session_state.best_params = None

    with main_content_placeholder.container():
        st.header("π Advanced Advisor Report")
        top_setups = load_top_setups()

        if not top_setups:
            st.warning("No saved top setups found. Please run a 'Find Best Confidence' optimisation from Section 5 first.")
            return

        # NOTE(review): this radio re-renders each rerun; changing it triggers
        # a fresh scan — presumably intended, confirm with the calling flow.
        side = st.radio("Generate report for which setups?", ("Long", "Short"), horizontal=True)
        setups_to_run = top_setups.get(side.lower())

        if not setups_to_run:
            st.warning(f"No saved top {side.lower()} setups found in the file.")
            return

        st.info(f"Scanning all tickers for open trades based on the top {len(setups_to_run)} saved {side} setups...")

        # Strategy parameters come from the current sidebar widgets; only the
        # confidence settings are overridden per saved setup below.
        base_params = {"large_ma_period": st.session_state.ma_period, "bband_period": st.session_state.bb_period, "bband_std_dev": st.session_state.bb_std, "long_entry_threshold_pct": st.session_state.long_entry / 100, "long_exit_ma_threshold_pct": st.session_state.long_exit / 100, "long_stop_loss_pct": st.session_state.long_sl / 100, "long_delay_days": st.session_state.long_delay, "short_entry_threshold_pct": st.session_state.short_entry / 100, "short_exit_ma_threshold_pct": st.session_state.short_exit / 100, "short_stop_loss_pct": st.session_state.short_sl / 100, "short_delay_days": st.session_state.short_delay, }
        factor_weights = {"rsi": st.session_state.rsi_w, "vol": st.session_state.vol_w, "trend": st.session_state.trend_w, "volume": st.session_state.volume_w}

        all_advisor_trades = []
        # Price columns only; '<ticker>_Volume' companions are filtered out.
        ticker_list = sorted([col for col in main_df.columns if '_volume' not in str(col).lower()])
        progress_bar = st.progress(0, text="Scanning setups...")

        for i, setup in enumerate(setups_to_run):
            progress_bar.progress((i + 1) / len(setups_to_run), text=f"Scanning with Setup #{i+1}...")

            # Saved setups store factor flags as "On"/"Off" strings.
            use_rsi = setup.get('RSI') == 'On'
            use_vol = setup.get('Volatility') == 'On'
            use_trend = setup.get('TREND') == 'On'
            use_volume = setup.get('Volume') == 'On'

            params_for_run = base_params.copy()
            params_for_run['confidence_threshold'] = setup.get('Conf. Threshold')

            for ticker_symbol in ticker_list:
                cols_to_use = [ticker_symbol]
                if f'{ticker_symbol}_Volume' in main_df.columns: cols_to_use.append(f'{ticker_symbol}_Volume')
                # rename ignores the volume label when that column is absent.
                data_for_backtest = main_df[cols_to_use].rename(columns={ticker_symbol: 'Close', f'{ticker_symbol}_Volume': 'Volume'})

                # Only the last return value (open trades) is of interest here.
                _, _, _, _, _, _, open_trades = run_backtest(data_for_backtest, params_for_run,
                                                             use_rsi, use_vol, use_trend, use_volume,
                                                             factor_weights['rsi'], factor_weights['vol'],
                                                             factor_weights['trend'], factor_weights['volume'])

                if open_trades:
                    for trade in open_trades:
                        if trade['Side'].lower() == side.lower():
                            # Annotate the trade with its ticker and the setup
                            # (rank + quality stats) that surfaced it.
                            trade['Ticker'] = ticker_symbol
                            trade['Setup Rank'] = i + 1
                            trade['Setup G/B Ratio'] = setup.get('Good/Bad Ratio')
                            trade['Setup Avg Profit'] = setup.get('Avg Profit/Trade')
                            all_advisor_trades.append(trade)

        progress_bar.empty()

        if all_advisor_trades:
            advisor_df = pd.DataFrame(all_advisor_trades)
            cols_order = ['Ticker', 'Setup Rank', 'Current % P/L', 'Side', 'Date Open',
                          'Start Confidence', 'Setup G/B Ratio', 'Setup Avg Profit']
            advisor_df = advisor_df[cols_order]
            st.session_state.advisor_df = advisor_df
        else:
            st.success(f"No open {side} trades found matching any of the top setups.")
            # Empty DataFrame (not None) signals "ran, nothing found" to the UI.
            st.session_state.advisor_df = pd.DataFrame()
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
# --- 5. Streamlit User Interface ---
|
| 570 |
+
def main() -> None:
    """Streamlit entry point: build the sidebar, run requested actions, render results.

    Streamlit re-executes this function top-to-bottom on every interaction;
    long-running actions are triggered via session-state flags set by buttons
    (plus `st.rerun()`) so they run exactly once per click.
    """
    st.set_page_config(page_title="Stock Backtesting Sandbox", page_icon="π", layout="wide")
    # One-time session initialisation: load persisted defaults and clear results.
    if 'first_run' not in st.session_state:
        st.session_state.first_run = True
        st.session_state.widget_defaults = load_settings()
        st.session_state.veto_setup = load_veto_setup()
        st.session_state.summary_df = None
        st.session_state.single_ticker_results = None
        st.session_state.confidence_results_df = None
        st.session_state.open_trades_df = None
        st.session_state.best_params = None
        st.session_state.advisor_df = None
        st.session_state.run_analysis_button = False
        st.session_state.run_advanced_advisor = False

    st.title("π Stock Backtesting Sandbox")
    st.success(f"Good morning! Today is {date.today().strftime('%A, %d %B %Y')}.")
    main_content_placeholder = st.empty()

    # Load and clean the CSV price data once per session; cache it in session state.
    if 'master_df' not in st.session_state:
        with main_content_placeholder.container():
            master_df, load_message = load_all_data('csv_data')
            if master_df is None:
                st.error(load_message); st.stop()
            else:
                st.info(load_message)
            master_df, outlier_report = clean_data_and_report_outliers(master_df)
            if outlier_report:
                report_df = pd.DataFrame(outlier_report)
                st.info(f"Data Cleaning: Found and removed price spikes >100% in {len(outlier_report)} tickers.")
                st.download_button("β¬οΈ Download Outlier Report", report_df.to_csv(index=False).encode('utf-8'), "outlier_report.csv", "text/csv")
            st.session_state.master_df = master_df
            # Price columns only; '<ticker>_Volume' companions are filtered out.
            st.session_state.ticker_list = sorted([col for col in master_df.columns if '_volume' not in str(col).lower()])

    master_df = st.session_state.master_df
    ticker_list = st.session_state.ticker_list
    defaults = st.session_state.widget_defaults

    # --- Sidebar section 1: test mode and date range ---
    st.sidebar.header("1. Select Test Mode")
    st.sidebar.radio("Mode:", ("Analyse Single Ticker", "Analyse Full List"), key='run_mode', index=1)
    if st.session_state.get('run_mode') == "Analyse Single Ticker":
        st.sidebar.selectbox("Select a Ticker:", ticker_list, key='ticker_select')
    # Date widgets are created in both modes — both analysis paths read them.
    st.sidebar.date_input("Start Date", master_df.index.min().date(), key='start_date')
    st.sidebar.date_input("End Date", master_df.index.max().date(), key='end_date')
    # Style the primary sidebar button green.
    st.markdown("""<style>div[data-testid="stSidebar"] button[kind="primary"] { background-color: #4CAF50; color: white; border-color: #4CAF50;}</style>""", unsafe_allow_html=True)

    # Button only sets the flag; the action itself runs after the rerun so the
    # whole sidebar is rendered (and its session-state keys exist) first.
    if st.sidebar.button("π Run Analysis", type="primary"):
        st.session_state.run_analysis_button = True
        st.rerun()

    st.sidebar.markdown("---")

    # --- Sidebar section 2: confidence-score factor toggles and weights ---
    st.sidebar.header("2. Confidence Score Factors (for Main Signal)")
    st.sidebar.toggle("Use Momentum (RSI)", value=True, key='use_rsi')
    st.sidebar.number_input("RSI Weight", 0.1, 5.0, 1.0, 0.1, key='rsi_w', disabled=not st.session_state.get('use_rsi', True))
    st.sidebar.toggle("Use Volatility", value=True, key='use_vol')
    st.sidebar.number_input("Volatility Weight", 0.1, 5.0, 1.0, 0.1, key='vol_w', disabled=not st.session_state.get('use_vol', True))
    st.sidebar.toggle("Use Trend (200d MA)", value=True, key='use_trend')
    st.sidebar.number_input("Trend Weight", 0.1, 5.0, 1.0, 0.1, key='trend_w', disabled=not st.session_state.get('use_trend', True))
    st.sidebar.toggle("Use Volume Spike", value=True, key='use_volume')
    st.sidebar.number_input("Volume Weight", 0.1, 5.0, 1.0, 0.1, key='volume_w', disabled=not st.session_state.get('use_volume', True))
    st.sidebar.slider("Minimum Confidence Threshold (%)", 0, 100, defaults.get("confidence_threshold", 50), 5, key='confidence_slider')

    st.sidebar.markdown("---")
    # --- Sidebar section 3: core strategy parameters (percent in UI, fraction in params) ---
    st.sidebar.header("3. Strategy Parameters")
    st.sidebar.number_input("Large MA Period", 10, 200, defaults.get("large_ma_period", 50), 1, key='ma_period')
    st.sidebar.number_input("Bollinger Band Period", 10, 100, defaults.get("bband_period", 20), 1, key='bb_period')
    st.sidebar.number_input("Bollinger Band Std Dev", 1.0, 4.0, defaults.get("bband_std_dev", 2.0), 0.1, key='bb_std')
    st.sidebar.subheader("Long Trade Logic"); st.sidebar.slider("Entry Threshold (%)", 0.0, 10.0, defaults.get("long_entry_threshold_pct", 0.0) * 100, 0.1, key='long_entry'); st.sidebar.slider("Exit MA Threshold (%)", 0.0, 10.0, defaults.get("long_exit_ma_threshold_pct", 0.0) * 100, 0.1, key='long_exit'); st.sidebar.slider("Stop Loss (%)", 0.0, 30.0, defaults.get("long_stop_loss_pct", 0.0) * 100, 0.5, key='long_sl'); st.sidebar.number_input("Delay Entry (days)", 0, 10, defaults.get("long_delay_days", 0), 1, key='long_delay')
    st.sidebar.subheader("Short Trade Logic"); st.sidebar.slider("Entry Threshold (%)", 0.0, 10.0, defaults.get("short_entry_threshold_pct", 0.0) * 100, 0.1, key='short_entry'); st.sidebar.slider("Exit MA Threshold (%)", 0.0, 10.0, defaults.get("short_exit_ma_threshold_pct", 0.0) * 100, 0.1, key='short_exit'); st.sidebar.slider("Stop Loss (%)", 0.0, 30.0, defaults.get("short_stop_loss_pct", 0.0) * 100, 0.5, key='short_sl'); st.sidebar.number_input("Delay Entry (days)", 0, 10, defaults.get("short_delay_days", 0), 1, key='short_delay')

    st.sidebar.markdown("---")
    st.sidebar.header("4. Find Best Parameters")
    with st.sidebar.expander("Set Optimisation Ranges", expanded=False):
        # Omitted for brevity
        pass

    st.sidebar.markdown("---")
    st.sidebar.header("5. Find Best/Worst Confidence Setup")
    with st.sidebar.expander("Optimise Confidence Factors", expanded=False):
        # Omitted for brevity
        pass

    st.sidebar.markdown("---")
    st.sidebar.header("6. Advanced Advisor")
    st.sidebar.info("Uses saved top setups from Section 5. Re-run an optimisation to update them.")
    if st.sidebar.button("π Generate Advisor Report"):
        st.session_state.run_advanced_advisor = True
        st.rerun()

    st.sidebar.markdown("---")
    # Veto-filter panel: only shown while a veto setup is loaded in the session.
    if st.session_state.get('veto_setup'):
        st.sidebar.header("Veto Filter")
        st.sidebar.success("Veto filter is ACTIVE.")
        st.sidebar.json(st.session_state.veto_setup)
        if st.sidebar.button("πΎ Save Veto as Default"):
            save_veto_setup(st.session_state.veto_setup)
        if st.sidebar.button("Clear Veto Filter"):
            st.session_state.veto_setup = None
            st.rerun()
        st.sidebar.markdown("---")

    if st.sidebar.button("πΎ Save Settings as Default"):
        save_settings({ "large_ma_period": st.session_state.ma_period, "bband_period": st.session_state.bb_period, "bband_std_dev": st.session_state.bb_std, "confidence_threshold": st.session_state.confidence_slider, "long_entry_threshold_pct": st.session_state.long_entry / 100, "long_exit_ma_threshold_pct": st.session_state.long_exit / 100, "long_stop_loss_pct": st.session_state.long_sl / 100, "long_delay_days": st.session_state.long_delay, "short_entry_threshold_pct": st.session_state.short_entry / 100, "short_exit_ma_threshold_pct": st.session_state.short_exit / 100, "short_stop_loss_pct": st.session_state.short_sl / 100, "short_delay_days": st.session_state.short_delay, })

    # --- Trigger actions based on session state flags ---
    if st.session_state.get('run_analysis_button'):
        # Clear the competing result views before running a fresh analysis.
        st.session_state.confidence_results_df = None
        st.session_state.best_params = None
        st.session_state.advisor_df = None

        with main_content_placeholder.container():
            veto_to_use = st.session_state.get('veto_setup')
            if veto_to_use: st.info("Veto filter is active for this analysis.")
            else: st.info("π‘ Tip: You can find and apply a 'Veto Filter' from section 5 in the sidebar.")

            # Snapshot of the sidebar widgets; percentages converted to fractions.
            manual_params = {"large_ma_period": st.session_state.ma_period, "bband_period": st.session_state.bb_period, "bband_std_dev": st.session_state.bb_std, "confidence_threshold": st.session_state.confidence_slider, "long_entry_threshold_pct": st.session_state.long_entry / 100, "long_exit_ma_threshold_pct": st.session_state.long_exit / 100, "long_stop_loss_pct": st.session_state.long_sl / 100, "long_delay_days": st.session_state.long_delay, "short_entry_threshold_pct": st.session_state.short_entry / 100, "short_exit_ma_threshold_pct": st.session_state.short_exit / 100, "short_stop_loss_pct": st.session_state.short_sl / 100, "short_delay_days": st.session_state.short_delay, }

            if st.session_state.run_mode == "Analyse Single Ticker":
                selected_ticker = st.session_state.get('ticker_select', ticker_list[0])
                cols_to_use = [selected_ticker]
                if f'{selected_ticker}_Volume' in master_df.columns: cols_to_use.append(f'{selected_ticker}_Volume')
                # rename ignores the volume label when that column is absent.
                data_for_backtest = master_df[cols_to_use].rename(columns={selected_ticker: 'Close', f'{selected_ticker}_Volume': 'Volume'})
                ticker_data_series = data_for_backtest.loc[pd.Timestamp(st.session_state.start_date):pd.Timestamp(st.session_state.end_date)]

                if not ticker_data_series.empty:
                    long_pnl, short_pnl, avg_long_trade, avg_short_trade, results_df, trades, open_trades = run_backtest(ticker_data_series, manual_params, st.session_state.use_rsi, st.session_state.use_vol, st.session_state.use_trend, st.session_state.use_volume, st.session_state.rsi_w, st.session_state.vol_w, st.session_state.trend_w, st.session_state.volume_w, veto_setup=veto_to_use)
                    st.session_state.single_ticker_results = {"long_pnl": long_pnl, "short_pnl": short_pnl, "avg_long_trade": avg_long_trade, "avg_short_trade": avg_short_trade, "results_df": results_df, "trades": trades}
                    if open_trades: st.session_state.open_trades_df = pd.DataFrame(open_trades)
                    else: st.session_state.open_trades_df = pd.DataFrame()
                else: st.warning("No data for this ticker in the selected date range.")

            elif st.session_state.run_mode == "Analyse Full List":
                summary_results, all_open_trades = [], []
                progress_bar = st.progress(0, text="Starting analysis...")
                for i, ticker_symbol in enumerate(ticker_list):
                    progress_bar.progress((i + 1) / len(ticker_list), text=f"Analysing {ticker_symbol}...")
                    cols_to_use = [ticker_symbol]
                    if f'{ticker_symbol}_Volume' in master_df.columns: cols_to_use.append(f'{ticker_symbol}_Volume')
                    data_for_backtest = master_df[cols_to_use].rename(columns={ticker_symbol: 'Close', f'{ticker_symbol}_Volume': 'Volume'})
                    ticker_data_series = data_for_backtest.loc[pd.Timestamp(st.session_state.start_date):pd.Timestamp(st.session_state.end_date)]
                    if not ticker_data_series.empty:
                        long_pnl, short_pnl, avg_long_trade, avg_short_trade, _, trades, open_trades = run_backtest(ticker_data_series, manual_params, st.session_state.use_rsi, st.session_state.use_vol, st.session_state.use_trend, st.session_state.use_volume, st.session_state.rsi_w, st.session_state.vol_w, st.session_state.trend_w, st.session_state.volume_w, veto_setup=veto_to_use)
                        # trades[0]/trades[2] are the closed long/short trade logs.
                        long_conf = np.mean([t['confidence'] for t in trades[0]]) if trades[0] else 0
                        short_conf = np.mean([t['confidence'] for t in trades[2]]) if trades[2] else 0
                        summary_results.append({"Ticker": ticker_symbol, "Cumulative Long P&L": long_pnl, "Avg Long Profit per Trade": avg_long_trade, "Num Long Trades": len(trades[0]), "Avg Long Confidence": long_conf, "Cumulative Short P&L": short_pnl, "Avg Short Profit per Trade": avg_short_trade, "Num Short Trades": len(trades[2]), "Avg Short Confidence": short_conf})
                        if open_trades:
                            for trade in open_trades:
                                trade['Ticker'] = ticker_symbol
                                all_open_trades.append(trade)
                progress_bar.empty()
                if summary_results: st.session_state.summary_df = pd.DataFrame(summary_results).set_index('Ticker')
                else: st.warning("No trades found for any ticker with the current settings.")
                if all_open_trades: st.session_state.open_trades_df = pd.DataFrame(all_open_trades)
                else: st.session_state.open_trades_df = pd.DataFrame()

        # Consume the flag so the analysis does not re-run on the next rerun.
        st.session_state.run_analysis_button = False

    if st.session_state.get('run_advanced_advisor'):
        generate_advisor_report(master_df, main_content_placeholder)
        st.session_state.run_advanced_advisor = False

    # --- Main Display Area ---
    # Renders at most one primary result view, chosen by which session-state
    # slot is populated (advisor > confidence results > single ticker > summary).
    with main_content_placeholder.container():
        if st.session_state.get('advisor_df') is not None:
            st.subheader("π¨βπΌ Advanced Advisor: Open Positions from Top Setups")
            if not st.session_state.advisor_df.empty:
                st.dataframe(st.session_state.advisor_df.style.format({
                    "Current % P/L": "{:.2%}", "Date Open": "{:%Y-%m-%d}",
                    "Start Confidence": "{:.0f}%", "Setup G/B Ratio": "{:.2f}",
                    "Setup Avg Profit": "{:.2%}"
                }))
            else:
                st.info("No open positions found matching the criteria.")

        elif st.session_state.get('confidence_results_df') is not None and not st.session_state.confidence_results_df.empty:
            st.subheader("π Confidence Setup Optimisation Results")
            display_df = st.session_state.confidence_results_df.head(60)
            st.dataframe(display_df.style.format({
                "Avg Profit/Trade": "{:.2%}", "Good/Bad Ratio": "{:.2f}",
                "Avg Entry Conf.": "{:.1f}%", "Good Score": "{:.4f}",
                "Bad Score": "{:.4f}", "Norm. Score %": "{:.2f}%"
            }))

        elif st.session_state.get('single_ticker_results') is not None:
            res = st.session_state.single_ticker_results
            st.subheader(f"Results for {st.session_state.get('ticker_select')}")
            c1, c2, c3, c4 = st.columns(4); c1.metric("Cumulative Long P&L", f"{res['long_pnl']:.2%}"); c2.metric("Avg Long Trade P&L", f"{res['avg_long_trade']:.2%}"); c3.metric("Cumulative Short P&L", f"{res['short_pnl']:.2%}"); c4.metric("Avg Short Trade P&L", f"{res['avg_short_trade']:.2%}")
            if res['results_df'] is not None:
                st.plotly_chart(generate_long_plot(res['results_df'], res['trades'], st.session_state.get('ticker_select')), use_container_width=True)
                st.plotly_chart(generate_short_plot(res['results_df'], res['trades'], st.session_state.get('ticker_select')), use_container_width=True)

        elif st.session_state.get('summary_df') is not None and not st.session_state.summary_df.empty:
            display_summary_analytics(st.session_state.summary_df)
            st.subheader("Results per Ticker")
            if st.checkbox("Only show tickers with trades", value=True):
                display_df = st.session_state.summary_df[(st.session_state.summary_df['Num Long Trades'] > 0) | (st.session_state.summary_df['Num Short Trades'] > 0)]
            else:
                display_df = st.session_state.summary_df
            st.dataframe(display_df.style.format({"Cumulative Long P&L": "{:.2%}", "Avg Long Profit per Trade": "{:.2%}", "Cumulative Short P&L": "{:.2%}", "Avg Short Profit per Trade": "{:.2%}", "Avg Long Confidence": "{:.0f}%", "Avg Short Confidence": "{:.0f}%"}))

        # Open-positions table is shown in addition to whichever view above rendered.
        if st.session_state.get('open_trades_df') is not None and not st.session_state.open_trades_df.empty:
            st.subheader("π¨βπΌ Advisor: Currently Open Positions (Manual Run)")
            display_open_df = st.session_state.open_trades_df.copy()
            st.dataframe(display_open_df.style.format({"Date Open": "{:%Y-%m-%d}", "Start Confidence": "{:.0f}%", "Current % P/L": "{:.2%}"}))

            st.markdown("---")
            st.info("Want to see open trades from a wider range of top strategies?")
            if st.button("Run Advanced Advisor Report"):
                st.session_state.run_advanced_advisor = True
                st.rerun()

    def apply_best_params_to_widgets():
        """Button callback: copy optimiser-found params into the sidebar widget keys.

        Runs before the next rerun renders the widgets, so assigning to the
        widget keys here is the supported way to change their values.
        """
        bp = st.session_state.get('best_params');
        if not bp: return
        # Optimiser stores fractions; the sliders display percentages (* 100).
        st.session_state.ma_period, st.session_state.bb_period, st.session_state.bb_std = bp.get("large_ma_period"), bp.get("bband_period"), bp.get("bband_std_dev")
        st.session_state.long_sl, st.session_state.short_sl = bp.get("long_stop_loss_pct") * 100, bp.get("short_stop_loss_pct") * 100
        st.session_state.long_delay, st.session_state.short_delay = bp.get("long_delay_days"), bp.get("short_delay_days")
        st.session_state.long_entry, st.session_state.short_entry = bp.get("long_entry_threshold_pct") * 100, bp.get("short_entry_threshold_pct") * 100
        st.session_state.long_exit, st.session_state.short_exit = bp.get("long_exit_ma_threshold_pct") * 100, bp.get("short_exit_ma_threshold_pct") * 100
        st.session_state.confidence_slider = bp.get("confidence_threshold")
        # Consume the stored params so the button disappears after use.
        st.session_state.best_params = None

    if st.session_state.get('best_params'):
        st.button("β¬οΈ Load Optimal Parameters into Manual Settings", on_click=apply_best_params_to_widgets)

    if st.session_state.get('worst_confidence_setup'):
        if st.button("Apply Worst Setup as Veto Filter"):
            st.session_state.veto_setup = st.session_state.worst_confidence_setup
            st.session_state.worst_confidence_setup = None
            st.rerun()
|
| 803 |
+
|
| 804 |
+
# Streamlit executes this script top-to-bottom on every rerun; the guard keeps
# main() from running if the module is ever imported rather than executed.
if __name__ == "__main__":
    main()
|
csv_data/2015-2017.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
csv_data/2017-2019.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
csv_data/2019-2020.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
csv_data/2020-2022.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
csv_data/2022-2024.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
csv_data/2024-NOW.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
requirements.txt
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# requirements.txt
|
| 2 |
+
streamlit==1.33.0
|
| 3 |
+
pandas==2.2.2
|
| 4 |
+
plotly==5.22.0
|
| 5 |
+
numpy==1.26.4
|
| 6 |
+
pandas_ta
|