nguyenquocanh committed on
Commit
4378303
·
verified ·
1 Parent(s): 2f19479

Upload 3 files

Browse files
Files changed (3) hide show
  1. src/fullimage.py +367 -0
  2. src/irregular.py +361 -0
  3. src/last_candle.py +329 -0
src/fullimage.py ADDED
@@ -0,0 +1,367 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import pandas as pd
3
+ import mplfinance as mpf
4
+ import matplotlib.pyplot as plt
5
+ from datetime import datetime, timedelta
6
+ import os
7
+ import numpy as np
8
+ from PIL import Image
9
+ import tensorflow as tf
10
+ from tensorflow.keras import layers, models
11
+ from sklearn.metrics import accuracy_score, f1_score, recall_score, roc_auc_score, precision_recall_curve, auc
12
+ from sklearn.utils.class_weight import compute_class_weight
13
+ import argparse
14
+ import gc
15
+ import time
16
+ import shutil
17
+
18
# Use non-interactive backend for matplotlib
# (Agg renders to files only, so image generation works on headless machines.)
plt.switch_backend('Agg')

# Coin configurations: for each trading pair, the month used for training and
# the five months used for out-of-sample testing, as (year, month) tuples.
COINS = {
    "BTCUSDT": {"train_month": (2024, 6), "test_months": [(2024, 12), (2024, 3), (2024, 8), (2024, 4), (2024, 1)]},
    "ETHUSDT": {"train_month": (2024, 6), "test_months": [(2024, 8), (2024, 4), (2024, 5), (2024, 3), (2024, 2)]},
    "BNBUSDT": {"train_month": (2024, 10), "test_months": [(2024, 3), (2024, 12), (2024, 8), (2024, 1), (2024, 4)]},
    "XRPUSDT": {"train_month": (2024, 9), "test_months": [(2024, 11), (2024, 12), (2024, 4), (2024, 8), (2024, 1)]},
    "ADAUSDT": {"train_month": (2024, 9), "test_months": [(2024, 4), (2024, 12), (2024, 1), (2024, 3), (2024, 11)]},
    "DOGEUSDT": {"train_month": (2024, 9), "test_months": [(2024, 3), (2024, 4), (2024, 11), (2024, 8), (2024, 12)]}
}

TIME_LENGTHS = [7, 14, 21, 28]  # 1, 2, 3, 4 weeks in days
WINDOW_SIZES = [5, 15, 30]  # Candles per image

# Set BASE_DIR for new output
BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "crypto_research_minute_fullimage")
# Old directory for reusing data and images
OLD_BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "crypto_research_minute")
38
+
39
# Binance API data fetcher (fixed to 1m interval) - Skipped since we reuse raw data
def fetch_coin_data(symbol, start_time, end_time):
    """Fetch 1-minute klines for `symbol` between `start_time` and `end_time`.

    Pages through the Binance REST API (at most 1000 rows per request) and
    returns a DataFrame with columns timestamp/open/high/low/close, with
    OHLC values cast to float.

    Raises:
        requests.HTTPError: if Binance returns a non-2xx response.
    """
    url = "https://api.binance.com/api/v3/klines"
    all_data = []
    current_start = int(start_time.timestamp() * 1000)
    end_ms = int(end_time.timestamp() * 1000)

    while current_start < end_ms:
        params = {"symbol": symbol, "interval": "1m", "startTime": current_start, "endTime": end_ms, "limit": 1000}
        # Fail fast on HTTP errors instead of json-decoding an error payload,
        # and never hang forever on a stalled connection.
        response = requests.get(url, params=params, timeout=30)
        response.raise_for_status()
        data = response.json()
        if not data:
            break
        all_data.extend(data)
        # Advance past the last returned candle's open time.
        current_start = int(data[-1][0]) + 60000  # 1 minute in milliseconds

    df = pd.DataFrame(all_data, columns=["timestamp", "open", "high", "low", "close", "volume", "close_time", "quote_asset_volume", "trades", "taker_buy_base", "taker_buy_quote", "ignore"])
    df["timestamp"] = pd.to_datetime(df["timestamp"], unit="ms")
    df[["open", "high", "low", "close"]] = df[["open", "high", "low", "close"]].astype(float)
    return df[["timestamp", "open", "high", "low", "close"]]
59
+
60
# Generate candlestick images and labels with variable window size - Modified to reuse images
def generate_images(df, window_size, output_dir, period_name, month_str):
    """Produce per-window candlestick PNGs plus a labels CSV for `df`.

    A window covers `window_size` consecutive rows; its label is "UP" when the
    last row's close exceeds the first row's open, else "DOWN". If images for
    this (month, period, window) already exist under OLD_BASE_DIR they are
    copied and only the labels are regenerated; otherwise images are rendered
    with mplfinance. Returns the path to the written labels CSV.

    NOTE(review): reads `df.name` as the coin symbol — the caller is expected
    to assign it before calling; confirm at call sites.
    """
    os.makedirs(output_dir, exist_ok=True)
    labels_file = os.path.join(output_dir, f"labels_{month_str}_1m_{period_name}_w{window_size}.csv")
    old_images_dir = os.path.join(OLD_BASE_DIR, df.name, "images", f"{month_str}_1m_{period_name}_w{window_size}")

    # Check if images already exist in the old directory
    if os.path.exists(old_images_dir):
        print(f"Images already exist at {old_images_dir}, copying to {output_dir}")
        # Copy images to new directory
        # samefile guard avoids copying a directory onto itself.
        if os.path.exists(output_dir) and os.path.samefile(old_images_dir, output_dir):
            print(f"Output directory {output_dir} is the same as source, skipping copy")
        else:
            shutil.copytree(old_images_dir, output_dir, dirs_exist_ok=True)

        # Regenerate labels using the new logic
        # (full-window labeling: first candle's open vs last candle's close)
        labels = []
        for i in range(window_size - 1, len(df)):
            window_df = df.iloc[i - (window_size - 1):i + 1]
            first_candle = window_df.iloc[0]
            last_candle = window_df.iloc[-1]
            label = "UP" if last_candle["close"] > first_candle["open"] else "DOWN"
            labels.append(label)

        labels_df = pd.DataFrame({"image_path": [f"candle_{i}.png" for i in range(window_size - 1, len(df))], "label": labels})
        labels_df.to_csv(labels_file, index=False)
        print(f"Regenerated {len(labels_df)} labels to {labels_file}")
        return labels_file
    else:
        # Fallback: Generate images if they don't exist (shouldn't happen in this case)
        print(f"Images not found at {old_images_dir}, generating new images")
        labels = []
        start_time = time.time()
        for i in range(window_size - 1, len(df)):
            # Sliding window ending at row i (inclusive).
            window_df = df.iloc[i - (window_size - 1):i + 1]
            first_candle = window_df.iloc[0]
            last_candle = window_df.iloc[-1]
            label = "UP" if last_candle["close"] > first_candle["open"] else "DOWN"
            labels.append(label)

            # Render a small, axis-free candle chart at low dpi for CNN input.
            plt.figure(figsize=(2, 2))
            mpf.plot(window_df, type="candle", style="binance", axisoff=True, title="", ylabel="", xlabel="", volume=False)
            plt.tight_layout(pad=0)
            image_path = os.path.join(output_dir, f"candle_{i}.png")
            plt.savefig(image_path, bbox_inches="tight", pad_inches=0, dpi=32)
            # Close all figures to avoid matplotlib memory growth in long runs.
            plt.close('all')

            # Periodic progress/throughput report.
            if i % 1000 == 0:
                elapsed = time.time() - start_time
                images_generated = i - (window_size - 1) + 1
                speed = images_generated / elapsed if elapsed > 0 else 0
                print(f"Generated image {i}/{len(df)} for {month_str} 1m {period_name} w{window_size} ({speed:.2f} images/sec)")

        labels_df = pd.DataFrame({"image_path": [f"candle_{i}.png" for i in range(window_size - 1, len(df))], "label": labels})
        labels_df.to_csv(labels_file, index=False)
        print(f"Saved {len(labels_df)} labels to {labels_file}")
        return labels_file
117
+
118
# Load and preprocess images
def load_images(labels_file, images_dir):
    """Load the labelled candle images listed in `labels_file`.

    Returns (X, y): X is an array of 64x64 RGB images scaled to [0, 1],
    y is 1 for "UP" rows and 0 for "DOWN" rows.
    """
    records = pd.read_csv(labels_file)
    pixel_arrays = []
    for _, record in records.iterrows():
        img = Image.open(os.path.join(images_dir, record["image_path"]))
        pixel_arrays.append(np.array(img.convert("RGB").resize((64, 64))) / 255.0)
    X = np.array(pixel_arrays)
    y = np.array([int(lbl == "UP") for lbl in records["label"]])
    return X, y
124
+
125
# Train CNN model
def train_model(X, y, period_name, month_str, window_size, coin_dir):
    """Train (or load, if already saved) the CNN classifier for one experiment.

    Returns (model, history); history is None when a cached model is loaded.
    """
    model_path = os.path.join(coin_dir, "models", f"model_{month_str}_1m_{period_name}_w{window_size}.h5")
    # Reuse a previously trained model instead of retraining.
    if os.path.exists(model_path):
        print(f"Model already exists at {model_path}, loading instead of training")
        return tf.keras.models.load_model(model_path), None

    cnn = models.Sequential()
    cnn.add(layers.Conv2D(32, (3, 3), activation="relu", input_shape=(64, 64, 3)))
    cnn.add(layers.MaxPooling2D((2, 2)))
    cnn.add(layers.Dropout(0.25))
    cnn.add(layers.Conv2D(64, (3, 3), activation="relu"))
    cnn.add(layers.MaxPooling2D((2, 2)))
    cnn.add(layers.Dropout(0.25))
    cnn.add(layers.Conv2D(128, (3, 3), activation="relu"))
    cnn.add(layers.Flatten())
    cnn.add(layers.Dense(128, activation="relu"))
    cnn.add(layers.Dropout(0.5))
    cnn.add(layers.Dense(1, activation="sigmoid"))

    cnn.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    # Weight classes inversely to frequency so the minority direction counts.
    weights = compute_class_weight('balanced', classes=np.unique(y), y=y)
    history = cnn.fit(X, y, epochs=10, batch_size=32, class_weight=dict(enumerate(weights)))

    cnn.save(model_path)
    print(f"Model saved to {model_path}")
    return cnn, history
153
+
154
# Evaluate and save results
def evaluate_and_save(model, X, y, period_name, month_str, window_size, coin_dir, dataset_type="train", exp_suffix=""):
    """Score `model` on (X, y) and write the metrics to a results text file.

    Returns the metrics dict, or None when a cached result file makes the
    evaluation unnecessary.
    """
    results_file = os.path.join(coin_dir, "results", f"results_{dataset_type}_{month_str}_1m_{period_name}_w{window_size}{exp_suffix}.txt")
    # Experiment-II results are always recomputed; everything else is cached.
    if os.path.exists(results_file) and exp_suffix != "_exp2":
        print(f"Results already exist at {results_file}, skipping evaluation")
        return None

    probabilities = model.predict(X, verbose=0)
    predictions = (probabilities > 0.5).astype(int).flatten()

    # Area under the precision-recall curve: integrate precision over recall.
    precision_vals, recall_vals, _ = precision_recall_curve(y, probabilities)
    metrics = {
        "accuracy": accuracy_score(y, predictions),
        "f1": f1_score(y, predictions),
        "recall": recall_score(y, predictions),
        "auroc": roc_auc_score(y, probabilities),
        "auprc": auc(recall_vals, precision_vals)
    }

    with open(results_file, "w") as f:
        f.write(f"{dataset_type.capitalize()} Metrics for {month_str} 1m {period_name} w{window_size} {exp_suffix}:\n")
        for metric_name, value in metrics.items():
            f.write(f"{metric_name.capitalize()}: {value:.4f}\n")
    print(f"Results saved to {results_file}")
    return metrics
178
+
179
# Check if all experiments for a window size are complete
def is_window_size_complete(symbol, train_month, test_months, window_size):
    """Return True iff every Experiment I and II result file already exists."""
    results_dir = os.path.join(BASE_DIR, symbol, "results")
    train_year, train_month_num = train_month
    train_month_str = f"{train_year}-{train_month_num:02d}"

    def result_exists(filename):
        # All result files live directly under the coin's results directory.
        return os.path.exists(os.path.join(results_dir, filename))

    # Experiment I: one train result per time length, one test result per
    # (test month, time length) pair.
    for days in TIME_LENGTHS:
        period_name = f"{days}days"
        if not result_exists(f"results_train_{train_month_str}_1m_{period_name}_w{window_size}.txt"):
            return False
        for test_year, test_month_num in test_months:
            test_month_str = f"{test_year}-{test_month_num:02d}"
            if not result_exists(f"results_test_{test_month_str}_1m_{period_name}_w{window_size}.txt"):
                return False

    # Experiment II: trained on one week, tested on 2/3/4-week spans.
    if not result_exists(f"results_train_{train_month_str}_1m_1week_w{window_size}_exp2.txt"):
        return False
    for test_year, test_month_num in test_months:
        test_month_str = f"{test_year}-{test_month_num:02d}"
        for days in [14, 21, 28]:
            if not result_exists(f"results_test_{test_month_str}_1m_{days}days_w{window_size}_exp2.txt"):
                return False

    return True
211
+
212
# Main experiment runner for a single coin and window size
def run_experiments_for_coin(symbol, train_month, test_months, window_size):
    """Run Experiments I and II for one coin at one image window size.

    Experiment I trains and tests on matching time lengths (7/14/21/28 days);
    Experiment II trains on the first week of the train month and tests on
    14/21/28-day spans of each test month. Raw CSVs are reused from
    OLD_BASE_DIR when present; result files already on disk short-circuit
    the whole run.
    """
    if is_window_size_complete(symbol, train_month, test_months, window_size):
        print(f"All experiments for {symbol} with window size {window_size} are complete, skipping")
        return

    coin_dir = os.path.join(BASE_DIR, symbol)
    RAW_DATA_DIR = os.path.join(coin_dir, "raw_data")
    IMAGES_DIR = os.path.join(coin_dir, "images")
    MODELS_DIR = os.path.join(coin_dir, "models")
    RESULTS_DIR = os.path.join(coin_dir, "results")
    OLD_RAW_DATA_DIR = os.path.join(OLD_BASE_DIR, symbol, "raw_data")

    os.makedirs(RAW_DATA_DIR, exist_ok=True)
    os.makedirs(IMAGES_DIR, exist_ok=True)
    os.makedirs(MODELS_DIR, exist_ok=True)
    os.makedirs(RESULTS_DIR, exist_ok=True)

    train_year, train_month_num = train_month

    # Experiment I: Train and test on matching timelengths
    for days in TIME_LENGTHS:
        period_name = f"{days}days"
        train_start = datetime(train_year, train_month_num, 1)
        # Span covers `days` calendar days ending at 23:59 of the last day.
        train_end = train_start + timedelta(days=days - 1, hours=23, minutes=59)
        train_month_str = f"{train_year}-{train_month_num:02d}"

        raw_file = os.path.join(RAW_DATA_DIR, f"raw_{train_month_str}_1m_{period_name}.csv")
        old_raw_file = os.path.join(OLD_RAW_DATA_DIR, f"raw_{train_month_str}_1m_{period_name}.csv")
        if os.path.exists(old_raw_file):
            # Reuse previously fetched data rather than hitting the API again.
            print(f"Raw data exists at {old_raw_file}, copying to {raw_file}")
            shutil.copy(old_raw_file, raw_file)
            df = pd.read_csv(raw_file, index_col="timestamp", parse_dates=["timestamp"])
            df.index = pd.to_datetime(df.index)
        else:
            print(f"Raw data not found at {old_raw_file}, fetching new data")
            df = fetch_coin_data(symbol, train_start, train_end)
            df.set_index("timestamp", inplace=True)
            df.to_csv(raw_file)
            print(f"Raw data saved to {raw_file}")

        # Attach symbol to df for use in generate_images
        df.name = symbol
        images_subdir = os.path.join(IMAGES_DIR, f"{train_month_str}_1m_{period_name}_w{window_size}")
        labels_file = generate_images(df, window_size, images_subdir, period_name, train_month_str)
        X, y = load_images(labels_file, images_subdir)
        model, history = train_model(X, y, period_name, train_month_str, window_size, coin_dir)
        evaluate_and_save(model, X, y, period_name, train_month_str, window_size, coin_dir, "train")

        # Release TF graph/session memory between heavy steps.
        tf.keras.backend.clear_session()
        gc.collect()

        # Evaluate the trained model on each configured test month.
        for test_year, test_month_num in test_months:
            test_start = datetime(test_year, test_month_num, 1)
            test_end = test_start + timedelta(days=days - 1, hours=23, minutes=59)
            test_month_str = f"{test_year}-{test_month_num:02d}"

            raw_file = os.path.join(RAW_DATA_DIR, f"raw_{test_month_str}_1m_{period_name}.csv")
            old_raw_file = os.path.join(OLD_RAW_DATA_DIR, f"raw_{test_month_str}_1m_{period_name}.csv")
            if os.path.exists(old_raw_file):
                print(f"Raw data exists at {old_raw_file}, copying to {raw_file}")
                shutil.copy(old_raw_file, raw_file)
                df = pd.read_csv(raw_file, index_col="timestamp", parse_dates=["timestamp"])
                df.index = pd.to_datetime(df.index)
            else:
                print(f"Raw data not found at {old_raw_file}, fetching new data")
                df = fetch_coin_data(symbol, test_start, test_end)
                df.set_index("timestamp", inplace=True)
                df.to_csv(raw_file)
                print(f"Raw data saved to {raw_file}")

            df.name = symbol
            images_subdir = os.path.join(IMAGES_DIR, f"{test_month_str}_1m_{period_name}_w{window_size}")
            labels_file = generate_images(df, window_size, images_subdir, period_name, test_month_str)
            X, y = load_images(labels_file, images_subdir)
            evaluate_and_save(model, X, y, period_name, test_month_str, window_size, coin_dir, "test")

            tf.keras.backend.clear_session()
            gc.collect()

    # Experiment II: Train on 1 week, test on 2-3-4 weeks
    exp2_test_lengths = [14, 21, 28]
    train_start = datetime(train_year, train_month_num, 1)
    # Exactly the first 7 calendar days of the training month.
    train_end = train_start + timedelta(days=6, hours=23, minutes=59)
    train_month_str = f"{train_year}-{train_month_num:02d}"
    period_name = "1week"

    raw_file = os.path.join(RAW_DATA_DIR, f"raw_{train_month_str}_1m_{period_name}.csv")
    old_raw_file = os.path.join(OLD_RAW_DATA_DIR, f"raw_{train_month_str}_1m_{period_name}.csv")
    if os.path.exists(old_raw_file):
        print(f"Raw data exists at {old_raw_file}, copying to {raw_file}")
        shutil.copy(old_raw_file, raw_file)
        df = pd.read_csv(raw_file, index_col="timestamp", parse_dates=["timestamp"])
        df.index = pd.to_datetime(df.index)
    else:
        print(f"Raw data not found at {old_raw_file}, fetching new data")
        df = fetch_coin_data(symbol, train_start, end_time=train_end)
        df.set_index("timestamp", inplace=True)
        df.to_csv(raw_file)
        print(f"Raw data saved to {raw_file}")

    df.name = symbol
    images_subdir = os.path.join(IMAGES_DIR, f"{train_month_str}_1m_{period_name}_w{window_size}")
    labels_file = generate_images(df, window_size, images_subdir, period_name, train_month_str)
    X, y = load_images(labels_file, images_subdir)
    model, history = train_model(X, y, period_name, train_month_str, window_size, coin_dir)
    evaluate_and_save(model, X, y, period_name, train_month_str, window_size, coin_dir, "train", "_exp2")

    tf.keras.backend.clear_session()
    gc.collect()

    # Test the one-week model against longer horizons in every test month.
    for test_year, test_month_num in test_months:
        test_month_str = f"{test_year}-{test_month_num:02d}"
        for days in exp2_test_lengths:
            period_name = f"{days}days"
            test_start = datetime(test_year, test_month_num, 1)
            test_end = test_start + timedelta(days=days - 1, hours=23, minutes=59)

            raw_file = os.path.join(RAW_DATA_DIR, f"raw_{test_month_str}_1m_{period_name}.csv")
            old_raw_file = os.path.join(OLD_RAW_DATA_DIR, f"raw_{test_month_str}_1m_{period_name}.csv")
            if os.path.exists(old_raw_file):
                print(f"Raw data exists at {old_raw_file}, copying to {raw_file}")
                shutil.copy(old_raw_file, raw_file)
                df = pd.read_csv(raw_file, index_col="timestamp", parse_dates=["timestamp"])
                df.index = pd.to_datetime(df.index)
            else:
                print(f"Raw data not found at {old_raw_file}, fetching new data")
                df = fetch_coin_data(symbol, test_start, test_end)
                df.set_index("timestamp", inplace=True)
                df.to_csv(raw_file)
                print(f"Raw data saved to {raw_file}")

            df.name = symbol
            images_subdir = os.path.join(IMAGES_DIR, f"{test_month_str}_1m_{period_name}_w{window_size}")
            labels_file = generate_images(df, window_size, images_subdir, period_name, test_month_str)
            X, y = load_images(labels_file, images_subdir)
            evaluate_and_save(model, X, y, period_name, test_month_str, window_size, coin_dir, "test", "_exp2")

            tf.keras.backend.clear_session()
            gc.collect()
352
+
353
# Run experiments for all coins and window sizes
def run_all_experiments():
    """Run the full experiment grid: every coin in COINS x every WINDOW_SIZES entry."""
    os.makedirs(BASE_DIR, exist_ok=True)
    for symbol, config in COINS.items():
        for window_size in WINDOW_SIZES:
            print(f"Running experiments for {symbol} with window size {window_size}")
            run_experiments_for_coin(symbol, config["train_month"], config["test_months"], window_size)
            print(f"Completed experiments for {symbol} with window size {window_size}")
            # Free Keras/TensorFlow state between coins to bound memory growth.
            tf.keras.backend.clear_session()
            gc.collect()
363
+
364
if __name__ == "__main__":
    # No CLI options are defined; argparse still provides --help and rejects
    # unexpected arguments before the long-running experiments start.
    parser = argparse.ArgumentParser(description="Crypto Minute-Based Image Classification Research with Full-Window Labeling")
    args = parser.parse_args()
    run_all_experiments()
src/irregular.py ADDED
@@ -0,0 +1,361 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import pandas as pd
3
+ import mplfinance as mpf
4
+ import matplotlib.pyplot as plt
5
+ from datetime import datetime, timedelta
6
+ import os
7
+ import numpy as np
8
+ from PIL import Image
9
+ import tensorflow as tf
10
+ from tensorflow.keras import layers, models
11
+ from sklearn.metrics import accuracy_score, f1_score, recall_score, roc_auc_score, precision_recall_curve, auc
12
+ from sklearn.utils.class_weight import compute_class_weight
13
+ import argparse
14
+ import gc
15
+ import time
16
+
17
# Use non-interactive backend for matplotlib
# (Agg renders to files only, so image generation works on headless machines.)
plt.switch_backend('Agg')

# Coin configurations: for each trading pair, the month used for training and
# the five months used for out-of-sample testing, as (year, month) tuples.
COINS = {
    "BTCUSDT": {"train_month": (2024, 6), "test_months": [(2024, 12), (2024, 3), (2024, 8), (2024, 4), (2024, 1)]},
    "ETHUSDT": {"train_month": (2024, 6), "test_months": [(2024, 8), (2024, 4), (2024, 5), (2024, 3), (2024, 2)]},
    "BNBUSDT": {"train_month": (2024, 10), "test_months": [(2024, 3), (2024, 12), (2024, 8), (2024, 1), (2024, 4)]},
    "XRPUSDT": {"train_month": (2024, 9), "test_months": [(2024, 11), (2024, 12), (2024, 4), (2024, 8), (2024, 1)]},
    "ADAUSDT": {"train_month": (2024, 9), "test_months": [(2024, 4), (2024, 12), (2024, 1), (2024, 3), (2024, 11)]},
    "DOGEUSDT": {"train_month": (2024, 9), "test_months": [(2024, 3), (2024, 4), (2024, 11), (2024, 8), (2024, 12)]}
}

TIME_LENGTHS = [7, 14, 21, 28]  # 1, 2, 3, 4 weeks in days
WINDOW_SIZES = [5, 15, 30]  # Candles per image
MISSING_RATIOS = [0.6, 0.8, 0.95]  # 60%, 80%, 95% missing data

# Set BASE_DIR to new folder for irregular data
BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "crypto_research_minute_irregular")
36
+
37
# Binance API data fetcher with irregular data omission
def fetch_coin_data(symbol, start_time, end_time, missing_ratio):
    """Fetch 1m klines for `symbol` and randomly drop `missing_ratio` of rows.

    Pages through the Binance REST API (max 1000 rows per request), then keeps
    a random `1 - missing_ratio` fraction of the rows (sorted by timestamp) to
    simulate irregular/sparse data. Returns a DataFrame with columns
    timestamp/open/high/low/close.

    Raises:
        requests.HTTPError: if Binance returns a non-2xx response.
    """
    url = "https://api.binance.com/api/v3/klines"
    all_data = []
    current_start = int(start_time.timestamp() * 1000)
    end_ms = int(end_time.timestamp() * 1000)

    while current_start < end_ms:
        params = {"symbol": symbol, "interval": "1m", "startTime": current_start, "endTime": end_ms, "limit": 1000}
        # Fail fast on HTTP errors instead of json-decoding an error payload,
        # and never hang forever on a stalled connection.
        response = requests.get(url, params=params, timeout=30)
        response.raise_for_status()
        data = response.json()
        if not data:
            break
        all_data.extend(data)
        # Advance past the last returned candle's open time.
        current_start = int(data[-1][0]) + 60000  # 1 minute in milliseconds

    df = pd.DataFrame(all_data, columns=["timestamp", "open", "high", "low", "close", "volume", "close_time", "quote_asset_volume", "trades", "taker_buy_base", "taker_buy_quote", "ignore"])
    df["timestamp"] = pd.to_datetime(df["timestamp"], unit="ms")
    df[["open", "high", "low", "close"]] = df[["open", "high", "low", "close"]].astype(float)

    # Apply irregular data omission
    if missing_ratio > 0:
        n_rows = len(df)
        n_keep = int(n_rows * (1 - missing_ratio))
        if n_keep < 1:  # Allow at least 1 row
            print(f"Warning: Not enough data after {missing_ratio*100}% omission for {symbol}, keeping all data")
            return df[["timestamp", "open", "high", "low", "close"]]
        # NOTE(review): np.random.choice is unseeded, so each run samples a
        # different subset — confirm whether reproducibility matters here.
        keep_indices = np.random.choice(n_rows, size=n_keep, replace=False)
        df = df.iloc[keep_indices].sort_values("timestamp").reset_index(drop=True)

    return df[["timestamp", "open", "high", "low", "close"]]
68
+
69
# Generate candlestick images and labels with sparse windows
def generate_images(df, window_size, output_dir, period_name, month_str, missing_ratio):
    """Render candlestick PNGs over fixed wall-clock windows of sparse data.

    Windows are defined on a regular 1-minute grid spanning df's index, so a
    window may contain fewer than `window_size` rows (or none, in which case
    it is skipped). Labels compare the last available close against the first
    available open in the window. Returns the labels CSV path, or None when
    `df` is empty.
    """
    os.makedirs(output_dir, exist_ok=True)
    labels_file = os.path.join(output_dir, f"labels_{month_str}_1m_{period_name}_w{window_size}_{int(missing_ratio*100)}pct.csv")
    # An existing labels file means images were generated on a previous run.
    if os.path.exists(labels_file):
        print(f"Labels already exist at {labels_file}, skipping image generation")
        return labels_file

    if len(df) < 1:
        print(f"Warning: DataFrame too small ({len(df)} rows) for any window, skipping image generation")
        return None

    labels = []
    start_time = time.time()
    # Use index as timestamps since it's set as index
    # (regular 1-minute grid over the full span, independent of missing rows)
    original_timestamps = pd.date_range(start=df.index[0], end=df.index[-1], freq="1min")

    for i in range(len(original_timestamps) - window_size + 1):
        window_start = original_timestamps[i]
        window_end = original_timestamps[i + window_size - 1]
        # Rows of the sparse df that fall inside this wall-clock window.
        window_indices = df.index[(df.index >= window_start) & (df.index <= window_end)]
        window_df = df.loc[window_indices]

        if len(window_df) > 0:
            first_candle = window_df.iloc[0]
            last_candle = window_df.iloc[-1]
            label = "UP" if last_candle["close"] > first_candle["open"] else "DOWN"
            labels.append(label)

            # Render a small, axis-free candle chart at low dpi for CNN input.
            plt.figure(figsize=(2, 2))
            mpf.plot(window_df, type="candle", style="binance", axisoff=True, title="", ylabel="", xlabel="", volume=False, tight_layout=True)
            image_path = os.path.join(output_dir, f"candle_{i}_{int(missing_ratio*100)}pct.png")
            plt.savefig(image_path, bbox_inches="tight", pad_inches=0, dpi=32)
            # Close all figures to avoid matplotlib memory growth in long runs.
            plt.close('all')

            # Periodic progress/throughput report.
            if i % 1000 == 0:
                elapsed = time.time() - start_time
                images_generated = i + 1
                speed = images_generated / elapsed if elapsed > 0 else 0
                print(f"Generated image {i}/{len(original_timestamps) - window_size + 1} for {month_str} 1m {period_name} w{window_size} {missing_ratio*100}% ({speed:.2f} images/sec)")
        else:
            continue

    # NOTE(review): image_path entries are collected by checking which files
    # exist on disk while labels were collected in-loop — stale PNGs from an
    # interrupted earlier run could misalign paths and labels; verify the
    # output directory starts empty.
    labels_df = pd.DataFrame({"image_path": [f"candle_{i}_{int(missing_ratio*100)}pct.png" for i in range(len(original_timestamps) - window_size + 1) if os.path.exists(os.path.join(output_dir, f"candle_{i}_{int(missing_ratio*100)}pct.png"))], "label": labels})
    labels_df.to_csv(labels_file, index=False)
    print(f"Saved {len(labels_df)} labels to {labels_file}")
    return labels_file
116
+
117
# Load and preprocess images
def load_images(labels_file, images_dir):
    """Load labelled candle images; (None, None) when the labels file is absent.

    Returns (X, y): X is an array of 64x64 RGB images scaled to [0, 1],
    y is 1 for "UP" rows and 0 for "DOWN" rows.
    """
    if not os.path.exists(labels_file):
        return None, None
    records = pd.read_csv(labels_file)
    pixel_arrays = []
    for _, record in records.iterrows():
        img = Image.open(os.path.join(images_dir, record["image_path"]))
        pixel_arrays.append(np.array(img.convert("RGB").resize((64, 64))) / 255.0)
    X = np.array(pixel_arrays)
    y = np.array([int(lbl == "UP") for lbl in records["label"]])
    return X, y
125
+
126
# Train CNN model
def train_model(X, y, period_name, month_str, window_size, coin_dir, missing_ratio):
    """Train (or load, if already saved) the CNN for one sparse-data experiment.

    Returns (model, history); history is None when a cached model is loaded.
    """
    model_path = os.path.join(coin_dir, "models", f"model_{month_str}_1m_{period_name}_w{window_size}_{int(missing_ratio*100)}pct.h5")
    # Reuse a previously trained model instead of retraining.
    if os.path.exists(model_path):
        print(f"Model already exists at {model_path}, loading instead of training")
        return tf.keras.models.load_model(model_path), None

    cnn = models.Sequential()
    cnn.add(layers.Conv2D(32, (3, 3), activation="relu", input_shape=(64, 64, 3)))
    cnn.add(layers.MaxPooling2D((2, 2)))
    cnn.add(layers.Dropout(0.25))
    cnn.add(layers.Conv2D(64, (3, 3), activation="relu"))
    cnn.add(layers.MaxPooling2D((2, 2)))
    cnn.add(layers.Dropout(0.25))
    cnn.add(layers.Conv2D(128, (3, 3), activation="relu"))
    cnn.add(layers.Flatten())
    cnn.add(layers.Dense(128, activation="relu"))
    cnn.add(layers.Dropout(0.5))
    cnn.add(layers.Dense(1, activation="sigmoid"))

    cnn.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    # Weight classes inversely to frequency so the minority direction counts.
    weights = compute_class_weight('balanced', classes=np.unique(y), y=y)
    history = cnn.fit(X, y, epochs=10, batch_size=32, class_weight=dict(enumerate(weights)))

    cnn.save(model_path)
    print(f"Model saved to {model_path}")
    return cnn, history
154
+
155
# Evaluate and save results
def evaluate_and_save(model, X, y, period_name, month_str, window_size, coin_dir, dataset_type="train", exp_suffix="", missing_ratio=0):
    """Score `model` on (X, y) and write the metrics to a results text file.

    Returns the metrics dict, or None when a cached result file makes the
    evaluation unnecessary.
    """
    results_file = os.path.join(coin_dir, "results", f"results_{dataset_type}_{month_str}_1m_{period_name}_w{window_size}_{int(missing_ratio*100)}pct{exp_suffix}.txt")
    # Experiment-II results are always recomputed; everything else is cached.
    if os.path.exists(results_file) and exp_suffix != "_exp2":
        print(f"Results already exist at {results_file}, skipping evaluation")
        return None

    probabilities = model.predict(X, verbose=0)
    predictions = (probabilities > 0.5).astype(int).flatten()

    # Area under the precision-recall curve: integrate precision over recall.
    precision_vals, recall_vals, _ = precision_recall_curve(y, probabilities)
    metrics = {
        "accuracy": accuracy_score(y, predictions),
        "f1": f1_score(y, predictions),
        "recall": recall_score(y, predictions),
        "auroc": roc_auc_score(y, probabilities),
        "auprc": auc(recall_vals, precision_vals)
    }

    with open(results_file, "w") as f:
        f.write(f"{dataset_type.capitalize()} Metrics for {month_str} 1m {period_name} w{window_size} {missing_ratio*100}% {exp_suffix}:\n")
        for metric_name, value in metrics.items():
            f.write(f"{metric_name.capitalize()}: {value:.4f}\n")
    print(f"Results saved to {results_file}")
    return metrics
179
+
180
# Check if all experiments for a window size and missing ratio are complete
def is_window_size_complete(symbol, train_month, test_months, window_size, missing_ratio):
    """Return True iff every Experiment I and II result file already exists."""
    results_dir = os.path.join(BASE_DIR, symbol, "results")
    train_year, train_month_num = train_month
    train_month_str = f"{train_year}-{train_month_num:02d}"
    ratio_str = f"_{int(missing_ratio*100)}pct"

    def result_exists(filename):
        # All result files live directly under the coin's results directory.
        return os.path.exists(os.path.join(results_dir, filename))

    # Experiment I: one train result per time length, one test result per
    # (test month, time length) pair.
    for days in TIME_LENGTHS:
        period_name = f"{days}days"
        if not result_exists(f"results_train_{train_month_str}_1m_{period_name}_w{window_size}{ratio_str}.txt"):
            return False
        for test_year, test_month_num in test_months:
            test_month_str = f"{test_year}-{test_month_num:02d}"
            if not result_exists(f"results_test_{test_month_str}_1m_{period_name}_w{window_size}{ratio_str}.txt"):
                return False

    # Experiment II: trained on one week, tested on 2/3/4-week spans.
    if not result_exists(f"results_train_{train_month_str}_1m_1week_w{window_size}{ratio_str}_exp2.txt"):
        return False
    for test_year, test_month_num in test_months:
        test_month_str = f"{test_year}-{test_month_num:02d}"
        for days in [14, 21, 28]:
            if not result_exists(f"results_test_{test_month_str}_1m_{days}days_w{window_size}{ratio_str}_exp2.txt"):
                return False

    return True
213
+
214
+ # Main experiment runner for a single coin, window size, and missing ratio
215
def _load_or_fetch_raw(symbol, start, end, raw_file, missing_ratio):
    """Return the OHLC DataFrame for [start, end], fetching from Binance and
    caching to `raw_file` on first use, or loading the cached CSV afterwards.
    The returned frame is always indexed by a DatetimeIndex of timestamps."""
    if not os.path.exists(raw_file):
        df = fetch_coin_data(symbol, start, end, missing_ratio)
        df.set_index("timestamp", inplace=True)
        df.to_csv(raw_file)
        print(f"Raw data saved to {raw_file}")
    else:
        print(f"Raw data already exists at {raw_file}, skipping fetch")
        df = pd.read_csv(raw_file, index_col="timestamp", parse_dates=["timestamp"])
    # No-op on the fetch path; ensures a DatetimeIndex on the CSV-load path.
    df.index = pd.to_datetime(df.index)
    return df


# Main experiment runner for a single coin, window size, and missing ratio
def run_experiments_for_coin(symbol, train_month, test_months, window_size, missing_ratio):
    """Run Experiments I and II for one coin/window-size/missing-ratio combo.

    Experiment I trains and tests on matching spans (7/14/21/28 days);
    Experiment II trains on one week and tests on 2/3/4-week spans. All
    artifacts (raw CSVs, images, models, result files) are cached under
    BASE_DIR/<symbol> so interrupted runs resume cheaply.

    Fixes vs. previous revision:
    - Experiment II test fetch used an undefined name `end_time` (NameError
      on any uncached test month); it now uses the computed `test_end`.
    - `model` is explicitly reset per training section; if image generation
      or loading fails, test evaluation is skipped instead of crashing or
      silently reusing a model from a previous iteration.
    """
    if is_window_size_complete(symbol, train_month, test_months, window_size, missing_ratio):
        print(f"All experiments for {symbol} with window size {window_size} and {missing_ratio*100}% missing complete, skipping")
        return

    # Per-coin directory layout.
    coin_dir = os.path.join(BASE_DIR, symbol)
    RAW_DATA_DIR = os.path.join(coin_dir, "raw_data")
    IMAGES_DIR = os.path.join(coin_dir, "images")
    MODELS_DIR = os.path.join(coin_dir, "models")
    RESULTS_DIR = os.path.join(coin_dir, "results")
    for directory in (RAW_DATA_DIR, IMAGES_DIR, MODELS_DIR, RESULTS_DIR):
        os.makedirs(directory, exist_ok=True)

    train_year, train_month_num = train_month
    train_month_str = f"{train_year}-{train_month_num:02d}"
    # Suffix embedded in every artifact name, e.g. "_20pct".
    ratio_str = f"_{int(missing_ratio*100)}pct"

    # Experiment I: Train and test on matching timelengths
    for days in TIME_LENGTHS:
        period_name = f"{days}days"
        train_start = datetime(train_year, train_month_num, 1)
        train_end = train_start + timedelta(days=days - 1, hours=23, minutes=59)

        raw_file = os.path.join(RAW_DATA_DIR, f"raw_{train_month_str}_1m_{period_name}{ratio_str}.csv")
        df = _load_or_fetch_raw(symbol, train_start, train_end, raw_file, missing_ratio)

        images_subdir = os.path.join(IMAGES_DIR, f"{train_month_str}_1m_{period_name}_w{window_size}{ratio_str}")
        labels_file = generate_images(df, window_size, images_subdir, period_name, train_month_str, missing_ratio)
        model = None  # stays None if image generation/loading fails for this span
        if labels_file:
            X, y = load_images(labels_file, images_subdir)
            if X is not None:
                model, _ = train_model(X, y, period_name, train_month_str, window_size, coin_dir, missing_ratio)
                evaluate_and_save(model, X, y, period_name, train_month_str, window_size, coin_dir, "train", missing_ratio=missing_ratio)

        tf.keras.backend.clear_session()
        gc.collect()

        if model is None:
            # No trained model for this span: nothing to evaluate on test months.
            continue

        for test_year, test_month_num in test_months:
            test_start = datetime(test_year, test_month_num, 1)
            test_end = test_start + timedelta(days=days - 1, hours=23, minutes=59)
            test_month_str = f"{test_year}-{test_month_num:02d}"

            raw_file = os.path.join(RAW_DATA_DIR, f"raw_{test_month_str}_1m_{period_name}{ratio_str}.csv")
            df = _load_or_fetch_raw(symbol, test_start, test_end, raw_file, missing_ratio)

            images_subdir = os.path.join(IMAGES_DIR, f"{test_month_str}_1m_{period_name}_w{window_size}{ratio_str}")
            labels_file = generate_images(df, window_size, images_subdir, period_name, test_month_str, missing_ratio)
            if labels_file:
                X, y = load_images(labels_file, images_subdir)
                if X is not None:
                    evaluate_and_save(model, X, y, period_name, test_month_str, window_size, coin_dir, "test", missing_ratio=missing_ratio)

            tf.keras.backend.clear_session()
            gc.collect()

    # Experiment II: Train on 1 week, test on 2-3-4 weeks
    exp2_test_lengths = [14, 21, 28]
    train_start = datetime(train_year, train_month_num, 1)
    train_end = train_start + timedelta(days=6, hours=23, minutes=59)
    period_name = "1week"

    raw_file = os.path.join(RAW_DATA_DIR, f"raw_{train_month_str}_1m_{period_name}{ratio_str}.csv")
    df = _load_or_fetch_raw(symbol, train_start, train_end, raw_file, missing_ratio)

    images_subdir = os.path.join(IMAGES_DIR, f"{train_month_str}_1m_{period_name}_w{window_size}{ratio_str}")
    labels_file = generate_images(df, window_size, images_subdir, period_name, train_month_str, missing_ratio)
    model = None
    if labels_file:
        X, y = load_images(labels_file, images_subdir)
        if X is not None:
            model, _ = train_model(X, y, period_name, train_month_str, window_size, coin_dir, missing_ratio)
            evaluate_and_save(model, X, y, period_name, train_month_str, window_size, coin_dir, "train", "_exp2", missing_ratio)

    tf.keras.backend.clear_session()
    gc.collect()

    if model is None:
        # Training on the 1-week span failed; skip all Experiment II tests.
        return

    for test_year, test_month_num in test_months:
        test_month_str = f"{test_year}-{test_month_num:02d}"
        for days in exp2_test_lengths:
            period_name = f"{days}days"
            test_start = datetime(test_year, test_month_num, 1)
            test_end = test_start + timedelta(days=days - 1, hours=23, minutes=59)

            raw_file = os.path.join(RAW_DATA_DIR, f"raw_{test_month_str}_1m_{period_name}{ratio_str}.csv")
            # BUG FIX: previously passed the undefined name `end_time` here.
            df = _load_or_fetch_raw(symbol, test_start, test_end, raw_file, missing_ratio)

            images_subdir = os.path.join(IMAGES_DIR, f"{test_month_str}_1m_{period_name}_w{window_size}{ratio_str}")
            labels_file = generate_images(df, window_size, images_subdir, period_name, test_month_str, missing_ratio)
            if labels_file:
                X, y = load_images(labels_file, images_subdir)
                if X is not None:
                    evaluate_and_save(model, X, y, period_name, test_month_str, window_size, coin_dir, "test", "_exp2", missing_ratio)

            tf.keras.backend.clear_session()
            gc.collect()
345
+
346
+ # Run experiments for all coins, window sizes, and missing ratios
347
# Run experiments for all coins, window sizes, and missing ratios
def run_all_experiments():
    """Drive the full experiment grid: every coin x window size x missing
    ratio, releasing TensorFlow state between combinations so long runs do
    not accumulate memory."""
    os.makedirs(BASE_DIR, exist_ok=True)
    for symbol, config in COINS.items():
        train_month = config["train_month"]
        test_months = config["test_months"]
        for window_size in WINDOW_SIZES:
            for missing_ratio in MISSING_RATIOS:
                print(f"Running experiments for {symbol} with window size {window_size} and {missing_ratio*100}% missing")
                run_experiments_for_coin(symbol, train_month, test_months, window_size, missing_ratio)
                print(f"Completed experiments for {symbol} with window size {window_size} and {missing_ratio*100}% missing")
                # Drop the Keras session and force a GC cycle between combos.
                tf.keras.backend.clear_session()
                gc.collect()
357
+
358
+ if __name__ == "__main__":
359
+ parser = argparse.ArgumentParser(description="Crypto Minute-Based Image Classification with Irregular Missing Data and Sparse Windows")
360
+ args = parser.parse_args()
361
+ run_all_experiments()
src/last_candle.py ADDED
@@ -0,0 +1,329 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import pandas as pd
3
+ import mplfinance as mpf
4
+ import matplotlib.pyplot as plt
5
+ from datetime import datetime, timedelta
6
+ import os
7
+ import numpy as np
8
+ from PIL import Image
9
+ import tensorflow as tf
10
+ from tensorflow.keras import layers, models
11
+ from sklearn.metrics import accuracy_score, f1_score, recall_score, roc_auc_score, precision_recall_curve, auc
12
+ from sklearn.utils.class_weight import compute_class_weight
13
+ import argparse
14
+ import gc
15
+ import time
16
+
17
# Use non-interactive backend for matplotlib: figures are only ever written
# to disk, never displayed.
plt.switch_backend('Agg')

# Coin configurations: for each Binance symbol, the (year, month) used for
# training and the (year, month) pairs used as out-of-sample test months.
COINS = {
    "BTCUSDT": {"train_month": (2024, 6), "test_months": [(2024, 12), (2024, 3), (2024, 8), (2024, 4), (2024, 1)]},
    "ETHUSDT": {"train_month": (2024, 6), "test_months": [(2024, 8), (2024, 4), (2024, 5), (2024, 3), (2024, 2)]},
    "BNBUSDT": {"train_month": (2024, 10), "test_months": [(2024, 3), (2024, 12), (2024, 8), (2024, 1), (2024, 4)]},
    "XRPUSDT": {"train_month": (2024, 9), "test_months": [(2024, 11), (2024, 12), (2024, 4), (2024, 8), (2024, 1)]},
    "ADAUSDT": {"train_month": (2024, 9), "test_months": [(2024, 4), (2024, 12), (2024, 1), (2024, 3), (2024, 11)]},
    "DOGEUSDT": {"train_month": (2024, 9), "test_months": [(2024, 3), (2024, 4), (2024, 11), (2024, 8), (2024, 12)]}
}

TIME_LENGTHS = [7, 14, 21, 28]  # 1, 2, 3, 4 weeks in days (Experiment I spans)
WINDOW_SIZES = [5, 15, 30]  # Candles per image

# Set BASE_DIR to absolute path relative to script location; all artifacts
# (raw CSVs, images, models, result files) are stored beneath it per coin.
BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "crypto_research_minute")
35
+
36
+ # Binance API data fetcher (fixed to 1m interval)
37
def fetch_coin_data(symbol, start_time, end_time):
    """Download 1-minute OHLC klines for `symbol` from the Binance REST API.

    Pages through /api/v3/klines in chunks of up to 1000 candles between
    `start_time` and `end_time` (naive datetimes, interpreted via their
    local-epoch timestamp) and returns a DataFrame with columns
    timestamp/open/high/low/close, prices as floats.

    Raises requests.HTTPError on a non-2xx response and requests.Timeout if
    a request stalls — previously errors were silently fed into the JSON
    parser and the call could hang indefinitely with no timeout.
    """
    url = "https://api.binance.com/api/v3/klines"
    all_data = []
    current_start = int(start_time.timestamp() * 1000)
    end_ms = int(end_time.timestamp() * 1000)

    while current_start < end_ms:
        params = {"symbol": symbol, "interval": "1m", "startTime": current_start, "endTime": end_ms, "limit": 1000}
        # Bounded wait + explicit HTTP error check: fail fast instead of
        # hanging or mis-parsing an error payload as kline rows.
        response = requests.get(url, params=params, timeout=30)
        response.raise_for_status()
        data = response.json()
        if not data:
            break
        all_data.extend(data)
        # Advance to just past the last candle received (1 minute = 60000 ms).
        current_start = int(data[-1][0]) + 60000

    df = pd.DataFrame(all_data, columns=["timestamp", "open", "high", "low", "close", "volume", "close_time", "quote_asset_volume", "trades", "taker_buy_base", "taker_buy_quote", "ignore"])
    df["timestamp"] = pd.to_datetime(df["timestamp"], unit="ms")
    df[["open", "high", "low", "close"]] = df[["open", "high", "low", "close"]].astype(float)
    return df[["timestamp", "open", "high", "low", "close"]]
56
+
57
+ # Generate candlestick images and labels with variable window size
58
# Generate candlestick images and labels with variable window size
def generate_images(df, window_size, output_dir, period_name, month_str):
    """Render one candlestick image per sliding window of `df` and label it.

    For each window of `window_size` consecutive candles, the label is the
    direction of the window's LAST candle: "UP" if close > open, else
    "DOWN". Images are written as candle_<i>.png under `output_dir`
    (i = index of the window's last row in `df`); a CSV mapping image
    filename -> label is written next to them and its path returned.
    If the labels CSV already exists, all work is skipped (resume support).
    """
    os.makedirs(output_dir, exist_ok=True)
    labels_file = os.path.join(output_dir, f"labels_{month_str}_1m_{period_name}_w{window_size}.csv")
    if os.path.exists(labels_file):
        print(f"Labels already exist at {labels_file}, skipping image generation")
        return labels_file

    labels = []
    start_time = time.time()
    # i indexes the last candle of each window; the window is df[i-w+1 : i+1].
    for i in range(window_size - 1, len(df)):
        window_df = df.iloc[i - (window_size - 1):i + 1]
        last_candle = window_df.iloc[-1]
        label = "UP" if last_candle["close"] > last_candle["open"] else "DOWN"
        labels.append(label)

        # NOTE(review): mpf.plot creates its own figure, so this explicit
        # plt.figure(figsize=...) is likely a throwaway and plt.savefig below
        # presumably saves the mplfinance figure (the current one). The
        # effective image size would then come from dpi/bbox, not figsize —
        # confirm before changing any rendering parameter here.
        plt.figure(figsize=(2, 2))
        mpf.plot(window_df, type="candle", style="binance", axisoff=True, title="", ylabel="", xlabel="", volume=False)
        plt.tight_layout(pad=0)
        image_path = os.path.join(output_dir, f"candle_{i}.png")
        plt.savefig(image_path, bbox_inches="tight", pad_inches=0, dpi=32)
        plt.close('all')  # Explicitly close all figures (incl. the throwaway) to bound memory

        # Throughput log every 1000 candles processed.
        if i % 1000 == 0:
            elapsed = time.time() - start_time
            images_generated = i - (window_size - 1) + 1
            speed = images_generated / elapsed if elapsed > 0 else 0
            print(f"Generated image {i}/{len(df)} for {month_str} 1m {period_name} w{window_size} ({speed:.2f} images/sec)")

    # Filenames are regenerated from the same range as the loop above, so
    # they line up positionally with `labels`.
    labels_df = pd.DataFrame({"image_path": [f"candle_{i}.png" for i in range(window_size - 1, len(df))], "label": labels})
    labels_df.to_csv(labels_file, index=False)
    print(f"Saved {len(labels_df)} labels to {labels_file}")
    return labels_file
90
+
91
+ # Load and preprocess images
92
# Load and preprocess images
def load_images(labels_file, images_dir):
    """Load every image listed in the labels CSV as a normalized array.

    Returns (X, y): X holds pixel values scaled to [0, 1] for each image
    converted to RGB and resized to 64x64; y is 1 for "UP" labels, 0 for
    "DOWN".
    """
    labels_df = pd.read_csv(labels_file)
    pixel_arrays = []
    for _, row in labels_df.iterrows():
        img = Image.open(os.path.join(images_dir, row["image_path"]))
        pixel_arrays.append(np.array(img.convert("RGB").resize((64, 64))) / 255.0)
    X = np.array(pixel_arrays)
    y = np.array([int(lbl == "UP") for lbl in labels_df["label"]])
    return X, y
97
+
98
+ # Train CNN model
99
# Train CNN model
def train_model(X, y, period_name, month_str, window_size, coin_dir):
    """Train (or reload from disk) the binary up/down CNN classifier.

    Returns (model, history); `history` is None when a previously saved
    model was loaded instead of retrained.
    """
    model_path = os.path.join(coin_dir, "models", f"model_{month_str}_1m_{period_name}_w{window_size}.h5")
    if os.path.exists(model_path):
        print(f"Model already exists at {model_path}, loading instead of training")
        return tf.keras.models.load_model(model_path), None

    # Three conv/pool/dropout blocks, then a dense head with sigmoid output.
    cnn_stack = [
        layers.Conv2D(32, (3, 3), activation="relu", input_shape=(64, 64, 3)),
        layers.MaxPooling2D((2, 2)),
        layers.Dropout(0.25),
        layers.Conv2D(64, (3, 3), activation="relu"),
        layers.MaxPooling2D((2, 2)),
        layers.Dropout(0.25),
        layers.Conv2D(128, (3, 3), activation="relu"),
        layers.Flatten(),
        layers.Dense(128, activation="relu"),
        layers.Dropout(0.5),
        layers.Dense(1, activation="sigmoid"),
    ]
    model = models.Sequential(cnn_stack)
    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    # Re-weight the UP/DOWN classes so a skewed month does not bias training.
    balanced_weights = compute_class_weight('balanced', classes=np.unique(y), y=y)
    history = model.fit(X, y, epochs=10, batch_size=32, class_weight=dict(enumerate(balanced_weights)))

    model.save(model_path)
    print(f"Model saved to {model_path}")
    return model, history
126
+
127
+ # Evaluate and save results
128
# Evaluate and save results
def evaluate_and_save(model, X, y, period_name, month_str, window_size, coin_dir, dataset_type="train", exp_suffix=""):
    """Score `model` on (X, y), write metrics to a results text file, and
    return them as a dict.

    Returns None without re-evaluating when the results file already exists,
    except for Experiment II runs (exp_suffix == "_exp2"), which are always
    rewritten.
    """
    results_file = os.path.join(coin_dir, "results", f"results_{dataset_type}_{month_str}_1m_{period_name}_w{window_size}{exp_suffix}.txt")
    # Force write for Experiment II; everything else is resumable.
    if os.path.exists(results_file) and exp_suffix != "_exp2":
        print(f"Results already exist at {results_file}, skipping evaluation")
        return None

    y_pred_prob = model.predict(X, verbose=0)
    y_pred = (y_pred_prob > 0.5).astype(int).flatten()

    # AUPRC = area under the precision-recall curve (x = recall, y = precision).
    precision, recall, _ = precision_recall_curve(y, y_pred_prob)
    metrics = {
        "accuracy": accuracy_score(y, y_pred),
        "f1": f1_score(y, y_pred),
        "recall": recall_score(y, y_pred),
        "auroc": roc_auc_score(y, y_pred_prob),
        "auprc": auc(recall, precision),
    }

    with open(results_file, "w") as f:
        f.write(f"{dataset_type.capitalize()} Metrics for {month_str} 1m {period_name} w{window_size} {exp_suffix}:\n")
        for metric_name, metric_value in metrics.items():
            f.write(f"{metric_name.capitalize()}: {metric_value:.4f}\n")
    print(f"Results saved to {results_file}")
    return metrics
151
+
152
+ # Check if all experiments for a window size are complete
153
# Check if all experiments for a window size are complete
def is_window_size_complete(symbol, train_month, test_months, window_size, base_dir=None, time_lengths=None):
    """Return True when every expected result file for this coin/window size
    already exists, i.e. the whole run can be skipped.

    Checks Experiment I (per-span train result plus one test result per test
    month) and Experiment II (1-week train result plus 2/3/4-week test
    results per test month).

    Args:
        symbol: coin symbol, e.g. "BTCUSDT"; also the per-coin directory name.
        train_month: (year, month) tuple used for training.
        test_months: iterable of (year, month) test tuples.
        window_size: candles per image; embedded in every result filename.
        base_dir: root directory holding per-coin subdirectories; defaults to
            the module-level BASE_DIR. Parameterized so the check can target
            an alternate results tree (e.g. in tests).
        time_lengths: Experiment I spans in days; defaults to TIME_LENGTHS.
    """
    if base_dir is None:
        base_dir = BASE_DIR
    if time_lengths is None:
        time_lengths = TIME_LENGTHS

    results_dir = os.path.join(base_dir, symbol, "results")
    train_year, train_month_num = train_month
    train_month_str = f"{train_year}-{train_month_num:02d}"

    # Experiment I: matching train/test spans.
    for days in time_lengths:
        period_name = f"{days}days"
        train_result = os.path.join(results_dir, f"results_train_{train_month_str}_1m_{period_name}_w{window_size}.txt")
        if not os.path.exists(train_result):
            return False
        for test_year, test_month_num in test_months:
            test_month_str = f"{test_year}-{test_month_num:02d}"
            test_result = os.path.join(results_dir, f"results_test_{test_month_str}_1m_{period_name}_w{window_size}.txt")
            if not os.path.exists(test_result):
                return False

    # Experiment II: 1-week training, 2/3/4-week testing.
    period_name = "1week"
    train_result = os.path.join(results_dir, f"results_train_{train_month_str}_1m_{period_name}_w{window_size}_exp2.txt")
    if not os.path.exists(train_result):
        return False
    for test_year, test_month_num in test_months:
        test_month_str = f"{test_year}-{test_month_num:02d}"
        for days in [14, 21, 28]:  # 2, 3, 4 weeks
            period_name = f"{days}days"
            test_result = os.path.join(results_dir, f"results_test_{test_month_str}_1m_{period_name}_w{window_size}_exp2.txt")
            if not os.path.exists(test_result):
                return False

    return True
186
+
187
+ # Main experiment runner for a single coin and window size
188
# Main experiment runner for a single coin and window size
def run_experiments_for_coin(symbol, train_month, test_months, window_size):
    """Run both experiments for one coin at one window size.

    Experiment I trains and tests on matching time spans (7/14/21/28 days);
    Experiment II trains on one week and tests on 2/3/4-week spans of each
    test month. Raw CSVs, rendered images, trained models, and result files
    are cached under BASE_DIR/<symbol> so interrupted runs can resume; if
    every result file already exists the whole coin/window combo is skipped.
    """
    if is_window_size_complete(symbol, train_month, test_months, window_size):
        print(f"All experiments for {symbol} with window size {window_size} are complete, skipping")
        return

    # Per-coin directory layout.
    coin_dir = os.path.join(BASE_DIR, symbol)
    RAW_DATA_DIR = os.path.join(coin_dir, "raw_data")
    IMAGES_DIR = os.path.join(coin_dir, "images")
    MODELS_DIR = os.path.join(coin_dir, "models")
    RESULTS_DIR = os.path.join(coin_dir, "results")

    os.makedirs(RAW_DATA_DIR, exist_ok=True)
    os.makedirs(IMAGES_DIR, exist_ok=True)
    os.makedirs(MODELS_DIR, exist_ok=True)
    os.makedirs(RESULTS_DIR, exist_ok=True)

    train_year, train_month_num = train_month

    # Experiment I: Train and test on matching timelengths
    for days in TIME_LENGTHS:
        period_name = f"{days}days"
        train_start = datetime(train_year, train_month_num, 1)
        train_end = train_start + timedelta(days=days - 1, hours=23, minutes=59)
        train_month_str = f"{train_year}-{train_month_num:02d}"

        # Fetch training data from Binance unless a cached CSV exists.
        raw_file = os.path.join(RAW_DATA_DIR, f"raw_{train_month_str}_1m_{period_name}.csv")
        if not os.path.exists(raw_file):
            df = fetch_coin_data(symbol, train_start, train_end)
            df.set_index("timestamp", inplace=True)
            df.to_csv(raw_file)
            print(f"Raw data saved to {raw_file}")
        else:
            print(f"Raw data already exists at {raw_file}, skipping fetch")
            df = pd.read_csv(raw_file, index_col="timestamp", parse_dates=["timestamp"])
            df.index = pd.to_datetime(df.index)  # Ensure DatetimeIndex

        # Render window images, train on them, and record training metrics.
        images_subdir = os.path.join(IMAGES_DIR, f"{train_month_str}_1m_{period_name}_w{window_size}")
        labels_file = generate_images(df, window_size, images_subdir, period_name, train_month_str)
        X, y = load_images(labels_file, images_subdir)
        model, history = train_model(X, y, period_name, train_month_str, window_size, coin_dir)
        evaluate_and_save(model, X, y, period_name, train_month_str, window_size, coin_dir, "train")

        # Clear TensorFlow resources
        tf.keras.backend.clear_session()
        gc.collect()

        # Evaluate the trained model on each out-of-sample test month.
        for test_year, test_month_num in test_months:
            test_start = datetime(test_year, test_month_num, 1)
            test_end = test_start + timedelta(days=days - 1, hours=23, minutes=59)
            test_month_str = f"{test_year}-{test_month_num:02d}"

            raw_file = os.path.join(RAW_DATA_DIR, f"raw_{test_month_str}_1m_{period_name}.csv")
            if not os.path.exists(raw_file):
                df = fetch_coin_data(symbol, test_start, test_end)
                df.set_index("timestamp", inplace=True)
                df.to_csv(raw_file)
                print(f"Raw data saved to {raw_file}")
            else:
                print(f"Raw data already exists at {raw_file}, skipping fetch")
                df = pd.read_csv(raw_file, index_col="timestamp", parse_dates=["timestamp"])
                df.index = pd.to_datetime(df.index)  # Ensure DatetimeIndex

            images_subdir = os.path.join(IMAGES_DIR, f"{test_month_str}_1m_{period_name}_w{window_size}")
            labels_file = generate_images(df, window_size, images_subdir, period_name, test_month_str)
            X, y = load_images(labels_file, images_subdir)
            evaluate_and_save(model, X, y, period_name, test_month_str, window_size, coin_dir, "test")

            # Clear TensorFlow resources again
            tf.keras.backend.clear_session()
            gc.collect()

    # Experiment II: Train on 1 week, test on 2-3-4 weeks
    exp2_test_lengths = [14, 21, 28]  # 2, 3, 4 weeks
    train_start = datetime(train_year, train_month_num, 1)
    train_end = train_start + timedelta(days=6, hours=23, minutes=59)  # 1 week
    train_month_str = f"{train_year}-{train_month_num:02d}"
    period_name = "1week"

    raw_file = os.path.join(RAW_DATA_DIR, f"raw_{train_month_str}_1m_{period_name}.csv")
    if not os.path.exists(raw_file):
        df = fetch_coin_data(symbol, train_start, end_time=train_end)
        df.set_index("timestamp", inplace=True)
        df.to_csv(raw_file)
        print(f"Raw data saved to {raw_file}")
    else:
        print(f"Raw data already exists at {raw_file}, skipping fetch")
        df = pd.read_csv(raw_file, index_col="timestamp", parse_dates=["timestamp"])
        df.index = pd.to_datetime(df.index)  # Ensure DatetimeIndex

    # Train once on the single week; "_exp2" suffix keeps these result files
    # distinct from Experiment I's.
    images_subdir = os.path.join(IMAGES_DIR, f"{train_month_str}_1m_{period_name}_w{window_size}")
    labels_file = generate_images(df, window_size, images_subdir, period_name, train_month_str)
    X, y = load_images(labels_file, images_subdir)
    model, history = train_model(X, y, period_name, train_month_str, window_size, coin_dir)
    evaluate_and_save(model, X, y, period_name, train_month_str, window_size, coin_dir, "train", "_exp2")

    # Clear TensorFlow resources
    tf.keras.backend.clear_session()
    gc.collect()

    # Evaluate the one-week model on longer spans of every test month.
    for test_year, test_month_num in test_months:
        test_month_str = f"{test_year}-{test_month_num:02d}"
        for days in exp2_test_lengths:
            period_name = f"{days}days"
            test_start = datetime(test_year, test_month_num, 1)
            test_end = test_start + timedelta(days=days - 1, hours=23, minutes=59)

            raw_file = os.path.join(RAW_DATA_DIR, f"raw_{test_month_str}_1m_{period_name}.csv")
            if not os.path.exists(raw_file):
                df = fetch_coin_data(symbol, test_start, test_end)
                df.set_index("timestamp", inplace=True)
                df.to_csv(raw_file)
                print(f"Raw data saved to {raw_file}")
            else:
                print(f"Raw data already exists at {raw_file}, skipping fetch")
                df = pd.read_csv(raw_file, index_col="timestamp", parse_dates=["timestamp"])
                df.index = pd.to_datetime(df.index)  # Ensure DatetimeIndex

            images_subdir = os.path.join(IMAGES_DIR, f"{test_month_str}_1m_{period_name}_w{window_size}")
            labels_file = generate_images(df, window_size, images_subdir, period_name, test_month_str)
            X, y = load_images(labels_file, images_subdir)
            evaluate_and_save(model, X, y, period_name, test_month_str, window_size, coin_dir, "test", "_exp2")

            # Clear TensorFlow resources
            tf.keras.backend.clear_session()
            gc.collect()
313
+
314
+ # Run experiments for all coins and window sizes
315
# Run experiments for all coins and window sizes
def run_all_experiments():
    """Iterate every configured coin and window size, running the full
    experiment suite for each combination and releasing TensorFlow memory
    between runs."""
    os.makedirs(BASE_DIR, exist_ok=True)  # Ensure BASE_DIR exists
    for symbol, config in COINS.items():
        train_month = config["train_month"]
        test_months = config["test_months"]
        for window_size in WINDOW_SIZES:
            print(f"Running experiments for {symbol} with window size {window_size}")
            run_experiments_for_coin(symbol, train_month, test_months, window_size)
            print(f"Completed experiments for {symbol} with window size {window_size}")
            # Drop the Keras session and force a GC cycle between window sizes.
            tf.keras.backend.clear_session()
            gc.collect()
325
+
326
+ if __name__ == "__main__":
327
+ parser = argparse.ArgumentParser(description="Crypto Minute-Based Image Classification Research for Multiple Coins")
328
+ args = parser.parse_args()
329
+ run_all_experiments()