zirobtc commited on
Commit
a547253
·
verified ·
1 Parent(s): d195287

Upload folder using huggingface_hub

Browse files
data/data_fetcher.py CHANGED
@@ -94,6 +94,8 @@ class DataFetcher:
94
  'cabalspy_name',
95
  'axiom_kol_name'
96
  ]
 
 
97
  def __init__(self, clickhouse_client: Any, neo4j_driver: Any):
98
  self.db_client = clickhouse_client
99
  self.graph_client = neo4j_driver
@@ -115,7 +117,6 @@ class DataFetcher:
115
  if where_clauses:
116
  query += " WHERE " + " AND ".join(where_clauses)
117
 
118
- print(f"INFO: Executing query to get all mints: `{query}` with params: {params}")
119
  try:
120
  rows, columns_info = self.db_client.execute(query, params, with_column_types=True)
121
  if not rows:
@@ -136,7 +137,6 @@ class DataFetcher:
136
  Fetches the raw mint record for a token from the 'mints' table.
137
  """
138
  query = f"SELECT timestamp, creator_address, mint_address, protocol FROM mints WHERE mint_address = '{token_address}' ORDER BY timestamp ASC LIMIT 1"
139
- print(f"INFO: Executing query to fetch mint record: `{query}`")
140
 
141
  # Assumes the client returns a list of dicts or can be converted
142
  # Using column names from your schema
@@ -177,12 +177,9 @@ class DataFetcher:
177
  if not wallet_addresses:
178
  return {}
179
 
180
- BATCH_SIZE = 1000
181
  socials = {}
182
- total_wallets = len(wallet_addresses)
183
- print(f"INFO: Executing query to fetch wallet socials for {total_wallets} wallets in batches of {BATCH_SIZE}.")
184
-
185
- for i in range(0, total_wallets, BATCH_SIZE):
186
  batch_addresses = wallet_addresses[i : i + BATCH_SIZE]
187
 
188
  query = "SELECT * FROM wallet_socials WHERE wallet_address IN %(addresses)s"
@@ -245,14 +242,11 @@ class DataFetcher:
245
  profile_keys = [f"profile__{col}" for col in (profile_base_select_cols + profile_metric_select_cols)]
246
  social_keys = [f"social__{col}" for col in social_select_cols]
247
 
248
- BATCH_SIZE = 1000
249
  all_profiles = {}
250
  all_socials = {}
251
 
252
- total_wallets = len(wallet_addresses)
253
- print(f"INFO: Fetching profiles+socials for {total_wallets} wallets in batches of {BATCH_SIZE}...")
254
-
255
- for i in range(0, total_wallets, BATCH_SIZE):
256
  batch_addresses = wallet_addresses[i : i + BATCH_SIZE]
257
 
258
  query = f"""
@@ -349,12 +343,9 @@ class DataFetcher:
349
  if not wallet_addresses:
350
  return {}
351
 
352
- BATCH_SIZE = 1000
353
  holdings = defaultdict(list)
354
- total_wallets = len(wallet_addresses)
355
- print(f"INFO: Executing query to fetch wallet holdings for {total_wallets} wallets in batches of {BATCH_SIZE}.")
356
-
357
- for i in range(0, total_wallets, BATCH_SIZE):
358
  batch_addresses = wallet_addresses[i : i + BATCH_SIZE]
359
 
360
  # --- Time-aware query ---
@@ -423,8 +414,6 @@ class DataFetcher:
423
 
424
  cutoff_ts = int(T_cutoff.timestamp())
425
 
426
- print(f"INFO: Fetching graph links up to {max_degrees} degrees for {len(initial_addresses)} initial entities...")
427
-
428
  max_retries = 3
429
  backoff_sec = 2
430
 
@@ -439,8 +428,6 @@ class DataFetcher:
439
  if not newly_found_entities:
440
  break
441
 
442
- print(f" - Degree {i+1}: Traversing from {len(newly_found_entities)} new entities...")
443
-
444
  # --- TIMING: Query execution ---
445
  _t_query_start = time.perf_counter()
446
 
@@ -481,12 +468,6 @@ class DataFetcher:
481
 
482
  _t_process_done = time.perf_counter()
483
 
484
- # --- TIMING: Print detailed stats ---
485
- print(f" [NEO4J TIMING] query_exec: {(_t_query_done - _t_query_start)*1000:.1f}ms, "
486
- f"result_process: {(_t_process_done - _t_process_start)*1000:.1f}ms")
487
- print(f" [NEO4J STATS] records_returned: {records_total}, "
488
- f"new_entities: {len(current_degree_new_entities)}")
489
-
490
  newly_found_entities = current_degree_new_entities
491
 
492
  # --- Post-process: rename, map props, strip, cap ---
@@ -589,12 +570,9 @@ class DataFetcher:
589
  if not token_addresses:
590
  return {}
591
 
592
- BATCH_SIZE = 1000
593
  tokens = {}
594
- total_tokens = len(token_addresses)
595
- print(f"INFO: Executing query to fetch token data for {total_tokens} tokens in batches of {BATCH_SIZE}.")
596
-
597
- for i in range(0, total_tokens, BATCH_SIZE):
598
  batch_addresses = token_addresses[i : i + BATCH_SIZE]
599
 
600
  # --- NEW: Time-aware query for historical token data ---
@@ -645,10 +623,9 @@ class DataFetcher:
645
  if not token_addresses:
646
  return {}
647
 
648
- BATCH_SIZE = 1000
649
  token_details = {}
650
  total_tokens = len(token_addresses)
651
- print(f"INFO: Executing query to fetch deployed token details for {total_tokens} tokens in batches of {BATCH_SIZE}.")
652
 
653
  for i in range(0, total_tokens, BATCH_SIZE):
654
  batch_addresses = token_addresses[i : i + BATCH_SIZE]
@@ -790,7 +767,6 @@ class DataFetcher:
790
  ORDER BY timestamp ASC
791
  """
792
  params = {'token_address': token_address, 'T_cutoff': T_cutoff, 'min_amount': min_amount_threshold}
793
- print(f"INFO: Fetching significant transfers for {token_address} (amount >= {min_amount_threshold}).")
794
 
795
  try:
796
  # This query no longer uses H/B/H, it fetches all significant transfers
 
94
  'cabalspy_name',
95
  'axiom_kol_name'
96
  ]
97
+ DB_BATCH_SIZE = 5000
98
+
99
  def __init__(self, clickhouse_client: Any, neo4j_driver: Any):
100
  self.db_client = clickhouse_client
101
  self.graph_client = neo4j_driver
 
117
  if where_clauses:
118
  query += " WHERE " + " AND ".join(where_clauses)
119
 
 
120
  try:
121
  rows, columns_info = self.db_client.execute(query, params, with_column_types=True)
122
  if not rows:
 
137
  Fetches the raw mint record for a token from the 'mints' table.
138
  """
139
  query = f"SELECT timestamp, creator_address, mint_address, protocol FROM mints WHERE mint_address = '{token_address}' ORDER BY timestamp ASC LIMIT 1"
 
140
 
141
  # Assumes the client returns a list of dicts or can be converted
142
  # Using column names from your schema
 
177
  if not wallet_addresses:
178
  return {}
179
 
180
+ BATCH_SIZE = self.DB_BATCH_SIZE
181
  socials = {}
182
+ for i in range(0, len(wallet_addresses), BATCH_SIZE):
 
 
 
183
  batch_addresses = wallet_addresses[i : i + BATCH_SIZE]
184
 
185
  query = "SELECT * FROM wallet_socials WHERE wallet_address IN %(addresses)s"
 
242
  profile_keys = [f"profile__{col}" for col in (profile_base_select_cols + profile_metric_select_cols)]
243
  social_keys = [f"social__{col}" for col in social_select_cols]
244
 
245
+ BATCH_SIZE = self.DB_BATCH_SIZE
246
  all_profiles = {}
247
  all_socials = {}
248
 
249
+ for i in range(0, len(wallet_addresses), BATCH_SIZE):
 
 
 
250
  batch_addresses = wallet_addresses[i : i + BATCH_SIZE]
251
 
252
  query = f"""
 
343
  if not wallet_addresses:
344
  return {}
345
 
346
+ BATCH_SIZE = self.DB_BATCH_SIZE
347
  holdings = defaultdict(list)
348
+ for i in range(0, len(wallet_addresses), BATCH_SIZE):
 
 
 
349
  batch_addresses = wallet_addresses[i : i + BATCH_SIZE]
350
 
351
  # --- Time-aware query ---
 
414
 
415
  cutoff_ts = int(T_cutoff.timestamp())
416
 
 
 
417
  max_retries = 3
418
  backoff_sec = 2
419
 
 
428
  if not newly_found_entities:
429
  break
430
 
 
 
431
  # --- TIMING: Query execution ---
432
  _t_query_start = time.perf_counter()
433
 
 
468
 
469
  _t_process_done = time.perf_counter()
470
 
 
 
 
 
 
 
471
  newly_found_entities = current_degree_new_entities
472
 
473
  # --- Post-process: rename, map props, strip, cap ---
 
570
  if not token_addresses:
571
  return {}
572
 
573
+ BATCH_SIZE = self.DB_BATCH_SIZE
574
  tokens = {}
575
+ for i in range(0, len(token_addresses), BATCH_SIZE):
 
 
 
576
  batch_addresses = token_addresses[i : i + BATCH_SIZE]
577
 
578
  # --- NEW: Time-aware query for historical token data ---
 
623
  if not token_addresses:
624
  return {}
625
 
626
+ BATCH_SIZE = self.DB_BATCH_SIZE
627
  token_details = {}
628
  total_tokens = len(token_addresses)
 
629
 
630
  for i in range(0, total_tokens, BATCH_SIZE):
631
  batch_addresses = token_addresses[i : i + BATCH_SIZE]
 
767
  ORDER BY timestamp ASC
768
  """
769
  params = {'token_address': token_address, 'T_cutoff': T_cutoff, 'min_amount': min_amount_threshold}
 
770
 
771
  try:
772
  # This query no longer uses H/B/H, it fetches all significant transfers
data/data_loader.py CHANGED
@@ -19,6 +19,8 @@ from models.multi_modal_processor import MultiModalEncoder
19
  from data.data_fetcher import DataFetcher # NEW: Import the DataFetcher
20
  from data.context_targets import derive_movement_targets
21
  from data.quant_ohlc_feature_schema import (
 
 
22
  FEATURE_VERSION,
23
  FEATURE_VERSION_ID,
24
  LOOKBACK_SECONDS,
@@ -27,7 +29,6 @@ from data.quant_ohlc_feature_schema import (
27
  empty_feature_dict,
28
  feature_dict_to_vector,
29
  )
30
- from signals.patterns import compute_pattern_features
31
  from signals.rolling_quant import compute_rolling_quant_features
32
  from signals.support_resistance import compute_support_resistance_features
33
  from signals.trendlines import compute_trendline_features
@@ -233,6 +234,7 @@ class OracleDataset(Dataset):
233
 
234
  # Cache for lightweight token metadata to avoid redundant DB fetches
235
  self._token_meta_cache = {}
 
236
 
237
 
238
 
@@ -956,11 +958,6 @@ class OracleDataset(Dataset):
956
  continue
957
  all_token_data[addr] = data
958
 
959
- # Print wallet_data sub-timings
960
- print(f" [WALLET_DATA] db_fetch: {_wd_timings['db_fetch']*1000:.1f}ms, "
961
- f"holding_tokens: {_wd_timings['num_holding_tokens']}, "
962
- f"holding_token_processing: {_wd_timings['holding_token_processing']*1000:.1f}ms")
963
-
964
  # --- Calculate deployed token stats using point-in-time logic ---
965
  self._calculate_deployed_token_stats(profiles, T_cutoff)
966
 
@@ -1105,7 +1102,6 @@ class OracleDataset(Dataset):
1105
 
1106
  # Fetch missing tokens
1107
  if missing_tokens and self.fetcher:
1108
- print(f"INFO: Processing token data for {len(missing_tokens)} unique tokens (cache miss)...")
1109
  fetched = self.fetcher.fetch_token_data(missing_tokens, T_cutoff)
1110
  # Update cache
1111
  for addr, data in fetched.items():
@@ -1327,24 +1323,27 @@ class OracleDataset(Dataset):
1327
  ) -> Dict[str, float]:
1328
  return compute_trendline_features(closes, highs, lows, end_idx)
1329
 
1330
- def _compute_optional_pattern_flags(
1331
- self,
1332
- closes: List[float],
1333
- highs: List[float],
1334
- lows: List[float],
1335
- end_idx: int,
1336
- ) -> Dict[str, float]:
1337
- return compute_pattern_features(closes, highs, lows, end_idx)
1338
-
1339
  def _extract_quant_ohlc_features_for_segment(
1340
  self,
1341
  segment: List[tuple],
1342
  interval_label: str,
 
1343
  ) -> List[Dict[str, Any]]:
1344
- del interval_label
1345
  if not segment:
 
 
 
 
1346
  return []
1347
 
 
 
 
 
 
 
 
 
1348
  timestamps = [int(row[0]) for row in segment]
1349
  opens = [float(row[1]) for row in segment]
1350
  closes = [float(row[2]) for row in segment]
@@ -1354,11 +1353,11 @@ class OracleDataset(Dataset):
1354
  one_sec_returns = np.diff(log_closes)
1355
  feature_windows: List[Dict[str, Any]] = []
1356
 
1357
- for window_idx in range(TOKENS_PER_SEGMENT):
1358
- window_start = window_idx * WINDOW_SECONDS
1359
  if window_start >= len(segment):
1360
  break
1361
- window_end = min(len(segment), window_start + WINDOW_SECONDS)
1362
  current_end_idx = window_end - 1
1363
  window_returns = one_sec_returns[window_start:max(window_start, current_end_idx)]
1364
  window_closes = closes[window_start:window_end]
@@ -1439,17 +1438,11 @@ class OracleDataset(Dataset):
1439
  closes=closes,
1440
  end_idx=current_end_idx,
1441
  ))
1442
- features.update(self._compute_optional_pattern_flags(
1443
- closes=closes,
1444
- highs=highs,
1445
- lows=lows,
1446
- end_idx=current_end_idx,
1447
- ))
1448
 
1449
  feature_windows.append({
1450
  "start_ts": timestamps[window_start],
1451
  "end_ts": timestamps[current_end_idx],
1452
- "window_seconds": WINDOW_SECONDS,
1453
  "feature_vector": feature_dict_to_vector(features),
1454
  "feature_names_version": FEATURE_VERSION,
1455
  "feature_version_id": FEATURE_VERSION_ID,
@@ -1458,14 +1451,57 @@ class OracleDataset(Dataset):
1458
  "resistance_distance": features.get("nearest_resistance_dist", 0.0),
1459
  "support_strength": features.get("support_strength", 0.0),
1460
  "resistance_strength": features.get("resistance_strength", 0.0),
 
 
 
 
 
 
1461
  },
1462
- "pattern_flags": {
1463
- key.replace("pattern_", "").replace("_confidence", ""): features[key]
1464
- for key in features.keys()
1465
- if key.startswith("pattern_") and key.endswith("_confidence")
 
 
 
 
 
1466
  },
1467
  })
1468
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1469
  return feature_windows
1470
 
1471
  def __getitem__(self, idx: int) -> Optional[Dict[str, Any]]:
@@ -2179,7 +2215,7 @@ class OracleDataset(Dataset):
2179
  'opens': self._normalize_price_series(opens_raw),
2180
  'closes': self._normalize_price_series(closes_raw),
2181
  'i': interval_label,
2182
- 'quant_ohlc_features': self._extract_quant_ohlc_features_for_segment(segment, interval_label) if interval_label == "1s" else [],
2183
  'quant_feature_version': FEATURE_VERSION,
2184
  }
2185
  emitted_events.append(chart_event)
 
19
  from data.data_fetcher import DataFetcher # NEW: Import the DataFetcher
20
  from data.context_targets import derive_movement_targets
21
  from data.quant_ohlc_feature_schema import (
22
+ FEATURE_INDEX,
23
+ SEGMENT_SECONDS,
24
  FEATURE_VERSION,
25
  FEATURE_VERSION_ID,
26
  LOOKBACK_SECONDS,
 
29
  empty_feature_dict,
30
  feature_dict_to_vector,
31
  )
 
32
  from signals.rolling_quant import compute_rolling_quant_features
33
  from signals.support_resistance import compute_support_resistance_features
34
  from signals.trendlines import compute_trendline_features
 
234
 
235
  # Cache for lightweight token metadata to avoid redundant DB fetches
236
  self._token_meta_cache = {}
237
+ self._chart_feature_log_count = 0
238
 
239
 
240
 
 
958
  continue
959
  all_token_data[addr] = data
960
 
 
 
 
 
 
961
  # --- Calculate deployed token stats using point-in-time logic ---
962
  self._calculate_deployed_token_stats(profiles, T_cutoff)
963
 
 
1102
 
1103
  # Fetch missing tokens
1104
  if missing_tokens and self.fetcher:
 
1105
  fetched = self.fetcher.fetch_token_data(missing_tokens, T_cutoff)
1106
  # Update cache
1107
  for addr, data in fetched.items():
 
1323
  ) -> Dict[str, float]:
1324
  return compute_trendline_features(closes, highs, lows, end_idx)
1325
 
 
 
 
 
 
 
 
 
 
1326
  def _extract_quant_ohlc_features_for_segment(
1327
  self,
1328
  segment: List[tuple],
1329
  interval_label: str,
1330
+ token_address: Optional[str] = None,
1331
  ) -> List[Dict[str, Any]]:
 
1332
  if not segment:
1333
+ print(
1334
+ f"INFO: Chart quant skipped | token={token_address or 'unknown'} "
1335
+ "reason=empty_segment"
1336
+ )
1337
  return []
1338
 
1339
+ try:
1340
+ interval_seconds = max(1, int(str(interval_label).rstrip("s")))
1341
+ except Exception:
1342
+ interval_seconds = 1
1343
+ window_bar_count = max(1, WINDOW_SECONDS // interval_seconds)
1344
+ effective_window_seconds = max(WINDOW_SECONDS, interval_seconds)
1345
+ max_windows = max(1, SEGMENT_SECONDS // effective_window_seconds)
1346
+
1347
  timestamps = [int(row[0]) for row in segment]
1348
  opens = [float(row[1]) for row in segment]
1349
  closes = [float(row[2]) for row in segment]
 
1353
  one_sec_returns = np.diff(log_closes)
1354
  feature_windows: List[Dict[str, Any]] = []
1355
 
1356
+ for window_idx in range(max_windows):
1357
+ window_start = window_idx * window_bar_count
1358
  if window_start >= len(segment):
1359
  break
1360
+ window_end = min(len(segment), window_start + window_bar_count)
1361
  current_end_idx = window_end - 1
1362
  window_returns = one_sec_returns[window_start:max(window_start, current_end_idx)]
1363
  window_closes = closes[window_start:window_end]
 
1438
  closes=closes,
1439
  end_idx=current_end_idx,
1440
  ))
 
 
 
 
 
 
1441
 
1442
  feature_windows.append({
1443
  "start_ts": timestamps[window_start],
1444
  "end_ts": timestamps[current_end_idx],
1445
+ "window_seconds": effective_window_seconds,
1446
  "feature_vector": feature_dict_to_vector(features),
1447
  "feature_names_version": FEATURE_VERSION,
1448
  "feature_version_id": FEATURE_VERSION_ID,
 
1451
  "resistance_distance": features.get("nearest_resistance_dist", 0.0),
1452
  "support_strength": features.get("support_strength", 0.0),
1453
  "resistance_strength": features.get("resistance_strength", 0.0),
1454
+ "breakout_up": features.get("keylevel_breakout_up", 0.0),
1455
+ "breakout_down": features.get("keylevel_breakout_down", 0.0),
1456
+ "hold_above": features.get("keylevel_hold_above", 0.0),
1457
+ "hold_below": features.get("keylevel_hold_below", 0.0),
1458
+ "flip_to_support": features.get("keylevel_flip_to_support", 0.0),
1459
+ "flip_to_resistance": features.get("keylevel_flip_to_resistance", 0.0),
1460
  },
1461
+ "keylevel_flags": {
1462
+ "breakout_up": features.get("keylevel_breakout_up", 0.0),
1463
+ "breakout_down": features.get("keylevel_breakout_down", 0.0),
1464
+ "hold_above": features.get("keylevel_hold_above", 0.0),
1465
+ "hold_below": features.get("keylevel_hold_below", 0.0),
1466
+ "failed_breakout_up": features.get("keylevel_failed_breakout_up", 0.0),
1467
+ "failed_breakout_down": features.get("keylevel_failed_breakout_down", 0.0),
1468
+ "flip_to_support": features.get("keylevel_flip_to_support", 0.0),
1469
+ "flip_to_resistance": features.get("keylevel_flip_to_resistance", 0.0),
1470
  },
1471
  })
1472
 
1473
+ sr_windows = sum(
1474
+ 1 for window in feature_windows
1475
+ if float(window["feature_vector"][FEATURE_INDEX["sr_available"]]) > 0.0
1476
+ )
1477
+ trendline_windows = sum(
1478
+ 1 for window in feature_windows
1479
+ if float(window["feature_vector"][FEATURE_INDEX["trendline_available"]]) > 0.0
1480
+ )
1481
+ breakout_windows = sum(
1482
+ 1 for window in feature_windows
1483
+ if (
1484
+ float(window["feature_vector"][FEATURE_INDEX["keylevel_breakout_up"]]) > 0.0
1485
+ or float(window["feature_vector"][FEATURE_INDEX["keylevel_breakout_down"]]) > 0.0
1486
+ or float(window["feature_vector"][FEATURE_INDEX["keylevel_flip_to_support"]]) > 0.0
1487
+ or float(window["feature_vector"][FEATURE_INDEX["keylevel_flip_to_resistance"]]) > 0.0
1488
+ )
1489
+ )
1490
+ keylevel_break_events = sum(
1491
+ 1 for window in feature_windows
1492
+ if (
1493
+ float(window["feature_vector"][FEATURE_INDEX["keylevel_breakout_up"]]) > 0.0
1494
+ or float(window["feature_vector"][FEATURE_INDEX["keylevel_breakout_down"]]) > 0.0
1495
+ )
1496
+ )
1497
+ self._chart_feature_log_count += 1
1498
+ print(
1499
+ f"INFO: Chart quant built | token={token_address or 'unknown'} "
1500
+ f"interval={interval_label} segment={self._chart_feature_log_count} "
1501
+ f"windows={len(feature_windows)}/{max_windows} "
1502
+ f"sr={sr_windows} trend={trendline_windows} breaks={breakout_windows} "
1503
+ f"break_events={keylevel_break_events}"
1504
+ )
1505
  return feature_windows
1506
 
1507
  def __getitem__(self, idx: int) -> Optional[Dict[str, Any]]:
 
2215
  'opens': self._normalize_price_series(opens_raw),
2216
  'closes': self._normalize_price_series(closes_raw),
2217
  'i': interval_label,
2218
+ 'quant_ohlc_features': self._extract_quant_ohlc_features_for_segment(segment, interval_label, token_address=token_address),
2219
  'quant_feature_version': FEATURE_VERSION,
2220
  }
2221
  emitted_events.append(chart_event)
data/quant_ohlc_feature_schema.py CHANGED
@@ -2,19 +2,11 @@ from collections import OrderedDict
2
  from typing import Dict, Iterable, List
3
 
4
 
5
- FEATURE_VERSION = "qohlc_v1"
6
- FEATURE_VERSION_ID = 1
7
  WINDOW_SECONDS = 5
8
  SEGMENT_SECONDS = 300
9
  TOKENS_PER_SEGMENT = SEGMENT_SECONDS // WINDOW_SECONDS
10
- PATTERN_NAMES = [
11
- "double_top",
12
- "double_bottom",
13
- "ascending_triangle",
14
- "descending_triangle",
15
- "head_shoulders",
16
- "inverse_head_shoulders",
17
- ]
18
  LOOKBACK_SECONDS = [15, 30, 60, 120]
19
 
20
 
@@ -63,6 +55,18 @@ FEATURE_NAMES.extend([
63
  "resistance_swept",
64
  "support_reclaim",
65
  "resistance_reject",
 
 
 
 
 
 
 
 
 
 
 
 
66
  "lower_trendline_slope",
67
  "upper_trendline_slope",
68
  "dist_to_lower_line",
@@ -84,13 +88,9 @@ FEATURE_NAMES.extend([
84
  "rolling_vol_zscore",
85
  ])
86
 
87
- for pattern_name in PATTERN_NAMES:
88
- FEATURE_NAMES.append(f"pattern_{pattern_name}_confidence")
89
-
90
  FEATURE_NAMES.extend([
91
  "sr_available",
92
  "trendline_available",
93
- "pattern_available",
94
  ])
95
 
96
  FEATURE_INDEX = {name: idx for idx, name in enumerate(FEATURE_NAMES)}
@@ -126,6 +126,18 @@ FEATURE_GROUPS = OrderedDict([
126
  "resistance_swept",
127
  "support_reclaim",
128
  "resistance_reject",
 
 
 
 
 
 
 
 
 
 
 
 
129
  ]),
130
  ("trendlines", [
131
  "lower_trendline_slope",
@@ -150,11 +162,9 @@ FEATURE_GROUPS = OrderedDict([
150
  "mean_reversion_score",
151
  "rolling_vol_zscore",
152
  ]),
153
- ("patterns", [name for name in FEATURE_NAMES if name.startswith("pattern_")]),
154
  ("availability", [
155
  "sr_available",
156
  "trendline_available",
157
- "pattern_available",
158
  ]),
159
  ])
160
 
 
2
  from typing import Dict, Iterable, List
3
 
4
 
5
+ FEATURE_VERSION = "qohlc_v2"
6
+ FEATURE_VERSION_ID = 2
7
  WINDOW_SECONDS = 5
8
  SEGMENT_SECONDS = 300
9
  TOKENS_PER_SEGMENT = SEGMENT_SECONDS // WINDOW_SECONDS
 
 
 
 
 
 
 
 
10
  LOOKBACK_SECONDS = [15, 30, 60, 120]
11
 
12
 
 
55
  "resistance_swept",
56
  "support_reclaim",
57
  "resistance_reject",
58
+ "keylevel_breakout_up",
59
+ "keylevel_breakout_down",
60
+ "keylevel_hold_above",
61
+ "keylevel_hold_below",
62
+ "keylevel_failed_breakout_up",
63
+ "keylevel_failed_breakout_down",
64
+ "keylevel_flip_to_support",
65
+ "keylevel_flip_to_resistance",
66
+ "keylevel_upper_distance",
67
+ "keylevel_lower_distance",
68
+ "keylevel_zone_width_frac",
69
+ "keylevel_density",
70
  "lower_trendline_slope",
71
  "upper_trendline_slope",
72
  "dist_to_lower_line",
 
88
  "rolling_vol_zscore",
89
  ])
90
 
 
 
 
91
  FEATURE_NAMES.extend([
92
  "sr_available",
93
  "trendline_available",
 
94
  ])
95
 
96
  FEATURE_INDEX = {name: idx for idx, name in enumerate(FEATURE_NAMES)}
 
126
  "resistance_swept",
127
  "support_reclaim",
128
  "resistance_reject",
129
+ "keylevel_breakout_up",
130
+ "keylevel_breakout_down",
131
+ "keylevel_hold_above",
132
+ "keylevel_hold_below",
133
+ "keylevel_failed_breakout_up",
134
+ "keylevel_failed_breakout_down",
135
+ "keylevel_flip_to_support",
136
+ "keylevel_flip_to_resistance",
137
+ "keylevel_upper_distance",
138
+ "keylevel_lower_distance",
139
+ "keylevel_zone_width_frac",
140
+ "keylevel_density",
141
  ]),
142
  ("trendlines", [
143
  "lower_trendline_slope",
 
162
  "mean_reversion_score",
163
  "rolling_vol_zscore",
164
  ]),
 
165
  ("availability", [
166
  "sr_available",
167
  "trendline_available",
 
168
  ]),
169
  ])
170
 
log.log CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:49c4d500d58301a7c158851716c6cf0c7e6bc60cdf59f7a166fcb92b4e77b04a
3
- size 390587
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:edec35bc8304b6d83bfc091ee6f7ff1a9dd1864937f1e91b8655e01de2617d1c
3
+ size 102571
scripts/cache_debug_web.py ADDED
@@ -0,0 +1,986 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Minimal web viewer for cached context samples.
4
+
5
+ Loads one cache sample with the same torch.load path used by dump_cache_sample.py
6
+ and renders:
7
+ - top-level debug metadata
8
+ - cached Chart_Segment line view
9
+ - quant window boundaries
10
+ - per-window level/pattern summaries
11
+ - full per-window feature maps
12
+
13
+ Usage:
14
+ /venv/main/bin/python scripts/cache_debug_web.py
15
+ /venv/main/bin/python scripts/cache_debug_web.py --cache_dir data/cache --port 8765
16
+ /venv/main/bin/python scripts/cache_debug_web.py --file data/cache/sample_ABC.pt
17
+ """
18
+
19
+ import argparse
20
+ import json
21
+ import os
22
+ import random
23
+ import sys
24
+ from collections import Counter
25
+ from datetime import datetime
26
+ from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
27
+ from pathlib import Path
28
+ from typing import Any, Dict, List, Optional
29
+ from urllib.parse import parse_qs, urlparse
30
+
31
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
32
+
33
+ import numpy as np
34
+ import torch
35
+ import pandas as pd
36
+
37
+ from data.quant_ohlc_feature_schema import FEATURE_NAMES
38
+ from signals.support_resistance import compute_support_resistance_debug, compute_support_resistance_features
39
+ from signals.trendlines import _fit_with_trendln
40
+ from ta.trend import ema_indicator, sma_indicator
41
+
42
+
43
+ def _load_cache_sample(path: Path) -> Dict[str, Any]:
44
+ return torch.load(path, map_location="cpu", weights_only=False)
45
+
46
+
47
+ def _safe_float(value: Any) -> float:
48
+ try:
49
+ return float(value)
50
+ except Exception:
51
+ return 0.0
52
+
53
+
54
def _feature_map(window: Dict[str, Any]) -> Dict[str, float]:
    """Zip the schema's FEATURE_NAMES against the window's feature_vector.

    Missing positions default to 0.0; a non-list vector yields an empty map.
    """
    vector = window.get("feature_vector", [])
    if not isinstance(vector, list):
        return {}
    mapped: Dict[str, float] = {}
    for idx, name in enumerate(FEATURE_NAMES):
        mapped[name] = _safe_float(vector[idx]) if idx < len(vector) else 0.0
    return mapped
62
+
63
+
64
def _chart_points(chart_event: Dict[str, Any]) -> List[Dict[str, float]]:
    """Build synthetic OHLC points for one cached chart event.

    Prefers the raw open/close series when present; otherwise exponentiates
    the log-space ``opens``/``closes``. High/low are synthesized as
    max/min(open, close) because the cache stores no true high/low.
    Timestamps are back-filled from the event timestamp using the bar
    interval parsed from the ``i`` label (e.g. "1s"). Returns [] when there
    are no closes.
    """
    raw_opens = chart_event.get("raw_opens")
    raw_closes = chart_event.get("raw_closes")
    have_raw = isinstance(raw_opens, list) and isinstance(raw_closes, list) and bool(raw_closes)
    if have_raw:
        opens, closes = raw_opens, raw_closes
    else:
        # Fall back to the logged series and undo the log transform.
        opens = [float(np.exp(v)) for v in (chart_event.get("opens", []) or [])]
        closes = [float(np.exp(v)) for v in (chart_event.get("closes", []) or [])]
    end_ts = int(chart_event.get("timestamp", 0) or 0)
    if not closes:
        return []
    interval_str = str(chart_event.get("i", "1s"))
    try:
        interval_seconds = max(1, int(interval_str.rstrip("s")))
    except Exception:
        interval_seconds = 1
    first_ts = end_ts - interval_seconds * (len(closes) - 1)
    result: List[Dict[str, float]] = []
    for bar_idx, (open_value, close_value) in enumerate(zip(opens, closes)):
        bar_ts = first_ts + bar_idx * interval_seconds
        result.append({
            "time": int(bar_ts),
            "open": _safe_float(open_value),
            "high": _safe_float(max(open_value, close_value)),
            "low": _safe_float(min(open_value, close_value)),
            "close": _safe_float(close_value),
            "index": bar_idx,
        })
    return result
98
+
99
+
100
def _compute_level_overlays(points: List[Dict[str, float]], windows: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Recompute support/resistance level overlays from the chart points.

    *windows* is accepted only for signature parity with the other overlay
    builders and is ignored.
    """
    del windows
    if not points:
        return {"support_levels": [], "resistance_levels": []}
    debug = compute_support_resistance_debug(
        closes=[_safe_float(point["close"]) for point in points],
        highs=[_safe_float(point["high"]) for point in points],
        lows=[_safe_float(point["low"]) for point in points],
        timestamps=[int(point["time"]) for point in points],
    )
    # Prefer the curated level sets; fall back to the unfiltered ones when empty.
    return {
        "support_levels": debug.get("support_levels", []) or debug.get("all_support_levels", []),
        "resistance_levels": debug.get("resistance_levels", []) or debug.get("all_resistance_levels", []),
        "all_support_levels": debug.get("all_support_levels", []),
        "all_resistance_levels": debug.get("all_resistance_levels", []),
        "sr_available": debug.get("sr_available", 0.0),
    }
123
+
124
+
125
def _compute_trendline_overlays(points: List[Dict[str, float]]) -> List[Dict[str, Any]]:
    """Fit lower/upper trendlines over the chart points and return drawable payloads.

    Fits on closes first; when a side fails, retries that side alone on the
    lows (lower) or highs (upper). Each payload is a two-point line segment
    spanning the full chart width. Returns [] for fewer than 5 points.
    """
    if len(points) < 5:  # too few bars for a meaningful fit
        return []
    closes = np.asarray([p["close"] for p in points], dtype=np.float64)
    highs = np.asarray([p["high"] for p in points], dtype=np.float64)
    lows = np.asarray([p["low"] for p in points], dtype=np.float64)

    lower_line: Any = None
    upper_line: Any = None
    try:
        lower_line, upper_line = _fit_with_trendln(closes)
    except Exception:
        lower_line, upper_line = None, None
    if lower_line is None:
        try:
            lower_line = _fit_with_trendln(lows)[0]
        except Exception:
            lower_line = None
    if upper_line is None:
        try:
            upper_line = _fit_with_trendln(highs)[1]
        except Exception:
            upper_line = None

    def _payload(name: str, line: Any, color: str) -> Optional[Dict[str, Any]]:
        # One drawable segment from the first to the last bar, or None.
        if line is None:
            return None
        slope, intercept = line
        last = len(points) - 1
        # Keep the explicit slope*x form so NaN slopes propagate as before.
        start_value = slope * 0 + intercept
        end_value = slope * last + intercept
        return {
            "name": name,
            "color": color,
            "points": [
                {"time": points[0]["time"], "value": _safe_float(start_value)},
                {"time": points[last]["time"], "value": _safe_float(end_value)},
            ],
        }

    overlays: List[Dict[str, Any]] = []
    for candidate in (
        _payload("lower_trendline", lower_line, "#0f766e"),
        _payload("upper_trendline", upper_line, "#b91c1c"),
    ):
        if candidate:
            overlays.append(candidate)
    return overlays
170
+
171
+
172
def _compute_window_boundaries(points: List[Dict[str, float]], windows: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Build one vertical marker per quant window spanning the chart's price range.

    A window whose keylevel flags show a breakout or level flip gets the
    darker highlight color. Returns [] when there are no chart points.
    """
    if not points:
        return []
    floor_value = min(point["low"] for point in points)
    ceil_value = max(point["high"] for point in points)
    active_flags = ("breakout_up", "breakout_down", "flip_to_support", "flip_to_resistance")
    markers: List[Dict[str, Any]] = []
    for position, window in enumerate(windows):
        flags = window.get("keylevel_flags", {}) or {}
        has_break = any(_safe_float(flags.get(flag, 0.0)) > 0.0 for flag in active_flags)
        marker_ts = int(window.get("end_ts", 0) or 0)
        markers.append({
            "name": f"window_{position}",
            "window_idx": position,
            # dark red when a breakout/flip fired in the window, orange otherwise
            "color": "#8b1e3f" if has_break else "#c84c2d",
            "points": [
                {"time": marker_ts, "value": _safe_float(floor_value)},
                {"time": marker_ts, "value": _safe_float(ceil_value)},
            ],
        })
    return markers
194
+
195
+
196
def _compute_indicator_overlays(points: List[Dict[str, float]]) -> List[Dict[str, Any]]:
    """Return EMA/SMA overlay series (8- and 21-bar) over the chart closes.

    Produces four overlay dicts (name, color, dash, points) aligned 1:1 with
    the input bars; ``fillna=True`` keeps every bar populated. Empty input
    yields an empty list.
    """
    if not points:
        return []
    close_series = pd.Series([_safe_float(bar["close"]) for bar in points], dtype="float64")

    def _to_payload(name: str, values: pd.Series, color: str, dash: str = "solid") -> Dict[str, Any]:
        # Pair each indicator value with the timestamp of its bar.
        payload_points: List[Dict[str, Any]] = []
        for position, raw in enumerate(values.tolist()):
            payload_points.append({"time": points[position]["time"], "value": _safe_float(raw)})
        return {"name": name, "color": color, "dash": dash, "points": payload_points}

    return [
        _to_payload("ema_fast_8", ema_indicator(close_series, window=8, fillna=True), "#2563eb"),
        _to_payload("ema_medium_21", ema_indicator(close_series, window=21, fillna=True), "#7c3aed"),
        _to_payload("sma_fast_8", sma_indicator(close_series, window=8, fillna=True), "#ea580c", "dot"),
        _to_payload("sma_medium_21", sma_indicator(close_series, window=21, fillna=True), "#0891b2", "dot"),
    ]
222
+
223
+
224
def _recompute_window_keylevel_flags(
    points: List[Dict[str, float]],
    windows: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
    """Re-derive key-level flags for each quant window from the raw chart bars.

    For every window whose ``end_ts`` matches a chart bar, re-runs
    compute_support_resistance_features over the window's index span and
    replaces that window's ``keylevel_flags`` and ``top_signal_name``.
    Windows whose end timestamp has no matching bar pass through unchanged.

    Returns a new list; matched windows are shallow-copied, never mutated
    in place. If either input is empty, the original ``windows`` is returned.
    """
    if not points or not windows:
        return windows
    closes = [_safe_float(point["close"]) for point in points]
    highs = [_safe_float(point["high"]) for point in points]
    lows = [_safe_float(point["low"]) for point in points]
    timestamps = [int(point["time"]) for point in points]
    # Map bar timestamp -> positional index so window ts bounds can be aligned.
    time_to_idx = {timestamp: idx for idx, timestamp in enumerate(timestamps)}

    updated: List[Dict[str, Any]] = []
    for window in windows:
        start_ts = int(window.get("start_ts", 0) or 0)
        end_ts = int(window.get("end_ts", 0) or 0)
        if end_ts not in time_to_idx:
            # No bar for this window's end; keep the original window untouched.
            updated.append(window)
            continue
        end_idx = time_to_idx[end_ts]
        # Fall back to the end bar when start_ts is not a known bar time.
        start_idx = time_to_idx.get(start_ts, max(0, end_idx))
        if start_idx > end_idx:
            start_idx = end_idx
        sr_features = compute_support_resistance_features(
            closes=closes,
            highs=highs,
            lows=lows,
            end_idx=end_idx,
            window_start=start_idx,
            # window_end is exclusive, hence the +1.
            window_end=end_idx + 1,
            timestamps=timestamps,
        )
        keylevel_flags = {
            "breakout_up": sr_features.get("keylevel_breakout_up", 0.0),
            "breakout_down": sr_features.get("keylevel_breakout_down", 0.0),
            "hold_above": sr_features.get("keylevel_hold_above", 0.0),
            "hold_below": sr_features.get("keylevel_hold_below", 0.0),
            "failed_breakout_up": sr_features.get("keylevel_failed_breakout_up", 0.0),
            "failed_breakout_down": sr_features.get("keylevel_failed_breakout_down", 0.0),
            "flip_to_support": sr_features.get("keylevel_flip_to_support", 0.0),
            "flip_to_resistance": sr_features.get("keylevel_flip_to_resistance", 0.0),
        }
        # The first positive flag in this priority order is the headline signal.
        top_signal_name = "none"
        for signal_name in (
            "breakout_up",
            "breakout_down",
            "flip_to_support",
            "flip_to_resistance",
            "failed_breakout_up",
            "failed_breakout_down",
        ):
            if _safe_float(keylevel_flags.get(signal_name, 0.0)) > 0.0:
                top_signal_name = signal_name
                break
        updated_window = dict(window)
        updated_window["keylevel_flags"] = keylevel_flags
        updated_window["top_signal_name"] = top_signal_name
        updated.append(updated_window)
    return updated
283
+
284
+
285
def _compute_keylevel_signal_overlays(
    points: List[Dict[str, float]],
    windows: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
    """Turn each window's active key-level flags into chart marker overlays.

    A marker is emitted at the window's end bar for every flag whose value is
    positive. High-anchored markers are nudged up 0.3% and low-anchored ones
    down 0.3% so they do not sit directly on the price line. Windows whose
    ``end_ts`` has no matching bar contribute nothing.
    """
    if not points or not windows:
        return []
    bars_by_time = {int(bar["time"]): bar for bar in points}
    signal_specs = {
        "breakout_up": {"color": "#15803d", "symbol": "triangle-up", "y_key": "high"},
        "breakout_down": {"color": "#b91c1c", "symbol": "triangle-down", "y_key": "low"},
        "flip_to_support": {"color": "#1d4ed8", "symbol": "diamond", "y_key": "close"},
        "flip_to_resistance": {"color": "#7c2d12", "symbol": "diamond", "y_key": "close"},
        "failed_breakout_up": {"color": "#ea580c", "symbol": "x", "y_key": "high"},
        "failed_breakout_down": {"color": "#9333ea", "symbol": "x", "y_key": "low"},
    }
    markers: List[Dict[str, Any]] = []
    for win in windows:
        marker_ts = int(win.get("end_ts", 0) or 0)
        bar = bars_by_time.get(marker_ts)
        if bar is None:
            continue
        active_flags = win.get("keylevel_flags", {}) or {}
        for flag_name, spec in signal_specs.items():
            if _safe_float(active_flags.get(flag_name, 0.0)) <= 0.0:
                continue
            anchor_key = spec["y_key"]
            anchor = _safe_float(bar.get(anchor_key, bar["close"]))
            # Offset away from the price line so the marker stays visible.
            if anchor_key == "high":
                anchor *= 1.003
            elif anchor_key == "low":
                anchor *= 0.997
            markers.append({
                "name": flag_name,
                "time": marker_ts,
                "value": anchor,
                "color": spec["color"],
                "symbol": spec["symbol"],
                "window_idx": int(win.get("idx", -1)),
            })
    return markers
324
+
325
+
326
def _sample_to_payload(sample: Dict[str, Any], source_file: Path) -> Dict[str, Any]:
    """Convert one cached training sample into the JSON payload the UI renders.

    Extracts the first Chart_Segment event (if any), rebuilds per-window
    key-level flags from the raw chart points, computes every chart overlay
    family, and packages sample metadata alongside the chart data.
    """
    event_sequence = sample.get("event_sequence", [])
    event_counts = Counter(event.get("event_type", "Unknown") for event in event_sequence)
    chart_events = [event for event in event_sequence if event.get("event_type") == "Chart_Segment"]
    # Only the first chart segment is visualized.
    chart_event = chart_events[0] if chart_events else {}
    quant_windows = chart_event.get("quant_ohlc_features", []) or []

    windows_payload: List[Dict[str, Any]] = []
    for idx, window in enumerate(quant_windows):
        feature_map = _feature_map(window)

        windows_payload.append({
            "idx": idx,
            "start_ts": window.get("start_ts"),
            "end_ts": window.get("end_ts"),
            "window_seconds": window.get("window_seconds"),
            "level_snapshot": window.get("level_snapshot", {}) or {},
            "keylevel_flags": window.get("keylevel_flags", {}) or {},
            # Placeholder; replaced by _recompute_window_keylevel_flags below.
            "top_signal_name": "none",
            "feature_map": feature_map,
            "sr_available": _safe_float(feature_map.get("sr_available", 0.0)),
            "trendline_available": _safe_float(feature_map.get("trendline_available", 0.0)),
        })

    chart_points = _chart_points(chart_event) if chart_event else []
    # Flags are recomputed from raw bars before any flag-dependent overlays.
    windows_payload = _recompute_window_keylevel_flags(chart_points, windows_payload)
    level_overlays = _compute_level_overlays(chart_points, windows_payload)
    trendline_overlays = _compute_trendline_overlays(chart_points)
    boundary_overlays = _compute_window_boundaries(chart_points, windows_payload)
    indicator_overlays = _compute_indicator_overlays(chart_points)
    signal_overlays = _compute_keylevel_signal_overlays(chart_points, windows_payload)

    return {
        "source_file": str(source_file),
        "sample": {
            "token_address": sample.get("token_address"),
            "source_token": sample.get("source_token"),
            "sample_idx": sample.get("sample_idx"),
            "class_id": sample.get("class_id"),
            "context_bucket": sample.get("context_bucket"),
            "context_score": sample.get("context_score"),
            "quality_score": _safe_float(sample.get("quality_score", 0.0)),
            "t_cutoff": sample.get("t_cutoff"),
            # Labels may be array-likes (anything with .tolist()); convert to
            # plain lists so the payload is JSON-serializable.
            "labels": sample.get("labels").tolist() if hasattr(sample.get("labels"), "tolist") else sample.get("labels"),
            "labels_mask": sample.get("labels_mask").tolist() if hasattr(sample.get("labels_mask"), "tolist") else sample.get("labels_mask"),
            "event_counts": dict(event_counts),
            "n_events": len(event_sequence),
            "n_wallets": len(sample.get("wallets", {})),
            "n_tokens": len(sample.get("tokens", {})),
            "n_graph_link_types": len(sample.get("graph_links", {})),
        },
        "chart": {
            "present": bool(chart_event),
            "timestamp": chart_event.get("timestamp"),
            "relative_ts": chart_event.get("relative_ts"),
            "interval": chart_event.get("i"),
            "opens": chart_event.get("opens", []) or [],
            "closes": chart_event.get("closes", []) or [],
            "windows": windows_payload,
            "points": chart_points,
            "overlays": {
                "levels": level_overlays,
                "trendlines": trendline_overlays,
                "boundaries": boundary_overlays,
                "indicators": indicator_overlays,
                "signals": signal_overlays,
            },
        },
    }
395
+
396
+
397
+ HTML = """<!doctype html>
398
+ <html lang="en">
399
+ <head>
400
+ <meta charset="utf-8">
401
+ <title>Cache Debug</title>
402
+ <meta name="viewport" content="width=device-width, initial-scale=1">
403
+ <style>
404
+ :root {
405
+ --bg: #f4efe7;
406
+ --panel: #fffaf2;
407
+ --ink: #1f1a17;
408
+ --muted: #6f645b;
409
+ --line: #d7ccbf;
410
+ --accent: #c84c2d;
411
+ --accent2: #0f766e;
412
+ --warn: #8b1e3f;
413
+ --chart: #1f1a17;
414
+ --chart2: #6a8a82;
415
+ }
416
+ * { box-sizing: border-box; }
417
+ body {
418
+ margin: 0;
419
+ font-family: Georgia, "Times New Roman", serif;
420
+ background: radial-gradient(circle at top left, #fffdf8, var(--bg) 46%, #efe4d4 100%);
421
+ color: var(--ink);
422
+ }
423
+ header, main { max-width: 1420px; margin: 0 auto; padding: 18px 20px; }
424
+ header { display: flex; gap: 12px; align-items: center; justify-content: space-between; }
425
+ h1 { margin: 0; font-size: 28px; letter-spacing: -0.03em; }
426
+ .sub { color: var(--muted); font-size: 14px; }
427
+ .actions { display: flex; gap: 10px; align-items: center; }
428
+ button, input {
429
+ font: inherit;
430
+ border: 1px solid var(--line);
431
+ background: var(--panel);
432
+ color: var(--ink);
433
+ border-radius: 999px;
434
+ padding: 9px 14px;
435
+ }
436
+ button { cursor: pointer; }
437
+ button:hover { border-color: var(--accent); }
438
+ main { display: grid; gap: 18px; }
439
+ .grid {
440
+ display: grid;
441
+ grid-template-columns: 340px minmax(0, 1fr);
442
+ gap: 18px;
443
+ align-items: start;
444
+ }
445
+ .panel {
446
+ background: color-mix(in srgb, var(--panel) 88%, white);
447
+ border: 1px solid var(--line);
448
+ border-radius: 22px;
449
+ box-shadow: 0 14px 50px rgba(31, 26, 23, 0.06);
450
+ }
451
+ .panel h2, .panel h3 { margin: 0 0 12px; }
452
+ .meta { padding: 18px; }
453
+ .kv {
454
+ display: grid;
455
+ grid-template-columns: 120px 1fr;
456
+ gap: 8px 10px;
457
+ font-size: 14px;
458
+ margin-bottom: 16px;
459
+ }
460
+ .kv div:nth-child(odd) { color: var(--muted); }
461
+ .badge {
462
+ display: inline-block;
463
+ border: 1px solid var(--line);
464
+ border-radius: 999px;
465
+ padding: 4px 10px;
466
+ margin: 0 6px 6px 0;
467
+ font-size: 12px;
468
+ background: #fff;
469
+ }
470
+ .main-panel { padding: 18px; }
471
+ #chart-wrap { border: 1px solid var(--line); border-radius: 18px; background: #fff; overflow: hidden; padding: 6px; }
472
+ #chart-root { width: 100%; height: 460px; }
473
+ .legend { display: flex; flex-wrap: wrap; gap: 10px; margin-top: 12px; color: var(--muted); font-size: 13px; }
474
+ .legend span::before { content: ""; display: inline-block; width: 16px; height: 2px; margin-right: 6px; vertical-align: middle; background: var(--ink); }
475
+ .legend .close::before { background: var(--chart); }
476
+ .legend .open::before { background: var(--chart2); }
477
+ .legend .boundary::before { background: var(--accent); }
478
+ .legend .pattern::before { background: var(--warn); }
479
+ .window-layout {
480
+ display: grid;
481
+ grid-template-columns: minmax(0, 1fr) 380px;
482
+ gap: 18px;
483
+ align-items: start;
484
+ }
485
+ table {
486
+ width: 100%;
487
+ border-collapse: collapse;
488
+ font-size: 13px;
489
+ }
490
+ th, td {
491
+ padding: 8px 10px;
492
+ border-bottom: 1px solid var(--line);
493
+ text-align: left;
494
+ vertical-align: top;
495
+ }
496
+ tr:hover { background: rgba(200, 76, 45, 0.05); cursor: pointer; }
497
+ tr.active { background: rgba(200, 76, 45, 0.11); }
498
+ .detail {
499
+ padding: 14px;
500
+ border: 1px solid var(--line);
501
+ border-radius: 18px;
502
+ background: #fff;
503
+ position: sticky;
504
+ top: 18px;
505
+ }
506
+ .detail pre {
507
+ margin: 0;
508
+ white-space: pre-wrap;
509
+ word-break: break-word;
510
+ font-size: 12px;
511
+ color: var(--ink);
512
+ }
513
+ .muted { color: var(--muted); }
514
+ .toggles {
515
+ display: flex;
516
+ flex-wrap: wrap;
517
+ gap: 10px;
518
+ margin: 0 0 12px;
519
+ font-size: 13px;
520
+ color: var(--muted);
521
+ }
522
+ .toggles label {
523
+ display: inline-flex;
524
+ align-items: center;
525
+ gap: 6px;
526
+ padding: 6px 10px;
527
+ border: 1px solid var(--line);
528
+ border-radius: 999px;
529
+ background: #fff;
530
+ cursor: pointer;
531
+ }
532
+ @media (max-width: 1080px) {
533
+ .grid, .window-layout { grid-template-columns: 1fr; }
534
+ .detail { position: static; }
535
+ }
536
+ </style>
537
+ </head>
538
+ <body>
539
+ <header>
540
+ <div>
541
+ <h1>Cache Debug Console</h1>
542
+ <div class="sub">Render one cached sample, its chart segment, quant-window boundaries, keylevels, breakouts, and feature payloads.</div>
543
+ </div>
544
+ <div class="actions">
545
+ <input id="file-input" placeholder="sample_123.pt or absolute path">
546
+ <button id="load-file">Load File</button>
547
+ <button id="random-btn">Random Sample</button>
548
+ </div>
549
+ </header>
550
+ <main>
551
+ <div class="grid">
552
+ <section class="panel meta">
553
+ <h2>Sample</h2>
554
+ <div id="meta"></div>
555
+ <h3>Event Counts</h3>
556
+ <div id="events"></div>
557
+ </section>
558
+ <section class="panel main-panel">
559
+ <h2>Chart Segment</h2>
560
+ <div class="toggles">
561
+ <label><input type="checkbox" id="toggle-indicators" checked> Indicators</label>
562
+ <label><input type="checkbox" id="toggle-levels"> Levels</label>
563
+ <label><input type="checkbox" id="toggle-trendlines"> Trendlines</label>
564
+ <label><input type="checkbox" id="toggle-boundaries" checked> Boundaries</label>
565
+ </div>
566
+ <div id="chart-wrap">
567
+ <div id="chart-root"></div>
568
+ </div>
569
+ <div class="legend">
570
+ <span class="close">Price</span>
571
+ <span class="open">Levels</span>
572
+ <span class="boundary">Quant Window Boundary</span>
573
+ <span class="pattern">Break / Trendline</span>
574
+ </div>
575
+ </section>
576
+ </div>
577
+ <section class="panel main-panel">
578
+ <h2>Quant Windows</h2>
579
+ <div class="window-layout">
580
+ <div style="overflow:auto;">
581
+ <table>
582
+ <thead>
583
+ <tr>
584
+ <th>#</th>
585
+ <th>Range</th>
586
+ <th>SR</th>
587
+ <th>Trend</th>
588
+ <th>Top Signal</th>
589
+ <th>Support</th>
590
+ <th>Resistance</th>
591
+ </tr>
592
+ </thead>
593
+ <tbody id="windows-body"></tbody>
594
+ </table>
595
+ </div>
596
+ <div class="detail">
597
+ <h3 id="detail-title">Window Detail</h3>
598
+ <pre id="detail-pre">Select a quant window.</pre>
599
+ </div>
600
+ </div>
601
+ </section>
602
+ </main>
603
+ <script src="https://cdn.plot.ly/plotly-2.35.2.min.js"></script>
604
+ <script>
605
+ const chartRoot = document.getElementById("chart-root");
606
+ const meta = document.getElementById("meta");
607
+ const events = document.getElementById("events");
608
+ const body = document.getElementById("windows-body");
609
+ const detailTitle = document.getElementById("detail-title");
610
+ const detailPre = document.getElementById("detail-pre");
611
+ let currentPayload = null;
612
+ let activeWindow = -1;
613
+ const layerState = {
614
+ indicators: true,
615
+ levels: false,
616
+ trendlines: false,
617
+ boundaries: true,
618
+ };
619
+
620
+ function q(path) {
621
+ return fetch(path).then(r => {
622
+ if (!r.ok) throw new Error("HTTP " + r.status);
623
+ return r.json();
624
+ });
625
+ }
626
+
627
+ function fmt(v) {
628
+ if (v === null || v === undefined) return "n/a";
629
+ if (typeof v === "number") return Number(v).toFixed(4);
630
+ return String(v);
631
+ }
632
+
633
+ function renderMeta(data) {
634
+ const s = data.sample;
635
+ meta.innerHTML = `
636
+ <div class="kv">
637
+ <div>file</div><div>${data.source_file}</div>
638
+ <div>token</div><div>${s.token_address || "n/a"}</div>
639
+ <div>source token</div><div>${s.source_token || "n/a"}</div>
640
+ <div>sample idx</div><div>${fmt(s.sample_idx)}</div>
641
+ <div>class</div><div>${fmt(s.class_id)}</div>
642
+ <div>bucket</div><div>${fmt(s.context_bucket)}</div>
643
+ <div>context score</div><div>${fmt(s.context_score)}</div>
644
+ <div>quality score</div><div>${fmt(s.quality_score)}</div>
645
+ <div>t cutoff</div><div>${fmt(s.t_cutoff)}</div>
646
+ <div>events</div><div>${fmt(s.n_events)}</div>
647
+ <div>wallets</div><div>${fmt(s.n_wallets)}</div>
648
+ <div>tokens</div><div>${fmt(s.n_tokens)}</div>
649
+ <div>graph types</div><div>${fmt(s.n_graph_link_types)}</div>
650
+ <div>labels</div><div>${JSON.stringify(s.labels)}</div>
651
+ <div>mask</div><div>${JSON.stringify(s.labels_mask)}</div>
652
+ </div>
653
+ `;
654
+ events.innerHTML = Object.entries(s.event_counts)
655
+ .sort((a, b) => b[1] - a[1])
656
+ .map(([k, v]) => `<span class="badge">${k}: ${v}</span>`)
657
+ .join("");
658
+ }
659
+
660
+ function clearChart() {
661
+ Plotly.purge(chartRoot);
662
+ chartRoot.innerHTML = "";
663
+ }
664
+
665
+ function renderChart(data) {
666
+ clearChart();
667
+ const chart = data.chart;
668
+ if (!chart.present || !(chart.points || []).length) {
669
+ chartRoot.innerHTML = `<div style="padding:24px;color:#6f645b;">No Chart_Segment found in this sample.</div>`;
670
+ return;
671
+ }
672
+
673
+ const xs = chart.points.map((_, idx) => idx);
674
+ const hoverTexts = chart.points.map((p, idx) =>
675
+ `idx=${idx}<br>ts=${new Date(p.time * 1000).toISOString()}<br>open=${p.open.toFixed(4)}<br>close=${p.close.toFixed(4)}`
676
+ );
677
+ const traces = [{
678
+ type: 'scatter',
679
+ mode: 'lines+markers',
680
+ x: xs,
681
+ y: chart.points.map(p => p.close),
682
+ line: { color: '#1f1a17', width: 3 },
683
+ marker: { color: '#1f1a17', size: 5 },
684
+ name: 'close',
685
+ text: hoverTexts,
686
+ hovertemplate: '%{text}<extra></extra>',
687
+ }, {
688
+ type: 'scatter',
689
+ mode: 'lines',
690
+ x: xs,
691
+ y: chart.points.map(p => p.open),
692
+ line: { color: '#6a8a82', width: 1.5, dash: 'dot' },
693
+ name: 'open',
694
+ text: hoverTexts,
695
+ hovertemplate: '%{text}<extra></extra>',
696
+ }];
697
+
698
+ const timeToIndex = new Map(chart.points.map((p, idx) => [p.time, idx]));
699
+ const selectedWindow = activeWindow >= 0 ? chart.windows[activeWindow] : null;
700
+
701
+ function overlayTrace(name, points, color, width = 2, dash = 'solid', opacity = 1.0) {
702
+ return {
703
+ type: 'scatter',
704
+ mode: 'lines',
705
+ x: points.map(p => timeToIndex.has(p.time) ? timeToIndex.get(p.time) : 0),
706
+ y: points.map(p => p.value),
707
+ line: { color, width, dash },
708
+ opacity,
709
+ name,
710
+ hovertemplate: `${name}<br>idx=%{x}<br>%{y}<extra></extra>`,
711
+ };
712
+ }
713
+
714
+ if (layerState.levels) {
715
+ (chart.overlays.levels.support_levels || []).forEach((level, idx) => {
716
+ const isSelected = activeWindow >= 0 && level.window_idx === activeWindow;
717
+ traces.push(overlayTrace(
718
+ `support_${idx}`,
719
+ [
720
+ { time: chart.points[0].time, value: level.price },
721
+ { time: chart.points[chart.points.length - 1].time, value: level.price },
722
+ ],
723
+ '#064e3b',
724
+ isSelected ? 4 : 3,
725
+ 'dash',
726
+ isSelected ? 1.0 : 0.95
727
+ ));
728
+ });
729
+ (chart.overlays.levels.resistance_levels || []).forEach((level, idx) => {
730
+ const isSelected = activeWindow >= 0 && level.window_idx === activeWindow;
731
+ traces.push(overlayTrace(
732
+ `resistance_${idx}`,
733
+ [
734
+ { time: chart.points[0].time, value: level.price },
735
+ { time: chart.points[chart.points.length - 1].time, value: level.price },
736
+ ],
737
+ '#7f1d1d',
738
+ isSelected ? 4 : 3,
739
+ 'dash',
740
+ isSelected ? 1.0 : 0.95
741
+ ));
742
+ });
743
+ }
744
+ if (layerState.trendlines) {
745
+ (chart.overlays.trendlines || []).forEach((overlay) => {
746
+ traces.push(overlayTrace(overlay.name, overlay.points, overlay.color, 2.5, 'solid', 0.55));
747
+ });
748
+ }
749
+ if (layerState.indicators) {
750
+ (chart.overlays.indicators || []).forEach((overlay) => {
751
+ traces.push(overlayTrace(
752
+ overlay.name,
753
+ overlay.points,
754
+ overlay.color,
755
+ overlay.name.includes('medium') ? 2.4 : 1.8,
756
+ overlay.dash || 'solid',
757
+ 0.95
758
+ ));
759
+ });
760
+ }
761
+ const signalGroups = {};
762
+ (chart.overlays.signals || []).forEach((signal) => {
763
+ if (!signalGroups[signal.name]) signalGroups[signal.name] = [];
764
+ signalGroups[signal.name].push(signal);
765
+ });
766
+ Object.entries(signalGroups).forEach(([signalName, signals]) => {
767
+ traces.push({
768
+ type: 'scatter',
769
+ mode: 'markers+text',
770
+ x: signals.map(signal => timeToIndex.has(signal.time) ? timeToIndex.get(signal.time) : chart.points.length - 1),
771
+ y: signals.map(signal => signal.value),
772
+ text: signals.map(() => signalName),
773
+ textposition: signalName.includes('down') ? 'bottom center' : 'top center',
774
+ marker: {
775
+ color: signals[0].color,
776
+ size: 14,
777
+ symbol: signals[0].symbol,
778
+ line: { color: '#111827', width: 1.5 },
779
+ },
780
+ name: signalName,
781
+ hovertemplate: `${signalName}<br>idx=%{x}<br>%{y}<extra></extra>`,
782
+ });
783
+ });
784
+
785
+ const shapes = layerState.boundaries ? (chart.overlays.boundaries || []).map((overlay) => {
786
+ const x = timeToIndex.has(overlay.points[0].time) ? timeToIndex.get(overlay.points[0].time) : 0;
787
+ const isActive = activeWindow >= 0 && overlay.window_idx === activeWindow;
788
+ return {
789
+ type: 'line',
790
+ x0: x,
791
+ x1: x,
792
+ y0: Math.min(...chart.points.map(p => p.low)),
793
+ y1: Math.max(...chart.points.map(p => p.high)),
794
+ line: {
795
+ color: overlay.color,
796
+ width: isActive ? 3 : 1,
797
+ dash: isActive ? 'solid' : 'dot',
798
+ },
799
+ opacity: isActive ? 0.95 : 0.35,
800
+ };
801
+ }) : [];
802
+
803
+ if (selectedWindow) {
804
+ const startIdx = Math.max(0, Math.floor((selectedWindow.start_ts - chart.points[0].time) / Math.max(1, chart.points[1] ? chart.points[1].time - chart.points[0].time : 1)));
805
+ const endIdx = timeToIndex.has(selectedWindow.end_ts) ? timeToIndex.get(selectedWindow.end_ts) : startIdx;
806
+ shapes.push({
807
+ type: 'rect',
808
+ x0: startIdx,
809
+ x1: endIdx,
810
+ y0: Math.min(...chart.points.map(p => p.low)),
811
+ y1: Math.max(...chart.points.map(p => p.high)),
812
+ fillcolor: 'rgba(200, 76, 45, 0.08)',
813
+ line: { width: 0 },
814
+ layer: 'below',
815
+ });
816
+ }
817
+
818
+ const layout = {
819
+ margin: { l: 50, r: 18, t: 28, b: 40 },
820
+ paper_bgcolor: '#fff',
821
+ plot_bgcolor: '#fff',
822
+ font: { family: 'Georgia, "Times New Roman", serif', color: '#1f1a17' },
823
+ xaxis: {
824
+ showgrid: true,
825
+ gridcolor: '#efe4d4',
826
+ rangeslider: { visible: false },
827
+ tickmode: 'array',
828
+ tickvals: xs,
829
+ ticktext: chart.points.map((p, idx) => idx === 0 || idx === chart.points.length - 1 || idx % 5 === 0
830
+ ? new Date(p.time * 1000).toLocaleTimeString()
831
+ : ''),
832
+ },
833
+ yaxis: {
834
+ showgrid: true,
835
+ gridcolor: '#efe4d4',
836
+ fixedrange: false,
837
+ },
838
+ shapes,
839
+ showlegend: false,
840
+ hovermode: 'closest',
841
+ };
842
+
843
+ Plotly.newPlot(chartRoot, traces, layout, {
844
+ displayModeBar: true,
845
+ responsive: true,
846
+ });
847
+ }
848
+
849
+ function selectWindow(idx) {
850
+ activeWindow = idx;
851
+ const win = currentPayload.chart.windows[idx];
852
+ detailTitle.textContent = `Window ${idx} | ${win.start_ts} -> ${win.end_ts}`;
853
+ detailPre.textContent = JSON.stringify(win, null, 2);
854
+ Array.from(body.querySelectorAll("tr")).forEach((row, rowIdx) => row.classList.toggle("active", rowIdx === idx));
855
+ renderChart(currentPayload);
856
+ }
857
+
858
+ function renderWindows(data) {
859
+ body.innerHTML = "";
860
+ const windows = data.chart.windows || [];
861
+ windows.forEach((win, idx) => {
862
+ const tr = document.createElement("tr");
863
+ tr.innerHTML = `
864
+ <td>${idx}</td>
865
+ <td>${win.start_ts} -> ${win.end_ts}<br><span class="muted">${win.window_seconds}s</span></td>
866
+ <td>${fmt(win.sr_available)}</td>
867
+ <td>${fmt(win.trendline_available)}</td>
868
+ <td>${win.top_signal_name}</td>
869
+ <td>${fmt((win.level_snapshot || {}).support_distance)}</td>
870
+ <td>${fmt((win.level_snapshot || {}).resistance_distance)}</td>
871
+ `;
872
+ tr.addEventListener("click", () => selectWindow(idx));
873
+ body.appendChild(tr);
874
+ });
875
+ if (windows.length) selectWindow(0);
876
+ else detailPre.textContent = "No quant windows present.";
877
+ }
878
+
879
+ function render(data) {
880
+ currentPayload = data;
881
+ renderMeta(data);
882
+ renderChart(data);
883
+ renderWindows(data);
884
+ }
885
+
886
+ function loadRandom() {
887
+ q("/api/sample?random=1").then(render).catch(err => alert(err));
888
+ }
889
+
890
+ function loadByFile() {
891
+ const value = document.getElementById("file-input").value.trim();
892
+ if (!value) return;
893
+ q("/api/sample?file=" + encodeURIComponent(value)).then(render).catch(err => alert(err));
894
+ }
895
+
896
+ document.getElementById("random-btn").addEventListener("click", loadRandom);
897
+ document.getElementById("load-file").addEventListener("click", loadByFile);
898
+ document.getElementById("toggle-indicators").addEventListener("change", (e) => { layerState.indicators = e.target.checked; if (currentPayload) renderChart(currentPayload); });
899
+ document.getElementById("toggle-levels").addEventListener("change", (e) => { layerState.levels = e.target.checked; if (currentPayload) renderChart(currentPayload); });
900
+ document.getElementById("toggle-trendlines").addEventListener("change", (e) => { layerState.trendlines = e.target.checked; if (currentPayload) renderChart(currentPayload); });
901
+ document.getElementById("toggle-boundaries").addEventListener("change", (e) => { layerState.boundaries = e.target.checked; if (currentPayload) renderChart(currentPayload); });
902
+ loadRandom();
903
+ </script>
904
+ </body>
905
+ </html>
906
+ """
907
+
908
+
909
class CacheDebugHandler(BaseHTTPRequestHandler):
    """HTTP handler serving the debug UI and a JSON sample endpoint.

    Class attributes are configured once (in main()) because the server
    instantiates a fresh handler per request:

    - cache_dir: directory scanned for ``sample_*.pt`` files.
    - fixed_file: when set, every request renders this one file.
    """

    cache_dir: Path = Path("data/cache")
    fixed_file: Optional[Path] = None

    def _send(self, encoded: bytes, content_type: str, code: int) -> None:
        # Shared response writer; _json and _html previously duplicated this.
        self.send_response(code)
        self.send_header("Content-Type", content_type)
        self.send_header("Content-Length", str(len(encoded)))
        self.end_headers()
        self.wfile.write(encoded)

    def _json(self, payload: Dict[str, Any], code: int = 200) -> None:
        """Serialize *payload* and send it as a JSON response."""
        self._send(json.dumps(payload).encode("utf-8"), "application/json; charset=utf-8", code)

    def _html(self, body: str, code: int = 200) -> None:
        """Send *body* as an HTML response."""
        self._send(body.encode("utf-8"), "text/html; charset=utf-8", code)

    def _pick_sample(self, qs: Dict[str, List[str]]) -> Path:
        """Resolve which cached sample file to render.

        Priority: the configured fixed file, then an explicit ``?file=``
        query value (relative paths resolved against the CWD), then
        ``?index=`` into the sorted cache listing (clamped into range),
        otherwise a random pick.

        Raises FileNotFoundError when the requested file does not exist or
        the cache directory holds no samples.
        """
        if self.fixed_file is not None:
            return self.fixed_file
        if "file" in qs and qs["file"] and qs["file"][0]:
            candidate = Path(qs["file"][0]).expanduser()
            if not candidate.is_absolute():
                candidate = (Path.cwd() / candidate).resolve()
            if not candidate.exists():
                raise FileNotFoundError(candidate)
            return candidate
        files = sorted(self.cache_dir.glob("sample_*.pt"))
        if not files:
            raise FileNotFoundError(f"No sample_*.pt files found in {self.cache_dir}")
        if "index" in qs and qs["index"]:
            # Clamp out-of-range indices instead of erroring.
            idx = max(0, min(int(qs["index"][0]), len(files) - 1))
            return files[idx]
        return random.choice(files)

    def do_GET(self) -> None:
        """Route GET requests: ``/`` -> UI page, ``/api/sample`` -> JSON."""
        parsed = urlparse(self.path)
        if parsed.path == "/":
            self._html(HTML)
            return
        if parsed.path == "/api/sample":
            try:
                source = self._pick_sample(parse_qs(parsed.query))
                sample = _load_cache_sample(source)
                self._json(_sample_to_payload(sample, source))
            except Exception as exc:
                # Surface load/parse failures to the UI as a JSON error body.
                self._json({"error": str(exc)}, code=500)
            return
        self.send_response(404)
        self.end_headers()
962
+
963
+
964
def main() -> int:
    """CLI entry point: configure the handler class and serve until Ctrl-C."""
    parser = argparse.ArgumentParser(description="View cached samples in a simple web UI.")
    parser.add_argument("--cache_dir", type=str, default="data/cache", help="Cache directory containing sample_*.pt")
    parser.add_argument("--file", type=str, default=None, help="Optional fixed sample file to always render")
    parser.add_argument("--host", type=str, default="127.0.0.1", help="Bind host")
    parser.add_argument("--port", type=int, default=8765, help="Bind port")
    args = parser.parse_args()

    # Configuration lives on the handler class because the server creates a
    # fresh handler instance for every request.
    CacheDebugHandler.cache_dir = Path(args.cache_dir)
    CacheDebugHandler.fixed_file = Path(args.file).resolve() if args.file else None

    httpd = ThreadingHTTPServer((args.host, args.port), CacheDebugHandler)
    print(f"Cache debug viewer running at http://{args.host}:{args.port}")
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C is the normal shutdown path; exit quietly.
        pass
    finally:
        httpd.server_close()
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
scripts/evaluate_sample.py CHANGED
@@ -50,7 +50,6 @@ ABLATION_SWEEP_MODES = [
50
  "quant_trendline",
51
  "quant_breaks",
52
  "quant_rolling",
53
- "quant_patterns",
54
  ]
55
 
56
  OHLC_PROBE_MODES = [
@@ -85,7 +84,7 @@ def parse_args():
85
  "--ablation",
86
  type=str,
87
  default="none",
88
- choices=["none", "wallet", "graph", "wallet_graph", "social", "token", "holder", "ohlc", "ohlc_wallet", "trade", "onchain", "all", "sweep", "ohlc_probe", "quant_ohlc", "quant_levels", "quant_trendline", "quant_breaks", "quant_rolling", "quant_patterns"],
89
  help="Run inference with selected signal families removed, or use 'sweep' to rank multiple families.",
90
  )
91
  return parser.parse_args()
@@ -183,7 +182,6 @@ def apply_ablation(batch, mode, device):
183
  "quant_trendline": ["trendlines"],
184
  "quant_breaks": ["relative_structure", "levels_breaks"],
185
  "quant_rolling": ["rolling_quant"],
186
- "quant_patterns": ["patterns"],
187
  }
188
  if mode in quant_group_map and "quant_ohlc_feature_tensors" in ablated:
189
  idxs = group_feature_indices(quant_group_map[mode])
 
50
  "quant_trendline",
51
  "quant_breaks",
52
  "quant_rolling",
 
53
  ]
54
 
55
  OHLC_PROBE_MODES = [
 
84
  "--ablation",
85
  type=str,
86
  default="none",
87
+ choices=["none", "wallet", "graph", "wallet_graph", "social", "token", "holder", "ohlc", "ohlc_wallet", "trade", "onchain", "all", "sweep", "ohlc_probe", "quant_ohlc", "quant_levels", "quant_trendline", "quant_breaks", "quant_rolling"],
88
  help="Run inference with selected signal families removed, or use 'sweep' to rank multiple families.",
89
  )
90
  return parser.parse_args()
 
182
  "quant_trendline": ["trendlines"],
183
  "quant_breaks": ["relative_structure", "levels_breaks"],
184
  "quant_rolling": ["rolling_quant"],
 
185
  }
186
  if mode in quant_group_map and "quant_ohlc_feature_tensors" in ablated:
187
  idxs = group_feature_indices(quant_group_map[mode])
signals/support_resistance.py CHANGED
@@ -1,4 +1,4 @@
1
- from typing import Dict, List, Optional
2
 
3
  import numpy as np
4
  from scipy.signal import argrelextrema
@@ -38,11 +38,227 @@ def _cluster_levels(prices: np.ndarray, pivot_indices: List[int], tolerance: flo
38
  return levels
39
 
40
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  def _nearest_level(levels: List[Dict[str, float]], current_price: float, below: bool) -> Optional[Dict[str, float]]:
42
  candidates = [level for level in levels if (level["price"] <= current_price if below else level["price"] >= current_price)]
43
  if not candidates:
44
  return None
45
- return min(candidates, key=lambda level: abs(level["price"] - current_price))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
 
48
  def compute_support_resistance_features(
@@ -63,21 +279,35 @@ def compute_support_resistance_features(
63
  "resistance_touch_count", "support_age_sec", "resistance_age_sec",
64
  "support_strength", "resistance_strength", "inside_support_zone",
65
  "inside_resistance_zone", "support_swept", "resistance_swept",
66
- "support_reclaim", "resistance_reject", "sr_available",
 
 
 
 
 
67
  ]}
68
 
69
  current_price = float(closes_np[-1])
70
- local_range = max(float(np.max(highs_np) - np.min(lows_np)), current_price * 1e-3, 1e-8)
71
- tolerance = max(local_range * 0.08, current_price * 0.0025)
72
-
73
- pivots_high = _compute_pivots(highs_np, order=2)["highs"]
74
- pivots_low = _compute_pivots(lows_np, order=2)["lows"]
75
- support_levels = _cluster_levels(lows_np, pivots_low, tolerance)
76
- resistance_levels = _cluster_levels(highs_np, pivots_high, tolerance)
 
 
 
77
 
78
  support = _nearest_level(support_levels, current_price, below=True)
79
  resistance = _nearest_level(resistance_levels, current_price, below=False)
80
  current_ts = float(timestamps[min(end_idx, len(timestamps) - 1)]) if timestamps else float(end_idx)
 
 
 
 
 
 
81
 
82
  def _level_stats(level: Optional[Dict[str, float]], is_support: bool) -> Dict[str, float]:
83
  if level is None:
@@ -91,17 +321,14 @@ def compute_support_resistance_features(
91
  "confirm": 0.0,
92
  }
93
  level_price = float(level["price"])
94
- zone_half_width = max(tolerance, abs(level_price) * 0.002)
95
- window_prices_high = highs[window_start:window_end]
96
- window_prices_low = lows[window_start:window_end]
97
  swept = 0.0
98
  confirm = 0.0
99
  if is_support:
100
- min_low = min(window_prices_low) if window_prices_low else current_price
101
  swept = 1.0 if min_low < (level_price - zone_half_width) else 0.0
102
  confirm = 1.0 if swept > 0 and current_price >= level_price else 0.0
103
  else:
104
- max_high = max(window_prices_high) if window_prices_high else current_price
105
  swept = 1.0 if max_high > (level_price + zone_half_width) else 0.0
106
  confirm = 1.0 if swept > 0 and current_price <= level_price else 0.0
107
 
@@ -117,6 +344,41 @@ def compute_support_resistance_features(
117
 
118
  support_stats = _level_stats(support, True)
119
  resistance_stats = _level_stats(resistance, False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
120
  return {
121
  "nearest_support_dist": support_stats["dist"],
122
  "nearest_resistance_dist": resistance_stats["dist"],
@@ -132,5 +394,17 @@ def compute_support_resistance_features(
132
  "resistance_swept": resistance_stats["swept"],
133
  "support_reclaim": support_stats["confirm"],
134
  "resistance_reject": resistance_stats["confirm"],
 
 
 
 
 
 
 
 
 
 
 
 
135
  "sr_available": 1.0 if support_levels or resistance_levels else 0.0,
136
  }
 
1
+ from typing import Any, Dict, List, Optional
2
 
3
  import numpy as np
4
  from scipy.signal import argrelextrema
 
38
  return levels
39
 
40
 
41
+ def _filter_significant_pivots(prices: np.ndarray, pivot_indices: List[int], lookaround: int, min_prominence: float) -> List[int]:
42
+ kept: List[int] = []
43
+ for idx in pivot_indices:
44
+ left = max(0, idx - lookaround)
45
+ right = min(len(prices), idx + lookaround + 1)
46
+ window = prices[left:right]
47
+ if window.size <= 1:
48
+ continue
49
+ center = float(prices[idx])
50
+ prominence = max(abs(center - float(np.min(window))), abs(center - float(np.max(window))))
51
+ if prominence >= min_prominence:
52
+ kept.append(idx)
53
+ return kept
54
+
55
+
56
+ def _merge_close_levels(levels: List[Dict[str, float]], tolerance: float) -> List[Dict[str, float]]:
57
+ if not levels:
58
+ return []
59
+ levels_sorted = sorted(levels, key=lambda level: level["price"])
60
+ merged: List[Dict[str, float]] = [levels_sorted[0].copy()]
61
+ for level in levels_sorted[1:]:
62
+ prev = merged[-1]
63
+ if abs(level["price"] - prev["price"]) <= tolerance:
64
+ total_touches = prev["touches"] + level["touches"]
65
+ prev["price"] = ((prev["price"] * prev["touches"]) + (level["price"] * level["touches"])) / total_touches
66
+ prev["touches"] = total_touches
67
+ prev["first_idx"] = min(prev["first_idx"], level["first_idx"])
68
+ prev["last_idx"] = max(prev["last_idx"], level["last_idx"])
69
+ else:
70
+ merged.append(level.copy())
71
+ return merged
72
+
73
+
74
+ def _suppress_nearby_weaker_levels(levels: List[Dict[str, float]], tolerance: float) -> List[Dict[str, float]]:
75
+ if not levels:
76
+ return []
77
+ kept: List[Dict[str, float]] = []
78
+ for level in sorted(levels, key=lambda l: (-float(l.get("quality", 0.0)), abs(float(l["price"])))):
79
+ if any(abs(float(level["price"]) - float(prev["price"])) <= tolerance for prev in kept):
80
+ continue
81
+ kept.append(level)
82
+ kept.sort(key=lambda l: float(l["price"]))
83
+ return kept
84
+
85
+
86
+ def _prune_weak_levels(levels: List[Dict[str, float]], current_idx: int, min_touches: float, min_separation_bars: float) -> List[Dict[str, float]]:
87
+ kept: List[Dict[str, float]] = []
88
+ for level in levels:
89
+ touches = float(level["touches"])
90
+ span = float(level["last_idx"]) - float(level["first_idx"])
91
+ age = float(current_idx) - float(level["last_idx"])
92
+ if touches < min_touches:
93
+ continue
94
+ if touches < (min_touches + 1.0) and span < min_separation_bars:
95
+ continue
96
+ if age > max(30.0, current_idx * 0.85):
97
+ continue
98
+ kept.append(level)
99
+ return kept
100
+
101
+
102
+ def _level_quality(level: Dict[str, float], current_idx: int, current_price: float) -> float:
103
+ touches = float(level["touches"])
104
+ age = max(0.0, float(current_idx) - float(level["last_idx"]))
105
+ recency = 1.0 / (1.0 + age / 20.0)
106
+ distance = abs(float(level["price"]) - current_price) / max(abs(current_price), 1e-8)
107
+ proximity = 1.0 / (1.0 + distance * 20.0)
108
+ duration = max(1.0, float(level["last_idx"]) - float(level["first_idx"]) + 1.0)
109
+ duration_score = min(1.0, duration / 20.0)
110
+ return (touches * 2.0) + (1.25 * recency) + (0.75 * proximity) + (0.75 * duration_score)
111
+
112
+
113
def _rank_levels(levels: List[Dict[str, float]], current_idx: int, current_price: float) -> List[Dict[str, float]]:
    """Return copies of ``levels`` annotated with a ``quality`` score and
    ordered best-first, ties broken by distance to the current price."""
    annotated: List[Dict[str, float]] = []
    for level in levels:
        scored = level.copy()
        scored["quality"] = _level_quality(scored, current_idx, current_price)
        annotated.append(scored)
    annotated.sort(key=lambda lvl: (-lvl["quality"], abs(lvl["price"] - current_price)))
    return annotated
119
+
120
+
121
  def _nearest_level(levels: List[Dict[str, float]], current_price: float, below: bool) -> Optional[Dict[str, float]]:
122
  candidates = [level for level in levels if (level["price"] <= current_price if below else level["price"] >= current_price)]
123
  if not candidates:
124
  return None
125
+ candidates = sorted(candidates, key=lambda level: (-float(level.get("quality", 0.0)), abs(level["price"] - current_price)))
126
+ return candidates[0]
127
+
128
+
129
+ def _select_active_levels(levels: List[Dict[str, float]], current_price: float, below: bool, quality_floor: float) -> List[Dict[str, float]]:
130
+ side_levels = [level for level in levels if (level["price"] <= current_price if below else level["price"] >= current_price)]
131
+ strong = [level for level in side_levels if float(level.get("quality", 0.0)) >= quality_floor]
132
+ if strong:
133
+ return strong
134
+ return side_levels
135
+
136
+
137
+ def _dedupe_cross_side_levels(
138
+ support_levels: List[Dict[str, float]],
139
+ resistance_levels: List[Dict[str, float]],
140
+ overlap_tolerance: float,
141
+ current_price: float,
142
+ ) -> tuple[List[Dict[str, float]], List[Dict[str, float]]]:
143
+ kept_supports: List[Dict[str, float]] = []
144
+ kept_resistances = [level.copy() for level in resistance_levels]
145
+
146
+ for support in support_levels:
147
+ overlapping = [
148
+ resistance
149
+ for resistance in kept_resistances
150
+ if abs(float(support["price"]) - float(resistance["price"])) <= overlap_tolerance
151
+ ]
152
+ if not overlapping:
153
+ kept_supports.append(support.copy())
154
+ continue
155
+
156
+ best_resistance = max(overlapping, key=lambda level: float(level.get("quality", 0.0)))
157
+ support_gap = abs(current_price - float(support["price"]))
158
+ resistance_gap = abs(float(best_resistance["price"]) - current_price)
159
+ support_score = float(support.get("quality", 0.0)) + (0.25 if support_gap <= resistance_gap else 0.0)
160
+ resistance_score = float(best_resistance.get("quality", 0.0)) + (0.25 if resistance_gap < support_gap else 0.0)
161
+
162
+ if support_score >= resistance_score:
163
+ kept_supports.append(support.copy())
164
+ kept_resistances = [
165
+ resistance
166
+ for resistance in kept_resistances
167
+ if abs(float(support["price"]) - float(resistance["price"])) > overlap_tolerance
168
+ ]
169
+
170
+ return kept_supports, kept_resistances
171
+
172
+
173
def compute_support_resistance_debug(
    closes: List[float],
    highs: List[float],
    lows: List[float],
    timestamps: Optional[List[int]] = None,
) -> Dict[str, Any]:
    """Build support/resistance levels from OHLC arrays and return them in a
    serializable debug payload.

    Pipeline: detect pivots -> filter by prominence -> cluster into levels ->
    merge near-duplicates -> prune weak/stale levels -> rank by quality ->
    suppress crowded weaker levels -> resolve support/resistance overlaps ->
    select the "active" side relative to the last close.

    Args:
        closes/highs/lows: per-bar price series, same length.
        timestamps: optional per-bar timestamps used only to attach
            first_ts/last_ts to the serialized levels (bar index is used
            as a fallback).

    Returns:
        Dict with "support_levels"/"resistance_levels" (active levels),
        "all_support_levels"/"all_resistance_levels" (pre-selection levels),
        and "sr_available" (1.0 when any active level exists).
    """
    closes_np = np.asarray(closes, dtype=np.float64)
    highs_np = np.asarray(highs, dtype=np.float64)
    lows_np = np.asarray(lows, dtype=np.float64)
    # Empty input: return an empty but fully-shaped payload.
    if closes_np.size == 0:
        return {
            "support_levels": [],
            "resistance_levels": [],
            "all_support_levels": [],
            "all_resistance_levels": [],
            "sr_available": 0.0,
        }

    current_price = float(closes_np[-1])
    current_idx = len(closes_np) - 1
    # Scale all tolerances to the observed price range, floored so they stay
    # meaningful for flat series and near-zero prices.
    local_range = max(float(np.max(highs_np) - np.min(lows_np)), current_price * 1e-3, 1e-8)
    tolerance = max(local_range * 0.035, current_price * 0.001)
    # Resistance side uses slightly wider merge/prominence thresholds than
    # the support side (tuned constants).
    support_merge_tolerance = max(local_range * 0.055, current_price * 0.0021)
    resistance_merge_tolerance = max(local_range * 0.065, current_price * 0.0026)
    # Wider pivot window once there is enough history (>= 48 bars).
    order = 2 if closes_np.size < 48 else 3
    resistance_order = order
    lookaround = max(2, order)
    resistance_lookaround = max(2, resistance_order)
    min_prominence = max(local_range * 0.03, current_price * 0.0012)
    resistance_min_prominence = max(local_range * 0.045, current_price * 0.0018)
    support_min_touches = 1.0
    resistance_min_touches = 1.0
    min_separation_bars = float(max(2, order))
    resistance_min_separation_bars = float(max(3, resistance_order))

    # Pivot highs feed resistance levels; pivot lows feed support levels.
    pivots_high = _compute_pivots(highs_np, order=resistance_order)["highs"]
    pivots_low = _compute_pivots(lows_np, order=order)["lows"]
    pivots_high = _filter_significant_pivots(highs_np, pivots_high, resistance_lookaround, resistance_min_prominence)
    pivots_low = _filter_significant_pivots(lows_np, pivots_low, lookaround, min_prominence)

    support_levels = _cluster_levels(lows_np, pivots_low, tolerance)
    resistance_levels = _cluster_levels(highs_np, pivots_high, tolerance)
    support_levels = _merge_close_levels(support_levels, support_merge_tolerance)
    resistance_levels = _merge_close_levels(resistance_levels, resistance_merge_tolerance)
    support_levels = _prune_weak_levels(support_levels, current_idx, min_touches=support_min_touches, min_separation_bars=min_separation_bars)
    resistance_levels = _prune_weak_levels(resistance_levels, current_idx, min_touches=resistance_min_touches, min_separation_bars=resistance_min_separation_bars)
    # Ranking must precede suppression: suppression keeps higher-quality levels.
    support_levels = _rank_levels(support_levels, current_idx, current_price)
    resistance_levels = _rank_levels(resistance_levels, current_idx, current_price)
    support_levels = _suppress_nearby_weaker_levels(support_levels, support_merge_tolerance * 0.8)
    resistance_levels = _suppress_nearby_weaker_levels(resistance_levels, resistance_merge_tolerance * 1.25)

    # When a support and a resistance occupy the same price zone, keep the
    # stronger side only.
    overlap_tolerance = max(local_range * 0.03, current_price * 0.0012)
    support_levels, resistance_levels = _dedupe_cross_side_levels(
        support_levels=support_levels,
        resistance_levels=resistance_levels,
        overlap_tolerance=overlap_tolerance,
        current_price=current_price,
    )

    # Snapshot the full level set before narrowing to the "active" side.
    all_support_levels = [level.copy() for level in support_levels]
    all_resistance_levels = [level.copy() for level in resistance_levels]

    support_levels = _select_active_levels(support_levels, current_price, below=True, quality_floor=3.25)
    resistance_levels = _select_active_levels(resistance_levels, current_price, below=False, quality_floor=3.75)

    def _serialize(levels: List[Dict[str, float]], side: str) -> List[Dict[str, float]]:
        # Flatten levels into plain dicts; timestamps fall back to bar index
        # when no (or too-short) timestamp list was supplied.
        payload: List[Dict[str, float]] = []
        for level in levels:
            last_idx = int(level["last_idx"])
            first_idx = int(level["first_idx"])
            payload.append({
                "price": float(level["price"]),
                "touches": float(level["touches"]),
                "first_idx": first_idx,
                "last_idx": last_idx,
                "quality": float(level.get("quality", 0.0)),
                "first_ts": float(timestamps[first_idx]) if timestamps and 0 <= first_idx < len(timestamps) else float(first_idx),
                "last_ts": float(timestamps[last_idx]) if timestamps and 0 <= last_idx < len(timestamps) else float(last_idx),
                "side": side,
            })
        return payload

    return {
        "support_levels": _serialize(support_levels, "support"),
        "resistance_levels": _serialize(resistance_levels, "resistance"),
        "all_support_levels": _serialize(all_support_levels, "support"),
        "all_resistance_levels": _serialize(all_resistance_levels, "resistance"),
        "sr_available": 1.0 if support_levels or resistance_levels else 0.0,
    }
262
 
263
 
264
  def compute_support_resistance_features(
 
279
  "resistance_touch_count", "support_age_sec", "resistance_age_sec",
280
  "support_strength", "resistance_strength", "inside_support_zone",
281
  "inside_resistance_zone", "support_swept", "resistance_swept",
282
+ "support_reclaim", "resistance_reject", "keylevel_breakout_up",
283
+ "keylevel_breakout_down", "keylevel_hold_above", "keylevel_hold_below",
284
+ "keylevel_failed_breakout_up", "keylevel_failed_breakout_down",
285
+ "keylevel_flip_to_support", "keylevel_flip_to_resistance",
286
+ "keylevel_upper_distance", "keylevel_lower_distance",
287
+ "keylevel_zone_width_frac", "keylevel_density", "sr_available",
288
  ]}
289
 
290
  current_price = float(closes_np[-1])
291
+ current_high = float(highs_np[-1])
292
+ current_low = float(lows_np[-1])
293
+ debug = compute_support_resistance_debug(
294
+ closes=closes[: end_idx + 1],
295
+ highs=highs[: end_idx + 1],
296
+ lows=lows[: end_idx + 1],
297
+ timestamps=timestamps[: end_idx + 1] if timestamps else None,
298
+ )
299
+ support_levels = debug["support_levels"]
300
+ resistance_levels = debug["resistance_levels"]
301
 
302
  support = _nearest_level(support_levels, current_price, below=True)
303
  resistance = _nearest_level(resistance_levels, current_price, below=False)
304
  current_ts = float(timestamps[min(end_idx, len(timestamps) - 1)]) if timestamps else float(end_idx)
305
+ local_range = max(float(np.max(highs_np) - np.min(lows_np)), current_price * 1e-3, 1e-8)
306
+ zone_half_width = max(local_range * 0.035, abs(current_price) * 0.0015)
307
+ window_prices_high = highs[window_start:window_end]
308
+ window_prices_low = lows[window_start:window_end]
309
+ window_high = max(window_prices_high) if window_prices_high else current_high
310
+ window_low = min(window_prices_low) if window_prices_low else current_low
311
 
312
  def _level_stats(level: Optional[Dict[str, float]], is_support: bool) -> Dict[str, float]:
313
  if level is None:
 
321
  "confirm": 0.0,
322
  }
323
  level_price = float(level["price"])
 
 
 
324
  swept = 0.0
325
  confirm = 0.0
326
  if is_support:
327
+ min_low = window_low
328
  swept = 1.0 if min_low < (level_price - zone_half_width) else 0.0
329
  confirm = 1.0 if swept > 0 and current_price >= level_price else 0.0
330
  else:
331
+ max_high = window_high
332
  swept = 1.0 if max_high > (level_price + zone_half_width) else 0.0
333
  confirm = 1.0 if swept > 0 and current_price <= level_price else 0.0
334
 
 
344
 
345
  support_stats = _level_stats(support, True)
346
  resistance_stats = _level_stats(resistance, False)
347
+
348
+ breakout_up = 0.0
349
+ breakout_down = 0.0
350
+ hold_above = 0.0
351
+ hold_below = 0.0
352
+ failed_breakout_up = 0.0
353
+ failed_breakout_down = 0.0
354
+ flip_to_support = 0.0
355
+ flip_to_resistance = 0.0
356
+ keylevel_upper_distance = 0.0
357
+ keylevel_lower_distance = 0.0
358
+
359
+ if resistance is not None:
360
+ resistance_price = float(resistance["price"])
361
+ keylevel_upper_distance = (resistance_price - current_price) / max(abs(current_price), 1e-8)
362
+ breakout_up = 1.0 if window_high > (resistance_price + zone_half_width) and current_price > resistance_price else 0.0
363
+ hold_above = 1.0 if breakout_up > 0.0 and window_low >= (resistance_price - zone_half_width * 0.5) else 0.0
364
+ failed_breakout_up = 1.0 if window_high > (resistance_price + zone_half_width) and current_price <= resistance_price else 0.0
365
+ flip_to_support = 1.0 if breakout_up > 0.0 and current_low <= (resistance_price + zone_half_width) and current_price >= resistance_price else 0.0
366
+
367
+ if support is not None:
368
+ support_price = float(support["price"])
369
+ keylevel_lower_distance = (current_price - support_price) / max(abs(current_price), 1e-8)
370
+ breakout_down = 1.0 if window_low < (support_price - zone_half_width) and current_price < support_price else 0.0
371
+ hold_below = 1.0 if breakout_down > 0.0 and window_high <= (support_price + zone_half_width * 0.5) else 0.0
372
+ failed_breakout_down = 1.0 if window_low < (support_price - zone_half_width) and current_price >= support_price else 0.0
373
+ flip_to_resistance = 1.0 if breakout_down > 0.0 and current_high >= (support_price - zone_half_width) and current_price <= support_price else 0.0
374
+
375
+ density_levels = debug.get("all_support_levels", []) + debug.get("all_resistance_levels", [])
376
+ nearby_count = sum(
377
+ 1
378
+ for level in density_levels
379
+ if abs(float(level["price"]) - current_price) <= zone_half_width * 3.0
380
+ )
381
+
382
  return {
383
  "nearest_support_dist": support_stats["dist"],
384
  "nearest_resistance_dist": resistance_stats["dist"],
 
394
  "resistance_swept": resistance_stats["swept"],
395
  "support_reclaim": support_stats["confirm"],
396
  "resistance_reject": resistance_stats["confirm"],
397
+ "keylevel_breakout_up": breakout_up,
398
+ "keylevel_breakout_down": breakout_down,
399
+ "keylevel_hold_above": hold_above,
400
+ "keylevel_hold_below": hold_below,
401
+ "keylevel_failed_breakout_up": failed_breakout_up,
402
+ "keylevel_failed_breakout_down": failed_breakout_down,
403
+ "keylevel_flip_to_support": flip_to_support,
404
+ "keylevel_flip_to_resistance": flip_to_resistance,
405
+ "keylevel_upper_distance": keylevel_upper_distance,
406
+ "keylevel_lower_distance": keylevel_lower_distance,
407
+ "keylevel_zone_width_frac": zone_half_width / max(abs(current_price), 1e-8),
408
+ "keylevel_density": float(nearby_count),
409
  "sr_available": 1.0 if support_levels or resistance_levels else 0.0,
410
  }