# Provenance (upload-page header): algorembrant — "Upload 2 files" — commit d48606c (verified)
import os
import time
import gc
import sys
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
import torch
from tqdm import tqdm
# SOTA-Tier Hardware Configuration Target N > 2 (Stochastic GPU Sampling)
N = 4  # candles per pattern (each candle contributes O, H, L, C)
V = 4 * N  # size of the discrete value grid [0, V) that OHLC values are drawn from
# Bigger batches when CUDA is available; scale inversely with N to bound per-batch memory.
BATCH_SIZE = max(10_000, 2_000_000 // N) if not torch.cuda.is_available() else max(20_000, 5_000_000 // N)
RAM_LIMIT_GB = 11.5  # system-RAM watermark (GiB) that triggers early export
MAX_TIME_SEC = 4.85 * 3600  # wall-clock budget: 4.85 h ≈ 4 h 51 m (≈ 5 h minus ~10 min)
def get_logic_string(p):
    """Render a dense-rank pattern as a human-readable ordering string.

    Parameters
    ----------
    p : sequence of ints
        Dense ranks of the flattened candle values, laid out as
        O1, H1, L1, C1, O2, H2, L2, C2, ... (length is a multiple of 4).

    Returns
    -------
    str
        Rank groups joined highest-first with '>', equal ranks joined
        with '=', e.g. "(H1) > (O1 = C1) > (L1)".
    """
    # Derive the candle count from the pattern itself rather than the
    # module-level global N — same output for the script's own inputs
    # (len(p) == 4*N), but the helper now works for any pattern length.
    n_candles = len(p) // 4
    labels = []
    for i in range(1, n_candles + 1):
        labels.extend([f'O{i}', f'H{i}', f'L{i}', f'C{i}'])
    # Group label names by their shared rank value.
    groups = {}
    for i, val in enumerate(p):
        groups.setdefault(val, []).append(labels[i])
    # Highest rank first; members of a group are tied (equal values).
    return " > ".join(
        "(" + " = ".join(groups[val]) + ")"
        for val in sorted(groups.keys(), reverse=True)
    )
if __name__ == '__main__':
    # Stochastic discovery loop: repeatedly sample random N-candle OHLC
    # tuples on the chosen device, dense-rank each flattened row, and
    # accumulate the set of unique rank patterns until the wall-clock or
    # RAM budget runs out; then export everything to a markdown table.
    print(f"--- SOTA Fast Topological Engine (STOCHASTIC DISCOVERY): 4-candle ---")
    start_time = time.time()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Device: {device} | Fast Topology Batches: {BATCH_SIZE:,}")
    unique_patterns = set()  # tuples of dense ranks, each of length 4*N
    limit_hit = False        # set when the RAM guard (or an error) stops discovery early
    try:
        with tqdm(desc="Stochastic Discovery Phase") as pbar:
            while time.time() - start_time < MAX_TIME_SEC:
                # Stochastic Generative Constructive Mathematics:
                # open/close drawn uniformly over the value grid [0, V).
                O = torch.randint(0, V, (BATCH_SIZE, N), device=device, dtype=torch.int16)
                C = torch.randint(0, V, (BATCH_SIZE, N), device=device, dtype=torch.int16)
                top = torch.maximum(O, C)  # candle-body top
                bot = torch.minimum(O, C)  # candle-body bottom
                R = torch.rand((BATCH_SIZE, N), device=device)
                H_diff = (V - top)
                # High in [top, V-1]: R < 1, so truncation keeps H below V.
                H = top + (R * H_diff).to(torch.int16)
                R2 = torch.rand((BATCH_SIZE, N), device=device)
                # Low in [0, bot]: truncation of R2 * (bot + 1) keeps L <= bot.
                L = (R2 * (bot + 1)).to(torch.int16)
                candles = torch.stack([O, H, L, C], dim=-1).view(BATCH_SIZE, 4 * N)
                # GPU Dense Ranking Fubini mapping: per row, equal values share
                # a rank and ranks are consecutive from 0. Sort each row, mark
                # where the sorted value strictly increases, cumsum those marks
                # to get dense ranks in sorted order...
                sorted_c, indices = torch.sort(candles, dim=1)
                diffs = torch.cat([torch.ones(BATCH_SIZE, 1, device=device, dtype=torch.int16), (sorted_c[:, 1:] > sorted_c[:, :-1]).to(torch.int16)], dim=1)
                cum_ranks = torch.cumsum(diffs, dim=1) - 1
                ranks = torch.empty_like(candles)
                # ...then scatter ranks back through the sort indices so each
                # rank lands at its source column.
                ranks.scatter_(1, indices, cum_ranks.to(torch.int16))
                # Deduplication: collapse the batch on-device first, then merge
                # the (much smaller) result into the global Python set.
                b_unique = torch.unique(ranks, dim=0).cpu().numpy()
                before_len = len(unique_patterns)
                for row in b_unique:
                    unique_patterns.add(tuple(row))
                added = len(unique_patterns) - before_len
                pbar.update(added)  # progress bar counts newly discovered patterns
                pbar.set_postfix(unique=len(unique_patterns))
                # NOTE(review): this guards on system-wide used RAM, not this
                # process's RSS — confirm that is the intended threshold.
                if HAS_PSUTIL and psutil.virtual_memory().used / (1024**3) > RAM_LIMIT_GB:
                    print("\nMemory limit reached. Transitioning to export.")
                    limit_hit = True
                    break
    except KeyboardInterrupt:
        # Manual stop: keep everything discovered so far and export it.
        print("\nInterrupted. Moving to export.")
    except Exception as e:
        # Best-effort: report the failure but still fall through to export.
        print(f"\nError: {e}"); limit_hit = True
    # Deterministic export order (tuples sort lexicographically).
    patterns = sorted(list(unique_patterns))
    total_patterns = len(patterns)
    elapsed = time.time() - start_time
    print(f"Discovered {total_patterns} exact patterns in {elapsed:.2f}s.")
    md_path = f'4C_patterns_fast.md'
    with open(md_path, 'w') as f:
        f.write(f"# Extracted Topological 4-Candle Patterns (Stochastic)\n\n")
        f.write(f"**Patterns Discovered in 5hrs:** {total_patterns}\n\n")
        if limit_hit: f.write("*Memory limit triggered seamlessly.*\n\n")
        f.write(f"| Pattern ID | Mathematical Logic |\n|---|---|\n")
        # Write streaming to save memory in memory-constrained environment
        for i, p in enumerate(patterns):
            f.write(f"| P_{i:05d} | {get_logic_string(p)} |\n")
    print(f"SUCCESS! Total Time: {time.time() - start_time:.2f}s | Saved to {md_path}")