# Axiovora-X — backend/core/cross_universe_analysis.py
# NOTE: removed non-Python residue from the file-hosting page that had been
# pasted into the source (uploader "ZAIDX11", commit note "Add files using
# upload-large-folder tool", hash effde1c); retained here as a comment.
from __future__ import annotations
# --- Real Graph Analytics ---
# Optional third-party dependencies: each import is guarded so the module can
# still be imported in environments where a package is missing.
try:
    import numpy as np
except Exception:
    # Minimal stand-in so module-level references to `np` do not crash.
    # NOTE(review): the stub only implements zeros/mean/median; code below also
    # calls np.array and np.std, which would fail without real numpy — confirm
    # numpy is a hard requirement in production.
    class _np_stub:
        def zeros(self, *a, **k):
            # Degenerate placeholder: an empty list regardless of requested shape.
            return []
        def mean(self, *a, **k):
            return 0.0
        def median(self, *a, **k):
            return 0.0
    np = _np_stub()
try:
    import pandas as pd
except Exception:
    pd = None  # checked before use by the reporting helpers below
try:
    import matplotlib.pyplot as plt
except Exception:
    plt = None  # plotting callers degrade (or fail) when matplotlib is absent
def theorem_graph_centrality(analyzer: 'CrossUniverseAnalyzer', universe_ids: List[int]) -> Dict[int, float]:
    """Compute degree centrality for every theorem in the given universes.

    Builds a directed dependency graph (edge: dependency -> theorem) from the
    theorems stored for each universe, then returns networkx degree
    centrality keyed by theorem id.
    """
    dependency_graph = nx.DiGraph()
    for universe_id in universe_ids:
        for theorem in analyzer.db.query(Theorem).filter(Theorem.universe_id == universe_id).all():
            dependency_graph.add_node(theorem.id)
            for dependency in getattr(theorem, 'dependencies', []):
                dependency_graph.add_edge(dependency, theorem.id)
    return nx.degree_centrality(dependency_graph)
def theorem_graph_communities(analyzer: 'CrossUniverseAnalyzer', universe_ids: List[int]) -> Dict[int, int]:
    """Detect theorem communities via greedy modularity maximization.

    The dependency graph is built undirected; the result maps each theorem
    (or dependency) node id to the index of its community.
    """
    graph = nx.Graph()
    for universe_id in universe_ids:
        for theorem in analyzer.db.query(Theorem).filter(Theorem.universe_id == universe_id).all():
            graph.add_node(theorem.id)
            for dependency in getattr(theorem, 'dependencies', []):
                graph.add_edge(dependency, theorem.id)
    from networkx.algorithms.community import greedy_modularity_communities
    membership: Dict[int, int] = {}
    for index, community in enumerate(greedy_modularity_communities(graph)):
        for node in community:
            membership[node] = index
    return membership
def shortest_path_between_theorems(analyzer: 'CrossUniverseAnalyzer', universe_ids: List[int], thm_id1: int, thm_id2: int) -> List[int]:
    """Return the shortest directed dependency path from thm_id1 to thm_id2.

    Returns an empty list when no directed path exists.  (NetworkXNoPath is
    the only exception handled; a missing node still propagates.)
    """
    graph = nx.DiGraph()
    for universe_id in universe_ids:
        for theorem in analyzer.db.query(Theorem).filter(Theorem.universe_id == universe_id).all():
            graph.add_node(theorem.id)
            for dependency in getattr(theorem, 'dependencies', []):
                graph.add_edge(dependency, theorem.id)
    try:
        return nx.shortest_path(graph, source=thm_id1, target=thm_id2)
    except nx.NetworkXNoPath:
        return []
# --- Real Transfer Learning (Axiom Embeddings/Theorem Models) ---
# scikit-learn and torch are optional; names fall back to None when absent.
# NOTE(review): the transfer functions below call TruncatedSVD/LogisticRegression
# without a None check — confirm scikit-learn is installed wherever they run.
try:
    from sklearn.decomposition import TruncatedSVD
    from sklearn.linear_model import LogisticRegression
except Exception:
    TruncatedSVD = None
    LogisticRegression = None
try:
    import torch
    import torch.nn as nn
    import torch.optim as optim
except Exception:
    torch = None
    nn = None
    optim = None
def transfer_axiom_embeddings(analyzer: 'CrossUniverseAnalyzer', source_universe: int, target_universe: int) -> np.ndarray:
    """Project target-universe axioms into an embedding fit on the source.

    A one-hot matrix over the union of axiom statements is reduced with a
    2-component TruncatedSVD fit on the source universe's axioms; target
    axioms are then transformed into that same space and returned.
    """
    axioms_src = analyzer.db.query(Axiom).filter(Axiom.universe_id == source_universe).all()
    axioms_tgt = analyzer.db.query(Axiom).filter(Axiom.universe_id == target_universe).all()
    vocabulary = list({axiom.statement for axiom in axioms_src + axioms_tgt})

    def one_hot(axioms):
        # One row per axiom, one column per distinct statement in the union.
        return np.array([[1 if axiom.statement == stmt else 0 for stmt in vocabulary] for axiom in axioms])

    svd = TruncatedSVD(n_components=2)
    svd.fit_transform(one_hot(axioms_src))
    return svd.transform(one_hot(axioms_tgt))
def transfer_theorem_model(analyzer: 'CrossUniverseAnalyzer', source_universe: int, target_universe: int):
    """Train a one-hot theorem classifier on the source universe and apply it
    to the target universe's theorems.

    Returns an array of per-theorem predictions for the target universe.
    """
    theorems_src = analyzer.db.query(Theorem).filter(Theorem.universe_id == source_universe).all()
    theorems_tgt = analyzer.db.query(Theorem).filter(Theorem.universe_id == target_universe).all()
    all_thms = list({thm.statement for thm in theorems_src + theorems_tgt})
    X_src = np.array([[1 if thm.statement == t else 0 for t in all_thms] for thm in theorems_src])
    y_src = [1] * len(theorems_src)
    # BUG FIX: LogisticRegression.fit raises ValueError when the training
    # labels contain fewer than two classes — and y_src is all ones here, so
    # the original code always crashed.  Fall back to predicting the single
    # observed class for every target theorem.
    if len(set(y_src)) < 2:
        return np.ones(len(theorems_tgt), dtype=int)
    model = LogisticRegression().fit(X_src, y_src)
    X_tgt = np.array([[1 if thm.statement == t else 0 for t in all_thms] for thm in theorems_tgt])
    return model.predict(X_tgt)
# --- Real-Time Interactive Visualization (Plotly/Bokeh) ---
# Plotly is optional; both names become None when it is unavailable.
try:
    import plotly.graph_objs as go
    import plotly.offline as py
except Exception:
    go = None
    py = None
def plotly_universe_similarity(analyzer: 'CrossUniverseAnalyzer', universe_ids: List[int]):
    """Render the universe similarity matrix as an interactive Plotly heatmap.

    Writes 'universe_similarity.html' in the working directory.

    Raises:
        RuntimeError: when plotly is not installed (the module-level import is
            guarded, leaving `go`/`py` as None).
    """
    # Fail with a clear message instead of an opaque AttributeError on None,
    # matching the guarded-import convention used throughout this module.
    if go is None or py is None:
        raise RuntimeError("plotly is required for plotly_universe_similarity but is not installed")
    sim_matrix = analyzer.universe_similarity(universe_ids)
    fig = go.Figure(data=go.Heatmap(z=sim_matrix, x=universe_ids, y=universe_ids, colorscale='Viridis'))
    fig.update_layout(title="Universe Similarity (Plotly)")
    py.plot(fig, filename='universe_similarity.html')
# --- PDF/HTML Reporting ---
# BUG FIX: this import was unconditional, so a missing matplotlib broke the
# whole module even though `plt` itself is imported with a guard above.
# Guard it like the other optional dependencies.
try:
    from matplotlib.backends.backend_pdf import PdfPages
except Exception:
    PdfPages = None
# Use pandas if available, otherwise fall back to CSV-based reporting.
# Both branches define the same two function names so callers are agnostic.
if pd is not None:
    def generate_pdf_report(analyzer: 'CrossUniverseAnalyzer', universe_ids: List[int], path: str):
        """Write a two-page PDF: similarity heatmap, then a tabular summary."""
        sim_matrix = analyzer.universe_similarity(universe_ids)
        with PdfPages(path) as pdf:
            plt.figure()
            plt.imshow(sim_matrix, cmap='viridis')
            plt.title("Universe Similarity Matrix")
            pdf.savefig()
            plt.close()
            # Add tabular summary
            df = pd.DataFrame(sim_matrix, index=universe_ids, columns=universe_ids)
            fig, ax = plt.subplots()
            ax.axis('off')
            # Convert values/labels to plain Python lists/strings to satisfy static typing
            cell_text = df.values.tolist()
            col_labels = [str(c) for c in df.columns.tolist()]
            row_labels = [str(r) for r in df.index.tolist()]
            tbl = ax.table(cellText=cell_text, colLabels=col_labels, rowLabels=row_labels, loc='center')
            pdf.savefig(fig)
            plt.close(fig)
    def generate_html_report(analyzer: 'CrossUniverseAnalyzer', universe_ids: List[int], path: str):
        """Write the similarity matrix to `path` as a standalone HTML table."""
        sim_matrix = analyzer.universe_similarity(universe_ids)
        df = pd.DataFrame(sim_matrix, index=universe_ids, columns=universe_ids)
        html = df.to_html()
        with open(path, 'w') as f:
            f.write(f"<h1>Universe Similarity Matrix</h1>{html}")
else:
    import csv
    def generate_pdf_report(analyzer: 'CrossUniverseAnalyzer', universe_ids: List[int], path: str):
        """Fallback: always write `path + '.csv'`; also try a one-page PDF."""
        # Minimal fallback: write CSV with similarity matrix and create a tiny PDF with a single page
        sim_matrix = analyzer.universe_similarity(universe_ids)
        csv_path = path + '.csv'
        with open(csv_path, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow([''] + [str(u) for u in universe_ids])
            for i, u in enumerate(universe_ids):
                writer.writerow([str(u)] + list(sim_matrix[i]))
        # Create a tiny PDF with matplotlib if available (plt may be None here,
        # which the broad except below deliberately absorbs).
        try:
            plt.figure()
            plt.imshow(sim_matrix, cmap='viridis')
            plt.title("Universe Similarity Matrix")
            plt.savefig(path)
            plt.close()
        except Exception:
            # If matplotlib isn't available, write the CSV only
            pass
    def generate_html_report(analyzer: 'CrossUniverseAnalyzer', universe_ids: List[int], path: str):
        """Fallback: write `path + '.csv'` plus a minimal HTML table at `path`."""
        sim_matrix = analyzer.universe_similarity(universe_ids)
        csv_path = path + '.csv'
        with open(csv_path, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow([''] + [str(u) for u in universe_ids])
            for i, u in enumerate(universe_ids):
                writer.writerow([str(u)] + list(sim_matrix[i]))
        # Also generate a minimal HTML table
        try:
            html_rows = ['<tr><th></th>' + ''.join(f'<th>{u}</th>' for u in universe_ids) + '</tr>']
            for i, u in enumerate(universe_ids):
                row = '<tr>' + f'<td>{u}</td>' + ''.join(f'<td>{val}</td>' for val in sim_matrix[i]) + '</tr>'
                html_rows.append(row)
            with open(path, 'w') as f:
                f.write('<table>' + ''.join(html_rows) + '</table>')
        except Exception:
            pass
# --- Real Data Ingestion (CSV/JSON/API) ---
import requests
def ingest_universe_data_from_csv(path: str) -> List[Dict[str, Any]]:
    """Load universe records from a CSV file.

    Returns one dict per row with column names coerced to str keys.

    Raises:
        RuntimeError: when pandas is unavailable (the module-level import of
            pandas is guarded and may have left `pd` as None).
    """
    if pd is None:
        # Fail with a clear message instead of AttributeError on None.
        raise RuntimeError("pandas is required for CSV ingestion but is not installed")
    df = pd.read_csv(path)
    # Ensure return type matches List[Dict[str, Any]]
    return [{str(k): v for k, v in row.items()} for row in df.to_dict(orient='records')]
def ingest_universe_data_from_json(path: str) -> List[Dict[str, Any]]:
    """Parse a JSON file of universe records and return its contents as-is."""
    import json
    with open(path, 'r') as handle:
        payload = json.load(handle)
    return payload
def ingest_universe_data_from_api(url: str, timeout: float = 30.0) -> List[Dict[str, Any]]:
    """Fetch universe records from an HTTP endpoint returning JSON.

    Args:
        url: endpoint expected to return a JSON array of record objects.
        timeout: request timeout in seconds (new, backward-compatible default
            of 30s so a dead endpoint cannot hang the caller indefinitely).

    Raises:
        requests.HTTPError: for non-2xx responses, instead of attempting to
            JSON-decode an error page.
    """
    resp = requests.get(url, timeout=timeout)
    resp.raise_for_status()
    return resp.json()
# --- Expanded Test Harness with Real Analytics/Reporting ---
def test_fully_real_cross_universe_analysis():
    """Smoke-test every analytics/reporting helper against universes 1-4.

    Side effects: reads/writes the database, writes report files in the
    working directory, and may open browser/plot windows.

    NOTE(review): this harness references `logging` and CrossUniverseAnalyzer,
    which are imported/defined much later in this file, so running the module
    as a script reaches this guard before they exist and raises NameError —
    confirm the intended module layout.
    """
    logging.basicConfig(level=logging.INFO)
    analyzer = CrossUniverseAnalyzer()
    universe_ids = [1, 2, 3, 4]
    # Graph analytics
    print("Centrality:", theorem_graph_centrality(analyzer, universe_ids))
    print("Communities:", theorem_graph_communities(analyzer, universe_ids))
    print("Shortest path:", shortest_path_between_theorems(analyzer, universe_ids, 1, 2))
    # Transfer learning
    print("Axiom embedding transfer:", transfer_axiom_embeddings(analyzer, 1, 2))
    print("Theorem model transfer:", transfer_theorem_model(analyzer, 1, 2))
    # Interactive visualization
    plotly_universe_similarity(analyzer, universe_ids)
    # PDF/HTML reporting
    generate_pdf_report(analyzer, universe_ids, "universe_report.pdf")
    generate_html_report(analyzer, universe_ids, "universe_report.html")
    # Data ingestion
    print("Ingested CSV:", ingest_universe_data_from_csv("analysis.csv"))
    # Performance profiling
    import time
    start = time.time()
    analyzer.analyze(universe_ids)
    print("Analysis time:", time.time() - start)
if __name__ == "__main__":
    test_fully_real_cross_universe_analysis()
# --- Advanced ML/Statistical Analysis ---
# All heavyweight dependencies below are optional; a missing package leaves
# the corresponding name bound to None.
try:
    from sklearn.decomposition import PCA
    from sklearn.manifold import TSNE
    from sklearn.ensemble import IsolationForest
except Exception:
    PCA = None
    TSNE = None
    IsolationForest = None
try:
    import shap
except Exception:
    shap = None
try:
    import lime.lime_tabular
except Exception:
    # On success the import binds `lime` (with the lime_tabular submodule
    # loaded); on failure `lime` is rebound to None, so callers must check.
    lime = None
try:
    import matplotlib.pyplot as plt
except Exception:
    plt = None
try:
    import networkx as nx
except Exception:
    nx = None
import multiprocessing
try:
    import dask
    import dask.dataframe as dd
except Exception:
    dask = None
    dd = None
def pca_universe_features(analyzer: 'CrossUniverseAnalyzer', universe_ids: List[int]) -> np.ndarray:
    """Embed each universe as a 2-D PCA point over its one-hot axiom vector."""
    # Feature space: the union of axioms seen across all requested universes.
    axiom_vocab = list({ax for uid in universe_ids for ax in analyzer.shared_axioms([uid])})
    feature_rows = []
    for uid in universe_ids:
        present = analyzer.shared_axioms([uid])
        feature_rows.append([1 if axiom in present else 0 for axiom in axiom_vocab])
    return PCA(n_components=2).fit_transform(np.array(feature_rows))
def tsne_universe_features(analyzer: 'CrossUniverseAnalyzer', universe_ids: List[int]) -> np.ndarray:
    """Embed each universe as a 2-D t-SNE point over its one-hot axiom vector."""
    axiom_vocab = list({ax for uid in universe_ids for ax in analyzer.shared_axioms([uid])})
    feature_rows = []
    for uid in universe_ids:
        present = analyzer.shared_axioms([uid])
        feature_rows.append([1 if axiom in present else 0 for axiom in axiom_vocab])
    return TSNE(n_components=2).fit_transform(np.array(feature_rows))
def isolation_forest_anomaly(analyzer: 'CrossUniverseAnalyzer', universe_ids: List[int]) -> List[int]:
    """Return the universe ids an IsolationForest flags as outliers."""
    axiom_vocab = list({ax for uid in universe_ids for ax in analyzer.shared_axioms([uid])})
    feature_rows = []
    for uid in universe_ids:
        present = analyzer.shared_axioms([uid])
        feature_rows.append([1 if axiom in present else 0 for axiom in axiom_vocab])
    labels = IsolationForest().fit_predict(feature_rows)
    # fit_predict marks anomalous samples with -1.
    return [uid for uid, label in zip(universe_ids, labels) if label == -1]
# --- Distributed/Batch Analysis ---
def distributed_batch_analyze(analyze_fn: Callable, universe_batches: List[List[int]], num_workers: int = 4) -> List[Any]:
    """Apply analyze_fn to each batch in parallel with a process pool.

    analyze_fn must be picklable (a top-level callable) for multiprocessing.
    """
    with multiprocessing.Pool(num_workers) as worker_pool:
        batch_results = worker_pool.map(analyze_fn, universe_batches)
    return batch_results
def dask_batch_analyze(analyze_fn: Callable, universe_ids: List[int], batch_size: int = 10) -> List[Any]:
    """Apply analyze_fn to fixed-size batches of universe ids via dask.

    BUG FIX: the original built the frame with ``dd.DataFrame({...})``, which
    is not a usable constructor on the dask module; ``dd.from_pandas`` expects
    a *pandas* DataFrame, so construct it with ``pd.DataFrame``.
    """
    batches = [universe_ids[i:i + batch_size] for i in range(0, len(universe_ids), batch_size)]
    ddf = dd.from_pandas(pd.DataFrame({'batch': batches}), npartitions=len(batches))
    return list(ddf['batch'].map(analyze_fn).compute())
# --- SHAP/LIME Explainability ---
def explain_universe_similarity_shap(analyzer: 'CrossUniverseAnalyzer', universe_ids: List[int]):
    """Show a SHAP summary plot for an IsolationForest fit on axiom features."""
    axiom_vocab = list({ax for uid in universe_ids for ax in analyzer.shared_axioms([uid])})
    feature_rows = []
    for uid in universe_ids:
        present = analyzer.shared_axioms([uid])
        feature_rows.append([1 if axiom in present else 0 for axiom in axiom_vocab])
    forest = IsolationForest().fit(feature_rows)
    shap_values = shap.TreeExplainer(forest).shap_values(feature_rows)
    shap.summary_plot(shap_values, feature_rows, feature_names=axiom_vocab)
def explain_universe_similarity_lime(analyzer: 'CrossUniverseAnalyzer', universe_ids: List[int]):
    """Explain the first universe's anomaly score with a LIME tabular explainer."""
    axiom_vocab = list({ax for uid in universe_ids for ax in analyzer.shared_axioms([uid])})
    feature_rows = []
    for uid in universe_ids:
        present = analyzer.shared_axioms([uid])
        feature_rows.append([1 if axiom in present else 0 for axiom in axiom_vocab])
    forest = IsolationForest().fit(feature_rows)
    tabular_explainer = lime.lime_tabular.LimeTabularExplainer(feature_rows)
    explanation = tabular_explainer.explain_instance(feature_rows[0], forest.predict)
    explanation.show_in_notebook()
# --- Data Export/Import, Reporting ---
def export_analysis_to_csv(results: List[Dict[str, Any]], path: str):
    """Persist analysis result records to a CSV file without an index column."""
    pd.DataFrame(results).to_csv(path, index=False)
def import_analysis_from_csv(path: str) -> List[Dict[str, Any]]:
    """Load analysis records previously written by export_analysis_to_csv."""
    frame = pd.read_csv(path)
    # Coerce column labels to str so the result matches List[Dict[str, Any]].
    return [{str(key): value for key, value in record.items()} for record in frame.to_dict(orient='records')]
# --- Advanced Visualization ---
def plot_universe_network(analyzer: 'CrossUniverseAnalyzer', universe_ids: List[int]):
    """Draw universes as nodes, linking pairs whose similarity exceeds 0.5."""
    graph = nx.Graph()
    graph.add_nodes_from(universe_ids)
    sim_matrix = analyzer.universe_similarity(universe_ids)
    for i, first in enumerate(universe_ids):
        for j, second in enumerate(universe_ids):
            # Upper triangle only, so each pair is considered once.
            if i < j and sim_matrix[i, j] > 0.5:
                graph.add_edge(first, second, weight=sim_matrix[i, j])
    layout = nx.spring_layout(graph)
    nx.draw(graph, layout, with_labels=True, node_color='lightblue', edge_color='gray')
    plt.title("Universe Network (Similarity > 0.5)")
    plt.show()
# --- Integration Hooks (Expanded) ---
def integrate_with_theorem_engine(theorem_engine: Any, analyzer: Any):
    """Placeholder hook: log the integration request; no other effect."""
    analyzer.logger.info("Integrating with theorem engine.")

def integrate_with_neuro_symbolic(neuro_module: Any, analyzer: Any):
    """Placeholder hook: log the integration request; no other effect."""
    analyzer.logger.info("Integrating with neuro-symbolic module.")

def integrate_with_quantum(quantum_module: Any, analyzer: Any):
    """Placeholder hook: log the integration request; no other effect."""
    analyzer.logger.info("Integrating with quantum module.")
# --- Expanded Test Harness ---
def test_real_cross_universe_analysis():
    """Smoke-test the advanced ML/visualization helpers against universes 1-4.

    Side effects: DB writes via analyze(), 'analysis.csv' in the working
    directory, and blocking plot/notebook output.

    NOTE(review): depends on names defined later in this file (`logging`,
    CrossUniverseAnalyzer) and on optional packages (sklearn, shap, lime,
    dask) that may be None — running this requires the full environment.
    """
    logging.basicConfig(level=logging.INFO)
    analyzer = CrossUniverseAnalyzer()
    universe_ids = [1, 2, 3, 4]
    # PCA/t-SNE
    print("PCA features:", pca_universe_features(analyzer, universe_ids))
    print("t-SNE features:", tsne_universe_features(analyzer, universe_ids))
    # Isolation Forest anomaly
    print("Isolation Forest anomalies:", isolation_forest_anomaly(analyzer, universe_ids))
    # Distributed/batch
    print("Distributed batch analyze:", distributed_batch_analyze(analyzer.analyze, [universe_ids]*2))
    print("Dask batch analyze:", dask_batch_analyze(analyzer.analyze, universe_ids))
    # SHAP/LIME explainability
    explain_universe_similarity_shap(analyzer, universe_ids)
    explain_universe_similarity_lime(analyzer, universe_ids)
    # Export/import
    results = [analyzer.analyze(universe_ids)]
    export_analysis_to_csv(results, "analysis.csv")
    print("Imported analysis:", import_analysis_from_csv("analysis.csv"))
    # Visualization
    plot_universe_network(analyzer, universe_ids)
if __name__ == "__main__":
    test_real_cross_universe_analysis()
import logging
from typing import List, Dict, Any, Optional, Set, Callable
from collections import Counter, defaultdict
import numpy as np
from backend.db.models import Universe, Axiom, Theorem, AnalysisResult
from backend.db.session import SessionLocal
class CrossUniverseAnalyzer:
    """
    Advanced cross-universe analysis for mathematical universes, axioms, and theorems.
    Provides lineage, influence, clustering, anomaly detection, transfer learning, and more.
    Extensible for integration with neuro-symbolic, quantum, and external provers.
    """
    def __init__(self, db_session=None, logger=None):
        # Fall back to a fresh DB session / named logger when none is injected.
        self.db = db_session or SessionLocal()
        self.logger = logger or logging.getLogger("CrossUniverseAnalyzer")
    def shared_axioms(self, universe_ids: List[int]) -> List[str]:
        """Return axiom statements active in *every* one of the given universes."""
        axiom_sets = []
        for uid in universe_ids:
            axioms = self.db.query(Axiom).filter(Axiom.universe_id == uid, Axiom.is_active == 1).all()
            axiom_sets.append(set(ax.statement for ax in axioms))
        # Intersection over all per-universe sets; empty input yields empty set.
        shared = set.intersection(*axiom_sets) if axiom_sets else set()
        self.logger.info(f"Shared axioms for universes {universe_ids}: {shared}")
        return list(shared)
    def shared_theorems(self, universe_ids: List[int]) -> List[str]:
        """Return theorem statements present in every one of the given universes."""
        thm_sets = []
        for uid in universe_ids:
            theorems = self.db.query(Theorem).filter(Theorem.universe_id == uid).all()
            thm_sets.append(set(thm.statement for thm in theorems))
        shared = set.intersection(*thm_sets) if thm_sets else set()
        self.logger.info(f"Shared theorems for universes {universe_ids}: {shared}")
        return list(shared)
    def axiom_lineage(self, axiom_id: int) -> List[int]:
        """Walk parent_id links from an axiom and return the chain of ids.

        NOTE(review): a cyclic parent chain would loop forever — confirm the
        schema guarantees acyclic lineage.  Query.get() is legacy SQLAlchemy
        API; Session.get() is the modern equivalent.
        """
        # Trace the lineage of an axiom across universes
        lineage = []
        axiom = self.db.query(Axiom).get(axiom_id)
        while axiom:
            lineage.append(axiom.id)
            axiom = self.db.query(Axiom).get(getattr(axiom, 'parent_id', None)) if getattr(axiom, 'parent_id', None) else None
        self.logger.info(f"Axiom lineage for {axiom_id}: {lineage}")
        return lineage
    def theorem_influence_graph(self, universe_ids: List[int]) -> Dict[int, Set[int]]:
        """Map each theorem id to the set of theorem ids it depends on."""
        # Build a graph of theorem dependencies across universes
        graph = defaultdict(set)
        for uid in universe_ids:
            theorems = self.db.query(Theorem).filter(Theorem.universe_id == uid).all()
            for thm in theorems:
                deps = getattr(thm, 'dependencies', [])
                for dep in deps:
                    graph[thm.id].add(dep)
        self.logger.info(f"Theorem influence graph: {dict(graph)}")
        return dict(graph)
    def universe_similarity(self, universe_ids: List[int], metric: str = 'jaccard') -> np.ndarray:
        """Return an n x n pairwise similarity matrix over active axiom sets.

        Only the 'jaccard' metric is implemented; any other value leaves the
        matrix all zeros.
        """
        # Compute pairwise similarity between universes
        axioms_by_universe = []
        for uid in universe_ids:
            axioms = self.db.query(Axiom).filter(Axiom.universe_id == uid, Axiom.is_active == 1).all()
            axioms_by_universe.append(set(ax.statement for ax in axioms))
        n = len(universe_ids)
        sim_matrix = np.zeros((n, n))
        for i in range(n):
            for j in range(n):
                if metric == 'jaccard':
                    inter = len(axioms_by_universe[i] & axioms_by_universe[j])
                    union = len(axioms_by_universe[i] | axioms_by_universe[j])
                    # Two empty axiom sets count as similarity 0, not 1.
                    sim_matrix[i, j] = inter / union if union else 0.0
        self.logger.info(f"Universe similarity matrix: {sim_matrix}")
        return sim_matrix
    def cluster_universes(self, universe_ids: List[int], n_clusters: int = 2) -> Dict[int, int]:
        """Group universes via KMeans over their similarity-matrix rows."""
        # Cluster universes by axiom similarity
        sim_matrix = self.universe_similarity(universe_ids)
        from sklearn.cluster import KMeans
        kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(sim_matrix)
        labels = {uid: int(label) for uid, label in zip(universe_ids, kmeans.labels_)}
        self.logger.info(f"Universe clusters: {labels}")
        return labels
    def detect_anomalies(self, universe_ids: List[int]) -> List[int]:
        """Flag universes whose mean similarity is > 2 std below the average."""
        # Detect universes with anomalous axiom sets
        sim_matrix = self.universe_similarity(universe_ids)
        mean_sim = np.mean(sim_matrix, axis=1)
        threshold = np.mean(mean_sim) - 2 * np.std(mean_sim)
        anomalies = [uid for uid, sim in zip(universe_ids, mean_sim) if sim < threshold]
        self.logger.info(f"Anomalous universes: {anomalies}")
        return anomalies
    def transfer_axioms(self, source_universe: int, target_universe: int) -> int:
        """Copy every active axiom from the source universe into the target.

        Returns the number of axioms copied.  NOTE(review): no de-duplication
        is performed — repeated calls insert duplicate rows.
        """
        # Transfer axioms from one universe to another
        axioms = self.db.query(Axiom).filter(Axiom.universe_id == source_universe, Axiom.is_active == 1).all()
        count = 0
        for ax in axioms:
            new_ax = Axiom(statement=ax.statement, universe_id=target_universe, is_active=1)
            self.db.add(new_ax)
            count += 1
        self.db.commit()
        self.logger.info(f"Transferred {count} axioms from {source_universe} to {target_universe}")
        return count
    def batch_analyze(self, universe_batches: List[List[int]]) -> List[Dict[str, Any]]:
        """Run analyze() sequentially over each batch of universe ids."""
        results = []
        for batch in universe_batches:
            result = self.analyze(batch)
            results.append(result)
        self.logger.info(f"Batch analysis results: {results}")
        return results
    def distributed_analyze(self, universe_ids: List[int], num_workers: int = 4) -> List[Dict[str, Any]]:
        # Placeholder for distributed analysis: despite the name, execution is
        # sequential — the ids are only chunked into ~num_workers batches.
        self.logger.info(f"Distributed analysis with {num_workers} workers.")
        chunk_size = max(1, len(universe_ids) // num_workers)
        batches = [universe_ids[i:i+chunk_size] for i in range(0, len(universe_ids), chunk_size)]
        return self.batch_analyze(batches)
    def visualize_similarity(self, universe_ids: List[int]):
        """Show the similarity matrix as a matplotlib heatmap (blocking)."""
        sim_matrix = self.universe_similarity(universe_ids)
        import matplotlib.pyplot as plt
        plt.imshow(sim_matrix, cmap='viridis')
        plt.colorbar()
        plt.title("Universe Similarity Matrix")
        plt.xlabel("Universe Index")
        plt.ylabel("Universe Index")
        plt.show()
    def explain_analysis(self, universe_ids: List[int]) -> Dict[str, Any]:
        # Placeholder for explainability (e.g., feature importance, lineage)
        return {"universes": universe_ids, "explanation": "Analysis explainability not implemented."}
    def integrate_with_neuro_symbolic(self, *args, **kwargs):
        """Stub integration point; only logs."""
        self.logger.info("Integrating with neuro-symbolic module.")
        pass
    def integrate_with_quantum(self, *args, **kwargs):
        """Stub integration point; only logs."""
        self.logger.info("Integrating with quantum module.")
        pass
    def integrate_with_external_prover(self, *args, **kwargs):
        """Stub integration point; only logs."""
        self.logger.info("Integrating with external prover.")
        pass
    def analyze(self, universe_ids: List[int]) -> Dict[str, Any]:
        """Compute shared axioms/theorems, persist one AnalysisResult row per
        universe, and return the summary dict."""
        shared_axioms = self.shared_axioms(universe_ids)
        shared_theorems = self.shared_theorems(universe_ids)
        result = {
            "shared_axioms": shared_axioms,
            "shared_theorems": shared_theorems,
            "universes": universe_ids
        }
        # Store result in DB
        for uid in universe_ids:
            analysis = AnalysisResult(universe_id=uid, result=str(result))
            self.db.add(analysis)
        self.db.commit()
        self.logger.info(f"Analysis result stored for universes {universe_ids}")
        return result
# --- Research/Test Utilities ---
def benchmark_analysis(analyze_fn: Callable, universe_ids: List[int], repeats: int = 5) -> Dict[str, Any]:
    """Time repeated runs of analyze_fn and report mean/std wall-clock time."""
    import time
    durations = []
    for _ in range(repeats):
        started = time.time()
        analyze_fn(universe_ids)
        durations.append(time.time() - started)
    return {"mean_time": np.mean(durations), "std_time": np.std(durations), "runs": repeats}
def test_cross_universe_analysis():
    """End-to-end smoke test of CrossUniverseAnalyzer against a live DB.

    Side effects: writes AnalysisResult rows, copies axioms from universe 1
    to 2, and opens a blocking matplotlib window.
    """
    logging.basicConfig(level=logging.INFO)
    analyzer = CrossUniverseAnalyzer()
    # Example universe IDs (replace with real IDs in production)
    universe_ids = [1, 2, 3, 4]
    print("Shared axioms:", analyzer.shared_axioms(universe_ids))
    print("Shared theorems:", analyzer.shared_theorems(universe_ids))
    print("Axiom lineage:", analyzer.axiom_lineage(1))
    print("Theorem influence graph:", analyzer.theorem_influence_graph(universe_ids))
    print("Universe similarity matrix:\n", analyzer.universe_similarity(universe_ids))
    print("Universe clusters:", analyzer.cluster_universes(universe_ids, n_clusters=2))
    print("Anomalous universes:", analyzer.detect_anomalies(universe_ids))
    print("Transferred axioms:", analyzer.transfer_axioms(1, 2))
    analyzer.visualize_similarity(universe_ids)
    print("Explain analysis:", analyzer.explain_analysis(universe_ids))
if __name__ == "__main__":
    test_cross_universe_analysis()