#!/usr/bin/env python3 """ ╔══════════════════════════════════════════════════════════════╗ ║ USLaP — Universal Scientific Laws and Principles v2.0 ║ ║ بِسْمِ اللَّهِ الرَّحْمَٰنِ الرَّحِيمِ ║ ║ ║ ║ SINGLE MASTER CONTROL: USLAP_LATTICE.xlsx ║ ║ Edit the XLSX → this file reads it automatically. ║ ║ One file to edit. One file to run. ║ ║ ║ ║ خَوارِزمِيَّة الاِسْتِجَابَة — Response Algorithm ║ ║ Step 0: Terminology Scan (فَحْص المُصْطَلَحَات) ║ ║ Step 1: Ibn Sīnā FIRST (ابن سينا أوّلاً) ║ ║ Step 2: Al-Khwārizmī Method (الخَوارِزمِيَّة) ║ ║ Step 3: Tools & Applications (أَدَوَات) ║ ║ Step 4: Output (المُخْرَجَات) ║ ╚══════════════════════════════════════════════════════════════╝ Usage: python3 uslap.py # Interactive menu python3 uslap.py --scan "text" # Quick scan python3 uslap.py --process "query" # Run Response Algorithm python3 uslap.py --export-json # Export all data as JSON python3 uslap.py --info # Show master control status Requires: pip install openpyxl Master control: Place USLAP_LATTICE.xlsx in same directory as this file. """ import json, re, sys, os from pathlib import Path # ============================================================ # MASTER CONTROL LOADER # ============================================================ MASTER_FILE = "USLAP_LATTICE.xlsx" def find_master(): """Find USLAP_LATTICE.xlsx — same dir as script, or current dir.""" script_dir = Path(__file__).parent candidates = [ script_dir / MASTER_FILE, Path.cwd() / MASTER_FILE, Path.home() / MASTER_FILE, ] for p in candidates: if p.exists(): return p return None def load_master(path): """Load all data from USLAP_LATTICE.xlsx master control.""" try: import openpyxl except ImportError: print("ERROR: openpyxl required. 
Run: pip install openpyxl") sys.exit(1) wb = openpyxl.load_workbook(path, data_only=True) data = { "path": str(path), "modified": os.path.getmtime(path), "terms": [], "rejected": [], "sciences": [], "evidence": [], "applications": [], "meta": [], "two_systems": [], "deep_dive": [], "iqra_moments": [], } # ALL TERMS sheet if "All Terms" in wb.sheetnames: ws = wb["All Terms"] for r in range(2, ws.max_row + 1): term = ws.cell(r, 1).value if not term: continue data["terms"].append({ "term": str(term).strip(), "status": str(ws.cell(r, 2).value or "").strip(), "domain_q": str(ws.cell(r, 3).value or "").strip(), "domain_t": str(ws.cell(r, 4).value or "").strip(), "replacement": str(ws.cell(r, 5).value or "").strip(), "root": str(ws.cell(r, 6).value or "").strip(), "ref": str(ws.cell(r, 7).value or "").strip(), "contamination": str(ws.cell(r, 8).value or "").strip(), }) # REJECTED sheet (has etymology/confession detail) if "Rejected" in wb.sheetnames: ws = wb["Rejected"] for r in range(2, ws.max_row + 1): term = ws.cell(r, 1).value if not term: continue data["rejected"].append({ "term": str(term).strip(), "etymology": str(ws.cell(r, 2).value or "").strip(), "domain": str(ws.cell(r, 3).value or "").strip(), "replacement": str(ws.cell(r, 4).value or "").strip(), "root": str(ws.cell(r, 5).value or "").strip(), "ref": str(ws.cell(r, 6).value or "").strip(), }) # 111 SCIENCES sheet if "111 Sciences" in wb.sheetnames: ws = wb["111 Sciences"] for r in range(2, ws.max_row + 1): sid = ws.cell(r, 1).value if not sid: continue data["sciences"].append({ "id": int(sid) if str(sid).isdigit() else 0, "root": str(ws.cell(r, 2).value or "").strip(), "meaning": str(ws.cell(r, 3).value or "").strip(), "verse": str(ws.cell(r, 4).value or "").strip(), "name_ar": str(ws.cell(r, 5).value or "").strip(), "name_t": str(ws.cell(r, 6).value or "").strip(), }) # EVIDENCE sheet if "Evidence" in wb.sheetnames: ws = wb["Evidence"] for r in range(3, ws.max_row + 1): # Skip header rows ev = ws.cell(r, 
1).value if not ev: continue data["evidence"].append({ "evidence": str(ev).strip(), "type": str(ws.cell(r, 2).value or "").strip(), "domain": str(ws.cell(r, 3).value or "").strip(), }) # APPLICATIONS sheet if "Applications" in wb.sheetnames: ws = wb["Applications"] for r in range(2, ws.max_row + 1): app = ws.cell(r, 1).value if not app: continue data["applications"].append({ "application": str(app).strip(), "domain": str(ws.cell(r, 2).value or "").strip(), }) # META FAQ sheet if "Meta FAQ" in wb.sheetnames: ws = wb["Meta FAQ"] for r in range(2, ws.max_row + 1): q = ws.cell(r, 1).value if not q: continue data["meta"].append({ "question": str(q).strip(), "domain": str(ws.cell(r, 2).value or "").strip(), }) # TWO SYSTEMS sheet if "Two Systems" in wb.sheetnames: ws = wb["Two Systems"] for r in range(3, ws.max_row + 1): dim = ws.cell(r, 1).value if not dim: continue data["two_systems"].append({ "dimension": str(dim).strip(), "moloch": str(ws.cell(r, 2).value or "").strip(), "quran": str(ws.cell(r, 3).value or "").strip(), }) # DEEP DIVE sheet if "Deep Dive" in wb.sheetnames: ws = wb["Deep Dive"] for r in range(3, ws.max_row + 1): chain = ws.cell(r, 1).value if not chain: continue data["deep_dive"].append({ "chain": str(chain).strip(), "crime": str(ws.cell(r, 2).value or "").strip(), "evidence": str(ws.cell(r, 3).value or "").strip(), "pre_trace": str(ws.cell(r, 4).value or "").strip(), "sources": str(ws.cell(r, 5).value or "").strip(), "quran_warning": str(ws.cell(r, 6).value or "").strip(), }) # IQRA MOMENTS sheet if "IQRA Moments" in wb.sheetnames: ws = wb["IQRA Moments"] for r in range(3, ws.max_row + 1): num = ws.cell(r, 1).value if not num: continue data["iqra_moments"].append({ "number": int(num) if str(num).isdigit() else 0, "target_lie": str(ws.cell(r, 2).value or "").strip(), "kill_shot": str(ws.cell(r, 3).value or "").strip(), "time": str(ws.cell(r, 4).value or "").strip(), }) wb.close() return data # 
# ============================================================
# FOUR-SOURCE LATTICE (hardcoded — this is the protocol itself)
# ============================================================

# The four epistemic sources, in priority order. Source 1 is the
# gatekeeper for terminology; sources 3-4 name the scholarly layers.
FOUR_SOURCES = [
    {"n": 1, "ar": "القُرْآن", "t": "al-Qur'ān",
     "role": "Root + Meaning. Is it Qur'anic Arabic? FAIL → Deep Trace → Confession."},
    {"n": 2, "ar": "الحَدِيث", "t": "al-Ḥadīth",
     "role": "Prophetic confirmation. Sunnah confirms, explains, applies."},
    {"n": 3, "ar": "العِلْم الوسطانيّ", "t": "al-ʿIlm al-Waṣṭānī",
     "role": "Ibn Sīnā FIRST (framework), then al-Khwārizmī (method). أُمَّةً وَسَطًا scholars."},
    {"n": 4, "ar": "عُلَمَاء إِضَافِيُّون", "t": "ʿUlamāʾ Iḍāfiyyūn",
     "role": "Additional scholars. Subject to بِسْمِ اللَّهِ + Qur'anic terminology verification."},
]

# Contamination severity ladder used by the scanner's reporting;
# level 1 is treated as the most severe classification.
CONTAM_HIERARCHY = [
    {"level": 1, "name": "PRE-civilizational",
     "desc": "Parasite 'elite' vocabulary BEFORE host civilization. 100% criminal."},
    {"level": 2, "name": "Persian",
     "desc": "Most dangerous active contaminator. Corrupts Arabic + Turkic."},
    {"level": 3, "name": "Jahilian / corrupted Arabic",
     "desc": "Arabic-script wrapping Greek/PRE- contamination."},
    {"level": 4, "name": "Greek / Latin",
     "desc": "Surface HOST wrappers carrying PRE- vocabulary."},
]

# The five-step Response Algorithm rendered by menu option [7] and by
# AppGenerator.generate(). "mandatory" marks steps that may never be
# skipped; step 1 carries sub-checks ("subs") displayed beneath it.
RESPONSE_ALGORITHM = [
    {"step": 0, "ar": "فَحْص المُصْطَلَحَات", "en": "Terminology Scan", "mandatory": True,
     "action": "Scan ALL terms through Source 1. Replace contamination before proceeding.",
     "anti": "NEVER skip. NEVER use contaminated terms even casually."},
    {"step": 1, "ar": "ابن سينا أوّلاً", "en": "Ibn Sīnā FIRST", "mandatory": True,
     "action": "Apply القانون في الطب framework BEFORE any practical advice.",
     "subs": ["SYSTEM: What is the قَوْس (bow/system)?",
              "WARP: Longitudinal chain?",
              "WEFT: Lateral connections?",
              "BIAS: Where does pressure create change?",
              "صَلَاة DIAGNOSTIC: Do prayer positions test this?"],
     "anti": "NEVER jump to tools before this. #1 contamination trap."},
    {"step": 2, "ar": "الخَوارِزْمِيَّة", "en": "Al-Khwārizmī Method", "mandatory": True,
     "action": "Define structured method: MEASURE → SEQUENCE → DECISION → VERIFY.",
     "anti": "NEVER give unstructured advice."},
    {"step": 3, "ar": "أَدَوَات", "en": "Tools & Applications", "mandatory": False,
     "action": "Practical interventions. Source 4. ONLY after Steps 0-2.",
     "anti": "Tools SERVE the framework."},
    {"step": 4, "ar": "المُخْرَجَات", "en": "Output", "mandatory": True,
     "action": "Deliver what user asked for. ALL USLaP-compliant.",
     "anti": "NEVER deliver with contaminated terminology."},
]
# ============================================================
# CONTAMINATION SCANNER — reads from master control
# ============================================================

class ContaminationScanner:
    """Scans text using the master control term database.

    Index sources, in precedence order:
      1. hardcoded SCAN_TERMS (common single-word contamination),
      2. REJECT rows from the XLSX master control (override SCAN_TERMS).

    Matching is plain case-insensitive substring search, so "science"
    also hits "sciences" — intentional, broad by design.
    """

    # Hardcoded single-word scanning terms (common contamination)
    # These get OVERRIDDEN by XLSX data if present.
    # "repl" = USLaP replacement, "ct" = contamination type,
    # "pre" = PRE-civilizational etymology note.
    SCAN_TERMS = {
        "medical": {"repl": "الطِّبّ / al-Ṭibb (Return to Purity)", "ct": "Greek/Latin", "pre": "PRE-Latin *med- → Medea (child-killer)"},
        "medicine": {"repl": "الطِّبّ / al-Ṭibb", "ct": "Greek/Latin", "pre": "PRE-Latin *med- → Medea → Medici"},
        "surgery": {"repl": "الجِرَاحَة / al-Jirāḥah (root ج ر ح, Q5:4)", "ct": "Greek/Latin", "pre": "Greek χειρουργία → PRE-Greek"},
        "surgical": {"repl": "جِرَاحِيّ / Jirāḥī", "ct": "Greek/Latin", "pre": "Greek χειρουργία"},
        "pharmaceutical": {"repl": "الصَّيْدَلَة / al-Ṣaydalah", "ct": "Greek/Latin", "pre": "φαρμακεία → PRE-Greek *pharmak- = POISON RITUAL"},
        "pharmacy": {"repl": "الصَّيْدَلِيَّة", "ct": "Greek/Latin", "pre": "φαρμακεία → poison ritual"},
        "robot": {"repl": "مَصْنُوع / Maṣnūʿ (Q20:39 صُنْعَ اللَّهِ)", "ct": "European", "pre": "Czech robota = SLAVERY"},
        "anatomy": {"repl": "عِلْم البِنْيَة / ʿIlm al-Binyah", "ct": "Greek/Latin", "pre": "ἀνατομή → cutting corpses"},
        "biology": {"repl": "عِلْم الحَيَاة / ʿIlm al-Ḥayāh", "ct": "Greek/Latin", "pre": "Greek βίος+λόγος"},
        "physics": {"repl": "عِلْم الطَّبِيعَة / ʿIlm al-Ṭabīʿah", "ct": "Greek/Latin", "pre": "Greek φύσις"},
        "chemistry": {"repl": "عِلْم التَّحْوِيل / ʿIlm al-Taḥwīl", "ct": "Greek/Latin", "pre": "χημεία → dark transmutation"},
        "geometry": {"repl": "عِلْم المِسَاحَة", "ct": "Greek/Latin", "pre": "γεωμετρία → land seizure"},
        "therapy": {"repl": "عِلَاج / ʿIlāj", "ct": "Greek/Latin", "pre": "θεραπεία → ritual servitude"},
        "diagnosis": {"repl": "تَقْيِيم / Taqyīm", "ct": "Greek/Latin", "pre": "Greek διάγνωσις"},
        "symptom": {"repl": "إِشَارَة / Ishārah (signal)", "ct": "Greek/Latin", "pre": "Greek σύμπτωμα"},
        "patient": {"repl": "شَخْص / Shakhs (person)", "ct": "Greek/Latin", "pre": "Latin patiens = one who SUFFERS"},
        "doctor": {"repl": "مُرْشِد / Murshid (guide)", "ct": "Greek/Latin", "pre": "Latin docēre"},
        "hospital": {"repl": "مَرْكَز الاِسْتِعَادَة", "ct": "Greek/Latin", "pre": "Latin hospitale → hospes = HOST"},
        "sensor": {"repl": "حَاسَّة / Ḥāssah", "ct": "Greek/Latin", "pre": "Latin sentire"},
        "actuator": {"repl": "مُحَرِّك / Muḥarrik", "ct": "Greek/Latin", "pre": "Latin actuare"},
        "haptic": {"repl": "لَمْسِيّ / Lamsī", "ct": "Greek/Latin", "pre": "Greek ἁπτικός → sensory ritual"},
        "servo": {"repl": "مُحَرِّك / Muḥarrik", "ct": "Greek/Latin", "pre": "Latin servus = SLAVE"},
        "cortisone": {"repl": "هُرْمُون صِنَاعِيّ", "ct": "Greek/Latin", "pre": "Weakens tissue — φαρμακεία product"},
        "skeleton": {"repl": "هَيْكَل / Haykal (Q2:247)", "ct": "Greek/Latin", "pre": "σκελετός = dried corpse"},
        "medieval": {"repl": "العصر الوسطانيّ", "ct": "Greek/Latin", "pre": "PRE-Latin *med- = Medea's era"},
        "science": {"repl": "عِلْم / ʿIlm (Q17:36)", "ct": "Greek/Latin", "pre": "Latin scindere = to CUT"},
        "technology": {"repl": "تِقَانَة / Tiqānah", "ct": "Greek/Latin", "pre": "Greek τέχνη → craft-ritual"},
        "psychology": {"repl": "عِلْم النَّفْس / ʿIlm al-Nafs", "ct": "Greek/Latin", "pre": "ψυχή → soul-ritual"},
        "neurology": {"repl": "عِلْم الأَعْصَاب", "ct": "Greek/Latin", "pre": "Greek νεῦρον"},
        "pathology": {"repl": "عِلْم العِلَل", "ct": "Greek/Latin", "pre": "Greek πάθος = suffering"},
        "philosophy": {"repl": "عِلْم الحِكْمَة (Q2:269)", "ct": "Greek/Latin", "pre": "φιλοσοφία"},
        "algorithm": {"repl": "CLEAN — خَوارِزمِيَّة (al-Khwārizmī)", "ct": "CLEAN", "pre": ""},
        "algebra": {"repl": "CLEAN — الجَبْر (الجَبَّار Name of Allah)", "ct": "CLEAN", "pre": ""},
    }

    def __init__(self, master_data):
        self.terms = master_data["terms"]
        self.rejected = master_data["rejected"]
        self._build_index()

    def _build_index(self):
        """Build lookup index: XLSX data + hardcoded single-word terms."""
        self.index = {}
        # 1. Load hardcoded single-word terms first
        for word, info in self.SCAN_TERMS.items():
            self.index[word] = {
                "term": word,
                "status": "CLEAN" if info["ct"] == "CLEAN" else "REJECT",
                "domain_q": "",
                "domain_t": "",
                "replacement": info["repl"],
                "root": "",
                "ref": "",
                "contamination": info["ct"],
                "_pre": info["pre"],
            }
        # 2. Override/add from XLSX (master control wins)
        for t in self.terms:
            if t["status"] == "REJECT":
                key = t["term"].lower().strip()
                self.index[key] = t
                # Also index individual words from multi-word terms
                for word in key.split():
                    if len(word) > 3 and word not in self.SCAN_TERMS:
                        if word not in self.index:
                            self.index[word] = t
        # Build rejected detail index (extra etymology/confession data)
        self.rej_index = {}
        for r in self.rejected:
            self.rej_index[r["term"].lower().strip()] = r

    def scan(self, text):
        """Scan text and return all contamination found.

        Returns a list of result dicts (both REJECT and CLEAN index
        hits); callers filter on "status" as needed.
        """
        results = []
        text_lower = text.lower()
        seen = set()
        # Check each indexed term against the input (substring match)
        for key, entry in self.index.items():
            if key in text_lower and key not in seen:
                seen.add(key)
                rej_detail = self.rej_index.get(key, {})
                results.append({
                    "term": entry["term"],
                    "status": entry["status"],
                    "domain": entry["domain_q"],
                    "replacement": entry["replacement"],
                    "root": entry["root"],
                    "ref": entry["ref"],
                    "contamination": entry["contamination"],
                    "etymology": rej_detail.get("etymology", entry["contamination"]),
                })
        return results

    def clean(self, text):
        """Replace contaminated terms with USLaP replacements.

        Longest keys are substituted first so multi-word terms win over
        their component words.
        """
        cleaned = text
        for key, entry in sorted(self.index.items(), key=lambda x: -len(x[0])):
            # BUG FIX: only REJECT entries are substituted. Previously
            # CLEAN entries (e.g. "algorithm") were also replaced with
            # their annotation string ("CLEAN — خَوارِزمِيَّة ...").
            if entry["status"] != "REJECT" or not entry["replacement"]:
                continue
            pattern = re.compile(re.escape(key), re.IGNORECASE)
            # Extract just the Arabic part of replacement for cleaner output
            repl = entry["replacement"].split("(")[0].strip() if "(" in entry["replacement"] else entry["replacement"]
            cleaned = pattern.sub(repl, cleaned)
        return cleaned

    def drill_down(self, term, level=1):
        """Three-level drill-down: surface → PRE-trace → criminal network."""
        entry = self.index.get(term.lower().strip())
        if not entry:
            # Try partial match
            for key, val in self.index.items():
                if term.lower() in key:
                    entry = val
                    break
        if not entry:
            return f'"{term}" not in contamination database.'
        if entry["status"] != "REJECT":
            return f'✅ "{term}" is CLEAN.'
        rej = self.rej_index.get(entry["term"].lower().strip(), {})
        # BUG FIX: index entries carry "domain_q" (not "domain"); the old
        # entry["domain"] access raised KeyError for every REJECT term.
        domain = entry.get("domain_q") or entry.get("domain", "")
        out = []
        out.append(f'\n❌ "{entry["term"]}" — REJECTED at Source 1 (القُرْآن)')
        out.append(f' Domain: {domain}')
        out.append(f' Contamination type: {entry["contamination"]}')
        if level >= 1:
            out.append(f'\n LEVEL 1 — SURFACE:')
            out.append(f' "{entry["term"]}" is not Qur\'anic Arabic.')
            out.append(f' Etymology: {rej.get("etymology", entry["contamination"])}')
            out.append(f' ✅ Replacement: {entry["replacement"]}')
            if entry["root"]:
                out.append(f' Root: {entry["root"]}')
            if entry["ref"]:
                out.append(f' Qur\'an: {entry["ref"]}')
        if level >= 2:
            out.append(f'\n LEVEL 2 — PRE-CIVILIZATIONAL TRACE:')
            out.append(f' The "{entry["contamination"]}" label is a HOST wrapper.')
            out.append(f' The actual vocabulary predates the host civilization.')
            out.append(f' = CONFESSION — the word confesses its own crime.')
        if level >= 3:
            out.append(f'\n LEVEL 3 — CRIMINAL NETWORK:')
            out.append(f' Connects to multi-era global criminal network.')
            out.append(f' 100%: sacrifice / trafficking / weapons / ritual.')
            out.append(f' Qur\'anic warning: Q2:204-206 — يُهْلِكَ الْحَرْثَ وَالنَّسْلَ')
        out.append(f'\n ↩ CLEAN PATH: {entry["replacement"]}')
        return '\n'.join(out)
All terms clean.') # STEP 1 out.append(f'\n{"─" * 40}') out.append(f'STEP 1: ابن سينا أوّلاً (Ibn Sīnā FIRST)') out.append(f'{"─" * 40}') out.append(f' Source 3: القانون في الطب — Framework BEFORE tools') out.append(f' ├── قَوْس: What is the system/framework?') out.append(f' ├── WARP: What longitudinal chain?') out.append(f' ├── WEFT: What lateral connections?') out.append(f' ├── BIAS: Where does pressure create change?') out.append(f' └── صَلَاة: Do prayer positions test this?') out.append(f' ⚠ Complete this BEFORE any tools/advice.') # STEP 2 out.append(f'\n{"─" * 40}') out.append(f'STEP 2: الخَوارِزْمِيَّة (Al-Khwārizmī Method)') out.append(f'{"─" * 40}') out.append(f' ├── MEASURE (حَاسَّات): What to observe?') out.append(f' ├── SEQUENCE (تَرْتِيب): What order?') out.append(f' ├── DECISION (حُكْم): What determines next step?') out.append(f' └── VERIFY (تَحَقُّق): How to confirm?') # STEP 3 out.append(f'\n{"─" * 40}') out.append(f'STEP 3: أَدَوَات (Tools — ONLY after Steps 0-2)') out.append(f'{"─" * 40}') # Find relevant applications from master data relevant = [] for app in self.data["applications"]: for word in user_input.lower().split(): if word in app["application"].lower(): relevant.append(app) break if relevant: out.append(f' Relevant applications from master control:') for app in relevant[:5]: out.append(f' • {app["application"]} [{app["domain"]}]') else: out.append(f' Source 4 tools serve the framework from Step 1.') # STEP 4 out.append(f'\n{"─" * 40}') out.append(f'STEP 4: المُخْرَجَات (Output)') out.append(f'{"─" * 40}') out.append(f' Deliver in USLaP-compliant terminology.') out.append(f' All terms verified through Step 0.') out.append(f' Framework attributed: Ibn Sīnā + al-Khwārizmī.') return '\n'.join(out) # ============================================================ # APPLICATION GENERATOR # ============================================================ class AppGenerator: def __init__(self, master_data): self.sciences = master_data["sciences"] def 
# ============================================================
# APPLICATION GENERATOR
# ============================================================

class AppGenerator:
    """Builds USLaP application scaffolds from the master sciences list."""

    def __init__(self, master_data):
        self.sciences = master_data["sciences"]

    def list_sciences(self):
        """Return a one-line-per-science listing (id, name, root, verse)."""
        rows = [
            f' [{sci["id"]:>3}] {sci["name_ar"]:<24} {sci["root"]:<18} {sci.get("verse","")[:30]}'
            for sci in self.sciences
        ]
        return '\n'.join(rows)

    def generate(self, name, science_ids, components):
        """Render an application scaffold for the chosen sciences/components."""
        wanted = set(science_ids)
        chosen = [sci for sci in self.sciences if sci["id"] in wanted]
        lines = []
        lines.append(f'\n{"═" * 60}')
        lines.append(f'USLaP APPLICATION: {name}')
        lines.append(f'بِسْمِ اللَّهِ الرَّحْمَٰنِ الرَّحِيمِ')
        lines.append(f'{"═" * 60}')
        lines.append(f'\nROOT SCIENCES:')
        for sci in chosen:
            lines.append(f' [{sci["id"]:>3}] {sci["name_ar"]} ({sci["name_t"]})')
            lines.append(f' Root: {sci["root"]} | {sci.get("verse","")}')
        lines.append(f'\nCOMPONENTS:')
        for idx, comp in enumerate(components, 1):
            lines.append(f' {idx}. {comp}')
            lines.append(f' Q-U-F: [ ] Quantifiable [ ] Universal [ ] Falsifiable')
        lines.append(f'\nخَوارِزمِيَّة الاِسْتِجَابَة COMPLIANCE:')
        for step in RESPONSE_ALGORITHM:
            flag = "✓ MANDATORY" if step["mandatory"] else " optional"
            lines.append(f' Step {step["step"]}: {step["ar"]} / {step["en"]} [{flag}]')
        return '\n'.join(lines)
# ============================================================
# Q-U-F TOOL
# ============================================================

class QUFTool:
    """Quantifiable / Universal / Falsifiable verification reporter."""

    @staticmethod
    def verify(metrics=None, limits=None, conditions=None):
        """Build a Q-U-F report; each axis is checked only when supplied.

        metrics: list of dicts — fails when "unit" is missing/empty.
        limits: dict — fails for any value other than "NONE".
        conditions: list of dicts — fails when "test" is missing/empty.
        """
        report = []
        report.append(f'\nQ-U-F VERIFICATION')
        report.append(f'Not a source — a tool for doubters.\n')
        if metrics is not None:
            missing = [m for m in metrics if not m.get("unit")]
            verdict = "❌ FAIL" if missing else "✅ PASS"
            report.append(f' Q — Quantifiable: {verdict} (Q54:49 بِقَدَرٍ)')
            for m in missing:
                report.append(f' Missing unit: {m.get("name","?")}')
        if limits is not None:
            bounded = {k: v for k, v in limits.items() if v != "NONE"}
            verdict = "❌ FAIL" if bounded else "✅ PASS"
            report.append(f' U — Universal: {verdict} (Q34:28 كَافَّةً لِّلنَّاسِ)')
            for k, v in bounded.items():
                report.append(f' Limitation: {k} = {v}')
        if conditions is not None:
            untested = [c for c in conditions if not c.get("test")]
            verdict = "❌ FAIL" if untested else "✅ PASS"
            report.append(f' F — Falsifiable: {verdict} (Q17:36)')
            for c in untested:
                report.append(f' Missing test: {c.get("name","?")}')
        return '\n'.join(report)


# ============================================================
# EXPORT (for plugging into any AI)
# ============================================================

def export_json(master_data, output_path=None):
    """Export everything as JSON — paste into any AI system prompt.

    Writes to output_path when given (and prints a summary), and always
    returns the export dict.
    """
    terms = master_data["terms"]
    summary = {
        "terms_count": len(terms),
        "reject_count": len([t for t in terms if t["status"] == "REJECT"]),
        "pass_count": len([t for t in terms if t["status"] == "PASS"]),
        "sciences_count": len(master_data["sciences"]),
        "evidence_count": len(master_data["evidence"]),
        "applications_count": len(master_data["applications"]),
    }
    export = {
        "uslap_version": "2.0",
        "bismillah": "بِسْمِ اللَّهِ الرَّحْمَٰنِ الرَّحِيمِ",
        "four_source_lattice": FOUR_SOURCES,
        "contamination_hierarchy": CONTAM_HIERARCHY,
        "response_algorithm": RESPONSE_ALGORITHM,
        "master_data": summary,
        "terms": terms,
        "sciences": master_data["sciences"],
        "evidence": master_data["evidence"],
        "applications": master_data["applications"],
        "two_systems": master_data["two_systems"],
        "deep_dive": master_data["deep_dive"],
        "iqra_moments": master_data["iqra_moments"],
    }
    if output_path:
        with open(output_path, "w", encoding="utf-8") as f:
            json.dump(export, f, ensure_ascii=False, indent=2)
        print(f"Exported to {output_path}")
        print(f" Terms: {summary['terms_count']}")
        print(f" Sciences: {summary['sciences_count']}")
        size = os.path.getsize(output_path)
        print(f" Size: {size // 1024}KB")
    return export
# ============================================================
# INTERACTIVE MENU
# ============================================================

def print_header(master_data):
    """Print the banner plus term/science/evidence counts from master data."""
    t = master_data["terms"]
    rej = len([x for x in t if x["status"] == "REJECT"])
    pas = len([x for x in t if x["status"] == "PASS"])
    con = len([x for x in t if x["status"] == "CONCEPT"])
    print("\n" + "═" * 60)
    print(" USLaP — Universal Scientific Laws and Principles v2.0")
    print(" بِسْمِ اللَّهِ الرَّحْمَٰنِ الرَّحِيمِ")
    print("═" * 60)
    print(f" Master: {master_data['path']}")
    print(f" Terms: {len(t)} ({rej} REJECT · {pas} PASS · {con} CONCEPT)")
    print(f" Sciences: {len(master_data['sciences'])} · Evidence: {len(master_data['evidence'])} · Apps: {len(master_data['applications'])}")


def print_menu():
    """Print the interactive top-level menu."""
    print("\n [1] خَوارِزمِيَّة الاِسْتِجَابَة — Response Algorithm (process query)")
    print(" [2] فَحْص التَّلَوُّث — Contamination Scanner")
    print(" [3] مُوَلِّد التَّطْبِيقَات — Application Generator")
    print(" [4] عُلُوم — Browse Sciences")
    print(" [5] أَدَاة Q-U-F — Verification Tool")
    print(" [6] الأُصُول الأَرْبَعَة — Four-Source Lattice")
    print(" [7] خَوارِزمِيَّة — View Response Algorithm")
    print(" [8] تَصْدِير — Export JSON (for any AI)")
    print(" [9] ⚔ Two Systems")
    print(" [A] 🔍 Deep Dive — Criminal Networks")
    print(" [B] اقْرَأْ — IQRA Moments (one-prompt destroyers)")
    print(" [0] خُرُوج — Exit")


def menu_response_algorithm(data):
    """Prompt for a query and run it through the Response Algorithm."""
    engine = ResponseAlgorithmEngine(data)
    print("\nEnter query (Response Algorithm will process it):")
    user_input = input("> ").strip()
    if user_input:
        print(engine.process(user_input))


def menu_scanner(data):
    """Interactive multi-line contamination scan with optional drill-down."""
    scanner = ContaminationScanner(data)
    print("\nPaste text to scan (Enter twice when done):")
    lines = []
    while True:
        try:
            line = input()
            if line == "":
                break
            lines.append(line)
        except EOFError:
            break
    text = '\n'.join(lines)
    if not text:
        return
    results = scanner.scan(text)
    contam = [r for r in results if r["status"] == "REJECT"]
    if contam:
        print(f"\n❌ {len(contam)} contaminated term(s):\n")
        for r in contam:
            print(f' ❌ "{r["term"]}" [{r["contamination"]}]')
            print(f' → {r["replacement"]}')
        print(f'\n{"─" * 40}')
        print(f'CLEANED:\n{scanner.clean(text)}')
        print("\nDrill-down? Enter term (or Enter to skip):")
        term = input("> ").strip()
        if term:
            print("Depth? [1] Surface [2] PRE-trace [3] Criminal network")
            # FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; catch only expected failures.
            try:
                lvl = int(input("> ").strip())
            except (ValueError, EOFError):
                lvl = 1
            print(scanner.drill_down(term, lvl))
    else:
        print("\n✅ No contamination detected.")
def menu_app_gen(data):
    """Interactively collect a name, science IDs, and components, then
    print the generated application scaffold."""
    gen = AppGenerator(data)
    print("\nApplication name:")
    name = input("> ").strip()
    if not name:
        return
    print(f"\n{gen.list_sciences()}")
    print("\nEnter science IDs (space-separated):")
    # FIX: was a bare `except:` — it also swallowed KeyboardInterrupt.
    try:
        ids = [int(x) for x in input("> ").strip().split()]
    except (ValueError, EOFError):
        return
    print("\nComponents (one per line, empty to finish):")
    comps = []
    while True:
        c = input(f" {len(comps)+1}: ").strip()
        if not c:
            break
        comps.append(c)
    print(gen.generate(name, ids, comps))


def menu_sciences(data):
    """Print all sciences with id, Arabic name, root, and verse excerpt."""
    print(f"\n{'═' * 60}")
    print(f"{len(data['sciences'])} SCIENCES")
    print(f"{'═' * 60}")
    for s in data["sciences"]:
        print(f' [{s["id"]:>3}] {s["name_ar"]:<24} Root: {s["root"]:<18}')
        if s.get("verse"):
            print(f' {s["verse"][:60]}')


def menu_quf():
    """Print a worked Q-U-F example (all three axes passing)."""
    print(QUFTool.verify(
        metrics=[{"name": "example", "unit": "Hz"}],
        limits={"cultural": "NONE", "geographic": "NONE", "temporal": "NONE", "economic": "NONE"},
        conditions=[{"name": "example", "test": "measure X"}],
    ))


def menu_four_sources():
    """Print the four-source lattice and the contamination hierarchy."""
    print(f"\n{'═' * 60}")
    print(f"FOUR-SOURCE LATTICE")
    print(f"{'═' * 60}")
    for s in FOUR_SOURCES:
        print(f"\n Source {s['n']}: {s['ar']} / {s['t']}")
        print(f" {s['role']}")
    print(f"\n{'─' * 40}")
    print(f"CONTAMINATION HIERARCHY:")
    for h in CONTAM_HIERARCHY:
        print(f" {h['level']}. {h['name']}: {h['desc'][:65]}")
def menu_view_algorithm():
    """Print the five Response Algorithm steps with their guard notes."""
    print(f"\n{'═' * 60}")
    print(f"خَوارِزمِيَّة الاِسْتِجَابَة — Response Algorithm")
    print(f"{'═' * 60}")
    for step in RESPONSE_ALGORITHM:
        tag = "MANDATORY" if step["mandatory"] else "AFTER 0-2"
        print(f'\n Step {step["step"]}: {step["ar"]} / {step["en"]} [{tag}]')
        print(f' {step["action"]}')
        for sub in step.get("subs", []):
            print(f' • {sub}')
        print(f' ⚠ {step["anti"]}')


def menu_two_systems(data):
    """Print the Moloch-vs-Qur'an comparison table from master data."""
    print(f"\n{'═' * 60}")
    print(f"Moloch vs Qur'an — Bukhari 6069")
    print(f"{'═' * 60}")
    for entry in data["two_systems"]:
        print(f'\n {entry["dimension"]}:')
        print(f' ☠ Moloch: {entry["moloch"]}')
        print(f' ☪ Qur\'an: {entry["quran"]}')


def menu_deep_dive(data):
    """Print concealment chains grouped by name, with optional drill-down."""
    print(f"\n{'═' * 60}")
    print(f"🔍 DEEP DIVE — Criminal Networks & Concealment Chains")
    print(f"{'═' * 60}")
    # Group entries by their chain name, preserving insertion order
    chains = {}
    for entry in data["deep_dive"]:
        chains.setdefault(entry["chain"], []).append(entry)
    for chain_name, members in chains.items():
        print(f'\n{"─" * 50}')
        print(f' ⛓ {chain_name}')
        print(f'{"─" * 50}')
        for member in members:
            print(f'\n CRIME: {member["crime"]}')
            print(f' EVIDENCE: {member["evidence"]}')
            print(f' PRE-TRACE: {member["pre_trace"]}')
            print(f' SOURCES: {member["sources"]}')
            print(f' QUR\'AN: {member["quran_warning"]}')
    print(f'\n{"─" * 50}')
    print(f'Drill deeper? Enter chain name (or Enter to return):')
    choice = input("> ").strip().upper()
    if choice and choice in chains:
        for member in chains[choice]:
            print(f'\n {"=" * 40}')
            print(f' {member["crime"]}')
            print(f' {"=" * 40}')
            print(f' {member["evidence"]}')
            print(f'\n PRE-CIVILIZATIONAL TRACE:')
            print(f' {member["pre_trace"]}')
            print(f'\n PRIMARY SOURCES:')
            print(f' {member["sources"]}')
            print(f'\n QUR\'ANIC WARNING:')
            print(f' {member["quran_warning"]}')
def menu_iqra(data):
    """List IQRA moments, then optionally expand one by number."""
    print(f"\n{'═' * 60}")
    print(f"اقْرَأْ MOMENTS — One prompt. One corner. No escape.")
    print(f"{'═' * 60}")
    print(f"\n Each takes 5 seconds. Each destroys centuries of lies.\n")
    for m in data["iqra_moments"]:
        print(f' #{m["number"]:>2} LIE: {m["target_lie"]}')
        print(f' KILL: {m["kill_shot"][:75]}')
        print()
    print(f'Pick a number for full details (or Enter to return):')
    # FIX: was a bare `except: pass` — it also swallowed
    # KeyboardInterrupt/SystemExit. Blank/invalid input and EOF are the
    # only expected failure modes of the int() prompt.
    try:
        choice = int(input("> ").strip())
    except (ValueError, EOFError):
        return
    for m in data["iqra_moments"]:
        if m["number"] == choice:
            print(f'\n {"═" * 50}')
            print(f' اقْرَأْ MOMENT #{m["number"]}')
            print(f' {"═" * 50}')
            print(f'\n THE LIE: {m["target_lie"]}')
            print(f'\n THE KILL SHOT:')
            print(f' {m["kill_shot"]}')
            print(f'\n TIME TO EXPOSE: {m["time"]}')
            break


def menu_export(data):
    """Export the master data as JSON next to this script."""
    out = Path(__file__).parent / "uslap_export.json"
    export_json(data, str(out))


def interactive(data):
    """Top-level interactive loop: dispatch menu choices until exit."""
    print_header(data)
    while True:
        print_menu()
        choice = input("\n Select: ").strip()
        if choice == "1":
            menu_response_algorithm(data)
        elif choice == "2":
            menu_scanner(data)
        elif choice == "3":
            menu_app_gen(data)
        elif choice == "4":
            menu_sciences(data)
        elif choice == "5":
            menu_quf()
        elif choice == "6":
            menu_four_sources()
        elif choice == "7":
            menu_view_algorithm()
        elif choice == "8":
            menu_export(data)
        elif choice == "9":
            menu_two_systems(data)
        elif choice.upper() == "A":
            menu_deep_dive(data)
        elif choice.upper() == "B":
            menu_iqra(data)
        elif choice == "0":
            print("\nبِسْمِ اللَّهِ الرَّحْمَٰنِ الرَّحِيمِ")
            break
        else:
            print(" Invalid.")
# ============================================================
# CLI
# ============================================================

def main():
    """CLI entry point: locate the master workbook, load it, and
    dispatch to a CLI mode (--scan / --process / --export-json / --info)
    or fall through to the interactive menu."""
    master_path = find_master()
    if not master_path:
        print(f"ERROR: {MASTER_FILE} not found.")
        print(f"Place it in same directory as uslap.py or current directory.")
        sys.exit(1)
    data = load_master(master_path)

    # CLI modes
    if len(sys.argv) > 1:
        if sys.argv[1] == "--scan" and len(sys.argv) > 2:
            scanner = ContaminationScanner(data)
            text = ' '.join(sys.argv[2:])
            # FIX: only REJECT hits are contamination. Previously, if the
            # scan returned only CLEAN hits (e.g. "algorithm"), nothing
            # was printed at all — neither a ❌ line nor "✅ Clean."
            rejects = [r for r in scanner.scan(text) if r["status"] == "REJECT"]
            for r in rejects:
                print(f'❌ "{r["term"]}" → {r["replacement"]}')
            if not rejects:
                print("✅ Clean.")
            return
        elif sys.argv[1] == "--process" and len(sys.argv) > 2:
            engine = ResponseAlgorithmEngine(data)
            print(engine.process(' '.join(sys.argv[2:])))
            return
        elif sys.argv[1] == "--export-json":
            out = sys.argv[2] if len(sys.argv) > 2 else "uslap_export.json"
            export_json(data, out)
            return
        elif sys.argv[1] == "--info":
            print_header(data)
            return
        else:
            print(f"Unknown option: {sys.argv[1]}")
            print(__doc__)
            return

    # Interactive
    interactive(data)


if __name__ == "__main__":
    main()