Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,1032 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import random
|
| 4 |
+
import time
|
| 5 |
+
import requests
|
| 6 |
+
import json
|
| 7 |
+
import re
|
| 8 |
+
import matplotlib.pyplot as plt
|
| 9 |
+
import networkx as nx
|
| 10 |
+
from datetime import datetime
|
| 11 |
+
import gradio as gr
|
| 12 |
+
import io
|
| 13 |
+
from PIL import Image
|
| 14 |
+
|
| 15 |
+
# -------------------------------------------------------------------
|
| 16 |
+
# TemporalPredictionSystem Class Definition
|
| 17 |
+
# -------------------------------------------------------------------
|
| 18 |
+
class TemporalPredictionSystem:
    """Self-adjusting causal-graph simulator with Markov-chain text prediction.

    Maintains a graph of nodes/edges whose values and predictions co-evolve,
    plus "timelines" of words walked over a Markov chain built from quotes.

    NOTE(review): the constructor triggers a network fetch (fetch_quotes_data)
    as a side effect — confirm this is intended for app startup.
    """

    def __init__(self):
        # --- Causal graph state -----------------------------------------
        self.nodes = []
        self.edges = []
        self.time_scales = [0.1, 0.5, 1, 2, 5]  # Different time scales
        self.recursion_depth = 3
        self.causal_strength = 5
        self.time_scale = 3

        # --- System metrics ---------------------------------------------
        self.prediction_accuracy = 0
        self.causal_consistency = 0
        self.meta_rule_optimizations = 0
        self.fixed_point_iterations = 0

        # --- Simulation state -------------------------------------------
        self.is_running = False
        self.tick_count = 0

        # --- Constants ---------------------------------------------------
        self.MAX_NODES = 50
        self.NODE_DECAY_RATE = 0.02
        self.EDGE_THRESHOLD = 0.3

        # --- Markov chain seed vocabulary for text predictions ------------
        self.word_corpus = [
            "future", "past", "present", "time", "space", "causality", "effect", "prediction",
            "reality", "quantum", "probability", "divergence", "convergence", "branch", "path",
            "timeline", "paradox", "stability", "chaos", "deterministic", "random", "uncertain",
            "fixed", "flexible", "loop", "recursive", "retrocausal", "forward", "backward", "cycle",
            "wave", "particle", "superposition", "entanglement", "observer", "collapse", "multiverse",
            "dimension", "parallel", "simulation", "emergence", "complexity", "singularity", "event",
            "horizon", "threshold", "attractor", "repeller", "equilibrium", "evolution", "revolution"
        ]
        self.quotes_data = []
        self.tags_data = []
        self.markov_chain = {}
        self.timelines = []
        self.predicted_futures = []

        # --- Chat model properties ----------------------------------------
        self.conversation_history = []
        self.personality_traits = {
            "optimism": 0.7,
            "creativity": 0.8,
            "analytical": 0.65,
            "humor": 0.5,
            "empathy": 0.6
        }
        self.learning_rate = 0.05
        self.context_size = 5
        self.knowledge_base = {}
        self.temporal_bias = 0.3  # How much to bias toward future perspective
        self.self_model_accuracy = 0.2  # Starts low, improves with conversation
        self.model_growth_rate = 0
        self.total_interactions = 0

        # --- Response generators (JS-style {{placeholder}} templates) -----
        self.response_templates = [
            "Based on your current trajectory, I see {{future}}.",
            "From my perspective {{timeYears}} years ahead, I can tell you that {{insight}}.",
            "Your interest in {{topic}} eventually leads to {{outcome}}.",
            "I remember this conversation. It {{impact}} my thinking significantly.",
            "In the future, you'll realize that {{realization}}.",
            "What you're doing now with {{activity}} actually {{futureResult}}.",
            "The {{decision}} you're considering has interesting consequences: {{consequences}}.",
            "From my vantage point, I'd suggest focusing more on {{suggestion}}.",
            "Your intuition about {{subject}} is {{accuracy}} - I know because I've {{evidence}}."
        ]

        self.insight_patterns = [
            "this challenge ultimately strengthens your {{attribute}}",
            "you develop a unique perspective on {{topic}} that others find valuable",
            "the uncertainty you feel now resolves into {{outcome}}",
            "what seems important now shifts dramatically after you {{experience}}",
            "this period of {{state}} leads to unexpected opportunities in {{domain}}",
            "your approach to {{activity}} evolves in ways that surprise even you",
            "the questions you're asking now form the foundation of your future {{role}}"
        ]

        # --- Topic and concept modeling, seeded from the base corpus ------
        self.topic_extractor = {word: 1 for word in self.word_corpus}
        self.topic_relations = {word: [] for word in self.word_corpus}

        # Fetch quotes data, build the Markov chain, and initialize.
        self.fetch_quotes_data()
|
| 109 |
+
|
| 110 |
+
def fetch_quotes_data(self):
|
| 111 |
+
"""Fetch quotes data from Hugging Face dataset API"""
|
| 112 |
+
try:
|
| 113 |
+
response = requests.get(
|
| 114 |
+
"https://datasets-server.huggingface.co/rows?dataset=Abirate%2Fenglish_quotes&config=default&split=train&offset=0&length=100"
|
| 115 |
+
)
|
| 116 |
+
data = response.json()
|
| 117 |
+
|
| 118 |
+
if data and "rows" in data:
|
| 119 |
+
for row in data["rows"]:
|
| 120 |
+
if row and "row" in row and "quote" in row["row"]:
|
| 121 |
+
quote = row["row"]["quote"]
|
| 122 |
+
self.quotes_data.append(quote)
|
| 123 |
+
words = [w for w in re.sub(r'[^\w\s]', '', quote.lower()).split() if len(w) > 3]
|
| 124 |
+
self.word_corpus = list(set(self.word_corpus + words))
|
| 125 |
+
if "tags" in row["row"]:
|
| 126 |
+
tags = [row["row"]["tags"]] if not isinstance(row["row"]["tags"], list) else row["row"]["tags"]
|
| 127 |
+
self.tags_data.extend(tags)
|
| 128 |
+
tag_words = [tag.lower() for tag in tags if len(tag) > 3]
|
| 129 |
+
self.word_corpus = list(set(self.word_corpus + tag_words))
|
| 130 |
+
|
| 131 |
+
print(f"Loaded {len(self.quotes_data)} quotes and {len(self.tags_data)} tags")
|
| 132 |
+
print(f"Word corpus expanded to {len(self.word_corpus)} words")
|
| 133 |
+
for word in self.word_corpus:
|
| 134 |
+
self.topic_extractor[word] = 1
|
| 135 |
+
self.topic_relations[word] = []
|
| 136 |
+
self.build_initial_markov_chain()
|
| 137 |
+
|
| 138 |
+
except Exception as e:
|
| 139 |
+
print(f"Error fetching quotes data: {e}")
|
| 140 |
+
self.build_initial_markov_chain()
|
| 141 |
+
|
| 142 |
+
self.initialize()
|
| 143 |
+
|
| 144 |
+
def build_initial_markov_chain(self):
|
| 145 |
+
"""Build a Markov chain from quotes and corpus data"""
|
| 146 |
+
chain = {}
|
| 147 |
+
for quote in self.quotes_data:
|
| 148 |
+
words = [w for w in re.sub(r'[^\w\s]', '', quote.lower()).split() if len(w) > 3]
|
| 149 |
+
for i in range(len(words) - 1):
|
| 150 |
+
word = words[i]
|
| 151 |
+
next_word = words[i + 1]
|
| 152 |
+
if word not in chain:
|
| 153 |
+
chain[word] = {}
|
| 154 |
+
chain[word][next_word] = chain[word].get(next_word, 0) + 1
|
| 155 |
+
if len(chain) < 30:
|
| 156 |
+
for i in range(len(self.word_corpus)):
|
| 157 |
+
word = self.word_corpus[i]
|
| 158 |
+
next_word = self.word_corpus[(i + 1) % len(self.word_corpus)]
|
| 159 |
+
if word not in chain:
|
| 160 |
+
chain[word] = {}
|
| 161 |
+
chain[word][next_word] = chain[word].get(next_word, 0) + 1
|
| 162 |
+
for tag in self.tags_data:
|
| 163 |
+
related_words = random.sample(self.word_corpus, min(5, len(self.word_corpus)))
|
| 164 |
+
tag_word = tag.lower()
|
| 165 |
+
if len(tag_word) > 3:
|
| 166 |
+
if tag_word not in chain:
|
| 167 |
+
chain[tag_word] = {}
|
| 168 |
+
for related_word in related_words:
|
| 169 |
+
chain[tag_word][related_word] = chain[tag_word].get(related_word, 0) + 1
|
| 170 |
+
for i in range(50):
|
| 171 |
+
word1 = random.choice(self.word_corpus)
|
| 172 |
+
word2 = random.choice(self.word_corpus)
|
| 173 |
+
if word1 not in chain:
|
| 174 |
+
chain[word1] = {}
|
| 175 |
+
chain[word1][word2] = chain[word1].get(word2, 0) + 1
|
| 176 |
+
self.markov_chain = chain
|
| 177 |
+
|
| 178 |
+
def initialize(self):
|
| 179 |
+
"""Initialize the system with nodes, edges, and timelines"""
|
| 180 |
+
self.generate_initial_nodes()
|
| 181 |
+
self.generate_initial_edges()
|
| 182 |
+
self.reset_metrics()
|
| 183 |
+
self.initialize_timelines()
|
| 184 |
+
self.generate_predictions()
|
| 185 |
+
|
| 186 |
+
def initialize_timelines(self):
|
| 187 |
+
"""Initialize timelines with seed words and predictions"""
|
| 188 |
+
self.timelines = []
|
| 189 |
+
timeline_count = 3 + random.randint(0, 2)
|
| 190 |
+
for i in range(timeline_count):
|
| 191 |
+
seed_word = random.choice(self.word_corpus)
|
| 192 |
+
length = 3 + random.randint(0, 4)
|
| 193 |
+
words = [seed_word]
|
| 194 |
+
current_word = seed_word
|
| 195 |
+
for j in range(1, length):
|
| 196 |
+
current_word = self.get_next_word_from_markov(current_word)
|
| 197 |
+
words.append(current_word)
|
| 198 |
+
self.timelines.append({
|
| 199 |
+
"id": f"timeline-{i}",
|
| 200 |
+
"words": words,
|
| 201 |
+
"probability": random.random(),
|
| 202 |
+
"timeScale": random.randint(0, len(self.time_scales) - 1),
|
| 203 |
+
"stability": random.random()
|
| 204 |
+
})
|
| 205 |
+
|
| 206 |
+
def get_next_word_from_markov(self, current_word):
|
| 207 |
+
"""Get next word based on Markov chain probabilities"""
|
| 208 |
+
if current_word not in self.markov_chain:
|
| 209 |
+
return random.choice(self.word_corpus)
|
| 210 |
+
next_words = self.markov_chain[current_word]
|
| 211 |
+
words = list(next_words.keys())
|
| 212 |
+
weights = list(next_words.values())
|
| 213 |
+
total_weight = sum(weights)
|
| 214 |
+
r = random.random() * total_weight
|
| 215 |
+
running_sum = 0
|
| 216 |
+
for i in range(len(words)):
|
| 217 |
+
running_sum += weights[i]
|
| 218 |
+
if r <= running_sum:
|
| 219 |
+
return words[i]
|
| 220 |
+
return random.choice(words) if words else random.choice(self.word_corpus)
|
| 221 |
+
|
| 222 |
+
def generate_predictions(self):
|
| 223 |
+
"""Generate future predictions for each timeline"""
|
| 224 |
+
self.predicted_futures = []
|
| 225 |
+
for timeline in self.timelines:
|
| 226 |
+
future_count = 2 + random.randint(0, 1)
|
| 227 |
+
for i in range(future_count):
|
| 228 |
+
last_word = timeline["words"][-1]
|
| 229 |
+
future_length = 2 + random.randint(0, 2)
|
| 230 |
+
current_word = last_word
|
| 231 |
+
future_words = [current_word]
|
| 232 |
+
for j in range(1, future_length):
|
| 233 |
+
current_word = self.get_next_word_from_markov(current_word)
|
| 234 |
+
future_words.append(current_word)
|
| 235 |
+
self.predicted_futures.append({
|
| 236 |
+
"id": f"future-{len(self.predicted_futures)}",
|
| 237 |
+
"timelineId": timeline["id"],
|
| 238 |
+
"words": future_words,
|
| 239 |
+
"probability": random.random() * timeline["probability"],
|
| 240 |
+
"confidence": random.random(),
|
| 241 |
+
"timeScale": timeline["timeScale"]
|
| 242 |
+
})
|
| 243 |
+
|
| 244 |
+
def generate_initial_nodes(self):
|
| 245 |
+
"""Generate initial nodes for visualization and computation"""
|
| 246 |
+
self.nodes = []
|
| 247 |
+
initial_node_count = 5 + self.recursion_depth * 2
|
| 248 |
+
for i in range(initial_node_count):
|
| 249 |
+
self.nodes.append({
|
| 250 |
+
"id": f"node-{i}",
|
| 251 |
+
"x": random.random() * 700 + 50,
|
| 252 |
+
"y": random.random() * 400 + 50,
|
| 253 |
+
"timeScale": random.randint(0, len(self.time_scales) - 1),
|
| 254 |
+
"value": random.random(),
|
| 255 |
+
"prediction": random.random(),
|
| 256 |
+
"age": 0,
|
| 257 |
+
"radius": 10 + random.random() * 15,
|
| 258 |
+
"word": random.choice(self.word_corpus),
|
| 259 |
+
"future": self.generate_future_phrase(3)
|
| 260 |
+
})
|
| 261 |
+
|
| 262 |
+
def generate_future_phrase(self, length):
|
| 263 |
+
"""Generate a phrase of specified length using Markov chain"""
|
| 264 |
+
words = []
|
| 265 |
+
start_word = random.choice(self.word_corpus)
|
| 266 |
+
current_word = start_word
|
| 267 |
+
words.append(current_word)
|
| 268 |
+
for i in range(1, length):
|
| 269 |
+
current_word = self.get_next_word_from_markov(current_word)
|
| 270 |
+
words.append(current_word)
|
| 271 |
+
return " ".join(words)
|
| 272 |
+
|
| 273 |
+
def generate_initial_edges(self):
|
| 274 |
+
"""Generate initial edges between nodes"""
|
| 275 |
+
self.edges = []
|
| 276 |
+
for i in range(len(self.nodes)):
|
| 277 |
+
source_node = self.nodes[i]
|
| 278 |
+
connection_count = random.randint(1, 3)
|
| 279 |
+
for j in range(connection_count):
|
| 280 |
+
target_index = i
|
| 281 |
+
while target_index == i:
|
| 282 |
+
target_index = random.randint(0, len(self.nodes) - 1)
|
| 283 |
+
target_node = self.nodes[target_index]
|
| 284 |
+
self.edges.append({
|
| 285 |
+
"id": f"edge-{len(self.edges)}",
|
| 286 |
+
"source": source_node["id"],
|
| 287 |
+
"target": target_node["id"],
|
| 288 |
+
"strength": random.random() * self.causal_strength / 10,
|
| 289 |
+
"age": 0
|
| 290 |
+
})
|
| 291 |
+
|
| 292 |
+
def reset_metrics(self):
|
| 293 |
+
"""Reset system metrics"""
|
| 294 |
+
self.prediction_accuracy = 0
|
| 295 |
+
self.causal_consistency = 0
|
| 296 |
+
self.meta_rule_optimizations = 0
|
| 297 |
+
self.fixed_point_iterations = 0
|
| 298 |
+
|
| 299 |
+
def tick(self):
|
| 300 |
+
"""Run one simulation tick"""
|
| 301 |
+
self.tick_count += 1
|
| 302 |
+
self.propagate_influences()
|
| 303 |
+
self.apply_retrocausality()
|
| 304 |
+
self.apply_fixed_point_combination()
|
| 305 |
+
if self.tick_count % 10 == 0:
|
| 306 |
+
self.optimize_meta_rules()
|
| 307 |
+
self.enforce_causal_consistency()
|
| 308 |
+
self.manage_node_lifecycle()
|
| 309 |
+
self.manage_edge_lifecycle()
|
| 310 |
+
self.update_metrics()
|
| 311 |
+
if self.tick_count % 5 == 0:
|
| 312 |
+
self.update_timelines()
|
| 313 |
+
self.generate_predictions()
|
| 314 |
+
|
| 315 |
+
def propagate_influences(self):
|
| 316 |
+
"""Propagate causal influences through the network"""
|
| 317 |
+
node_values_snapshot = [{
|
| 318 |
+
"id": node["id"],
|
| 319 |
+
"value": node["value"],
|
| 320 |
+
"prediction": node["prediction"]
|
| 321 |
+
} for node in self.nodes]
|
| 322 |
+
for node in self.nodes:
|
| 323 |
+
incoming_edges = [edge for edge in self.edges if edge["target"] == node["id"]]
|
| 324 |
+
if incoming_edges:
|
| 325 |
+
influence_sum = 0
|
| 326 |
+
weight_sum = 0
|
| 327 |
+
for edge in incoming_edges:
|
| 328 |
+
source_node = next((n for n in node_values_snapshot if n["id"] == edge["source"]), None)
|
| 329 |
+
if source_node:
|
| 330 |
+
influence_sum += source_node["value"] * edge["strength"]
|
| 331 |
+
weight_sum += edge["strength"]
|
| 332 |
+
if weight_sum > 0:
|
| 333 |
+
scale_factor = self.time_scales[node["timeScale"]] * (self.time_scale / 3)
|
| 334 |
+
influence_value = influence_sum / weight_sum
|
| 335 |
+
node["value"] = (node["value"] * (1 - scale_factor * 0.1) +
|
| 336 |
+
influence_value * scale_factor * 0.1 +
|
| 337 |
+
(random.random() - 0.5) * 0.02)
|
| 338 |
+
node["value"] = max(0, min(1, node["value"]))
|
| 339 |
+
|
| 340 |
+
def apply_retrocausality(self):
|
| 341 |
+
"""Apply backward-in-time influence to update predictions"""
|
| 342 |
+
for node in self.nodes:
|
| 343 |
+
outgoing_edges = [edge for edge in self.edges if edge["source"] == node["id"]]
|
| 344 |
+
if outgoing_edges:
|
| 345 |
+
prediction_error = 0
|
| 346 |
+
for edge in outgoing_edges:
|
| 347 |
+
target_node = next((n for n in self.nodes if n["id"] == edge["target"]), None)
|
| 348 |
+
if target_node:
|
| 349 |
+
error = abs(target_node["value"] - target_node["prediction"])
|
| 350 |
+
prediction_error += error * edge["strength"]
|
| 351 |
+
adjustment_factor = 0.05 * (self.recursion_depth / 5)
|
| 352 |
+
node["prediction"] = node["prediction"] * (1 - adjustment_factor) + \
|
| 353 |
+
(node["value"] + (random.random() - 0.5) * 0.1) * adjustment_factor
|
| 354 |
+
node["prediction"] = max(0, min(1, node["prediction"]))
|
| 355 |
+
|
| 356 |
+
def apply_fixed_point_combination(self):
|
| 357 |
+
"""Apply Y combinator-inspired approach to find fixed points"""
|
| 358 |
+
if self.tick_count % 5 == 0:
|
| 359 |
+
iterations = 1 + self.recursion_depth // 2
|
| 360 |
+
for i in range(iterations):
|
| 361 |
+
nodes_to_update = random.sample(
|
| 362 |
+
self.nodes,
|
| 363 |
+
min(3 + self.recursion_depth // 2, len(self.nodes))
|
| 364 |
+
)
|
| 365 |
+
for node in nodes_to_update:
|
| 366 |
+
old_value = node["value"]
|
| 367 |
+
combined_value = (node["value"] + node["prediction"]) / 2
|
| 368 |
+
node["value"] = old_value * 0.8 + combined_value * 0.2
|
| 369 |
+
self.fixed_point_iterations += 1
|
| 370 |
+
|
| 371 |
+
def optimize_meta_rules(self):
|
| 372 |
+
"""Optimize system meta-rules based on performance"""
|
| 373 |
+
current_accuracy = self.calculate_prediction_accuracy()
|
| 374 |
+
if current_accuracy < 0.6:
|
| 375 |
+
for edge in self.edges:
|
| 376 |
+
source_node = next((n for n in self.nodes if n["id"] == edge["source"]), None)
|
| 377 |
+
target_node = next((n for n in self.nodes if n["id"] == edge["target"]), None)
|
| 378 |
+
if source_node and target_node:
|
| 379 |
+
prediction_error = abs(source_node["prediction"] - target_node["value"])
|
| 380 |
+
if prediction_error < 0.3:
|
| 381 |
+
edge["strength"] = min(1, edge["strength"] * 1.05)
|
| 382 |
+
else:
|
| 383 |
+
edge["strength"] = max(0.1, edge["strength"] * 0.95)
|
| 384 |
+
if random.random() < 0.2 and len(self.edges) < len(self.nodes) * 3:
|
| 385 |
+
self.create_new_edge()
|
| 386 |
+
self.meta_rule_optimizations += 1
|
| 387 |
+
if random.random() < 0.1 and len(self.nodes) < self.MAX_NODES:
|
| 388 |
+
self.create_new_node()
|
| 389 |
+
|
| 390 |
+
def enforce_causal_consistency(self):
|
| 391 |
+
"""Enforce logical consistency in causal relationships"""
|
| 392 |
+
causal_loops = self.detect_causal_loops()
|
| 393 |
+
if causal_loops:
|
| 394 |
+
for loop in causal_loops:
|
| 395 |
+
weakest_edge = None
|
| 396 |
+
min_strength = float('inf')
|
| 397 |
+
for i in range(len(loop) - 1):
|
| 398 |
+
edge = next((e for e in self.edges if e["source"] == loop[i] and e["target"] == loop[i+1]), None)
|
| 399 |
+
if edge and edge["strength"] < min_strength:
|
| 400 |
+
weakest_edge = edge
|
| 401 |
+
min_strength = edge["strength"]
|
| 402 |
+
if weakest_edge:
|
| 403 |
+
weakest_edge["strength"] *= 0.9
|
| 404 |
+
for node in self.nodes:
|
| 405 |
+
incoming_edges = [edge for edge in self.edges if edge["target"] == node["id"]]
|
| 406 |
+
if len(incoming_edges) > 1:
|
| 407 |
+
contradictions = 0
|
| 408 |
+
for i in range(len(incoming_edges)):
|
| 409 |
+
for j in range(i + 1, len(incoming_edges)):
|
| 410 |
+
source_node_i = next((n for n in self.nodes if n["id"] == incoming_edges[i]["source"]), None)
|
| 411 |
+
source_node_j = next((n for n in self.nodes if n["id"] == incoming_edges[j]["source"]), None)
|
| 412 |
+
if source_node_i and source_node_j:
|
| 413 |
+
value_difference = abs(source_node_i["value"] - source_node_j["value"])
|
| 414 |
+
if value_difference > 0.5:
|
| 415 |
+
contradictions += 1
|
| 416 |
+
incoming_edges[i]["strength"] *= 0.98
|
| 417 |
+
incoming_edges[j]["strength"] *= 0.98
|
| 418 |
+
self.causal_consistency = max(0, self.causal_consistency - contradictions * 0.01)
|
| 419 |
+
|
| 420 |
+
def manage_node_lifecycle(self):
|
| 421 |
+
"""Manage node creation and removal"""
|
| 422 |
+
for node in self.nodes:
|
| 423 |
+
node["age"] += 1
|
| 424 |
+
if len(self.nodes) > self.MAX_NODES:
|
| 425 |
+
node_connections = []
|
| 426 |
+
for node in self.nodes:
|
| 427 |
+
connections = len([e for e in self.edges if e["source"] == node["id"] or e["target"] == node["id"]])
|
| 428 |
+
node_connections.append({"node": node, "connections": connections})
|
| 429 |
+
node_connections.sort(key=lambda x: (x["connections"], -x["node"]["age"]))
|
| 430 |
+
node_to_remove = node_connections[0]["node"]
|
| 431 |
+
self.nodes = [n for n in self.nodes if n["id"] != node_to_remove["id"]]
|
| 432 |
+
self.edges = [e for e in self.edges if e["source"] != node_to_remove["id"] and e["target"] != node_to_remove["id"]]
|
| 433 |
+
|
| 434 |
+
def manage_edge_lifecycle(self):
|
| 435 |
+
"""Manage edge creation and removal"""
|
| 436 |
+
for edge in self.edges:
|
| 437 |
+
edge["age"] += 1
|
| 438 |
+
self.edges = [edge for edge in self.edges if edge["strength"] > self.EDGE_THRESHOLD]
|
| 439 |
+
|
| 440 |
+
def update_metrics(self):
|
| 441 |
+
"""Update system performance metrics"""
|
| 442 |
+
self.prediction_accuracy = self.calculate_prediction_accuracy()
|
| 443 |
+
if self.tick_count % 20 == 0:
|
| 444 |
+
self.causal_consistency = min(1, self.causal_consistency + 0.01)
|
| 445 |
+
|
| 446 |
+
def calculate_prediction_accuracy(self):
|
| 447 |
+
"""Calculate how close predictions are to actual values"""
|
| 448 |
+
if not self.nodes:
|
| 449 |
+
return 0
|
| 450 |
+
total_error = sum(abs(node["value"] - node["prediction"]) for node in self.nodes)
|
| 451 |
+
average_error = total_error / len(self.nodes)
|
| 452 |
+
return max(0, 1 - average_error)
|
| 453 |
+
|
| 454 |
+
def detect_causal_loops(self):
|
| 455 |
+
"""Detect causal loops in the network using DFS"""
|
| 456 |
+
loops = []
|
| 457 |
+
visited = set()
|
| 458 |
+
|
| 459 |
+
def dfs(node_id, path):
|
| 460 |
+
if node_id in path:
|
| 461 |
+
loops.append(path[path.index(node_id):] + [node_id])
|
| 462 |
+
return
|
| 463 |
+
if node_id in visited:
|
| 464 |
+
return
|
| 465 |
+
path.append(node_id)
|
| 466 |
+
visited.add(node_id)
|
| 467 |
+
outgoing_edges = [edge for edge in self.edges if edge["source"] == node_id]
|
| 468 |
+
for edge in outgoing_edges:
|
| 469 |
+
dfs(edge["target"], path[:])
|
| 470 |
+
for node in self.nodes:
|
| 471 |
+
if node["id"] not in visited:
|
| 472 |
+
dfs(node["id"], [])
|
| 473 |
+
return loops
|
| 474 |
+
|
| 475 |
+
def create_new_node(self):
|
| 476 |
+
"""Create a new node connected to existing nodes"""
|
| 477 |
+
node_id = f"node-{len(self.nodes)}"
|
| 478 |
+
x, y = 0, 0
|
| 479 |
+
if self.nodes:
|
| 480 |
+
reference_node = random.choice(self.nodes)
|
| 481 |
+
x = reference_node["x"] + (random.random() - 0.5) * 100
|
| 482 |
+
y = reference_node["y"] + (random.random() - 0.5) * 100
|
| 483 |
+
x = max(50, min(750, x))
|
| 484 |
+
y = max(50, min(450, y))
|
| 485 |
+
else:
|
| 486 |
+
x = random.random() * 700 + 50
|
| 487 |
+
y = random.random() * 400 + 50
|
| 488 |
+
word = random.choice(self.word_corpus)
|
| 489 |
+
if self.timelines:
|
| 490 |
+
random_timeline = random.choice(self.timelines)
|
| 491 |
+
word = random.choice(random_timeline["words"])
|
| 492 |
+
new_node = {
|
| 493 |
+
"id": node_id,
|
| 494 |
+
"x": x,
|
| 495 |
+
"y": y,
|
| 496 |
+
"timeScale": random.randint(0, len(self.time_scales) - 1),
|
| 497 |
+
"value": random.random(),
|
| 498 |
+
"prediction": random.random(),
|
| 499 |
+
"age": 0,
|
| 500 |
+
"radius": 10 + random.random() * 15,
|
| 501 |
+
"word": word,
|
| 502 |
+
"future": self.generate_future_phrase(3)
|
| 503 |
+
}
|
| 504 |
+
self.nodes.append(new_node)
|
| 505 |
+
connection_count = random.randint(1, 3)
|
| 506 |
+
for i in range(connection_count):
|
| 507 |
+
if len(self.nodes) > 1:
|
| 508 |
+
target_index = 0
|
| 509 |
+
while True:
|
| 510 |
+
target_index = random.randint(0, len(self.nodes) - 1)
|
| 511 |
+
if self.nodes[target_index]["id"] != new_node["id"]:
|
| 512 |
+
break
|
| 513 |
+
self.edges.append({
|
| 514 |
+
"id": f"edge-{len(self.edges)}",
|
| 515 |
+
"source": new_node["id"],
|
| 516 |
+
"target": self.nodes[target_index]["id"],
|
| 517 |
+
"strength": random.random() * 0.5 + 0.3,
|
| 518 |
+
"age": 0
|
| 519 |
+
})
|
| 520 |
+
|
| 521 |
+
def create_new_edge(self):
|
| 522 |
+
"""Create a new edge between existing nodes that aren't already connected"""
|
| 523 |
+
if len(self.nodes) < 2:
|
| 524 |
+
return
|
| 525 |
+
source_index = random.randint(0, len(self.nodes) - 1)
|
| 526 |
+
target_index = source_index
|
| 527 |
+
while target_index == source_index:
|
| 528 |
+
target_index = random.randint(0, len(self.nodes) - 1)
|
| 529 |
+
source_id = self.nodes[source_index]["id"]
|
| 530 |
+
target_id = self.nodes[target_index]["id"]
|
| 531 |
+
edge_exists = any(edge["source"] == source_id and edge["target"] == target_id for edge in self.edges)
|
| 532 |
+
if not edge_exists:
|
| 533 |
+
self.edges.append({
|
| 534 |
+
"id": f"edge-{len(self.edges)}",
|
| 535 |
+
"source": source_id,
|
| 536 |
+
"target": target_id,
|
| 537 |
+
"strength": random.random() * 0.5 + 0.3,
|
| 538 |
+
"age": 0
|
| 539 |
+
})
|
| 540 |
+
|
| 541 |
+
def update_timelines(self):
|
| 542 |
+
"""Update existing timelines and occasionally create new ones"""
|
| 543 |
+
for timeline in self.timelines:
|
| 544 |
+
if random.random() < 0.3:
|
| 545 |
+
last_word = timeline["words"][-1]
|
| 546 |
+
next_word = self.get_next_word_from_markov(last_word)
|
| 547 |
+
timeline["words"].append(next_word)
|
| 548 |
+
if len(timeline["words"]) > 10:
|
| 549 |
+
timeline["words"].pop(0)
|
| 550 |
+
related_nodes = [node for node in self.nodes
|
| 551 |
+
if node["word"] in timeline["words"] or
|
| 552 |
+
timeline["words"][-1] in node["future"]]
|
| 553 |
+
if related_nodes:
|
| 554 |
+
avg_prediction_accuracy = sum(abs(node["value"] - node["prediction"]) for node in related_nodes) / len(related_nodes)
|
| 555 |
+
timeline["probability"] = max(0.1, 1 - avg_prediction_accuracy)
|
| 556 |
+
timeline["stability"] = max(0.1, timeline["stability"] * 0.95 + self.causal_consistency * 0.05)
|
| 557 |
+
if random.random() < 0.2 and len(self.timelines) < 8:
|
| 558 |
+
if self.timelines:
|
| 559 |
+
source_timeline = random.choice(self.timelines)
|
| 560 |
+
branch_point = random.randint(0, len(source_timeline["words"]) - 1)
|
| 561 |
+
new_words = source_timeline["words"][:branch_point + 1]
|
| 562 |
+
current_word = new_words[-1]
|
| 563 |
+
additional_words = 2 + random.randint(0, 2)
|
| 564 |
+
for i in range(additional_words):
|
| 565 |
+
current_word = self.get_next_word_from_markov(current_word)
|
| 566 |
+
new_words.append(current_word)
|
| 567 |
+
self.timelines.append({
|
| 568 |
+
"id": f"timeline-{len(self.timelines)}",
|
| 569 |
+
"words": new_words,
|
| 570 |
+
"probability": source_timeline["probability"] * 0.8,
|
| 571 |
+
"timeScale": source_timeline["timeScale"],
|
| 572 |
+
"stability": source_timeline["stability"] * 0.8
|
| 573 |
+
})
|
| 574 |
+
self.timelines = [timeline for timeline in self.timelines
|
| 575 |
+
if timeline["stability"] > 0.2 or random.random() < 0.7]
def process_user_message(self, message):
    """Handle one user turn: log it, learn from it, advance the sim, reply.

    Appends the user message and the generated "future" reply to
    conversation_history, bumps total_interactions, and returns the
    response dict produced by generate_future_response
    ({"text", "timeOffset", "confidence"}).
    """
    self.conversation_history.append(
        {
            "role": "user",
            "content": message,
            "timestamp": datetime.now().timestamp(),
        }
    )
    self.total_interactions += 1
    # Learn vocabulary/topic structure from the message before replying.
    topics = self.extract_topics(message)
    self.update_knowledge_base(topics, message)
    # Advance the internal simulation a few steps so predictions are fresh.
    for _ in range(5):
        self.tick()
    reply = self.generate_future_response(message, topics)
    self.conversation_history.append(
        {
            "role": "future",
            "content": reply["text"],
            "timestamp": datetime.now().timestamp(),
            "timeOffset": reply["timeOffset"],
            "confidence": reply["confidence"],
        }
    )
    self.update_model_growth()
    return reply
def extract_topics(self, message):
    """Extract topics from message.

    Lower-cases and strips punctuation, then: (1) counts words already
    known to the topic extractor, (2) registers new words (>3 chars) as
    topics and cross-links them with the other topics found in this
    message, (3) grows the word corpus and wires new words into the
    Markov chain against a few random existing words.

    Returns the list of topic words found/registered for this message.

    NOTE(review): reconstructed from a whitespace-mangled paste — the
    nesting of the relation-building and Markov-wiring loops inside the
    word loops should be confirmed against the original file.
    """
    # Keep words longer than 2 chars after stripping punctuation.
    words = [w for w in re.sub(r'[^\w\s]', '', message.lower()).split() if len(w) > 2]
    extracted_topics = []
    # Pass 1: bump counts for already-known topic words.
    for word in words:
        if word in self.topic_extractor:
            extracted_topics.append(word)
            self.topic_extractor[word] += 1
    # Pass 2: register new topic words (>3 chars) and relate them to the
    # topics already collected for this message (bidirectional links).
    for word in words:
        if word not in self.topic_extractor and len(word) > 3:
            self.topic_extractor[word] = 1
            extracted_topics.append(word)
            for topic in extracted_topics:
                if topic != word:
                    if word not in self.topic_relations:
                        self.topic_relations[word] = []
                    self.topic_relations[word].append(topic)
                    if topic not in self.topic_relations:
                        self.topic_relations[topic] = []
                    self.topic_relations[topic].append(word)
    # Pass 3: grow the corpus and wire new words into the Markov chain
    # against up to 3 random existing corpus words (both directions).
    for word in words:
        if len(word) > 3 and word not in self.word_corpus:
            self.word_corpus.append(word)
            random_existing_words = random.sample(self.word_corpus, min(3, len(self.word_corpus)))
            for existing_word in random_existing_words:
                if existing_word not in self.markov_chain:
                    self.markov_chain[existing_word] = {}
                self.markov_chain[existing_word][word] = self.markov_chain[existing_word].get(word, 0) + 1
                if word not in self.markov_chain:
                    self.markov_chain[word] = {}
                self.markov_chain[word][existing_word] = self.markov_chain[word].get(existing_word, 0) + 1
    return extracted_topics
def update_knowledge_base(self, topics, message):
    """Record *message* under every topic, keeping at most the 10 newest
    messages per topic (oldest entries are dropped first)."""
    for topic in topics:
        bucket = self.knowledge_base.setdefault(topic, [])
        bucket.append(message)
        if len(bucket) > 10:
            bucket.pop(0)
def generate_future_response(self, message, topics):
    """Build the future-self reply.

    Returns a dict with "text" (the templated reply), "timeOffset"
    (claimed years in the future) and "confidence" (0.3 baseline plus
    contributions from self-model accuracy and growth rate, capped at
    0.95).
    """
    offset_years = self.determine_time_offset()
    # Confidence grows with self-model accuracy (weight 0.5) and the
    # current growth rate (weight 0.2) on top of a 0.3 baseline.
    raw_confidence = 0.3 + (self.self_model_accuracy * 0.5) + (self.model_growth_rate * 0.2)
    chosen_timeline = self.select_relevant_timeline(topics)
    reply_text = self.construct_response(message, topics, offset_years, chosen_timeline)
    return {
        "text": reply_text,
        "timeOffset": offset_years,
        "confidence": min(0.95, raw_confidence),
    }
def determine_time_offset(self):
    """Choose how many years ahead the reply claims to come from (min 1).

    A more accurate self-model pushes the claim further into the future;
    a single uniform draw in [-1, 1) adds +/-1 year of jitter.
    """
    baseline_years = 1 + int(self.self_model_accuracy * 10)
    jitter = random.uniform(-1, 1)  # same single RNG draw as random()*2-1
    return max(1, round(baseline_years + jitter))
def select_relevant_timeline(self, topics):
    """Pick the timeline that best matches *topics*.

    Each timeline word scores 2 for a direct topic hit and 1 for each
    topic it is related to; the total is weighted by the timeline's
    probability. If nothing matches at all (best score is 0), fall back
    to a random pick among the up-to-three most probable timelines.
    Returns None when there are no timelines.
    """
    if not self.timelines:
        return None
    top_timeline = None
    top_score = -1
    for candidate in self.timelines:
        raw_score = 0
        for word in candidate["words"]:
            if word in topics:
                raw_score += 2
            for topic in topics:
                if topic in self.topic_relations and word in self.topic_relations[topic]:
                    raw_score += 1
        weighted = raw_score * candidate["probability"]
        if weighted > top_score:
            top_score = weighted
            top_timeline = candidate
    if top_score == 0:
        # No topical overlap anywhere: pick randomly among the most
        # probable timelines instead of returning an arbitrary zero-match.
        by_probability = sorted(self.timelines, key=lambda t: t["probability"], reverse=True)
        top_timeline = by_probability[random.randint(0, min(2, len(by_probability) - 1))]
    return top_timeline
def construct_response(self, message, topics, time_offset, timeline):
    """Construct response using templates and filling in variables.

    Picks a random template from self.response_templates and substitutes
    each {{placeholder}} it contains — some from helper generators
    (future, insight, outcome, realization, consequences, suggestion),
    some from inline word lists, some from the message/topics directly.
    Any placeholder left unfilled is replaced with "interesting".
    """
    template_idx = random.randint(0, len(self.response_templates) - 1)
    template = self.response_templates[template_idx]
    if "{{future}}" in template:
        future_scenario = self.generate_future_scenario(topics, timeline)
        template = template.replace("{{future}}", future_scenario)
    if "{{timeYears}}" in template:
        template = template.replace("{{timeYears}}", str(time_offset))
    if "{{insight}}" in template:
        insight = self.generate_insight(topics)
        template = template.replace("{{insight}}", insight)
    if "{{topic}}" in template and topics:
        topic = random.choice(topics)
        template = template.replace("{{topic}}", topic)
    if "{{outcome}}" in template:
        outcome = self.generate_outcome(topics, timeline)
        template = template.replace("{{outcome}}", outcome)
    if "{{impact}}" in template:
        impacts = ["changed", "transformed", "refined", "confirmed", "challenged"]
        impact = random.choice(impacts)
        template = template.replace("{{impact}}", impact)
    if "{{realization}}" in template:
        realization = self.generate_realization(topics)
        template = template.replace("{{realization}}", realization)
    # Echo a short (up to 2-word) snippet of the user's own message back
    # as the "activity" — only for messages longer than 10 characters.
    if "{{activity}}" in template and len(message) > 10:
        words = message.split()
        activity_idx = random.randint(0, len(words) - 1)
        end_idx = min(activity_idx + 2, len(words))
        activity = " ".join(words[activity_idx:end_idx])
        template = template.replace("{{activity}}", activity)
    if "{{futureResult}}" in template:
        results = [
            "becomes a core part of your expertise",
            "evolves into something quite different",
            "connects to unexpected opportunities",
            "proves more valuable than you currently realize",
            "becomes a foundation for your future work"
        ]
        result = random.choice(results)
        template = template.replace("{{futureResult}}", result)
    if "{{decision}}" in template:
        decisions = ["path", "choice", "approach", "perspective", "commitment", "idea"]
        decision = random.choice(decisions)
        template = template.replace("{{decision}}", decision)
    if "{{consequences}}" in template:
        consequences = self.generate_consequences(topics, timeline)
        template = template.replace("{{consequences}}", consequences)
    if "{{suggestion}}" in template:
        suggestion = self.generate_suggestion(topics)
        template = template.replace("{{suggestion}}", suggestion)
    if "{{subject}}" in template and topics:
        subject = random.choice(topics)
        template = template.replace("{{subject}}", subject)
    if "{{accuracy}}" in template:
        accuracies = [
            "surprisingly accurate", "partially correct",
            "insightful but incomplete",
            "headed in the right direction", "more significant than you realize"
        ]
        accuracy = random.choice(accuracies)
        template = template.replace("{{accuracy}}", accuracy)
    if "{{evidence}}" in template:
        evidences = [
            "explored it further", "seen the results", "lived through it",
            "connected those dots", "studied this extensively", "experienced it firsthand"
        ]
        evidence = random.choice(evidences)
        template = template.replace("{{evidence}}", evidence)
    # Catch-all: any placeholder not handled above becomes "interesting".
    template = re.sub(r'\{\{[^}]+\}\}', "interesting", template)
    return template
def generate_future_scenario(self, topics, timeline):
    """Generate a future scenario based on topics and timeline.

    Preference order: (1) chain the timeline's words with " leads to ",
    optionally appending a matching predicted future; (2) walk the Markov
    chain 4 steps from a random topic; (3) fall back to
    generate_future_phrase(5) when there is neither timeline nor topics.
    """
    scenario = ""
    if timeline and timeline["words"]:
        scenario = " leads to ".join(timeline["words"])
        # Predicted futures attached to this timeline, matched by id.
        predictions = [p for p in self.predicted_futures if p["timelineId"] == timeline["id"]]
        if predictions:
            prediction = random.choice(predictions)
            scenario += ", ultimately resulting in " + " ".join(prediction["words"])
    else:
        if topics:
            base_topic = random.choice(topics)
            current = base_topic
            scenario_words = [current]
            # 4-step Markov walk seeded by the chosen topic.
            for i in range(4):
                current = self.get_next_word_from_markov(current)
                scenario_words.append(current)
            scenario = " ".join(scenario_words)
        else:
            scenario = self.generate_future_phrase(5)
    return scenario
def generate_insight(self, topics):
    """Generate an insight based on patterns and topics.

    Picks a random pattern from self.insight_patterns and fills each
    {{placeholder}} it contains from inline word lists (or from *topics*
    for {{topic}}/{{activity}}). Any leftover placeholder becomes
    "development".
    """
    pattern_idx = random.randint(0, len(self.insight_patterns) - 1)
    pattern = self.insight_patterns[pattern_idx]
    if "{{attribute}}" in pattern:
        attributes = [
            "resilience", "creativity", "focus", "adaptability",
            "intuition", "strategic thinking", "empathy"
        ]
        attribute = random.choice(attributes)
        pattern = pattern.replace("{{attribute}}", attribute)
    if "{{topic}}" in pattern and topics:
        topic = random.choice(topics)
        pattern = pattern.replace("{{topic}}", topic)
    if "{{outcome}}" in pattern:
        outcomes = [
            "a clear sense of purpose", "unexpected confidence",
            "a unique perspective", "valuable expertise",
            "a new direction"
        ]
        outcome = random.choice(outcomes)
        pattern = pattern.replace("{{outcome}}", outcome)
    if "{{experience}}" in pattern:
        experiences = [
            "encounter a key mentor", "discover a new passion",
            "solve a challenging problem", "make a brave decision",
            "connect seemingly unrelated ideas"
        ]
        experience = random.choice(experiences)
        pattern = pattern.replace("{{experience}}", experience)
    if "{{state}}" in pattern:
        states = [
            "exploration", "uncertainty", "learning", "questioning",
            "experimenting", "reflection"
        ]
        state = random.choice(states)
        pattern = pattern.replace("{{state}}", state)
    if "{{domain}}" in pattern:
        domains = [
            "technology", "creativity", "relationships", "career",
            "personal growth", "community", "leadership"
        ]
        domain = random.choice(domains)
        pattern = pattern.replace("{{domain}}", domain)
    # {{activity}} is derived from a topic rather than a word list.
    if "{{activity}}" in pattern and topics:
        topic = random.choice(topics)
        pattern = pattern.replace("{{activity}}", "understanding " + topic)
    if "{{role}}" in pattern:
        roles = [
            "expertise", "perspective", "approach", "contribution",
            "methodology", "philosophy", "framework"
        ]
        role = random.choice(roles)
        pattern = pattern.replace("{{role}}", role)
    # Catch-all for any placeholder not handled above.
    pattern = re.sub(r'\{\{[^}]+\}\}', "development", pattern)
    return pattern
def generate_outcome(self, topics, timeline):
    """Describe a likely outcome.

    Preference order: the timeline's two most recent words, then a
    3-step Markov walk from a random topic, then a stock phrase.
    """
    if timeline and timeline["words"]:
        # Anchor the outcome on the tail of the selected timeline.
        tail = " ".join(timeline["words"][-2:])
        return tail + " becoming significant to you"
    if topics:
        chain_words = [random.choice(topics)]
        for _ in range(3):
            chain_words.append(self.get_next_word_from_markov(chain_words[-1]))
        return " ".join(chain_words)
    generic_outcomes = [
        "a deeper understanding of what truly matters to you",
        "an unexpected opportunity that aligns with your values",
        "a more nuanced perspective that serves you well",
        "connecting seemingly unrelated areas of interest",
        "developing a unique approach that becomes your signature"
    ]
    return random.choice(generic_outcomes)
def generate_realization(self, topics):
    """Return a stock "realization" line; with a 50% chance (when topics
    exist) a random topic is appended to tie it to the conversation."""
    stock_lines = [
        "the obstacles you're facing now are developing crucial skills",
        "your unique combination of interests creates your edge",
        "what seems like a detour is actually a direct path",
        "the questions you're asking matter more than immediate answers",
        "your intuitive approaches often outperform conventional wisdom",
        "consistency in small actions leads to your biggest breakthroughs",
        "your current doubts become the foundation of future confidence"
    ]
    line = random.choice(stock_lines)
    if topics and random.random() > 0.5:
        line += " particularly regarding " + random.choice(topics)
    return line
def generate_consequences(self, topics, timeline):
    """Generate potential consequences.

    Builds a base clause (from the timeline tail, a random topic, or a
    stock phrase) and appends a random temporal qualifier.
    """
    base_consequence = ""
    if timeline and timeline["words"]:
        # Anchor the consequence on the timeline's two most recent words.
        base_consequence = "leading to " + " ".join(timeline["words"][-2:])
    elif topics:
        topic = random.choice(topics)
        base_consequence = "connecting to " + topic + " in unexpected ways"
    else:
        consequences = [
            "opening new perspectives",
            "creating valuable connections",
            "developing specialized knowledge",
            "revealing hidden opportunities",
            "challenging key assumptions"
        ]
        base_consequence = random.choice(consequences)
    temporal_phrases = [
        "sooner than you expect",
        "with effects that cascade over time",
        "creating a foundation for future decisions",
        "with initial challenges but long-term benefits",
        "in ways that aren't immediately obvious"
    ]
    temporal_phrase = random.choice(temporal_phrases)
    return base_consequence + " " + temporal_phrase
def generate_suggestion(self, topics):
    """Pick a stock suggestion; with a 50% chance (when topics exist) a
    random topic is prepended with " and "."""
    stock_suggestions = [
        "connecting seemingly unrelated ideas and experiences",
        "documenting your thought process, not just outcomes",
        "pursuing quality of understanding over quantity of information",
        "allowing more time for reflection between actions",
        "articulating your unique perspective more boldly",
        "recognizing patterns across different domains of interest",
        "exploring the boundaries where different fields intersect"
    ]
    picked = random.choice(stock_suggestions)
    if topics and random.random() > 0.5:
        picked = random.choice(topics) + " and " + picked
    return picked
def update_model_growth(self):
    """Nudge the model's growth rate and accuracy after an interaction.

    Growth rate is an exponential moving average (95% old, 5% new) of a
    base rate scaled by topic diversity; accuracy climbs by the growth
    rate and saturates at 0.9.
    """
    base_growth = 0.005
    # Diversity saturates once 100 distinct topics have been seen.
    diversity = min(1, len(self.topic_extractor) / 100)
    target_growth = base_growth * (1 + diversity)
    self.model_growth_rate = self.model_growth_rate * 0.95 + target_growth * 0.05
    self.self_model_accuracy = min(0.9, self.self_model_accuracy + self.model_growth_rate)
def get_model_stats(self):
    """Return a display-ready snapshot of model metrics.

    Percentages are pre-rounded integers except growthRate, which is a
    percentage with two decimal places.
    """
    accuracy_pct = round(self.self_model_accuracy * 100)
    return {
        "knowledgeSize": len(self.knowledge_base),
        "vocabularySize": len(self.word_corpus),
        "selfModelAccuracy": accuracy_pct,
        # round(x * 10000) / 100 == percentage with 2 decimals.
        "growthRate": round(self.model_growth_rate * 10000) / 100,
        "totalInteractions": self.total_interactions,
        # Scales with topic-relation count, one point per 10 entries, capped.
        "contextualAwareness": min(100, round(len(self.topic_relations) / 10)),
        "learningProgress": min(100, accuracy_pct),
    }
def set_recursion_depth(self, depth):
    """Setter for the simulation's recursion_depth parameter (no validation)."""
    self.recursion_depth = depth
def set_time_scale(self, scale):
    """Setter for the simulation's time_scale parameter (no validation)."""
    self.time_scale = scale
def set_causal_strength(self, strength):
    """Setter for the simulation's causal_strength parameter (no validation)."""
    self.causal_strength = strength
def run_simulation_steps(self, steps=10):
    """Advance the simulation by *steps* ticks (default 10)."""
    remaining = steps
    while remaining > 0:
        self.tick()
        remaining -= 1
def visualize_network(self, figsize=(12, 8)):
    """Visualize the causal network using networkx.

    Builds a DiGraph from self.nodes / self.edges, draws it into a new
    matplotlib figure (node colour = time scale, node size = prediction,
    edge width = strength) and returns the pyplot module with that
    figure current — the caller is responsible for saving/closing it.
    """
    G = nx.DiGraph()
    for node in self.nodes:
        G.add_node(node["id"],
                   word=node["word"],
                   timeScale=node["timeScale"],
                   value=node["value"],
                   prediction=node["prediction"])
    for edge in self.edges:
        G.add_edge(edge["source"], edge["target"],
                   weight=edge["strength"],
                   age=edge["age"])
    plt.figure(figsize=figsize)
    # Fixed seed so the layout is stable across redraws.
    pos = nx.spring_layout(G, seed=42)
    # One colour per time scale; node["timeScale"] indexes this list.
    time_scale_colors = ['#42c5f4', '#4286f4', '#4242f5', '#8442f5', '#f542f5']
    node_colors = [time_scale_colors[G.nodes[n]['timeScale']] for n in G.nodes]
    # Node size grows with the node's prediction value.
    node_sizes = [100 + 500 * G.nodes[n]['prediction'] for n in G.nodes]
    nx.draw_networkx_nodes(G, pos, node_color=node_colors, node_size=node_sizes, alpha=0.8)
    edges = G.edges()
    # Edge width is proportional to causal strength.
    edge_weights = [G[u][v]['weight'] * 2 for u, v in edges]
    nx.draw_networkx_edges(G, pos, edgelist=edges, width=edge_weights,
                           edge_color='#8888ff', alpha=0.6, arrows=True, arrowsize=10)
    nx.draw_networkx_labels(G, pos, labels={n: G.nodes[n]['word'] for n in G.nodes},
                            font_size=10, font_color='white')
    plt.title('Temporal Prediction Network')
    plt.axis('off')
    plt.tight_layout()
    return plt
# -------------------------------------------------------------------
# Global System Initialization
# -------------------------------------------------------------------
# Build the shared system instance and warm it up with 20 simulation
# steps before the Gradio UI starts serving requests.
system = TemporalPredictionSystem()
# NOTE(review): fixed 2-second pause before warm-up — purpose unclear
# from this file; consider removing.
time.sleep(2)
system.run_simulation_steps(20)
print("Future Self Conversation System initialized and pre-trained.")
def chat_fn(user_message, history):
    """Gradio chat handler.

    Feeds the user message to the global `system`, appends the
    (user, bot) pair to the chat history (legacy tuple format expected
    by gr.Chatbot here), then advances the simulation 5 steps.

    Returns the updated history list.
    """
    if history is None:
        history = []
    # Fix: ignore empty or whitespace-only submissions instead of feeding
    # them to the model and polluting the conversation history.
    if not user_message or not user_message.strip():
        return history
    response = system.process_user_message(user_message)
    # Annotate the reply with its claimed time offset and confidence.
    bot_message = f"{response['text']} ({response['timeOffset']} years in future | Confidence: {int(response['confidence']*100)}%)"
    history.append((user_message, bot_message))
    # Let the simulation evolve a little between turns.
    system.run_simulation_steps(5)
    return history
def update_network_visualization():
    """Render the global system's causal network to a PIL image for Gradio.

    visualize_network() returns the pyplot module with its new figure
    current; the figure is serialized to PNG in memory, re-opened as a
    PIL Image, and the matplotlib figure is closed to free memory.
    """
    plt_obj = system.visualize_network()
    fig = plt.gcf()  # the figure created inside visualize_network is current
    buf = io.BytesIO()
    fig.savefig(buf, format="png")
    buf.seek(0)
    img = Image.open(buf)
    plt.close(fig)  # avoid accumulating figures across button clicks
    return img
# -------------------------------------------------------------------
# Gradio App Layout
# -------------------------------------------------------------------
# Two tabs: the chat interface (wired to chat_fn) and an on-demand
# network visualization (wired to update_network_visualization).
with gr.Blocks(title="Future Self Conversation System") as demo:
    gr.Markdown("# Future Self Conversation System")
    gr.Markdown("Converse with a simulation of your future self, powered by a neural temporal prediction model.")
    with gr.Tabs():
        with gr.TabItem("Chat"):
            chatbot = gr.Chatbot(label="Conversation")
            txt = gr.Textbox(placeholder="Type your message here...", label="Your Message")
            send_btn = gr.Button("Send")
            # After the reply renders, clear the input textbox.
            send_btn.click(fn=chat_fn, inputs=[txt, chatbot], outputs=chatbot).then(lambda: "", None, txt)
        with gr.TabItem("Network Visualization"):
            viz_btn = gr.Button("Update Network Visualization")
            image_out = gr.Image(label="Neural Network Visualization")
            viz_btn.click(fn=update_network_visualization, inputs=[], outputs=image_out)
# -------------------------------------------------------------------
# Launch the App
# -------------------------------------------------------------------
# Start the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()