AEUPH committed on
Commit
a293b60
·
verified ·
1 Parent(s): faa03ed

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -31
app.py CHANGED
@@ -72,8 +72,9 @@ class TemporalPredictionSystem:
72
  self.model_growth_rate = 0
73
  self.total_interactions = 0
74
 
75
-
76
  self.quote_offset = 0
 
77
  # Response generators
78
  self.response_templates = [
79
  "Based on your current trajectory, I see {{future}}.",
@@ -86,7 +87,6 @@ class TemporalPredictionSystem:
86
  "From my vantage point, I'd suggest focusing more on {{suggestion}}.",
87
  "Your intuition about {{subject}} is {{accuracy}} - I know because I've {{evidence}}."
88
  ]
89
-
90
  self.insight_patterns = [
91
  "this challenge ultimately strengthens your {{attribute}}",
92
  "you develop a unique perspective on {{topic}} that others find valuable",
@@ -106,7 +106,7 @@ class TemporalPredictionSystem:
106
  self.topic_extractor[word] = 1
107
  self.topic_relations[word] = []
108
 
109
- # Fetch quotes data and build markov chain
110
  self.fetch_quotes_data()
111
 
112
  def fetch_quotes_data(self):
@@ -115,47 +115,41 @@ class TemporalPredictionSystem:
115
  url = f"https://datasets-server.huggingface.co/rows?dataset=Abirate%2Fenglish_quotes&config=default&split=train&offset={self.quote_offset}&length=100"
116
  response = requests.get(url)
117
  data = response.json()
118
-
119
  if data and "rows" in data:
120
  for row in data["rows"]:
121
  if row and "row" in row and "quote" in row["row"]:
122
  quote = row["row"]["quote"]
123
  self.quotes_data.append(quote)
124
-
125
  # Process quote into words for corpus
126
  words = [w for w in re.sub(r'[^\w\s]', '', quote.lower()).split() if len(w) > 3]
127
  self.word_corpus = list(set(self.word_corpus + words))
128
-
129
  # Extract tags if available
130
  if "tags" in row["row"]:
131
  tags = row["row"]["tags"] if isinstance(row["row"]["tags"], list) else [row["row"]["tags"]]
132
  self.tags_data.extend(tags)
133
-
134
- # Add tags to corpus
135
  tag_words = [tag.lower() for tag in tags if len(tag) > 3]
136
  self.word_corpus = list(set(self.word_corpus + tag_words))
137
-
138
  print(f"Loaded {len(self.quotes_data)} quotes and {len(self.tags_data)} tags")
139
  print(f"Word corpus expanded to {len(self.word_corpus)} words")
140
-
141
  # Update topic extractor with any new words
142
  for word in self.word_corpus:
143
  if word not in self.topic_extractor:
144
  self.topic_extractor[word] = 1
145
  self.topic_relations[word] = []
146
-
147
- # Rebuild Markov chain to include new quotes and tags
148
  self.build_initial_markov_chain()
149
-
150
- # Increment the offset for next iteration to load additional quotes
151
  self.quote_offset += 100
152
-
153
  except Exception as e:
154
  print(f"Error fetching quotes data: {e}")
155
- # Rebuild the Markov chain with current corpus if fetch fails
156
  self.build_initial_markov_chain()
157
 
158
-
159
  def build_initial_markov_chain(self):
160
  """Build a Markov chain from quotes and corpus data"""
161
  chain = {}
@@ -470,7 +464,6 @@ class TemporalPredictionSystem:
470
  """Detect causal loops in the network using DFS"""
471
  loops = []
472
  visited = set()
473
-
474
  def dfs(node_id, path):
475
  if node_id in path:
476
  loops.append(path[path.index(node_id):] + [node_id])
@@ -997,6 +990,7 @@ class TemporalPredictionSystem:
997
  # Global System Initialization
998
  # -------------------------------------------------------------------
999
  system = TemporalPredictionSystem()
 
1000
  time.sleep(2)
1001
  system.run_simulation_steps(20)
1002
  print("Future Self Conversation System initialized and pre-trained.")
@@ -1023,22 +1017,56 @@ def update_network_visualization():
1023
  plt.close(fig)
1024
  return img
1025
 
 
 
 
 
 
 
 
 
 
1026
  # -------------------------------------------------------------------
1027
- # Gradio App Layout
1028
  # -------------------------------------------------------------------
1029
  with gr.Blocks(title="Future Self Conversation System") as demo:
1030
- gr.Markdown("# Future Self Conversation System")
1031
- gr.Markdown("Converse with a simulation of your future self, powered by a neural temporal prediction model.")
1032
- with gr.Tabs():
1033
- with gr.TabItem("Chat"):
1034
- chatbot = gr.Chatbot(label="Conversation")
1035
- txt = gr.Textbox(placeholder="Type your message here...", label="Your Message")
1036
- send_btn = gr.Button("Send")
1037
- send_btn.click(fn=chat_fn, inputs=[txt, chatbot], outputs=chatbot).then(lambda: "", None, txt)
1038
- with gr.TabItem("Network Visualization"):
1039
- viz_btn = gr.Button("Update Network Visualization")
1040
- image_out = gr.Image(label="Neural Network Visualization")
1041
- viz_btn.click(fn=update_network_visualization, inputs=[], outputs=image_out)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1042
 
1043
  # -------------------------------------------------------------------
1044
  # Launch the App
 
72
  self.model_growth_rate = 0
73
  self.total_interactions = 0
74
 
75
+ # Offset for continuous quote expansion
76
  self.quote_offset = 0
77
+
78
  # Response generators
79
  self.response_templates = [
80
  "Based on your current trajectory, I see {{future}}.",
 
87
  "From my vantage point, I'd suggest focusing more on {{suggestion}}.",
88
  "Your intuition about {{subject}} is {{accuracy}} - I know because I've {{evidence}}."
89
  ]
 
90
  self.insight_patterns = [
91
  "this challenge ultimately strengthens your {{attribute}}",
92
  "you develop a unique perspective on {{topic}} that others find valuable",
 
106
  self.topic_extractor[word] = 1
107
  self.topic_relations[word] = []
108
 
109
+ # Fetch quotes data and build Markov chain
110
  self.fetch_quotes_data()
111
 
112
  def fetch_quotes_data(self):
 
115
  url = f"https://datasets-server.huggingface.co/rows?dataset=Abirate%2Fenglish_quotes&config=default&split=train&offset={self.quote_offset}&length=100"
116
  response = requests.get(url)
117
  data = response.json()
118
+
119
  if data and "rows" in data:
120
  for row in data["rows"]:
121
  if row and "row" in row and "quote" in row["row"]:
122
  quote = row["row"]["quote"]
123
  self.quotes_data.append(quote)
 
124
  # Process quote into words for corpus
125
  words = [w for w in re.sub(r'[^\w\s]', '', quote.lower()).split() if len(w) > 3]
126
  self.word_corpus = list(set(self.word_corpus + words))
 
127
  # Extract tags if available
128
  if "tags" in row["row"]:
129
  tags = row["row"]["tags"] if isinstance(row["row"]["tags"], list) else [row["row"]["tags"]]
130
  self.tags_data.extend(tags)
 
 
131
  tag_words = [tag.lower() for tag in tags if len(tag) > 3]
132
  self.word_corpus = list(set(self.word_corpus + tag_words))
133
+
134
  print(f"Loaded {len(self.quotes_data)} quotes and {len(self.tags_data)} tags")
135
  print(f"Word corpus expanded to {len(self.word_corpus)} words")
136
+
137
  # Update topic extractor with any new words
138
  for word in self.word_corpus:
139
  if word not in self.topic_extractor:
140
  self.topic_extractor[word] = 1
141
  self.topic_relations[word] = []
142
+
143
+ # Rebuild Markov chain with new data
144
  self.build_initial_markov_chain()
145
+
146
+ # Increment offset for the next fetch
147
  self.quote_offset += 100
148
+
149
  except Exception as e:
150
  print(f"Error fetching quotes data: {e}")
 
151
  self.build_initial_markov_chain()
152
 
 
153
  def build_initial_markov_chain(self):
154
  """Build a Markov chain from quotes and corpus data"""
155
  chain = {}
 
464
  """Detect causal loops in the network using DFS"""
465
  loops = []
466
  visited = set()
 
467
  def dfs(node_id, path):
468
  if node_id in path:
469
  loops.append(path[path.index(node_id):] + [node_id])
 
990
  # Global System Initialization
991
  # -------------------------------------------------------------------
992
  system = TemporalPredictionSystem()
993
+ # Run some initial simulation steps to pre-train
994
  time.sleep(2)
995
  system.run_simulation_steps(20)
996
  print("Future Self Conversation System initialized and pre-trained.")
 
1017
  plt.close(fig)
1018
  return img
1019
 
1020
+ def pre_train_fn(progress=gr.Progress()):
1021
+ steps = 20
1022
+ for i in range(steps):
1023
+ system.run_simulation_steps(1)
1024
+ progress((i+1)/steps)
1025
+ yield f"<h3>Pre-training in progress: {(i+1)/steps*100:.0f}% complete</h3>", (i+1)/steps*100, gr.update(visible=False)
1026
+ time.sleep(0.2)
1027
+ yield "<h3>Pre-training complete!</h3>", 100, gr.update(visible=True)
1028
+
1029
  # -------------------------------------------------------------------
1030
+ # Gradio App Layout (with custom CSS for mobile responsiveness)
1031
  # -------------------------------------------------------------------
1032
  with gr.Blocks(title="Future Self Conversation System") as demo:
1033
+
1034
+ # Custom CSS for chat height and mobile responsiveness
1035
+ gr.HTML("""
1036
+ <style>
1037
+ /* Lower chat area height */
1038
+ #chatbot { height: 300px !important; }
1039
+ /* Mobile responsive adjustments */
1040
+ @media (max-width: 600px) {
1041
+ #chatbot { height: 200px !important; }
1042
+ }
1043
+ /* Ensure split screen columns stack on mobile */
1044
+ .gradio-container .row { flex-wrap: wrap; }
1045
+ .gradio-container .column { flex: 1 1 300px; }
1046
+ </style>
1047
+ """)
1048
+
1049
+ # Pre-training progress area (shown on page load)
1050
+ pretrain_status = gr.HTML("<h3>Initializing pre-training...</h3>")
1051
+ progress_bar = gr.Slider(minimum=0, maximum=100, value=0, interactive=False, label="Pre-training Progress")
1052
+
1053
+ # Main interface (hidden until pre-training completes)
1054
+ main_interface = gr.Column(visible=False)
1055
+
1056
+ with main_interface:
1057
+ with gr.Row():
1058
+ with gr.Column():
1059
+ chatbot = gr.Chatbot(label="Conversation", elem_id="chatbot")
1060
+ txt = gr.Textbox(placeholder="Type your message here...", label="Your Message")
1061
+ send_btn = gr.Button("Send")
1062
+ with gr.Column():
1063
+ network_image = gr.Image(label="Neural Network Visualization")
1064
+ # Auto-update network visualization every 5 seconds.
1065
+ interval = gr.Interval(5, fn=update_network_visualization, outputs=network_image)
1066
+ send_btn.click(fn=chat_fn, inputs=[txt, chatbot], outputs=chatbot).then(lambda: "", None, txt)
1067
+
1068
+ # On page load, run pre-training and then reveal the main interface.
1069
+ demo.load(fn=pre_train_fn, outputs=[pretrain_status, progress_bar, main_interface])
1070
 
1071
  # -------------------------------------------------------------------
1072
  # Launch the App