mset committed on
Commit
a543287
verified
1 Parent(s): 25fe19e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +202 -42
app.py CHANGED
@@ -94,39 +94,105 @@ class VectorizedGeopoliticalAI:
94
  self.entity_embeddings[entity] = vector / (np.linalg.norm(vector) + 1e-8)
95
 
96
  def text_to_vector(self, text):
97
- """Converte testo in rappresentazione vettoriale"""
98
- # Tokenizzazione e pesatura TF-IDF semplificata
99
- words = re.findall(r'\b\w+\b', text.lower())
 
100
 
101
- # Crea vettore composito
102
  composite_vector = np.zeros(self.vector_dimensions)
103
- word_count = 0
104
 
105
- # Mappa semantica delle parole chiave
106
  semantic_mapping = {
107
- 'war': 'conflict', 'peace': 'diplomacy', 'trade': 'economy',
108
- 'military': 'military', 'sanctions': 'economy', 'alliance': 'alliance',
109
- 'nuclear': 'military', 'energy': 'resource', 'oil': 'resource',
110
- 'crisis': 'conflict', 'negotiation': 'diplomacy', 'summit': 'diplomacy'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
111
  }
112
 
 
 
 
 
113
  for word in words:
114
- if word in semantic_mapping and semantic_mapping[word] in self.semantic_space:
115
  concept = semantic_mapping[word]
116
- composite_vector += self.semantic_space[concept]
117
- word_count += 1
 
118
  elif word in self.semantic_space:
119
- composite_vector += self.semantic_space[word]
120
- word_count += 1
121
-
122
- # Normalizzazione con peso logaritmico
123
- if word_count > 0:
124
- composite_vector /= math.log(word_count + 1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
 
126
- return composite_vector / (np.linalg.norm(composite_vector) + 1e-8)
127
 
128
  def compute_entity_relations(self, entities):
129
- """Calcola relazioni tra entità nello spazio vettoriale"""
130
  relations = {}
131
 
132
  for i, entity1 in enumerate(entities):
@@ -140,24 +206,62 @@ class VectorizedGeopoliticalAI:
140
  vec1 = self.entity_embeddings[entity1]
141
  vec2 = self.entity_embeddings[entity2]
142
 
143
- # Similarità coseno
144
- cosine_sim = np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
 
 
145
 
146
  # Distanza euclidea normalizzata
147
  euclidean_dist = np.linalg.norm(vec1 - vec2) / math.sqrt(self.vector_dimensions)
148
 
149
- # Proiezione su spazio delle alleanze
150
- alliance_projection = np.dot(vec1 + vec2, self.semantic_space['alliance'])
 
 
 
 
 
 
 
 
 
151
 
152
- # Proiezione su spazio dei conflitti
153
- conflict_projection = np.dot(vec1 - vec2, self.semantic_space['conflict'])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
154
 
155
  relations[(entity1, entity2)] = {
156
  'similarity': cosine_sim,
157
  'distance': euclidean_dist,
158
  'alliance_potential': alliance_projection,
159
- 'conflict_potential': abs(conflict_projection),
160
- 'power_differential': np.linalg.norm(vec1) - np.linalg.norm(vec2)
161
  }
162
 
163
  return relations
@@ -462,28 +566,84 @@ class VectorizedGeopoliticalAI:
462
  return "\n".join(report_parts)
463
 
464
  def calculate_vector_risk_metrics(self, vector_analysis, relations):
465
- """Calcola metriche di rischio basate su analisi vettoriale"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
466
 
467
- # Risk magnitude basato su proiezioni semantiche
468
- conflict_indicators = ['conflict', 'military', 'power']
469
- risk_magnitude = sum(abs(vector_analysis['semantic_projections'].get(indicator, 0))
470
- for indicator in conflict_indicators)
 
 
 
471
 
472
  # Entropy del sistema basata su trasformazioni contestuali
473
- entropies = [transform['entropy'] for transform in vector_analysis['contextual_transforms'].values()]
474
- system_entropy = np.mean(entropies) if entropies else 0
 
 
 
 
 
 
 
 
 
 
 
 
 
475
 
476
- # Stability index basato su varianza delle relazioni
477
  if relations:
 
478
  similarities = [rel['similarity'] for rel in relations.values()]
479
- stability = 1.0 - np.var(similarities) if similarities else 1.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
480
  else:
481
- stability = 0.5
 
 
 
 
 
 
 
482
 
483
  return {
484
- 'magnitude': min(risk_magnitude, 1.0),
485
- 'entropy': system_entropy,
486
- 'stability': stability
487
  }
488
 
489
  # Inizializza il sistema AI vettoriale
 
94
  self.entity_embeddings[entity] = vector / (np.linalg.norm(vector) + 1e-8)
95
 
96
  def text_to_vector(self, text):
97
+ """Converte testo in rappresentazione vettoriale robusta"""
98
+ if not text or not text.strip():
99
+ # Vettore casuale per input vuoti
100
+ return np.random.normal(0, 0.1, self.vector_dimensions)
101
 
102
+ words = re.findall(r'\b\w+\b', text.lower())
103
  composite_vector = np.zeros(self.vector_dimensions)
 
104
 
105
+ # Mappa semantica estesa con pattern matching robusto
106
  semantic_mapping = {
107
+ # Conflitti e tensioni
108
+ 'war': 'conflict', 'guerra': 'conflict', 'fight': 'conflict',
109
+ 'conflict': 'conflict', 'crisis': 'conflict', 'tension': 'conflict',
110
+ 'attack': 'military', 'invasion': 'military', 'strike': 'military',
111
+
112
+ # Diplomazia e pace
113
+ 'peace': 'diplomacy', 'pace': 'diplomacy', 'negotiation': 'diplomacy',
114
+ 'summit': 'diplomacy', 'agreement': 'diplomacy', 'treaty': 'diplomacy',
115
+ 'diplomatic': 'diplomacy', 'dialogue': 'diplomacy',
116
+
117
+ # Economia e commercio
118
+ 'trade': 'economy', 'economic': 'economy', 'economy': 'economy',
119
+ 'sanctions': 'economy', 'embargo': 'economy', 'tariff': 'economy',
120
+ 'market': 'economy', 'investment': 'economy', 'gdp': 'economy',
121
+
122
+ # Militare e sicurezza
123
+ 'military': 'military', 'defense': 'military', 'security': 'military',
124
+ 'nuclear': 'military', 'missile': 'military', 'weapon': 'military',
125
+ 'army': 'military', 'navy': 'military', 'airforce': 'military',
126
+
127
+ # Alleanze e organizzazioni
128
+ 'alliance': 'alliance', 'nato': 'alliance', 'partnership': 'alliance',
129
+ 'coalition': 'alliance', 'bloc': 'alliance', 'union': 'alliance',
130
+
131
+ # Risorse e territorio
132
+ 'oil': 'resource', 'gas': 'resource', 'energy': 'resource',
133
+ 'water': 'resource', 'mineral': 'resource', 'pipeline': 'resource',
134
+ 'territory': 'territory', 'border': 'territory', 'region': 'territory',
135
+
136
+ # Potere e influenza
137
+ 'power': 'power', 'influence': 'influence', 'control': 'power',
138
+ 'domination': 'power', 'hegemony': 'power', 'superpower': 'power',
139
+
140
+ # Paesi specifici (mapping diretto)
141
+ 'ukraine': 'conflict', 'russia': 'power', 'china': 'power',
142
+ 'usa': 'power', 'america': 'power', 'taiwan': 'conflict',
143
+ 'iran': 'conflict', 'israel': 'conflict', 'gaza': 'conflict'
144
  }
145
 
146
+ matched_words = 0
147
+ semantic_weights = defaultdict(float)
148
+
149
+ # Prima passata: mapping diretto
150
  for word in words:
151
+ if word in semantic_mapping:
152
  concept = semantic_mapping[word]
153
+ if concept in self.semantic_space:
154
+ semantic_weights[concept] += 1.0
155
+ matched_words += 1
156
  elif word in self.semantic_space:
157
+ semantic_weights[word] += 1.0
158
+ matched_words += 1
159
+
160
+ # Seconda passata: pattern parziali
161
+ if matched_words == 0:
162
+ for word in words:
163
+ for pattern, concept in semantic_mapping.items():
164
+ if pattern in word or word in pattern:
165
+ if concept in self.semantic_space:
166
+ semantic_weights[concept] += 0.5
167
+ matched_words += 0.5
168
+
169
+ # Costruzione vettore con pesi
170
+ for concept, weight in semantic_weights.items():
171
+ if concept in self.semantic_space:
172
+ composite_vector += weight * self.semantic_space[concept]
173
+
174
+ # Aggiungi componente casuale se nessun match
175
+ if matched_words == 0:
176
+ # Crea vettore basato su hash del testo per consistenza
177
+ text_hash = int(hashlib.md5(text.encode()).hexdigest(), 16)
178
+ np.random.seed(text_hash % 2**31)
179
+ composite_vector = np.random.normal(0, 0.3, self.vector_dimensions)
180
+ matched_words = 1
181
+
182
+ # Normalizzazione adattiva
183
+ if matched_words > 0:
184
+ composite_vector *= math.log(matched_words + 1) / (matched_words + 0.1)
185
+
186
+ # Assicura che il vettore non sia zero
187
+ norm = np.linalg.norm(composite_vector)
188
+ if norm < 1e-6:
189
+ composite_vector = np.random.normal(0, 0.1, self.vector_dimensions)
190
+ norm = np.linalg.norm(composite_vector)
191
 
192
+ return composite_vector / norm
193
 
194
  def compute_entity_relations(self, entities):
195
+ """Calcola relazioni tra entità nello spazio vettoriale con valori realistici"""
196
  relations = {}
197
 
198
  for i, entity1 in enumerate(entities):
 
206
  vec1 = self.entity_embeddings[entity1]
207
  vec2 = self.entity_embeddings[entity2]
208
 
209
+ # Similarità coseno con correzione numerica
210
+ dot_product = np.dot(vec1, vec2)
211
+ norm_product = np.linalg.norm(vec1) * np.linalg.norm(vec2)
212
+ cosine_sim = dot_product / (norm_product + 1e-8)
213
 
214
  # Distanza euclidea normalizzata
215
  euclidean_dist = np.linalg.norm(vec1 - vec2) / math.sqrt(self.vector_dimensions)
216
 
217
+ # Proiezioni su sottospazi semantici
218
+ alliance_vec = self.semantic_space['alliance']
219
+ conflict_vec = self.semantic_space['conflict']
220
+
221
+ # Proiezione alliance: (v1 + v2) · alliance_basis
222
+ alliance_sum = (vec1 + vec2) / 2
223
+ alliance_projection = np.dot(alliance_sum, alliance_vec)
224
+
225
+ # Proiezione conflict: |v1 - v2| · conflict_basis
226
+ conflict_diff = vec1 - vec2
227
+ conflict_projection = abs(np.dot(conflict_diff, conflict_vec))
228
 
229
+ # Power differential basato su norma dei vettori
230
+ power_diff = np.linalg.norm(vec1) - np.linalg.norm(vec2)
231
+
232
+ # Relazioni storiche note (override per realismo)
233
+ historical_adjustments = {
234
+ ('Russia', 'Ukraine'): {'conflict_boost': 0.7, 'alliance_penalty': -0.8},
235
+ ('USA', 'Russia'): {'conflict_boost': 0.4, 'alliance_penalty': -0.6},
236
+ ('USA', 'China'): {'conflict_boost': 0.3, 'alliance_penalty': -0.5},
237
+ ('Israel', 'Iran'): {'conflict_boost': 0.8, 'alliance_penalty': -0.9},
238
+ ('China', 'Taiwan'): {'conflict_boost': 0.9, 'alliance_penalty': -0.9},
239
+ ('NATO', 'Russia'): {'conflict_boost': 0.6, 'alliance_penalty': -0.7}
240
+ }
241
+
242
+ # Applica aggiustamenti storici
243
+ key = (entity1, entity2)
244
+ reverse_key = (entity2, entity1)
245
+
246
+ if key in historical_adjustments:
247
+ adj = historical_adjustments[key]
248
+ conflict_projection += adj['conflict_boost']
249
+ alliance_projection += adj['alliance_penalty']
250
+ elif reverse_key in historical_adjustments:
251
+ adj = historical_adjustments[reverse_key]
252
+ conflict_projection += adj['conflict_boost']
253
+ alliance_projection += adj['alliance_penalty']
254
+
255
+ # Clamp valori in range realistico
256
+ alliance_projection = max(-1.0, min(1.0, alliance_projection))
257
+ conflict_projection = max(0.0, min(1.0, conflict_projection))
258
 
259
  relations[(entity1, entity2)] = {
260
  'similarity': cosine_sim,
261
  'distance': euclidean_dist,
262
  'alliance_potential': alliance_projection,
263
+ 'conflict_potential': conflict_projection,
264
+ 'power_differential': power_diff
265
  }
266
 
267
  return relations
 
566
  return "\n".join(report_parts)
567
 
568
  def calculate_vector_risk_metrics(self, vector_analysis, relations):
569
+ """Calcola metriche di rischio basate su analisi vettoriale con valori realistici"""
570
+
571
+ # Risk magnitude basato su proiezioni semantiche con pesi
572
+ conflict_indicators = {
573
+ 'conflict': 3.0, # Peso alto per conflitto diretto
574
+ 'military': 2.5, # Peso medio-alto per aspetti militari
575
+ 'power': 1.5 # Peso medio per dinamiche di potere
576
+ }
577
+
578
+ risk_magnitude = 0.0
579
+ for indicator, weight in conflict_indicators.items():
580
+ projection = abs(vector_analysis['semantic_projections'].get(indicator, 0))
581
+ risk_magnitude += projection * weight
582
+
583
+ # Normalizza tra 0 e 1
584
+ risk_magnitude = min(risk_magnitude / 5.0, 1.0)
585
 
586
+ # Aggiungi boost se ci sono conflitti noti nelle relazioni
587
+ if relations:
588
+ max_conflict_potential = max(
589
+ (rel.get('conflict_potential', 0) for rel in relations.values()),
590
+ default=0
591
+ )
592
+ risk_magnitude = max(risk_magnitude, max_conflict_potential * 0.7)
593
 
594
  # Entropy del sistema basata su trasformazioni contestuali
595
+ entropies = []
596
+ for context_name, transform in vector_analysis['contextual_transforms'].items():
597
+ # Calcola entropy dalla distribuzione del vettore trasformato
598
+ vector = transform['vector']
599
+ # Soft-max per creare distribuzione di probabilità
600
+ exp_vector = np.exp(vector - np.max(vector))
601
+ prob_dist = exp_vector / np.sum(exp_vector)
602
+ entropy = -np.sum(prob_dist * np.log(prob_dist + 1e-8))
603
+ entropies.append(entropy)
604
+
605
+ # Entropy media normalizzata
606
+ system_entropy = np.mean(entropies) / math.log(self.vector_dimensions) if entropies else 0.3
607
+
608
+ # Stability index basato su varianza delle relazioni + fattori aggiuntivi
609
+ stability_factors = []
610
 
 
611
  if relations:
612
+ # Varianza delle similarità
613
  similarities = [rel['similarity'] for rel in relations.values()]
614
+ if similarities:
615
+ similarity_variance = np.var(similarities)
616
+ stability_factors.append(1.0 - similarity_variance)
617
+
618
+ # Asimmetria di potere
619
+ power_diffs = [abs(rel['power_differential']) for rel in relations.values()]
620
+ if power_diffs:
621
+ power_asymmetry = np.mean(power_diffs)
622
+ stability_factors.append(1.0 - min(power_asymmetry, 1.0))
623
+
624
+ # Stability semantica basata su coerenza delle proiezioni
625
+ semantic_projections = list(vector_analysis['semantic_projections'].values())
626
+ if semantic_projections:
627
+ semantic_coherence = 1.0 - (np.var(semantic_projections) / (np.mean(np.abs(semantic_projections)) + 1e-8))
628
+ stability_factors.append(max(0.0, min(1.0, semantic_coherence)))
629
+
630
+ # Stability index finale
631
+ if stability_factors:
632
+ stability = np.mean(stability_factors)
633
  else:
634
+ stability = 0.5 # Default per situazioni neutre
635
+
636
+ # Aggiustamenti basati su context
637
+ # Se ci sono molte trasformazioni attive, riduce stabilit脿
638
+ active_transforms = sum(1 for t in vector_analysis['contextual_transforms'].values()
639
+ if t['norm'] > 0.1)
640
+ if active_transforms > 3:
641
+ stability *= 0.85
642
 
643
  return {
644
+ 'magnitude': max(0.0, min(1.0, risk_magnitude)),
645
+ 'entropy': max(0.0, min(1.0, system_entropy)),
646
+ 'stability': max(0.0, min(1.0, stability))
647
  }
648
 
649
  # Inizializza il sistema AI vettoriale