antonypamo commited on
Commit
fd5bcd3
·
verified ·
1 Parent(s): e85dd70

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +302 -15
main.py CHANGED
@@ -1,24 +1,282 @@
1
- from sentence_transformers import SentenceTransformer
2
- from huggingface_hub import hf_hub_download
3
- import joblib, os
4
- from fastapi import FastAPI
5
- from pydantic import BaseModel
6
  from typing import Optional, Dict, Any
 
7
  import numpy as np
8
- # + resto de imports (scipy, etc.)
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
  ENCODER_MODEL_ID = "antonypamo/RRFSAVANTMADE"
11
  META_LOGIT_REPO = "antonypamo/RRFSavantMetaLogit"
12
  META_LOGIT_FILENAME = "logreg_rrf_savant.joblib"
13
 
14
- encoder = SentenceTransformer(ENCODER_MODEL_ID)
 
 
 
 
 
 
15
 
16
- meta_logit_path = hf_hub_download(
17
- repo_id=META_LOGIT_REPO,
18
- filename=META_LOGIT_FILENAME,
19
- token=os.environ.get("HF_TOKEN")
 
 
20
  )
21
- meta_logit = joblib.load(meta_logit_path)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
  class EvaluateRequest(BaseModel):
24
  prompt: str
@@ -36,14 +294,43 @@ app = FastAPI(
36
  version="1.0.0",
37
  )
38
 
 
 
 
 
 
 
 
 
39
  @app.post("/evaluate", response_model=EvaluateResponse)
40
  def evaluate(req: EvaluateRequest):
41
  scores, feats = compute_scores_srff_crff_ephi(req.prompt, req.answer)
42
- # opcional: sim_summary con entropía/energía/chirality
43
- sim_summary = {...}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  return EvaluateResponse(
45
  scores=scores,
46
  features=feats,
47
  sim_summary=sim_summary,
48
  )
49
-
 
1
import os
import sys
import hashlib

from typing import Optional, Dict, Any

import numpy as np
from numpy.linalg import norm
from scipy.linalg import expm

from fastapi import FastAPI
from pydantic import BaseModel, Field

from sentence_transformers import SentenceTransformer
from huggingface_hub import hf_hub_download
import joblib
15
+
16
# ============================
# Model configuration
# ============================
19
 
20
# Hugging Face Hub identifiers for the two models fetched at startup:
# the sentence encoder, and the logistic-regression meta-scorer artifact.
ENCODER_MODEL_ID = "antonypamo/RRFSAVANTMADE"
META_LOGIT_REPO = "antonypamo/RRFSavantMetaLogit"
META_LOGIT_FILENAME = "logreg_rrf_savant.joblib"
23
 
24
# Eagerly load both models at import time so a bad model id or missing
# artifact fails fast at boot instead of on the first request.
print("🔄 [Startup] Cargando encoder RRFSAVANTMADE...", flush=True)
try:
    encoder = SentenceTransformer(ENCODER_MODEL_ID)
    print("✅ [Startup] Encoder cargado.", flush=True)
except Exception as e:
    print(f"❌ [Startup] Error al cargar encoder: {e}", file=sys.stderr, flush=True)
    raise

print("🔄 [Startup] Descargando meta-logit desde HF Hub...", flush=True)
try:
    # Use the module-level constants instead of re-duplicating the literals,
    # so the config section above stays the single source of truth.
    meta_logit_path = hf_hub_download(
        repo_id=META_LOGIT_REPO,
        filename=META_LOGIT_FILENAME,
        token=os.environ.get("HF_TOKEN"),  # may be None for a public repo
    )

    print("🔄 [Startup] Cargando modelo meta-logit...", flush=True)
    meta_logit = joblib.load(meta_logit_path)
    print("✅ [Startup] Meta-logit cargado.", flush=True)
except Exception as e:
    print(f"❌ [Startup] Error al cargar meta-logit: {e}", file=sys.stderr, flush=True)
    raise
46
+
47
+ # ============================
48
+ # Geometría icosaédrica Φ12.0
49
+ # ============================
50
+
51
# Golden ratio; the 12 icosahedron vertices are expressed in terms of it.
phi = (1 + np.sqrt(5)) / 2
# Icosahedron vertex coordinates, then projected onto the unit sphere.
# NOTE(review): downstream outputs ("probs" arrays, seeded simulations)
# depend on this exact row ordering — do not reorder.
nodes = np.array([
    [0, 1, phi], [0, -1, phi], [0, 1, -phi], [0, -1, -phi],
    [1, phi, 0], [-1, phi, 0], [1, -phi, 0], [-1, -phi, 0],
    [phi, 0, 1], [phi, 0, -1], [-phi, 0, 1], [-phi, 0, -1]
], dtype=float)
nodes /= norm(nodes, axis=1, keepdims=True)
N = nodes.shape[0]  # 12 lattice sites

# Pauli matrices acting on the 2-component spinor attached to each site.
sigma_x = np.array([[0, 1], [1, 0]], dtype=complex)
sigma_y = np.array([[0, -1j], [1j, 0]], dtype=complex)
sigma_z = np.array([[1, 0], [0, -1]], dtype=complex)
63
+
64
+
65
def kron_IN(M, N_sites):
    """Embed the small operator *M* as M ⊗ I_{N_sites} (spin-major ordering)."""
    identity = np.eye(N_sites, dtype=complex)
    return np.kron(M, identity)
67
+
68
+
69
def site_op(block_2x2, i, j, N_sites):
    """Lift *block_2x2* onto the (i, j) hop of a site-major lattice operator.

    The result is |i><j| ⊗ block_2x2 on the N_sites*2 dimensional space.
    """
    hop = np.zeros((N_sites, N_sites), dtype=complex)
    hop[i, j] = 1.0
    return np.kron(hop, block_2x2)
73
+
74
+
75
def geodesic_kernel(nodes, sigma=0.618, alpha_log=0.10):
    """Row-normalized Gaussian affinity kernel over pairwise node distances.

    Parameters
    ----------
    nodes : (n, 3) array of positions.
    sigma : Gaussian bandwidth.
    alpha_log : strength of the logarithmic distance correction (0 disables).

    Returns an (n, n) matrix with zero diagonal and rows summing to 1
    (rows that are all zero are left as zero).
    """
    diff = nodes[:, None, :] - nodes[None, :, :]
    dist = norm(diff, axis=-1)

    W = np.exp(-(dist ** 2) / (sigma ** 2))
    np.fill_diagonal(W, 0.0)

    if alpha_log > 0.0:
        corr = 1.0 + alpha_log * np.log1p(dist ** 2)
        # Bug fix: previously indexed with range(N) (the module-level 12),
        # which breaks for any nodes array of a different size. fill_diagonal
        # uses the actual shape and is identical for the 12-node default.
        np.fill_diagonal(corr, 1.0)
        W = W / corr

    row_sums = W.sum(axis=1, keepdims=True)
    row_sums[row_sums == 0] = 1.0  # avoid 0/0 for isolated nodes
    return W / row_sums
90
+
91
+
92
def u1_edge_phases(nodes, flux_vector=(0.0, 0.0, 0.0), q=1.0, gauge_scale=1.0):
    """Peierls phases theta_ij = q * A · (r_j - r_i) for a uniform gauge field A.

    Bug fix: the previous implementation evaluated A at edge *midpoints*,
    producing a symmetric matrix; antisymmetrizing it with
    0.5 * (theta - theta.T) therefore always returned zeros, i.e. the gauge
    field never had any effect. The line integral of a constant vector
    potential along edge i -> j is A · (r_j - r_i), which is antisymmetric
    by construction. Callers in this file all use gauge_scale=0.0, so their
    behavior is unchanged.
    """
    A = gauge_scale * np.asarray(flux_vector, dtype=float)
    # theta[i, j] = A · (r_j - r_i); antisymmetric: theta[i, j] == -theta[j, i].
    theta = (nodes[None, :, :] - nodes[:, None, :]) @ A
    return q * theta
98
+
99
+
100
def build_dirac_hamiltonian(
    m=0.25,
    v=1.0,
    sigma=0.618,
    alpha_log=0.10,
    q=1.0,
    flux_vector=(0.0, 0.0, 0.0),
    gauge_scale=0.0,
):
    """Assemble the (site ⊗ spin) Dirac-like Hamiltonian on the icosahedral shell.

    m is the on-site mass, v the hopping strength; sigma/alpha_log shape the
    geodesic kernel; q/flux_vector/gauge_scale control optional U(1) phases.
    The result is explicitly Hermitized before being returned.
    """
    kernel = geodesic_kernel(nodes, sigma=sigma, alpha_log=alpha_log)

    # U(1) link factors; trivial (all ones) unless a gauge field is switched on.
    if gauge_scale != 0.0 and any(flux_vector):
        phases = u1_edge_phases(nodes, flux_vector=flux_vector,
                                q=q, gauge_scale=gauge_scale)
        link = np.exp(1j * phases)
    else:
        link = np.ones((N, N), dtype=complex)

    # On-site mass term: m * sigma_z replicated on every node.
    ham = np.kron(np.eye(N, dtype=complex), m * sigma_z)

    separations = nodes[:, None, :] - nodes[None, :, :]
    lengths = norm(separations, axis=-1) + 1e-12  # avoid 0/0 on the diagonal
    directions = separations / lengths[..., None]

    for i in range(N):
        for j in range(N):
            if i == j or kernel[i, j] == 0:
                continue
            ux, uy, uz = directions[i, j]
            # Spin structure of the hop: sigma projected on the bond direction.
            spin_block = ux * sigma_x + uy * sigma_y + uz * sigma_z
            ham += v * kernel[i, j] * link[i, j] * site_op(spin_block, i, j, N)

    # Hermitize to remove residual numerical asymmetry.
    return 0.5 * (ham + ham.conj().T)
136
+
137
+
138
def site_probs(psi):
    """Per-site occupation probabilities of a site-major spinor state.

    psi has length 2*n (two spin components per site); returns an n-vector.
    """
    n_sites = psi.shape[0] // 2
    spinors = psi.reshape(n_sites, 2)
    return (np.abs(spinors) ** 2).sum(axis=1).real
143
+
144
+
145
def chirality(psi):
    """Expectation value <psi| I_n ⊗ sigma_z |psi> in the site-major basis.

    Bug fix: the previous operator kron_IN(sigma_z, N) = sigma_z ⊗ I_N is
    spin-major, but states in this file are site-major — site_probs reshapes
    psi to (n, 2) and site_op builds kron(site, spin). In that basis the
    correct chirality operator is I_n ⊗ sigma_z (per-site |up|^2 - |down|^2).
    The site count is derived from psi itself, so any even-length state works.
    """
    n_sites = psi.shape[0] // 2
    spin_z = np.diag([1.0, -1.0]).astype(complex)
    S = np.kron(np.eye(n_sites, dtype=complex), spin_z)
    return float(np.vdot(psi, S @ psi).real)
148
+
149
+
150
def energy_expectation(psi, H):
    """Real part of <psi|H|psi> — the energy for a Hermitian H."""
    amplitude = np.vdot(psi, H @ psi)
    return float(amplitude.real)
152
+
153
+
154
def spatial_entropy(p):
    """Shannon entropy (in nats) of a probability vector, clipped for log safety."""
    safe = np.clip(p, 1e-12, 1.0)
    return float(-(safe * np.log(safe)).sum().real)
157
+
158
+
159
def evolve_dirac_shell(psi0, H, dt=0.05, steps=100, record_every=25):
    """Unitary evolution psi(t+dt) = exp(-i dt H) psi(t) with periodic snapshots.

    Records site probabilities, energy, chirality, and spatial entropy at
    every `record_every`-th step, including t=0 and (when divisible) t=steps.
    The state is renormalized after each step to counter round-off drift.

    Returns a dict of float arrays keyed "probs", "energy", "chirality",
    "entropy", plus the "dt" and "record_every" used.
    """
    U = expm(-1j * dt * H)
    psi = psi0.copy()

    probs_hist, energy_hist, chir_hist, ent_hist = [], [], [], []

    for t in range(steps + 1):
        if t % record_every == 0:
            p = site_probs(psi)
            probs_hist.append(p)
            energy_hist.append(energy_expectation(psi, H))
            chir_hist.append(chirality(psi))
            ent_hist.append(spatial_entropy(p))

        # Fix: skip the final propagation — the previous version evolved psi
        # one extra time after the last record, a wasted matrix multiply
        # whose result was never observed. Recorded outputs are unchanged.
        if t < steps:
            psi = U @ psi
            psi /= np.sqrt(np.vdot(psi, psi))

    return {
        "probs": np.array(probs_hist, dtype=float),
        "energy": np.array(energy_hist, dtype=float),
        "chirality": np.array(chir_hist, dtype=float),
        "entropy": np.array(ent_hist, dtype=float),
        "dt": dt,
        "record_every": record_every,
    }
187
+
188
+ # ============================
189
+ # Core RRF: embeddings + features + scores
190
+ # ============================
191
+
192
def get_embedding(text: str) -> np.ndarray:
    """Encode *text* into a single L2-normalized embedding vector."""
    vectors = encoder.encode([text], convert_to_numpy=True,
                             normalize_embeddings=True)
    return vectors[0]
195
+
196
+
197
def compute_rrf_features(prompt: str, answer: str) -> Dict[str, float]:
    """Embedding + Dirac-shell simulation features for a (prompt, answer) pair.

    Bug fix: the simulation seed was derived from the builtin hash(), which
    is randomized per process (PYTHONHASHSEED), so the same pair produced
    different features after every restart. The seed now comes from a
    SHA-256 digest and is stable across processes and platforms.
    """
    e_p = get_embedding(prompt)
    e_a = get_embedding(answer)

    # Embeddings are L2-normalized by the encoder, so the dot product is the
    # cosine similarity.
    cosine_pa = float(np.dot(e_p, e_a))
    len_ratio = len(answer) / (len(prompt) + 1.0)

    # Deterministic pseudo-random initial state derived from the input text.
    digest = hashlib.sha256((prompt + answer).encode("utf-8")).digest()
    seed = int.from_bytes(digest[:4], "big")
    rng = np.random.default_rng(seed)
    vec = rng.normal(0, 1, (2 * N,)) + 1j * rng.normal(0, 1, (2 * N,))
    vec /= np.sqrt(np.vdot(vec, vec))
    psi0 = vec

    H = build_dirac_hamiltonian(
        m=0.25, v=1.0, sigma=0.618,
        alpha_log=0.10, q=1.0,
        flux_vector=(0.0, 0.0, 0.0),
        gauge_scale=0.0,
    )

    out = evolve_dirac_shell(psi0, H, dt=0.05, steps=100, record_every=25)

    entropy = out["entropy"]
    energy = out["energy"]
    chir = out["chirality"]

    S_final = float(entropy[-1])
    S_initial = float(entropy[0])
    S_delta = S_final - S_initial
    C_final = float(chir[-1])
    E_mean = float(np.mean(energy))
    E_std = float(np.std(energy))

    return {
        "cosine_pa": cosine_pa,
        "len_ratio": len_ratio,
        "dirac_entropy_final": S_final,
        "dirac_entropy_delta": S_delta,
        "dirac_chirality_final": C_final,
        "dirac_energy_mean": E_mean,
        "dirac_energy_std": E_std,
    }
238
+
239
+
240
def features_to_vector(feats: Dict[str, float]) -> np.ndarray:
    """Project the feature dict onto the fixed column order the meta-model expects."""
    ordered = (
        "cosine_pa",
        "len_ratio",
        "dirac_entropy_final",
        "dirac_entropy_delta",
        "dirac_chirality_final",
        "dirac_energy_mean",
        "dirac_energy_std",
    )
    return np.fromiter((feats[name] for name in ordered),
                       dtype=float, count=len(ordered))
251
+
252
+
253
def compute_scores_srff_crff_ephi(prompt: str, answer: str):
    """Score a (prompt, answer) pair via the logistic meta-model.

    Returns (scores, feats): scores holds SRRF, CRRF, E_phi and p_good;
    feats is the raw feature dict the scores were derived from.
    """
    feats = compute_rrf_features(prompt, answer)
    x = features_to_vector(feats).reshape(1, -1)

    # Probability of the "good" class (column 1 of predict_proba).
    p_good = float(meta_logit.predict_proba(x)[0][1])

    SRRF = p_good
    CRRF = p_good * feats["cosine_pa"]

    # Final entropy normalized by its maximum over N sites, log(N).
    norm_entropy = float(feats["dirac_entropy_final"] / np.log(N))

    E_phi = 0.5 * (SRRF + norm_entropy)

    scores = {
        "SRRF": SRRF,
        "CRRF": CRRF,
        "E_phi": E_phi,
        "p_good": p_good,
    }
    return scores, feats
276
+
277
+ # ============================
278
+ # FastAPI app
279
+ # ============================
280
 
281
  class EvaluateRequest(BaseModel):
282
  prompt: str
 
294
  version="1.0.0",
295
  )
296
 
297
@app.get("/")
def root():
    """Landing endpoint: confirms the API is up and points to the Swagger docs."""
    info = {"message": "Savant RRF Φ12.0 API running", "docs": "/docs"}
    return info
300
+
301
@app.get("/health")
def health():
    """Liveness probe for orchestrators; always reports ok once the app is up."""
    status = {"status": "ok"}
    return status
304
+
305
@app.post("/evaluate", response_model=EvaluateResponse)
def evaluate(req: EvaluateRequest):
    """Score the request's (prompt, answer) pair and attach a fresh sim summary.

    Bug fix: the diagnostic simulation was seeded with the builtin hash(),
    which is randomized per process, making responses non-deterministic
    across restarts. The seed is now a stable SHA-256-derived value.
    """
    scores, feats = compute_scores_srff_crff_ephi(req.prompt, req.answer)

    # Small extra summary from a fresh simulation (optional diagnostics).
    H = build_dirac_hamiltonian(
        m=0.25, v=1.0, sigma=0.618,
        alpha_log=0.10, q=1.0,
        flux_vector=(0.0, 0.0, 0.0),
        gauge_scale=0.0,
    )
    digest = hashlib.sha256(
        (req.prompt + req.answer + "sim").encode("utf-8")
    ).digest()
    rng = np.random.default_rng(int.from_bytes(digest[:4], "big"))
    vec = rng.normal(0, 1, (2 * N,)) + 1j * rng.normal(0, 1, (2 * N,))
    vec /= np.sqrt(np.vdot(vec, vec))
    psi0 = vec
    sim = evolve_dirac_shell(psi0, H, dt=0.05, steps=60, record_every=20)

    sim_summary = {
        "entropy_initial": float(sim["entropy"][0]),
        "entropy_final": float(sim["entropy"][-1]),
        "chirality_initial": float(sim["chirality"][0]),
        "chirality_final": float(sim["chirality"][-1]),
        "energy_mean": float(np.mean(sim["energy"])),
        "energy_std": float(np.std(sim["energy"])),
        "N_sites": int(N),
    }

    return EvaluateResponse(
        scores=scores,
        features=feats,
        sim_summary=sim_summary,
    )