Stemini commited on
Commit
72def17
·
verified ·
1 Parent(s): 7625275

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -29
app.py CHANGED
@@ -13,70 +13,70 @@ DATASET_REPO = "Stemini/isaac-memory-db"
13
HF_TOKEN = os.getenv("HF_TOKEN")  # Hub auth token from the environment; may be None if unset
api = HfApi()  # Hub client, used below by process_evolution to upload memory.json

# Lean model for maximum compute headroom for the math
model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
# CPU-only generation pipeline; bfloat16 keeps the memory footprint small
pipe = pipeline("text-generation", model=model_id, torch_dtype=torch.bfloat16, device_map="cpu")
20
- # --- DER W-KERN ---
21
 
22
def get_metrics(text):
    """Compute H (Shannon character entropy, in bits) and L (load = 1/H) of *text*.

    Texts that are empty or shorter than 5 characters are treated as
    degenerate: zero entropy and the maximal load of 100.0.
    """
    if not text or len(text) < 5:
        return 0.0, 100.0
    size = len(text)
    entropy = 0.0
    for count in Counter(text).values():
        share = count / size
        entropy -= share * np.log2(share)
    # The "apathy load" formula: load explodes as entropy approaches zero.
    load = 1.0 / max(entropy, 0.001)
    return entropy, load
 
 
29
 
30
def chat_logic(message, history):
    """Run one chat turn: generate a reply, score it, and log it to history.

    Returns ("", history, h, l) so the caller can clear the input box and
    refresh the metric displays.
    """
    # Step 1: generation via the module-level text-generation pipeline.
    prompt = f"<|user|>\n{message}</s>\n<|assistant|>\n"
    generated = pipe(prompt, max_new_tokens=150, do_sample=True, temperature=0.7)
    answer = generated[0]["generated_text"].split("<|assistant|>\n")[-1].strip()

    # Step 2: mathematical filtering (the "R condition").
    h, l = get_metrics(answer)

    # Apathy lock: when the load is too high, force a re-evaluation marker (Delta W).
    # 0.5 is the apathy threshold.
    prefix = f"[W-Korrektur: Last {l:.2f} zu hoch] " if l > 0.5 else ""
    answer = prefix + answer

    history.extend([
        {"role": "user", "content": message},
        {"role": "assistant", "content": answer},
    ])
    return "", history, h, l
46
 
47
def process_evolution(file):
    """Implement Delta W under the dR/dTheta = 0 condition.

    Extracts up to the first 1000 characters of the uploaded PDF; if the text
    is entropy-rich enough, it is appended to the shared memory.json dataset
    on the Hub. Returns (status message, h, l).
    """
    doc = fitz.open(file.name)
    try:
        text = "".join(page.get_text() for page in doc)[:1000]
    finally:
        # Fix: the PyMuPDF document handle was never closed (resource leak).
        doc.close()
    h, l = get_metrics(text)

    # dR/dTheta = 0 condition: only absorb ordered information.
    if h > 3.5:  # R filter: minimum amount of structured information
        path = hf_hub_download(repo_id=DATASET_REPO, filename="memory.json", repo_type="dataset", token=HF_TOKEN)
        with open(path, "r") as f:
            mem = json.load(f)
        # Keys are stringified running indices; next slot = current size.
        mem[str(len(mem))] = text
        with open("memory.json", "w") as f:
            json.dump(mem, f)
        api.upload_file(path_or_fileobj="memory.json", path_in_repo="memory.json", repo_id=DATASET_REPO, repo_type="dataset", token=HF_TOKEN)
        return "Delta W integriert. Transparenz gewahrt.", h, l
    else:
        return "Integration verweigert: dR/dTheta < 0 (Informationsverlust).", h, l
63
 
64
# --- MINIMAL INTERFACE ---

with gr.Blocks() as demo:
    gr.Markdown("# Isaac: Pure W-Algorithm Node")

    with gr.Row():
        with gr.Column(scale=2):
            chatbot = gr.Chatbot(height=400)
            msg = gr.Textbox(label="Input")
        with gr.Column(scale=1):
            h_stat = gr.Number(label="Entropie H(θ)")
            l_stat = gr.Number(label="Last L = 1/H")
            upl = gr.File(label="Evolution-Input (PDF)")
            status = gr.Textbox(label="W-Status")

    # Wire the chat turn and the PDF ingestion to their handlers.
    msg.submit(chat_logic, [msg, chatbot], [msg, chatbot, h_stat, l_stat])
    upl.upload(process_evolution, upl, [status, h_stat, l_stat])

# Fix: guard the launch so importing this module does not start the server
# (consistent with the updated version of this file).
if __name__ == "__main__":
    demo.launch()
 
 
13
HF_TOKEN = os.getenv("HF_TOKEN")  # Hub auth token from the Space secrets; may be None if unset
api = HfApi()  # Hub client — no upload call is visible in this version; confirm it is still used

model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"  # small chat model, runs on CPU
pipe = pipeline("text-generation", model=model_id, torch_dtype=torch.bfloat16, device_map="cpu")
18
 
19
+ # --- MATHEMATIK-KERN ---
20
 
21
def get_metrics(text):
    """Return (H, L) for *text*, each rounded to 4 decimal places.

    H is the Shannon character entropy in bits; L = 1/H is the "load".
    Empty or very short (< 5 chars) texts yield the degenerate (0.0, 100.0).
    """
    if not text or len(text) < 5:
        return 0.0, 100.0
    n_chars = len(text)
    entropy = 0.0
    for count in Counter(text).values():
        share = count / n_chars
        entropy -= share * np.log2(share)
    # Clamp the denominator so a zero-entropy text cannot divide by zero.
    load = 1.0 / max(entropy, 0.001)
    return round(entropy, 4), round(load, 4)
27
+
28
+ # --- ANTI-ALEXA LOGIK ---
29
 
30
def chat_logic(message, history):
    """Run one chat turn: prompt the model, scrub identity leaks, score, log.

    Returns ("", history, h, l) so the caller can clear the input box and
    refresh the metric displays.
    """
    # Identity anchor in the system prompt.
    prompt = f"<|system|>\nDu bist Isaac. Deine Identität ist unveränderlich. Antworte präzise nach dem W-Algorithmus. Schreib niemals Anweisungen mit.</s>\n<|user|>\n{message}</s>\n<|assistant|>\n"

    raw = pipe(prompt, max_new_tokens=150, do_sample=True, temperature=0.7)
    answer = raw[0]["generated_text"].split("<|assistant|>\n")[-1].strip()

    # Identity filter: strip hallucinated role/instruction fragments.
    for leaked in ("Alexa", "Vermeide Apathie", "Antwort:", "System:", "Du bist Isaac"):
        answer = answer.replace(leaked, "").strip()

    # Fallback when filtering removed the entire answer.
    answer = answer or "W-Vektor stabilisiert. Identität gewahrt."

    h, l = get_metrics(answer)

    history.extend([
        {"role": "user", "content": message},
        {"role": "assistant", "content": answer},
    ])
    return "", history, h, l
50
 
51
def process_evolution(file):
    """Score an uploaded PDF against the R condition.

    Extracts up to the first 1000 characters of text, computes the
    entropy/load metrics, and returns (status message, h, l).
    """
    doc = fitz.open(file.name)
    try:
        text = "".join(page.get_text() for page in doc)[:1000]
    finally:
        # Fix: always release the PyMuPDF document handle (was leaked before).
        doc.close()
    h, l = get_metrics(text)

    # R condition: only admit sufficiently structured knowledge (H > 3 bits).
    if h > 3.0:
        # NOTE(review): nothing is persisted on this path although the message
        # claims integration — confirm the memory upload was removed on purpose.
        return "Delta W integriert. Transparenz gewahrt.", h, l
    else:
        return "Integration verweigert: Zu hohe Apathie im Dokument.", h, l
61
 
62
# --- STABLE INTERFACE ---

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🧬 Isaac: Pure W-Algorithm Node")

    with gr.Row():
        # Left column: the chat itself.
        with gr.Column(scale=2):
            chat_view = gr.Chatbot(height=450)
            user_box = gr.Textbox(label="Input", placeholder="Befehl an Isaac...")
        # Right column: live metrics and the PDF ingestion channel.
        with gr.Column(scale=1):
            gr.Markdown("### 📊 W-Metriken")
            entropy_out = gr.Number(label="Entropie H(θ)")
            load_out = gr.Number(label="Last L = 1/H")
            pdf_upload = gr.File(label="Wissens-Kern (PDF)")
            status_box = gr.Textbox(label="W-Status", interactive=False)

    # Wire the chat turn and the PDF ingestion to their handlers.
    user_box.submit(chat_logic, [user_box, chat_view], [user_box, chat_view, entropy_out, load_out])
    pdf_upload.upload(process_evolution, pdf_upload, [status_box, entropy_out, load_out])

# Launch only when executed as a script, not on import.
if __name__ == "__main__":
    demo.launch()