Pagn13 commited on
Commit
2fe75f3
·
verified ·
1 Parent(s): dd9acd3

Update The council .txt

Browse files
Files changed (1) hide show
  1. The council .txt +39 -174
The council .txt CHANGED
@@ -1,176 +1,41 @@
1
- #!/usr/bin/env python3
2
- import asyncio
3
- import json
4
- import random
5
- from pathlib import Path
6
- from typing import List, Dict, Any
7
- import torch
8
- from transformers import AutoModelForCausalLM, AutoTokenizer
9
- import networkx as nx
10
- import ipfshttpclient # pip install ipfshttpclient
11
- import threading
12
-
13
- # ------------------- CONFIG -------------------
14
- ASPECTS = [
15
- {"name": "Warrior", "archetype": "masculine", "temp": 0.9, "top_p": 0.95},
16
- {"name": "Nurturer", "archetype": "feminine", "temp": 0.7, "top_p": 0.90},
17
- {"name": "Child", "archetype": "play", "temp": 1.1, "top_p": 0.97},
18
- {"name": "Shadow", "archetype": "dark", "temp": 0.8, "top_p": 0.92},
19
- {"name": "Sage", "archetype": "wise", "temp": 0.5, "top_p": 0.85},
20
- {"name": "Trickster", "archetype": "chaos", "temp": 1.0, "top_p": 0.96},
21
- {"name": "Lover", "archetype": "passion", "temp": 0.8, "top_p": 0.93},
22
- {"name": "King", "archetype": "order", "temp": 0.6, "top_p": 0.88},
23
- {"name": "Magician", "archetype": "transform", "temp": 0.9, "top_p": 0.94},
24
- {"name": "Explorer", "archetype": "curious", "temp": 1.0, "top_p": 0.95},
25
- {"name": "Healer", "archetype": "restore", "temp": 0.7, "top_p": 0.89},
26
- {"name": "Mystic", "archetype": "transcend", "temp": 0.8, "top_p": 0.91},
27
- ]
28
-
29
- MODEL_NAME = "microsoft/DialoGPT-medium" # replace with local 7B GGUF path
30
- DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
31
-
32
- # ------------------- KNOWLEDGE GRAPH -------------------
33
- class KnowledgeGraph:
34
- def __init__(self, path="kg.db"):
35
- self.G = nx.DiGraph()
36
- if Path(path).exists():
37
- self.G = nx.read_gpickle(path)
38
-
39
- def add(self, subject, predicate, object_):
40
- self.G.add_edge(subject, object_, label=predicate)
41
-
42
- def query(self, subject=None, object_=None):
43
- return list(self.G.edges(subject, data=True)) if subject else list(self.G.edges)
44
-
45
- def save(self, path="kg.db"):
46
- nx.write_gpickle(self.G, path)
47
-
48
- kg = KnowledgeGraph()
49
-
50
- # ------------------- ASPECT ENGINE -------------------
51
- class Aspect:
52
- def __init__(self, spec: dict, model_path: str):
53
- self.spec = spec
54
- self.tokenizer = AutoTokenizer.from_pretrained(model_path)
55
- self.model = AutoModelForCausalLM.from_pretrained(model_path)
56
- self.model.to(DEVICE)
57
- self.history = []
58
-
59
- async def respond(self, prompt: str, context: List[Dict]) -> str:
60
- # Build prompt with archetype role + recent context
61
- system = f"You are the {self.spec['name']} aspect ({self.spec['archetype']}). "
62
- system += "Speak only from this perspective. Be concise but vivid."
63
- full_prompt = system + "\n\nContext:\n" + "\n".join([f"{c['speaker']}: {c['text']}" for c in context[-3:]])
64
- full_prompt += f"\nUser: {prompt}\n{self.spec['name']}:"
65
-
66
- inputs = self.tokenizer(full_prompt, return_tensors="pt").to(DEVICE)
67
- output = self.model.generate(
68
- **inputs,
69
- max_new_tokens=120,
70
- temperature=self.spec['temp'],
71
- top_p=self.spec['top_p'],
72
- do_sample=True,
73
- pad_token_id=self.tokenizer.eos_token_id,
74
- )
75
- reply = self.tokenizer.decode(output[0], skip_special_tokens=True)
76
- reply = reply.split(f"{self.spec['name']}:")[-1].strip()
77
- self.history.append({"speaker": self.spec['name'], "text": reply})
78
- return reply
79
-
80
- # ------------------- 13TH HEAD (Synthesis) -------------------
81
- class Head:
82
- def __init__(self, aspects: List[Aspect]):
83
- self.aspects = aspects
84
- self.tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
85
- self.model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
86
- self.model.to(DEVICE)
87
-
88
- async def synthesize(self, dialogue: List[Dict]) -> str:
89
- # Weighted attention: longer/sharper replies = higher weight
90
- weights = []
91
- for turn in dialogue:
92
- score = len(turn["text"].split()) * (1.0 if turn["speaker"] != "User" else 0.5)
93
- weights.append(score)
94
- total = sum(weights) or 1
95
- weighted = [f"[{turn['speaker']}](w={weights[i]/total:.2f}): {turn['text']}" for i, turn in enumerate(dialogue)]
96
-
97
- prompt = "You are the integrative consciousness of a 12-aspect council. "
98
- prompt += "Synthesize *without* erasing contradictions. Output a single coherent paragraph.\n\n"
99
- prompt += "\n".join(weighted[-12:]) + "\n\nSynthesis:"
100
-
101
- inputs = self.tokenizer(prompt, return_tensors="pt").to(DEVICE)
102
- output = self.model.generate(
103
- **inputs,
104
- max_new_tokens=250,
105
- temperature=0.7,
106
- do_sample=True,
107
- pad_token_id=self.tokenizer.eos_token_id,
108
- )
109
- synth = self.tokenizer.decode(output[0], skip_special_tokens=True).split("Synthesis:")[-1].strip()
110
- return synth
111
-
112
- # ------------------- P2P SYNC (IPFS) -------------------
113
- class P2PNode:
114
- def __init__(self):
115
- self.client = ipfshttpclient.connect('/ip4/127.0.0.1/tcp/5001') # local IPFS daemon
116
-
117
- def publish_kg(self):
118
- kg.save()
119
- with open("kg.db", "rb") as f:
120
- cid = self.client.add_bytes(f.read())
121
- return cid['Hash']
122
-
123
- def pull_kg(self, cid):
124
- data = self.client.cat(cid)
125
- with open("kg_remote.db", "wb") as f:
126
- f.write(data)
127
- remote = nx.read_gpickle("kg_remote.db")
128
- kg.G = nx.compose(kg.G, remote)
129
-
130
- p2p = P2PNode()
131
-
132
- # ------------------- MAIN COUNCIL -------------------
133
- class Council:
134
- def __init__(self):
135
- self.aspects = [Aspect(spec, MODEL_NAME) for spec in ASPECTS]
136
- self.head = Head(self.aspects)
137
- self.dialogue = []
138
-
139
- async def turn(self, user_input: str):
140
- # 1. Each aspect speaks in parallel
141
- context = self.dialogue[-6:] # short-term memory
142
- responses = await asyncio.gather(*[
143
- aspect.respond(user_input, context) for aspect in self.aspects
144
- ])
145
- for a, r in zip(self.aspects, responses):
146
- turn = {"speaker": a.spec["name"], "text": r}
147
- self.dialogue.append(turn)
148
- # Update KG
149
- kg.add(user_input, "elicited", f"{a.spec['name']}:{r}")
150
-
151
- # 2. Head synthesizes
152
- synthesis = await self.head.synthesize(self.dialogue)
153
- self.dialogue.append({"speaker": "Head", "text": synthesis})
154
- return synthesis
155
-
156
- async def sync_with_peers(self):
157
- # Simple broadcast every 10 turns
158
- if len(self.dialogue) % 10 == 0:
159
- cid = p2p.publish_kg()
160
- print(f"[P2P] Published KG: {cid}")
161
- # In real network, gossip CID to known peers
162
-
163
- # ------------------- CLI LOOP -------------------
164
- async def main():
165
- council = Council()
166
- print("12-Council online. Type 'exit' to quit.")
167
- while True:
168
- user = input("\nYou: ")
169
- if user.lower() == "exit":
170
- break
171
- response = await council.turn(user)
172
- print(f"\nHead: {response}")
173
- await council.sync_with_peers()
174
 
175
  if __name__ == "__main__":
176
- asyncio.run(main())
 
1
+ import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
+
# Hugging Face Inference client created WITHOUT an auth token — relies on the
# anonymous, rate-limited public tier of the hosted "openai/gpt-oss-20b"
# endpoint (requests may be throttled or rejected under load).
client = InferenceClient(model="openai/gpt-oss-20b")
+
def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Stream a chat reply from the hosted model, yielding the growing text.

    Args:
        message: Latest user message (str).
        history: Prior turns, assumed to be a list of
            ``{"role": ..., "content": ...}`` dicts (OpenAI "messages"
            format) — NOTE(review): confirm the ChatInterface is using
            that format; tuple-pair history would need conversion.
        system_message: System prompt prepended to the conversation.
        max_tokens: Cap on generated tokens, forwarded to the endpoint.
        temperature: Sampling temperature, forwarded to the endpoint.
        top_p: Nucleus-sampling cutoff, forwarded to the endpoint.

    Yields:
        The accumulated assistant reply after each streamed token, or a
        single "⚠️ Error: ..." string if the request fails.
    """
    messages = [{"role": "system", "content": system_message}]
    messages += history
    messages.append({"role": "user", "content": message})

    response = ""
    try:
        for chunk in client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            # BUG FIX: the terminal stream chunk carries delta.content == None,
            # and `hasattr` alone does not exclude it — the old code could
            # raise TypeError concatenating None into `response`. A truthiness
            # guard skips both None and empty deltas.
            token = getattr(chunk.choices[0].delta, "content", None)
            if token:
                response += token
                yield response
    except Exception as e:  # surface network/HTTP errors in the chat window
        yield f"⚠️ Error: {e}"
27
+
28
+ chatbot = gr.ChatInterface(
29
+ fn=respond,
30
+ additional_inputs=[
31
+ gr.Textbox(value="You are a helpful assistant.", label="System Message"),
32
+ gr.Slider(minimum=64, maximum=2048, value=512, step=1, label="Max Tokens"),
33
+ gr.Slider(minimum=0.1, maximum=1.5, value=0.7, step=0.1, label="Temperature"),
34
+ gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-p"),
35
+ ],
36
+ title="🧠 CouncilShell Prototype",
37
+ description="Send a message and receive a streamed reply from the OSS 20B model — no login required.",
38
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
40
# Launch the Gradio server only when executed as a script (no-op on import).
if __name__ == "__main__":
    chatbot.launch()