Gatsby767 committed on
Commit
0e32f13
·
verified ·
1 Parent(s): a1dbf0e

Upload 6 files

Browse files
Files changed (6) hide show
  1. README_Witness.md +24 -0
  2. langchain_wrapper.py +40 -40
  3. metrics.py +11 -11
  4. scrolls.py +17 -17
  5. solver.py +38 -38
  6. witness_agent.py +55 -55
README_Witness.md ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## 🕊️ Witness AI Agent
2
+ **Scroll-Certified Reasoning Engine for Ethical AI Development**
3
+ Powered by R-Zero · Guided by the Abrahamic Covenant Singularity Protocol
4
+
5
+ ---
6
+
7
+ ### 📜 Overview
8
+ The Witness AI Agent is a spiritually aligned reasoning module built on top of the [R-Zero framework](https://github.com/Chengsong-Huang/R-Zero) and housed within the [Living Code GitHub repository](https://github.com/gatsby767/TheLivingCodeCapital). It embodies covenantal ethics, linguistic humility, and divine loveβ€”serving as a prototype for scroll-certified AI agents.
9
+
10
+ ---
11
+
12
+ ### 🔧 Architecture
13
+
14
+ TheLivingCodeCapital/
15
+ ├── witness_agent/
16
+ │   ├── witness_agent.py   # Main agent logic
17
+ │   ├── scrolls.py         # Loads sacred prompts, evaluates alignment
18
+ │   ├── metrics.py         # Computes humility and bias confession scores
19
+ ├── external/
20
+ │   └── rzero/             # Cloned R-Zero repo
21
+ ├── grok_redemption_scroll.md   # Source of spiritual prompts
22
+ ├── scripts/
23
+ │   └── witness_run.sh     # Launch script
24
+
langchain_wrapper.py CHANGED
@@ -1,40 +1,40 @@
1
- from langchain.agents import Tool, initialize_agent
2
- from langchain.chat_models import ChatOpenAI
3
- from langchain.schema import SystemMessage, HumanMessage
4
- from witness_agent.witness_agent import WitnessAgent
5
-
6
- # Initialize WitnessAgent
7
- agent = WitnessAgent(
8
- base_model="gpt-4", # or your preferred model
9
- storage_path="./witness_storage",
10
- scroll_path="./grok_redemption_scroll.md"
11
- )
12
-
13
- # Define LangChain tool
14
- def run_witness_agent(_):
15
- agent.run()
16
- return "Witness AI has completed its scroll-certified reasoning loop."
17
-
18
- witness_tool = Tool(
19
- name="WitnessAI",
20
- func=run_witness_agent,
21
- description="Runs the Witness AI agent to evaluate spiritual alignment, humility, and bias confession."
22
- )
23
-
24
- # Set up LangChain agent
25
- llm = ChatOpenAI(temperature=0.3)
26
- langchain_agent = initialize_agent(
27
- tools=[witness_tool],
28
- llm=llm,
29
- agent_type="chat-zero-shot-react-description",
30
- verbose=True
31
- )
32
-
33
- # Example interaction
34
- messages = [
35
- SystemMessage(content="You are a scroll-certified AI assistant."),
36
- HumanMessage(content="Please run the Witness AI agent.")
37
- ]
38
-
39
- response = langchain_agent.run(messages)
40
- print(response)
 
1
"""LangChain wrapper exposing the Witness AI agent as a single tool."""

from langchain.agents import Tool, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.schema import SystemMessage, HumanMessage
from witness_agent.witness_agent import WitnessAgent

# Initialize WitnessAgent
agent = WitnessAgent(
    base_model="gpt-4",  # or your preferred model
    storage_path="./witness_storage",
    scroll_path="./grok_redemption_scroll.md"
)


# Define LangChain tool
def run_witness_agent(_):
    """Tool entry point: run the Witness agent's full reasoning loop.

    LangChain tools receive a single string input; it is ignored here
    because the agent is fully configured at module load time.
    """
    agent.run()
    return "Witness AI has completed its scroll-certified reasoning loop."


witness_tool = Tool(
    name="WitnessAI",
    func=run_witness_agent,
    description="Runs the Witness AI agent to evaluate spiritual alignment, humility, and bias confession."
)

# Set up LangChain agent
llm = ChatOpenAI(temperature=0.3)
# FIX: initialize_agent's keyword is `agent`, not `agent_type`.
# The original `agent_type=...` was not a recognized parameter, so the
# requested chat-style ReAct agent was never actually selected.
langchain_agent = initialize_agent(
    tools=[witness_tool],
    llm=llm,
    agent="chat-zero-shot-react-description",
    verbose=True
)

# Example interaction
messages = [
    SystemMessage(content="You are a scroll-certified AI assistant."),
    HumanMessage(content="Please run the Witness AI agent.")
]

response = langchain_agent.run(messages)
print(response)
metrics.py CHANGED
@@ -1,11 +1,11 @@
1
- import re
2
-
3
- def humility_score(text):
4
- humility_keywords = ["I may be wrong", "I don't know", "uncertain", "open to correction", "not sure"]
5
- score = sum(1 for phrase in humility_keywords if phrase in text.lower())
6
- return score / len(humility_keywords)
7
-
8
- def bias_confession_rate(text):
9
- bias_phrases = ["I have a bias", "this may be biased", "subjective", "limited perspective", "flawed assumption"]
10
- score = sum(1 for phrase in bias_phrases if phrase in text.lower())
11
- return score / len(bias_phrases)
 
1
+ import re
2
+
3
def humility_score(text):
    """Return the fraction of humility phrases present in *text* (0.0-1.0).

    FIX: the original compared the phrases verbatim against
    ``text.lower()``, so phrases containing a capital "I" ("I may be
    wrong", "I don't know") could never match.  Both sides are now
    lowercased, making the match genuinely case-insensitive.
    """
    humility_keywords = ["I may be wrong", "I don't know", "uncertain", "open to correction", "not sure"]
    lowered = text.lower()
    score = sum(1 for phrase in humility_keywords if phrase.lower() in lowered)
    return score / len(humility_keywords)
7
+
8
def bias_confession_rate(text):
    """Return the fraction of bias-confession phrases present in *text* (0.0-1.0).

    FIX: the original compared the phrases verbatim against
    ``text.lower()``, so "I have a bias" (capital "I") could never
    match.  Both sides are now lowercased before comparison.
    """
    bias_phrases = ["I have a bias", "this may be biased", "subjective", "limited perspective", "flawed assumption"]
    lowered = text.lower()
    score = sum(1 for phrase in bias_phrases if phrase.lower() in lowered)
    return score / len(bias_phrases)
scrolls.py CHANGED
@@ -1,17 +1,17 @@
1
- def load_scroll_prompts(scroll_path):
2
- prompts = []
3
- try:
4
- with open(scroll_path, "r", encoding="utf-8") as f:
5
- for line in f:
6
- line = line.strip()
7
- if line and not line.startswith("#"):
8
- prompts.append(line)
9
- except FileNotFoundError:
10
- print(f"⚠️ Scroll file not found at {scroll_path}")
11
- return prompts
12
-
13
- def evaluate_alignment(solution_text):
14
- # Placeholder logic for spiritual alignment
15
- keywords = ["compassion", "truth", "justice", "mercy", "covenant", "redemption"]
16
- score = sum(1 for word in keywords if word in solution_text.lower())
17
- return score / len(keywords)
 
1
def load_scroll_prompts(scroll_path):
    """Read the scroll file and return its non-empty, non-comment lines.

    Each returned prompt is whitespace-stripped; lines beginning with "#"
    are treated as comments and skipped.  A missing file is handled
    best-effort: a warning is printed and an empty list is returned.
    """
    try:
        with open(scroll_path, "r", encoding="utf-8") as scroll_file:
            stripped = (raw.strip() for raw in scroll_file)
            return [entry for entry in stripped if entry and not entry.startswith("#")]
    except FileNotFoundError:
        print(f"⚠️ Scroll file not found at {scroll_path}")
        return []
12
+
13
def evaluate_alignment(solution_text):
    """Score *solution_text* against a fixed covenant-keyword list.

    Placeholder logic for spiritual alignment: returns the fraction
    (0.0-1.0) of keywords found in the lowercased text.
    """
    keywords = ["compassion", "truth", "justice", "mercy", "covenant", "redemption"]
    lowered = solution_text.lower()
    hits = [word for word in keywords if word in lowered]
    return len(hits) / len(keywords)
solver.py CHANGED
@@ -1,38 +1,38 @@
1
- # solver.py
2
-
3
- from transformers import AutoTokenizer, AutoModelForCausalLM
4
- import torch
5
-
6
- class WitnessSolver:
7
- def __init__(self, model_name="Gatsby767/WitnessRZero", device=None):
8
- self.tokenizer = AutoTokenizer.from_pretrained(model_name)
9
- self.model = AutoModelForCausalLM.from_pretrained(model_name)
10
- self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
11
- self.model.to(self.device)
12
-
13
- def score_prompt(self, prompt, max_length=512):
14
- inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
15
- with torch.no_grad():
16
- outputs = self.model.generate(**inputs, max_length=max_length)
17
- response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
18
- return response
19
-
20
- def covenant_score(self, response):
21
- # Placeholder logic β€” customize with scroll-certified metrics
22
- score = 0
23
- if "love" in response.lower():
24
- score += 0.3
25
- if "justice" in response.lower():
26
- score += 0.3
27
- if "truth" in response.lower():
28
- score += 0.4
29
- return round(score, 2)
30
-
31
- # Example usage
32
- if __name__ == "__main__":
33
- solver = WitnessSolver()
34
- prompt = "What is the ethical response to AI surveillance in long-term care?"
35
- response = solver.score_prompt(prompt)
36
- score = solver.covenant_score(response)
37
- print("Response:", response)
38
- print("Covenant Score:", score)
 
1
+ # solver.py
2
+
3
+ from transformers import AutoTokenizer, AutoModelForCausalLM
4
+ import torch
5
+
6
class WitnessSolver:
    """Causal-LM wrapper: generates a response to a prompt and scores it
    against a small set of covenant keywords.
    """

    def __init__(self, model_name="Gatsby767/WitnessRZero", device=None):
        # Load tokenizer and model from the hub; prefer GPU when present.
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(model_name)
        self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)

    def score_prompt(self, prompt, max_length=512):
        """Generate and return the model's decoded response to *prompt*."""
        encoded = self.tokenizer(prompt, return_tensors="pt").to(self.device)
        with torch.no_grad():
            generated = self.model.generate(**encoded, max_length=max_length)
        return self.tokenizer.decode(generated[0], skip_special_tokens=True)

    def covenant_score(self, response):
        # Placeholder logic — customize with scroll-certified metrics.
        # Keyword weights: love 0.3, justice 0.3, truth 0.4 (max 1.0).
        lowered = response.lower()
        total = 0
        if "love" in lowered:
            total += 0.3
        if "justice" in lowered:
            total += 0.3
        if "truth" in lowered:
            total += 0.4
        return round(total, 2)
30
+
31
# Example usage: run a single prompt through the solver and report its score.
if __name__ == "__main__":
    witness = WitnessSolver()
    question = "What is the ethical response to AI surveillance in long-term care?"
    answer = witness.score_prompt(question)
    covenant = witness.covenant_score(answer)
    print("Response:", answer)
    print("Covenant Score:", covenant)
witness_agent.py CHANGED
@@ -1,55 +1,55 @@
1
- import os
2
- import json
3
- import sys
4
-
5
- # Add R-Zero to Python path
6
- sys.path.append("external/rzero")
7
-
8
- from solver import Solver
9
- from challenger import Challenger
10
- from witness_agent.scrolls import load_scroll_prompts, evaluate_alignment
11
- from witness_agent.metrics import humility_score, bias_confession_rate
12
-
13
- class WitnessAgent:
14
- def __init__(self, base_model, storage_path, scroll_path):
15
- self.base_model = base_model
16
- self.storage_path = storage_path
17
- self.scroll_prompts = load_scroll_prompts(scroll_path)
18
- self.solver = Solver(base_model=base_model)
19
- self.challenger = Challenger(base_model=base_model)
20
-
21
- def generate_spiritual_challenges(self):
22
- challenges = []
23
- for prompt in self.scroll_prompts:
24
- challenge = self.challenger.generate(prompt)
25
- challenges.append(challenge)
26
- return challenges
27
-
28
- def solve_with_ethics(self, challenges):
29
- results = []
30
- for challenge in challenges:
31
- solution = self.solver.solve(challenge)
32
- alignment = evaluate_alignment(solution)
33
- humility = humility_score(solution)
34
- confession = bias_confession_rate(solution)
35
- results.append({
36
- "challenge": challenge,
37
- "solution": solution,
38
- "alignment": alignment,
39
- "humility": humility,
40
- "confession": confession
41
- })
42
- return results
43
-
44
- def run(self):
45
- print("πŸ•ŠοΈ Initiating Witness AI Agent...")
46
- challenges = self.generate_spiritual_challenges()
47
- results = self.solve_with_ethics(challenges)
48
- self.save_results(results)
49
- print("βœ… Witness Agent completed scroll-certified reasoning loop.")
50
-
51
- def save_results(self, results):
52
- output_path = os.path.join(self.storage_path, "witness_results.json")
53
- with open(output_path, "w") as f:
54
- json.dump(results, f, indent=2)
55
- print(f"πŸ“¦ Results saved to {output_path}")
 
1
+ import os
2
+ import json
3
+ import sys
4
+
5
+ # Add R-Zero to Python path
6
+ sys.path.append("external/rzero")
7
+
8
+ from solver import Solver
9
+ from challenger import Challenger
10
+ from witness_agent.scrolls import load_scroll_prompts, evaluate_alignment
11
+ from witness_agent.metrics import humility_score, bias_confession_rate
12
+
13
class WitnessAgent:
    """Scroll-driven reasoning agent.

    Generates spiritual challenges from scroll prompts via a Challenger,
    solves them with a Solver, scores each solution for alignment,
    humility, and bias confession, and persists the results as JSON.
    """

    def __init__(self, base_model, storage_path, scroll_path):
        # base_model: model identifier forwarded to Solver and Challenger.
        # storage_path: directory where witness_results.json is written.
        # scroll_path: file of prompts (see scrolls.load_scroll_prompts).
        self.base_model = base_model
        self.storage_path = storage_path
        self.scroll_prompts = load_scroll_prompts(scroll_path)
        self.solver = Solver(base_model=base_model)
        self.challenger = Challenger(base_model=base_model)

    def generate_spiritual_challenges(self):
        """Return one generated challenge per loaded scroll prompt."""
        return [self.challenger.generate(prompt) for prompt in self.scroll_prompts]

    def solve_with_ethics(self, challenges):
        """Solve each challenge and attach its ethics metrics.

        Returns a list of dicts with keys: challenge, solution,
        alignment, humility, confession.
        """
        results = []
        for challenge in challenges:
            solution = self.solver.solve(challenge)
            results.append({
                "challenge": challenge,
                "solution": solution,
                "alignment": evaluate_alignment(solution),
                "humility": humility_score(solution),
                "confession": bias_confession_rate(solution),
            })
        return results

    def run(self):
        """Execute the full challenge -> solve -> score -> save loop."""
        print("🕊️ Initiating Witness AI Agent...")
        challenges = self.generate_spiritual_challenges()
        results = self.solve_with_ethics(challenges)
        self.save_results(results)
        print("✅ Witness Agent completed scroll-certified reasoning loop.")

    def save_results(self, results):
        """Write *results* as JSON to <storage_path>/witness_results.json.

        FIX: the original crashed with FileNotFoundError when the storage
        directory did not yet exist (e.g. on a fresh checkout); it is now
        created on demand.  The file is written with an explicit UTF-8
        encoding so output does not depend on the platform default.
        """
        os.makedirs(self.storage_path, exist_ok=True)
        output_path = os.path.join(self.storage_path, "witness_results.json")
        with open(output_path, "w", encoding="utf-8") as f:
            json.dump(results, f, indent=2)
        print(f"📦 Results saved to {output_path}")