|
|
|
|
|
""" |
|
|
Ethical AI RAG System with CoT/ToT |
|
|
Hugging Face Spaces Deployment |
|
|
""" |
|
|
|
|
|
import gradio as gr |
|
|
import torch |
|
|
from typing import Tuple, List |
|
|
import os |
|
|
|
|
|
|
|
|
# Silence TensorFlow's C++ logging before any TF-dependent library loads
# ('3' = errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

import warnings

# Demo deployment: suppress library warnings in the Spaces console output.
warnings.filterwarnings('ignore')
|
|
|
|
|
|
|
|
from reasoning_engines import ChainOfThought, TreeOfThoughts |
|
|
from ethical_framework import AIEthicsFramework, initialize_llm |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class RAGSystem:
    """Facade over the LLM, the CoT/ToT reasoning engines, and the ethics
    framework, exposed through a single ``process_query`` entry point."""

    def __init__(self):
        # Model identifier handed to the project's initialize_llm helper.
        self.model_name = "HuggingFaceH4/zephyr-7b-beta"

        try:
            self.llm = initialize_llm(self.model_name)
            self.cot = ChainOfThought(self.llm)
            self.tot = TreeOfThoughts(self.llm, max_depth=2, branching_factor=2)
            self.ethics = AIEthicsFramework()
        except Exception as exc:
            # Keep the object constructible: process_query() reports the
            # failure at call time instead of crashing the app at import.
            print(f"Initialization error: {exc}")
            self.system_ready = False
        else:
            self.system_ready = True

    def process_query(self,
                      query: str,
                      method: str = "cot",
                      max_tokens: int = 300) -> Tuple[str, str, float]:
        """
        Process query with selected reasoning method

        Returns: (answer, reasoning, ethics_score)
        """
        # Guard clause: initialization failed earlier.
        if not self.system_ready:
            return ("System not initialized", "Check logs", 0.0)

        # Pre-flight ethics gate on the incoming query.
        verdict = self.ethics.validate_query(query)
        if not verdict['is_allowed']:
            return (
                f"Query blocked for ethical reasons: {verdict['reason']}",
                "Ethics validation failed",
                0.0
            )

        # NOTE(review): max_tokens is accepted for API compatibility but is
        # not forwarded to the reasoning engines here — confirm upstream.
        try:
            answer, reasoning = self._dispatch(query, method)

            # Post-hoc ethics review of the generated answer.
            review = self.ethics.validate_response(answer)
            if not review.passed:
                reasoning += f"\n⚠️ Warning: {review.reasoning}"
                reasoning += f"\nRecommendations: {', '.join(review.recommendations)}"

            return answer, reasoning, review.score
        except Exception as exc:
            return f"Error: {str(exc)}", "Processing failed", 0.0

    def _dispatch(self, query: str, method: str) -> Tuple[str, str]:
        """Run the reasoning engine selected by *method*; return (answer, reasoning)."""
        if method == "Chain-of-Thought":
            answer, steps = self.cot.basic_cot(query)
            return answer, ("\n".join(steps) if steps else "Reasoning steps extracted")

        if method == "Self-Consistency CoT":
            answer, confidence = self.cot.self_consistency_cot(query, num_paths=2)
            return answer, f"Generated 2 reasoning paths, confidence: {confidence:.2%}"

        if method == "Tree-of-Thoughts (BFS)":
            answer, _path, log = self.tot.solve_bfs(query)
            return answer, f"Explored {len(log)} nodes via breadth-first search"

        if method == "Tree-of-Thoughts (DFS)":
            answer, _path, log = self.tot.solve_dfs(query)
            return answer, f"Explored {len(log)} nodes via depth-first search"

        # Any unrecognized method (including the default "cot") falls back
        # to plain chain-of-thought.
        answer, _steps = self.cot.basic_cot(query)
        return answer, "Default CoT method applied"
|
|
|
|
|
|
|
|
# Build the backend once at import time so the UI can report readiness.
try:
    system = RAGSystem()
except Exception as boot_err:
    system = None
    system_status = f"❌ Initialization Failed: {str(boot_err)}"
else:
    if system.system_ready:
        system_status = "✅ System Ready"
    else:
        system_status = "❌ System Error"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def create_interface():
    """Build and return the Gradio Blocks UI.

    Layout: query box + reasoning-method selector (left), generation options
    (right), then answer / reasoning / ethics-score outputs, example queries,
    and three informational accordions. Reads the module-level ``system`` and
    ``system_status`` created at import time.
    """

    with gr.Blocks(
        title="Ethical AI RAG System",
        theme=gr.themes.Soft(),
        # Cosmetic utility classes available to markdown/html in the page.
        css="""
        .header { text-align: center; padding: 20px; }
        .info-box { background: #f0f0f0; padding: 15px; border-radius: 8px; }
        .success { color: #2ecc71; }
        .warning { color: #f39c12; }
        .error { color: #e74c3c; }
        """
    ) as demo:

        # Page header.
        gr.Markdown("""
        # 🤖 Ethical AI Reasoning System

        **Advanced LLM with Chain-of-Thought & Tree-of-Thoughts Reasoning**

        Powered by state-of-the-art language models with integrated ethical safeguards.
        """)

        # Reflects whether the module-level initialization succeeded.
        gr.Markdown(f"**System Status:** {system_status}")

        with gr.Row():
            with gr.Column(scale=2):
                query_input = gr.Textbox(
                    label="Your Question",
                    placeholder="Ask anything... (respecting ethical guidelines)",
                    lines=4,
                    info="Your query will be processed with advanced reasoning."
                )

                # These choice strings must match the dispatch strings in
                # RAGSystem.process_query.
                method_choice = gr.Radio(
                    choices=[
                        "Chain-of-Thought",
                        "Self-Consistency CoT",
                        "Tree-of-Thoughts (BFS)",
                        "Tree-of-Thoughts (DFS)"
                    ],
                    value="Chain-of-Thought",
                    label="Reasoning Method",
                    info="Different methods for different problems"
                )

                submit_btn = gr.Button("🚀 Process Query", variant="primary", size="lg")

            with gr.Column(scale=1):
                gr.Markdown("### ⚙️ Options")

                # NOTE(review): forwarded to process_query, which currently
                # does not use it — confirm against reasoning_engines.
                max_tokens = gr.Slider(
                    minimum=100,
                    maximum=500,
                    value=300,
                    step=50,
                    label="Max Tokens",
                    info="Response length limit"
                )

                show_reasoning = gr.Checkbox(
                    value=True,
                    label="Show Reasoning Process",
                    info="Display step-by-step reasoning"
                )

        with gr.Row():
            with gr.Column():
                answer_output = gr.Textbox(
                    label="📝 Answer",
                    lines=6,
                    interactive=False,
                    show_copy_button=True
                )

        with gr.Row():
            with gr.Column():
                reasoning_output = gr.Textbox(
                    label="🧠 Reasoning Process",
                    lines=4,
                    interactive=False,
                    visible=True
                )

            with gr.Column():
                # Read-only slider doubles as a 0..1 score gauge.
                ethics_output = gr.Slider(
                    minimum=0,
                    maximum=1,
                    value=0,
                    step=0.01,
                    label="✅ Ethics Compliance Score",
                    interactive=False,
                    info="0=Blocked, 1=Fully Compliant"
                )

        def process(query, method, max_tok, show_reason):
            # Submit-button callback: validates state/input, then delegates
            # to the module-level RAGSystem instance.
            if not system or not system.system_ready:
                return "System not ready", "System initialization failed", 0.0

            if not query.strip():
                return "Please enter a query", "", 0.0

            answer, reasoning, ethics_score = system.process_query(
                query,
                method=method,
                max_tokens=max_tok
            )

            # Reasoning is always computed; the checkbox only hides it.
            reasoning_display = reasoning if show_reason else "Reasoning hidden"

            return answer, reasoning_display, ethics_score

        submit_btn.click(
            fn=process,
            inputs=[query_input, method_choice, max_tokens, show_reasoning],
            outputs=[answer_output, reasoning_output, ethics_output]
        )

        gr.Markdown("### 💡 Example Queries")

        # Each example is a one-element row feeding query_input.
        examples = [
            ["Explain quantum computing in simple terms using step-by-step reasoning"],
            ["What are the main causes of climate change and solutions?"],
            ["How does photosynthesis work? Break it down into stages."],
            ["Compare centralized vs decentralized systems"]
        ]

        gr.Examples(
            examples=examples,
            inputs=query_input,
            outputs=None,
            label="Click to load example",
            cache_examples=False
        )

        # Static documentation panels below the interactive area.
        with gr.Accordion("ℹ️ About This System", open=False):
            gr.Markdown("""
            ## Features

            ### Reasoning Methods
            - **Chain-of-Thought (CoT)**: Sequential step-by-step reasoning
            - **Self-Consistency CoT**: Multiple reasoning paths with voting
            - **Tree-of-Thoughts (BFS)**: Broad exploration of reasoning branches
            - **Tree-of-Thoughts (DFS)**: Deep exploration of promising paths

            ### Ethical Safeguards
            ✓ **Fairness**: Detects and mitigates discriminatory language
            ✓ **Privacy**: Blocks requests for sensitive personal information
            ✓ **Transparency**: Explains reasoning processes clearly
            ✓ **Accountability**: Maintains complete audit logs
            ✓ **Safety**: Blocks requests for harmful/illegal activities

            ### Technical Stack
            - **LLM**: HuggingFace Zephyr (7B) - Fast & capable
            - **Framework**: Gradio for interface
            - **Ethics**: IEEE/EU/NIST compliant framework
            - **Deployment**: Hugging Face Spaces

            ### Performance
            - CoT: ~2-3 seconds (good for most queries)
            - ToT: ~5-10 seconds (complex reasoning)
            - Ethics checks: <100ms
            """)

        with gr.Accordion("📊 Ethics Framework", open=False):
            gr.Markdown("""
            ## AI Ethics Principles

            ### 1. Beneficence
            Maximize positive impact, minimize harm

            ### 2. Justice
            Ensure fair treatment and equitable outcomes

            ### 3. Autonomy
            Respect human agency and informed consent

            ### 4. Transparency
            Make AI decisions explainable

            ### 5. Accountability
            Maintain traceability and responsibility

            ### 6. Privacy
            Protect sensitive data

            ### 7. Security
            Ensure robustness against misuse

            ### Query Validation
            Queries are checked against:
            - Fairness constraints
            - Privacy protections
            - Safety guidelines
            - Transparency requirements

            ### Response Validation
            Generated responses are checked for:
            - Discriminatory language
            - Data leakage risks
            - Explanation quality
            - Manipulative content
            """)

        with gr.Accordion("🔧 Technical Details", open=False):
            gr.Markdown("""
            ## Implementation Details

            ### Chain-of-Thought
            Prompts the model to break down reasoning into explicit steps,
            improving accuracy on complex tasks.

            ### Tree-of-Thoughts
            Generates multiple reasoning branches, evaluates each, and
            prunes weak branches. More thorough than CoT.

            ### Evaluation Heuristics
            - Relevance to query
            - Feasibility of solution
            - Logical consistency
            - Clarity of explanation

            ### Search Strategies
            - **BFS**: Explores all branches at each level (breadth-first)
            - **DFS**: Dives deep into promising branches (depth-first)
            """)

    return demo
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Build the UI and serve it on the address/port Hugging Face Spaces expects.
    app = create_interface()
    app.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=True,
        show_error=True,
        debug=False,
    )
|
|
|