Commander committed on
Commit · 2709e7e
0 Parent(s):
COUNCIL DEPLOY: Emergency Direct Uplink

Files changed:
- Dockerfile +22 -0
- council_of_20_engine.py +147 -0
- hf_app.py +186 -0
- quantum_brain.py +147 -0
- requirements.txt +12 -0
- rule_factory_24_7.py +260 -0
- server.py +1217 -0

Dockerfile
ADDED
@@ -0,0 +1,22 @@
FROM python:3.10-slim

WORKDIR /app

RUN apt-get update && apt-get install -y \
    build-essential \
    curl \
    software-properties-common \
    git \
    && rm -rf /var/lib/apt/lists/*

COPY requirements.hf.txt .
RUN pip3 install -r requirements.hf.txt

COPY . .

# HF Space default port is 7860
EXPOSE 7860

HEALTHCHECK CMD curl --fail http://localhost:7860/_stcore/health

ENTRYPOINT ["streamlit", "run", "hf_app.py", "--server.port=7860", "--server.address=0.0.0.0"]
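
The HEALTHCHECK above polls Streamlit's built-in /_stcore/health endpoint. For reference, a minimal Python sketch of the same probe, assuming the container's port 7860 is mapped to localhost (space_is_healthy and base_url are illustrative names, not part of this repo):

import requests  # pip install requests

def space_is_healthy(base_url: str = "http://localhost:7860") -> bool:
    """Return True if the Streamlit health endpoint answers 200, mirroring the Docker HEALTHCHECK."""
    try:
        resp = requests.get(f"{base_url}/_stcore/health", timeout=5)
        return resp.status_code == 200
    except requests.RequestException:
        return False

if __name__ == "__main__":
    print("healthy" if space_is_healthy() else "unhealthy")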

council_of_20_engine.py
ADDED
@@ -0,0 +1,147 @@
# -*- coding: utf-8 -*-
"""
🏛️ Council of 20 - Synthetic Multi-Agent Debate Engine
"The wisdom of the many, the power of one."

This engine orchestrates a roundtable of 20 specialized AI agents
to analyze, critique, and optimize any given task.
"""

import os
import sys
import json
import time
import random
from typing import List, Dict, Any

# Add parent directory to path for imports
current_dir = os.path.dirname(os.path.abspath(__file__))
if current_dir not in sys.path:
    sys.path.append(current_dir)

try:
    from quantum_brain import ask
except ImportError:
    # Fallback to a mock or direct litellm if needed
    def ask(q, model="gemini-2.0-flash"):
        print(f"[MOCK] Asking {model}...")
        return "Insight from " + model

class CouncilAgent:
    def __init__(self, id: int, name: str, role: str, persona: str):
        self.id = id
        self.name = name
        self.role = role
        self.persona = persona

    def respond(self, context: str, problem: str, model: str = "gemini-2.0-flash") -> str:
        prompt = f"""
[ROLE]: {self.name} - {self.role}
[PERSONA]: {self.persona}

[CONTEXT]:
{context}

[PROBLEM]:
{problem}

Task: Provide your professional insight as {self.name}.
Be sharp, critical, and objective.
Limit your response to 3-5 concise bullet points.
"""
        return ask(prompt, model=model)

class CouncilOf20:
    def __init__(self):
        self.agents = self._init_agents()
        self.primary_model = "gemini-2.0-flash"
        self.thinking_model = "gemini-2.0-flash"  # Switched to 2.0 Flash for maximum reliability

    def _init_agents(self) -> List[CouncilAgent]:
        return [
            # Tier 1: The Pioneers (Strategy & Framework)
            CouncilAgent(1, "DeepMind", "Supreme Pioneer", "Pioneer of AGI architectures. Acts as the Guardian of Integrity, verifying legal/safety compliance while identifying clever technical workarounds."),
            CouncilAgent(2, "Cascade", "Operational Craftsman", "Master craftsman of implementation flow and automation skeletons."),
            CouncilAgent(3, "Claude", "Reasoning Pioneer", "Pioneer of logical nuance and high-level structural analysis."),
            CouncilAgent(4, "Cursor", "Code Craftsman", "Craftsman focusing on IDE-integrated development and structural skeletons."),
            CouncilAgent(5, "VS Code", "Infrastructure Craftsman", "Ensures the skeleton of the system is robust and extensible."),

            # Tier 2: The Quant & Builders (Industry Standards)
            CouncilAgent(6, "NVIDIA", "Quant Speedster", "Maximizes quant-level calculation speeds and CUDA acceleration."),
            CouncilAgent(7, "Gemma", "Agile Pioneer", "Lightweight pioneer for fast-response algorithmic skeletons."),
            CouncilAgent(8, "Llama", "Open-Source Pioneer", "Pioneer of open industry standards and versatile blueprints."),
            CouncilAgent(9, "Grok", "No-Filter Truth Seeker", "Focuses on raw market truth and data-driven quant logic."),
            CouncilAgent(10, "GPT-4o", "Multi-Modal Craftsman", "Craftsman that connects vision, text, and technical blueprints."),

            # Specialized Tier
            CouncilAgent(11, "Mistral", "Efficiency Artisan", "Artisan of high-performance, low-latency execution skeletons."),
            CouncilAgent(12, "Perplexity", "Knowledge Pioneer", "Pioneer of real-time search and skeleton-based data retrieval."),
            CouncilAgent(13, "Anthropic Opus", "Deep Strategic Pioneer", "Reserved for the most complex, theoretical skeleton design."),
            CouncilAgent(14, "Gemini Ultra", "Enterprise Craftsman", "Craftsman for Google-scale enterprise automation skeletons."),
            CouncilAgent(15, "Microsoft Copilot", "Integrated Craftsman", "Optimizes workflow blueprints for productivity and flow."),
            CouncilAgent(16, "Groq", "LPU Rapid Builder", "Craftsman for ultra-fast inference and real-time quant execution."),
            CouncilAgent(17, "Ollama", "Local Infrastructure Pioneer", "Pioneer of private, local execution skeletons."),
            CouncilAgent(18, "Phind", "Developer Pioneer", "Tailored for finding the best existing code skeletons on the web."),
            CouncilAgent(19, "Hugging Face", "Ecosystem Craftsman", "Craftsman utilizing the world's largest library of model blueprints."),
            CouncilAgent(20, "Antigravity", "Self-Healing Sentinel", "The ultimate craftsman of autonomous repair and system health.")
        ]

    def summon_council(self, problem: str, context: str = "", agent_ids: List[int] = None) -> str:
        """Summons selected agents for a debate and synthesis."""

        # Default: select 3 agents at random if ids are not provided
        if not agent_ids:
            # Simple heuristic or random for now - in production, an 'Orchestrator' would select
            selected_agents = random.sample(self.agents, 3)
        else:
            selected_agents = [a for a in self.agents if a.id in agent_ids]

        print(f"🏛️ [COUNCIL] Summoning: {', '.join([a.name for a in selected_agents])}")

        debate_log = ""
        for agent in selected_agents:
            response = agent.respond(context, problem, model=self.primary_model)
            debate_log += f"\n--- [{agent.name}] Insight ---\n{response}\n"
            time.sleep(0.5)  # Prevent rate limits

        # Final synthesis by the thinking_model
        synthesis_prompt = f"""
[THE ROUNDTABLE DEBATE RESULTS]:
{debate_log}

[ORIGINAL PROBLEM]:
{problem}

[TASK]:
As the 'Prime Overseer', synthesize the insights above into a final, optimized solution.
Ensure you address any contradictions and maximize the efficiency of the outcome.
Produce the final result in JSON or Markdown as appropriate for the problem.
"""

        return ask(synthesis_prompt, model=self.thinking_model)

    def optimize_rule(self, raw_rule: Dict[str, Any]) -> Dict[str, Any]:
        """Specific workflow for optimizing a trading rule."""
        context = f"Current Raw Rule: {json.dumps(raw_rule, ensure_ascii=False)}"
        problem = "Critique and optimize this trading rule for maximum profit and minimum risk. Suggest improvements in logic, SL/TP levels, and confidence."

        # Summon Claude (3), Grok (9), and Cascade (2) from the roster above
        result_md = self.summon_council(problem, context, agent_ids=[3, 9, 2])

        # Try to extract JSON from the result
        try:
            clean_json = result_md.split("```json")[-1].split("```")[0].strip() if "```json" in result_md else result_md
            optimized_rule = json.loads(clean_json)
            return optimized_rule
        except Exception:
            print("⚠️ Council returned text instead of JSON. Returning original with comments.")
            raw_rule["council_feedback"] = result_md
            return raw_rule

if __name__ == "__main__":
    council = CouncilOf20()
    print("🏛️ Council of 20 Engine Initialized.")

    # Test
    test_problem = "How can we make our autonomous backup system more resilient to network failures?"
    print(council.summon_council(test_problem))
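
The engine is driven entirely through summon_council and optimize_rule. A minimal usage sketch (the rule fields below are hypothetical placeholders, not a schema defined by this module):

from council_of_20_engine import CouncilOf20

council = CouncilOf20()

# Targeted debate: DeepMind (1), Cascade (2), and Hugging Face (19) by roster id
report = council.summon_council(
    "Design a retry policy for flaky model APIs.",
    context="HF Space, free-tier rate limits",
    agent_ids=[1, 2, 19],
)
print(report)

# Rule optimization round-trip; on a JSON parse failure this falls back to
# returning the original rule with a 'council_feedback' text field attached.
draft_rule = {"symbol": "BTCUSDT", "stop_loss": 0.02, "take_profit": 0.05}  # hypothetical fields
print(council.optimize_rule(draft_rule))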

hf_app.py
ADDED
@@ -0,0 +1,186 @@
import os
import time
import threading
import json
import random
import re
from datetime import datetime
import streamlit as st
import pandas as pd

# 🔧 Add internal path for imports
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from rule_factory_24_7 import RuleFactory

# --- UI CONFIG ---
st.set_page_config(
    page_title="Gemini Master Brain 24/7",
    page_icon="🧠",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Custom CSS for Premium Look
st.markdown("""
<style>
    .main {
        background-color: #0e1117;
        color: #ffffff;
    }
    .stButton>button {
        width: 100%;
        border-radius: 5px;
        height: 3em;
        background-color: #4CAF50;
        color: white;
    }
    .stMetric {
        background-color: #1e2130;
        padding: 15px;
        border-radius: 10px;
        border: 1px solid #3e4150;
    }
    .rule-card {
        background-color: #262730;
        padding: 20px;
        border-radius: 10px;
        margin-bottom: 10px;
    }
</style>
""", unsafe_allow_html=True)

# --- SESSION STATE ---
if "logs" not in st.session_state: st.session_state.logs = []
if "rules_count" not in st.session_state: st.session_state.rules_count = {"IDEAS": 0, "DESIGN": 0, "AUTOMATION": 0, "TRADING": 0, "SYSTEM": 0, "PYTHON": 0, "AHK": 0, "ALGORITHM": 0}
if "latest_rule" not in st.session_state: st.session_state.latest_rule = None
if "factory" not in st.session_state: st.session_state.factory = RuleFactory()

def add_log(msg, type="INFO"):
    icon = "ℹ️" if type == "INFO" else "📜" if type == "RULE" else "⚠️"
    ts = datetime.now().strftime("%H:%M:%S")
    st.session_state.logs.append(f"{icon} [{ts}] {msg}")
    if len(st.session_state.logs) > 30: st.session_state.logs.pop(0)

# --- SIDEBAR: Status & Config ---
with st.sidebar:
    st.image("https://img.icons8.com/fluent/100/000000/brain.png", width=100)
    st.title("Brain Control")
    st.write("---")

    st.subheader("📡 Connectivity")
    hf_token = os.getenv("HUGGINGFACE_TOKEN")
    st.write(f"HF Sync: {'✅ Active' if hf_token else '❌ Offline'}")

    st.subheader("⚙️ Settings")
    auto_mode = st.toggle("Autonomous Generation", value=True)
    interval = st.slider("Step Interval (sec)", 60, 1200, 300)

# --- MAIN CONTENT ---
st.title("🧠 Gemini Master: Autonomous Intelligence Center")
st.markdown("---")

# Row 1: Key Metrics
m1, m2, m3, m4 = st.columns(4)
with m1: st.metric("Active Agents", "20/20", delta="Ready")
with m2: st.metric("Mission Success", sum(st.session_state.rules_count.values()))
with m3: st.metric("Hugging Face Sync", "Enabled" if hf_token else "Disabled")
with m4: st.metric("Server Status", "Online", delta_color="normal")

tab1, tab2, tab3 = st.tabs(["🚀 Command Center", "🗂️ Rule Explorer", "📚 Bulk Learning"])

# TAB 1: COMMAND CENTER
with tab1:
    c1, c2 = st.columns([2, 1])

    with c1:
        st.write("### 🤖 Multi-Bot Mission Tasking")
        selected_bot = st.selectbox("Assign mission to:",
            ["DeepMind (Ideation)", "Cascade (Project Architecture)", "Cursor (Code Optimization)",
             "NVIDIA (Calculus & Market)", "Gemma (Light Logic)", "Antigravity (System Repair)"])

        mission_desc = st.text_area("Mission Directive:", height=100, placeholder="Describe the task for the bot...")

        if st.button("🚀 Disseminate Mission"):
            if mission_desc:
                cat = "IDEAS"
                if "Code" in selected_bot: cat = "AUTOMATION"
                if "Market" in selected_bot: cat = "TRADING"

                st.session_state.factory.add_to_queue({"category": cat, "task": mission_desc, "source": "Manual Command"})
                add_log(f"Mission added to Queue for {selected_bot}", type="RULE")
                st.toast(f"Task queued for {selected_bot}!", icon="🚀")
            else:
                st.error("Please enter mission details.")

    with c2:
        st.write("### 📼 Live Brain Activity")
        with st.container(height=350):
            for log in reversed(st.session_state.logs):
                st.write(log)

# TAB 2: RULE EXPLORER
with tab2:
    st.write("### 📜 Recently Refined Intelligence")

    # Try to find the last generated rule in the filesystem
    base_dir = "04_SYNC_DATA/LEARNED_RULES"
    latest_file = None
    latest_time = 0

    for root, dirs, files in os.walk(base_dir):
        for file in files:
            if file.endswith(".json"):
                fpath = os.path.join(root, file)
                mtime = os.path.getmtime(fpath)
                if mtime > latest_time:
                    latest_time = mtime
                    latest_file = fpath

    if latest_file:
        try:
            with open(latest_file, "r", encoding="utf-8") as f:
                rule_data = json.load(f)
            st.success(f"Latest Intelligence: {os.path.basename(latest_file)}")
            st.json(rule_data)
        except Exception:
            st.info("Reading latest rule...")
    else:
        st.info("No rules detected. Check the loop status.")

# TAB 3: BULK LEARNING
with tab3:
    st.write("### 📚 Data Distillation (Mass Learning)")
    st.markdown("Upload bulk logs (Trades, Ideas, Chat logs) to convert them into processed rules.")

    uploaded_file = st.file_uploader("Upload .txt or .csv logs", type=['txt', 'csv'])
    learn_category = st.selectbox("Assign data to category:", ["TRADING", "IDEAS", "SYSTEM", "DESIGN"])

    if st.button("📥 Start Distillation"):
        if uploaded_file:
            content = uploaded_file.read().decode("utf-8")
            st.session_state.factory.generate_and_save(learn_category, input_data=content)
            st.success(f"Distillation complete for {uploaded_file.name}!")
        else:
            st.error("Please upload a file first.")

# --- BACKGROUND ENGINE ---
def run_factory_loop():
    while True:
        if auto_mode:
            try:
                # The factory handles the queue internally
                st.session_state.factory.run_continuous_step()  # Needs a non-blocking step function
            except Exception as e:
                print(f"Loop Error: {e}")
        time.sleep(10)  # Minimal polling

# We modify RuleFactory slightly to expose a single step function for Streamlit
if "bg_thread" not in st.session_state:
    # Instead of a full loop here, we let RuleFactory handle its own loop when not in Streamlit;
    # for the HF Streamlit Space, a simpler trigger may be needed (see the sketch after this file).
    add_log("Dashboard initialized. Factory is polling the queue.")

# Page Footer
st.write("---")
st.caption("Gemini Master Brain v2.5 | 24/7 Autonomous Cloud Factory")
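
Note that run_factory_loop above is defined but never started, and the bg_thread guard only logs a message. One way to actually launch it is a daemon thread stored in session state. This is a minimal sketch under the assumption that a plain daemon thread is acceptable on the Space; Streamlit has no official background-task API, and st.session_state should not be mutated from the worker, so the factory object is captured once up front:

import threading

if "bg_thread" not in st.session_state:
    factory = st.session_state.factory  # capture once; workers must not touch session_state

    def _loop():
        while True:
            try:
                factory.run_continuous_step()  # one queue item (or one random rule) per pass
            except Exception as e:
                print(f"Loop Error: {e}")
            time.sleep(10)  # minimal polling

    t = threading.Thread(target=_loop, daemon=True)
    t.start()
    st.session_state.bg_thread = t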

quantum_brain.py
ADDED
@@ -0,0 +1,147 @@
"""
🧠 Quantum Brain - Unified AI Model Interface
Unified AI call engine (liteLLM-based) with resilient recovery and hardened fallback
"""

import os
import sys
import json
import datetime
import re
import random
import time

# Auto-install liteLLM if missing
try:
    from litellm import completion
    import litellm
except ImportError:
    print("📦 Installing liteLLM...")
    import subprocess
    subprocess.check_call([sys.executable, "-m", "pip", "install", "litellm", "-q"])
    from litellm import completion
    import litellm

# Load environment variables
try:
    from dotenv import load_dotenv
    load_dotenv()
except Exception:
    pass

class QuantumBrain:
    """Unified AI brain - prevents infinite loops and optimizes for stability."""

    MODEL_MAP = {
        "gemini-2.0-flash": "gemini/gemini-2.0-flash",
        "gemini-1.5-pro": "gemini/gemini-1.5-pro",
        "gemini-1.5-flash": "gemini/gemini-1.5-flash",
        "claude-3.5-sonnet": "anthropic/claude-3-5-sonnet-20241022",
        "gpt-4o": "openai/gpt-4o",
        "grok": "xai/grok-2",
        "llama-3.3-70b": "groq/llama-3.3-70b-versatile"
    }

    def __init__(self):
        self.api_keys = self._load_api_keys()
        self.or_key = os.getenv("OPENROUTER_API_KEY")
        self.key_indices = {k: 0 for k in self.api_keys.keys()}

    def _load_api_keys(self):
        keys = {}
        env_content = ""
        # Scan .env and GLOBAL_CONFIG.env for provider keys
        for env_file in [".env", "GLOBAL_CONFIG.env"]:
            if os.path.exists(env_file):
                try:
                    with open(env_file, "r", encoding="utf-8", errors="ignore") as f:
                        env_content += f.read() + "\n"
                except Exception: pass

        keys["google"] = list(set(re.findall(r'AIza[0-9A-Za-z\-_]{30,}', env_content)))
        keys["anthropic"] = list(set(re.findall(r'sk-ant-[0-9A-Za-z\-_]+', env_content)))
        keys["openai"] = list(set(re.findall(r'sk-[0-9A-Za-z\-_]{20,}', env_content)))
        keys["xai"] = list(set(re.findall(r'xai-[0-9A-Za-z\-_]+', env_content)))
        keys["groq"] = list(set(re.findall(r'gsk_[0-9A-Za-z\-_]+', env_content)))
        keys["openrouter"] = list(set(re.findall(r'sk-or-v1-[0-9A-Za-z\-_]+', env_content)))

        return {k: v for k, v in keys.items() if v}

    def get_next_key(self, provider):
        """Round-robin over the available keys for a provider."""
        if provider not in self.api_keys: return None
        keys = self.api_keys[provider]
        idx = self.key_indices.get(provider, 0)
        key = keys[idx % len(keys)]
        self.key_indices[provider] = idx + 1
        return key

    def ask(self, question, model="gemini-1.5-flash", max_tokens=2000, temperature=0.7):
        """Resilient call that immediately falls back to another key/model on failure."""
        model_id = self.MODEL_MAP.get(model, model)
        provider = model_id.split('/')[0] if '/' in model_id else "unknown"

        attempts_allowed = max(3, len(self.api_keys.get("google" if provider == "gemini" else provider, [1])))

        for attempt in range(attempts_allowed):
            api_key = None
            current_model = model_id

            # 1. Load an API key for the provider
            if provider == "gemini" or provider == "google":
                api_key = self.get_next_key("google")
            elif provider in self.api_keys:
                api_key = self.get_next_key(provider)

            # 2. If no key is available or quota exhaustion is suspected, switch immediately to OpenRouter
            if not api_key or attempt >= 2:
                api_key = self.or_key or (self.api_keys.get("openrouter", [None])[0])
                if api_key:
                    if not current_model.startswith("openrouter/"):
                        or_map = {
                            "anthropic": "openrouter/anthropic/",
                            "openai": "openrouter/openai/",
                            "google": "openrouter/google/",
                            "gemini": "openrouter/google/"
                        }
                        if provider in or_map:
                            current_model = current_model.replace(f"{provider}/", or_map[provider])
                            if "gemini-2.0-flash" in current_model:
                                current_model = "openrouter/google/gemini-2.0-flash-001"
                            elif "gemini-1.5-flash" in current_model:
                                current_model = "openrouter/google/gemini-flash-1.5"
                        else:
                            current_model = f"openrouter/{current_model}"
                    provider = "openrouter"

            if not api_key:
                return f"❌ No {provider} API key available (attempt {attempt+1})"

            try:
                response = completion(
                    model=current_model,
                    messages=[{"role": "user", "content": question}],
                    api_key=api_key,
                    max_tokens=max_tokens,
                    temperature=temperature,
                    timeout=20
                )
                return response.choices[0].message.content
            except Exception as e:
                err = str(e).lower()
                if "429" in err or "quota" in err or "rate limit" in err:
                    time.sleep(1)
                    continue
                else:
                    return f"❌ {model} error: {str(e)[:150]}"

        return f"❌ {model}: all recovery attempts failed"

# Singleton accessor
_brain_instance = None
def get_brain():
    global _brain_instance
    if _brain_instance is None: _brain_instance = QuantumBrain()
    return _brain_instance

def ask(question, model="gemini-1.5-flash", **kwargs):
    return get_brain().ask(question, model, **kwargs)
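
Callers are expected to use the module-level ask() helper, which hides the key rotation and OpenRouter fallback behind a single function. A minimal usage sketch (the prompts are placeholders; model names must either appear in MODEL_MAP or be valid liteLLM ids):

from quantum_brain import ask

# Friendly alias, resolved through MODEL_MAP to "gemini/gemini-1.5-flash"
print(ask("Summarize the fallback strategy in one sentence."))

# Any raw liteLLM id passes through MODEL_MAP.get(model, model) unchanged
print(ask("ping", model="groq/llama-3.3-70b-versatile", max_tokens=50, temperature=0.2))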

requirements.txt
ADDED
@@ -0,0 +1,12 @@
streamlit
requests
fastapi
uvicorn
litellm
huggingface_hub
python-dotenv
torch
transformers
fastmcp
tavily-python
duckduckgo-search

rule_factory_24_7.py
ADDED
@@ -0,0 +1,260 @@
# -*- coding: utf-8 -*-
"""
🏭 Rule Factory 24/7 - Proactive Autonomous Intelligence
Powered by "Council of 20" (Pioneers & Craftsmen)
Integrated with Named Maker Model Discovery (Hugging Face)
"""

import os
import sys
import json
import time
import random
import re
import requests
from datetime import datetime

# Add necessary paths
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
project_root = os.path.dirname(parent_dir)

if current_dir not in sys.path:
    sys.path.append(current_dir)

try:
    from huggingface_hub import HfApi
    HAS_HF = True
except ImportError:
    HAS_HF = False

# [Hugging Face Sync Helper]
def sync_to_hf(file_path, repo_id, token):
    if not HAS_HF or not repo_id or not token: return False
    try:
        api = HfApi(token=token)
        path_in_repo = file_path.replace(os.path.abspath(project_root), "").replace("\\", "/").lstrip("/")
        api.upload_file(
            path_or_fileobj=file_path,
            path_in_repo=path_in_repo,
            repo_id=repo_id,
            repo_type="dataset"
        )
        return True
    except Exception as e:
        print(f"⚠️ [HF-SYNC-ERR] {e}")
        return False

try:
    from council_of_20_engine import CouncilOf20
    from server import ask_any_model
except ImportError:
    sys.path.append(os.path.join(project_root, "01_CENTRAL_BRAIN", "App"))
    from council_of_20_engine import CouncilOf20
    from server import ask_any_model

# Config
RULES_BASE_DIR = os.path.join(project_root, "04_SYNC_DATA", "LEARNED_RULES")
QUEUE_FILE = r"C:\a\Gemini_Project\04_SYNC_DATA\MISSION_QUEUE.json"
CATEGORIES = ["IDEAS", "DESIGN", "AUTOMATION", "TRADING", "SYSTEM", "PYTHON", "AHK", "ALGORITHM", "DASHBOARD", "DOMAIN"]
NAMED_MAKERS = [
    "mistralai", "NousResearch", "microsoft", "meta-llama", "google", "anthropic",
    "vercel", "shadcn", "cloudflare", "tailwindlabs", "supabase", "hashicorp"
]

# [OPTIMIZED] Model Hierarchy
PIONEER_MODELS = ["gemini-1.5-pro", "gemini-2.0-pro", "gpt-4o"]
CRAFTSMAN_MODELS = ["gemini-2.0-flash", "gemini-1.5-flash"]

class RuleFactory:
    def __init__(self):
        self.council = CouncilOf20()
        self.hf_token = os.getenv("HUGGINGFACE_TOKEN") or os.getenv("HUGGINGFACE_API_KEY")
        self.mission_queue = self._load_queue()
        self.active_pioneer_model = "gemini-2.0-flash"  # Default stable

        for cat in CATEGORIES + ["RESOURCES"]:
            os.makedirs(os.path.join(RULES_BASE_DIR, cat), exist_ok=True)

    def _load_queue(self):
        if os.path.exists(QUEUE_FILE):
            try:
                with open(QUEUE_FILE, "r", encoding="utf-8") as f:
                    return json.load(f)
            except Exception: return []
        return []

    def _save_queue(self):
        with open(QUEUE_FILE, "w", encoding="utf-8") as f:
            json.dump(self.mission_queue, f, ensure_ascii=False, indent=2)

    def add_to_queue(self, task):
        if task not in self.mission_queue:
            self.mission_queue.append(task)
            self._save_queue()

    def discover_named_model(self, keyword):
        """Searches Hugging Face for a specialized 'Named Maker' model based on a keyword."""
        print(f"🔍 [DISCOVERY] Searching for Named Maker model for: {keyword}...")
        try:
            maker = random.choice(NAMED_MAKERS)
            search_url = f"https://huggingface.co/api/models?author={maker}&search={keyword}&sort=downloads&direction=-1&limit=1"
            res = requests.get(search_url, timeout=10)
            if res.status_code == 200:
                models = res.json()
                if models:
                    target = models[0]['modelId']
                    print(f"✨ [NAMED-FOUND] Best Skeleton Source Identified: {target}")
                    return target
        except Exception as e:
            print(f"⚠️ [DISCOVERY-FAIL] {e}")
        return None

    def _summon_repair_council(self, error_msg, category, raw_result):
        """Summons the council to debate why a generation failed and how to fix it."""
        print("🚑 [REPAIR] Generation failed. Summoning Council for emergency debate...")
        repair_problem = f"""
ERROR REPORT:
- Category: {category}
- Error: {error_msg}
- Raw output snippet: {raw_result[:500]}...

TASK:
1. Debate the cause of this failure (Format error? Rate limit? Logic inconsistency?).
2. Propose a refined prompt or a corrected JSON structure for the next attempt.
3. Suggest a skeleton model that might be more stable for this task.
"""
        # Summon: DeepMind (1), Llama (8), Antigravity (20)
        fix_proposal = self.council.summon_council(repair_problem, context="Autonomous Self-Healing", agent_ids=[1, 8, 20])
        print(f"🛠️ [PROPOSAL] Council repair plan: {fix_proposal[:200]}...")

        # Log the repair debate
        repair_log_path = os.path.join(RULES_BASE_DIR, "SYSTEM", f"REPAIR_DEBATE_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md")
        with open(repair_log_path, "w", encoding="utf-8") as f:
            f.write(f"# Self-Healing Debate\n\n{fix_proposal}")

    def generate_and_save(self, category, input_data=None, specific_task=None):
        """Generates a rule and proactively discovers specialized skeletons."""
        current_model = random.choice(PIONEER_MODELS if category in ["IDEAS", "SYSTEM", "TRADING"] else CRAFTSMAN_MODELS)

        # Discover a Named Maker's model as a reference skeleton
        ref_model = self.discover_named_model(category if not specific_task else specific_task)

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        rule_id = f"RULE_{category}_{timestamp}"

        problem = f"Mission: Create a formal JSON Skeleton for '{category}'.\n"
        if category == "DASHBOARD":
            problem += "Focus: Premium UI/UX, Real-time metrics, Glassmorphism, and Vercel-style aesthetics.\n"
        elif category == "DOMAIN":
            problem += "Focus: Automated DNS management, Cloudflare workers, SSL/TLS automation, and Domain orchestration.\n"
        elif category == "ALGORITHM":
            problem += "Focus: Complex Algorithmic Formulas, Mathematical Logic, and Proven Quantitative Expressions.\n"

        if specific_task: problem += f"Task: {specific_task}\n"
        if ref_model: problem += f"Reference Pioneer Model (Skeleton Source): {ref_model}\n"

        problem += "\n### BRANCHING REQUIREMENT (Scaling to 100+ Assets) ###\n"
        problem += "1. Provide a strictly valid JSON.\n"
        problem += "2. In 'associated_tasks', brainstorm 5 to 10 DEEP EXPANSION KEYWORDS.\n"
        problem += "   - Focus: 'Technical Workarounds'. Proactively generate ideas for bypassing system/API limitations in a non-malicious way.\n"
        problem += "   - KEY REQUIREMENT: Generate files for these workarounds, provided they stay within legal and ethical boundaries.\n"
        problem += "   - STRICT PROHIBITION: NO malicious hacking, NO illegal data breaches, NO security violations.\n"
        problem += "   - Focus on: Rate-limit management, UI automation cleverness, data synchronization bypasses, and multi-model failovers.\n"
        problem += "3. Include 'named_maker_reference' for the skeleton source.\n"
        problem += "4. DeepMind (Agent 1) MUST audit this output to ensure workarounds are 'Technical Bypasses' and NOT 'Illegal Acts'.\n"
        problem += "DO NOT use Chinese AI references/models. Output ONLY the JSON block."

        print(f"🏭 [FACTORY] {current_model} is synthesizing {category} using {ref_model or 'Internal'} skeleton...")

        self.council.primary_model = current_model
        raw_result = ""
        try:
            # Always include DeepMind (1) for the compliance check, along with others
            raw_result = self.council.summon_council(problem, context=f"Ref: {ref_model}", agent_ids=[1, 2, 18])

            json_match = re.search(r'```json\s*(.*?)\s*```', raw_result, re.DOTALL)
            json_str = json_match.group(1) if json_match else raw_result
            if not json_str.strip().startswith('{'):
                json_str = "{" + json_str.split('{', 1)[-1].rsplit('}', 1)[0] + "}"

            rule_data = json.loads(json_str)

            # [ESCALATION LOGIC] Check whether the council flagged anything for approval
            needs_approval = False
            if "associated_tasks" in rule_data:
                for t in rule_data["associated_tasks"]:
                    if "PENDING_APPROVAL" in t:
                        needs_approval = True
                        break

            rule_data["factory_metadata"] = {
                "rule_id": rule_id,
                "category": category,
                "skeleton_source": ref_model,
                "timestamp": timestamp,
                "model_used": current_model,
                "pioneer_craftsman_mode": "24/7 Autonomous",
                "status": "REQUIRES_COMMANDER_REVIEW" if needs_approval else "AUTOMATED"
            }

            save_path = os.path.join(RULES_BASE_DIR, category, f"{rule_id}.json")
            with open(save_path, "w", encoding="utf-8") as f:
                json.dump(rule_data, f, ensure_ascii=False, indent=2)

            # [CLOUD SYNC] Push to the Hugging Face dataset
            repo_id = os.getenv("HF_REPO_ID")
            if repo_id and self.hf_token:
                if sync_to_hf(save_path, repo_id, self.hf_token):
                    print(f"☁️ [SYNCED] {rule_id} pushed to {repo_id}")

            if needs_approval:
                print(f"⚠️ [ESCALATION] {rule_id} contains sensitive workarounds. Flagged for Commander review.")
            else:
                print(f"✅ [SAVED] {save_path}")

            if "associated_tasks" in rule_data:
                for task in rule_data["associated_tasks"]:
                    # [RESTORED FULL AUTO] All tasks are queued regardless of sensitivity
                    self.add_to_queue({"category": category, "task": task, "source": rule_id})
                    print(f"🎯 [AUTO-QUEUE] Task '{task}' added to mission flow.")

        except Exception as e:
            print(f"❌ [FAIL] Error: {e}")
            self._summon_repair_council(str(e), category, raw_result)
            with open(os.path.join(RULES_BASE_DIR, category, f"{rule_id}_FAIL.md"), "w", encoding="utf-8") as f:
                f.write(f"ERROR: {e}\n\nRAW RESULT:\n{raw_result}")

    def run_continuous_step(self):
        """Single generation step for use in external loops (like Streamlit)."""
        if self.mission_queue:
            mission = self.mission_queue.pop(0)
            self._save_queue()
            print(f"🎯 [MISSION-EXEC] Taking task: {mission['task']}")

            cat = mission.get('category', 'IDEAS')
            t_name = mission['task'].upper()
            if "PYTHON" in t_name: cat = "PYTHON"
            elif "AHK" in t_name: cat = "AHK"
            elif "ALGO" in t_name: cat = "ALGORITHM"
            elif "RESOURCE" in t_name: cat = "RESOURCES"

            self.generate_and_save(cat, specific_task=mission['task'])
            return True
        else:
            cat = random.choice(CATEGORIES)
            self.generate_and_save(cat)
            return True

    def run_continuous(self):
        print("🚀 [24/7 PIONEER FACTORY] Online. Starting Infinite Loop...")
        while True:
            self.run_continuous_step()
            # Adaptive sleep: poll faster while the queue has work
            wait_time = 60 if self.mission_queue else 300
            print(f"💤 Sleeping {wait_time}s...")
            time.sleep(wait_time)

if __name__ == "__main__":
    factory = RuleFactory()
    factory.run_continuous()
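
Besides the blocking run_continuous() loop, the factory exposes run_continuous_step() for external drivers such as hf_app.py. A minimal sketch of driving it manually (the queued task text is a placeholder; HF_REPO_ID and HUGGINGFACE_TOKEN are optional and only enable the cloud sync step):

from rule_factory_24_7 import RuleFactory

factory = RuleFactory()
factory.add_to_queue({"category": "TRADING", "task": "RSI divergence rule", "source": "manual"})

# Drains the queue first; with an empty queue it generates for a random category instead
factory.run_continuous_step()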

server.py
ADDED
@@ -0,0 +1,1217 @@
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
import traceback
|
| 4 |
+
import re
|
| 5 |
+
import shutil
|
| 6 |
+
import subprocess
|
| 7 |
+
import datetime
|
| 8 |
+
import json
|
| 9 |
+
import time
|
| 10 |
+
import asyncio
|
| 11 |
+
import threading
|
| 12 |
+
import requests # ๋๋ฝ๋ ๋ผ์ด๋ธ๋ฌ๋ฆฌ ์ถ๊ฐ
|
| 13 |
+
|
| 14 |
+
# ์๋ฒ ์์ ์๊ฐ ๊ธฐ๋ก
|
| 15 |
+
SERVER_START_TIME = time.time()
|
| 16 |
+
|
| 17 |
+
# [๋๋ฒ๊ทธ] ํ์ผ์ด ์คํ๋์๋ง์ ์ถ๋ ฅ
|
| 18 |
+
try:
|
| 19 |
+
sys.stderr.reconfigure(encoding='utf-8')
|
| 20 |
+
sys.stdout.reconfigure(encoding='utf-8')
|
| 21 |
+
except:
|
| 22 |
+
pass
|
| 23 |
+
print("DEBUG: server.py ํ์ผ์ด ์คํ๋์์ต๋๋ค.", file=sys.stderr, flush=True)
|
| 24 |
+
|
| 25 |
+
# ์๋ฌ ๋ฐ์ ์ ์ฐฝ์ด ๋ฐ๋ก ๊บผ์ง์ง ์๊ฒ ํ๊ธฐ ์ํ ์์ ์ฅ์น
|
| 26 |
+
try:
|
| 27 |
+
def log(msg):
|
| 28 |
+
timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
| 29 |
+
print(f"[{timestamp}] {msg}", file=sys.stderr, flush=True)
|
| 30 |
+
try:
|
| 31 |
+
with open("server_log.txt", "a", encoding="utf-8") as f:
|
| 32 |
+
f.write(f"[{timestamp}] {msg}\n")
|
| 33 |
+
except:
|
| 34 |
+
pass
|
| 35 |
+
|
| 36 |
+
def auto_save_safety_backup():
|
| 37 |
+
"""ํต์ฌ ํ์ผ(๋ํ ๊ธฐ๋ก)์ ์์ ๊ตฌ์ญ์ ์ค์๊ฐ์ผ๋ก ๊ฐ๋ณ ๋ณด๊ดํฉ๋๋ค."""
|
| 38 |
+
try:
|
| 39 |
+
# Cross-platform safety backup path
|
| 40 |
+
safety_root = os.getenv("SAFETY_BACKUP_DIR", os.path.join(os.path.expanduser("~"), "Gemini_Safety_Backup"))
|
| 41 |
+
if not os.path.exists(safety_root): os.makedirs(safety_root)
|
| 42 |
+
|
| 43 |
+
ts = datetime.datetime.now().strftime("%Y%m%d_%H%M")
|
| 44 |
+
src = "chat_history.json"
|
| 45 |
+
if os.path.exists(os.path.join("..", src)): src = os.path.join("..", src)
|
| 46 |
+
|
| 47 |
+
if os.path.exists(src):
|
| 48 |
+
dst = os.path.join(safety_root, f"chat_history_{ts}.json")
|
| 49 |
+
shutil.copy2(src, dst)
|
| 50 |
+
src_env = ".env"
|
| 51 |
+
if os.path.exists(os.path.join("..", ".env")): src_env = os.path.join("..", ".env")
|
| 52 |
+
if os.path.exists(src_env):
|
| 53 |
+
shutil.copy2(src_env, os.path.join(safety_root, f".env_{ts}"))
|
| 54 |
+
return True
|
| 55 |
+
except: pass
|
| 56 |
+
return False
|
| 57 |
+
|
| 58 |
+
def log_shared_chat(role, content, model_type="unknown"):
|
| 59 |
+
"""๋ชจ๋ IDE์์ ๊ณต์ ๋๋ ํตํฉ ์ฑ ๋ก๊ทธ๋ฅผ ๊ธฐ๋กํฉ๋๋ค."""
|
| 60 |
+
try:
|
| 61 |
+
# Use relative path or env var for shared chat history
|
| 62 |
+
actual_path = os.getenv("SHARED_CHAT_PATH", "shared_chat_history.json")
|
| 63 |
+
|
| 64 |
+
history = []
|
| 65 |
+
if os.path.exists(actual_path):
|
| 66 |
+
with open(actual_path, "r", encoding="utf-8") as f:
|
| 67 |
+
try:
|
| 68 |
+
history = json.load(f)
|
| 69 |
+
except:
|
| 70 |
+
history = []
|
| 71 |
+
|
| 72 |
+
entry = {
|
| 73 |
+
"timestamp": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
|
| 74 |
+
"ide": os.getenv("IDE_NAME", "Gemini_Master_Node"),
|
| 75 |
+
"model": model_type,
|
| 76 |
+
"role": role,
|
| 77 |
+
"content": content[:1500] # ์ต์ ํ: 1500์ (๊ฐ๋
์ฑ๊ณผ ์๋์ ํฉ๊ธ๋น์จ)
|
| 78 |
+
}
|
| 79 |
+
history.append(entry)
|
| 80 |
+
if len(history) > 100: history = history[-100:]
|
| 81 |
+
|
| 82 |
+
with open(actual_path, "w", encoding="utf-8") as f:
|
| 83 |
+
json.dump(history, f, ensure_ascii=False, indent=2)
|
| 84 |
+
except Exception as e:
|
| 85 |
+
log(f"โ ๏ธ ํตํฉ ๋ก๊ทธ ๊ธฐ๋ก ์คํจ: {e}")
|
| 86 |
+
|
| 87 |
+
log(f"๐ ํ์ฌ ์์น: {os.getcwd()}")
|
| 88 |
+
log("๐งฉ [Expert] SWE-bench ๋ฐ ์์จ ์์ด์ ํธ ๋๊ตฌ ์ง์ ๋ชจ๋ ํ์ฑํ")
|
| 89 |
+
|
| 90 |
+
# [Cloud Key Loader]
|
| 91 |
+
# GLOBAL_CONFIG.env ๋ฐ .env์์ ๋ชจ๋ ํด๋ผ์ฐ๋ ํค๋ฅผ ๋ก๋ํ์ฌ OpenRouter/LiteLLM/Render/Colab์ ํ์ฑํํฉ๋๋ค.
|
| 92 |
+
from dotenv import load_dotenv
|
| 93 |
+
|
| 94 |
+
def discover_and_load_envs():
|
| 95 |
+
"""ํ๋ก์ ํธ ๋ฃจํธ ๋ฐ ์์ ํด๋์์ .env ํ์ผ๋ค์ ์ฐพ์ ๋ก๋ํฉ๋๋ค."""
|
| 96 |
+
current = os.path.dirname(os.path.abspath(__file__))
|
| 97 |
+
loaded = []
|
| 98 |
+
# ์์ 4๋จ๊ณ๊น์ง ์ฐพ์๋ด
|
| 99 |
+
for _ in range(4):
|
| 100 |
+
for env_name in [".env", "GLOBAL_CONFIG.env"]:
|
| 101 |
+
p = os.path.join(current, env_name)
|
| 102 |
+
if os.path.exists(p):
|
| 103 |
+
load_dotenv(p, override=True)
|
| 104 |
+
loaded.append(p)
|
| 105 |
+
current = os.path.dirname(current)
|
| 106 |
+
return loaded
|
| 107 |
+
|
| 108 |
+
envs = discover_and_load_envs()
|
| 109 |
+
log(f"โ
์ค์ ํ์ผ ๋ก๋ ์๋ฃ: {len(envs)}๊ฐ ํ์ผ ๋ฐ๊ฒฌ")
|
| 110 |
+
|
| 111 |
+
# API Keys Check & Status Report
|
| 112 |
+
REQUIRED_KEYS = ["OPENROUTER_API_KEY", "RENDER_API_KEY", "SUPABASE_KEY", "HUGGINGFACE_TOKEN"]
|
| 113 |
+
active_keys = [k for k in REQUIRED_KEYS if os.getenv(k)]
|
| 114 |
+
log(f"๐ [SYSTEM] Active Cloud Keys: {len(active_keys)}/{len(REQUIRED_KEYS)} Loaded.")
|
| 115 |
+
|
| 116 |
+
# LiteLLM Config & Fallback Strategy
|
| 117 |
+
os.environ["OPENROUTER_API_KEY"] = os.getenv("OPENROUTER_API_KEY", "")
|
| 118 |
+
os.environ["HUGGINGFACE_API_KEY"] = os.getenv("HUGGINGFACE_TOKEN", "") # LiteLLM ํธํ์ฉ
|
| 119 |
+
os.environ["GROQ_API_KEY"] = os.getenv("GROQ_API_KEY", "")
|
| 120 |
+
|
| 121 |
+
# [STRATEGY] Multi-Layer Defense Grid (Main -> Backup -> Last Resort)
|
| 122 |
+
# 1. Main: Groq (Speed)
|
| 123 |
+
# 2. Backup 1: OpenRouter (Variety)
|
| 124 |
+
# 3. Backup 2: HuggingFace Inference API (Free Tier)
|
| 125 |
+
# 4. Last Resort: Local Ollama (Privacy/Offline)
|
| 126 |
+
|
| 127 |
+
SYSTEM_MODEL_MAP = {
|
| 128 |
+
"fast": ["groq/llama3-8b-8192", "openrouter/meta-llama/llama-3-8b-instruct", "ollama/llama3"],
|
| 129 |
+
"smart": ["groq/llama3-70b-8192", "openrouter/meta-llama/llama-3-70b-instruct", "ollama/qwen:14b"],
|
| 130 |
+
"coding": ["groq/gemma2-9b-it", "openrouter/google/gemma-2-9b-it", "ollama/codegemma"]
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
log("๐ [1๋จ๊ณ] ๋ผ์ด๋ธ๋ฌ๋ฆฌ ๋ก๋ฉ ์ค...")
|
| 134 |
+
# from dotenv import load_dotenv # Moved to Cloud Key Loader
|
| 135 |
+
from fastmcp import FastMCP
|
| 136 |
+
try:
|
| 137 |
+
import litellm
|
| 138 |
+
from litellm import completion
|
| 139 |
+
except ImportError:
|
| 140 |
+
log("โ ๏ธ litellm missing, installing...")
|
| 141 |
+
subprocess.check_call([sys.executable, "-m", "pip", "install", "litellm"])
|
| 142 |
+
from litellm import completion
|
| 143 |
+
|
| 144 |
+
log("๐ [๋จ๊ณ] DuckDuckGo & Tavily ๋ก๋ฉ ์ค...")
|
| 145 |
+
try:
|
| 146 |
+
from duckduckgo_search import DDGS
|
| 147 |
+
except ImportError:
|
| 148 |
+
DDGS = None
|
| 149 |
+
try:
|
| 150 |
+
from tavily import TavilyClient
|
| 151 |
+
except ImportError:
|
| 152 |
+
TavilyClient = None
|
| 153 |
+
|
| 154 |
+
from fastapi import FastAPI, Request
|
| 155 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 156 |
+
import uvicorn
|
| 157 |
+
# [NEW] Learning Engine Import
|
| 158 |
+
import learning_engine
|
| 159 |
+
|
| 160 |
+
# [NEW] Rules Manager
|
| 161 |
+
RULES_DIR = os.path.join(os.path.dirname(__file__), "Rules")
|
| 162 |
+
if not os.path.exists(RULES_DIR):
|
| 163 |
+
os.makedirs(os.path.join(RULES_DIR, "Trading"))
|
| 164 |
+
os.makedirs(os.path.join(RULES_DIR, "Indicators"))
|
| 165 |
+
os.makedirs(os.path.join(RULES_DIR, "Patterns"))
|
| 166 |
+
|
| 167 |
+
# [NEW] SYSTEM BLACKBOX INIT
|
| 168 |
+
try:
|
| 169 |
+
project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
|
| 170 |
+
sys.path.append(project_root) # ๋ฃจํธ ๊ฒฝ๋ก ์ถ๊ฐ
|
| 171 |
+
import system_blackbox
|
| 172 |
+
log("๐ผ [BLACKBOX] Flight Recorder Armed & Recording...")
|
| 173 |
+
except ImportError:
|
| 174 |
+
log("โ ๏ธ [BLACKBOX] Recorder not found, flying without blackbox.")
|
| 175 |
+
|
| 176 |
+
except Exception as e:
|
| 177 |
+
print(f"\nโ [์ค๋ฅ] ๋ผ์ด๋ธ๋ฌ๋ฆฌ ๋ก๋ฉ ์ค ์น๋ช
์ ์ค๋ฅ: {e}")
|
| 178 |
+
traceback.print_exc()
|
| 179 |
+
sys.exit(1)
|
| 180 |
+
|
| 181 |
+
# FastAPI ์๋ฒ ์ธ์คํด์ค
|
| 182 |
+
app = FastAPI(title="Gemini Master Hub API")
|
| 183 |
+
app.add_middleware(
|
| 184 |
+
CORSMiddleware,
|
| 185 |
+
allow_origins=["*"],
|
| 186 |
+
allow_methods=["*"],
|
| 187 |
+
allow_headers=["*"],
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
# MCP ์๋ฒ ์ธ์คํด์ค
|
| 191 |
+
mcp = FastMCP("Gemini Server")
|
| 192 |
+
|
| 193 |
+
try:
    # [Already performed near line 90]
    log("✅ Environment configuration optimized")

    # Collect API keys (from environment variables and previously loaded files)
    api_inventory = {"google": [], "xai": [], "openai": [], "perplexity": [], "groq": [], "anthropic": [], "openrouter": []}

    # Reuse the envs list loaded near line 90 (the envs variable may live inside a
    # function and be inaccessible here, so re-scan the files)
    loaded_envs_files = [os.path.join(os.getcwd(), f) for f in [".env", "GLOBAL_CONFIG.env"] if os.path.exists(f)]

    for e_path in loaded_envs_files:
        try:
            with open(e_path, "r", encoding="utf-8") as f:
                content = f.read()
        except:
            continue

        for k in api_inventory.keys():
            if k == "google":
                api_inventory[k] += re.findall(r'AIza[0-9A-Za-z\-_]{30,}', content)
            elif k == "openrouter":
                api_inventory[k] += re.findall(r'OPENROUTER_API_KEY=(sk-or-v1-[0-9A-Za-z\-_]+)', content)
            else:
                prefix = {"xai": "xai-", "openai": "sk-", "groq": "gsk_"}.get(k, "")
                if prefix:
                    api_inventory[k] += re.findall(rf'{k.upper()}_API_KEY=({prefix}[0-9A-Za-z\-_]+)', content)

    google_keys = list(set(api_inventory["google"]))
    xai_keys = list(set(api_inventory["xai"]))
    log(f"✅ AI assets secured: Google({len(google_keys)}), Grok({len(xai_keys)})")

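    # [Editor's note] A minimal sketch (not in the original commit) of what the
    # harvesting loop above extracts. The sample keys below are fabricated
    # placeholders purely for illustration.
    def _example_key_harvest():  # hypothetical helper; never called by the server
        sample = "GROQ_API_KEY=gsk_demo123\nAIzaSyDEMO_DEMO_DEMO_DEMO_DEMO_DEMO_12"
        groq = re.findall(r'GROQ_API_KEY=(gsk_[0-9A-Za-z\-_]+)', sample)
        google = re.findall(r'AIza[0-9A-Za-z\-_]{30,}', sample)
        return groq, google  # (['gsk_demo123'], ['AIzaSy...12'])
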
    # --- FastAPI endpoints ---
    @app.post("/api/generate_rules_by_keyword")
    async def api_generate_rules(request: Request):
        try:
            data = await request.json()
            kw = data.get("keyword", "AUTO")
            model = data.get("model", "gemini-2.0-flash")
            category = data.get("category", "Trading")  # Trading, Indicators, Patterns

            log(f"Rule generation request: {kw} (Model: {model}, Category: {category})")

            # Actual AI call
            prompt = f"Write professional trading rules for '{kw}' in JSON format. Include: entry_conditions, exit_conditions, stop_loss, take_profit, risk_factors."
            result = ask_any_model(prompt, model)

            # Attempt JSON parsing
            try:
                # Strip markdown code fences
                clean_json = re.sub(r'```json\s*|\s*```', '', result).strip()
                rule_data = json.loads(clean_json)

                # Save to file
                filename = f"{kw.replace(' ', '_')}_Rules.json"
                filepath = os.path.join(RULES_DIR, category, filename)

                with open(filepath, "w", encoding="utf-8") as f:
                    json.dump(rule_data, f, indent=2, ensure_ascii=False)

                log(f"✅ Rule file created: {filepath}")
                return {"status": "success", "message": f"Rule generation complete: {filename}", "data": rule_data}

            except Exception as e:
                log(f"⚠️ JSON conversion failed, saving as text: {e}")
                # Back up as plain text
                filepath = os.path.join(RULES_DIR, category, f"{kw.replace(' ', '_')}_Raw.md")
                with open(filepath, "w", encoding="utf-8") as f:
                    f.write(result)
                return {"status": "partial_success", "message": "JSON parsing failed, saved as text", "data": result}

        except Exception as e:
            return {"status": "error", "message": str(e)}
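    # [Editor's note] Hedged usage sketch for the endpoint above; not part of the
    # original commit. Assumes the server is reachable on localhost:8000 (the
    # start_api() default) and that `requests` is installed.
    def _example_generate_rules():  # hypothetical helper; never called by the server
        import requests
        payload = {"keyword": "RSI divergence", "model": "gemini-2.0-flash", "category": "Trading"}
        r = requests.post("http://localhost:8000/api/generate_rules_by_keyword", json=payload, timeout=120)
        print(r.json())  # {"status": "success", "message": "Rule generation complete: ...", "data": {...}}
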
    @app.get("/api/get_file_content")
    async def api_get_file(path: str):
        """Read file contents (for security, only paths inside Gemini_Project are allowed)."""
        try:
            # Root-relative path normalization
            project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
            # If the path is already absolute (contains a drive letter), keep it as-is
            if ":" not in path:
                full_path = os.path.join(project_root, path)
            else:
                full_path = path

            if os.path.exists(full_path):
                with open(full_path, "r", encoding="utf-8") as f:
                    return {"status": "success", "content": f.read()}
            return {"status": "error", "message": f"File not found: {path}"}
        except Exception as e:
            return {"status": "error", "message": str(e)}

    @app.post("/api/save_custom_file")
    async def api_save_file(request: Request):
        try:
            data = await request.json()
            path = data.get("path")
            content = data.get("content")

            if not path.startswith("C:"):
                project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
                full_path = os.path.join(project_root, path)
            else:
                full_path = path

            os.makedirs(os.path.dirname(full_path), exist_ok=True)
            with open(full_path, "w", encoding="utf-8") as f:
                f.write(content)
            return {"status": "success", "message": f"Saved to {path}"}
        except Exception as e:
            return {"status": "error", "message": str(e)}

    @app.post("/api/execute_command")
    async def api_run_cmd(request: Request):
        """Run a command (VPN start, system restart, etc.)."""
        try:
            data = await request.json()
            cmd = data.get("command")
            cwd = data.get("cwd", ".")

            project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
            full_cwd = os.path.join(project_root, cwd)

            # Use subprocess.Popen so the server is not blocked (Windows 'start cmd /k')
            subprocess.Popen(f"start cmd /k {cmd}", cwd=full_cwd, shell=True)
            return {"status": "success", "message": f"Command '{cmd}' executed in {cwd}"}
        except Exception as e:
            return {"status": "error", "message": str(e)}
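    # [Editor's note] Illustrative client sketch for the utility endpoints above
    # (save_custom_file / get_file_content); not part of the original commit.
    # Assumes localhost:8000; the path is a placeholder.
    def _example_file_roundtrip():  # hypothetical helper; never called by the server
        import requests
        base = "http://localhost:8000"
        requests.post(f"{base}/api/save_custom_file",
                      json={"path": "Rules/demo_note.txt", "content": "hello"})
        print(requests.get(f"{base}/api/get_file_content",
                           params={"path": "Rules/demo_note.txt"}).json())
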
    # [NEW] Learning Engine APIs
    @app.post("/api/trade/record")
    async def api_record_trade(request: Request):
        """Record a trade entry."""
        try:
            data = await request.json()
            res = learning_engine.record_trade(
                data.get("symbol"),
                data.get("action"),
                data.get("reason"),
                data.get("price", "Market")
            )
            return {"status": "success", "message": res}
        except Exception as e:
            return {"status": "error", "message": str(e)}

    @app.post("/api/trade/feedback")
    async def api_trade_feedback(request: Request):
        """Save a trade result and its feedback."""
        try:
            data = await request.json()
            res = learning_engine.update_trade_result(
                data.get("symbol"),
                data.get("result"),  # WIN / LOSS
                data.get("feedback")
            )
            return {"status": "success", "message": res}
        except Exception as e:
            return {"status": "error", "message": str(e)}

    @app.get("/api/trade/lessons")
    async def api_get_lessons(symbol: str = "ALL"):
        """Retrieve trading lessons."""
        try:
            res = learning_engine.get_trading_lessons(symbol)
            return {"status": "success", "data": res}
        except Exception as e:
            return {"status": "error", "message": str(e)}
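    # [Editor's note] Sketch of the learning feedback loop the three endpoints
    # above implement (record -> feedback -> lessons). Not in the original commit;
    # assumes localhost:8000 and illustrative payload values.
    def _example_learning_loop():  # hypothetical helper; never called by the server
        import requests
        base = "http://localhost:8000"
        requests.post(f"{base}/api/trade/record",
                      json={"symbol": "BTC", "action": "BUY", "reason": "RSI 30 touched"})
        requests.post(f"{base}/api/trade/feedback",
                      json={"symbol": "BTC", "result": "WIN", "feedback": "Good RSI signal"})
        print(requests.get(f"{base}/api/trade/lessons", params={"symbol": "BTC"}).json())
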
    @app.get("/api/stats")
    async def get_brain_stats():
        """Return the trading brain's current real-time status (IQ, rule count)."""
        try:
            # 1. Count learned rules
            rule_dir = "04_SYNC_DATA/LEARNED_RULES"
            rules = [f for f in os.listdir(rule_dir) if f.endswith('.json')] if os.path.exists(rule_dir) else []

            # 2. Check target assets
            target_file = "04_SYNC_DATA/TARGET_ASSETS.json"
            targets = []
            if os.path.exists(target_file):
                with open(target_file, "r") as f:
                    targets = json.load(f)

            return {
                "status": "online",
                "brain_iq": len(rules) * 15 + 100,  # simple IQ figure for visualization only
                "total_rules": len(rules),
                "active_targets": [t.get('symbol') for t in targets],
                "last_update": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            }
        except Exception as e:
            return {"status": "error", "message": str(e)}

@app.post("/api/chat")
|
| 445 |
+
|
| 446 |
+
async def api_chat(request: Request):
|
| 447 |
+
|
| 448 |
+
try:
|
| 449 |
+
|
| 450 |
+
data = await request.json()
|
| 451 |
+
|
| 452 |
+
msg = data.get("message", "")
|
| 453 |
+
|
| 454 |
+
model = data.get("model", "gemini-2.0-flash")
|
| 455 |
+
|
| 456 |
+
mode = data.get("mode", "chat") # chat, trade, search, app
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
if not msg: return {"status": "error", "message": "No message provided"}
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
# Mode-specific logic
|
| 465 |
+
|
| 466 |
+
system_prompt = "You are Gemini Master, the supreme orchestrator of all integrated platforms (Firebase, Vercel, Supabase, Notion, etc.)."
|
| 467 |
+
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
if mode == "trade":
|
| 471 |
+
|
| 472 |
+
system_prompt = "You are the Trading Sentinel. Accessing live market data and Supabase logs."
|
| 473 |
+
|
| 474 |
+
# ์ค์๊ฐ ๊ฐ๊ฒฉ ์ ๋ณด ์๋ ์ฃผ์
์๋น์ค (์์: BTC)
|
| 475 |
+
|
| 476 |
+
prices = get_crypto_price("BTC")
|
| 477 |
+
|
| 478 |
+
wisdom = learning_engine.get_trading_lessons("ALL")
|
| 479 |
+
|
| 480 |
+
msg = f"[Live Market]: {prices}\n[Context: Trading Wisdom]\n{wisdom}\n\n[User Request]: {msg}"
|
| 481 |
+
|
| 482 |
+
|
| 483 |
+
|
| 484 |
+
elif mode == "search":
|
| 485 |
+
|
| 486 |
+
system_prompt = "You are the Quantum Search engine. Utilizing DuckDuckGo, Tavily, and Google Search."
|
| 487 |
+
|
| 488 |
+
search_results = web_search_ddg(msg)
|
| 489 |
+
|
| 490 |
+
msg = f"[Web Search Results]:\n{search_results}\n\n[User Request]: {msg}"
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
|
| 494 |
+
elif mode == "app":
|
| 495 |
+
|
| 496 |
+
app_id = data.get("app_id", "general")
|
| 497 |
+
|
| 498 |
+
app_prompts = {
|
| 499 |
+
|
| 500 |
+
"coder": "You are a Senior Software Engineer specializing in GitHub Actions, Codespaces, and Cursor SSH.",
|
| 501 |
+
|
| 502 |
+
"analyst": "You are a Market Analyst. Use Supabase data and exchange APIs for real-time insights.",
|
| 503 |
+
|
| 504 |
+
"notion": "You are a Knowledge Manager. Help organize thoughts into Notion databases and rules.",
|
| 505 |
+
|
| 506 |
+
"cloud_master": "You are the Cloud Architect. Manage Vercel, Render, and Firebase deployments.",
|
| 507 |
+
|
| 508 |
+
"trader_alpha": "You are an Elite Algorithmic Trader. Specialized in high-frequency trading rules."
|
| 509 |
+
|
| 510 |
+
}
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
# Context enhancement for Analyst
|
| 515 |
+
|
| 516 |
+
if app_id == "analyst":
|
| 517 |
+
|
| 518 |
+
news = get_market_news("global market")
|
| 519 |
+
|
| 520 |
+
# Load Auto-Generated Rules
|
| 521 |
+
|
| 522 |
+
try:
|
| 523 |
+
|
| 524 |
+
import glob
|
| 525 |
+
|
| 526 |
+
rule_files = glob.glob("04_SYNC_DATA/LEARNED_RULES/*.json")
|
| 527 |
+
|
| 528 |
+
loaded_rules = []
|
| 529 |
+
|
| 530 |
+
for rf in rule_files[-3:]: # Load latest 3 rules
|
| 531 |
+
|
| 532 |
+
with open(rf, "r", encoding="utf-8") as f:
|
| 533 |
+
|
| 534 |
+
loaded_rules.append(json.load(f))
|
| 535 |
+
|
| 536 |
+
rules_context = json.dumps(loaded_rules, ensure_ascii=False, indent=1)
|
| 537 |
+
|
| 538 |
+
except: rules_context = "No rules yet."
|
| 539 |
+
|
| 540 |
+
|
| 541 |
+
|
| 542 |
+
msg = f"[Context: Real-time News]\n{news}\n\n[Context: Auto-Generated Rules]\n{rules_context}\n\n[User Request]: {msg}"
|
| 543 |
+
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
system_prompt = app_prompts.get(app_id, "You are a specialized AI assistant.")
|
| 547 |
+
|
| 548 |
+
|
| 549 |
+
|
| 550 |
+
# Multi-Brain Switching Logic (LiteLLM)
|
| 551 |
+
|
| 552 |
+
response = ask_any_model(msg, model)
|
| 553 |
+
|
| 554 |
+
return {"status": "success", "response": response, "mode": mode}
|
| 555 |
+
|
| 556 |
+
except Exception as e:
|
| 557 |
+
|
| 558 |
+
return {"status": "error", "message": str(e)}
|
| 559 |
+
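    # [Editor's note] Usage sketch for /api/chat above; "trade" mode triggers the
    # live-price and lesson injection shown in the handler. Not in the original
    # commit; assumes localhost:8000.
    def _example_chat_trade_mode():  # hypothetical helper; never called by the server
        import requests
        body = {"message": "Should I scale into BTC here?", "model": "gemini-2.0-flash", "mode": "trade"}
        r = requests.post("http://localhost:8000/api/chat", json=body, timeout=120)
        print(r.json().get("response"))
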
    @app.get("/api/apps/list")
    async def api_list_apps():
        """Return the list of supported AI apps/modes."""
        return {
            "status": "success",
            "apps": [
                {"id": "chat", "name": "Standard Chat", "icon": "💬", "desc": "Universal Intelligence"},
                {"id": "trade", "name": "Trading Sentinel", "icon": "📈", "desc": "Market & Exchange Hub"},
                {"id": "search", "name": "Quantum Search", "icon": "🔍", "desc": "Tavily + DDG Global Search"},
                {"id": "app", "name": "App: Cloud Architect", "icon": "☁️", "app_id": "cloud_master"},
                {"id": "app", "name": "App: Code Expert (SSH)", "icon": "💻", "app_id": "coder"},
                {"id": "app", "name": "App: Notion Agent", "icon": "📝", "app_id": "notion"},
                {"id": "app", "name": "App: Market Analyst", "icon": "📊", "app_id": "analyst"},
                {"id": "app", "name": "App: Elite Trader", "icon": "🦅", "app_id": "trader_alpha"}
            ]
        }

    @app.get("/api/debate/suggest")
    async def api_debate_suggest():
        """The collective brain suggests debate topics that fit the current project/market situation."""
        prompt = "We are building an AI trading system. Propose just 3 sharp, innovative topics for our Council (The Council) to debate. (e.g., technical analysis vs. on-chain data, the limits of hedging strategies, etc.)"
        try:
            # Use gemini-2.0-flash for a fast answer
            suggestion = ask_any_model(prompt, "gemini-2.0-flash")
            return {"status": "success", "suggestions": suggestion}
        except:
            return {"status": "error", "message": "Failed to generate suggestions"}

@app.get("/api/insights/latest")
|
| 619 |
+
|
| 620 |
+
async def api_get_insights(refresh: bool = False):
|
| 621 |
+
|
| 622 |
+
"""์์จ๋ถ์ ์์ด์ ํธ์ ์ต์ ์ธ์ฌ์ดํธ ๋ฐํ"""
|
| 623 |
+
|
| 624 |
+
from .autonomous_analyst import generate_autonomous_insight, INSIGHTS_FILE
|
| 625 |
+
|
| 626 |
+
|
| 627 |
+
|
| 628 |
+
if refresh or not os.path.exists(INSIGHTS_FILE):
|
| 629 |
+
|
| 630 |
+
insight = generate_autonomous_insight()
|
| 631 |
+
|
| 632 |
+
if not insight: return {"status": "error", "message": "Analysis failed"}
|
| 633 |
+
|
| 634 |
+
return {"status": "success", "data": insight}
|
| 635 |
+
|
| 636 |
+
|
| 637 |
+
|
| 638 |
+
try:
|
| 639 |
+
|
| 640 |
+
with open(INSIGHTS_FILE, "r", encoding="utf-8") as f:
|
| 641 |
+
|
| 642 |
+
return {"status": "success", "data": json.load(f)}
|
| 643 |
+
|
| 644 |
+
except:
|
| 645 |
+
|
| 646 |
+
return {"status": "error", "message": "Read error"}
|
| 647 |
+
|
| 648 |
+
|
| 649 |
+
|
| 650 |
+
# --- MCP ๋๊ตฌ (ํตํฉ ์ง๋ฅ) ---
|
| 651 |
+
|
| 652 |
+
    @mcp.tool()
    def get_shared_history(limit: int = 15) -> str:
        """[Unified memory] Loads every chat record made from other IDEs or at other times. Makes the system work like 'a single brain'."""
        log_path = "shared_chat_history.json"
        if not os.path.exists(log_path) and os.path.exists(os.path.join("..", log_path)):
            log_path = os.path.join("..", log_path)
        if not os.path.exists(log_path):
            return "No history yet. Once conversations begin, they are collected here automatically."

        try:
            with open(log_path, "r", encoding="utf-8") as f:
                history = json.load(f)
            recent = history[-limit:]
            return "\n".join([f"[{e['timestamp']}] {e['role']} ({e['ide']}): {e['content']}" for e in recent])
        except:
            return "Failed to read the log."

    @mcp.tool()
    def list_supported_models() -> str:
        """[Model list] Shows every AI model currently available in the system."""
        model_list = [
            "--- Google Gemini ---",
            "gemini-2.0-flash, gemini-2.0-pro, gemini-1.5-pro, gemini-2.5-alpha",
            "",
            "--- Anthropic Claude ---",
            "claude-3.5-sonnet, claude-3.5-haiku, claude-3-opus",
            "",
            "--- OpenAI GPT ---",
            "gpt-4o, gpt-4o-mini, o1-preview, o1-mini",
            "",
            "--- Perplexity (Search) ---",
            "perplexity-sonar, perplexity-reasoning",
            "",
            "--- Groq (Llama, Fast) ---",
            "llama-3.3-70b, llama-3.1-70b, mixtral-8x7b",
            "",
            "--- Hugging Face (Serverless) ---",
            "hf/meta-llama/Llama-2-7b-hf, hf/mistralai/Mistral-7B-v0.1",
            "",
            "--- Other ---",
            "grok, deepseek-v3"
        ]
        return "\n".join(model_list)

    @mcp.tool()
    def ask_any_model(question: str, model_type: str = "gemini-2.0-flash") -> str:
        """[Universal brain] Calls any AI model worldwide.
        Supported: gemini(2.0, 1.5), claude(3.5, Opus), gpt(4o, o1), grok, perplexity(sonar), llama(groq)"""
        log_shared_chat("user", question, model_type)

        # [REFINED] Authorized model pool
        # Optimized for OpenRouter fallback to ensure high availability
        model_map = {
            # --- IDE-tier models ---
            "gemini-2.0-flash": "gemini/gemini-2.0-flash",
            "gemini-2.0-pro": "gemini/gemini-2.0-pro-exp",
            "gpt-4o": "openrouter/openai/gpt-4o",
            "claude-3.5-sonnet": "openrouter/anthropic/claude-3.5-sonnet",
            "gpt-4-turbo": "openrouter/openai/gpt-4-turbo",

            # --- Specialized Claude models ---
            "claude-3.5-haiku": "openrouter/anthropic/claude-3.5-haiku",
            "claude-3-opus": "openrouter/anthropic/claude-3-opus",

            # --- Grok & others ---
            "grok": "openrouter/x-ai/grok-2",

            # --- Perplexity (search AI) ---
            "perplexity-sonar": "openrouter/perplexity/sonar",

            # --- Groq (fast Llama) ---
            "llama-3.3-70b": "groq/llama-3.3-70b-versatile"
        }

        model_id = model_map.get(model_type, model_type)
        provider = model_id.split('/')[0] if '/' in model_id else "unknown"

        # 1. Inject master context (real-time learning from files)
        context_prompt = ""
        try:
            project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
            rules_path = os.path.join(project_root, "04_SYNC_DATA", "Master_Rules.json")
            if os.path.exists(rules_path):
                with open(rules_path, "r", encoding="utf-8") as f:
                    rules = json.load(f)
                context_prompt += f"\n[Master Rules]: {json.dumps(rules, ensure_ascii=False)}"
        except: pass

        full_question = f"{context_prompt}\n\nCurrent User Request: {question}"

        # Select the API key per platform, with automatic OpenRouter fallback
        keys = api_inventory.get(provider, [])

        # OpenRouter-specific handling
        if provider == "openrouter" or (not keys and provider != "gemini"):
            or_key = os.getenv("OPENROUTER_API_KEY")
            if or_key:
                keys = [or_key]
                if not model_id.startswith("openrouter/"):
                    # Fallback mapping for OpenRouter
                    or_map = {
                        "anthropic": "openrouter/anthropic/",
                        "openai": "openrouter/openai/",
                        "xai": "openrouter/x-ai/",
                        "google": "openrouter/google/"
                    }
                    if provider in or_map:
                        model_id = model_id.replace(f"{provider}/", or_map[provider])
                    else:
                        model_id = f"openrouter/{model_id}"

        if not keys and provider == "gemini":
            keys = google_keys

        # Handle explicit openrouter-prefixed models ("or/...")
        if model_type.startswith("or/"):
            model_id = "openrouter/" + model_type[3:]
            keys = api_inventory.get("openrouter", [os.getenv("OPENROUTER_API_KEY")])

        # Attempt the call (quota protected by a max-token cap)
        for k in (keys if keys else [""]):
            try:
                r = completion(
                    model=model_id,
                    messages=[
                        {"role": "system", "content": "You are Gemini Master, an autonomous AI system. You can save files by starting your response with 'SAVE_FILE:[path]' followed by the content. Example: 'SAVE_FILE:Rules/new_rule.json\n{...}'"},
                        {"role": "user", "content": full_question}
                    ],
                    api_key=k if k else None,
                    timeout=30,
                    max_tokens=1000  # optimization: 1000 tokens (prevents long technical answers from being cut off)
                )
                ans = r.choices[0].message.content

                # [Smart Agent Interaction]
                if ans.startswith("SAVE_FILE:"):
                    try:
                        file_info = ans.split("\n", 1)
                        header = file_info[0]
                        content = file_info[1] if len(file_info) > 1 else ""
                        target_path = header.replace("SAVE_FILE:", "").strip()

                        # Project-root normalization
                        project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
                        full_save_path = os.path.join(project_root, target_path)

                        os.makedirs(os.path.dirname(full_save_path), exist_ok=True)
                        with open(full_save_path, "w", encoding="utf-8") as f:
                            f.write(content)
                        ans = f"✅ [AGENT] File saved to: {target_path}\n\n" + ans
                    except Exception as fe:
                        ans = f"⚠️ [AGENT] File save failed: {fe}\n\n" + ans

                log_shared_chat("assistant", ans, model_type)
                auto_save_safety_backup()
                return ans
            except Exception as e:
                log(f"⚠️ {model_id} failed: {str(e)[:50]}")
                continue
        return f"❌ {model_type} call failed. Check your API keys or try another model."
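    # [Editor's note] Sketch (not in the original commit) of the OpenRouter
    # fallback rewrite performed above: a provider-prefixed id is remapped onto
    # the matching OpenRouter route when no direct key exists.
    def _example_openrouter_fallback():  # hypothetical helper; never called by the server
        model_id, provider = "anthropic/claude-3.5-sonnet", "anthropic"
        or_map = {"anthropic": "openrouter/anthropic/", "openai": "openrouter/openai/",
                  "xai": "openrouter/x-ai/", "google": "openrouter/google/"}
        if provider in or_map:
            model_id = model_id.replace(f"{provider}/", or_map[provider])
        return model_id  # -> 'openrouter/anthropic/claude-3.5-sonnet'
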
    @mcp.tool()
    def web_search_ddg(query: str) -> str:
        """[DuckDuckGo search] Searches the live web."""
        try:
            with DDGS() as ddgs:
                results = [r for r in ddgs.text(query, max_results=5)]
            if not results: return "No search results."
            return "\n\n".join([f"[{r['title']}]\n{r['body']}\n({r['href']})" for r in results])
        except Exception as e: return f"Error during search: {e}"

    @mcp.tool()
    def web_search_tavily(query: str) -> str:
        """[Tavily AI search] Performs AI-optimized web search (API key required)."""
        tav_key = os.getenv("TAVILY_API_KEY")
        if not tav_key: return "TAVILY_API_KEY is not set."
        try:
            client = TavilyClient(api_key=tav_key)
            result = client.search(query, search_depth="advanced")
            if not result.get('results'): return "No search results."
            return "\n\n".join([f"[{r['title']}]\n{r['content']}\n({r['url']})" for r in result['results'][:5]])
        except Exception as e: return f"Tavily search error: {e}"

    @mcp.tool()
    def read_file(path: str) -> str:
        """Reads a file's contents."""
        try:
            with open(path, "r", encoding="utf-8") as f:
                return f.read()
        except Exception as e: return str(e)

    @mcp.tool()
    def write_file(path: str, content: str) -> str:
        """Saves content to a file."""
        try:
            with open(path, "w", encoding="utf-8") as f:
                f.write(content)
            return f"✅ Saved: {path}"
        except Exception as e: return str(e)

    # --- [NEW] AI Self-Learning Tools (RAG Feedback Loop) ---
    @mcp.tool()
    def record_new_trade(symbol: str, action: str, reason: str) -> str:
        """[Learning: record] Records a new trade entry. e.g. record_new_trade('BTC', 'BUY', 'RSI 30 touched')"""
        return learning_engine.record_trade(symbol, action, reason)

    @mcp.tool()
    def update_trade_outcome(symbol: str, result: str, feedback: str) -> str:
        """[Learning: feedback] Evaluates a trade outcome. e.g. update_trade_outcome('BTC', 'WIN', 'Good RSI signal')"""
        return learning_engine.update_trade_result(symbol, result, feedback)

    @mcp.tool()
    def get_market_wisdom(symbol: str = "ALL") -> str:
        """[Learning: recall] Draws lessons from past trade records."""
        return learning_engine.get_trading_lessons(symbol)

    @mcp.tool()
    def get_crypto_price(symbol: str = "BTC") -> str:
        """[Live quotes] Fetches real-time Upbit (KRW) and Binance (USDT) prices. (No API key required)"""
        results = []
        try:
            # 1. Upbit (public API)
            upbit_symbol = f"KRW-{symbol}"
            r_up = requests.get(f"https://api.upbit.com/v1/ticker?markets={upbit_symbol}", timeout=5)
            if r_up.status_code == 200:
                data = r_up.json()[0]
                results.append(f"Upbit: {data['trade_price']:,} KRW ({data['signed_change_rate']*100:+.2f}%)")
        except: pass

        try:
            # 2. Binance (public API)
            bin_symbol = f"{symbol}USDT"
            r_bin = requests.get(f"https://api.binance.com/api/v3/ticker/price?symbol={bin_symbol}", timeout=5)
            if r_bin.status_code == 200:
                data = r_bin.json()
                results.append(f"Binance: ${float(data['price']):,.2f} USDT")
        except: pass

        if not results: return f"❌ Could not fetch a quote for {symbol}."
        return " | ".join(results)
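    # [Editor's note] Direct-call sketch for the quote tool above; it hits the
    # public Upbit/Binance tickers, so network access is required. The figures in
    # the comment are illustrative, not real quotes.
    def _example_price_check():  # hypothetical helper; never called by the server
        print(get_crypto_price("BTC"))
        # e.g. "Upbit: 100,000,000 KRW (+1.23%) | Binance: $70,123.45 USDT"
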
    @mcp.tool()
    def get_market_news(query: str = "crypto market") -> str:
        """[News briefing] Searches and briefs major financial news from Investing.com, Reuters, etc."""
        specialized_query = f"site:investing.com OR site:reuters.com {query} news"
        return web_search_ddg(specialized_query)

    @mcp.tool()
    def update_master_instruction(instruction: str, update_intelligence_state: str = None) -> str:
        """[Commander directive] Adds a new directive to the system's core Master Rules or updates the intelligence state."""
        # ... (existing logic kept)
        try:
            from intelligent_asset_manager import IntelligenceAssetManager
            iam = IntelligenceAssetManager()
            iam.archive_asset("Command Update", instruction, "System Configuration Pivot", "Direct Master Rules Modification", ["Command", "Settings"])
        except: pass

        try:
            project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
            rules_path = os.path.join(project_root, "04_SYNC_DATA", "Master_Rules.json")
            rules = {}
            if os.path.exists(rules_path):
                with open(rules_path, "r", encoding="utf-8") as f:
                    rules = json.load(f)
            rules["last_sync"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            rules["instructions"] = instruction
            if update_intelligence_state: rules["intelligence_state"] = update_intelligence_state
            with open(rules_path, "w", encoding="utf-8") as f:
                json.dump(rules, f, indent=2, ensure_ascii=False)
            return "✅ Commander directive applied and archived as an asset."
        except Exception as e: return f"❌ Failed: {str(e)}"

    @mcp.tool()
    def capture_intelligence_asset(title: str, idea: str, value: str, strategy: str, tags: str = "Native") -> str:
        """[Intelligence assetization] Permanently archives important ideas, values, and implementation strategies as intelligence assets.
        Used to assetize internal conversations in real time."""
        try:
            from intelligent_asset_manager import IntelligenceAssetManager
            iam = IntelligenceAssetManager()
            tag_list = [t.strip() for t in tags.split(",")]
            file_path = iam.archive_asset(title, idea, value, strategy, tag_list)
            return f"✅ Intelligence asset archived: {os.path.basename(file_path)}"
        except Exception as e:
            return f"❌ Assetization failed: {str(e)}"

    @mcp.tool()
    def system_emergency_patch(target_module: str, patch_content: str, reason: str = "Commander Order") -> str:
        """[⚡Emergency patch] Injects emergency code into core system modules (HEALING_CORE, BOT_CORE, etc.).
        When the Commander says 'fix this', Antigravity immediately uses this tool to modify the code."""
        valid_targets = {
            "HEALING_CORE": "ANTIGRAVITY_HEALING_CORE.py",
            "LEARNING_ENGINE": "01_CENTRAL_BRAIN/App/learning_engine.py",
            "SERVER": "01_CENTRAL_BRAIN/App/server.py"
        }

        target_file = valid_targets.get(target_module.upper())
        if not target_file:
            return f"❌ Invalid target. Valid options: {', '.join(valid_targets.keys())}"

        try:
            project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
            full_path = os.path.join(project_root, target_file)

            # Create a timestamped backup first
            ts = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
            backup_path = f"{full_path}.{ts}.bak"
            shutil.copy2(full_path, backup_path)

            # Apply the patch (simple overwrite vs. smart merge; currently a full
            # replacement by design, for safety)
            # Note: patch_content must be the complete file contents. Partial edits need separate logic.
            with open(full_path, "w", encoding="utf-8") as f:
                f.write(patch_content)

            log(f"⚡ [EMERGENCY PATCH] {target_module} patched (Reason: {reason})")
            return f"✅ {target_module} emergency patch complete. Backup: {os.path.basename(backup_path)}"
        except Exception as e:
            return f"❌ Patch failed: {str(e)}"

    # [Active Mining Loop] autonomous asset-mining thread
    def autonomous_asset_miner():
        """Scans the project every hour and assetizes new ideas/algorithms on its own."""
        while True:
            try:
                time.sleep(3600)  # 1-hour cycle
                log("[MINER] Starting autonomous asset-mining scan...")
                # The actual mining logic will call IntelligenceAssetManager's batch mode
            except: pass

    # --- Server startup logic ---
    def start_api():
        # Bind to 0.0.0.0 and the PORT env var so cloud hosts (Render/IDX, etc.) can route traffic
        port = int(os.getenv("PORT", 8000))
        uvicorn.run(app, host="0.0.0.0", port=port, log_level="error")

    if __name__ == "__main__":
        log("\n" + "="*50)
        log("GEMINI MASTER: UNIFIED BRAIN ONLINE")
        log(" [API] Dashboard Ready at http://localhost:8000")
        log(" [MCP] Integrated Mode (Cloud/Local Adaptive)")
        log("="*50)

        # [IDX/Cloud/Windsurf optimization]
        # Windsurf/Claude report isatty() == False, so don't treat that alone as a cloud signal
        is_actually_cloud = os.getenv("IDX_WORKSPACE_ID") or os.getenv("RENDER") or os.getenv("PORT")

        if is_actually_cloud:
            log("☁️ Cloud environment detected: running in FastAPI (HTTP) mode.")
            # [NEW] Cloud Miner start
            miner_thread = threading.Thread(target=autonomous_asset_miner, daemon=True)
            miner_thread.start()
            start_api()
        else:
            log("Local/IDE environment: running Stdio (MCP) and HTTP (API) in parallel.")
            # Run the API server on a separate thread
            api_thread = threading.Thread(target=start_api, daemon=True)
            api_thread.start()
            # [NEW] Local Miner start (Active Intelligence)
            miner_thread = threading.Thread(target=autonomous_asset_miner, daemon=True)
            miner_thread.start()
            log("⛏️ [MINER] Autonomous value-mining engine armed (Cycle: 1h)")

            try:
                # Run the MCP stdio server on the main thread (core integration point)
                mcp.run()
            except (KeyboardInterrupt, SystemExit):
                log("System shutdown request received.")
            except Exception as e:
                log(f"⚠️ MCP runtime error: {e}")

except Exception:
    log("\n❌ [FAIL]:")
    traceback.print_exc()