# pixal_agent_full.py
import os
import queue
import requests
import gradio as gr
from typing import Any, Dict, List, Optional
from langchain.llms.base import LLM
from langchain.agents import initialize_agent, AgentType, load_tools
from langchain.tools import Tool, YouTubeSearchTool as YTS
from langchain_experimental.tools.python.tool import PythonREPLTool
from langchain.callbacks.base import BaseCallbackHandler

# Custom callback handler that streams agent progress into a queue
class StreamingAgentCallbackHandler(BaseCallbackHandler):
    def __init__(self, q: queue.Queue):
        self.q = q

    def on_agent_action(self, action, **kwargs):
        log = f"🧠 Thought: {action.log.strip()}\nπŸ”§ Action: {action.tool}({action.tool_input})"
        self.q.put(log)

    def on_tool_end(self, output, **kwargs):
        self.q.put(f"πŸ“¦ Observation: {output}\n")

    def on_agent_finish(self, finish, **kwargs):
        self.q.put(f"\nβœ… Final Answer: {finish.return_values['output']}")

    def on_llm_new_token(self, token: str, **kwargs):
        # μ„ νƒμ μœΌλ‘œ 토큰 슀트리밍 좜λ ₯
        pass

    def on_llm_end(self, response, **kwargs):
        self.q.put("[END]")

    def on_llm_error(self, error, **kwargs):
        self.q.put(f"[ERROR] {str(error)}")

from langchain_community.retrievers import WikipediaRetriever

retriever = WikipediaRetriever(lang="ko", top_k_results=10)
wiki = Tool(
    func=retriever.get_relevant_documents,
    name="WIKI SEARCH",
    description="Retrieves information from Wikipedia. Verify the results before using them.",
)
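# Optional sanity check (illustrative): the retriever can also be queried directly, e.g.
#   docs = retriever.get_relevant_documents("LangChain")
#   print(docs[0].page_content[:200])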
# ──────────────────────────────
# βœ… GitHub Models LLM
# ──────────────────────────────
class GitHubModelLLM(LLM):
    model: str = "openai/gpt-4.1"
    endpoint: str = "https://models.github.ai/inference"
    token: Optional[str] = None

    @property
    def _llm_type(self) -> str:
        return "github_models_api"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        if not self.token:
            raise ValueError("A GitHub API token is required.")

        headers = {
            "Authorization": f"Bearer {self.token}",
            "Content-Type": "application/json",
        }
        body = {"model": self.model, "messages": [{"role": "user", "content": prompt}]}

        resp = requests.post(f"{self.endpoint}/chat/completions", json=body, headers=headers)
        if resp.status_code != 200:
            raise ValueError(f"API error: {resp.status_code} - {resp.text}")
        return resp.json()["choices"][0]["message"]["content"]

# ──────────────────────────────
# βœ… LLM setup
# ──────────────────────────────
token = os.getenv("GITHUB_TOKEN") or os.getenv("token")
if not token:
    print("⚠️ GitHub Token이 ν•„μš”ν•©λ‹ˆλ‹€. 예: setx GITHUB_TOKEN your_token")

llm = GitHubModelLLM(model="openai/gpt-4.1", token=token)

# ──────────────────────────────
# βœ… Load the built-in LangChain tools
# ──────────────────────────────
tools = load_tools(
    ["ddg-search", "requests_all", "llm-math"],
    llm=llm,
    allow_dangerous_tools=True,
) + [YTS(), wiki]
# ──────────────────────────────
# βœ… Python execution tool (built into LangChain)
# ──────────────────────────────
python_tool = PythonREPLTool()
tools.append(Tool(name="python_repl", func=python_tool.run, description="Python μ½”λ“œλ₯Ό μ‹€ν–‰ν•©λ‹ˆλ‹€."))

# ──────────────────────────────
# βœ… File tools (registered below via FileManagementToolkit)
# ──────────────────────────────


# ──────────────────────────────
# βœ… Accurate Korean time function (Asia/Seoul)
# ──────────────────────────────
from datetime import datetime
from zoneinfo import ZoneInfo

def time_now(_=""):
    try:
        # μ •ν™•ν•œ UTC μ‹œκ°μ„ μ™ΈλΆ€ APIμ—μ„œ κ°€μ Έμ˜΄
        resp = requests.get("https://timeapi.io/api/Time/current/zone?timeZone=Asia/Seoul", timeout=5)
        if resp.status_code == 200:
            data = resp.json()
            dt = data["dateTime"].split(".")[0].replace("T", " ")
            return f"ν˜„μž¬ μ‹œκ°: {dt} (Asia/Seoul, μ„œλ²„ κΈ°μ€€ NTP 동기화)"
        else:
            # API μ‹€νŒ¨ μ‹œ 둜컬 μ‹œμŠ€ν…œ μ‹œκ°μœΌλ‘œ λŒ€μ²΄
            tz = ZoneInfo("Asia/Seoul")
            now = datetime.now(tz)
            return f"ν˜„μž¬ μ‹œκ°(둜컬): {now.strftime('%Y-%m-%d %H:%M:%S')} (Asia/Seoul)"
    except Exception as e:
        tz = ZoneInfo("Asia/Seoul")
        now = datetime.now(tz)
        return f"ν˜„μž¬ μ‹œκ°(λ°±μ—…): {now.strftime('%Y-%m-%d %H:%M:%S')} (Asia/Seoul, 였λ₯˜: {e})"
# ──────────────────────────────
# βœ… Register additional tools
# ──────────────────────────────
tools.extend([Tool(name="time_now", func=time_now, description="ν˜„μž¬ μ‹œκ°„μ„ λ°˜ν™˜ν•©λ‹ˆλ‹€.")])
from langchain.memory import ConversationBufferMemory as MEM
from langchain.agents.agent_toolkits import FileManagementToolkit as FMT
tools.extend(FMT(root_dir=str(os.getcwd())).get_tools())
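# FileManagementToolkit typically exposes read/write/copy/move/delete/search/list-directory
# tools, all scoped to root_dir (here, the current working directory).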
# ──────────────────────────────
# βœ… Agent initialization
# ──────────────────────────────
q_stream = queue.Queue()
handler = StreamingAgentCallbackHandler(q_stream)
agent = initialize_agent(
    tools,
    llm,
    memory=MEM(),
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
    handle_parsing_errors=True,
    callbacks=[handler]
)
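
# Optional console smoke test (assumes GITHUB_TOKEN is set; uncomment to try one turn without the UI):
#   print(agent.run("What time is it right now in Seoul?"))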

# ──────────────────────────────
# βœ… Gradio UI
# ──────────────────────────────
def chat(message, history):
    try:
        response = agent.run(message)
    except Exception as e:
        response = f"⚠️ Error: {e}"
    # Drain the callback queue and stream the accumulated agent log, then end with
    # the final answer. Each yield replaces the displayed message in ChatInterface,
    # so the log is accumulated rather than yielded line by line.
    log = ""
    while not q_stream.empty():
        msg = q_stream.get()
        if msg == "[END]":
            continue
        log += msg + "\n"
        yield log
    yield response

with gr.Blocks(theme=gr.themes.Soft(), title="PIXAL Assistant (LangChain + GitHub LLM)") as demo:
    gr.Markdown("""
    ## πŸ€– PIXAL Assistant  
    **LangChain-based multi-tool agent**  
    🧰 DuckDuckGo · Wikipedia · Math · Requests · Python REPL · File · Time
    """)
    ai1 = gr.ChatInterface(chat)

if __name__ == "__main__":
    demo.launch()