import os, json, pickle, datetime, requests, re, gradio as gr
from typing import Optional, List, Dict, Any
from requests.adapters import HTTPAdapter, Retry
from langchain.llms.base import LLM
from langchain.agents import initialize_agent, AgentType, load_tools
from langchain.tools import Tool
from langchain.memory import ConversationBufferMemory
from langchain_experimental.tools.python.tool import PythonREPLTool
from langchain_community.retrievers import WikipediaRetriever
from langchain.tools.retriever import create_retriever_tool
from langchain_community.tools.shell.tool import ShellTool
from langchain.tools import YouTubeSearchTool


# ──────────────────────────────
# GitHubModelLLM — LangChain LLM wrapper for the GitHub Models chat API
# ──────────────────────────────
class GitHubModelLLM(LLM):
    """LangChain-compatible LLM that proxies chat completions to the GitHub Models API."""

    model: str = "openai/gpt-4.1"
    endpoint: str = "https://models.github.ai/inference"
    # Credentials come from the environment only; never hard-code tokens.
    token: Optional[str] = os.environ.get("token")
    system_prompt: Optional[str] = (
        "너는 PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야."
        "너의 개발자는 정성윤 이라는 6학년 파이썬 프로그래머야."
    )
    request_timeout: float = 30.0
    max_retries: int = 2
    backoff_factor: float = 0.3

    @property
    def _llm_type(self) -> str:
        return "github_models_api"

    def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]:
        """POST *body* to the chat/completions endpoint with retries; return parsed JSON.

        Raises:
            ValueError: when no token is configured in any known env variable.
            requests.HTTPError: on a non-2xx response after retries.
        """
        token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
        if not token:
            raise ValueError("❌ GitHub token이 설정되지 않았습니다.")
        session = requests.Session()
        retries = Retry(
            total=self.max_retries,
            backoff_factor=self.backoff_factor,
            status_forcelist=[429, 500, 502, 503, 504],
        )
        session.mount("https://", HTTPAdapter(max_retries=retries))
        # SECURITY FIX: the original hard-coded a GitHub personal access token
        # here, ignoring the env-resolved `token` above. Use the resolved token;
        # the leaked PAT must also be revoked.
        session.headers.update({
            "Content-Type": "application/json",
            "Authorization": f"Bearer {token}",
        })
        resp = session.post(
            f"{self.endpoint}/chat/completions",
            json=body,
            timeout=self.request_timeout,
        )
        resp.raise_for_status()
        return resp.json()

    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
        """Send a single-turn chat request and return the assistant's text content."""
        body: Dict[str, Any] = {"model": self.model, "messages": []}
        if self.system_prompt:
            body["messages"].append({"role": "system", "content": self.system_prompt})
        body["messages"].append({"role": "user", "content": prompt})
        if stop:
            body["stop"] = stop
        res = self._post_chat(body)
        msg = res.get("choices", [{}])[0].get("message", {})
        # Fall back to the serialized function_call when no text content came back.
        return msg.get("content") or json.dumps(msg.get("function_call", {}))


# ──────────────────────────────
# HuggingFace API (profile lookup)
# ──────────────────────────────
def get_hf_userinfo(hf_token: str) -> dict:
    """Return {'name', 'avatar'} for the HF token owner; guest defaults on any failure."""
    default = {
        "name": "guest",
        "avatar": "https://huggingface.co/front/assets/huggingface_logo-noborder.svg",
    }
    try:
        r = requests.get(
            "https://huggingface.co/api/whoami-v2",
            headers={"Authorization": f"Bearer {hf_token}"},
            timeout=5,
        )
        if r.status_code == 200:
            j = r.json()
            return {
                "name": j.get("name", "guest"),
                "avatar": j.get("avatar", default["avatar"]),
            }
    except Exception:
        # Best-effort lookup: network/parse errors fall through to guest profile.
        pass
    return default


# ──────────────────────────────
# Agent 구성
# ──────────────────────────────
llm = GitHubModelLLM()
tools = load_tools(["ddg-search", "requests_all", "llm-math"], llm=llm, allow_dangerous_tools=True)
tools += [YouTubeSearchTool(), ShellTool(), PythonREPLTool()]

retriever = WikipediaRetriever(lang="ko")
retriever_tool = create_retriever_tool(retriever, name="wiki_search", description="위키백과 검색 도구")
tools.append(retriever_tool)


def time_now(_=""):
    """Return the current time in Asia/Seoul (fixed UTC+9) as a formatted string."""
    now = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=9)))
    return f"현재 시각: {now.strftime('%Y-%m-%d %H:%M:%S')} (Asia/Seoul)"


tools.append(Tool(name="time_now", func=time_now, description="현재 시간을 반환합니다."))

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
# BUGFIX: initialize_agent takes the agent type via the `agent=` keyword;
# the original passed `agent_type=`, which initialize_agent does not accept.
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    memory=memory,
    verbose=True,
)

# ──────────────────────────────
# 대화 저장/로드 (per-user pickle persistence)
# ──────────────────────────────
# Run relative to this script's directory so <username>.pkl files are found.
os.chdir(os.path.dirname(os.path.abspath(__file__)))


def summarize_title(history):
    """Ask the LLM for a one-line title (max 50 chars) for the last 3 turns."""
    if not history:
        return "새 대화"
    text = "\n".join(f"User:{m} AI:{r}" for m, r in history[-3:])
    try:
        title = llm._call(f"이 대화의 주제를 한 줄로 요약해줘:\n{text}")
        return title.strip().replace("\n", " ")[:50]
    except Exception:
        return "요약 실패"


def save_conversation(history, hf_token):
    """Persist *history* under an LLM-generated title in <username>.pkl.

    No-op for unauthenticated (guest) users.
    """
    info = get_hf_userinfo(hf_token)
    username = info["name"]
    if username.lower() == "guest":
        return
    fname = f"{username}.pkl"
    data = {}
    if os.path.exists(fname):
        # NOTE(security): pickle is only safe for trusted local state; never
        # load pickle files obtained from untrusted sources.
        with open(fname, "rb") as f:
            data = pickle.load(f)
    title = summarize_title(history)
    data[title] = {
        "title": title,
        "updated": datetime.datetime.now().isoformat(),
        "history": history,
    }
    with open(fname, "wb") as f:
        pickle.dump(data, f)


def load_conversation(hf_token, conv_title=None):
    """Return the saved history for *conv_title*, else the most recently updated one."""
    info = get_hf_userinfo(hf_token)
    username = info["name"]
    if username.lower() == "guest":
        return []
    fname = f"{username}.pkl"
    if not os.path.exists(fname):
        return []
    with open(fname, "rb") as f:
        data = pickle.load(f)
    if conv_title and conv_title in data:
        return data[conv_title]["history"]
    elif data:
        # ISO-8601 timestamps sort lexicographically, so max() picks the newest.
        latest = max(data.values(), key=lambda x: x["updated"])
        return latest["history"]
    return []


def refresh_conversation_list(hf_token):
    """Return a gr.update carrying the user's saved conversation titles (newest first)."""
    info = get_hf_userinfo(hf_token)
    username = info["name"]
    if username.lower() == "guest":
        return gr.update(choices=[], value=None)
    fname = f"{username}.pkl"
    if not os.path.exists(fname):
        return gr.update(choices=[], value=None)
    with open(fname, "rb") as f:
        data = pickle.load(f)
    titles = sorted(data.keys(), reverse=True)
    return gr.update(choices=titles, value=titles[0] if titles else None)


# ──────────────────────────────
# Chat 함수
# ──────────────────────────────
def chat(message, history, hf_token):
    """Run the agent on *message*, append the turn, save, and return UI updates.

    Returns (history, history, "") to update the chatbot and clear the textbox.
    """
    try:
        raw_response = agent.invoke(message)
        text = str(raw_response)
        # The structured-chat agent may emit a JSON action blob; pull the
        # human-readable answer out of the first {...} span if one parses.
        output = text
        match = re.search(r"\{.*\}", text, re.DOTALL)
        if match:
            try:
                obj = json.loads(match.group(0))
                output = (
                    obj.get("action_input")
                    or obj.get("Final Answer")
                    or obj.get("output")
                    or obj.get("content")
                    or text
                )
            except Exception:
                output = text
    except Exception as e:
        output = f"⚠️ 오류: {e}"

    # Append the turn and persist immediately.
    history = history + [(message, output)]
    save_conversation(history, hf_token)
    return history, history, ""


# ──────────────────────────────
# Gradio UI (ChatGPT 스타일)
# ──────────────────────────────
with gr.Blocks(theme=gr.themes.Soft(), title="PIXAL Assistant (HuggingFace OAuth)") as demo:
    # BUGFIX: the original read `wwith gr.Row(...)` — a SyntaxError that
    # prevented the whole app from loading.
    with gr.Row(elem_id="header"):
        # The original HTML header was garbled in transit; reconstructed minimally.
        gr.HTML("""
        <div style="display:flex;align-items:center;gap:8px;">
            <h2>🤖 PIXAL Assistant</h2>
        </div>
        """)
        user_avatar = gr.Image(show_label=False, width=40, height=40, elem_id="avatar")
        user_name = gr.Markdown("로그인 필요", elem_id="username", elem_classes="text-right")
        login_btn = gr.LoginButton(label="🔐 로그인", elem_id="login-btn")

    hf_token = gr.State("")

    def on_login(token):
        """Store the OAuth token and display the user's avatar and name."""
        info = get_hf_userinfo(token)
        return token, info["avatar"], f"**{info['name']}**"

    # NOTE(review): gr.LoginButton's event API varies by Gradio version —
    # confirm `.login(...)` is the correct hook for the installed release.
    login_btn.login(on_login, None, [hf_token, user_avatar, user_name])

    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(label=None, height=600, render_markdown=True)
            msg = gr.Textbox(placeholder="메시지를 입력하세요...", show_label=False)
            send = gr.Button("전송", variant="primary")
            clear = gr.Button("🧹 초기화")
            msg.submit(chat, [msg, chatbot, hf_token], [chatbot, chatbot, msg])
            send.click(chat, [msg, chatbot, hf_token], [chatbot, chatbot, msg])
            clear.click(lambda: None, None, chatbot, queue=False)
        with gr.Column(scale=1):
            gr.Markdown("### 💾 저장된 대화")
            convo_list = gr.Dropdown(label="대화 선택", choices=[])
            refresh_btn = gr.Button("🔄 새로고침")
            load_btn = gr.Button("📂 불러오기")
            refresh_btn.click(refresh_conversation_list, [hf_token], convo_list)
            load_btn.click(load_conversation, [hf_token, convo_list], chatbot)

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)