# pixal_agent_full.py
import os
import re
import json
import queue
import datetime
from typing import Optional, List, Dict, Any

import requests
import gradio as gr

from langchain.llms.base import LLM
from langchain.agents import initialize_agent, AgentType, load_tools
from langchain.agents import AgentExecutor, create_structured_chat_agent
from langchain.tools import Tool
from langchain.callbacks.base import BaseCallbackHandler
from langchain.tools import YouTubeSearchTool as YTS
from langchain_experimental.tools.python.tool import PythonREPLTool

# 2. Custom callback handler
# github_model_llm.py
| """ | |
| GitHub Models API κΈ°λ° LLM λνΌ (LangChain LLM νΈν) | |
| - OpenAI-style chat completions νΈν | |
| - function calling (OPENAI_MULTI_FUNCTIONS) μ§μ: functions, function_call μ λ¬ κ°λ₯ | |
| - system prompt (system_prompt) μ§μ | |
| - μ΅μ : temperature, max_tokens, top_p λ± μ λ¬ | |
| - raw response λ°ν λ©μλ ν¬ν¨ | |
| """ | |
from requests.adapters import HTTPAdapter, Retry

# --- Legacy (pre-Pydantic) wrapper, kept for reference only ---
'''
class GitHubModelLLM(LLM):
    def __init__(
        self,
        model: str = "openai/gpt-4.1",
        token: Optional[str] = os.getenv("token"),
        endpoint: str = "https://models.github.ai/inference",
        system_prompt: Optional[str] = "You are PIXAL (Primary Interactive X-ternal Assistant with multi Language). Your developer is Jeon Seong-jun, a 6th-grade Python programmer.",
        request_timeout: float = 30.0,
        max_retries: int = 2,
        backoff_factor: float = 0.3,
        **kwargs,
    ):
        """
        Args:
            model: model name (e.g. "openai/gpt-4.1")
            token: GitHub Models API token (Bearer). The GITHUB_TOKEN / token env vars are used as fallbacks.
            endpoint: API endpoint (default: https://models.github.ai/inference)
            system_prompt: (optional) system-role message prepended to every conversation
            request_timeout: request timeout in seconds
            max_retries: number of network retries
            backoff_factor: exponential backoff factor between retries
            kwargs: extra arguments forwarded to the LangChain LLM base class
        """
        super().__init__(**kwargs)
        self.model = model
        self.endpoint = endpoint.rstrip("/")
        self.token = token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
        self.system_prompt = system_prompt
        self.request_timeout = request_timeout
        # requests session with retry policy
        self.session = requests.Session()
        retries = Retry(total=max_retries, backoff_factor=backoff_factor,
                        status_forcelist=[429, 500, 502, 503, 504],
                        allowed_methods=["POST", "GET"])
        self.session.mount("https://", HTTPAdapter(max_retries=retries))
        self.session.headers.update({
            "Content-Type": "application/json"
        })
        if self.token:
            self.session.headers.update({"Authorization": f"Bearer {self.token}"})

    @property
    def _llm_type(self) -> str:
        return "github_models_api"

    # ---------- internal helpers ----------
    def _build_messages(self, prompt: str, extra_messages: Optional[List[Dict[str, Any]]] = None) -> List[Dict[str, Any]]:
        """
        Build the messages array: system (optional) + extra_messages (if any) + user prompt.
        extra_messages: a list of messages that already carry role keys (e.g. conversation history).
        """
        msgs: List[Dict[str, Any]] = []
        if self.system_prompt:
            msgs.append({"role": "system", "content": self.system_prompt})
        if extra_messages:
            # expected format: list of {"role": .., "content": ..}
            msgs.extend(extra_messages)
        msgs.append({"role": "user", "content": prompt})
        return msgs

    def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]:
        url = f"{self.endpoint}/chat/completions"
        # ensure Authorization is present
        if "Authorization" not in self.session.headers and not self.token:
            raise ValueError("GitHub Models token not set. Provide token param or set GITHUB_TOKEN env var.")
        resp = self.session.post(url, json=body, timeout=self.request_timeout)
        try:
            resp.raise_for_status()
        except requests.HTTPError as e:
            # try to surface a JSON error body if present
            content = resp.text
            try:
                j = resp.json()
                content = json.dumps(j, ensure_ascii=False, indent=2)
            except Exception:
                pass
            raise RuntimeError(f"GitHub Models API error: {e} - {content}")
        return resp.json()

    # ---------- LangChain LLM interface ----------
    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
        """
        Synchronous LangChain LLM `_call` implementation.
        Supported kwargs:
          - functions: list[dict] (function schemas)
          - function_call: "auto" | {"name": "..."} | etc.
          - messages: list[dict] (pass a full conversation instead of a bare prompt)
          - temperature, top_p, max_tokens, n, stream, etc.
        Returns:
          The assistant content (string). If the model returns a function_call, the
          'content' is returned when present; otherwise the function_call object is
          returned as a JSON string so the caller can parse it.
        """
        # support passing full messages via kwargs['messages']
        messages = None
        extra_messages = None
        if "messages" in kwargs and isinstance(kwargs["messages"], list):
            messages = kwargs.pop("messages")
        else:
            # optionally allow 'extra_messages' (e.g. history)
            extra_messages = kwargs.pop("extra_messages", None)
        if messages is None:
            messages = self._build_messages(prompt, extra_messages=extra_messages)
        body: Dict[str, Any] = {
            "model": self.model,
            "messages": messages,
        }
        # forward optional top-level params (temperature, max_tokens, etc.) from kwargs
        for opt in ["temperature", "top_p", "max_tokens", "n", "stream", "presence_penalty", "frequency_penalty"]:
            if opt in kwargs:
                body[opt] = kwargs.pop(opt)
        # forward function-calling keys verbatim if provided
        if "functions" in kwargs:
            body["functions"] = kwargs.pop("functions")
        if "function_call" in kwargs:
            body["function_call"] = kwargs.pop("function_call")
        # include stop sequences if present
        if stop:
            body["stop"] = stop
        # send request
        raw = self._post_chat(body)
        # keep the raw response for callers that need it
        self._last_raw = raw
        # parse the assistant message
        choices = raw.get("choices") or []
        if not choices:
            return ""
        message_obj = choices[0].get("message", {})
        # if the assistant returned a function_call, surface that info
        if "function_call" in message_obj:
            # return the function_call as a JSON string so an agent/tool orchestrator can parse it
            # (content is preferred when it exists; it may be absent here)
            func = message_obj["function_call"]
            return json.dumps({"function_call": func}, ensure_ascii=False)
        # otherwise return the assistant content
        return message_obj.get("content", "") or ""

    # optional: expose a raw-response getter
    def last_raw_response(self) -> Optional[Dict[str, Any]]:
        return getattr(self, "_last_raw", None)

    # optional: convenience method that returns the full message object
    def chat_completions(self, prompt: str, messages: Optional[List[Dict[str, Any]]] = None, **kwargs) -> Dict[str, Any]:
        """
        Call chat completions directly and return the full parsed JSON response.
        - If `messages` is provided it is used as the full messages array
          (system/user/assistant roles as needed)
        - otherwise prompt + system_prompt are used to construct the messages.
        """
        if messages is None:
            messages = self._build_messages(prompt)
        body: Dict[str, Any] = {"model": self.model, "messages": messages}
        for opt in ["temperature", "top_p", "max_tokens", "n", "stream"]:
            if opt in kwargs:
                body[opt] = kwargs.pop(opt)
        if "functions" in kwargs:
            body["functions"] = kwargs.pop("functions")
        if "function_call" in kwargs:
            body["function_call"] = kwargs.pop("function_call")
        raw = self._post_chat(body)
        self._last_raw = raw
        return raw
'''
class GitHubModelLLM(LLM):
    """LangChain LLM backed by the GitHub Models API (Pydantic-compatible)."""
    model: str = "openai/gpt-4.1"
    endpoint: str = "https://models.github.ai/inference"
    token: Optional[str] = os.getenv("token")
    system_prompt: Optional[str] = "You are PIXAL (Primary Interactive X-ternal Assistant with multi Language). Your developer is Jeon Seong-jun, a 6th-grade Python programmer."
    request_timeout: float = 30.0
    max_retries: int = 2
    backoff_factor: float = 0.3

    @property
    def _llm_type(self) -> str:
        return "github_models_api"

    def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]:
        token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
        if not token:
            raise ValueError("❌ GitHub token is not set.")
        session = requests.Session()
        retries = Retry(total=self.max_retries, backoff_factor=self.backoff_factor,
                        status_forcelist=[429, 500, 502, 503, 504])
        session.mount("https://", HTTPAdapter(max_retries=retries))
        session.headers.update({
            "Content-Type": "application/json",
            # never hard-code a personal access token here; use the resolved env token
            "Authorization": f"Bearer {token}"
        })
        resp = session.post(f"{self.endpoint}/chat/completions", json=body, timeout=self.request_timeout)
        resp.raise_for_status()
        return resp.json()

    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
        body = {
            "model": self.model,
            "messages": []
        }
        if self.system_prompt:
            body["messages"].append({"role": "system", "content": self.system_prompt})
        body["messages"].append({"role": "user", "content": prompt})
        for key in ["temperature", "max_tokens", "functions", "function_call"]:
            if key in kwargs:
                body[key] = kwargs[key]
        if stop:
            body["stop"] = stop
        res = self._post_chat(body)
        msg = res.get("choices", [{}])[0].get("message", {})
        return msg.get("content") or json.dumps(msg.get("function_call", {}))
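
# A minimal smoke-test sketch for the wrapper above (defined only, never called
# at import time; the prompt text is illustrative and a valid GITHUB_TOKEN / token
# env var is assumed to be set):
def _llm_smoke_test() -> str:
    llm_probe = GitHubModelLLM()
    # _call() sends system_prompt + the user prompt and returns the assistant text
    return llm_probe._call("Introduce yourself in one sentence.", temperature=0.2)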
from langchain_community.retrievers import WikipediaRetriever
from langchain.tools.retriever import create_retriever_tool
retriever = WikipediaRetriever(lang="ko", top_k_results=10)
wiki = Tool(func=retriever.get_relevant_documents, name="WIKI SEARCH",
            description="Fetches the needed information from Wikipedia. Verify the results before using them.")
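
# Sketch of what the retriever behind the tool returns (the query string is
# illustrative; defined only, never called at import time). get_relevant_documents()
# yields a List[Document]; each Document carries .page_content and .metadata.
def _wiki_probe():
    docs = retriever.get_relevant_documents("파이썬")
    return [(d.metadata.get("title"), d.page_content[:120]) for d in docs]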
# ──────────────────────────────
# GitHub Models LLM
# ──────────────────────────────
# ──────────────────────────────
# LLM setup
# ──────────────────────────────
token = os.getenv("GITHUB_TOKEN") or os.getenv("token")
if not token:
    print("⚠️ A GitHub token is required, e.g.: setx GITHUB_TOKEN your_token")
llm = GitHubModelLLM()
# ──────────────────────────────
# Load LangChain built-in tools
# ──────────────────────────────
tools = load_tools(
    ["ddg-search", "requests_all", "llm-math"],
    llm=llm, allow_dangerous_tools=True,
) + [YTS()] + [wiki]
# ──────────────────────────────
# Python execution tool (LangChain built-in)
# ──────────────────────────────
python_tool = PythonREPLTool()
tools.append(Tool(name="python_repl", func=python_tool.run, description="Executes Python code."))

from langchain import hub
prompt = hub.pull("hwchase17/structured-chat-agent")

from langchain_community.tools.shell.tool import ShellTool
shell_tool = ShellTool()
tools.append(Tool(name="shell_exec", func=shell_tool.run, description="Executes local shell commands."))
# ──────────────────────────────
# File tools (see FileManagementToolkit below)
# ──────────────────────────────

# ──────────────────────────────
# Accurate Korea time function (Asia/Seoul)
# ──────────────────────────────
from zoneinfo import ZoneInfo
def time_now(_=""):
    try:
        # fetch the exact time for Asia/Seoul from an external API
        resp = requests.get("https://timeapi.io/api/Time/current/zone?timeZone=Asia/Seoul", timeout=5)
        if resp.status_code == 200:
            data = resp.json()
            dt = data["dateTime"].split(".")[0].replace("T", " ")
            return f"Current time: {dt} (Asia/Seoul, NTP-synced server time)"
        else:
            # fall back to the local system clock if the API fails
            tz = ZoneInfo("Asia/Seoul")
            now = datetime.datetime.now(tz)
            return f"Current time (local): {now.strftime('%Y-%m-%d %H:%M:%S')} (Asia/Seoul)"
    except Exception as e:
        tz = ZoneInfo("Asia/Seoul")
        now = datetime.datetime.now(tz)
        return f"Current time (fallback): {now.strftime('%Y-%m-%d %H:%M:%S')} (Asia/Seoul, error: {e})"
# ──────────────────────────────
# Tool registration
# ──────────────────────────────
tools.extend([Tool(name="time_now", func=time_now, description="Returns the current time.")])

from langchain.memory import ConversationBufferMemory as MEM
from langchain.agents.agent_toolkits import FileManagementToolkit as FMT
tools.extend(FMT(root_dir=str(os.getcwd())).get_tools())
# ──────────────────────────────
# Agent initialization
# ──────────────────────────────
mem = MEM()
agent = initialize_agent(tools, llm, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True, memory=mem)
# agent = create_structured_chat_agent(llm, tools, prompt)
# agent = AgentExecutor(agent=agent, tools=tools, memory=mem)
# ... (LLM, tools, and agent setup above unchanged)
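
# Sketch of how a single turn flows through the agent (defined only, never called
# at import time; the question is illustrative). The structured-chat agent picks a
# registered tool such as time_now or python_repl and returns a final answer string.
def _agent_probe() -> str:
    return agent.run("What time is it in Seoul right now?")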
# ──────────────────────────────
# Conversation summary helper
# ──────────────────────────────
def summarize_title(history):
    """Summarize the conversation into a short title."""
    if not history:
        return "New conversation"
    text = "\n".join(f"User:{h[0]} AI:{h[1]}" for h in history[-3:])
    try:
        title = llm._call(f"Summarize the topic of the following conversation in one line:\n{text}")
        return title.strip().replace("\n", " ")[:60]
    except Exception:
        return "Summary failed"
import pickle

# pin the working directory to this file's location
os.chdir(os.path.dirname(os.path.abspath(__file__)))
os.makedirs("user_logs", exist_ok=True)
# --- Saving / loading conversation logs ---
def save_conversation(username, history, conv_name="current"):
    """Persist the conversation log per account as a pickle, on every turn."""
    os.makedirs(f"user_logs/{username}", exist_ok=True)
    title = summarize_title(history)
    fname = f"user_logs/{username}/{conv_name}.pkl"
    with open(fname, "wb") as f:
        pickle.dump({"title": title, "history": history}, f)

def load_conversation(username, conv_name="current"):
    path = f"user_logs/{username}/{conv_name}.pkl"
    if not os.path.exists(path):
        return []
    with open(path, "rb") as f:
        data = pickle.load(f)
    return data.get("history", [])

def list_conversations(username):
    os.makedirs(f"user_logs/{username}", exist_ok=True)
    files = [f for f in os.listdir(f"user_logs/{username}") if f.endswith(".pkl")]
    titles = []
    for f in files:
        with open(f"user_logs/{username}/" + f, "rb") as fp:
            data = pickle.load(fp)
        titles.append((data.get("title", f), f))
    return titles
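
# Round-trip sketch of the persistence layer (username, messages, and conv_name
# are illustrative; defined only, never called at import time):
def _log_roundtrip_probe():
    save_conversation("guest", [("hi", "hello!")], conv_name="demo")
    assert load_conversation("guest", "demo") == [("hi", "hello!")]
    return list_conversations("guest")  # -> [(title, "demo.pkl"), ...]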
# --- chat function (revised) ---
def chat(message, history, hf_token=None, conv_name="current"):
    # the third UI input is the HF OAuth token; resolve it to a username
    username = get_hf_user(hf_token) if hf_token else "guest"
    try:
        raw_response = agent.run(message)
        text = str(raw_response)
        # parse JSON-shaped responses
        output = text
        match = re.search(r"\{.*\}", text, re.DOTALL)
        if match:
            try:
                obj = json.loads(match.group(0))
                output = (
                    obj.get("action_input")
                    or obj.get("Final Answer")
                    or obj.get("output")
                    or obj.get("content")
                    or text
                )
            except Exception:
                output = text
    except Exception as e:
        output = f"⚠️ Error: {e}"
    # append to the log and save immediately
    history = history + [(message, output)]
    save_conversation(username, history, conv_name)
    return history, history, ""
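
# Sketch of the JSON-extraction step above on a canned agent reply (the string is
# illustrative; no agent or network call involved; defined only, never invoked):
def _parse_probe() -> str:
    text = '{"action": "Final Answer", "action_input": "It is 3 pm."}'
    obj = json.loads(re.search(r"\{.*\}", text, re.DOTALL).group(0))
    return obj.get("action_input") or text  # -> "It is 3 pm."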
# --- load-button handler ---
def load_selected(username, file):
    path = f"user_logs/{username}/{file}"
    if not os.path.exists(path):
        return []
    with open(path, "rb") as f:
        data = pickle.load(f)
    return data.get("history", [])
# ──────────────────────────────
# Fetch user info after login
# ──────────────────────────────
def get_hf_user(token):
    """Look up user info with an HF OAuth token."""
    try:
        r = requests.get("https://huggingface.co/api/whoami-v2", headers={"Authorization": f"Bearer {token}"})
        if r.status_code == 200:
            data = r.json()
            return data.get("name") or data.get("email") or "unknown_user"
    except Exception:
        pass
    return "guest"
# Note: takes hf_token (or a username) as input
def refresh_conversation_list(username="guest"):
    """Refresh the per-account conversation list (for updating the Gradio Dropdown)."""
    base_dir = os.path.join("user_logs", username)
    os.makedirs(base_dir, exist_ok=True)
    files = sorted(
        [f for f in os.listdir(base_dir) if f.endswith(".pkl")],
        reverse=True
    )
    # build (label, value) choices: show the summary title, keep the filename as
    # the value so load_selected can open the right file
    choices = []
    for f in files:
        try:
            with open(os.path.join(base_dir, f), "rb") as fp:
                data = pickle.load(fp)
            title = data.get("title", f.replace(".pkl", ""))
        except Exception:
            title = f.replace(".pkl", "")
        choices.append((title, f))
    # update the Dropdown (tuple choices require Gradio 4.x)
    if choices:
        return gr.update(choices=choices, value=choices[0][1])
    return gr.update(choices=[], value=None)
# ──────────────────────────────
# Gradio UI with HF Auth
# ──────────────────────────────
with gr.Blocks(theme=gr.themes.Soft(), title="PIXAL Assistant (HF Auth)") as demo:
    gr.Markdown("## 🤖 PIXAL Assistant — conversation logs tied to your Hugging Face account")
    hf_login = gr.LoginButton()
    hf_token = gr.State()

    def login(token):  # return the token after login
        return token

    with gr.Row():
        with gr.Column(scale=2):
            chatbot = gr.Chatbot(label="PIXAL chat", height=600, render_markdown=True)
            msg = gr.Textbox(label="Message", placeholder="Type and press Enter, or click Send")
            send = gr.Button("Send")
            clear = gr.Button("Clear")
            msg.submit(chat, [msg, chatbot, hf_token], [chatbot, chatbot, msg])
            send.click(chat, [msg, chatbot, hf_token], [chatbot, chatbot, msg])
            clear.click(lambda: None, None, chatbot, queue=False)
        with gr.Column(scale=1):
            gr.Markdown("### 💾 Saved conversations")
            convo_files = gr.Dropdown(label="Select a conversation", choices=[])
            refresh_btn = gr.Button("🔄 Refresh list")
            load_btn = gr.Button("Load")
            refresh_btn.click(refresh_conversation_list, None, convo_files)
            # load_selected needs (username, file); the list above is refreshed for "guest"
            load_btn.click(lambda f: load_selected("guest", f), [convo_files], chatbot)

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)