peterpeter8585 committed on
Commit
1f04f3b
·
verified ·
1 Parent(s): fcd9ef3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +89 -156
app.py CHANGED
@@ -1,24 +1,29 @@
1
- import os, json, pickle, datetime, requests, re, gradio as gr
 
2
  from typing import Optional, List, Dict, Any
3
  from requests.adapters import HTTPAdapter, Retry
4
  from langchain.llms.base import LLM
 
5
  from langchain.agents import initialize_agent, AgentType, load_tools
6
  from langchain.tools import Tool
7
- from langchain.memory import ConversationBufferMemory
8
  from langchain_experimental.tools.python.tool import PythonREPLTool
9
  from langchain_community.retrievers import WikipediaRetriever
10
- from langchain.tools.retriever import create_retriever_tool
11
- from langchain_community.tools.shell.tool import ShellTool
12
- from langchain.tools import YouTubeSearchTool
13
 
14
  # ──────────────────────────────
15
  # βœ… GitHubModelLLM (κ·ΈλŒ€λ‘œ μœ μ§€)
16
  # ──────────────────────────────
 
 
 
 
 
 
17
  class GitHubModelLLM(LLM):
 
18
  model: str = "openai/gpt-4.1"
19
  endpoint: str = "https://models.github.ai/inference"
20
  token: Optional[str] = os.environ.get("token")
21
- system_prompt: Optional[str] = "λ„ˆλŠ” PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야.λ„ˆμ˜ κ°œλ°œμžλŠ” μ •μ„±μœ€ μ΄λΌλŠ” 6ν•™λ…„ 파이썬 ν”„λ‘œκ·Έλž˜λ¨Έμ•Ό."
22
  request_timeout: float = 30.0
23
  max_retries: int = 2
24
  backoff_factor: float = 0.3
@@ -29,202 +34,130 @@ class GitHubModelLLM(LLM):
29
 
30
  def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]:
31
  token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
32
- if not token:
33
- raise ValueError("❌ GitHub token이 μ„€μ •λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
34
  session = requests.Session()
35
  retries = Retry(total=self.max_retries, backoff_factor=self.backoff_factor,
36
  status_forcelist=[429, 500, 502, 503, 504])
37
  session.mount("https://", HTTPAdapter(max_retries=retries))
38
  session.headers.update({
39
  "Content-Type": "application/json",
40
- "Authorization": "Bearer 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"
41
  })
42
  resp = session.post(f"{self.endpoint}/chat/completions", json=body, timeout=self.request_timeout)
43
  resp.raise_for_status()
44
  return resp.json()
45
 
46
- def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
47
- body = {"model": self.model, "messages": []}
 
 
 
 
 
 
 
 
 
48
  if self.system_prompt:
49
- body["messages"].append({"role": "system", "content": self.system_prompt})
50
- body["messages"].append({"role": "user", "content": prompt})
 
 
 
 
 
 
 
 
 
 
51
  if stop:
52
  body["stop"] = stop
 
 
53
  res = self._post_chat(body)
54
  msg = res.get("choices", [{}])[0].get("message", {})
55
  return msg.get("content") or json.dumps(msg.get("function_call", {}))
56
-
57
- # ──────────────────────────────
58
- # βœ… HuggingFace API (ν”„λ‘œν•„)
59
- # ──────────────────────────────
60
- def get_hf_userinfo(hf_token: str) -> dict:
61
- try:
62
- r = requests.get("https://huggingface.co/api/whoami-v2",
63
- headers={"Authorization": f"Bearer {hf_token}"}, timeout=5)
64
- if r.status_code == 200:
65
- j = r.json()
66
- return {
67
- "name": j.get("name", "guest"),
68
- "avatar": j.get("avatar", "https://huggingface.co/front/assets/huggingface_logo-noborder.svg")
69
- }
70
- except Exception:
71
- pass
72
- return {"name": "guest", "avatar": "https://huggingface.co/front/assets/huggingface_logo-noborder.svg"}
73
-
74
  # ──────────────────────────────
75
- # βœ… Agent ꡬ성
76
  # ──────────────────────────────
77
  llm = GitHubModelLLM()
78
- tools = load_tools(["ddg-search", "requests_all", "llm-math"], llm=llm, allow_dangerous_tools=True)
79
- tools += [YouTubeSearchTool(), ShellTool(), PythonREPLTool()]
80
  retriever = WikipediaRetriever(lang="ko")
81
- retriever_tool=Tool(func=retriever.get_relevant_documents,description="μœ„ν‚€λ°±κ³Ό 검색기.κΌ­ 정보λ₯Ό κ²€μ¦ν•˜μ—¬ μ‚¬μš©ν•˜μ‹œμ˜€",name="Wiki")
82
- tools.append(retriever_tool)
83
-
84
- def time_now(_=""):
85
- now = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=9)))
86
- return f"ν˜„μž¬ μ‹œκ°: {now.strftime('%Y-%m-%d %H:%M:%S')} (Asia/Seoul)"
87
- tools.append(Tool(name="time_now", func=time_now, description="ν˜„μž¬ μ‹œκ°„μ„ λ°˜ν™˜ν•©λ‹ˆλ‹€."))
88
 
 
89
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
90
- agent = initialize_agent(tools, llm, agent_type=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
91
- memory=memory, verbose=True)
92
-
93
- # ──────────────────────────────
94
- # βœ… λŒ€ν™” μ €μž₯/λ‘œλ“œ
95
- # ──────────────────────────────
96
- os.chdir(os.path.dirname(os.path.abspath(__file__)))
97
 
98
- def summarize_title(history):
99
- if not history:
100
- return "μƒˆ λŒ€ν™”"
101
- text = "\n".join(f"User:{m} AI:{r}" for m, r in history[-3:])
102
- try:
103
- title = llm._call(f"이 λŒ€ν™”μ˜ 주제λ₯Ό ν•œ μ€„λ‘œ μš”μ•½ν•΄μ€˜:\n{text}")
104
- return title.strip().replace("\n", " ")[:50]
105
- except Exception:
106
- return "μš”μ•½ μ‹€νŒ¨"
107
-
108
- def save_conversation(history, hf_token):
109
- info = get_hf_userinfo(hf_token)
110
- username = info["name"]
111
- if username.lower() == "guest":
112
- return
113
- fname = f"{username}.pkl"
114
- data = {}
115
- if os.path.exists(fname):
116
- with open(fname, "rb") as f:
117
- data = pickle.load(f)
118
- title = summarize_title(history)
119
- data[title] = {"title": title, "updated": datetime.datetime.now().isoformat(), "history": history}
120
- with open(fname, "wb") as f:
121
- pickle.dump(data, f)
122
-
123
- def load_conversation(hf_token, conv_title=None):
124
- info = get_hf_userinfo(hf_token)
125
- username = info["name"]
126
- if username.lower() == "guest":
127
- return []
128
- fname = f"{username}.pkl"
129
- if not os.path.exists(fname):
130
- return []
131
- with open(fname, "rb") as f:
132
- data = pickle.load(f)
133
- if conv_title and conv_title in data:
134
- return data[conv_title]["history"]
135
- elif data:
136
- latest = max(data.values(), key=lambda x: x["updated"])
137
- return latest["history"]
138
- return []
139
-
140
- def refresh_conversation_list(hf_token):
141
- info = get_hf_userinfo(hf_token)
142
- username = info["name"]
143
- if username.lower() == "guest":
144
- return gr.update(choices=[], value=None)
145
- fname = f"{username}.pkl"
146
- if not os.path.exists(fname):
147
- return gr.update(choices=[], value=None)
148
- with open(fname, "rb") as f:
149
- data = pickle.load(f)
150
- titles = sorted(data.keys(), reverse=True)
151
- return gr.update(choices=titles, value=titles[0] if titles else None)
152
 
153
  # ──────────────────────────────
154
- # βœ… Chat ν•¨μˆ˜
155
  # ──────────────────────────────
156
- def chat(message, history,hf_token):
157
  try:
158
- raw_response = agent.invoke(message)
159
- text = str(raw_response)
160
-
161
- # JSON ν˜•μ‹ 응닡 νŒŒμ‹±
162
- output = text
163
  match = re.search(r"\{.*\}", text, re.DOTALL)
164
  if match:
165
  try:
166
  obj = json.loads(match.group(0))
167
- output = (
168
- obj.get("action_input")
169
- or obj.get("Final Answer")
170
- or obj.get("output")
171
- or obj.get("content")
172
- or text
173
- )
174
  except Exception:
175
- output = text
 
 
176
  except Exception as e:
177
- output = f"⚠️ 였λ₯˜: {e}"
178
 
179
- # 기둝 μΆ”κ°€ 및 μ¦‰μ‹œ μ €μž₯
180
- history = history + [(message, output)]
181
- save_conversation(history, hf_token)
182
  return history, history, ""
183
 
184
  # ──────────────────────────────
185
  # βœ… Gradio UI (ChatGPT μŠ€νƒ€μΌ)
186
  # ──────────────────────────────
187
- with gr.Blocks(theme=gr.themes.Soft(), title="PIXAL Assistant (HuggingFace OAuth)") as demo:
188
- with gr.Row(elem_id="header"):
189
- gr.HTML("""
190
- <div style="background:#f5f5f5;padding:12px;border-bottom:1px solid #ddd;
191
  display:flex;align-items:center;justify-content:space-between;">
192
  <h2 style="margin:0;">πŸ€– PIXAL Assistant</h2>
 
193
  </div>
194
  """)
195
- user_avatar = gr.Image(show_label=False, width=40, height=40, elem_id="avatar")
196
- user_name = gr.Markdown("둜그인 ν•„μš”", elem_id="username", elem_classes="text-right")
197
- # --- κΈ°μ‘΄ μ½”λ“œ 쀑 μˆ˜μ • λΆ€λΆ„λ§Œ ---
198
 
199
- login_btn = gr.LoginButton(elem_id="login-btn")
200
- hf_token = gr.State()
201
-
202
- def on_login(token):
203
- info = get_hf_userinfo(token)
204
- return token, info["avatar"], f"**{info['name']}**"
205
-
206
- # πŸ”½ 기쑴의 login_btn.login(...) β†’ click()으둜 μˆ˜μ •
207
- login_btn.click(on_login, inputs=login_btn, outputs=[hf_token, user_avatar, user_name])
208
 
209
  with gr.Row():
210
- with gr.Column(scale=3):
211
- chatbot = gr.Chatbot(label=None, height=600, render_markdown=True)
212
- msg = gr.Textbox(placeholder="λ©”μ‹œμ§€λ₯Ό μž…λ ₯ν•˜μ„Έμš”...", show_label=False)
213
- send = gr.Button("전솑", variant="primary")
214
- clear = gr.Button("🧹 μ΄ˆκΈ°ν™”")
215
-
216
- msg.submit(chat, [msg, chatbot, hf_token], [chatbot, chatbot, msg])
217
- send.click(chat, [msg, chatbot, hf_token], [chatbot, chatbot, msg])
218
- clear.click(lambda: None, None, chatbot, queue=False)
219
-
220
- with gr.Column(scale=1):
221
- gr.Markdown("### πŸ’Ύ μ €μž₯된 λŒ€ν™”")
222
- convo_list = gr.Dropdown(label="λŒ€ν™” 선택", choices=[])
223
- refresh_btn = gr.Button("πŸ”„ μƒˆλ‘œκ³ μΉ¨")
224
- load_btn = gr.Button("πŸ“‚ 뢈러였기")
225
-
226
- refresh_btn.click(refresh_conversation_list, [hf_token], convo_list)
227
- load_btn.click(load_conversation, [hf_token, convo_list], chatbot)
228
 
229
  if __name__ == "__main__":
230
- demo.launch(server_name="0.0.0.0", server_port=7860)
 
1
+ # app_pixal_chat.py
2
+ import os, re, json, gradio as gr, requests
3
  from typing import Optional, List, Dict, Any
4
  from requests.adapters import HTTPAdapter, Retry
5
  from langchain.llms.base import LLM
6
+ from langchain.memory import ConversationBufferMemory
7
  from langchain.agents import initialize_agent, AgentType, load_tools
8
  from langchain.tools import Tool
 
9
  from langchain_experimental.tools.python.tool import PythonREPLTool
10
  from langchain_community.retrievers import WikipediaRetriever
 
 
 
11
 
12
  # ──────────────────────────────
13
  # βœ… GitHubModelLLM (κ·ΈλŒ€λ‘œ μœ μ§€)
14
  # ──────────────────────────────
15
+
16
+ from typing import Optional, List, Dict, Any
17
+ from langchain.llms.base import LLM
18
+ import requests, os, json
19
+ from requests.adapters import HTTPAdapter, Retry
20
+
21
  class GitHubModelLLM(LLM):
22
+ """GitHub Models API 기반 LangChain LLM (λŒ€ν™” λ©”λͺ¨λ¦¬ 톡합 지원)"""
23
  model: str = "openai/gpt-4.1"
24
  endpoint: str = "https://models.github.ai/inference"
25
  token: Optional[str] = os.environ.get("token")
26
+ system_prompt: Optional[str] = "λ„ˆλŠ” PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야. λ„ˆμ˜ κ°œλ°œμžλŠ” μ •μ„±μœ€ μ΄λΌλŠ” 6ν•™λ…„ 파이썬 ν”„λ‘œκ·Έλž˜λ¨Έμ•Ό.이것은 μ‹œμŠ€ν…œ λ©”μ‹œμ§€μž…λ‹ˆλ‹€.μ°Έκ³  ν•˜μ‹­μ‹œμ˜€."
27
  request_timeout: float = 30.0
28
  max_retries: int = 2
29
  backoff_factor: float = 0.3
 
34
 
35
  def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]:
36
  token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
 
 
37
  session = requests.Session()
38
  retries = Retry(total=self.max_retries, backoff_factor=self.backoff_factor,
39
  status_forcelist=[429, 500, 502, 503, 504])
40
  session.mount("https://", HTTPAdapter(max_retries=retries))
41
  session.headers.update({
42
  "Content-Type": "application/json",
43
+ "Authorization": f"Bearer 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"
44
  })
45
  resp = session.post(f"{self.endpoint}/chat/completions", json=body, timeout=self.request_timeout)
46
  resp.raise_for_status()
47
  return resp.json()
48
 
49
+ def _call(
50
+ self,
51
+ prompt: str,
52
+ stop: Optional[List[str]] = None,
53
+ **kwargs
54
+ ) -> str:
55
+ """λŒ€ν™” λ©”λͺ¨λ¦¬(chat_history)λ₯Ό ν¬ν•¨ν•˜μ—¬ λͺ¨λΈ 호좜"""
56
+ # πŸ’¬ λ©”λͺ¨λ¦¬μ— μ €μž₯된 λŒ€ν™” λ©”μ‹œμ§€ 뢈러였기
57
+ memory = kwargs.get("memory")
58
+ messages = []
59
+
60
  if self.system_prompt:
61
+ messages.append({"role": "system", "content": self.system_prompt})
62
+
63
+ # memoryκ°€ μžˆμ„ 경우 (이전 λŒ€ν™” 포함)
64
+ if memory and hasattr(memory, "chat_memory"):
65
+ for msg in memory.chat_memory.messages:
66
+ role = "user" if msg.type == "human" else "assistant"
67
+ messages.append({"role": role, "content": msg.content})
68
+
69
+ # ν˜„μž¬ μ‚¬μš©μž μž…λ ₯
70
+ messages.append({"role": "user", "content": prompt})
71
+
72
+ body = {"model": self.model, "messages": messages}
73
  if stop:
74
  body["stop"] = stop
75
+
76
+ # API 호좜
77
  res = self._post_chat(body)
78
  msg = res.get("choices", [{}])[0].get("message", {})
79
  return msg.get("content") or json.dumps(msg.get("function_call", {}))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
  # ──────────────────────────────
81
+ # βœ… LangChain 도ꡬ & μ—μ΄μ „νŠΈ ꡬ성
82
  # ──────────────────────────────
83
llm = GitHubModelLLM()

# Built-in tools (DuckDuckGo search, HTTP requests, math) plus a Python REPL.
tools = load_tools(["ddg-search", "requests_all", "llm-math"], llm=llm, allow_dangerous_tools=True)
tools.append(Tool(name="python_repl", func=PythonREPLTool().run, description="Python 코드 실행 도구"))

# Korean Wikipedia retriever exposed as a plain tool.
retriever = WikipediaRetriever(lang="ko")
# BUG FIX: the original passed `retriever.get_relavant_documents` (typo) —
# that attribute does not exist, so this line raised AttributeError at import.
tools.append(Tool(name="wiki", func=retriever.get_relevant_documents, description="위키백과 검색"))

# Conversation memory shared with the agent (keeps chat_history across turns).
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

# Agent wired to the tools, the LLM, and the shared memory.
agent = initialize_agent(
    tools,
    llm,
    agent_type=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    memory=memory,
    verbose=True,
)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
100
 
101
  # ──────────────────────────────
102
+ # βœ… Chat ν•¨μˆ˜ (Memory μœ μ§€)
103
  # ──────────────────────────────
104
def chat(message, history):
    """Run one user turn through the agent and update the Gradio chat state.

    Args:
        message: The user's input text.
        history: List of (user, assistant) tuples currently shown in the Chatbot.

    Returns:
        (history, history, "") — updated chatbot value, updated state, and an
        empty string to clear the input textbox.
    """
    try:
        # BUG FIX: the original also called memory.chat_memory.add_user_message /
        # add_ai_message around agent.run(). The AgentExecutor was built with
        # memory=memory and saves each input/output pair itself, so the manual
        # calls duplicated every turn in chat_history (and left a dangling user
        # message when the agent raised). Let the agent manage its own memory.
        raw = agent.run(message)
        text = str(raw)
        # Structured-chat agents sometimes return a JSON action blob; pull out
        # the human-readable field when one is present.
        match = re.search(r"\{.*\}", text, re.DOTALL)
        if match:
            try:
                obj = json.loads(match.group(0))
                text = obj.get("action_input") or obj.get("Final Answer") or obj.get("content") or text
            except Exception:
                pass  # not valid JSON — keep the raw text
    except Exception as e:
        text = f"⚠️ 오류: {e}"

    history = history + [(message, text)]
    return history, history, ""
125
 
126
  # ──────────────────────────────
127
  # βœ… Gradio UI (ChatGPT μŠ€νƒ€μΌ)
128
  # ──────────────────────────────
129
# NOTE(review): reconstructed from a diff rendering whose Korean strings were
# mojibake-garbled; the UTF-8 strings below are the decoded originals — confirm
# against the deployed file.
with gr.Blocks(theme=gr.themes.Soft(), title="PIXAL Assistant") as demo:
    # Static header banner.
    gr.HTML("""
    <div style="background:#f1f5f9;padding:12px;border-bottom:1px solid #d1d5db;
    display:flex;align-items:center;justify-content:space-between;">
    <h2 style="margin:0;">🤖 PIXAL Assistant</h2>
    <span style="font-size:0.9em;color:#555;">LangChain + GitHub LLM</span>
    </div>
    """)

    # Main chat transcript; avatar shown for the user side only.
    chatbot = gr.Chatbot(
        label=None,
        height=720,
        bubble_full_width=False,
        render_markdown=True,
        avatar_images=("https://avatars.githubusercontent.com/u/9919?s=280&v=4", None),
    )

    # Input row: textbox plus send / clear buttons.
    with gr.Row():
        msg = gr.Textbox(placeholder="메시지를 입력하세요...", show_label=False, scale=8)
        send = gr.Button("전송", variant="primary", scale=1)
        clear = gr.Button("🧹 초기화", scale=1)

    # Enter key and the send button both trigger a chat turn; `chat` returns
    # (history, history, "") so the third output clears the textbox.
    msg.submit(chat, [msg, chatbot], [chatbot, chatbot, msg])
    send.click(chat, [msg, chatbot], [chatbot, chatbot, msg])
    clear.click(lambda: None, None, chatbot, queue=False)

    # Footer note.
    gr.Markdown("""
    <div style="text-align:center;color:#777;font-size:0.85em;margin-top:8px;">
    💡 대화 기록은 세션 동안 유지됩니다.
    Made with ❤️ by PIXAL
    </div>
    """)
 
 
 
 
161
 
162
if __name__ == "__main__":
    # Listen on all interfaces, port 7860 (the Hugging Face Spaces default).
    demo.launch(server_name="0.0.0.0", server_port=7860)