# Pixal1.0 / app.py — Hugging Face Space source.
# The header metadata below was captured from the web UI and is not part of
# the program (kept as comments so the file remains valid Python):
#   peterpeter8585's picture · Update app.py · f5fffed verified
#   raw · history · blame · 25.6 kB
# pixal_agent_full.py
import os
import datetime
import gradio as gr
import requests
from typing import Optional, List
from langchain.llms.base import LLM
from langchain.agents import initialize_agent, AgentType,load_tools
from langchain.agents import AgentExecutor, create_structured_chat_agent
from langchain.tools import Tool
from langchain_experimental.tools.python.tool import PythonREPLTool
import queue
from typing import Any, Dict
import gradio as gr
from langchain.callbacks.base import BaseCallbackHandler
from langchain.tools import YouTubeSearchTool as YTS
# 2. μ»€μŠ€ν…€ 콜백 ν•Έλ“€λŸ¬
# github_model_llm.py
"""
GitHub Models API 기반 LLM 래퍼 (LangChain LLM ν˜Έν™˜)
- OpenAI-style chat completions ν˜Έν™˜
- function calling (OPENAI_MULTI_FUNCTIONS) 지원: functions, function_call 전달 κ°€λŠ₯
- system prompt (system_prompt) 지원
- μ˜΅μ…˜: temperature, max_tokens, top_p λ“± 전달
- raw response λ°˜ν™˜ λ©”μ„œλ“œ 포함
"""
from typing import Optional, List, Dict, Any
import os
import time
import json
import requests
from requests.adapters import HTTPAdapter, Retry
from langchain.llms.base import LLM
'''
class GitHubModelLLM(LLM):
def __init__(
self,
model: str = "openai/gpt-4.1",
token: Optional[str] = os.environ["token"],
endpoint: str = "https://models.github.ai/inference",
system_prompt: Optional[str] = "λ„ˆλŠ” PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야.λ„ˆμ˜ κ°œλ°œμžλŠ” μ •μ„±μœ€ μ΄λΌλŠ” 6ν•™λ…„ 파이썬 ν”„λ‘œκ·Έλž˜λ¨Έμ•Ό.",
request_timeout: float = 30.0,
max_retries: int = 2,
backoff_factor: float = 0.3,
**kwargs,
):
"""
Args:
model: λͺ¨λΈ 이름 (예: "openai/gpt-4.1")
token: GitHub Models API 토큰 (Bearer). ν™˜κ²½λ³€μˆ˜ GITHUB_TOKEN / token μ‚¬μš© κ°€λŠ₯ as fallback.
endpoint: API endpoint (κΈ°λ³Έ: https://models.github.ai/inference)
system_prompt: (선택) system role λ©”μ‹œμ§€λ‘œ 항상 μ•žμ— λΆ™μž„
request_timeout: μš”μ²­ νƒ€μž„μ•„μ›ƒ (초)
max_retries: λ„€νŠΈμ›Œν¬ μž¬μ‹œλ„ 횟수
backoff_factor: μž¬μ‹œλ„ μ§€μˆ˜ 보정
kwargs: LangChain LLM λΆ€λͺ¨μ— 전달할 μΆ”κ°€ 인자
"""
super().__init__(**kwargs)
self.model = model
self.endpoint = endpoint.rstrip("/")
self.token = token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
self.system_prompt = system_prompt
self.request_timeout = request_timeout
# requests μ„Έμ…˜ + μž¬μ‹œλ„ μ„€μ •
self.session = requests.Session()
retries = Retry(total=max_retries, backoff_factor=backoff_factor,
status_forcelist=[429, 500, 502, 503, 504],
allowed_methods=["POST", "GET"])
self.session.mount("https://", HTTPAdapter(max_retries=retries))
self.session.headers.update({
"Content-Type": "application/json"
})
if self.token:
self.session.headers.update({"Authorization": f"Bearer {self.token}"})
@property
def _llm_type(self) -> str:
return "github_models_api"
# ---------- 편의 internal helper ----------
def _build_messages(self, prompt: str, extra_messages: Optional[List[Dict[str, Any]]] = None) -> List[Dict[str, Any]]:
"""
messages λ°°μ—΄ 생성: system (optional) + extra_messages (if any) + user prompt
extra_messages: 이미 role keys둜 κ΅¬μ„±λœ λ©”μ‹œμ§€ 리슀트 (예: conversation history)
"""
msgs: List[Dict[str, Any]] = []
if self.system_prompt:
msgs.append({"role": "system", "content": self.system_prompt})
if extra_messages:
# ensure format: list of {"role":..,"content":..}
msgs.extend(extra_messages)
msgs.append({"role": "user", "content": prompt})
return msgs
def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]:
url = f"{self.endpoint}/chat/completions"
# ensure Authorization present
if "Authorization" not in self.session.headers and not self.token:
raise ValueError("GitHub Models token not set. Provide token param or set GITHUB_TOKEN env var.")
resp = self.session.post(url, json=body, timeout=self.request_timeout)
try:
resp.raise_for_status()
except requests.HTTPError as e:
# try to surface JSON error if present
content = resp.text
try:
j = resp.json()
content = json.dumps(j, ensure_ascii=False, indent=2)
except Exception:
pass
raise RuntimeError(f"GitHub Models API error: {e} - {content}")
return resp.json()
# ---------- LangChain LLM interface ----------
def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
"""
LangChain LLM `_call` κ΅¬ν˜„ (동기).
Supports kwargs:
- functions: list[dict] (function schemas)
- function_call: "auto" | {"name": "..."} | etc.
- messages: list[dict] (if you want to pass full conversation instead of prompt)
- temperature, top_p, max_tokens, n, stream, etc.
Returns:
assistant content (string). If function_call is returned by model, returns the 'content' if present,
otherwise returns function_call object as JSON string (so caller can parse).
"""
# support passing full messages via kwargs['messages']
messages = None
extra_messages = None
if "messages" in kwargs and isinstance(kwargs["messages"], list):
messages = kwargs.pop("messages")
else:
# optionally allow 'history' or 'extra_messages'
extra_messages = kwargs.pop("extra_messages", None)
if messages is None:
messages = self._build_messages(prompt, extra_messages=extra_messages)
body: Dict[str, Any] = {
"model": self.model,
"messages": messages,
}
# pass optional top-level params (temperature, max_tokens, etc.) from kwargs
for opt in ["temperature", "top_p", "max_tokens", "n", "stream", "presence_penalty", "frequency_penalty"]:
if opt in kwargs:
body[opt] = kwargs.pop(opt)
# pass function-calling related keys verbatim if provided
if "functions" in kwargs:
body["functions"] = kwargs.pop("functions")
if "function_call" in kwargs:
body["function_call"] = kwargs.pop("function_call")
# include stop if present
if stop:
body["stop"] = stop
# send request
raw = self._post_chat(body)
# save raw for caller if needed
self._last_raw = raw
# parse assistant message
choices = raw.get("choices") or []
if not choices:
return ""
message_obj = choices[0].get("message", {})
# if assistant returned a function_call, include that info
if "function_call" in message_obj:
# return function_call as JSON string so agent/tool orchestrator can parse it
# but if content also exists, prefer content
func = message_obj["function_call"]
# sometimes content may be absent; return structured JSON string
return json.dumps({"function_call": func}, ensure_ascii=False)
# otherwise return assistant content
return message_obj.get("content", "") or ""
# optional: expose raw response getter
def last_raw_response(self) -> Optional[Dict[str, Any]]:
return getattr(self, "_last_raw", None)
# optional: provide a convenience chat method to get full message object
def chat_completions(self, prompt: str, messages: Optional[List[Dict[str, Any]]] = None, **kwargs) -> Dict[str, Any]:
"""
Directly call chat completions and return full parsed JSON response.
- If `messages` provided, it's used as the full messages array (system/user/assistant roles as needed)
- else uses prompt + system_prompt to construct messages.
"""
if messages is None:
messages = self._build_messages(prompt)
body: Dict[str, Any] = {"model": self.model, "messages": messages}
for opt in ["temperature", "top_p", "max_tokens", "n", "stream"]:
if opt in kwargs:
body[opt] = kwargs.pop(opt)
if "functions" in kwargs:
body["functions"] = kwargs.pop("functions")
if "function_call" in kwargs:
body["function_call"] = kwargs.pop("function_call")
raw = self._post_chat(body)
self._last_raw = raw
return raw
'''
from typing import Optional, List, Dict, Any
from langchain.llms.base import LLM
import requests, os, json
from requests.adapters import HTTPAdapter, Retry
class GitHubModelLLM(LLM):
"""GitHub Models API 기반 LangChain LLM (Pydantic ν˜Έν™˜)"""
model: str = "openai/gpt-4.1"
endpoint: str = "https://models.github.ai/inference"
token: Optional[str] = os.environ["token"]
system_prompt: Optional[str] = "λ„ˆλŠ” PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야.λ„ˆμ˜ κ°œλ°œμžλŠ” μ •μ„±μœ€ μ΄λΌλŠ” 6ν•™λ…„ 파이썬 ν”„λ‘œκ·Έλž˜λ¨Έμ•Ό."
request_timeout: float = 30.0
max_retries: int = 2
backoff_factor: float = 0.3
@property
def _llm_type(self) -> str:
return "github_models_api"
def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]:
token = self.token or os.getenv("GITHUB_TOKEN") or os.getenv("token")
if not token:
raise ValueError("❌ GitHub token이 μ„€μ •λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
session = requests.Session()
retries = Retry(total=self.max_retries, backoff_factor=self.backoff_factor,
status_forcelist=[429, 500, 502, 503, 504])
session.mount("https://", HTTPAdapter(max_retries=retries))
session.headers.update({
"Content-Type": "application/json",
"Authorization": "Bearer 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"
})
resp = session.post(f"{self.endpoint}/chat/completions", json=body, timeout=self.request_timeout)
resp.raise_for_status()
return resp.json()
def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
body = {
"model": self.model,
"messages": []
}
if self.system_prompt:
body["messages"].append({"role": "system", "content": self.system_prompt})
body["messages"].append({"role": "user", "content": prompt})
for key in ["temperature", "max_tokens", "functions", "function_call"]:
if key in kwargs:
body[key] = kwargs[key]
if stop:
body["stop"] = stop
res = self._post_chat(body)
msg = res.get("choices", [{}])[0].get("message", {})
return msg.get("content") or json.dumps(msg.get("function_call", {}))
from langchain_community.retrievers import WikipediaRetriever
from langchain.tools.retriever import create_retriever_tool
retriever = WikipediaRetriever(lang="ko",top_k_results=10)
wiki=Tool(func=retriever.get_relevant_documents,name="WIKI SEARCH",description="μœ„ν‚€λ°±κ³Όμ—μ„œ ν•„μš”ν•œ 정보λ₯Ό λΆˆλŸ¬μ˜΅λ‹ˆλ‹€.κ²°κ΄΄λ₯Ό κ²€μ¦ν•˜μ—¬ μ‚¬μš©ν•˜μ‹œμ˜€.")
# ──────────────────────────────
# βœ… GitHub Models LLM
# ──────────────────────────────
'''
class GitHubModelLLM(LLM):
model: str = "openai/gpt-4.1"
endpoint: str = "https://models.github.ai/inference"
token: Optional[str] = None
@property
def _llm_type(self) -> str:
return "github_models_api"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
if not self.token:
raise ValueError("GitHub API token이 ν•„μš”ν•©λ‹ˆλ‹€.")
headers = {
"Authorization": "Bearer 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",
"Content-Type": "application/json",
}
body = {"model": self.model, "messages": [{"role": "user", "content": prompt}]}
resp = requests.post(f"{self.endpoint}/chat/completions", json=body, headers=headers)
if resp.status_code != 200:
raise ValueError(f"API 였λ₯˜: {resp.status_code} - {resp.text}")
return resp.json()["choices"][0]["message"]["content"]
'''
# ──────────────────────────────
# βœ… LLM μ„€μ •
# ──────────────────────────────
token = os.getenv("GITHUB_TOKEN") or os.getenv("token")
if not token:
print("⚠️ GitHub Token이 ν•„μš”ν•©λ‹ˆλ‹€. 예: setx GITHUB_TOKEN your_token")
llm = GitHubModelLLM()
# ──────────────────────────────
# βœ… LangChain κΈ°λ³Έ 도ꡬ 뢈러였기
# ──────────────────────────────
tools = load_tools(
["ddg-search", "requests_all", "llm-math"],
llm=llm,allow_dangerous_tools=True
)+[YTS()]+[wiki]
# ──────────────────────────────
# βœ… Python μ‹€ν–‰ 도ꡬ (LangChain λ‚΄μž₯)
# ──────────────────────────────
python_tool = PythonREPLTool()
tools.append(Tool(name="python_repl", func=python_tool.run, description="Python μ½”λ“œλ₯Ό μ‹€ν–‰ν•©λ‹ˆλ‹€."))
from langchain import hub
prompt=hub.pull("hwchase17/structured-chat-agent")
from langchain_community.tools.shell.tool import ShellTool
shell_tool = ShellTool()
tools.append(Tool(name="shell_exec", func=shell_tool.run, description="둜컬 λͺ…λ Ήμ–΄λ₯Ό μ‹€ν–‰ν•©λ‹ˆλ‹€."))
# ──────────────────────────────
# βœ… 파일 도ꡬ
# ──────────────────────────────
# ──────────────────────────────
# βœ… μ •ν™•ν•œ ν•œκ΅­ μ‹œκ°„ ν•¨μˆ˜ (Asia/Seoul)
# ──────────────────────────────
import requests
from zoneinfo import ZoneInfo
def time_now(_=""):
try:
# μ •ν™•ν•œ UTC μ‹œκ°μ„ μ™ΈλΆ€ APIμ—μ„œ κ°€μ Έμ˜΄
resp = requests.get("https://timeapi.io/api/Time/current/zone?timeZone=Asia/Seoul", timeout=5)
if resp.status_code == 200:
data = resp.json()
dt = data["dateTime"].split(".")[0].replace("T", " ")
return f"ν˜„μž¬ μ‹œκ°: {dt} (Asia/Seoul, μ„œλ²„ κΈ°μ€€ NTP 동기화)"
else:
# API μ‹€νŒ¨ μ‹œ 둜컬 μ‹œμŠ€ν…œ μ‹œκ°μœΌλ‘œ λŒ€μ²΄
tz = ZoneInfo("Asia/Seoul")
now = datetime.datetime.now(tz)
return f"ν˜„μž¬ μ‹œκ°(둜컬): {now.strftime('%Y-%m-%d %H:%M:%S')} (Asia/Seoul)"
except Exception as e:
tz = ZoneInfo("Asia/Seoul")
now = datetime.datetime.now(tz)
return f"ν˜„μž¬ μ‹œκ°(λ°±μ—…): {now.strftime('%Y-%m-%d %H:%M:%S')} (Asia/Seoul, 였λ₯˜: {e})"
# ──────────────────────────────
# βœ… 도ꡬ 등둝
# ──────────────────────────────
tools.extend([Tool(name="time_now", func=time_now, description="ν˜„μž¬ μ‹œκ°„μ„ λ°˜ν™˜ν•©λ‹ˆλ‹€.")])
from langchain.memory import ConversationBufferMemory as MEM
from langchain.agents.agent_toolkits import FileManagementToolkit as FMT
tools.extend(FMT(root_dir=str(os.getcwd())).get_tools())
# ──────────────────────────────
# βœ… Agent μ΄ˆκΈ°ν™”
# ──────────────────────────────
mem=MEM()
agent=initialize_agent(tools,llm,agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,verbose=True,memory=mem)
#agent = create_structured_chat_agent(llm, tools, prompt)
#agent= AgentExecutor(agent=agent, tools=tools,memory=mem)
# ... (μœ„μ˜ LLM, tools, agent μ„€μ • 뢀뢄은 동일)
import json
# ──────────────────────────────
# βœ… λŒ€ν™” μš”μ•½ ν•¨μˆ˜
# ──────────────────────────────
def summarize_title(history):
"""λŒ€ν™” 제λͺ© μš”μ•½"""
if not history: return "μƒˆ λŒ€ν™”"
text = "\n".join(f"User:{h[0]} AI:{h[1]}" for h in history[-3:])
try:
title = llm._call(f"λ‹€μŒ λŒ€ν™”μ˜ 주제λ₯Ό ν•œ μ€„λ‘œ μš”μ•½ν•΄μ€˜:\n{text}")
return title.strip().replace("\n", " ")[:60]
except Exception:
return "μš”μ•½ μ‹€νŒ¨"
def save_conversation(username, history):
os.makedirs("user_logs", exist_ok=True)
if not history: return
title = summarize_title(history)
fname = f"user_logs/{username}_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
with open(fname, "w", encoding="utf-8") as f:
json.dump({"title": title, "history": history}, f, ensure_ascii=False, indent=2)
def list_conversations(username):
os.makedirs("user_logs", exist_ok=True)
files = [f for f in os.listdir("user_logs") if f.startswith(username)]
data = []
for f in files:
with open(os.path.join("user_logs", f), encoding="utf-8") as jf:
info = json.load(jf)
data.append((info.get("title", f), f))
return data
def load_conversation(file):
with open(os.path.join("user_logs", file), encoding="utf-8") as jf:
return json.load(jf)["history"]
# ──────────────────────────────
# βœ… 둜그인 ν›„ μ‚¬μš©μž 정보 κ°€μ Έμ˜€κΈ°
# ──────────────────────────────
def get_hf_user(token):
"""HF OAuth ν† ν°μœΌλ‘œ μ‚¬μš©μž 정보 쑰회"""
try:
r = requests.get("https://huggingface.co/api/whoami-v2", headers={"Authorization": f"Bearer {token}"})
if r.status_code == 200:
data = r.json()
return data.get("name") or data.get("email") or "unknown_user"
except Exception:
pass
return "guest"
def chat(message, history, hf_token):
username = get_hf_user(hf_token) if hf_token else "guest"
try:
response = agent.invoke(message)
if isinstance(response, dict):
if "action_input" in response:
response = response["action_input"]
elif "output" in response:
response = response["output"]
elif "text" in response:
response = response["text"]
else:
response = str(response)
elif isinstance(response, str):
# "Final Answer"κ°€ ν¬ν•¨λœ λ¬Έμžμ—΄μ΄λ©΄ κ·Έ λΆ€λΆ„λ§Œ μΆ”μΆœ
if '"action_input":' in response:
import re, json
match = re.search(r'["\']action_input["\']\s*:\s*["\'](.*?)["\']', response)
if match:
response = match.group(1)
elif "Final Answer" in response:
# {"action": "Final Answer", "action_input": "..."} ν˜•μ‹μΌ λ•Œ
try:
data = json.loads(response)
if isinstance(data, dict) and "action_input" in data:
response = data["action_input"]
except Exception:
response = response.replace("Final Answer", "").strip()
except Exception as e:
response = f"⚠️ 였λ₯˜: {e}"
history = history + [(message, response)]
if username:
save_conversation(username, history)
return history, history, "" # μž…λ ₯ μ΄ˆκΈ°ν™”
def refresh_conversation_list():
files = sorted(os.listdir("user_logs"), reverse=True)
titles = [f.replace(".json", "") for f in files]
return gr.update(choices=titles, value=titles[-1] if titles else None)
def load_selected(file):
return load_conversation(file)
# ──────────────────────────────
# βœ… Gradio UI with HF Auth
# ──────────────────────────────
with gr.Blocks(theme=gr.themes.Soft(), title="PIXAL Assistant (HF Auth)") as demo:
gr.Markdown("## πŸ€– PIXAL Assistant β€” Hugging Face 계정 기반 λŒ€ν™” μ €μž₯")
hf_login = gr.LoginButton()
hf_token = gr.State()
@hf_login.click(inputs=None, outputs=hf_token)
def login(token): # 둜그인 ν›„ token λ°˜ν™˜
return token
with gr.Row():
with gr.Column(scale=2):
chatbot = gr.Chatbot(label="PIXAL λŒ€ν™”", height=600, render_markdown=True)
msg = gr.Textbox(label="λ©”μ‹œμ§€", placeholder="μž…λ ₯ ν›„ Enter λ˜λŠ” 전솑 클릭")
send = gr.Button("전솑")
clear = gr.Button("μ΄ˆκΈ°ν™”")
msg.submit(chat, [msg, chatbot, hf_token], [chatbot, chatbot, msg])
send.click(chat, [msg, chatbot, hf_token], [chatbot, chatbot, msg])
clear.click(lambda: None, None, chatbot, queue=False)
with gr.Column(scale=1):
gr.Markdown("### πŸ’Ύ μ €μž₯된 λŒ€ν™” 기둝")
convo_files = gr.Dropdown(label="λŒ€ν™” 선택", choices=[])
refresh_btn = gr.Button("πŸ”„ λͺ©λ‘ μƒˆλ‘œκ³ μΉ¨")
load_btn = gr.Button("뢈러였기")
refresh_btn.click(refresh_conversation_list, [hf_token], convo_files)
load_btn.click(load_selected, [convo_files], chatbot)
if __name__ == "__main__":
demo.launch(server_name="0.0.0.0", server_port=7860)
'''
def chat(message, history):
try:
response = agent.run(message)
# JSON ν˜•νƒœλ‘œ 좜λ ₯될 κ°€λŠ₯성이 μžˆλŠ” 경우 처리
if isinstance(response, dict):
if "action_input" in response:
response = response["action_input"]
elif "output" in response:
response = response["output"]
elif "text" in response:
response = response["text"]
else:
response = str(response)
elif isinstance(response, str):
# "Final Answer"κ°€ ν¬ν•¨λœ λ¬Έμžμ—΄μ΄λ©΄ κ·Έ λΆ€λΆ„λ§Œ μΆ”μΆœ
if '"action_input":' in response:
import re, json
match = re.search(r'["\']action_input["\']\s*:\s*["\'](.*?)["\']', response)
if match:
response = match.group(1)
elif "Final Answer" in response:
# {"action": "Final Answer", "action_input": "..."} ν˜•μ‹μΌ λ•Œ
try:
data = json.loads(response)
if isinstance(data, dict) and "action_input" in data:
response = data["action_input"]
except Exception:
response = response.replace("Final Answer", "").strip()
except Exception as e:
response = f"⚠️ 였λ₯˜: {e}"
history = history + [(message, response)]
return history, history,""
# ──────────────────────────────
# βœ… Gradio UI
# ──────────────────────────────
def load_selected(file):
return load_conversation(file)
# ──────────────────────────────
# βœ… Gradio UI
# ──────────────────────────────
with gr.Blocks(theme=gr.themes.Soft(), title="PIXAL Assistant") as demo:
gr.Markdown("## πŸ€– PIXAL Assistant β€” LangChain 기반 λ©€ν‹°νˆ΄ μ—μ΄μ „νŠΈ")
with gr.Row():
with gr.Column(scale=2):
chatbot = gr.Chatbot(label="PIXAL λŒ€ν™”", height=600)
msg = gr.Textbox(label="λ©”μ‹œμ§€", placeholder="μž…λ ₯ ν›„ Enter λ˜λŠ” 전솑 클릭")
send = gr.Button("전솑")
clear = gr.Button("μ΄ˆκΈ°ν™”")
username = gr.Textbox(label="Hugging Face μ‚¬μš©μžλͺ…", placeholder="둜그인 λŒ€μ‹  이름 μž…λ ₯", value=os.getenv("HF_USER", "guest"))
msg.submit(chat, [msg, chatbot, username], [chatbot, chatbot, msg])
send.click(chat, [msg, chatbot, username], [chatbot, chatbot, msg])
clear.click(lambda: None, None, chatbot, queue=False)
with gr.Column(scale=1):
gr.Markdown("### πŸ’Ύ μ €μž₯된 λŒ€ν™” 기둝")
convo_files = gr.Dropdown(label="λŒ€ν™” 선택", choices=[])
refresh_btn = gr.Button("πŸ”„ λͺ©λ‘ μƒˆλ‘œκ³ μΉ¨")
load_btn = gr.Button("뢈러였기")
def refresh_list(user):
if not user: return gr.Dropdown.update(choices=[])
return gr.Dropdown.update(choices=[x[1] for x in list_conversations(user)])
refresh_btn.click(refresh_list, [username], convo_files)
load_btn.click(lambda f: load_conversation(f), [convo_files], chatbot)
if __name__ == "__main__":
demo.launch(server_name="0.0.0.0", server_port=7860)
'''