# Gradio front-end for the LangGraph multi-agent project manager.
# (Hugging Face Space app; agent graph is built in mcpc_graph.setup_graph.)
import uuid
import os
import gradio as gr
from langchain_core.messages import HumanMessage, AIMessage
# Assuming mcpc_graph.py and its setup_graph function are in the same directory.
from mcpc_graph import setup_graph
async def chat_logic(
    message: str,
    history: list,
    session_state: dict,
    github_repo: str,
    github_token: str,
    trello_api: str,
    trello_token: str,
    hf_token: str,
):
    """
    Handle one chat turn: lazily configure the agent graph, then stream the reply.

    Args:
        message (str): The user's input message.
        history (list): The chat history managed by Gradio. Not read here, but
            required by the gr.ChatInterface callback signature.
        session_state (dict): Mutable per-session store (gr.State). Holds the
            compiled graph ("app"), the interrupt handler ("human_resume_node"),
            the conversation "thread_id", and the "is_message_command" flag.
        github_repo (str): The GitHub repository (username/repo).
        github_token (str): The GitHub personal access token.
        trello_api (str): The Trello API key.
        trello_token (str): The Trello API token.
        hf_token (str): The Hugging Face API token (exported as NEBIUS_API_KEY;
            see note below).

    Yields:
        str: The bot's streaming response, or an interruption prompt asking the
        user for feedback.
    """
    # Retrieve the initialized graph and interrupt handler from the session state.
    app = session_state.get("app")
    human_resume_node = session_state.get("human_resume_node")

    # First message of the session: validate credentials, configure the
    # environment, and build the graph once; later turns reuse it.
    if app is None:
        # All five configuration fields must be non-empty before we start.
        if not all([github_repo, github_token, trello_api, trello_token, hf_token]):
            yield "Error: Please provide all API keys and the GitHub repository in the 'API Configuration' section before starting the chat."
            return

        # Set environment variables for the current process.
        os.environ["GITHUB_REPO"] = github_repo
        # NOTE(review): the UI collects this as a Hugging Face token but it is
        # exported under NEBIUS_API_KEY — confirm which provider setup_graph
        # actually reads this for.
        os.environ["NEBIUS_API_KEY"] = hf_token

        # Asynchronously initialize the graph and cache it (plus the interrupt
        # handler) in the session state for subsequent messages in the session.
        app, human_resume_node = await setup_graph(
            github_token=github_token, trello_api=trello_api, trello_token=trello_token
        )
        session_state["app"] = app
        session_state["human_resume_node"] = human_resume_node

    # Ensure a unique thread_id for the conversation (keys the graph's
    # per-conversation state via the "configurable" config below).
    thread_id = session_state.get("thread_id")
    if not thread_id:
        thread_id = str(uuid.uuid4())
        session_state["thread_id"] = thread_id

    # True when the previous turn ended in an interruption, i.e. this message
    # is the user's feedback rather than a fresh request.
    is_message_command = session_state.get("is_message_command", False)

    config = {
        "configurable": {"thread_id": thread_id},
        "recursion_limit": 100,
    }

    if is_message_command:
        # The user is providing feedback to an interruption: build the resume
        # input from the interrupt handler and clear the flag.
        app_input = human_resume_node.call_human_interrupt_agent(message)
        session_state["is_message_command"] = False
    else:
        # A standard user message.
        app_input = {"messages": [HumanMessage(content=message)]}
        # app_input["github_repo"] = github_repo

    # Stream the graph's response. Each yield replaces the previous one in the
    # Gradio UI, so intermediate AIMessages (e.g. "I will now use a tool") are
    # overwritten and only the final answer stays visible.
    async for res in app.astream(app_input, config=config, stream_mode="values"):
        if "messages" in res:
            last_message = res["messages"][-1]
            # Only surface AI messages; other message types are not yielded.
            if isinstance(last_message, AIMessage):
                yield last_message.content
        elif "__interrupt__" in res:
            # The agent paused for human feedback: show the prompt and flag the
            # next user message as the interrupt response.
            interruption_message = res["__interrupt__"][0]
            session_state["is_message_command"] = True
            yield interruption_message.value
            return  # Stop the stream and wait for the user's next message.
def create_gradio_app():
    """Assemble the Gradio web UI and launch it (blocks until shutdown)."""
    print("Launching Gradio app...")

    # (label, placeholder, input type, help text) for each credential field,
    # in the exact order chat_logic expects its additional inputs.
    credential_specs = [
        ("GitHub Repo", "e.g., username/repository", "text",
         "The target repository for GitHub operations."),
        ("GitHub Token", "ghp_xxxxxxxxxxxx", "password",
         "A fine-grained personal access token."),
        ("Trello API Key", "Your Trello API key", "text",
         "Your API key from trello.com/power-ups/admin."),
        ("Trello Token", "Your Trello token", "password",
         "A token generated from your Trello account."),
        ("Hugging Face Token", "hf_xxxxxxxxxxxx", "password",
         "Used for tools requiring Hugging Face models."),
    ]

    with gr.Blocks(theme=gr.themes.Soft(), title="LangGraph Multi-Agent Chat") as demo:
        # Per-browser-session store consumed and mutated by chat_logic.
        session_state = gr.State({})

        gr.Markdown(
            """
# LangGraph Multi-Agent Project Manager
Interact with a multi-agent system powered by LangGraph.
You can assign tasks related to Trello and Github.
The system can be interrupted for human feedback when it needs to use a tool.
"""
        )

        chatbot = gr.Chatbot(
            [],
            elem_id="chatbot",
            bubble_full_width=False,
            height=600,
            label="Multi-Agent Chat",
            show_label=False,
        )

        # Credential inputs live in an accordion; the agent is configured
        # lazily on the first chat message, not when these change.
        with gr.Accordion("API Configuration", open=True):
            gr.Markdown(
                "Please enter your credentials. The agent will be configured when you send your first message."
            )
            credential_boxes = [
                gr.Textbox(label=label, placeholder=hint, type=kind, info=info)
                for label, hint, kind, info in credential_specs
            ]

        gr.ChatInterface(
            fn=chat_logic,
            chatbot=chatbot,
            additional_inputs=[session_state, *credential_boxes],
            title=None,
            description=None,
        )

    demo.queue()
    demo.launch(debug=True)
if __name__ == "__main__":
    try:
        # Install the local package in editable mode so project modules
        # (e.g. mcpc_graph) are importable when the Space starts.
        # Use the running interpreter's own pip ("python -m pip") rather than
        # a bare "pip" executable, which may belong to a different Python on
        # PATH and install into the wrong environment.
        import subprocess
        import sys

        subprocess.run([sys.executable, "-m", "pip", "install", "-e", "."])

        # create_gradio_app is synchronous and blocks here; Gradio drives the
        # async chat_logic callback internally.
        create_gradio_app()
    except KeyboardInterrupt:
        print("\nShutting down Gradio app.")
    except Exception as e:
        # Top-level boundary: report and exit rather than crash with a trace.
        print(f"An error occurred: {e}")