Merge remote-tracking branch 'origin/main' into feat/front_end
- pmcp/agents/agent_base.py +12 -5
- pmcp/agents/executor.py +19 -12
- pmcp/agents/github_agent.py +5 -7
- pmcp/agents/planner.py +7 -3
- pmcp/agents/trello_agent.py +6 -7
- pmcp/nodes/human_interrupt_node.py +15 -11
- pmcp/nodes/human_resume_node.py +6 -2
- pyproject.toml +1 -0
- requirements.txt +5 -0
- uv.lock +24 -0
pmcp/agents/agent_base.py
CHANGED

@@ -1,7 +1,7 @@
 import logging
 from typing import List, TypeVar, Optional
 
-from langchain_core.messages import AnyMessage, BaseMessage
+from langchain_core.messages import AnyMessage, BaseMessage, SystemMessage
 from pydantic import BaseModel
 from langchain_core.tools import BaseTool
 from langchain_openai import ChatOpenAI

@@ -29,14 +29,21 @@ class AgentBlueprint:
         self.tools = {tool.name: tool for tool in tools}
         self.logger = logging.getLogger(self.__class__.__name__)
 
+    def __set_system_prompt(self, messages: List[AnyMessage]):
+        if self.system_prompt:
+            return [SystemMessage(content=self.system_prompt)] + messages
+        return messages
+
     def call_agent(self, messages: List[AnyMessage]) -> BaseMessage:
-        response = self.llm.with_retry(stop_after_attempt=2).invoke(input=messages)
+        response = self.llm.with_retry(stop_after_attempt=2).invoke(
+            input=self.__set_system_prompt(messages)
+        )
         response.name = self.agent_name
         return response
 
     async def acall_agent(self, messages: List[AnyMessage]) -> BaseMessage:
         response = await self.llm.with_retry(stop_after_attempt=2).ainvoke(
-            input=messages
+            input=self.__set_system_prompt(messages)
         )
         response.name = self.agent_name
         return response

@@ -47,7 +54,7 @@ class AgentBlueprint:
         response = (
             self.llm.with_structured_output(clazz)
             .with_retry(stop_after_attempt=2)
-            .invoke(input=messages)
+            .invoke(input=self.__set_system_prompt(messages))
         )
 
         return response

@@ -58,7 +65,7 @@ class AgentBlueprint:
         response = (
             await self.llm.with_structured_output(clazz)
             .with_retry(stop_after_attempt=2)
-            .ainvoke(input=messages)
+            .ainvoke(input=self.__set_system_prompt(messages))
         )
 
         return response
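
Note on the agent_base.py change above: the system prompt is now prepended centrally by AgentBlueprint before every invoke/ainvoke, so callers pass plain message lists. A minimal standalone sketch of that prepending behaviour (assumes langchain-core is installed; the sample prompt and messages are illustrative, not from the repo):

from typing import List, Optional

from langchain_core.messages import AnyMessage, HumanMessage, SystemMessage


def set_system_prompt(messages: List[AnyMessage], system_prompt: Optional[str]) -> List[AnyMessage]:
    # Mirrors AgentBlueprint.__set_system_prompt: prepend only when a prompt is configured.
    if system_prompt:
        return [SystemMessage(content=system_prompt)] + messages
    return messages


msgs = [HumanMessage(content="Create a Trello card for the release notes")]
print(set_system_prompt(msgs, "You are a project-management assistant."))  # SystemMessage first
print(set_system_prompt(msgs, None))  # returned unchanged
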
pmcp/agents/executor.py
CHANGED

@@ -5,8 +5,8 @@ from langchain_core.tools import BaseTool
 from langchain_openai import ChatOpenAI
 from langchain_core.messages import HumanMessage
 
-from pmcp.models.plan import PlanStep
 from pmcp.models.state import PlanningState
+from loguru import logger
 
 
 class ExecutorAgent:

@@ -18,16 +18,21 @@
     )
 
     def call_executor_agent(self, state: PlanningState):
+        plan_step_index = state.plan_step
+        current_step = None
+        messages = []
+        if len(state.plan.steps) > plan_step_index:
+            current_step = state.plan.steps[plan_step_index]
+            messages = [
                 HumanMessage(
+                    content=f"The {current_step.agent} agent should perform the following action:\n{current_step.description}"
                 )
+            ]
+        logger.info(f"The Executor is executing step: {current_step}")
+        return {
+            "plan_step": plan_step_index + 1,
+            "messages": messages,
+            "current_step": current_step,
         }
 
     async def acall_executor_agent(self, state: PlanningState):

@@ -36,10 +41,12 @@
         messages = []
         if len(state.plan.steps) > plan_step_index:
             current_step = state.plan.steps[plan_step_index]
+            messages = [
+                HumanMessage(
                     content=f"The {current_step.agent} agent should perform the following action:\n{current_step.description}"
+                )
+            ]
+            logger.info(f"The Executor is executing step: {current_step}")
         return {
             "plan_step": plan_step_index + 1,
             "messages": messages,
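
For context on the executor.py change: call_executor_agent now reads the current plan step, wraps it in a HumanMessage for the downstream agent, logs it with loguru, and advances plan_step by one. A rough standalone sketch of that state transition (the StepStub dataclass is a simplified stand-in for the pmcp plan models, not the real class):

from dataclasses import dataclass
from typing import List

from langchain_core.messages import HumanMessage
from loguru import logger


@dataclass
class StepStub:
    # Simplified stand-in for a pmcp plan step (agent + description only).
    agent: str
    description: str


def next_step(steps: List[StepStub], plan_step: int) -> dict:
    current_step = None
    messages = []
    if len(steps) > plan_step:
        current_step = steps[plan_step]
        messages = [
            HumanMessage(
                content=f"The {current_step.agent} agent should perform the following action:\n{current_step.description}"
            )
        ]
    logger.info(f"The Executor is executing step: {current_step}")
    return {"plan_step": plan_step + 1, "messages": messages, "current_step": current_step}


print(next_step([StepStub("trello", "Create the sprint board")], 0))

When plan_step runs past the end of the plan, messages stays empty and current_step stays None.
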
pmcp/agents/github_agent.py
CHANGED

@@ -2,10 +2,10 @@ from typing import List, Optional
 
 from pmcp.agents.agent_base import AgentBlueprint
 from langchain_core.tools import BaseTool
-from langchain_core.messages import SystemMessage
 from langchain_openai import ChatOpenAI
 
 from pmcp.models.state import PlanningState
+from loguru import logger
 
 SYSTEM_PROMPT = """
 You are an assistant that can manage Trello boards and projects.

@@ -24,13 +24,11 @@ class GithubAgent:
     )
 
     def call_github_agent(self, state: PlanningState):
+        logger.info("Calling Github agent...")
+        response = self.agent.call_agent(state.messages)
         return {"messages": [response]}
 
     async def acall_github_agent(self, state: PlanningState):
+        logger.info("Calling Github agent...")
+        response = await self.agent.acall_agent(state.messages)
         return {"messages": [response]}
pmcp/agents/planner.py
CHANGED

@@ -2,11 +2,11 @@ from typing import List, Optional
 
 from pmcp.agents.agent_base import AgentBlueprint
 from langchain_core.tools import BaseTool
-from langchain_core.messages import SystemMessage
 from langchain_openai import ChatOpenAI
 
 from pmcp.models.plan import Plan
 from pmcp.models.state import PlanningState
+from loguru import logger
 
 SYSTEM_PROMPT = """
 You are a Planner Agent responsible for breaking down high-level project goals into clear, actionable steps. You do not execute tasks yourself — instead, you delegate them to two specialized agents:

@@ -38,15 +38,19 @@ class PlannerAgent:
     )
 
     def call_planner_agent(self, state: PlanningState):
+        logger.info("Calling Planner agent...")
         response = self.agent.call_agent_structured(
+            messages=state.messages,
             clazz=Plan,
         )
+        logger.info(f"Building plan: {response}")
         return {"plan": response, "plan_step": 0, "current_step": None}
 
     async def acall_planner_agent(self, state: PlanningState):
+        logger.info("Calling Planner agent...")
         response = await self.agent.acall_agent_structured(
+            messages=state.messages,
             clazz=Plan,
         )
+        logger.info(f"Building plan: {response}")
         return {"plan": response, "plan_step": 0, "current_step": None}
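
For context on the planner.py change: the planner now forwards state.messages directly and relies on AgentBlueprint to prepend SYSTEM_PROMPT, while the structured call parses the reply into a Plan. A rough sketch of what such a structured call looks like in isolation (StepSketch/PlanSketch are hypothetical stand-ins for pmcp.models.plan, the model name is an assumption, and running it needs an OpenAI API key):

from typing import List

from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
from pydantic import BaseModel


class StepSketch(BaseModel):
    # Hypothetical stand-in for a single plan step.
    agent: str
    description: str


class PlanSketch(BaseModel):
    # Hypothetical stand-in for pmcp.models.plan.Plan.
    steps: List[StepSketch]


llm = ChatOpenAI(model="gpt-4o-mini")  # assumed model name; requires OPENAI_API_KEY
plan = (
    llm.with_structured_output(PlanSketch)
    .with_retry(stop_after_attempt=2)
    .invoke(input=[HumanMessage(content="Open a GitHub issue and add a Trello card for the login bug")])
)
print(plan.steps)
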
pmcp/agents/trello_agent.py
CHANGED

@@ -2,10 +2,11 @@ from typing import List
 
 from pmcp.agents.agent_base import AgentBlueprint
 from langchain_core.tools import BaseTool
-from langchain_core.messages import SystemMessage
 from langchain_openai import ChatOpenAI
 
 from pmcp.models.state import PlanningState
+from loguru import logger
+
 
 SYSTEM_PROMPT = """
 You are an assistant that can manage Trello boards and projects.

@@ -24,13 +25,11 @@ class TrelloAgent:
     )
 
     def call_trello_agent(self, state: PlanningState):
+        logger.info("Calling Trello Agent...")
+        response = self.agent.call_agent(state.messages)
         return {"messages": [response]}
 
     async def acall_trello_agent(self, state: PlanningState):
+        logger.info("Calling Trello Agent...")
+        response = await self.agent.acall_agent(state.messages)
         return {"messages": [response]}
pmcp/nodes/human_interrupt_node.py
CHANGED

@@ -2,9 +2,10 @@ from typing import List, Optional
 
 from pmcp.agents.agent_base import AgentBlueprint
 from langchain_core.tools import BaseTool
+from langchain_core.messages import AIMessage
 from langchain_openai import ChatOpenAI
 from langgraph.types import Command, interrupt
+from langchain_core.messages.utils import filter_messages
 
 from pmcp.models.state import PlanningState
 

@@ -27,28 +28,31 @@ class HumanInterruptNode:
     )
 
     def call_human_interrupt_agent(self, state: PlanningState):
-        last_message = state.messages[-1]
-
-        #TODO: chiedere a Gerlax lo strumento
+        last_message = filter_messages(state.messages, include_types=[AIMessage])[-1]
+
+        # TODO: chiedere a Gerlax lo strumento
         try:
             tool_call = last_message.tool_calls[-1]
         except Exception:
-            last_message = state.messages[-2]
+            last_message = filter_messages(state.messages, include_types=[AIMessage])[
+                -2
+            ]
             tool_call = last_message.tool_calls[-1]
 
         if tool_call.get("name", "").startswith("get_"):
-            return Command(goto="tool")
-
+            return Command(goto="tool")
+
         response = self.agent.call_agent(
+            messages=[
+                AIMessage(content=f"Tool Calling details: {str(tool_call)}"),
+            ]
+            + state.messages,
         )
         human_review = interrupt(response.content)
 
         confirm_action = human_review.confirm_action
         changes_description = human_review.changes_description
+
         if confirm_action:
             return Command(goto="tool")
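
For context on the human_interrupt_node.py change: the node now uses filter_messages to grab the latest AIMessage (falling back to the one before it) instead of indexing state.messages blindly, and only read-only "get_" tools bypass the human review. A small sketch of that lookup (the message history below is made up for illustration):

from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain_core.messages.utils import filter_messages

history = [
    HumanMessage(content="Archive the old Trello list"),
    AIMessage(
        content="",
        tool_calls=[{"name": "archive_list", "args": {"list_id": "123"}, "id": "call_1"}],
    ),
    ToolMessage(content="ok", tool_call_id="call_1"),
]

# Latest AIMessage, even though a ToolMessage is the most recent entry overall.
last_ai = filter_messages(history, include_types=[AIMessage])[-1]
tool_call = last_ai.tool_calls[-1]

# Read-only "get_" tools skip the review; anything else would be routed to interrupt().
needs_review = not tool_call.get("name", "").startswith("get_")
print(tool_call["name"], "needs_review:", needs_review)
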
pmcp/nodes/human_resume_node.py
CHANGED

@@ -2,11 +2,12 @@ from typing import List, Optional
 
 from pmcp.agents.agent_base import AgentBlueprint
 from langchain_core.tools import BaseTool
+from langchain_core.messages import HumanMessage
 from langchain_openai import ChatOpenAI
 from langgraph.types import Command
 
 from pmcp.models.resume_trigger import ResumeTrigger
+from loguru import logger
 
 
 SYSTEM_PROMPT = """

@@ -29,8 +30,11 @@ class HumanResumeNode:
     )
 
     def call_human_interrupt_agent(self, user_message: str):
+        logger.info("Human resumer agent...")
         response = self.agent.call_agent_structured(
+            [
+                HumanMessage(content=user_message),
+            ],
             clazz=ResumeTrigger,
         )
 
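
For context on the human_resume_node.py change: the user's free-form reply is wrapped in a HumanMessage and parsed into a ResumeTrigger via the structured-output path. A rough sketch of the same pattern with a stand-in model (ResumeTriggerSketch's fields are guessed from how the interrupt node reads confirm_action and changes_description, not taken from pmcp.models.resume_trigger; the model name is an assumption and the call needs an OpenAI API key):

from typing import Optional

from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
from pydantic import BaseModel


class ResumeTriggerSketch(BaseModel):
    # Stand-in only: field names inferred, not the real ResumeTrigger.
    confirm_action: bool
    changes_description: Optional[str] = None


llm = ChatOpenAI(model="gpt-4o-mini")  # assumed model name; requires OPENAI_API_KEY
trigger = (
    llm.with_structured_output(ResumeTriggerSketch)
    .with_retry(stop_after_attempt=2)
    .invoke(input=[HumanMessage(content="Yes, go ahead, but rename the card to 'Hotfix 1.2.1' first")])
)
print(trigger.confirm_action, trigger.changes_description)
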
pyproject.toml
CHANGED

@@ -16,6 +16,7 @@ dependencies = [
     "requests>=2.32.3",
     "grandalf>=0.8",
     "fastmcp>=2.5.2",
+    "loguru>=0.7.3",
 ]
 
 [dependency-groups]
requirements.txt
CHANGED

@@ -49,6 +49,7 @@ click==8.2.1
 colorama==0.4.6 ; sys_platform == 'win32'
     # via
     #   click
+    #   loguru
     #   tqdm
 distlib==0.3.9
     # via virtualenv

@@ -167,6 +168,8 @@ langsmith==0.3.43
     # via langchain-core
 litellm==1.72.0
     # via smolagents
+loguru==0.7.3
+    # via pmcp
 lxml==5.4.0
     # via duckduckgo-search
 markdown-it-py==3.0.0

@@ -391,6 +394,8 @@ websockets==15.0.1
     # via
     #   fastmcp
     #   gradio-client
+win32-setctime==1.2.0 ; sys_platform == 'win32'
+    # via loguru
 xxhash==3.5.0
     # via langgraph
 yarl==1.20.0
uv.lock
CHANGED

@@ -922,6 +922,19 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/c2/98/bec08f5a3e504013db6f52b5fd68375bd92b463c91eb454d5a6460e957af/litellm-1.72.0-py3-none-any.whl", hash = "sha256:88360a7ae9aa9c96278ae1bb0a459226f909e711c5d350781296d0640386a824", size = 7979630, upload-time = "2025-06-01T02:12:50.458Z" },
 ]
 
+[[package]]
+name = "loguru"
+version = "0.7.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "colorama", marker = "sys_platform == 'win32'" },
+    { name = "win32-setctime", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/3a/05/a1dae3dffd1116099471c643b8924f5aa6524411dc6c63fdae648c4f1aca/loguru-0.7.3.tar.gz", hash = "sha256:19480589e77d47b8d85b2c827ad95d49bf31b0dcde16593892eb51dd18706eb6", size = 63559, upload-time = "2024-12-06T11:20:56.608Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/0c/29/0348de65b8cc732daa3e33e67806420b2ae89bdce2b04af740289c5c6c8c/loguru-0.7.3-py3-none-any.whl", hash = "sha256:31a33c10c8e1e10422bfd431aeb5d351c7cf7fa671e3c4df004162264b28220c", size = 61595, upload-time = "2024-12-06T11:20:54.538Z" },
+]
+
 [[package]]
 name = "lxml"
 version = "5.4.0"

@@ -1383,6 +1396,7 @@ dependencies = [
     { name = "langchain-openai" },
     { name = "langgraph" },
     { name = "langgraph-checkpoint-sqlite" },
+    { name = "loguru" },
     { name = "mcp", extra = ["cli"] },
     { name = "requests" },
     { name = "smolagents", extra = ["litellm", "mcp", "toolkit"] },

@@ -1404,6 +1418,7 @@ requires-dist = [
     { name = "langchain-openai", specifier = ">=0.3.18" },
     { name = "langgraph", specifier = ">=0.4.7" },
     { name = "langgraph-checkpoint-sqlite", specifier = ">=2.0.10" },
+    { name = "loguru", specifier = ">=0.7.3" },
     { name = "mcp", extras = ["cli"], specifier = ">=1.9.0" },
     { name = "requests", specifier = ">=2.32.3" },
     { name = "smolagents", extras = ["litellm", "mcp", "toolkit"], specifier = ">=1.17.0" },

@@ -2156,6 +2171,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" },
 ]
 
+[[package]]
+name = "win32-setctime"
+version = "1.2.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b3/8f/705086c9d734d3b663af0e9bb3d4de6578d08f46b1b101c2442fd9aecaa2/win32_setctime-1.2.0.tar.gz", hash = "sha256:ae1fdf948f5640aae05c511ade119313fb6a30d7eabe25fef9764dca5873c4c0", size = 4867, upload-time = "2024-12-07T15:28:28.314Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/e1/07/c6fe3ad3e685340704d314d765b7912993bcb8dc198f0e7a89382d37974b/win32_setctime-1.2.0-py3-none-any.whl", hash = "sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390", size = 4083, upload-time = "2024-12-07T15:28:26.465Z" },
+]
+
 [[package]]
 name = "xxhash"
 version = "3.5.0"