file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
main.py | Python | import re
input_text = """
# DatasetRetriever
## Overview
- `DatasetRetriever`: Interface for retrieving datasets based on a prompt.
- `DescriptionDatasetRetriever`: Retrieves HuggingFace datasets using similarity to a given prompt.
## Getting Started
- Import Modules
```python
from prompt2model.dataset_retriever import DescriptionDatasetRetriever
from prompt2model.prompt_parser import MockPromptSpec, TaskType
```
- Initialize Retriever
```python
retriever = DescriptionDatasetRetriever()
```
Various parameters like search index path, model name, and search depth can be customized during initialization.
- Prepare the Prompt
```python
task_type = TaskType.TEXT_GENERATION
prompt_text = "..."
prompt_spec = MockPromptSpec(task_type)
prompt_spec._instruction = prompt_text
```
- Retrieve Dataset
```python
dataset_dict = retriever.retrieve_dataset_dict(
prompt_spec, blocklist=[]
)
```
`dataset_dict` will contain the dataset splits (train/val/test) most relevant to the given prompt.
"""
def split_into_lines(text, width=70):
    """Reflow markdown-ish text into word-wrapped lines.

    Paragraphs (separated by blank lines) are greedily word-wrapped to at
    most ``width`` characters.  Fenced code blocks (paragraphs that both
    start and end with ```) are preserved verbatim.  Ordered-list markers
    ("1.", "2.", ...) are converted to dashes, and each list line is
    wrapped independently so items stay on their own lines.

    The wrap limit was previously hard-coded to 70; it is now the
    ``width`` keyword argument (default 70, so existing callers are
    unaffected).

    Args:
        text: The raw markdown text to reflow.
        width: Maximum line length used by the greedy wrapper.

    Returns:
        A list of output lines; a blank line ('') follows each paragraph.
    """
    lines = []
    current_line = ''
    for paragraph in text.strip().split('\n\n'):
        if paragraph.startswith("```") and paragraph.endswith("```"):
            # Preserve fenced code blocks verbatim.
            lines.append(paragraph)
            lines.append('')  # blank line after the preserved block
            continue
        # Replace ordered-list numbers with dashes (per-line via MULTILINE).
        paragraph = re.sub(r'^\d+\.', '-', paragraph, flags=re.MULTILINE)
        if paragraph.startswith('-'):
            # Wrap each list line separately so items never merge.
            for subline in paragraph.split('\n'):
                for word in subline.split():
                    if len(current_line) + len(word) <= width:
                        current_line += word + ' '
                    else:
                        lines.append(current_line.strip())
                        current_line = word + ' '
                if current_line:
                    lines.append(current_line.strip())
                    current_line = ''
            lines.append('')  # blank line after the list paragraph
            continue
        # Plain paragraph: greedy word wrap across the whole paragraph.
        for word in paragraph.split():
            if len(current_line) + len(word) <= width:
                current_line += word + ' '
            else:
                lines.append(current_line.strip())
                current_line = word + ' '
        if current_line:
            lines.append(current_line.strip())
            current_line = ''
        lines.append('')  # blank line between paragraphs
    return lines
def main():
    """Entry point: reflow the sample markdown and print it line by line."""
    for formatted_line in split_into_lines(input_text):
        print(formatted_line)


if __name__ == "__main__":
    main()
| zhaochenyang20/markdownlint-savior | 2 | markdownlint-savior is a Python script designed to assist in resolving common Markdown linting issues. It helps ensure your Markdown files adhere to linting standards and improves the overall quality of your Markdown documents. | Python | zhaochenyang20 | 赵晨阳 | University of California, Los Angeles |
scratch/blockchain-simulator.cc | C++ | #include "ns3/core-module.h"
#include "ns3/network-module.h"
#include "ns3/internet-module.h"
#include "ns3/point-to-point-module.h"
#include "ns3/applications-module.h"
using namespace ns3;
NS_LOG_COMPONENT_DEFINE ("BlockchainSimulator");
// 创建网络
// Build a fully connected (full-mesh) point-to-point network of N nodes,
// install the consensus application on each, and run the simulation for
// 10 simulated seconds.
void startSimulator (int N)
{
  NodeContainer nodes;
  nodes.Create (N);
  NetworkHelper networkHelper (N);

  // PointToPointHelper links exactly two nodes, so every pair is wired
  // up manually below.
  PointToPointHelper pointToPoint;
  // Total node bandwidth is 24Mbps, i.e. 3Mbps per point-to-point channel.
  pointToPoint.SetDeviceAttribute ("DataRate", StringValue ("3Mbps"));
  pointToPoint.SetChannelAttribute ("Delay", StringValue ("3ms"));

  InternetStackHelper stack;
  stack.Install (nodes);

  Ipv4AddressHelper address;
  address.SetBase ("1.0.0.0", "255.255.255.0");

  // Connect every pair of nodes exactly once.  (The original condition
  // `j < N && j != i` stops as soon as j reaches i, which is simply
  // `j < i`; written that way the pairing intent is explicit.)
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < i; j++) {
      Ipv4InterfaceContainer interface;
      Ptr<Node> p1 = nodes.Get (i);
      Ptr<Node> p2 = nodes.Get (j);
      NetDeviceContainer device = pointToPoint.Install (p1, p2);
      interface.Add (address.Assign (device.Get (0)));
      interface.Add (address.Assign (device.Get (1)));
      // Each endpoint records the address of its peer on this link.
      networkHelper.m_nodesConnectionsIps[i].push_back (interface.GetAddress (1));
      networkHelper.m_nodesConnectionsIps[j].push_back (interface.GetAddress (0));
      // Move to a fresh subnet for the next link: both ends of a device
      // must share a subnet, and reusing one subnet for every link would
      // let later assignments shadow earlier neighbours' addresses.
      address.NewNetwork ();
    }
  }

  ApplicationContainer nodeApp = networkHelper.Install (nodes);
  nodeApp.Start (Seconds (0.0));
  nodeApp.Stop (Seconds (10.0));
  Simulator::Run ();
  Simulator::Destroy ();
}
int
main (int argc, char *argv[])
{
  CommandLine cmd;
  cmd.Parse (argc, argv);

  // Number of nodes taking part in the simulated network.
  const int nodeCount = 16;

  Time::SetResolution (Time::NS);
  // 1.need changed to a specific protocol class
  LogComponentEnable ("RaftNode", LOG_LEVEL_INFO);

  // Kick off the consensus simulation.
  startSimulator (nodeCount);
  return 0;
} | zhayujie/blockchain-simulator | 56 | Some simulations of blockchain consensus based on ns3. Currently implemented consensus protocols include Raft, Paxos and PBFT. | C++ | zhayujie | Minimal Future Tech | |
src/applications/helper/network-helper.cc | C++ | #include "ns3/core-module.h"
#include "network-helper.h"
#include "ns3/string.h"
#include "ns3/inet-socket-address.h"
#include "ns3/names.h"
#include "ns3/network-module.h"
#include "ns3/internet-module.h"
#include "ns3/applications-module.h"
// 2.need changed to the the specific header file
#include "../model/raft-node.h"
// #include "../model/paxos-node.h"
// #include "../model/pbft-node.h"
namespace ns3 {
// Remember the network size and configure the factory with the consensus
// application type to instantiate on each node.
NetworkHelper::NetworkHelper (uint32_t totalNoNodes)
  : m_nodeNo (totalNoNodes)
{
  // 3.need changed to a specific typeId
  m_factory.SetTypeId ("ns3::RaftNode");
}
// Create one consensus application per node, wire up its identity, total
// node count and peer addresses, and return the resulting container.
ApplicationContainer
NetworkHelper::Install (NodeContainer c)
{
  ApplicationContainer apps;
  for (NodeContainer::Iterator it = c.Begin (); it != c.End (); ++it)
    {
      // 4.need changed to a specific protocol class
      Ptr<RaftNode> app = m_factory.Create<RaftNode> ();
      const uint32_t nodeId = (*it)->GetId ();
      app->m_id = nodeId;
      app->N = m_nodeNo;
      app->m_peersAddresses = m_nodesConnectionsIps[nodeId];
      (*it)->AddApplication (app);
      apps.Add (app);
    }
  return apps;
}
} | zhayujie/blockchain-simulator | 56 | Some simulations of blockchain consensus based on ns3. Currently implemented consensus protocols include Raft, Paxos and PBFT. | C++ | zhayujie | Minimal Future Tech | |
src/applications/helper/network-helper.h | C/C++ Header | #ifndef NETWORK_HELPER_H
#define NETWORK_HELPER_H
#include "ns3/object-factory.h"
#include "ns3/ipv4-address.h"
#include "ns3/node-container.h"
#include "ns3/application-container.h"
#include "ns3/uinteger.h"
#include <map>
namespace ns3 {
// Helper that installs a consensus application (Raft/Paxos/PBFT) on every
// node of a NodeContainer and carries each node's neighbour addresses.
class NetworkHelper
{
public:
  NetworkHelper (uint32_t totalNoNodes);
  // Map from node id to the IPv4 addresses of its directly connected
  // peers; filled in by the simulation script while wiring the links.
  std::map<uint32_t, std::vector<Ipv4Address>> m_nodesConnectionsIps;
  // Create one application per node in c and return them all.
  ApplicationContainer Install (NodeContainer c);
private:
  ObjectFactory m_factory; // factory configured with the protocol TypeId
  int m_nodeNo;            // total number of nodes in the network
};
}
#endif | zhayujie/blockchain-simulator | 56 | Some simulations of blockchain consensus based on ns3. Currently implemented consensus protocols include Raft, Paxos and PBFT. | C++ | zhayujie | Minimal Future Tech | |
src/applications/model/paxos-node.cc | C++ | #include "ns3/address.h"
#include "ns3/address-utils.h"
#include "ns3/log.h"
#include "ns3/inet-socket-address.h"
#include "ns3/inet6-socket-address.h"
#include "ns3/node.h"
#include "ns3/socket.h"
#include "ns3/udp-socket.h"
#include "ns3/simulator.h"
#include "ns3/socket-factory.h"
#include "ns3/packet.h"
#include "ns3/trace-source-accessor.h"
#include "ns3/udp-socket-factory.h"
#include "ns3/tcp-socket-factory.h"
#include "ns3/uinteger.h"
#include "ns3/double.h"
#include "paxos-node.h"
#include "stdlib.h"
#include "ns3/ipv4.h"
#include <map>
namespace ns3 {
NS_LOG_COMPONENT_DEFINE ("PaxosNode");
NS_OBJECT_ENSURE_REGISTERED (PaxosNode);
// Register the PaxosNode application with the ns-3 type system so it can
// be instantiated through ObjectFactory as "ns3::PaxosNode".
TypeId
PaxosNode::GetTypeId (void)
{
  static TypeId tid = TypeId ("ns3::PaxosNode")
    .SetParent<Application> ()
    .SetGroupName("Applications")
    .AddConstructor<PaxosNode> ()
  ;
  return tid;
}

// Default constructor; protocol state is initialised in StartApplication.
PaxosNode::PaxosNode(void) {
}

PaxosNode::~PaxosNode(void) {
  NS_LOG_FUNCTION (this);
}
// Convert a single-digit integer (0-9) to its ASCII character.
static char intToChar(int digit) {
  return static_cast<char>('0' + digit);
}

// Convert an ASCII digit character back to its integer value.
static int charToInt(char digitChar) {
  return digitChar - '0';
}
// Called at the time given to Start: initialise the Paxos state, open the
// listening socket, connect a client socket to every neighbour, and let
// nodes 0-2 begin proposing.
void
PaxosNode::StartApplication ()
{
  // Initialise the Paxos state for this node.
  t_max = 0;                    // highest ticket number promised so far
  command = 'e';                // 'e' marks an empty (no) stored command
  t_store = 0;                  // ticket that stored the current command
  ticket = 0;                   // ticket used when acting as client
  isCommit = 0;                 // whether a command has been committed
  proposal = intToChar (m_id);  // command this node proposes
  vote_success = 0;             // positive responses collected
  vote_failed = 0;              // negative responses collected
  round = 0;

  NS_LOG_INFO("Node " << m_id << " start, neighbor: " << m_peersAddresses[0]);

  // Create the listening socket (all interfaces, UDP port 7071).
  if (!m_socket)
    {
      TypeId tid = TypeId::LookupByName ("ns3::UdpSocketFactory");
      m_socket = Socket::CreateSocket (GetNode (), tid);
      InetSocketAddress local = InetSocketAddress (Ipv4Address::GetAny (), 7071);
      m_socket->Bind (local);   // bind the local ip and port
      m_socket->Listen ();
    }
  m_socket->SetRecvCallback (MakeCallback (&PaxosNode::HandleRead, this));
  m_socket->SetAllowBroadcast (true);

  // Open a client socket towards every neighbour.  Bug fix: the original
  // loop advanced the iterator BEFORE dereferencing it, which skipped the
  // first peer and dereferenced end() on the last iteration (undefined
  // behaviour).  This now matches the (correct) PBFT implementation.
  for (std::vector<Ipv4Address>::iterator iter = m_peersAddresses.begin ();
       iter != m_peersAddresses.end (); ++iter)
    {
      TypeId tid = TypeId::LookupByName ("ns3::UdpSocketFactory");
      Ptr<Socket> socketClient = Socket::CreateSocket (GetNode (), tid);
      socketClient->Connect (InetSocketAddress (*iter, 7071));
      m_peersSockets[*iter] = socketClient;
    }

  // Nodes 0-2 act as proposers and immediately request tickets.
  if (m_id == 0 || m_id == 1 || m_id == 2) {
    Simulator::Schedule (Seconds(0), &PaxosNode::requireTicket, this);
  }
}
// Called at the time given to Stop; the Paxos node needs no teardown.
void
PaxosNode::StopApplication ()
{
}
// Receive loop: drains the socket and dispatches each Paxos message on its
// one-character type tag (see enum Message in paxos-node.h).
// Wire format is 3 chars: [type, payload1, payload2].
void
PaxosNode::HandleRead (Ptr<Socket> socket)
{
  Ptr<Packet> packet;
  Address from;
  Address localAddress;
  while ((packet = socket->RecvFrom (from)))
    {
      // NOTE(review): every received packet is echoed straight back to the
      // sender; the PBFT implementation has the same line commented out.
      // Confirm this echo is intentional.
      socket->SendTo(packet, 0, from);
      if (packet->GetSize () == 0)
        { //EOF
          break;
        }
      if (InetSocketAddress::IsMatchingType (from))
        {
          std::string msg = getPacketContent(packet, from);
          uint8_t data[4];
          switch (charToInt(msg[0]))
          {
            case REQUEST_TICKET: // ['type', 'ticket']
            {
              data[0] = intToChar(RESPONSE_TICKET);
              int t = charToInt(msg[1]);
              // Acceptor: grant the ticket only if it is newer than any
              // ticket already promised; reply with the stored command.
              if (t > t_max) {
                t_max = t;
                data[1] = intToChar(SUCCESS);
                data[2] = command; // ['type', 'success', 'command']
              }
              else
              {
                data[1] = intToChar(FAILED); // ['type', 'fail']
              }
              Send(data, from);
              break;
            }
            case REQUEST_PROPOSE: // ['type', 'ticket', 'command']
            {
              data[0] = intToChar(RESPONSE_PROPOSE);
              int t = charToInt(msg[1]);
              if (t == t_max) { // request's ticket equals the current max
                char c = msg[2];
                command = c;     // accept the proposal
                t_store = t;     // remember the ticket that stored it
                data[1] = intToChar(SUCCESS);
              } else {
                data[1] = intToChar(FAILED);
              }
              Send(data, from);
              break;
            }
            case REQUEST_COMMIT:
            {
              data[0] = intToChar(RESPONSE_COMMIT);
              int t = charToInt(msg[1]);
              int c = msg[2];
              if (t == t_store && c == command) {
                // Execute the command.
                if (isCommit == 0) {
                  // First successful commit on this node (logging removed).
                }
                isCommit = 1; // committed; ignore duplicate commits
                data[1] = intToChar(SUCCESS);
              }
              else
              {
                data[1] = intToChar(FAILED);
              }
              Send(data, from);
              break;
            }
            case RESPONSE_TICKET:
            {
              int state = charToInt(msg[1]);
              if (state == SUCCESS) {
                vote_success += 1;
              }
              else {
                vote_failed += 1;
              }
              // NOTE(review): waits for N-2 replies although a node has
              // N-1 peers — verify against how Send() distributes requests.
              if (vote_success + vote_failed == N-2) {
                if (vote_success >= N / 2) {
                  // Majority promised: move to phase 2 and propose.
                  vote_success = 0;
                  vote_failed = 0;
                  data[0] = intToChar(REQUEST_PROPOSE);
                  if (msg[2] != 'e') { // 'e' means an empty proposal
                    proposal = msg[2]; // TODO: should adopt the proposal accepted by a majority
                  }
                  data[1] = intToChar(ticket);
                  data[2] = proposal;
                  Send(data);
                }
                else
                {
                  vote_success = 0;
                  vote_failed = 0;
                  // More than half refused: restart with a new ticket.
                  requireTicket();
                }
              }
              break;
            }
            case RESPONSE_PROPOSE:
            {
              int state = charToInt(msg[1]);
              if (state == SUCCESS) {
                vote_success += 1;
              } else {
                vote_failed += 1;
              }
              if (vote_success + vote_failed == N-2) {
                if (vote_success >= N / 2) {
                  // Majority accepted: move to phase 3 and commit.
                  vote_success = 0;
                  vote_failed = 0;
                  data[0] = intToChar(REQUEST_COMMIT);
                  data[1] = intToChar(ticket);
                  data[2] = proposal;
                  Send(data);
                }
                else
                {
                  vote_success = 0;
                  vote_failed = 0;
                  // More than half refused: restart with a new ticket.
                  requireTicket();
                }
              }
              break;
            }
            case RESPONSE_COMMIT:
            {
              int state = charToInt(msg[1]);
              if (state == SUCCESS) {
                vote_success += 1;
              } else {
                vote_failed += 1;
              }
              if (vote_success + vote_failed == N-2) {
                if (vote_success >= N / 2) {
                  vote_success = 0;
                  vote_failed = 0;
                  NS_LOG_INFO("CLIENT COMMIT SUCCESS\n ##clinet ticket##: " << ticket << " id: " << m_id << " at time: " << Simulator::Now ().GetSeconds() << "s");
                }
                else
                {
                  vote_success = 0;
                  vote_failed = 0;
                  // More than half refused: restart with a new ticket.
                  requireTicket();
                }
              }
              break;
            }
            // A request coming from a client.
            case CLIENT_PROPOSE:
            {
              requireTicket();
              break;
            }
            default:
            {
              NS_LOG_INFO("wrong msg");
              break;
            }
          }
        }
      socket->GetSockName (localAddress);
    }
}
// Copy a packet's payload into a std::string, prefixed with any data
// previously buffered for this sender.
std::string
PaxosNode::getPacketContent(Ptr<Packet> packet, Address from)
{
  char *packetInfo = new char[packet->GetSize () + 1];
  std::ostringstream totalStream;
  packet->CopyData (reinterpret_cast<uint8_t*>(packetInfo), packet->GetSize ());
  packetInfo[packet->GetSize ()] = '\0'; // ensure that it is null terminated to avoid bugs
  /**
   * Add the buffered data to complete the packet
   */
  totalStream << m_bufferedData[from] << packetInfo;
  // Bug fix: the buffer was leaked on every received packet.
  delete [] packetInfo;
  std::string totalReceivedData(totalStream.str());
  return totalReceivedData;
}
// Deliver a packet on an already-connected client socket; used as the
// target of delayed Simulator::Schedule calls.
// static: pbft-node.cc also defines ns3::SendPacket, so internal linkage
// is needed to avoid an ODR/duplicate-symbol clash when both are linked.
static void
SendPacket(Ptr<Socket> socketClient, Ptr<Packet> p) {
  socketClient->Send(p);
}
// Uniform pseudo-random delay of 0-49 ms, returned in seconds.
// static: pbft-node.cc defines a DIFFERENT ns3::getRandomDelay; without
// internal linkage, linking both files violates the one-definition rule.
static float
getRandomDelay() {
  return (rand() % 50) * 1.0 / 1000;
}
// Reply with a 3-byte message to the node the request came from.
void
PaxosNode::Send(uint8_t data[], Address from)
{
  // Paxos messages are 3 bytes on the wire: [type, payload1, payload2].
  Ptr<Packet> p = Create<Packet> (data, 3);
  TypeId tid = TypeId::LookupByName ("ns3::UdpSocketFactory");

  // Hoist the repeated address conversion / map lookups into locals.
  Ipv4Address peer = InetSocketAddress::ConvertFrom(from).GetIpv4 ();
  // Lazily open a client socket towards the peer on first contact.
  if (!m_peersSockets[peer]) {
    Ptr<Socket> socketClient = Socket::CreateSocket (GetNode (), tid);
    socketClient->Connect (InetSocketAddress(peer, 7071));
    m_peersSockets[peer] = socketClient;
  }
  // Send after a small random delay to model network jitter.
  Simulator::Schedule(Seconds(getRandomDelay()), SendPacket, m_peersSockets[peer], p);
}
// Broadcast a 3-byte message to every neighbour, each delivery jittered
// by a small random delay.
void
PaxosNode::Send (uint8_t data[])
{
  Ptr<Packet> p = Create<Packet> (data, 3);
  // Bug fix: the original advanced the iterator BEFORE dereferencing it,
  // so the first peer was skipped and end() was dereferenced on the last
  // iteration (undefined behaviour).  Increment now happens at the end
  // of each iteration, matching the correct PBFT implementation.
  for (std::vector<Ipv4Address>::iterator iter = m_peersAddresses.begin ();
       iter != m_peersAddresses.end (); ++iter)
    {
      Ptr<Socket> socketClient = m_peersSockets[*iter];
      double delay = getRandomDelay();
      Simulator::Schedule(Seconds(delay), SendPacket, socketClient, p);
    }
}
// Phase 1 of Paxos: bump the local ticket and ask every neighbour for it.
void
PaxosNode::requireTicket(void) {
  // Zero-initialise: Send() serialises the full 3-byte buffer, and the
  // original left data[2] indeterminate.  (REQUEST_TICKET handlers only
  // read data[0] and data[1].)
  uint8_t data[3] = {0, 0, 0};
  ticket += 1;
  data[0] = intToChar(REQUEST_TICKET);
  data[1] = intToChar(ticket);
  Send(data);
  // The original logged the raw uint8_t array, which streams it as a
  // C-string without a NUL terminator (out-of-bounds read).
  NS_LOG_INFO("node" << m_id << " requires ticket " << ticket);
}
} // namespace ns3 | zhayujie/blockchain-simulator | 56 | Some simulations of blockchain consensus based on ns3. Currently implemented consensus protocols include Raft, Paxos and PBFT. | C++ | zhayujie | Minimal Future Tech | |
src/applications/model/paxos-node.h | C/C++ Header | #ifndef PAXOS_NODE_H
#define PAXOS_NODE_H
#include <algorithm>
#include "ns3/application.h"
#include "ns3/event-id.h"
#include "ns3/ptr.h"
#include "ns3/traced-callback.h"
#include "ns3/address.h"
#include "ns3/boolean.h"
namespace ns3 {
class Address;
class Socket;
class Packet;
// Paxos consensus application; every node acts as an acceptor and,
// for nodes 0-2, also as a proposer/client.
class PaxosNode : public Application
{
public:
  static TypeId GetTypeId (void);
  // Record the addresses of all neighbouring nodes.
  void SetPeersAddresses (const std::vector<Ipv4Address> &peers);
  PaxosNode (void);
  virtual ~PaxosNode (void);
  uint32_t m_id; // node id
  Ptr<Socket> m_socket; // listening socket
  Ptr<Socket> m_socketClient; // client socket
  std::map<Ipv4Address, Ptr<Socket>> m_peersSockets; // client sockets towards each peer
  std::map<Address, std::string> m_bufferedData; // map holding the buffered data from previous handleRead events
  Address m_local; // this node's own address
  std::vector<Ipv4Address> m_peersAddresses; // addresses of neighbouring nodes
  int t_max; // highest ticket number issued/promised so far
  char command; // currently stored command
  int t_store; // ticket number that stored the current command
  int ticket; // ticket currently tried when acting as client (would need to become a map under concurrency)
  int isCommit; // whether a commit has succeeded
  char proposal; // command this node wants to send
  int round;
  int vote_success; // number of positive responses collected
  int vote_failed; // number of negative responses collected
  int N; // total number of nodes
  // Virtual functions an Application subclass must implement.
  virtual void StartApplication (void);
  virtual void StopApplication (void);
  // Handle an incoming message.
  void HandleRead (Ptr<Socket> socket);
  // Broadcast a message to every neighbour.
  void Send (uint8_t data[]);
  // Parse a packet's payload into a string.
  std::string getPacketContent(Ptr<Packet> packet, Address from);
  // Phase 1: request a ticket from all peers.
  void requireTicket(void);
  // Send a message to the node at a specific address.
  void Send(uint8_t data[], Address from);
};
// Message types exchanged between Paxos nodes (first byte of each packet).
enum Message
{
  REQUEST_TICKET,   // 0 request a ticket
  REQUEST_PROPOSE,  // 1 propose a command
  REQUEST_COMMIT,   // 2 request a commit
  RESPONSE_TICKET,  // 3 response to a ticket request
  RESPONSE_PROPOSE, // 4 response to a proposal
  RESPONSE_COMMIT,  // 5 response to a commit request
  CLIENT_PROPOSE,   // 6 proposal initiated by a client
};
// Outcome flag carried inside response messages.
enum State
{
  SUCCESS, // 0 success
  FAILED,  // 1 failure
};
}
#endif | zhayujie/blockchain-simulator | 56 | Some simulations of blockchain consensus based on ns3. Currently implemented consensus protocols include Raft, Paxos and PBFT. | C++ | zhayujie | Minimal Future Tech | |
src/applications/model/pbft-node.cc | C++ | #include "ns3/address.h"
#include "ns3/address-utils.h"
#include "ns3/log.h"
#include "ns3/inet-socket-address.h"
#include "ns3/inet6-socket-address.h"
#include "ns3/node.h"
#include "ns3/socket.h"
#include "ns3/udp-socket.h"
#include "ns3/simulator.h"
#include "ns3/socket-factory.h"
#include "ns3/packet.h"
#include "ns3/trace-source-accessor.h"
#include "ns3/udp-socket-factory.h"
#include "ns3/tcp-socket-factory.h"
#include "ns3/uinteger.h"
#include "ns3/double.h"
#include "pbft-node.h"
#include "stdlib.h"
#include "ns3/ipv4.h"
#include <ctime>
#include <map>
// Globals shared by every simulated node (all nodes run in one process).
// NOTE(review): these have external linkage in the global namespace; short
// names like `n` and `v` risk colliding with other translation units —
// consider marking them static or wrapping them in a namespace.
int tx_size;   // size of one transaction, in bytes
int tx_speed;  // transaction generation rate, in op/s
int n;         // sequence number within the current view
int v;         // current view number
int val;       // value carried by generated transactions
float timeout; // interval between block broadcasts, in seconds
int n_round;   // number of consensus rounds completed
namespace ns3 {
NS_LOG_COMPONENT_DEFINE ("PbftNode");
NS_OBJECT_ENSURE_REGISTERED (PbftNode);
// Register the PbftNode application with the ns-3 type system so it can
// be instantiated through ObjectFactory as "ns3::PbftNode".
TypeId
PbftNode::GetTypeId (void)
{
  static TypeId tid = TypeId ("ns3::PbftNode")
    .SetParent<Application> ()
    .SetGroupName("Applications")
    .AddConstructor<PbftNode> ()
  ;
  return tid;
}

// Default constructor; protocol state is initialised in StartApplication.
PbftNode::PbftNode(void) {
}

PbftNode::~PbftNode(void) {
  NS_LOG_FUNCTION (this);
}
// Convert a single-digit integer (0-9) to its ASCII character.
static char intToChar(int digit) {
  return static_cast<char>('0' + digit);
}

// Convert an ASCII digit character back to its integer value.
static int charToInt(char digitChar) {
  return digitChar - '0';
}
// Random message delivery delay of 3-5 ms, returned in seconds.
// (rand() % 3 yields 0-2, so the range is [0.003, 0.005], not 3-6 ms as
// the original comment claimed.)
// static: paxos-node.cc defines a DIFFERENT ns3::getRandomDelay; without
// internal linkage, linking both files violates the one-definition rule.
static float
getRandomDelay() {
  return ((rand() % 3) * 1.0 + 3) / 1000;
}
// Log every element of vec, one log entry per value.
void
printVector(std::vector<int> vec) {
  // Bug fix: the original wrote `vec[i] + " "`, which adds an int to a
  // const char* — pointer arithmetic past the string literal (undefined
  // behaviour) — instead of streaming the value followed by a space.
  // The loop index is also size_t now to avoid a signed/unsigned mismatch.
  for (std::size_t i = 0; i < vec.size(); i++) {
    NS_LOG_INFO(vec[i] << " ");
  }
}
// Build a pre-prepare payload carrying `num` transactions of tx_size
// bytes each; the first 4 bytes form the header [type, v, n, n].
// Caller owns the returned malloc'd buffer and must free() it.
static uint8_t * generateTX (int num)
{
  int size = num * tx_size;
  // Bug fix: allocate size+1 bytes — the original allocated `size` and
  // then wrote data[size] = '\0', overflowing the heap buffer by one.
  uint8_t *data = (uint8_t *)std::malloc (size + 1);
  int i;
  for (i = 0; i < size; i++) {
    data[i] = '1'; // filler payload standing in for transaction bytes
  }
  data[i] = '\0';
  data[0] = intToChar(PRE_PREPARE);
  data[1] = intToChar(v);
  data[2] = intToChar(n);
  data[3] = intToChar(n);
  return data;
}
// Called at the time given to Start: initialise protocol state, open the
// listening socket, connect a client socket to every peer, and arm the
// periodic block timer.
void
PbftNode::StartApplication ()
{
  // Initialise the globals shared by all simulated nodes.
  v = 1;           // view number
  n = 0;           // transaction sequence number within the view
  leader = 0;
  tx_size = 1000;  // 1 KB per transaction
  tx_speed = 1000; // 1000 tx/s
  timeout = 0.05;  // 50 ms between block broadcasts
  block_num = 0;
  // Number of consensus rounds completed.
  n_round = 0;
  // Value this node's transactions would write.
  val = intToChar(m_id);
  // Create the listening socket (all interfaces, UDP port 7071).
  if (!m_socket)
    {
      TypeId tid = TypeId::LookupByName ("ns3::UdpSocketFactory");
      m_socket = Socket::CreateSocket (GetNode (), tid);
      InetSocketAddress local = InetSocketAddress (Ipv4Address::GetAny (), 7071);
      m_socket->Bind (local); // bind the local ip and port
      m_socket->Listen ();
    }
  m_socket->SetRecvCallback (MakeCallback (&PbftNode::HandleRead, this));
  m_socket->SetAllowBroadcast (true);
  std::vector<Ipv4Address>::iterator iter = m_peersAddresses.begin();
  // Open a client connection to every peer.
  while(iter != m_peersAddresses.end()) {
    TypeId tid = TypeId::LookupByName ("ns3::UdpSocketFactory");
    Ptr<Socket> socketClient = Socket::CreateSocket (GetNode (), tid);
    socketClient->Connect (InetSocketAddress(*iter, 7071));
    m_peersSockets[*iter] = socketClient;
    iter++;
  }
  // Every node arms the block timer; SendBlock itself checks whether this
  // node is currently the leader before broadcasting anything.
  Simulator::Schedule(Seconds(timeout), &PbftNode::SendBlock, this);
}
// Called at the time given to Stop; nothing to tear down.
void
PbftNode::StopApplication ()
{
  // printVector(values);
}
// Receive loop: drains the socket and dispatches each PBFT message on its
// one-character type tag (see enum Message in pbft-node.h).
// Control messages are 4 chars: [type, v, n, payload].
void
PbftNode::HandleRead (Ptr<Socket> socket)
{
  Ptr<Packet> packet;
  Address from;
  Address localAddress;
  while ((packet = socket->RecvFrom (from)))
    {
      if (packet->GetSize () == 0)
        { //EOF
          break;
        }
      if (InetSocketAddress::IsMatchingType (from))
        {
          std::string msg = getPacketContent(packet, from);
          uint8_t data[4];
          switch (charToInt(msg[0]))
          {
            case PRE_PREPARE:
            {
              // Leader's pre-prepare: remember the proposed value and
              // broadcast a PREPARE echoing v/n/value.
              data[0] = intToChar(PREPARE);
              data[1] = msg[1]; // v
              data[2] = msg[2]; // n
              data[3] = msg[3]; // value
              int num = charToInt(msg[2]);      // sequence number
              tx[num].val = charToInt(msg[3]);  // store the proposed value
              Send(data);
              break;
            }
            case PREPARE:
            {
              // Acknowledge a prepare message back to its sender.
              data[0] = intToChar(PREPARE_RES);
              data[1] = msg[1]; // v
              data[2] = msg[2]; // n
              data[3] = intToChar(SUCCESS);
              Send(data, from);
              break;
            }
            case PREPARE_RES:
            {
              int index = charToInt(msg[2]);
              if (charToInt(msg[3]) == 0) {
                tx[index].prepare_vote++;
              }
              // Broadcast COMMIT once at least 2/3 of nodes agreed.
              if (tx[index].prepare_vote >= 2 * N / 3) {
                data[0] = intToChar(COMMIT);
                data[1] = msg[1]; // v
                data[2] = msg[2]; // n
                Send(data);
                tx[index].prepare_vote = 0;
              }
              break;
            }
            case COMMIT:
            {
              int index = charToInt(msg[2]);
              tx[index].commit_vote++;
              // Commit locally once more than 2/3 of nodes agreed.
              if (tx[index].commit_vote > 2 * N / 3) {
                data[0] = intToChar(COMMIT_RES);
                data[1] = intToChar(v);
                data[2] = intToChar(n);
                // Consistency fix: encode via intToChar like every other
                // field (the original assigned the raw enum value).
                data[3] = intToChar(SUCCESS);
                tx[index].commit_vote = 0;
                // Record the committed value.
                values.push_back(tx[index].val);
                NS_LOG_INFO("node "<< m_id << " in view " << v << " finish " << block_num << "th times submit, at time " << Simulator::Now ().GetSeconds () << "s, value is " << values[block_num] << "\n");
                block_num++;
              }
              break;
            }
            case VIEW_CHANGE:
            {
              // Adopt the announced view number and leader id.
              int vt = charToInt(msg[1]);
              int lt = charToInt(msg[2]);
              v = vt;
              leader = lt;
              if (m_id == leader) {
                NS_LOG_INFO("view-change完成, 当前主节点为 " << leader << "视图为 " << v);
              }
              // Bug fix: the original case had no break and fell through
              // to the default branch, logging "Wrong msg" on every
              // view-change message.
              break;
            }
            default:
            {
              NS_LOG_INFO("Wrong msg");
              break;
            }
          }
        }
      socket->GetSockName (localAddress);
    }
}
// Rotate leadership round-robin and broadcast the new view/leader pair.
void
PbftNode::viewChange (void)
{
  // Zero-initialise: Send() serialises all 4 bytes and only 3 are set
  // below — the original broadcast one indeterminate byte.
  uint8_t data[4] = {0};
  leader = (leader + 1) % N;
  v += 1;
  data[0] = intToChar(VIEW_CHANGE);
  data[1] = intToChar(v);
  data[2] = intToChar(leader);
  Send(data);
}
// Copy a packet's payload into a std::string, prefixed with any data
// previously buffered for this sender.
std::string
PbftNode::getPacketContent(Ptr<Packet> packet, Address from)
{
  char *packetInfo = new char[packet->GetSize () + 1];
  std::ostringstream totalStream;
  packet->CopyData (reinterpret_cast<uint8_t*>(packetInfo), packet->GetSize ());
  packetInfo[packet->GetSize ()] = '\0'; // ensure that it is null terminated to avoid bugs
  /**
   * Add the buffered data to complete the packet
   */
  totalStream << m_bufferedData[from] << packetInfo;
  // Bug fix: the buffer was leaked on every received packet.
  delete [] packetInfo;
  std::string totalReceivedData(totalStream.str());
  return totalReceivedData;
}
// Deliver a packet on an already-connected client socket; used as the
// target of delayed Simulator::Schedule calls.
// static: paxos-node.cc also defines ns3::SendPacket, so internal linkage
// is needed to avoid an ODR/duplicate-symbol clash when both are linked.
static void
SendPacket(Ptr<Socket> socketClient,Ptr<Packet> p) {
  socketClient->Send(p);
}
// Send a 4-byte message to the specific node identified by `from`.
void
PbftNode::Send(uint8_t data[], Address from)
{
  Ptr<Packet> pkt = Create<Packet> (data, 4);
  TypeId factoryId = TypeId::LookupByName ("ns3::UdpSocketFactory");
  Ipv4Address peerIp = InetSocketAddress::ConvertFrom(from).GetIpv4 ();
  if (!m_peersSockets[peerIp])
    {
      // First contact with this peer: open and connect a client socket.
      Ptr<Socket> freshSocket = Socket::CreateSocket (GetNode (), factoryId);
      freshSocket->Connect (InetSocketAddress(peerIp, 7071));
      m_peersSockets[peerIp] = freshSocket;
    }
  // Deliver after a small random delay to model network latency.
  Simulator::Schedule(Seconds(getRandomDelay()), SendPacket, m_peersSockets[peerIp], pkt);
}
// Broadcast a 4-byte message to every neighbour, each delivery jittered
// by a small random delay.
void
PbftNode::Send (uint8_t data[])
{
  Ptr<Packet> p = Create<Packet> (data, 4);
  // (Removed two dead TypeId::LookupByName calls whose results were
  // never used; sockets were already connected in StartApplication.)
  for (std::vector<Ipv4Address>::iterator iter = m_peersAddresses.begin ();
       iter != m_peersAddresses.end (); ++iter)
    {
      Ptr<Socket> socketClient = m_peersSockets[*iter];
      double delay = getRandomDelay();
      Simulator::Schedule(Seconds(delay), SendPacket, socketClient, p);
    }
}
// 向所有邻居节点广播区块
void
PbftNode::SendBlock (void)
{
// NS_LOG_INFO("广播区块: time: " << Simulator::Now ().GetSeconds () << " s");
Ptr<Packet> p;
// TODO: 广播的内容包 p
int num = tx_speed / (1000 / (timeout * 1000));
uint8_t * data = generateTX(num);
int size = tx_size * num;
p = Create<Packet> (data, size);
TypeId tid = TypeId::LookupByName ("ns3::UdpSocketFactory");
std::vector<Ipv4Address>::iterator iter = m_peersAddresses.begin();
if (m_id == leader) {
NS_LOG_INFO("Leader node"<< m_id << "start broadcast, at time " <<Simulator::Now ().GetSeconds () << "s");
while(iter != m_peersAddresses.end()) {
TypeId tid = TypeId::LookupByName ("ns3::UdpSocketFactory");
Ptr<Socket> socketClient = m_peersSockets[*iter];
double delay = getRandomDelay();
Simulator::Schedule(Seconds(delay), SendPacket, socketClient, p);
iter++;
}
n_round++;
n++;
// view_change, 概率为1/10
if (rand() % 10 == 5) {
viewChange();
}
}
blockEvent = Simulator::Schedule (Seconds(timeout), &PbftNode::SendBlock, this);
if (n_round == 40) {
NS_LOG_INFO(" 已经发送了第 "<< n_round << "个区块 at time: " << Simulator::Now ().GetSeconds () << "s");
Simulator::Cancel(blockEvent);
}
}
// // 向所有邻居节点广播交易
// void
// RaftNode::SendTX (uint8_t data[], int num)
// {
// NS_LOG_INFO("广播区块: " << round << ", time: " << Simulator::Now ().GetSeconds () << " s");
// Ptr<Packet> p;
// int size = tx_size * num;
// p = Create<Packet> (data, size);
// TypeId tid = TypeId::LookupByName ("ns3::UdpSocketFactory");
// std::vector<Ipv4Address>::iterator iter = m_peersAddresses.begin();
// while(iter != m_peersAddresses.end()) {
// TypeId tid = TypeId::LookupByName ("ns3::UdpSocketFactory");
// Ptr<Socket> socketClient = m_peersSockets[*iter];
// double delay = getRandomDelay();
// Simulator::Schedule(Seconds(delay), SendPacket, socketClient, p);
// iter++;
// }
// round++;
// if (round == 50) {
// NS_LOG_INFO("node" << m_id << " 已经发送了 "<< round << "个区块 at time: " << Simulator::Now ().GetSeconds () << "s");
// // Simulator::Cancel (m_nextHeartbeat);
// add_change_value = 0;
// }
// }
} | zhayujie/blockchain-simulator | 56 | Some simulations of blockchain consensus based on ns3. Currently implemented consensus protocols include Raft, Paxos and PBFT. | C++ | zhayujie | Minimal Future Tech | |
src/applications/model/pbft-node.h | C/C++ Header | #ifndef PBFT_NODE_H
#define PBFT_NODE_H
#include <algorithm>
#include "ns3/application.h"
#include "ns3/event-id.h"
#include "ns3/ptr.h"
#include "ns3/traced-callback.h"
#include "ns3/address.h"
#include "ns3/boolean.h"
#include <map>
namespace ns3 {
class Address;
class Socket;
class Packet;
// PBFT consensus node: an ns-3 Application that exchanges pre-prepare /
// prepare / commit messages with its peers over UDP.
class PbftNode : public Application
{
public:
  static TypeId GetTypeId (void);
  void SetPeersAddresses (const std::vector<Ipv4Address> &peers); // set the addresses of all neighbour nodes
  PbftNode (void);
  virtual ~PbftNode (void);
  uint32_t m_id; // node id
  Ptr<Socket> m_socket; // listening socket
  Ptr<Socket> m_socketClient; // client socket
  std::map<Ipv4Address, Ptr<Socket>> m_peersSockets; // sockets of the neighbour nodes
  std::map<Address, std::string> m_bufferedData; // map holding the buffered data from previous handleRead events
  Address m_local; // address of this node
  std::vector<Ipv4Address> m_peersAddresses; // list of neighbour node addresses
  int N; // total number of nodes
  // int v; // current view number
  // int n; // sequence number of the current message within the view
  std::vector<int> values; // container of the values waiting to be applied
  int value; // value the current transaction wants to apply
  int leader; // id of the current leader node
  int is_leader; // whether this node is the leader
  // int round; // consensus round counter
  int block_num; // current block number
  EventId blockEvent; // scheduled block-broadcast event
  // Per-transaction consensus state: view, value and vote tallies.
  struct TX {
    int v;
    int val;
    int prepare_vote;
    int commit_vote;
  };
  TX tx[1000];
  // Virtual functions every Application subclass must implement.
  virtual void StartApplication (void);
  virtual void StopApplication (void);
  // Handle an incoming message.
  void HandleRead (Ptr<Socket> socket);
  // Decode a packet payload into a string.
  std::string getPacketContent(Ptr<Packet> packet, Address from);
  // Broadcast a message to every neighbour node.
  void Send (uint8_t data[]);
  // Send a message to the node at a specific address.
  void Send(uint8_t data[], Address from);
  // Broadcast a block to every neighbour node.
  void SendBlock(void);
  void viewChange(void);
};
// Message types exchanged by the PBFT protocol (encoded as the first byte
// of each packet).
enum Message
{
  REQUEST, // 0 client request <REQUEST, t> t: transaction
  PRE_PREPARE, // 1 pre-prepare <PRE_PREPARE, v, n, b> v: view number, b: block content, n: sequence number of this pre-prepare within the view
  PREPARE, // 2 prepare <PREPARE, v, n, H(b)>
  COMMIT, // 3 commit <COMMIT, v, n>
  PRE_PREPARE_RES, // 4 response to a pre-prepare <PRE_PREPARE_RES, v, n, S> S: State
  PREPARE_RES, // 5 response to a prepare
  COMMIT_RES, // 6 response to a commit
  REPLY, // 7 reply to the client
  VIEW_CHANGE // 8 view-change message
};
// Result code carried inside response messages.
enum State
{
  SUCCESS, // 0 success
  FAILED, // 1 failure
};
}
#endif
| zhayujie/blockchain-simulator | 56 | Some simulations of blockchain consensus based on ns3. Currently implemented consensus protocols include Raft, Paxos and PBFT. | C++ | zhayujie | Minimal Future Tech | |
src/applications/model/raft-node.cc | C++ | #include "ns3/address.h"
#include "ns3/address-utils.h"
#include "ns3/log.h"
#include "ns3/inet-socket-address.h"
#include "ns3/inet6-socket-address.h"
#include "ns3/node.h"
#include "ns3/socket.h"
#include "ns3/udp-socket.h"
#include "ns3/simulator.h"
#include "ns3/socket-factory.h"
#include "ns3/packet.h"
#include "ns3/trace-source-accessor.h"
#include "ns3/udp-socket-factory.h"
#include "ns3/tcp-socket-factory.h"
#include "ns3/uinteger.h"
#include "ns3/double.h"
#include "raft-node.h"
#include "stdlib.h"
#include "ns3/ipv4.h"
#include <ctime>
#include <map>
int tx_size = 200; // size of one transaction — NOTE(review): comment said "in KB", but generateTX/SendTX use it as a raw byte count per transaction; confirm intended unit
int tx_speed = 2000; // the rate of transaction generation, in op/s
namespace ns3 {
NS_LOG_COMPONENT_DEFINE ("RaftNode");
NS_OBJECT_ENSURE_REGISTERED (RaftNode);
// ns-3 type registration for RaftNode (TypeId metadata used by the object
// factory and attribute system).
TypeId
RaftNode::GetTypeId (void)
{
  static TypeId tid = TypeId ("ns3::RaftNode")
    .SetParent<Application> ()
    .SetGroupName("Applications")
    .AddConstructor<RaftNode> ()
    ;
  return tid;
}
// All real state initialisation happens in StartApplication().
RaftNode::RaftNode(void) {
}
RaftNode::~RaftNode(void) {
  NS_LOG_FUNCTION (this);
}
// Map a single decimal digit (0-9) to its ASCII character.
static char intToChar(int a) {
    return static_cast<char>('0' + a);
}
// Map an ASCII digit character back to its numeric value.
static int charToInt(char a) {
    return static_cast<int>(a) - static_cast<int>('0');
}
// Per-message receive delay: uniformly 0, 1 or 2 ms, returned in seconds.
float
getRandomDelay() {
    const int delayMs = rand() % 3;
    return delayMs * 1.0 / 1000;
}
// Raft election timeout: uniform in [150, 300) ms, returned in seconds.
float
getElectionTimeout() {
    const int timeoutMs = 150 + rand() % 150;
    return timeoutMs * 1.0 / 1000;
}
// Called by ns-3 when the simulation starts this application: resets the
// Raft state, opens the listening socket, connects a client socket to every
// peer, and arms the first election timeout.
void
RaftNode::StartApplication ()
{
  // Initialise the Raft state machine.
  m_value = 0; // replicated variable, starts at 0 on every node
  proposal = intToChar(m_id); // proposed new value == this node's id
  heartbeat_timeout = 0.05; // heartbeat period, 50 ms for every node
  vote_success = 0; // number of granted votes received
  vote_failed = 0; // number of rejected votes received
  has_voted = 0; // this node has not voted yet
  add_change_value = 0; // whether heartbeats should carry a proposal
  is_leader = 0; // whether this node is the leader
  round = 0;
  blockNum = 0;
  // Create the listening UDP socket.
  if (!m_socket)
    {
      TypeId tid = TypeId::LookupByName ("ns3::UdpSocketFactory");
      m_socket = Socket::CreateSocket (GetNode (), tid);
      // Note: GetAny() effectively listens on every interface of the node.
      InetSocketAddress local = InetSocketAddress (Ipv4Address::GetAny (), 7071);
      m_socket->Bind (local); // bind the local ip and port
      m_socket->Listen ();
    }
  m_socket->SetRecvCallback (MakeCallback (&RaftNode::HandleRead, this));
  m_socket->SetAllowBroadcast (true);
  std::vector<Ipv4Address>::iterator iter = m_peersAddresses.begin();
  // Connect a client socket to every peer node.
  NS_LOG_INFO("node"<< m_id << " start");
  while(iter != m_peersAddresses.end()) {
    // NS_LOG_INFO("node"<< m_id << *iter << "\n");
    TypeId tid = TypeId::LookupByName ("ns3::UdpSocketFactory");
    Ptr<Socket> socketClient = Socket::CreateSocket (GetNode (), tid);
    socketClient->Connect (InetSocketAddress(*iter, 7071));
    m_peersSockets[*iter] = socketClient;
    iter++;
  }
  // Start as a follower; once the election timeout expires become a
  // candidate and broadcast vote requests to every peer.
  m_nextElection = Simulator::Schedule (Seconds(getElectionTimeout()), &RaftNode::sendVote, this);
}
// Called by ns-3 when the simulation stops this application. Only the
// leader reports the final statistics.
void
RaftNode::StopApplication ()
{
  // NS_LOG_INFO ("At time " << Simulator::Now ().GetSeconds () << " finish the raft consensus");
  if (is_leader == 1) {
    NS_LOG_INFO ("Blocks:" << blockNum << " Rounds:" << round);
    NS_LOG_INFO ("At time " << Simulator::Now ().GetSeconds () << " Stop");
  }
}
// Receive callback for the listening socket. Each payload is a short ASCII
// message whose first byte is a Message enum digit; the switch below
// implements the follower/candidate/leader reactions.
void
RaftNode::HandleRead (Ptr<Socket> socket)
{
  Ptr<Packet> packet;
  Address from;
  Address localAddress;
  while ((packet = socket->RecvFrom (from)))
    {
      // Echo the raw packet back to its sender.
      socket->SendTo(packet, 0, from);
      if (packet->GetSize () == 0)
        { //EOF
          break;
        }
      if (InetSocketAddress::IsMatchingType (from))
        {
          std::string msg = getPacketContent(packet, from);
          // NS_LOG_INFO ("At time " << Simulator::Now ().GetSeconds () << "s, Node " << GetNode ()->GetId () << " received " << packet->GetSize () << " bytes, msg[0]: "<< msg[0]);
          // InetSocketAddress::ConvertFrom (from).GetIpv4 () << " port " <<
          // InetSocketAddress::ConvertFrom (from).GetPort ());
          // // Print the received content.
          // NS_LOG_INFO("Node " << GetNode ()->GetId () << " Total Received Data: " << msg);
          uint8_t data[4];
          switch (charToInt(msg[0]))
          {
            case VOTE_REQ:
            {
              // Vote request: grant it only if we have not voted yet.
              data[0] = intToChar(VOTE_RES);
              if (has_voted == 0) {
                data[1] = intToChar(SUCCESS);
                has_voted = 1;
              }
              else
              {
                data[1] = intToChar(FAILED);
              }
              Send(data, from);
              break;
            }
            case HEARTBEAT:
            {
              data[0] = intToChar(HEARTBEAT_RES);
              int type = charToInt(msg[1]);
              if (type == HEART_BEAT) { // a plain keep-alive heartbeat
                data[1] = intToChar(0); // plain acknowledgement
                // Reset the election timeout.
                Simulator::Cancel (m_nextElection);
                // m_nextElection = Simulator::Schedule (Seconds(getElectionTimeout()), &RaftNode::sendVote, this);
              } else { // a heartbeat carrying a change proposal
                // NOTE(review): sizeof(msg) is the size of the std::string
                // object, not the message length (msg.size()); only used by
                // a commented-out log below.
                int size = sizeof(msg);
                data[1] = intToChar(1); // proposal acknowledgement
                int value = charToInt(msg[2]);
                m_value = value;
                // Give up campaigning; stop the election timer.
                // NS_LOG_INFO("Node " << m_id << " change the value: " << value << " at time " <<Simulator::Now ().GetSeconds () << "s");
                // NS_LOG_INFO("received message: " << msg);
                // NS_LOG_INFO("Node " << m_id << " get " << size << " bytes" << " , value: " << value << " at time " <<Simulator::Now ().GetSeconds () << "s");
                Simulator::Cancel (m_nextElection);
              }
              data[2] = intToChar(SUCCESS);
              Send(data, from);
              break;
            }
            case VOTE_RES:
            {
              // Tally vote responses while still a candidate.
              if (!is_leader) {
                int state = charToInt(msg[1]);
                if (state == SUCCESS) {
                  vote_success += 1;
                }
                else {
                  vote_failed += 1;
                }
                // Become leader once more than half the cluster has voted for us.
                // if (vote_success + vote_failed == N-1) {
                if (vote_success + 1 > N / 2) {
                  vote_success = 0;
                  vote_failed = 0;
                  NS_LOG_INFO("Node " << m_id << " become leader in " << N << " nodes at time " << Simulator::Now ().GetSeconds () << "s");
                  // Cancel our own election timeout.
                  Simulator::Cancel (m_nextElection);
                  // Start attaching proposals to heartbeats after 1 s.
                  Simulator::Schedule (Seconds(1), &RaftNode::setProposal, this);
                  sendHeartBeat();
                  is_leader = 1;
                }
                else if (vote_failed >= N / 2)
                {
                  vote_success = 0;
                  vote_failed = 0;
                  // Allow voting again.
                  has_voted = 0;
                  // More than half the nodes rejected us: re-run the election.
                }
                //}
              }
              break;
            }
            case HEARTBEAT_RES:
            {
              int type = charToInt(msg[1]);
              if (type == PROPOSAL) {
                // Count acknowledgements for a proposal-carrying heartbeat;
                // once every peer answered, a majority commits the block.
                if (charToInt(msg[2]) == SUCCESS) {
                  vote_success += 1;
                } else {
                  vote_failed += 1;
                }
                if (vote_success + vote_failed == N-1) {
                  if (vote_success + 1 > N / 2) {
                    vote_success = 0;
                    vote_failed = 0;
                    NS_LOG_INFO ("At time " << Simulator::Now ().GetSeconds () << " leader finished processing a block, number: " << blockNum);
                    blockNum += 1;
                    if (blockNum >= 50) {
                      NS_LOG_INFO("node" << m_id << " already processed "<< blockNum << " blocks at time: " << Simulator::Now ().GetSeconds () << "s");
                      Simulator::Cancel (m_nextHeartbeat);
                    }
                    // Stop sending heartbeats.
                    // Simulator::Cancel (m_nextHeartbeat);
                  }
                  else
                  {
                    vote_success = 0;
                    vote_failed = 0;
                  }
                }
              } else {
              }
              break;
            }
            default:
            {
              NS_LOG_INFO("wrong msg");
              break;
            }
          }
        }
      socket->GetSockName (localAddress);
    }
}
// Decode a packet payload into a string, prepending any data buffered from
// earlier reads of the same peer.
// Fix: the new[]-allocated scratch buffer was never freed, leaking once per
// received packet.
std::string
RaftNode::getPacketContent(Ptr<Packet> packet, Address from)
{
  // NS_LOG_INFO("packet size: " << packet->GetSize ());
  char *packetInfo = new char[packet->GetSize () + 1];
  std::ostringstream totalStream;
  packet->CopyData (reinterpret_cast<uint8_t*>(packetInfo), packet->GetSize ());
  packetInfo[packet->GetSize ()] = '\0'; // ensure that it is null terminated to avoid bugs
  /**
   * Add the buffered data to complete the packet
   */
  totalStream << m_bufferedData[from] << packetInfo;
  std::string totalReceivedData(totalStream.str());
  delete[] packetInfo; // fix: release the scratch buffer (previously leaked)
  return totalReceivedData;
}
// Free helper used as a Simulator::Schedule target: transmit a prepared
// packet on the given client socket.
void
SendPacket(Ptr<Socket> socketClient,Ptr<Packet> p) {
  socketClient->Send(p);
}
// Reply with a 3-byte message to the node a packet came from, creating and
// caching a client socket for that peer on first use.
void
RaftNode::Send(uint8_t data[], Address from)
{
  Ptr<Packet> p;
  p = Create<Packet> (data, 3);
  //NS_LOG_INFO("packet: " << p);
  TypeId tid = TypeId::LookupByName ("ns3::UdpSocketFactory");
  Ptr<Socket> socketClient;
  if (!m_peersSockets[InetSocketAddress::ConvertFrom(from).GetIpv4 ()]) {
    socketClient = Socket::CreateSocket (GetNode (), tid);
    socketClient->Connect (InetSocketAddress(InetSocketAddress::ConvertFrom(from).GetIpv4 (), 7071));
    m_peersSockets[InetSocketAddress::ConvertFrom(from).GetIpv4 ()] = socketClient;
  }
  socketClient = m_peersSockets[InetSocketAddress::ConvertFrom(from).GetIpv4 ()];
  // Deliver after a small random delay to model network jitter.
  Simulator::Schedule(Seconds(getRandomDelay()), SendPacket, socketClient, p);
}
// Build a proposal payload carrying `num` transactions of tx_size bytes
// each: a '1'-filled buffer whose first two bytes are overwritten with the
// HEARTBEAT/PROPOSAL message header.
// Fix: the original wrote the terminating NUL at data[size], one byte past
// the end of a `size`-byte malloc (heap buffer overflow); allocate size+1.
// NOTE(review): the buffer is handed to Create<Packet> (which copies it)
// and never freed by the callers — pre-existing leak, unchanged here.
static uint8_t * generateTX (int num)
{
  int size = num * tx_size;
  uint8_t *data = (uint8_t *)std::malloc (size + 1);
  for (int i = 0; i < size; i++) {
    data[i] = '1';
  }
  data[size] = '\0';
  // NS_LOG_INFO("initialised: " << data);
  data[0] = intToChar(HEARTBEAT);
  data[1] = intToChar(PROPOSAL);
  return data;
}
// Broadcast a proposal payload of `num` transactions to every neighbour
// node, then advance the round counter; after 50 rounds stop attaching
// proposals to heartbeats.
// Cleanup: the original looked up "ns3::UdpSocketFactory" twice per call
// (and once per peer) without ever using the result — dead code removed.
void
RaftNode::SendTX (uint8_t data[], int num)
{
  NS_LOG_INFO("broadcast block round: " << round << ", time: " << Simulator::Now ().GetSeconds () << " s");
  // One shared packet; delivery is staggered by a random per-peer delay.
  Ptr<Packet> p = Create<Packet> (data, tx_size * num);
  for (std::vector<Ipv4Address>::iterator iter = m_peersAddresses.begin(); iter != m_peersAddresses.end(); ++iter) {
    Simulator::Schedule(Seconds(getRandomDelay()), SendPacket, m_peersSockets[*iter], p);
  }
  round++;
  if (round == 50) {
    NS_LOG_INFO("node" << m_id << " has sent "<< round << " blocks at time: " << Simulator::Now ().GetSeconds () << "s");
    // Simulator::Cancel (m_nextHeartbeat);
    add_change_value = 0; // revert to plain heartbeats
  }
}
// Broadcast a 3-byte control message to every neighbour node.
// Cleanup: the original looked up "ns3::UdpSocketFactory" once per call and
// once per peer without using the result — dead code removed.
void
RaftNode::Send (uint8_t data[])
{
  Ptr<Packet> p = Create<Packet> (data, 3);
  for (std::vector<Ipv4Address>::iterator iter = m_peersAddresses.begin(); iter != m_peersAddresses.end(); ++iter) {
    // Stagger delivery with a small random delay per peer.
    Simulator::Schedule(Seconds(getRandomDelay()), SendPacket, m_peersSockets[*iter], p);
  }
}
// Candidate behaviour: broadcast a vote request to every peer and re-arm
// the election timer so a new round starts if no leader emerges.
// Fix: Send() transmits 3 bytes but only data[0] and data[1] were set, so
// the third byte was read uninitialised; zero-initialise the buffer.
void
RaftNode::sendVote(void) {
  has_voted = 1; // vote for ourselves; refuse to vote for anybody else
  uint8_t data[3] = {0};
  data[0] = intToChar(VOTE_REQ);
  data[1] = intToChar(m_id);
  // NS_LOG_INFO("node" << m_id << " start election: "<< data << " at time: " << Simulator::Now ().GetSeconds () << "s" );
  Send(data);
  m_nextElection = Simulator::Schedule (Seconds(getElectionTimeout()), &RaftNode::sendVote, this);
}
// Leader-side periodic broadcast. Depending on add_change_value the
// heartbeat either carries a batch of transactions (a proposal) or is a
// plain keep-alive; in both cases the next heartbeat re-schedules itself.
void
RaftNode::sendHeartBeat(void) {
  has_voted = 1; // keep voting for ourselves
  // uint8_t data[4];
  if (add_change_value == 1) { // attach a proposal
    int num = tx_speed / (1000 / (heartbeat_timeout * 1000)); // transactions carried by this heartbeat
    // NS_LOG_INFO("tx count: " << num);
    uint8_t * data = generateTX(num);
    // m_value = m_id;
    // NS_LOG_INFO("node" << m_id << " start send proposal: "<< data << " at time: " << Simulator::Now ().GetSeconds () << "s" );
    // Cancel the heartbeat.
    // Simulator::Cancel(m_nextHeartbeat);
    m_nextHeartbeat = Simulator::Schedule (Seconds(heartbeat_timeout), &RaftNode::sendHeartBeat, this);
    SendTX(data, num);
  }
  else // plain heartbeat
  {
    // NOTE(review): Send() transmits 3 bytes but only data[0]/data[1] are
    // set, so the third byte is indeterminate; receivers do not read msg[2]
    // for plain heartbeats in HandleRead — confirm before relying on it.
    uint8_t data[4];
    data[0] = intToChar(HEARTBEAT);
    data[1] = intToChar(0);
    // Schedule the next heartbeat recursively.
    m_nextHeartbeat = Simulator::Schedule (Seconds(heartbeat_timeout), &RaftNode::sendHeartBeat, this);
    Send(data);
  }
  // NS_LOG_INFO("node" << m_id << " send heartbeat: "<< data);
}
// Scheduled once the leader is stable: from now on heartbeats carry
// transaction proposals instead of plain keep-alives.
void
RaftNode::setProposal(void) {
  add_change_value = 1;
}
} | zhayujie/blockchain-simulator | 56 | Some simulations of blockchain consensus based on ns3. Currently implemented consensus protocols include Raft, Paxos and PBFT. | C++ | zhayujie | Minimal Future Tech | |
src/applications/model/raft-node.h | C/C++ Header | #ifndef RAFT_NODE_H
#define RAFT_NODE_H
#include <algorithm>
#include "ns3/application.h"
#include "ns3/event-id.h"
#include "ns3/ptr.h"
#include "ns3/traced-callback.h"
#include "ns3/address.h"
#include "ns3/boolean.h"
#include <map>
namespace ns3 {
class Address;
class Socket;
class Packet;
// Raft consensus node: an ns-3 Application implementing leader election and
// heartbeat-driven value replication over UDP.
class RaftNode : public Application
{
public:
  static TypeId GetTypeId (void);
  void SetPeersAddresses (const std::vector<Ipv4Address> &peers); // set the addresses of all neighbour nodes
  RaftNode (void);
  virtual ~RaftNode (void);
  uint32_t m_id; // node id
  Ptr<Socket> m_socket; // listening socket
  Ptr<Socket> m_socketClient; // client socket
  std::map<Ipv4Address, Ptr<Socket>> m_peersSockets; // sockets of the neighbour nodes
  std::map<Address, std::string> m_bufferedData; // map holding the buffered data from previous handleRead events
  Address m_local; // address of this node
  std::vector<Ipv4Address> m_peersAddresses; // list of neighbour node addresses
  int N; // total number of nodes
  int is_leader; // whether this node is the leader
  int has_voted; // whether this node has already voted
  int m_value; // the variable replicated through consensus
  char proposal; // the command to send
  int vote_success; // number of granted vote responses
  int vote_failed; // number of rejected vote responses
  float heartbeat_timeout; // heartbeat period
  int add_change_value; // whether heartbeats should carry a proposal
  EventId m_nextElection; // next become-candidate / broadcast-vote event
  EventId m_nextHeartbeat; // next heartbeat event
  // int tx_speed; // transaction generation rate, in op/s
  int blockNum; // number of blocks
  int round; // consensus round counter
  uint8_t * tx; // transactions
  // int tx_size; // size of a single transaction
  // Virtual functions every Application subclass must implement.
  virtual void StartApplication (void);
  virtual void StopApplication (void);
  // Handle an incoming message.
  void HandleRead (Ptr<Socket> socket);
  void Send (uint8_t data[]);
  std::string getPacketContent(Ptr<Packet> packet, Address from);
  // Send a message.
  //void SendMessage(enum Message responseMessage, std::string msg, Ptr<Socket> outgoingSocket);
  void sendVote(void);
  void sendHeartBeat(void);
  void Send(uint8_t data[], Address from);
  void SendTX(uint8_t data[], int num);
  // Arm proposal-carrying heartbeats.
  void setProposal(void);
};
// Message types exchanged by the Raft protocol (first byte of each packet).
enum Message
{
  CLIENT_REQ, // 0 client request
  CLIENT_RES, // 1 response to the client
  VOTE_REQ, // 2 vote request
  VOTE_RES, // 3 response to a vote request
  HEARTBEAT, // 4 heartbeat
  HEARTBEAT_RES, // 5 heartbeat acknowledgement
};
// Second byte of a HEARTBEAT message.
enum HeartBeatType
{
  HEART_BEAT, // 0 plain heartbeat
  PROPOSAL, // 1 heartbeat carrying a proposal
};
// Result code carried inside response messages.
enum State
{
  SUCCESS, // 0 success
  FAILED, // 1 failure
};
}
#endif | zhayujie/blockchain-simulator | 56 | Some simulations of blockchain consensus based on ns3. Currently implemented consensus protocols include Raft, Paxos and PBFT. | C++ | zhayujie | Minimal Future Tech | |
app.py | Python | # encoding:utf-8
import argparse
import config
from channel import channel_factory
from common import log, const
from multiprocessing import Pool
from plugins.plugin_manager import PluginManager
# Start channel
def start_process(channel_type, config_path):
    """Bootstrap a single channel inside a (possibly child) process.

    Worker processes do not share the parent's memory space, so the
    configuration file is re-loaded here before the channel is built.

    :param channel_type: channel type code to start
    :param config_path: path to the config.json to load
    """
    try:
        config.load_config(config_path)
        model_type = config.conf().get("model").get("type")
        log.info("[MultiChannel] Start up {} on {}", model_type, channel_type)
        channel_factory.create_channel(channel_type).startup()
    except Exception as err:
        log.error("[MultiChannel] Start up failed on {}: {}", channel_type, str(err))
        raise err
def main():
    """Entry point: load the config and start the configured channel(s).

    A plain-string channel config starts one channel in this process; a
    one-element list does the same; a longer list starts each channel in a
    pool worker, keeping the terminal channel (if configured) in the main
    process so it can own stdin.
    """
    try:
        # load config
        config.load_config(args.config)
        model_type = config.conf().get("model").get("type")
        channel_type = config.conf().get("channel").get("type")
        PluginManager()
        # 1. For single string config format, start directly
        if not isinstance(channel_type, list):
            start_process(channel_type, args.config)
            exit(0)
        # 2. For single channel list config, start directly
        if len(channel_type) == 1:
            start_process(channel_type[0], args.config)
            exit(0)
        # 3. For multi-channel config, start with process pool
        # Use main process to start terminal channel
        if const.TERMINAL in channel_type:
            index = channel_type.index(const.TERMINAL)
            terminal = channel_type.pop(index)
        else:
            terminal = None
        # Use process pool to start other channel subprocesses
        pool = Pool(len(channel_type))
        for type_item in channel_type:
            log.info("[INIT] Start up: {} on {}", model_type, type_item)
            pool.apply_async(start_process, args=[type_item, args.config])
        if terminal:
            start_process(terminal, args.config)
        # Wait for all processes in the pool to complete
        pool.close()
        pool.join()
    except Exception as e:
        log.error("App startup failed!")
        log.exception(e)
if __name__ == '__main__':
    # Parse CLI arguments; `args` is read as a module-level global by main()
    # and start_process().
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", help="config.json path(e.g: ./config.json or /usr/local/bot-on-anything/config.json)",type=str,default="./config.json")
    args = parser.parse_args()
    main()
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
bridge/bridge.py | Python | from model import model_factory
import config
from plugins.event import Event, EventContext
from plugins.plugin_manager import PluginManager
class Bridge(object):
    """Routes a query from a channel to the configured model, letting
    plugins intercept or replace the request via ON_BRIDGE_HANDLE_CONTEXT."""

    def __init__(self):
        pass

    def fetch_reply_content(self, query, context):
        """Return the model's reply for `query` (blocking).

        Plugins may rewrite the query, pick another model through
        econtext['args']['model'], or short-circuit with a canned reply.
        """
        econtext = PluginManager().emit_event(EventContext(
            Event.ON_BRIDGE_HANDLE_CONTEXT, {'context': query, 'args': context}))
        type = econtext['args'].get('model') or config.conf().get("model").get("type")
        query = econtext.econtext.get("context", None)
        reply = econtext.econtext.get("reply", "无回复")
        if not econtext.is_pass() and query:
            return model_factory.create_bot(type).reply(query, context)
        else:
            return reply

    async def fetch_reply_stream(self, query, context):
        """Async generator yielding (final, partial_reply) chunks from the
        model, or a single plugin-supplied reply when short-circuited."""
        econtext = PluginManager().emit_event(EventContext(
            Event.ON_BRIDGE_HANDLE_CONTEXT, {'context': query, 'args': context}))
        type = econtext['args'].get('model') or config.conf().get("model").get("type")
        query = econtext.econtext.get("context", None)
        reply = econtext.econtext.get("reply", "无回复")
        bot = model_factory.create_bot(type)
        if not econtext.is_pass() and query:
            async for final, response in bot.reply_text_stream(query, context):
                yield final, response
        else:
            yield True, reply
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/channel.py | Python | """
Message sending channel abstract class
"""
from bridge.bridge import Bridge
class Channel(object):
    """Abstract base class for message channels (wechat, telegram, http, ...)."""

    def startup(self):
        """
        init channel: start its event loop / server (blocking).
        """
        raise NotImplementedError

    def handle(self, msg):
        """
        process received msg
        :param msg: message object
        """
        raise NotImplementedError

    def send(self, msg, receiver):
        """
        send message to user
        :param msg: message content
        :param receiver: receiver channel account
        :return:
        """
        raise NotImplementedError

    def build_reply_content(self, query, context=None):
        # Delegate to the model bridge for a blocking reply.
        return Bridge().fetch_reply_content(query, context)

    async def build_reply_stream(self, query, context=None):
        # Delegate to the model bridge for a streaming reply.
        async for final,response in Bridge().fetch_reply_stream(query, context):
            yield final,response
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/channel_factory.py | Python | """
channel factory
"""
from common import const
def create_channel(channel_type):
    """
    Create a channel instance for the given channel type code.

    Each factory defers its import so that only the dependencies of the
    selected channel are actually loaded.

    :param channel_type: channel type code
    :return: channel instance
    :raises RuntimeError: for an unknown type code
    """
    def _terminal():
        from channel.terminal.terminal_channel import TerminalChannel
        return TerminalChannel()

    def _wechat():
        from channel.wechat.wechat_channel import WechatChannel
        return WechatChannel()

    def _wechat_mp():
        from channel.wechat.wechat_mp_channel import WechatSubsribeAccount
        return WechatSubsribeAccount()

    def _wechat_mp_service():
        from channel.wechat.wechat_mp_service_channel import WechatServiceAccount
        return WechatServiceAccount()

    def _wechat_com():
        from channel.wechat.wechat_com_channel import WechatEnterpriseChannel
        return WechatEnterpriseChannel()

    def _qq():
        from channel.qq.qq_channel import QQChannel
        return QQChannel()

    def _gmail():
        from channel.gmail.gmail_channel import GmailChannel
        return GmailChannel()

    def _telegram():
        from channel.telegram.telegram_channel import TelegramChannel
        return TelegramChannel()

    def _slack():
        from channel.slack.slack_channel import SlackChannel
        return SlackChannel()

    def _http():
        from channel.http.http_channel import HttpChannel
        return HttpChannel()

    def _dingtalk():
        from channel.dingtalk.dingtalk_channel import DingTalkChannel
        return DingTalkChannel()

    def _feishu():
        from channel.feishu.feishu_channel import FeiShuChannel
        return FeiShuChannel()

    def _discord():
        from channel.discord.discord_channel import DiscordChannel
        return DiscordChannel()

    factories = {
        const.TERMINAL: _terminal,
        const.WECHAT: _wechat,
        const.WECHAT_MP: _wechat_mp,
        const.WECHAT_MP_SERVICE: _wechat_mp_service,
        const.WECHAT_COM: _wechat_com,
        const.QQ: _qq,
        const.GMAIL: _gmail,
        const.TELEGRAM: _telegram,
        const.SLACK: _slack,
        const.HTTP: _http,
        const.DINGTALK: _dingtalk,
        const.FEISHU: _feishu,
        const.DISCORD: _discord,
    }
    factory = factories.get(channel_type)
    if factory is None:
        raise RuntimeError("unknown channel_type in config.json: " + channel_type)
    return factory()
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/dingtalk/dingtalk_channel.py | Python | # encoding:utf-8
import json
import hmac
import hashlib
import base64
import time
import requests
from urllib.parse import quote_plus
from common import log
from flask import Flask, request, render_template, make_response
from common import const
from common import functions
from config import channel_conf
from config import channel_conf_val
from channel.channel import Channel
class DingTalkHandler():
    """Per-robot DingTalk API helper: signs webhook URLs, refreshes access
    tokens, and builds/sends the various reply payload shapes.

    Fixes applied:
    - the signed-webhook URL literal had a corrupted ``&timestamp=`` query
      fragment; restored per DingTalk's timestamp+sign scheme;
    - the markdown image literals had been stripped to empty strings, so the
      image URLs in ``reply`` were never embedded; restored ``![image](url)``.
    """

    def __init__(self, config):
        # `config` is one robot's section from the channel configuration.
        self.dingtalk_key = config.get('dingtalk_key')
        self.dingtalk_secret = config.get('dingtalk_secret')
        self.dingtalk_token = config.get('dingtalk_token')
        self.dingtalk_post_token = config.get('dingtalk_post_token')
        self.access_token = None
        log.info("[DingTalk] AppKey={}, AppSecret={} Token={} post Token={}".format(self.dingtalk_key, self.dingtalk_secret, self.dingtalk_token, self.dingtalk_post_token))

    def notify_dingtalk_webhook(self, data):
        """Post `data` to the robot's custom webhook, signed with the
        timestamp + HMAC-SHA256(secret) scheme required by DingTalk."""
        timestamp = round(time.time() * 1000)
        secret_enc = bytes(self.dingtalk_secret, encoding='utf-8')
        string_to_sign = '{}\n{}'.format(timestamp, self.dingtalk_secret)
        string_to_sign_enc = bytes(string_to_sign, encoding='utf-8')
        hmac_code = hmac.new(secret_enc, string_to_sign_enc,
                             digestmod=hashlib.sha256).digest()
        sign = quote_plus(base64.b64encode(hmac_code))
        # Fix: query string must be access_token=...&timestamp=...&sign=...
        notify_url = f"https://oapi.dingtalk.com/robot/send?access_token={self.dingtalk_token}&timestamp={timestamp}&sign={sign}"
        try:
            log.info("[DingTalk] url={}".format(str(notify_url)))
            r = requests.post(notify_url, json=data)
            reply = r.json()
            log.info("[DingTalk] reply={}".format(str(reply)))
        except Exception as e:
            log.error(e)

    def get_token_internal(self):
        """Fetch a fresh app access token and cache it with its expiry."""
        access_token_url = 'https://api.dingtalk.com/v1.0/oauth2/accessToken'
        try:
            r = requests.post(access_token_url, json={"appKey": self.dingtalk_key, "appSecret": self.dingtalk_secret})
        except:
            raise Exception("DingTalk token获取失败!!!")
        data = json.loads(r.content)
        access_token = data['accessToken']
        expire_in = data['expireIn']
        self.access_token = access_token
        self.expire_at = int(expire_in) + time.time()
        return self.access_token

    def get_token(self):
        # Refresh lazily when the token is missing or expired.
        if self.access_token is None or self.expire_at <= time.time():
            self.get_token_internal()
        return self.access_token

    def get_post_url(self, data):
        # conversationType "1" is a one-to-one chat, anything else a group.
        type = data['conversationType']
        if type == "1":
            return f"https://api.dingtalk.com/v1.0/robot/oToMessages/batchSend"
        else:
            return f"https://api.dingtalk.com/v1.0/robot/groupMessages/send"

    def build_response(self, reply, data):
        """Build the reply payload matching the conversation type."""
        type = data['conversationType']
        if type == "1":
            return self.build_oto_response(reply, data)
        else:
            return self.build_group_response(reply, data)

    def build_oto_response(self, reply, data):
        """Payload for a one-to-one chat: markdown when `reply` is a list of
        image URLs, plain text otherwise."""
        conversation_id = data['conversationId']  # NOTE(review): unused here
        prompt = data['text']['content']
        prompt = prompt.strip()
        img_match_prefix = functions.check_prefix(
            prompt, channel_conf_val(const.DINGTALK, 'image_create_prefix'))
        nick = data['senderNick']
        staffid = data['senderStaffId']
        robotCode = data['robotCode']
        if img_match_prefix and isinstance(reply, list):
            images = ""
            for url in reply:
                # Fix: embed each URL as a markdown image (literal was empty,
                # leaving `url` unused).
                images += f"![image]({url})\n"
            reply = images
            resp = {
                "msgKey": "sampleMarkdown",
                "msgParam": json.dumps({
                    "title": "IMAGE @" + nick + " ",
                    "text": images + " \n " + "@" + nick
                }),
                "robotCode": robotCode,
                "userIds": [staffid]
            }
        else:
            resp = {
                "msgKey": "sampleText",
                "msgParam": json.dumps({
                    "content": reply
                }),
                "robotCode": robotCode,
                "userIds": [staffid]
            }
        return resp

    def build_group_response(self, reply, data):
        """Payload for a group chat: like build_oto_response but addressed
        to the conversation and @-mentioning the sender."""
        conversation_id = data['conversationId']
        prompt = data['text']['content']
        prompt = prompt.strip()
        img_match_prefix = functions.check_prefix(
            prompt, channel_conf_val(const.DINGTALK, 'image_create_prefix'))
        nick = data['senderNick']
        staffid = data['senderStaffId']
        robot_code = data['robotCode']
        if img_match_prefix and isinstance(reply, list):
            images = ""
            for url in reply:
                # Fix: embed each URL as a markdown image (literal was empty).
                images += f"![image]({url})\n"
            reply = images
            resp = {
                "msgKey": "sampleMarkdown",
                "msgParam": json.dumps({
                    "title": "IMAGE @" + nick + " ",
                    "text": images + " \n " + "@" + nick
                }),
                "robotCode": robot_code,
                "openConversationId": conversation_id,
                "at": {
                    "atUserIds": [
                        staffid
                    ],
                    "isAtAll": False
                }
            }
        else:
            resp = {
                "msgKey": "sampleText",
                "msgParam": json.dumps({
                    "content": reply + " \n " + "@" + nick
                }),
                "robotCode": robot_code,
                "openConversationId": conversation_id,
                "at": {
                    "atUserIds": [
                        staffid
                    ],
                    "isAtAll": False
                }
            }
        return resp

    def build_webhook_response(self, reply, data):
        """Payload shaped for the custom-webhook API (msgtype-based)."""
        conversation_id = data['conversationId']  # NOTE(review): unused here
        prompt = data['text']['content']
        prompt = prompt.strip()
        img_match_prefix = functions.check_prefix(
            prompt, channel_conf_val(const.DINGTALK, 'image_create_prefix'))
        nick = data['senderNick']
        staffid = data['senderStaffId']
        robotCode = data['robotCode']  # NOTE(review): unused here
        if img_match_prefix and isinstance(reply, list):
            images = ""
            for url in reply:
                # Fix: embed each URL as a markdown image (literal was empty).
                images += f"![image]({url})\n"
            reply = images
            resp = {
                "msgtype": "markdown",
                "markdown": {
                    "title": "IMAGE @" + nick + " ",
                    "text": images + " \n " + "@" + nick
                },
                "at": {
                    "atUserIds": [
                        staffid
                    ],
                    "isAtAll": False
                }
            }
        else:
            resp = {
                "msgtype": "text",
                "text": {
                    "content": reply
                },
                "at": {
                    "atUserIds": [
                        staffid
                    ],
                    "isAtAll": False
                }
            }
        return resp

    def chat(self, channel, data):
        """Compute a reply via the channel and push it back to DingTalk."""
        reply = channel.handle(data)
        self.notify_dingtalk(data, reply)

    def notify_dingtalk(self, data, reply):
        """Send `reply` as text to the session webhook carried in `data`,
        @-mentioning the sender when a staff id is present."""
        headers = {
            'content-type': 'application/json',
            'Accept': '*/*',
        }
        staff_id = data.get("senderStaffId", None)
        values = {
            'msgtype': 'text',
            'text': {
                'content': reply,
            }
        }
        if staff_id:
            values['at'] = {
                "atUserIds": [
                    staff_id
                ],
                "isAtAll": False
            }
        notify_url = data.get("sessionWebhook")
        try:
            r = requests.post(notify_url, json=values, headers=headers)
            resp = r.json()
            log.info("[DingTalk] response={}".format(str(resp)))
        except Exception as e:
            log.error(e)
class DingTalkChannel(Channel):
    """Flask-backed DingTalk channel: builds model replies for incoming
    robot callbacks and answers through each request's session webhook."""

    def __init__(self):
        # Last callback payload, used by reply_markdown for the webhook URL.
        self.data = None
        log.info("[DingTalk] started.")

    def startup(self):
        # Blocking: serve the callback endpoint on the configured port.
        http_app.run(host='0.0.0.0', port=channel_conf(const.DINGTALK).get('port'))

    def handle(self, data):
        """Build a model reply for an incoming robot callback payload."""
        reply = "您好,有什么我可以帮助您解答的问题吗?"
        prompt = data['text']['content']
        prompt = prompt.strip()
        self.data = data
        # Fix: the original test `str(prompt) != 0` compared a string with an
        # int and was therefore always true; compare the length so an empty
        # prompt falls through to the default greeting.
        if len(str(prompt)) != 0:
            conversation_id = data['conversationId']  # NOTE(review): unused
            sender_id = data['senderId']
            context = dict()
            context['channel'] = self
            # A configured image prefix switches the request to image creation.
            img_match_prefix = functions.check_prefix(
                prompt, channel_conf_val(const.DINGTALK, 'image_create_prefix'))
            if img_match_prefix:
                prompt = prompt.split(img_match_prefix, 1)[1].strip()
                context['type'] = 'IMAGE_CREATE'
            id = sender_id
            context['from_user_id'] = str(id)
            reply = super().build_reply_content(prompt, context)
        return reply

    def send(self, msg, receiver):
        # The payload type cannot be distinguished here yet; treat `msg` as
        # an image URL and embed it as markdown. Fix: the literal had been
        # stripped to an empty f-string, silently dropping the content.
        self.reply_markdown("图片信息", f"![image]({msg})")

    def reply_markdown(self, title: str, text: str):
        """Push a markdown card to the current session webhook.

        :return: parsed JSON response, or None on failure.
        """
        request_headers = {
            'Content-Type': 'application/json',
            'Accept': '*/*',
        }
        values = {
            'msgtype': 'markdown',
            'markdown': {
                'title': title,
                'text': text,
            }
        }
        staff_id = self.data.get("senderStaffId", None)
        if staff_id:
            values['at'] = {
                "atUserIds": [
                    staff_id
                ],
                "isAtAll": False
            }
        try:
            notify_url = self.data.get("sessionWebhook")
            response = requests.post(notify_url,
                                     headers=request_headers,
                                     data=json.dumps(values))
            response.raise_for_status()
        except Exception as e:
            log.error(
                f'reply markdown failed, error={e}, response.text={response.text if "response" in locals() else ""}')
            return None
        return response.json()
# Module-level bootstrap: one DingTalkChannel shared by all robots plus a
# DingTalkHandler per configured robot, keyed by group name (preferred) or
# app key, falling back to a single 'DEFAULT' handler.
dd = DingTalkChannel()
handlers = dict()
robots = channel_conf(const.DINGTALK).get('dingtalk_robots')
if robots and len(robots) > 0:
    for robot in robots:
        robot_config = channel_conf(const.DINGTALK).get(robot)
        robot_key = robot_config.get('dingtalk_key')
        group_name = robot_config.get('dingtalk_group')
        handlers[group_name or robot_key] = DingTalkHandler(robot_config)
else:
    handlers['DEFAULT'] = DingTalkHandler(channel_conf(const.DINGTALK))
http_app = Flask(__name__,)
@http_app.route("/", methods=['POST'])
def chat():
    """HTTP callback endpoint for DingTalk robot messages.

    Picks the handler by conversation title, then robot code, then the
    default, and delegates the reply. Returns ret=200 on success, ret=201
    for payloads that cannot be handled.
    """
    log.info("[DingTalk] chat_headers={}".format(str(request.headers)))
    log.info("[DingTalk] chat={}".format(str(request.data)))
    # NOTE(review): read but never checked; presumably intended for
    # dingtalk_post_token verification — confirm before removing.
    token = request.headers.get('token')
    data = json.loads(request.data)
    if data:
        content = data['text']['content']
        if not content:
            # Fix: previously a bare `return` (None), which makes Flask
            # raise "view did not return a valid response" (HTTP 500).
            return {'ret': 201}
        code = data['robotCode']
        group_name = None
        if 'conversationTitle' in data:
            group_name = data['conversationTitle']
        handler = handlers.get(group_name, handlers.get(code, handlers.get('DEFAULT')))
        handler.chat(dd, data)
        return {'ret': 200}
    return {'ret': 201}
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/discord/discord_channel.py | Python | # encoding:utf-8
"""
discord channel
Python discord - https://github.com/Rapptz/discord.py.git
"""
from channel.channel import Channel
from common.log import logger
from config import conf, common_conf_val, channel_conf
import ssl
import discord
from discord.ext import commands
class DiscordChannel(Channel):
def __init__(self):
    # All settings come from the 'discord' section of the configuration.
    config = conf()  # NOTE(review): read but unused below — confirm whether only its load side effect is wanted
    self.token = channel_conf('discord').get('app_token')
    self.discord_channel_name = channel_conf('discord').get('channel_name')
    self.discord_channel_session = channel_conf('discord').get('channel_session', 'author')
    self.voice_enabled = channel_conf('discord').get('voice_enabled', False)
    # First configured clear-memory command; replayed to wipe sessions.
    self.cmd_clear_session = common_conf_val('clear_memory_commands', ['#清除记忆'])[0]
    self.sessions = []
    # Gateway intents: message content plus guild/member/voice events.
    self.intents = discord.Intents.default()
    self.intents.message_content = True
    self.intents.guilds = True
    self.intents.members = True
    self.intents.messages = True
    self.intents.voice_states = True
    # TLS context using the configured certificate bundle.
    context = ssl.create_default_context()
    context.load_verify_locations(common_conf_val('certificate_file'))
    self.bot = commands.Bot(command_prefix='!', intents=self.intents, ssl=context)
    self.bot.add_listener(self.on_ready)
    logger.debug('cmd_clear_session %s', self.cmd_clear_session)
def startup(self):
self.bot.add_listener(self.on_message)
self.bot.add_listener(self.on_guild_channel_delete)
self.bot.add_listener(self.on_guild_channel_create)
self.bot.add_listener(self.on_private_channel_delete)
self.bot.add_listener(self.on_private_channel_create)
self.bot.add_listener(self.on_channel_delete)
self.bot.add_listener(self.on_channel_create)
self.bot.add_listener(self.on_thread_delete)
self.bot.add_listener(self.on_thread_create)
self.bot.run(self.token)
async def on_ready(self):
logger.info('Bot is online user:{}'.format(self.bot.user))
if self.voice_enabled == False:
logger.debug('disable music')
await self.bot.remove_cog("Music")
async def join(self, ctx):
logger.debug('join %s', repr(ctx))
channel = ctx.author.voice.channel
await channel.connect()
async def _do_on_channel_delete(self, channel):
if not self.discord_channel_name or channel.name != self.discord_channel_name:
logger.debug('skip _do_on_channel_delete %s', channel.name)
return
for name in self.sessions:
try:
response = self.send_text(name, self.cmd_clear_session)
logger.debug('_do_on_channel_delete %s %s', channel.name, response)
except Exception as e:
logger.warn('clear session except, id:%s', name)
self.sessions.clear()
async def on_guild_channel_delete(self, channel):
logger.debug('on_guild_channel_delete %s', repr(channel))
await self._do_on_channel_delete(channel)
async def on_guild_channel_create(self, channel):
logger.debug('on_guild_channel_create %s', repr(channel))
async def on_private_channel_delete(self, channel):
logger.debug('on_channel_delete %s', repr(channel))
await self._do_on_channel_delete(channel)
async def on_private_channel_create(self, channel):
logger.debug('on_channel_create %s', repr(channel))
async def on_channel_delete(self, channel):
logger.debug('on_channel_delete %s', repr(channel))
async def on_channel_create(self, channel):
logger.debug('on_channel_create %s', repr(channel))
async def on_thread_delete(self, thread):
print('on_thread_delete', thread)
if self.discord_channel_session != 'thread' or thread.parent.name != self.discord_channel_name:
logger.debug('skip on_thread_delete %s', thread.id)
return
try:
response = self.send_text(thread.id, self.cmd_clear_session)
if thread.id in self.sessions:
self.sessions.remove(thread.id)
logger.debug('on_thread_delete %s %s', thread.id, response)
except Exception as e:
logger.warn('on_thread_delete except %s', thread.id)
raise e
async def on_thread_create(self, thread):
logger.debug('on_thread_create %s', thread.id)
if self.discord_channel_session != 'thread' or thread.parent.name != self.discord_channel_name:
logger.debug('skip on_channel_create %s', repr(thread))
return
self.sessions.append(thread.id)
async def on_message(self, message):
"""
listen for message event
"""
await self.bot.wait_until_ready()
if not self.check_message(message):
return
prompt = message.content.strip();
logger.debug('author: %s', message.author)
logger.debug('prompt: %s', prompt)
session_id = message.author
if self.discord_channel_session == 'thread' and isinstance(message.channel, discord.Thread):
logger.debug('on_message thread id %s', message.channel.id)
session_id = message.channel.id
await message.channel.send('...')
response = response = self.send_text(session_id, prompt)
await message.channel.send(response)
def check_message(self, message):
if message.author == self.bot.user:
return False
prompt = message.content.strip();
if not prompt:
logger.debug('no prompt author: %s', message.author)
return False
if self.discord_channel_name:
if isinstance(message.channel, discord.Thread) and message.channel.parent.name == self.discord_channel_name:
return True
if not isinstance(message.channel, discord.Thread) and self.discord_channel_session != 'thread' and message.channel.name == self.discord_channel_name:
return True
logger.debug("The accessed channel does not meet the discord channel configuration conditions.")
return False
else:
return True
def send_text(self, id, content):
context = dict()
context['type'] = 'TEXT'
context['from_user_id'] = id
context['content'] = content
return super().build_reply_content(content, context) | zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/feishu/feishu_channel.py | Python | # encoding:utf-8
import json
import hmac
import hashlib
import base64
import time
import requests
from urllib.parse import quote_plus
from common import log
from flask import Flask, request, render_template, make_response
from common import const
from common import functions
from config import channel_conf
from config import channel_conf_val
from channel.channel import Channel
from urllib import request as url_request
from channel.feishu.store import MemoryStore
class FeiShuChannel(Channel):
    """Feishu (Lark) channel: receives event callbacks and replies through the
    Feishu Open API message endpoint."""

    def __init__(self):
        self.app_id = channel_conf(
            const.FEISHU).get('app_id')
        self.app_secret = channel_conf(
            const.FEISHU).get('app_secret')
        self.verification_token = channel_conf(
            const.FEISHU).get('verification_token')
        log.info("[FeiShu] app_id={}, app_secret={} verification_token={}".format(
            self.app_id, self.app_secret, self.verification_token))
        # De-duplicates message ids: Feishu retries callbacks that are not acked.
        self.memory_store = MemoryStore()

    def startup(self):
        """Run the Flask callback server (blocking)."""
        http_app.run(host='0.0.0.0', port=channel_conf(
            const.FEISHU).get('port'))

    def get_tenant_access_token(self):
        """Fetch a tenant_access_token from the Feishu auth API.

        Returns an empty string on any failure.
        """
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        headers = {
            "Content-Type": "application/json"
        }
        req_body = {
            "app_id": self.app_id,
            "app_secret": self.app_secret
        }
        data = bytes(json.dumps(req_body), encoding='utf8')
        req = url_request.Request(url=url, data=data,
                                  headers=headers, method='POST')
        try:
            response = url_request.urlopen(req)
        except Exception as e:
            print(e.read().decode())
            return ""
        rsp_body = response.read().decode('utf-8')
        rsp_dict = json.loads(rsp_body)
        code = rsp_dict.get("code", -1)
        if code != 0:
            print("get tenant_access_token error, code =", code)
            return ""
        return rsp_dict.get("tenant_access_token", "")

    def notify_feishu(self, token, receive_type, receive_id, at_id, answer):
        """Send `answer` as a plain-text message via the Feishu IM API."""
        log.info("notify_feishu.receive_type = {} receive_id={}",
                 receive_type, receive_id)
        url = "https://open.feishu.cn/open-apis/im/v1/messages"
        params = {"receive_id_type": receive_type}
        # text = at_id and "<at user_id=\"%s\">%s</at>" % (
        #     at_id, answer.lstrip()) or answer.lstrip()
        text = answer.lstrip()
        log.info("notify_feishu.text = {}", text)
        msgContent = {
            "text": text,
        }
        req = {
            "receive_id": receive_id,  # chat id
            "msg_type": "text",
            "content": json.dumps(msgContent),
        }
        payload = json.dumps(req)
        headers = {
            # your access token
            "Authorization": "Bearer " + token,
            "Content-Type": "application/json",
        }
        response = requests.request(
            "POST", url, params=params, headers=headers, data=payload
        )
        log.info("notify_feishu.response.content = {}", response.content)

    def handle(self, message):
        """Process an im.message.receive_v1 event and reply to its chat."""
        event = message["event"]
        msg = event["message"]
        messageId = msg["message_id"]
        chat_type = msg["chat_type"]
        sender_id = event["sender"]["sender_id"]["open_id"]
        prompt = json.loads(msg["content"])["text"]
        prompt = prompt.replace("@_user_1", "")
        # Drop duplicate deliveries (Feishu retries un-acked callbacks).
        r, v = self.memory_store.get(messageId)
        if v:
            return {'ret': 200}
        self.memory_store.set(messageId, True)
        # Only plain-text messages are handled.
        message_type = msg["message_type"]
        if message_type != "text":
            return {'ret': 200}
        if chat_type == "group":
            mentions = msg["mentions"]
            # In group chats the bot only responds when @-mentioned.
            if not mentions:
                return {'ret': 200}
            receive_type = "chat_id"
            receive_id = msg.get("chat_id")
            at_id = sender_id
        elif chat_type == "p2p":
            receive_type = "open_id"
            receive_id = sender_id
            at_id = None
        # NOTE(review): a chat_type other than group/p2p would leave
        # receive_type unset — presumably such events are never delivered.
        # A tenant_access_token is required before calling the message API.
        access_token = self.get_tenant_access_token()
        if access_token == "":
            log.error("send message access_token is empty")
            return {'ret': 204}
        context = dict()
        # Fixed: read the image-create prefix from the FEISHU config section
        # (was mistakenly reading the DINGTALK section).
        img_match_prefix = functions.check_prefix(
            prompt, channel_conf_val(const.FEISHU, 'image_create_prefix'))
        if img_match_prefix:
            prompt = prompt.split(img_match_prefix, 1)[1].strip()
            context['type'] = 'IMAGE_CREATE'
        context['from_user_id'] = str(sender_id)
        reply = super().build_reply_content(prompt, context)
        if img_match_prefix:
            if not isinstance(reply, list):
                return {'ret': 204}
            images = ""
            for url in reply:
                images += f"![]({url})\n"
            reply = images
        # Echo the answer back to the originating chat.
        self.notify_feishu(access_token, receive_type,
                           receive_id, at_id, reply)
        return {'ret': 200}

    def handle_request_url_verify(self, post_obj):
        """URL verification handshake: echo the challenge field back."""
        challenge = post_obj.get("challenge", "")
        return {'challenge': challenge}
feishu = FeiShuChannel()
http_app = Flask(__name__,)
@http_app.route("/", methods=['POST'])
def chat():
    """Feishu event callback: verify the token, then dispatch by event type."""
    log.info("[FeiShu] chat={}".format(str(request.data)))
    body = json.loads(request.data)
    if not body:
        return {'ret': 201}
    header = body.get("header")
    if not header:
        return {'ret': 201}
    # Reject callbacks whose verification token does not match our app config.
    token = header.get("token", "")
    if token != feishu.verification_token:
        log.error("verification token not match, token = {}", token)
        return {'ret': 201}
    # URL validation handshake from the open platform.
    if body.get("type", "") == "url_verification":
        return feishu.handle_request_url_verify(body)
    # Inbound message event.
    if header.get("event_type", None) == "im.message.receive_v1":
        return feishu.handle(body)
    return {'ret': 202}
return {'ret': 202}
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/feishu/store.py | Python | # -*- coding: UTF-8 -*-
import time
from threading import Lock
class Store(object):
    """Abstract (key, value) storage interface for the SDK.

    Implementations return a (found, value) pair from get() and accept an
    optional expiry (in seconds) in set().
    """

    def get(self, key):  # type: (str) -> Tuple[bool, str]
        """Look up `key`; the base implementation always reports a miss."""
        return (False, '')

    def set(self, key, value, expire):  # type: (str, str, int) -> None
        """Store `value` under `key` with an expiry in seconds; no-op here."""
        return None
class ExpireValue(object):
    """A stored value paired with its absolute expiry timestamp (-1 = never)."""

    def __init__(self, value, expireTime):  # type: (str, int) -> None
        # Keep the raw value and the precomputed absolute expiry together.
        self.value = value
        self.expireTime = expireTime
class MemoryStore(Store):
    """In-memory implementation of `Store`, guarded by a mutex.

    Entries may carry an absolute expiry; expired entries are purged lazily
    on lookup.
    """

    def __init__(self):  # type: () -> None
        self.data = {}  # type: Dict[str, ExpireValue]
        self.mutex = Lock()  # type: Lock

    def get(self, key):  # type: (str) -> Tuple[bool, str]
        """Return (True, value) for a live entry, else (False, '').

        Expired entries are removed as a side effect of the lookup.
        """
        # `with` replaces the manual acquire()/try/finally/release() dance.
        with self.mutex:
            val = self.data.get(key)
            if val is None:
                return False, ""
            if val.expireTime == -1:  # -1 means the entry never expires
                return True, val.value
            if val.expireTime < int(time.time()):
                self.data.pop(key)
                return False, ""
            return True, val.value

    def set(self, key, value, expire=None):  # type: (str, str, int) -> None
        """Store `value` under `key`; `expire` is a TTL in seconds (None = forever)."""
        with self.mutex:
            # Explicit conditional replaces the old `x == None and -1 or ...`
            # short-circuit trick, which was fragile and hard to read.
            expire_at = -1 if expire is None else int(time.time()) + expire
            self.data[key] = ExpireValue(value, expire_at)
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/gmail/gmail_channel.py | Python | import smtplib
import imaplib
import email
import re
import base64
import time
from random import randrange
from email.mime.text import MIMEText
from email.header import decode_header
from channel.channel import Channel
from concurrent.futures import ThreadPoolExecutor
from common import const
from config import channel_conf_val, channel_conf
smtp_ssl_host = 'smtp.gmail.com: 587'
imap_ssl_host = 'imap.gmail.com'
MAX_DELAY = 30
MIN_DELAY = 15
STEP_TIME = 2
LATESTN = 5
wait_time = 0
thread_pool = ThreadPoolExecutor(max_workers=8)
def checkEmail(email):
    """Return True if `email` contains an RFC-like address substring.

    Note: uses re.search, so a valid address embedded in a longer string also
    passes; switch to re.fullmatch if exact matching is ever required.
    """
    regex = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'
    # Direct boolean conversion replaces the old if/else True/False ladder.
    return re.search(regex, email) is not None
def process(max, speed):
    """Back-off helper: sleep (max + 1) steps of `speed` seconds each,
    printing the cumulative wait on one console line, then bump the module
    global `wait_time` by max * speed."""
    global wait_time
    step = 0
    while step <= max:
        step = step + 1
        time.sleep(speed)
        # Carriage return keeps the counter on a single console line.
        print("\r"+"Waited: "+str(step+wait_time)+"s", end='')
    wait_time += max*speed
class GmailChannel(Channel):
    """Email-based channel: polls a Gmail inbox over IMAP for questions and
    mails answers back over SMTP.

    NOTE(review): the question text is taken from the mail *subject* (minus
    its first word, assumed to be the trigger keyword); the parsed body is
    stored in 'content' but is not used by handle().
    """

    def __init__(self):
        self.host_email = channel_conf_val(const.GMAIL, 'host_email')
        self.host_password = channel_conf_val(const.GMAIL, 'host_password')
        # self.addrs_white_list = channel_conf_val(const.GMAIL, 'addrs_white_list')
        self.subject_keyword = channel_conf_val(const.GMAIL, 'subject_keyword')

    def startup(self):
        """Poll loop: fetch questions, answer unseen ones, and back off with a
        random delay while the inbox is quiet."""
        global wait_time
        ques_list = list()
        # Last answered question; used to avoid replying twice to the same
        # (sender, subject) pair.
        lastques = {'from': None, 'subject': None, 'content': None}
        print("INFO: let's go...")
        while(True):
            ques_list = self.receiveEmail()
            if ques_list:
                for ques in ques_list:
                    if ques['subject'] is None:
                        print("WARN: question from:%s is empty " % ques['from'])
                    elif(lastques['subject'] == ques['subject'] and lastques['from'] == ques['from']):
                        print("INFO: this question has already been answered. Q:%s" % (ques['subject']))
                    else:
                        if ques['subject']:
                            print("Nice: a new message coming...", end='\n')
                            self.handle(ques)
                            lastques = ques
                            # Reset the idle counter after real work.
                            wait_time = 0
                        else:
                            print("WARN: the question in subject is empty")
            else:
                # Nothing new: sleep a randomized delay before polling again.
                process(randrange(MIN_DELAY, MAX_DELAY), STEP_TIME)

    def handle(self, question):
        """Build an answer for `question` and queue the reply email.

        Sending happens on the shared thread pool so polling is not blocked.
        """
        message = dict()
        context = dict()
        print("INFO: From: %s Question: %s" % (question['from'], question['subject']))
        context['from_user_id'] = question['from']
        answer = super().build_reply_content(question['subject'], context) #get answer from openai
        message = MIMEText(answer)
        message['subject'] = question['subject']
        message['from'] = self.host_email
        message['to'] = question['from']
        thread_pool.submit(self.sendEmail, message)

    def sendEmail(self, message: MIMEText) -> dict:
        """Deliver one reply over SMTP (STARTTLS) and return send counters.

        :param message: fully-populated MIME message (subject/from/to set)
        :return: dict with 'success'/'failed'/'invalid' counts
        """
        smtp_server = smtplib.SMTP(smtp_ssl_host)
        smtp_server.starttls()
        smtp_server.login(self.host_email, self.host_password)
        output = {'success': 0, 'failed': 0, 'invalid': 0}
        try:
            smtp_server.sendmail(message['from'], message['to'], message.as_string())
            print("sending to {}".format(message['to']))
            output['success'] += 1
        except Exception as e:
            print("Error: {}".format(e))
            output['failed'] += 1
        print("successed:{}, failed:{}".format(output['success'], output['failed']))
        smtp_server.quit()
        return output

    def receiveEmail(self):
        """Fetch recent inbox mail over IMAP and extract questions.

        Only the newest LATESTN messages are inspected; messages whose subject
        matches a configured keyword are parsed, collected, and then deleted
        from the inbox so they are not reprocessed.

        :return: list of {'from', 'subject', 'content'} dicts
        """
        question_list = list()
        question = {'from': None, 'subject': None, 'content': None}
        imap_server = imaplib.IMAP4_SSL(imap_ssl_host)
        imap_server.login(self.host_email, self.host_password)
        imap_server.select('inbox')
        status, data = imap_server.search(None, 'ALL')
        mail_ids = []
        for block in data:
            mail_ids += block.split()
        #only fetch the latest 5 messages
        mail_ids = mail_ids[-LATESTN:]
        for i in mail_ids:
            status, data = imap_server.fetch(i, '(RFC822)')
            for response in data:
                if isinstance(response, tuple):
                    message = email.message_from_bytes(response[1])
                    # Sender address is extracted from "Name <addr>" form.
                    mail_from = message['from'].split('<')[1].replace(">", "")
                    # if mail_from not in self.addrs_white_list:
                    #     continue
                    #subject do not support chinese
                    mail_subject = decode_header(message['subject'])[0][0]
                    if isinstance(mail_subject, bytes):
                        # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xc5
                        try:
                            mail_subject = mail_subject.decode()
                        except UnicodeDecodeError:
                            mail_subject = mail_subject.decode('latin-1')
                    if not self.check_contain(mail_subject, self.subject_keyword):  #check subject here
                        continue
                    if message.is_multipart():
                        mail_content = ''
                        for part in message.get_payload():
                            # `flag` marks nested multipart parts, whose inner
                            # payload is taken as-is instead of base64-decoded.
                            flag=False
                            if isinstance(part.get_payload(), list):
                                part = part.get_payload()[0]
                                flag = True
                            if part.get_content_type() in ['text/plain', 'multipart/alternative']:
                                #TODO some string can't be decode
                                if flag:
                                    mail_content += str(part.get_payload())
                                else:
                                    try:
                                        mail_content += base64.b64decode(str(part.get_payload())).decode("utf-8")
                                    except UnicodeDecodeError:
                                        mail_content += base64.b64decode(str(part.get_payload())).decode('latin-1')
                    else:
                        mail_content = message.get_payload()
                    question['from'] = mail_from
                    # Drop the first subject word (the trigger keyword).
                    question['subject'] = ' '.join(mail_subject.split(' ')[1:])
                    question['content'] = mail_content
                    # print(f'\nFrom: {mail_from}')
                    print(f'\n\nSubject: {mail_subject}')
                    # print(f'Content: {mail_content.replace(" ", "")}')
                    question_list.append(question)
                    question = {'from': None, 'subject': None, 'content': None}
                    imap_server.store(i, "+FLAGS", "\\Deleted")  #delete the mail i
                    print("INFO: deleting mail: %s" % mail_subject)
        imap_server.expunge()
        imap_server.close()
        imap_server.logout()
        return question_list

    def check_contain(self, content, keyword_list):
        """Return True if any keyword occurs in `content`, else None.

        NOTE(review): returns None (not False) on a miss or empty keyword
        list; callers only use it in boolean context, so this is safe.
        """
        if not keyword_list:
            return None
        for ky in keyword_list:
            if content.find(ky) != -1:
                return True
        return None
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/http/auth.py | Python | # encoding:utf-8
import jwt
import datetime
import time
from flask import jsonify, request
from common import const
from config import channel_conf
class Auth():
    """JWT auth helper: issues and validates HS256 tokens whose signing
    secret comes from the HTTP channel config."""

    def __init__(self, login):
        # argument 'privilegeRequired' is to set up your method's privilege
        # name
        self.login = login
        super(Auth, self).__init__()

    @staticmethod
    def encode_auth_token(user_id, login_time):
        """Create a signed auth token.

        :param user_id: int
        :param login_time: datetime
        :return: encoded JWT string (or the raised exception on failure)
        """
        try:
            payload = {
                'iss': 'ken',  # issuer
                'exp': datetime.datetime.utcnow() + datetime.timedelta(days=0, hours=10),  # expiry (10h)
                'iat': datetime.datetime.utcnow(),  # issued-at
                'data': {
                    'id': user_id,
                    'login_time': login_time
                }
            }
            return jwt.encode(
                payload,
                channel_conf(const.HTTP).get('http_auth_secret_key'),
                algorithm='HS256'
            )  # sign and serialize
        except Exception as e:
            # NOTE(review): returning the exception object mirrors the
            # original behavior; callers treat any truthy result as a token.
            return e

    @staticmethod
    def decode_auth_token(auth_token):
        """Validate a token.

        :param auth_token: encoded JWT string
        :return: payload dict on success, or an error-message string
        """
        try:
            # Fixed: `algorithms` expects a *list* of allowed algorithms; the
            # old string form only worked via an accidental substring check.
            # (Pass options={'verify_exp': False} to skip expiry validation.)
            payload = jwt.decode(auth_token, channel_conf(const.HTTP).get(
                'http_auth_secret_key'), algorithms=['HS256'])
            if ('data' in payload and 'id' in payload['data']):
                return payload
            else:
                raise jwt.InvalidTokenError
        except jwt.ExpiredSignatureError:
            return 'Token过期'
        except jwt.InvalidTokenError:
            return '无效Token'
def authenticate(password):
    """Check `password` against the configured HTTP auth password.

    :param password: candidate password from the login form
    :return: encoded auth token on success, False otherwise
    """
    expected = channel_conf(const.HTTP).get('http_auth_password')
    if expected != password:
        return False
    login_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    return Auth.encode_auth_token(password, login_time)
def identify(request):
    """Authorize a request via its 'Authorization' cookie.

    :return: True when no auth password is configured, or when the cookie
        holds a valid token whose embedded password matches the config;
        False otherwise.
    """
    try:
        expected = channel_conf(const.HTTP).get('http_auth_password')
        if not expected:
            # Auth disabled: every request is allowed through.
            return True
        if request is None:
            return False
        token = request.cookies.get('Authorization')
        if not token:
            return False
        payload = Auth.decode_auth_token(token)
        if isinstance(payload, str):
            # decode_auth_token signals failure with an error-message string.
            return False
        return payload['data']['id'] == expected
    except jwt.ExpiredSignatureError:
        #result = 'Token已更改,请重新登录获取'
        return False
    except jwt.InvalidTokenError:
        #result = '没有提供认证token'
        return False
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/http/http_channel.py | Python | # encoding:utf-8
import asyncio
import json
from channel.http import auth
from flask import Flask, request, render_template, make_response
from datetime import timedelta
from common import const
from common import functions
from config import channel_conf
from config import channel_conf_val
from channel.channel import Channel
from flask_socketio import SocketIO
from common import log
from plugins.plugin_manager import *
http_app = Flask(__name__,)
socketio = SocketIO(http_app, close_timeout=5)
# Auto-reload template files when they change on disk
http_app.jinja_env.auto_reload = True
http_app.config['TEMPLATES_AUTO_RELOAD'] = True
# Static file cache expiry of one second (effectively disables caching)
http_app.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1)
async def return_stream(data):
    """Stream a reply to the current websocket client chunk by chunk.

    Intermediate chunks are emitted as 'message' events; the final chunk is
    emitted as a 'disconnect' event carrying the complete result, after which
    the client session is torn down. Any emit failure also disconnects.
    """
    async for final, response in HttpChannel().handle_stream(data=data):
        try:
            if (final):
                socketio.server.emit(
                    'disconnect', {'result': response, 'final': final}, request.sid, namespace="/chat")
                disconnect()
            else:
                socketio.server.emit(
                    'message', {'result': response, 'final': final}, request.sid, namespace="/chat")
        except Exception as e:
            disconnect()
            log.warn("[http]emit:{}", e)
            break
@socketio.on('message', namespace='/chat')
def stream(data):
    """Websocket entry point: authenticate, then answer the request.

    Image-creation prompts get a single non-streamed reply; everything else
    is streamed back via return_stream().
    """
    if (auth.identify(request) == False):
        client_sid = request.sid
        socketio.server.disconnect(client_sid)
        return
    # The client wraps its JSON payload in a 'data' field.
    data = json.loads(data["data"])
    if (data):
        img_match_prefix = functions.check_prefix(
            data["msg"], channel_conf_val(const.HTTP, 'image_create_prefix'))
        if img_match_prefix:
            # Image replies are produced in one shot, not streamed.
            reply_text = HttpChannel().handle(data=data)
            socketio.emit(
                'disconnect', {'result': reply_text}, namespace='/chat')
            disconnect()
            return
        asyncio.run(return_stream(data))
@socketio.on('connect', namespace='/chat')
def connect():
    """Acknowledge a new websocket connection on the /chat namespace."""
    log.info('connected')
    socketio.emit('message', {'info': "connected"}, namespace='/chat')
@socketio.on('disconnect', namespace='/chat')
def disconnect():
    """Tear down the current client's websocket session."""
    log.info('disconnect')
    socketio.server.disconnect(request.sid, namespace="/chat")
@http_app.route("/chat", methods=['POST'])
def chat():
    """Synchronous chat endpoint: expects a JSON body with 'msg' and 'id'.

    Unauthenticated or empty requests produce no body (original behavior).
    """
    if not auth.identify(request):
        return
    payload = json.loads(request.data)
    if not payload:
        return
    if not payload['msg']:
        return
    return {'result': HttpChannel().handle(data=payload)}
@http_app.route("/", methods=['GET'])
def index():
    """Serve the chat UI when authenticated; otherwise show the login flow."""
    if auth.identify(request):
        return render_template('index.html')
    return login()
@http_app.route("/login", methods=['POST', 'GET'])
def login():
    """Login page / form handler.

    Already-authenticated requests are redirected to '/'. A POST with the
    correct password sets the auth token cookie and redirects; a GET renders
    the login form; a failed POST falls through to a redirect back to the
    login page carrying an error flag.
    """
    # Pre-built redirect-to-root response, reused by the success paths.
    response = make_response("<html></html>", 301)
    response.headers.add_header('content-type', 'text/plain')
    response.headers.add_header('location', './')
    if (auth.identify(request) == True):
        return response
    else:
        if request.method == "POST":
            token = auth.authenticate(request.form['password'])
            if (token != False):
                # Token travels back as a cookie checked by auth.identify().
                response.set_cookie(key='Authorization', value=token)
                return response
        else:
            return render_template('login.html')
    # Failed POST: bounce back to the login page with an error flag.
    response.headers.set('location', './login?err=登录失败')
    return response
class HttpChannel(Channel):
    """HTTP/websocket channel: wraps the reply engine with plugin hooks."""

    def startup(self):
        """Start the Flask app on the configured port (blocking)."""
        http_app.run(host='0.0.0.0', port=channel_conf(const.HTTP).get('port'))

    def handle(self, data):
        """Produce a complete (non-streamed) reply for a {'msg', 'id'} payload.

        Plugins may answer first (ON_HANDLE_CONTEXT) and may post-process the
        reply afterwards (ON_DECORATE_REPLY).
        """
        context = dict()
        query = data["msg"]
        id = data["id"]
        context['from_user_id'] = str(id)
        context['channel'] = self
        e_context = PluginManager().emit_event(EventContext(Event.ON_HANDLE_CONTEXT, {
            'channel': self, 'context': query, "args": context}))
        reply = e_context['reply']
        # Only call the model when no plugin fully handled the event.
        if not e_context.is_pass():
            reply = super().build_reply_content(e_context["context"], e_context["args"])
        e_context = PluginManager().emit_event(EventContext(Event.ON_DECORATE_REPLY, {
            'channel': self, 'context': context, 'reply': reply, "args": context}))
        reply = e_context['reply']
        return reply

    async def handle_stream(self, data):
        """Async generator yielding (final, chunk) pairs for streamed replies.

        If a plugin fully handles the event, a single (True, reply) pair is
        yielded; otherwise chunks come from build_reply_stream().
        """
        context = dict()
        id = data["id"]
        context['from_user_id'] = str(id)
        context['stream'] = True
        context['origin'] = data["msg"]
        context['channel'] = self
        e_context = PluginManager().emit_event(EventContext(Event.ON_HANDLE_CONTEXT, {
            'channel': self, 'context': data["msg"], 'reply': data["msg"], "args": context}))
        reply = e_context['reply']
        if not e_context.is_pass():
            async for final, reply in super().build_reply_stream(data["msg"], context):
                yield final, reply
        else:
            yield True, reply
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/http/static/1.css | CSS | .typing_loader {
width: 6px;
height: 6px;
border-radius: 50%;
-webkit-animation: typing 1s linear infinite alternate;
-moz-animation: typing 1s linear infinite alternate;
-ms-animation: typing 1s linear infinite alternate;
animation: typing 1s linear infinite alternate;
position: relative;
left: -12px;
margin: 7px 15px 6px;
}
ol,
pre {
background-color: #b1e3b1c4;
border: 1px solid #c285e3ab;
padding: 0.5rem 1.5rem 0.5rem;
color: black;
border-radius: 10px;
overflow-y: auto;
}
pre::-webkit-scrollbar {
width: 0px;
height: 5px;
}
pre::-webkit-scrollbar-thumb {
border-right: 10px #ffffff00 solid;
border-left: 10px #ffffff00 solid;
-webkit-box-shadow: inset 0 0 6px rgba(0, 0, 0, .3);
}
.to .typing_loader {
animation: typing-black 1s linear infinite alternate;
}
@-webkit-keyframes typing {
0% {
background-color: rgba(255, 255, 255, 1);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 0.2);
}
50% {
background-color: rgba(255, 255, 255, 0.4);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 1), 24px 0px 0px 0px rgba(255, 255, 255, 0.4);
}
100% {
background-color: rgba(255, 255, 255, 0.2);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 1);
}
}
@-moz-keyframes typing {
0% {
background-color: rgba(255, 255, 255, 1);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 0.2);
}
50% {
background-color: rgba(255, 255, 255, 0.4);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 1), 24px 0px 0px 0px rgba(255, 255, 255, 0.4);
}
100% {
background-color: rgba(255, 255, 255, 0.2);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 1);
}
}
@keyframes typing-black {
0% {
background-color: rgba(74, 74, 74, 1);
box-shadow: 12px 0px 0px 0px rgba(74, 74, 74, 0.4), 24px 0px 0px 0px rgba(74, 74, 74, 0.2);
}
50% {
background-color: rgba(74, 74, 74, 0.4);
box-shadow: 12px 0px 0px 0px rgba(74, 74, 74, 1), 24px 0px 0px 0px rgba(74, 74, 74, 0.4);
}
100% {
background-color: rgba(74, 74, 74, 0.2);
box-shadow: 12px 0px 0px 0px rgba(74, 74, 74, 0.4), 24px 0px 0px 0px rgba(74, 74, 74, 1);
}
}
@keyframes typing {
0% {
background-color: rgba(255, 255, 255, 1);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 0.2);
}
50% {
background-color: rgba(255, 255, 255, 0.4);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 1), 24px 0px 0px 0px rgba(255, 255, 255, 0.4);
}
100% {
background-color: rgba(255, 255, 255, 0.2);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 1);
}
}
.convFormDynamic {
text-align: center;
margin: 10px 10px;
padding: 0 !important;
position: relative;
border: 2px solid rgba(0, 40, 100, 0.12);
}
.convFormDynamic textarea.userInputDynamic {
border: none;
padding: 7px 10px;
overflow-x: hidden !important;
outline: none;
font-size: 0.905rem;
float: left;
width: calc(100% - 70px);
line-height: 1.3em;
min-height: 2em;
max-height: 10rem;
display: block;
max-width: 89vw;
margin-right: -1vw;
resize: none;
}
.convFormDynamic textarea::-webkit-scrollbar {
width: 2px;
background-color: lawngreen;
}
.convFormDynamic textarea::-webkit-scrollbar-thumb {
-webkit-box-shadow: inset 0 0 6px rgba(0, 0, 0, .3);
background-color: dodgerblue;
}
.convFormDynamic input.userInputDynamic {
border: none;
padding: 7px 10px;
outline: none;
font-size: 0.905rem;
float: left;
width: calc(100% - 70px);
line-height: 1.3em;
min-height: 1.7em;
max-height: 10rem;
display: block;
max-width: 89vw;
margin-right: -1vw;
}
div.conv-form-wrapper div#messages {
max-height: 71vh;
height: auto !important;
overflow-y: scroll;
}
div.conv-form-wrapper div#messages:after {
content: '';
display: table;
clear: both;
}
div.conv-form-wrapper {
position: relative;
}
div.conv-form-wrapper div.wrapper-messages {
position: relative;
height: 76vh;
max-height: 80vh;
overflow-y: scroll;
}
div.conv-form-wrapper:before {
content: '';
position: absolute;
width: 100%;
display: block;
height: 30px;
top: 0;
left: 0;
z-index: 2;
background: linear-gradient(#ffffff3b, transparent);
}
@media (max-width: 767px) {
div.conv-form-wrapper div.wrapper-messages,
div.conv-form-wrapper div#messages {
max-height: 71vh;
}
}
div.conv-form-wrapper div.wrapper-messages::-webkit-scrollbar,
div#feed ul::-webkit-scrollbar,
div.conv-form-wrapper div.options::-webkit-scrollbar {
width: 0px;
height: 0px;
/* remove scrollbar space */
background: transparent;
/* optional: just make scrollbar invisible */
}
input[type="text"].userInputDynamic.error {
color: #ac0000 !important;
}
input[type="text"].userInputDynamic {
border-radius: 3px;
margin: 7px 10px;
}
textarea.userInputDynamic.error {
color: #ac0000 !important;
}
textarea.userInputDynamic {
border-radius: 3px;
margin: 7px 10px;
}
div.conv-form-wrapper div#messages {
transition: bottom 0.15s, padding-bottom 0.15s;
position: absolute;
bottom: 0;
height: auto !important;
width: 100%;
padding-bottom: 20px;
/*max-height: 71vh;*/
}
div.conv-form-wrapper div.message {
animation: slideTop 0.15s ease;
}
div.conv-form-wrapper div.message:after {
content: '';
display: table;
clear: both;
}
div.conv-form-wrapper div.message.ready {
animation: bounceIn 0.2s ease;
transform-origin: 0 0 0;
}
div.conv-form-wrapper div#messages div.message {
border-radius: 20px;
padding: 12px 22px;
font-size: 0.905rem;
display: inline-block;
padding: 10px 15px 8px;
border-radius: 20px;
margin-bottom: 5px;
float: right;
clear: both;
max-width: 65%;
word-wrap: break-word;
}
div.conv-form-wrapper div#messages div.message.to {
float: left;
background: lawngreen;
border-top-left-radius: 0;
}
div.conv-form-wrapper div#messages div.message.from {
background: dodgerblue;
color: #fff;
border-top-right-radius: 0;
}
.message.to+.message.from,
.message.from+.message.to {
margin-top: 15px;
}
@keyframes slideTop {
0% {
margin-bottom: -25px;
}
100% {
margin-bottom: 0;
}
}
@keyframes bounceIn {
0% {
transform: scale(0.75, 0.75);
}
100% {
transform: scale(1.0, 1.0);
}
}
.convFormDynamic button.submit {
position: absolute;
bottom: 0px;
border: none;
left: 95%;
margin: 5px;
color: #fff;
cursor: pointer;
border-radius: 8px;
font-size: 1.6rem;
width: 50px;
height: 42px;
border: 1px solid #b7b7b7;
background: #c3c3c3;
outline: none !important;
}
.center-block {
margin-right: 0;
margin-left: 0;
float: none;
text-align: center;
}
button.submit.glow {
border: 1px solid dodgerblue !important;
background: dodgerblue !important;
box-shadow: 0 0 5px 2px rgba(14, 144, 255, 0.4);
}
.no-border {
border: none !important;
}
.dragscroll {
cursor: grab;
}
div.conv-form-wrapper div#messages::-webkit-scrollbar,
div#feed ul::-webkit-scrollbar {
width: 0px;
/* remove scrollbar space */
background: transparent;
/* optional: just make scrollbar invisible */
}
span.clear {
display: block;
clear: both;
}
.drawer-icon-container {
position: fixed;
top: calc(50% - 24px);
right: -30px;
z-index: 1000;
transition: right 0.5s ease;
}
.drawer-icon {
width: 30px;
height: 30px;
cursor: pointer;
box-shadow: 0 0 10px rgba(0, 0, 0, 0.3);
background-color: #b1cee350;
padding-left: 22px;
border-radius: 50%;
}
.drawer-icon:hover{
background-color: #005eff96;
}
.wrenchFilled.icon {
margin-left: -13px;
margin-top: 5px;
width: 10px;
height: 10px;
border-radius: 50%;
background-color: #333333;
transform-origin: center 10.5px;
transform: rotate(-45deg);
}
.wrenchFilled.icon:after {
width: 0;
height: 0;
border-radius: 0 0 1px 1px;
background-color: #333333;
border-left: solid 1px transparent;
border-right: solid 1px transparent;
border-top: solid 1px white;
border-bottom: solid 1px transparent;
left: 4px;
top: 4px;
}
.wrenchFilled.icon:before {
width: 2px;
height: 5px;
background-color: white;
left: 4px;
border-radius: 0 0 1px 1px;
box-shadow: 0 15px 0px 1px #333333, 0 11px 0px 1px #333333, 0 8px 0px 1px #333333;
}
.icon {
position: absolute;
}
.icon:before,
.icon:after {
content: '';
position: absolute;
display: block;
}
.icon i {
position: absolute;
}
.icon i:before,
.icon i:after {
content: '';
position: absolute;
display: block;
}
.drawer-icon i {
margin-left: -15px;
line-height: 30px;
font-weight: bolder;
}
.drawer {
position: fixed;
top: 0;
right: -300px;
width: 300px;
height: 100%;
background-color: #fff;
z-index: 999;
transition: right 0.5s ease;
display: flex;
flex-direction: column;
}
.drawer.open {
right: 0;
}
.drawer-header {
display: flex;
justify-content: space-between;
align-items: center;
background-color: #b1cee350;
border-bottom: 1px solid #ddd;
padding: 16px;
}
.drawer-header h2 {
margin: 0 0 0 16px;
}
.drawer-header button {
background-color: transparent;
border: none;
cursor: pointer;
}
.drawer-content {
flex: 1 1 auto;
height: 100%;
overflow: auto;
padding: 16px;
}
.drawer-overlay {
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
background-color: rgba(0, 0, 0, 0.5);
z-index: 998;
display: none;
}
/* Expanding ripple played when a radio option is checked.
   Consistency fix: the -webkit- copy previously used different sizes
   (40→60px) and offsets (+80px margins) than the -moz-/-o-/standard copies;
   all four vendor variants are now identical. */
@-webkit-keyframes click-wave {
    0% {
        width: 30px;
        height: 30px;
        opacity: 0.35;
        position: relative;
    }
    100% {
        width: 80px;
        height: 80px;
        margin-left: -23px;
        margin-top: -23px;
        opacity: 0.0;
    }
}
@-moz-keyframes click-wave {
    0% {
        width: 30px;
        height: 30px;
        opacity: 0.35;
        position: relative;
    }
    100% {
        width: 80px;
        height: 80px;
        margin-left: -23px;
        margin-top: -23px;
        opacity: 0.0;
    }
}
@-o-keyframes click-wave {
    0% {
        width: 30px;
        height: 30px;
        opacity: 0.35;
        position: relative;
    }
    100% {
        width: 80px;
        height: 80px;
        margin-left: -23px;
        margin-top: -23px;
        opacity: 0.0;
    }
}
@keyframes click-wave {
    0% {
        width: 30px;
        height: 30px;
        opacity: 0.35;
        position: relative;
    }
    100% {
        width: 80px;
        height: 80px;
        margin-left: -23px;
        margin-top: -23px;
        opacity: 0.0;
    }
}
/* Custom radio/checkbox control with native appearance suppressed.
   Fixes: a <time> in the transition shorthand must carry a unit — the
   original unitless "0" delay made every transition declaration invalid
   (silently dropped); the duplicated "position: relative" is removed. */
.option-input {
    -webkit-appearance: none;
    -moz-appearance: none;
    -ms-appearance: none;
    -o-appearance: none;
    appearance: none;
    position: relative;
    top: 10px;
    width: 30px;
    height: 30px;
    -webkit-transition: all 0.15s ease-out 0s;
    -moz-transition: all 0.15s ease-out 0s;
    transition: all 0.15s ease-out 0s;
    background: #cbd1d8;
    border: none;
    color: #fff;
    cursor: pointer;
    display: inline-block;
    outline: none;
    margin-right: 0.5rem;
    z-index: 1000;
}
.option-input:hover {
    background: #9faab7;
}
.option-input:checked {
    background: #1e90ffaa;
}
/* Checked marker: a centered smiley glyph drawn via ::before. */
.option-input:checked::before {
    width: 30px;
    height: 30px;
    position: absolute;
    content: '☻';
    display: inline-block;
    font-size: 29px;
    text-align: center;
    line-height: 26px;
}
/* One-shot ripple (see click-wave keyframes) played on selection. */
.option-input:checked::after {
    -webkit-animation: click-wave 0.65s;
    -moz-animation: click-wave 0.65s;
    animation: click-wave 0.65s;
    background: #40e0d0;
    content: '';
    display: block;
    position: relative;
    z-index: 100;
}
.option-input.radio {
    border-radius: 50%;
}
.option-input.radio::after {
    border-radius: 50%;
}
channel/http/static/1.js | JavaScript |
function generateUUID() {
    // Build an RFC 4122 v4-style identifier from Math.random().  Each 'x'
    // slot becomes an arbitrary hex digit; the single 'y' slot is forced
    // into 8/9/a/b as the UUID variant field requires.
    var template = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx';
    return template.replace(/[xy]/g, function (slot) {
        var nibble = (Math.random() * 16) | 0;
        if (slot === 'y') {
            nibble = (nibble & 0x3) | 0x8;
        }
        return nibble.toString(16);
    });
}
// Reply-delivery modes selectable in the settings drawer:
// DISPOSABLE = single one-shot HTTP reply, STREAM = incremental socket.io updates.
const conversationType = {
    DISPOSABLE: 1,
    STREAM: 1 << 1
}
// Per-conversation UI state: a random session id (sent with every request),
// the wrapped DOM elements, and the plugin parameters.  scrollDown animates
// the message list so the newest message stays visible.
function ConvState(wrapper, form, params) {
    this.id = generateUUID()
    this.form = form;
    this.wrapper = wrapper;
    this.backgroundColor = '#ffffff';
    this.parameters = params;
    this.scrollDown = function () {
        $(this.wrapper).find('#messages').stop().animate({ scrollTop: $(this.wrapper).find('#messages')[0].scrollHeight }, 600);
    }.bind(this);
};
// Render `answer` (markdown) into the placeholder bubble identified by `uuid`
// and mark it ready.  When called without an answer, the default Chinese
// self-introduction is shown (used for the greeting bubble on page load).
ConvState.prototype.printAnswer = function (uuid, answer = '我是ChatGPT, 一个由OpenAI训练的大型语言模型, 我旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。输入 #清除记忆 可以开始新的话题探索。输入 画xx 可以为你画一张图片。我无法对事实性与实时性问题提供准确答复,请慎重对待回答。') {
    setTimeout(function () {
        var messageObj = $(this.wrapper).find(`#${uuid}`);
        // Convert markdown to HTML before injecting into the bubble.
        answer = marked.parse(answer);
        messageObj.html(answer);
        messageObj.removeClass('typing').addClass('ready');
        this.scrollDown();
        $(this.wrapper).find(this.parameters.inputIdHashTagName).focus();
    }.bind(this), 500);
};
// Stream the answer over socket.io, updating the placeholder bubble `uuid`
// as partial results arrive.  The connection ends either when the server
// disconnects (which may carry the final result) or when a 60s inactivity
// watchdog fires.
// Fixes: declared `answer` with var (it leaked as an implicit global), and
// removed a stray extra `</div>` appended after the typing-loader markup.
ConvState.prototype.updateAnswer = function (question, uuid) {
    setTimeout(function () {
        var socket = io('/chat');
        socket.connect('/chat');
        let timerId;
        var _this = this
        // Watchdog: if nothing arrives within the window, drop the connection
        // manually so the bubble does not stay in the "typing" state forever.
        function setTimer() {
            timerId = setTimeout(() => {
                if (socket.connected) {
                    socket.disconnect();
                    handle_disconnect();
                }
            }, 60000);
        }
        function resetTimer() {
            clearTimeout(timerId);
            setTimer();
        }
        setTimer();
        var messageObj = $(this.wrapper).find(`#${uuid}`);
        function handle_disconnect() {
            messageObj.removeClass('typing').addClass('ready');
            _this.scrollDown();
            $(_this.wrapper).find(_this.parameters.inputIdHashTagName).focus();
        }
        this.scrollDown();
        socket.on('message', msg => {
            // Every partial result resets the inactivity watchdog.
            resetTimer();
            if (msg.result)
                messageObj.html(msg.result + `<div class="typing_loader"></div>`);
            this.scrollDown();
        });
        socket.on('connect', msg => {
            socket.emit('message', { data: JSON.stringify(question) });
        });
        socket.on('disconnect', msg => {
            // NOTE(review): socket.io normally passes a reason string here;
            // msg.result presumably only exists with a custom server — confirm.
            if (msg.result) {
                var answer = marked.parse(msg.result);
                messageObj.html(answer);
            }
            handle_disconnect()
        });
    }.bind(this), 1000);
};
// Append the user's message bubble plus a "typing" placeholder, then obtain
// the answer either over socket.io (stream mode) or via a one-shot AJAX POST,
// depending on the persisted conversationType setting.
ConvState.prototype.sendMessage = function (msg) {
    var message = $('<div class="message from">' + msg + '</div>');
    $('button.submit').removeClass('glow');
    $(this.wrapper).find(this.parameters.inputIdHashTagName).focus();
    setTimeout(function () {
        $(this.wrapper).find("#messages").append(message);
        this.scrollDown();
    }.bind(this), 100);
    var uuid = generateUUID().toLowerCase();
    var messageObj = $(`<div class="message to typing" id="${uuid}"><div class="typing_loader"></div></div>`);
    setTimeout(function () {
        $(this.wrapper).find('#messages').append(messageObj);
        this.scrollDown();
    }.bind(this), 150);
    var _this = this
    var question = { "id": _this.id, "msg": msg }
    // Loose == tolerates the radio value being persisted as the string "2".
    if (localConfig.conversationType == conversationType.STREAM)
        this.updateAnswer(question, uuid)
    else
        $.ajax({
            url: "./chat",
            type: "POST",
            timeout: 180000,
            data: JSON.stringify(question),
            contentType: "application/json; charset=utf-8",
            dataType: "json",
            success: function (data) {
                _this.printAnswer(uuid, data.result)
            },
            error: function (data) {
                console.log(data)
                _this.printAnswer(uuid, "网络故障,对话未送达")
            },
        })
};
(function ($) {
    /**
     * jQuery plugin: turns the selected element into the chat UI.
     * Appends the messages pane and a dynamically built input bar, wires
     * Enter/submit handlers, prints the greeting, and returns the ConvState.
     */
    $.fn.convform = function () {
        var wrapper = this;
        $(this).addClass('conv-form-wrapper');
        // No options argument is accepted, so this extend only clones the
        // defaults — presumably kept for future configurability.
        var parameters = $.extend(true, {}, {
            placeHolder: 'Type Here',
            typeInputUi: 'textarea',
            formIdName: 'convForm',
            inputIdName: 'userInput',
            buttonText: '▶'
        });
        //hides original form so users cant interact with it
        var form = $(wrapper).find('form').hide();
        var inputForm;
        parameters.inputIdHashTagName = '#' + parameters.inputIdName;
        // Fixed: the container string opens with <div> but previously closed
        // with a mismatched </form>.
        inputForm = $('<div id="' + parameters.formIdName + '" class="convFormDynamic"><div class="options dragscroll"></div><textarea id="' + parameters.inputIdName + '" rows="1" placeholder="' + parameters.placeHolder + '" class="userInputDynamic"></textarea><button type="submit" class="submit">' + parameters.buttonText + '</button><span class="clear"></span></div>');
        //appends messages wrapper and newly created form with the spinner load
        $(wrapper).append('<div class="wrapper-messages"><div class="spinLoader"></div><div id="messages"></div></div>');
        $(wrapper).append(inputForm);
        var state = new ConvState(wrapper, form, parameters);
        // Bind checkbox values to ConvState object
        $('input[type="checkbox"]').change(function () {
            var key = $(this).attr('name');
            state[key] = $(this).is(':checked');
        });
        // Bind radio button values to ConvState object
        $('input[type="radio"]').change(function () {
            var key = $(this).attr('name');
            state[key] = $(this).val();
        });
        // Bind color input value to ConvState object
        $('#backgroundColor').change(function () {
            state["backgroundColor"] = $(this).val();
        });
        //prints first contact (default greeting bubble)
        $.when($('div.spinLoader').addClass('hidden')).done(function () {
            var uuid = generateUUID()
            var messageObj = $(`<div class="message to typing" id="${uuid}"><div class="typing_loader"></div></div>`);
            $(state.wrapper).find('#messages').append(messageObj);
            state.scrollDown();
            state.printAnswer(uuid = uuid);
        });
        //binds enter to send message
        $(inputForm).find(parameters.inputIdHashTagName).keypress(function (e) {
            if (e.which == 13) {
                var input = $(this).val();
                e.preventDefault();
                if (input.trim() != '' && !state.wrapper.find(parameters.inputIdHashTagName).hasClass("error")) {
                    $(parameters.inputIdHashTagName).val("");
                    state.sendMessage(input);
                } else {
                    $(state.wrapper).find(parameters.inputIdHashTagName).focus();
                }
            }
            autosize.update($(state.wrapper).find(parameters.inputIdHashTagName));
        })
        // Glow the send button while the textarea has content.
        $(inputForm).find(parameters.inputIdHashTagName).on('input', function (e) {
            if ($(this).val().length > 0) {
                $('button.submit').addClass('glow');
            } else {
                $('button.submit').removeClass('glow');
            }
        });
        $(inputForm).find('button.submit').click(function (e) {
            var input = $(state.wrapper).find(parameters.inputIdHashTagName).val();
            e.preventDefault();
            if (input.trim() != '' && !state.wrapper.find(parameters.inputIdHashTagName).hasClass("error")) {
                $(parameters.inputIdHashTagName).val("");
                state.sendMessage(input);
            } else {
                $(state.wrapper).find(parameters.inputIdHashTagName).focus();
            }
            autosize.update($(state.wrapper).find(parameters.inputIdHashTagName));
        });
        // Enable auto-growing textarea when the autosize library is loaded.
        if (typeof autosize == 'function') {
            $textarea = $(state.wrapper).find(parameters.inputIdHashTagName);
            autosize($textarea);
        }
        return state;
    }
})(jQuery);
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/http/templates/index.html | HTML | <!doctype html>
<html lang="en" dir="ltr">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta http-equiv="X-UA-Compatible" content="IE=edge"><!-- Title -->
<title>ChatGPT</title><!-- Bootstrap Css -->
<link href="./static/1.css" rel="stylesheet" />
<style>
button {
font-family: 'Microsoft YaHei';
}
</style>
</head>
<body class="">
<div class="no-border">
<div id="chat" class="conv-form-wrapper">
</div>
</div>
<div class="drawer-icon-container">
<div class="drawer-icon">
<div class="wrenchFilled icon"></div>
</div>
<div class="drawer">
<div class="drawer-header">
<h2>设置</h2>
<button id="close-drawer">X</button>
</div>
<div class="drawer-content">
<div hidden="true">
<input type="checkbox" id="bold" name="bold">
<label for="bold">Bold</label>
<input type="checkbox" id="italic" name="italic">
<label for="italic">Italic</label>
</div>
<div>
<label for="backgroundColor">背景颜色:</label>
<input type="color" id="backgroundColor" name="backgroundColor" value="#ffffff">
</div>
<div>
<p>AI回复方式:</p>
<input type="radio" id="option1" name="conversationType" class="option-input radio" value=1 checked>
<label for="option1">一次性发送</label>
<input type="radio" id="option2" name="conversationType" class="option-input radio" value=2>
<label for="option2">逐段发送</label>
</div>
</div>
</div>
</div>
<div class="drawer-overlay"></div>
<script src="https://cdn.bootcdn.net/ajax/libs/jquery/3.6.3/jquery.min.js"></script>
<script src="https://cdn.bootcdn.net/ajax/libs/marked/4.2.12/marked.min.js"></script>
<script src="https://cdn.bootcdn.net/ajax/libs/autosize.js/6.0.1/autosize.min.js"></script>
<script src="https://cdn.bootcdn.net/ajax/libs/socket.io/4.6.1/socket.io.js"></script>
<script src="./static/1.js"></script>
<script>
var rollbackTo = false;
var originalState = false;
function storeState(a) {
rollbackTo = a.current
}
function rollback(a) {
if (rollbackTo != false) {
if (originalState == false) {
originalState = a.current.next
}
a.current.next = rollbackTo
}
}
function restore(a) {
if (originalState != false) {
a.current.next = originalState
}
}
var ConvStateMap = {
bold: false,
italic: false,
backgroundColor: '#ffffff',
conversationType: conversationType.DISPOSABLE
};
// Create a Proxy object to watch all properties of the "ConvStateMap" object
var localConfig = new Proxy(ConvStateMap, {
set: function (target, prop, val) {
target[prop] = val;
// Call your function here
localStorage.setItem('botOnAnyThingConfig', JSON.stringify(localConfig))
switch (prop) {
case 'backgroundColor':
$('body').css('background-color', val);
$(`#backgroundColor`)?.val(val);
break;
case 'conversationType':
if (val)
$(`#option${val}`)?.prop("checked", true);
}
}
});
$(document).ready(function () {
let config = localStorage.getItem('botOnAnyThingConfig')
if (config) {
config = JSON.parse(config)
Object.keys(config).forEach(item => localConfig[item] = config[item])
}
// Open drawer
$('.drawer-icon').click(function () {
if (!$('.drawer').hasClass('open')) {
$('.drawer').toggleClass('open');
$('.drawer-overlay').fadeIn();
$('.drawer-icon-container').toggleClass('open').css('right', '270px');
} else
closeDrawer()
});
// Close drawer
$('#close-drawer, .drawer-overlay').click(closeDrawer);
function closeDrawer() {
$('.drawer').removeClass('open');
$('.drawer-overlay').fadeOut();
$('.drawer-icon-container').removeClass('open').css('right', '-30px');
}
});
// Bind checkbox values to ConvStateMap object
$('input[type="checkbox"]').change(function () {
var key = $(this).attr('name');
if (key)
localConfig[key] = $(this).is(':checked');
});
// Bind radio button values to ConvStateMap object
$('input[type="radio"]').change(function () {
var key = $(this).attr('name');
if (key)
localConfig[key] = $(this).val();
});
// Bind color input value to ConvStateMap object
$('#backgroundColor').on("input", function (e) {
localConfig.backgroundColor = $(this).val();
});
$(window).on('unload', function () {
socket.disconnect();
});
jQuery(function (a) {
var b = a("#chat").convform()
});
</script>
</body>
</html> | zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/http/templates/login.html | HTML | <!doctype html>
<html lang="en" dir="ltr">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta http-equiv="X-UA-Compatible" content="IE=edge"><!-- Title -->
<title>登录</title><!-- Bootstrap Css -->
<style>
.login-form {
box-shadow: 0 2px 12px 0 rgba(0, 0, 0, 0.5);
border-radius: 8px;
width: 350px;
max-width: 100%;
padding: 15px 35px 15px;
margin: auto;
position: absolute;
top: 50%;
left: 50%;
margin: -160px 0 0 -200px;
}
.Button {
width: 80px;
margin: 3px 1px 0 5px;
padding: 0 10px;
background-color: #16a0d3;
border: none;
display: inline-block;
font-family: "Microsoft Yahei";
font-size: 12px;
height: 27px;
color: #FFF;
border-radius: 5px;
}
</style>
</head>
<body class="">
<form name="login" class="login-form" action="./login" method="post" autocomplete="off">
<input type="password" name="password" placeholder="Password" style="border: none; height: 25px;width: 250px;"
required>
</input>
<input type="submit" class="Button" value="登录" />
<span style="color:red">
<p id="err"></p>
</span>
</form>
</body>
<script src="https://cdn.bootcdn.net/ajax/libs/jquery/3.6.3/jquery.min.js"></script>
<script>
$(function () {
var err=getUrlParam('err')
$('#err')[0].innerHTML=err
});
function getUrlParam(name) {
var reg = new RegExp("(^|&)" + name + "=([^&]*)(&|$)");
var r = window.location.search.substr(1).match(reg);
if (r != null) return decodeURI(r[2]); return null;
}
</script>
</html> | zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/qq/qq_channel.py | Python | from channel.channel import Channel
from aiocqhttp import CQHttp, Event
from common import log
from concurrent.futures import ThreadPoolExecutor
bot = CQHttp(api_root='http://127.0.0.1:5700')
thread_pool = ThreadPoolExecutor(max_workers=8)
@bot.on_message('private')
def handle_private_msg(event: Event):
    # Entry point for one-to-one (private) messages posted by go-cqhttp.
    log.info("event: {}", event)
    QQChannel().handle(event)
@bot.on_message('group')
def handle_group_msg(event: Event):
    # Entry point for group messages.  Renamed from the duplicated
    # ``handle_private_msg`` so it no longer shadows the private handler
    # above (registration happened at decoration time so both worked, but
    # the duplicate name was misleading).
    log.info("event: {}", event)
    QQChannel().handle_group(event)
class QQChannel(Channel):
    """QQ channel built on aiocqhttp / the go-cqhttp HTTP API."""

    def startup(self):
        """Start the event server that go-cqhttp posts events to."""
        bot.run(host='127.0.0.1', port=8080)

    # private chat
    def handle(self, msg):
        """Dispatch a private message to the worker pool."""
        thread_pool.submit(self._do_handle, msg)

    def _do_handle(self, msg):
        """Build a model reply for a private message and send it back."""
        context = dict()
        log.info("event: {}", "do_handle")
        context['from_user_id'] = msg.user_id
        reply_text = super().build_reply_content(msg.message, context)
        bot.sync.send_private_msg(user_id=msg.user_id, message=reply_text)

    # group chat
    def handle_group(self, msg):
        """Dispatch a group message to the worker pool."""
        thread_pool.submit(self._do_handle_group, msg)

    def _do_handle_group(self, msg):
        """Reply in a group only when the bot itself is @-mentioned.

        Bug fix: the original tested ``msg.message.find('CQ:at')`` as a
        boolean, but ``str.find`` returns -1 (truthy) when the substring is
        absent and 0 (falsy) when it is at position 0 — the opposite of a
        containment test.  A membership test expresses the intent correctly
        and avoids an IndexError on messages without a CQ:at code.
        """
        context = dict()
        if msg.message and 'CQ:at' in msg.message:
            # Extract the @-target's QQ number from the CQ code,
            # e.g. "[CQ:at,qq=12345] hello" -> "12345".
            receiver = msg.message.split('qq=')[1].split(']')[0]
            if receiver == str(msg['self_id']):
                text_list = msg.message.split(']', 2)
                if len(text_list) == 2 and len(text_list[1]) > 0:
                    query = text_list[1].strip()
                    context['from_user_id'] = str(msg.user_id)
                    reply_text = super().build_reply_content(query, context)
                    # Mention the asker back in the group reply.
                    reply_text = '[CQ:at,qq=' + str(msg.user_id) + '] ' + reply_text
                    bot.sync.send_group_msg(group_id=msg['group_id'], message=reply_text)
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/slack/slack_channel.py | Python | import re
from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
from common import const
from common.log import logger
from channel.channel import Channel
from config import channel_conf
# 创建 Slack Bolt 实例
app = App(token=channel_conf(const.SLACK).get('slack_bot_token'))
# 创建 SocketModeHandler 实例
handler = SocketModeHandler(app=app,
app_token=channel_conf(const.SLACK).get('slack_app_token'))
# 监听 Slack app_mention 事件
@app.event("app_mention")
def handle_mention(event, say):
if 'thread_ts' in event:
ts = event["thread_ts"]
else:
ts = event["ts"]
reply_text = SlackChannel().handle(event)
say(text=f"{reply_text}", thread_ts=ts)
class SlackChannel(Channel):
    """Slack channel driven by Socket Mode mention events."""

    def startup(self):
        """Begin listening for Slack events over the socket-mode connection."""
        handler.start()

    def handle(self, event):
        """Build a bot reply for a mention event.

        The thread timestamp (falling back to the message timestamp) serves
        as the conversation/session id, and Slack mention markup such as
        "<@U012345>" is stripped before the text reaches the model.
        """
        thread_id = event.get("thread_ts", event["ts"])
        context = {'from_user_id': str(thread_id)}
        # Remove @-mention tokens from the prompt text.
        plain_text = re.sub(r"<@\w+>", "", event["text"])
        return super().build_reply_content(plain_text, context)
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/telegram/telegram_channel.py | Python | from concurrent.futures import ThreadPoolExecutor
import io
import requests
import telebot
from common import const
from common.log import logger
from channel.channel import Channel
from config import channel_conf_val, channel_conf
bot = telebot.TeleBot(token=channel_conf(const.TELEGRAM).get('bot_token'))
thread_pool = ThreadPoolExecutor(max_workers=8)
@bot.message_handler(commands=['help'])
def send_welcome(message):
    # /help command: greet the user with a short HTML-formatted introduction.
    bot.send_message(message.chat.id, "<a>我是chatGPT机器人,开始和我聊天吧!</a>", parse_mode = "HTML")
# Handle text-type messages.
@bot.message_handler(content_types=['text'])
def handle_text_message(msg):
    # Renamed from the duplicated ``send_welcome`` so it no longer shadows
    # the /help handler above (both were registered at decoration time, but
    # the repeated name was misleading).
    # Dispatch to the Telegram channel implementation.
    TelegramChannel().handle(msg)
class TelegramChannel(Channel):
    """Telegram channel: relays user text to the model and sends the reply
    (text, or generated images for prompts matching the image prefix)."""

    def __init__(self):
        pass

    def startup(self):
        logger.info("开始启动[telegram]机器人")
        bot.infinity_polling()

    def handle(self, msg):
        """Route an incoming text message: image-generation requests
        (matching a configured prefix) take the image path, everything
        else the text path; both run on the worker pool."""
        logger.debug("[Telegram] receive msg: " + msg.text)
        img_match_prefix = self.check_prefix(msg, channel_conf_val(const.TELEGRAM, 'image_create_prefix'))
        # Image-creation request?
        if img_match_prefix:
            thread_pool.submit(self._do_send_img, msg, str(msg.chat.id))
        else:
            thread_pool.submit(self._do_send, msg.text, msg)

    def _do_send(self, query, msg):
        """Build a text reply and send it as a Telegram reply message.
        (Renamed from ``_dosend`` for consistency with ``_do_send_img``.)"""
        context = dict()
        context['from_user_id'] = str(msg.chat.id)
        reply_text = super().build_reply_content(query, context)
        logger.info('[Telegram] reply content: {}'.format(reply_text))
        bot.reply_to(msg, reply_text)

    def _do_send_img(self, msg, reply_user_id):
        """Generate image(s) for the prompt and send each back as a photo."""
        try:
            if not msg:
                return
            context = dict()
            context['type'] = 'IMAGE_CREATE'
            img_urls = super().build_reply_content(msg.text, context)
            if not img_urls:
                return
            if not isinstance(img_urls, list):
                # The model returned a plain message (e.g. an error) instead
                # of a list of URLs — relay it as text.
                bot.reply_to(msg, img_urls)
                return
            for url in img_urls:
                # Download the image into memory.
                pic_res = requests.get(url, stream=True)
                image_storage = io.BytesIO()
                for block in pic_res.iter_content(1024):
                    image_storage.write(block)
                image_storage.seek(0)
                # Send the image ('[Telegrame]' typo in the log fixed).
                logger.info('[Telegram] sendImage, receiver={}'.format(reply_user_id))
                bot.send_photo(msg.chat.id, image_storage)
        except Exception as e:
            logger.exception(e)

    def check_prefix(self, msg, prefix_list):
        """Return the first prefix in ``prefix_list`` that ``msg.text``
        starts with, or None when the list is empty or nothing matches."""
        if not prefix_list:
            return None
        for prefix in prefix_list:
            if msg.text.startswith(prefix):
                return prefix
        return None
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/terminal/terminal_channel.py | Python | from channel.channel import Channel
from common import log
import sys
class TerminalChannel(Channel):
    # Interactive terminal (REPL) channel: reads a question from stdin and
    # streams the model's answer chunk-by-chunk to stdout.
    def startup(self):
        # close log so log output does not interleave with the REPL
        log.close_log()
        context = {"from_user_id": "User", "stream": True}
        print("\nPlease input your question")
        while True:
            try:
                prompt = self.get_input("User:\n")
            except KeyboardInterrupt:
                print("\nExiting...")
                sys.exit()
            print("Bot:")
            sys.stdout.flush()
            # With context["stream"] set, build_reply_content yields chunks.
            for res in super().build_reply_content(prompt, context):
                print(res, end="")
                sys.stdout.flush()
            print("\n")

    def get_input(self, prompt):
        """
        Print the prompt and read one line from stdin.
        (Despite the original "multi-line" label, only a single line is read.)
        """
        print(prompt, end="")
        line = input()
        return line
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/wechat/wechat_channel.py | Python | # encoding:utf-8
"""
wechat channel
"""
import time
import itchat
import json
import re
from itchat.content import *
from channel.channel import Channel
from concurrent.futures import ThreadPoolExecutor
from common.log import logger
from common import const
from config import channel_conf_val
import requests
from plugins.plugin_manager import *
from common.sensitive_word import SensitiveWord
import io
thread_pool = ThreadPoolExecutor(max_workers=8)
sw = SensitiveWord()
@itchat.msg_register(TEXT)
def handler_single_msg(msg):
    # itchat entry point for one-to-one text messages.
    WechatChannel().handle(msg)
    return None
@itchat.msg_register(TEXT, isGroupChat=True)
def handler_group_msg(msg):
    # itchat entry point for group-chat text messages.
    WechatChannel().handle_group(msg)
    return None
class WechatChannel(Channel):
    """Personal-WeChat channel implemented with itchat (web WeChat protocol)."""

    def __init__(self):
        pass

    def startup(self):
        # login by scan QRCode
        hot_reload = channel_conf_val(const.WECHAT, 'hot_reload', True)
        if channel_conf_val(const.WECHAT, 'receive_qrcode_api'):
            # NOTE(review): this branch passes hot_reload= while the other
            # passes hotReload= — confirm the itchat fork in use accepts both.
            itchat.auto_login(enableCmdQR=2, hot_reload=hot_reload, qrCallback=self.login)
        else:
            itchat.auto_login(enableCmdQR=2, hotReload=hot_reload)
        # start message listener
        itchat.run()

    def login(self, uuid=None, status='0', qrcode=None):
        """QR-code callback used when a remote login flow is configured."""
        print('uuid:', uuid)
        print('status:', status)
        # Forward this link to an external QR-code generator (e.g. 草料二维码)
        # and scan the rendered code with the WeChat account's phone.
        print('qrcode_link:', 'https://login.weixin.qq.com/l/'+uuid)

    def handle(self, msg):
        """Handle a private text message: skip stale messages, run the
        sensitive-word filter, then reply when the configured single-chat
        prefix matches."""
        logger.debug("[WX]receive msg: " + json.dumps(msg, ensure_ascii=False))
        from_user_id = msg['FromUserName']
        to_user_id = msg['ToUserName']              # recipient id
        other_user_id = msg['User']['UserName']     # counterpart id
        create_time = msg['CreateTime']             # message timestamp
        content = msg['Text']
        hot_reload = channel_conf_val(const.WECHAT, 'hot_reload', True)
        if hot_reload == True and int(create_time) < int(time.time()) - 60:
            # Skip history replayed on hot-reload login (older than 1 minute).
            logger.debug("[WX]history message skipped")
            return
        # Sensitive-word screening of the user's input.
        if sw.process_text(content):
            self.send('请检查您的输入是否有违规内容', from_user_id)
            return
        match_prefix = self.check_prefix(content, channel_conf_val(const.WECHAT, 'single_chat_prefix'))
        if from_user_id == other_user_id and match_prefix is not None:
            # A friend messaged this account.
            if match_prefix != '':
                str_list = content.split(match_prefix, 1)
                if len(str_list) == 2:
                    content = str_list[1].strip()
            thread_pool.submit(self._do_send, content, from_user_id)
        elif to_user_id == other_user_id and match_prefix:
            # This account messaged a friend (self-trigger).
            str_list = content.split(match_prefix, 1)
            if len(str_list) == 2:
                content = str_list[1].strip()
            thread_pool.submit(self._do_send, content, to_user_id)

    def handle_group(self, msg):
        """Handle a group text message: reply only in whitelisted groups and
        only when @-mentioned or a trigger prefix/keyword matches."""
        logger.debug("[WX]receive group msg: " + json.dumps(msg, ensure_ascii=False))
        group_name = msg['User'].get('NickName', None)
        group_id = msg['User'].get('UserName', None)
        create_time = msg['CreateTime']  # message timestamp
        hot_reload = channel_conf_val(const.WECHAT, 'hot_reload', True)
        if hot_reload == True and int(create_time) < int(time.time()) - 60:
            # Skip history replayed on hot-reload login (older than 1 minute).
            logger.debug("[WX]history message skipped")
            return
        if not group_name:
            return None
        origin_content = msg['Content']
        content = msg['Content']
        # Drop the leading "@bot" token; \u2005 is the narrow space WeChat
        # inserts after an @-mention.
        content_list = content.split(' ', 1)
        context_special_list = content.split('\u2005', 1)
        if len(context_special_list) == 2:
            content = context_special_list[1]
        elif len(content_list) == 2:
            content = content_list[1]
        match_prefix = (msg['IsAt'] and not channel_conf_val(const.WECHAT, "group_at_off", False)) or self.check_prefix(origin_content, channel_conf_val(const.WECHAT, 'group_chat_prefix')) or self.check_contain(origin_content, channel_conf_val(const.WECHAT, 'group_chat_keyword'))
        # Sensitive-word screening when triggered.
        # NOTE(review): `is True` means the check is skipped when the trigger
        # was a string prefix rather than an @-mention — confirm intended.
        if match_prefix is True:
            if sw.process_text(content):
                self.send('请检查您的输入是否有违规内容', group_id)
                return
        group_white_list = channel_conf_val(const.WECHAT, 'group_name_white_list')
        if ('ALL_GROUP' in group_white_list or group_name in group_white_list or self.check_contain(group_name, channel_conf_val(const.WECHAT, 'group_name_keyword_white_list'))) and match_prefix:
            thread_pool.submit(self._do_send_group, content, msg)
        return None

    def send(self, msg, receiver):
        """Deliver a reply: plain text, or — when the reply is a URL to an
        image/file — download it and send it as a picture/attachment."""
        logger.info('[WX] sendMsg={}, receiver={}'.format(msg, receiver))
        reply_type = self.determine_type(msg)
        if reply_type == "text":
            itchat.send(msg, toUserName=receiver)
        elif reply_type == "img_url":
            image_storage = self.dowdload_img_url(msg)
            itchat.send_image(image_storage, toUserName=receiver)
        elif reply_type == "file_url":
            image_storage = self.dowdload_img_url(msg)
            itchat.send_file(image_storage, toUserName=receiver)
        else:
            return None

    def determine_type(self, msg):
        """Classify an outgoing reply as "text", "img_url", "file_url" or
        "others" so send() can pick the matching itchat call."""
        # URL matcher
        url_pattern = re.compile(
            r'^(?:http|ftp)s?://'  # http:// or https://
            r'(?:\S+(?::\S*)?@)?'  # optional user:password
            r'(?:[A-Za-z0-9-]+\.)+[A-Za-z]{2,6}'  # domain
            r'(?:/?|[/?]\S+)$', re.IGNORECASE)
        # Is it a URL at all?
        if re.match(url_pattern, msg):
            # Image link?
            if any(msg.endswith(extension) for extension in ['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.svg']):
                return "img_url"
            # Other downloadable file link?
            elif any(msg.endswith(extension) for extension in
                     ['.pdf', '.txt', '.doc', '.docx', '.xls', '.xlsx', '.zip', '.rar']):
                # Bug fix: this previously returned "img_file", a value
                # send() never checks, so file links fell into the silent
                # "others" branch and were dropped.
                return "file_url"
            else:
                return "others"
        else:
            return "text"

    def _do_send(self, query, reply_user_id):
        """Build a reply through the plugin pipeline and send it privately."""
        try:
            if not query:
                return
            context = dict()
            context['from_user_id'] = reply_user_id
            context['channel'] = self
            e_context = PluginManager().emit_event(EventContext(Event.ON_HANDLE_CONTEXT, {
                'channel': self, 'context': query, "args": context}))
            reply = e_context['reply']
            if not e_context.is_pass():
                reply = super().build_reply_content(e_context["context"], e_context["args"])
            e_context = PluginManager().emit_event(EventContext(Event.ON_DECORATE_REPLY, {
                'channel': self, 'context': context, 'reply': reply, "args": e_context["args"]}))
            reply = e_context['reply']
            if reply:
                self.send(channel_conf_val(const.WECHAT, "single_chat_reply_prefix") + reply, reply_user_id)
        except Exception as e:
            logger.exception(e)

    def _do_send_img(self, query, context):
        """Generate image(s) for the prompt and send each to the requester."""
        try:
            if not query:
                return
            reply_user_id = context['from_user_id']
            img_urls = super().build_reply_content(query, context)
            if not img_urls:
                return
            if not isinstance(img_urls, list):
                # Plain message (e.g. an error) instead of URLs — send as text.
                self.send(channel_conf_val(const.WECHAT, "single_chat_reply_prefix") + img_urls, reply_user_id)
                return
            for url in img_urls:
                # Download the image into memory.
                pic_res = requests.get(url, stream=True)
                image_storage = io.BytesIO()
                for block in pic_res.iter_content(1024):
                    image_storage.write(block)
                image_storage.seek(0)
                # Send the image.
                logger.info('[WX] sendImage, receiver={}'.format(reply_user_id))
                itchat.send_image(image_storage, reply_user_id)
        except Exception as e:
            logger.exception(e)

    def dowdload_img_url(self, url):
        # (sic: method name kept for compatibility) Download the URL's
        # content into an in-memory buffer and rewind it for sending.
        pic_res = requests.get(url, stream=True)
        image_storage = io.BytesIO()
        for block in pic_res.iter_content(1024):
            image_storage.write(block)
        image_storage.seek(0)
        return image_storage

    def _do_send_group(self, query, msg):
        """Build a reply through the plugin pipeline and post it to the group,
        @-mentioning the original asker."""
        if not query:
            return
        context = dict()
        context['from_user_id'] = msg['User']['UserName']
        context['channel'] = self
        e_context = PluginManager().emit_event(EventContext(Event.ON_HANDLE_CONTEXT, {
            'channel': self, 'context': query, "args": context}))
        reply = e_context['reply']
        if not e_context.is_pass():
            # Use the actual group member as the session id for the model.
            context['from_user_id'] = msg['ActualUserName']
            reply = super().build_reply_content(e_context["context"], e_context["args"])
        e_context = PluginManager().emit_event(EventContext(Event.ON_DECORATE_REPLY, {
            'channel': self, 'context': context, 'reply': reply, "args": e_context["args"]}))
        reply = e_context['reply']
        if reply:
            reply = '@' + msg['ActualNickName'] + ' ' + reply.strip()
            self.send(channel_conf_val(const.WECHAT, "group_chat_reply_prefix", "") + reply, msg['User']['UserName'])

    def check_prefix(self, content, prefix_list):
        """Return the first prefix ``content`` starts with, else None."""
        for prefix in prefix_list:
            if content.startswith(prefix):
                return prefix
        return None

    def check_contain(self, content, keyword_list):
        """Return True if ``content`` contains any keyword, else None."""
        if not keyword_list:
            return None
        for ky in keyword_list:
            if content.find(ky) != -1:
                return True
        return None
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/wechat/wechat_com_channel.py | Python | #!/usr/bin/env python
# -*- coding=utf-8 -*-
"""
@time: 2023/4/10 22:24
@Project :bot-on-anything
@file: wechat_com_channel.py
"""
import time
from channel.channel import Channel
from concurrent.futures import ThreadPoolExecutor
from common.log import logger
from config import conf
from wechatpy.enterprise.crypto import WeChatCrypto
from wechatpy.enterprise import WeChatClient
from wechatpy.exceptions import InvalidSignatureException
from wechatpy.enterprise.exceptions import InvalidCorpIdException
from wechatpy.enterprise import parse_message
from flask import Flask, request, abort
thread_pool = ThreadPoolExecutor(max_workers=8)
app = Flask(__name__)
@app.route('/wechat', methods=['GET', 'POST'])
def handler_msg():
return WechatEnterpriseChannel().handle()
_conf = conf().get("channel").get("wechat_com")
class WechatEnterpriseChannel(Channel):
    """WeChat Work (企业微信) channel served via a Flask callback endpoint."""

    def __init__(self):
        self.CorpId = _conf.get('wechat_corp_id')
        self.Secret = _conf.get('secret')
        self.AppId = _conf.get('appid')
        self.TOKEN = _conf.get('wechat_token')
        self.EncodingAESKey = _conf.get('wechat_encoding_aes_key')
        self.crypto = WeChatCrypto(self.TOKEN, self.EncodingAESKey, self.CorpId)
        self.client = WeChatClient(self.CorpId, self.Secret, self.AppId)

    def startup(self):
        # start message listener
        app.run(host='0.0.0.0', port=_conf.get('port'))

    def send(self, msg, receiver):
        """Send ``msg`` to ``receiver``, paging messages longer than 450
        characters into chunks delivered one second apart."""
        # page size
        n = 450
        if len(msg) <= n:  # <= so an exactly-450-char message is not paged
            logger.info('[WXCOM] sendMsg={}, receiver={}'.format(msg, receiver))
            self.client.message.send_text(self.AppId, receiver, msg)
            return
        # split into fixed-size pages
        chunks = [msg[i:i+n] for i in range(0, len(msg), n)]
        total = len(chunks)
        for i, chunk in enumerate(chunks):
            # Bug fix: the original logged .format(msg, chunk, ...), which put
            # the whole message in the sendMsg slot and the chunk text in the
            # receiver slot.
            logger.info('[WXCOM] sendMsg={}, receiver={}, page_number={}, page_total={}'.format(chunk, receiver, i+1, total))
            self.client.message.send_text(self.AppId, receiver, chunk)
            time.sleep(1)  # pace pages so the client renders them in order

    def _do_send(self, query, reply_user_id):
        """Build a reply for ``query`` and deliver it to ``reply_user_id``."""
        try:
            if not query:
                return
            context = dict()
            context['from_user_id'] = reply_user_id
            reply_text = super().build_reply_content(query, context)
            if reply_text:
                self.send(reply_text, reply_user_id)
        except Exception as e:
            logger.exception(e)

    def handle(self):
        """Flask view: answer the signed URL-verification challenge on GET,
        decrypt and dispatch an incoming message on POST."""
        query_params = request.args
        signature = query_params.get('msg_signature', '')
        timestamp = query_params.get('timestamp', '')
        nonce = query_params.get('nonce', '')
        if request.method == 'GET':
            # URL-verification handshake
            echostr = query_params.get('echostr', '')
            try:
                echostr = self.crypto.check_signature(signature, timestamp, nonce, echostr)
            except InvalidSignatureException:
                abort(403)
            # Was a stray debug print(); demoted to debug logging.
            logger.debug('[WXCOM] echostr={}'.format(echostr))
            return echostr
        elif request.method == 'POST':
            try:
                message = self.crypto.decrypt_message(
                    request.data,
                    signature,
                    timestamp,
                    nonce
                )
            except (InvalidSignatureException, InvalidCorpIdException):
                abort(403)
            msg = parse_message(message)
            if msg.type == 'text':
                thread_pool.submit(self._do_send, msg.content, msg.source)
            else:
                reply = 'Can not handle this for now'
                # Unsupported messages / menu events are deliberately ignored
                # to keep the user experience clean.
                # self.client.message.send_text(self.AppId, msg.source, reply)
            return 'success'
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/wechat/wechat_mp_channel.py | Python | import werobot
import time
from config import channel_conf
from common import const
from common.log import logger
from channel.channel import Channel
from concurrent.futures import ThreadPoolExecutor
import os
robot = werobot.WeRoBot(token=channel_conf(const.WECHAT_MP).get('token'))
thread_pool = ThreadPoolExecutor(max_workers=8)
cache = {}
@robot.text
def hello_world(msg):
    """WeRoBot text handler for the subscription account.

    Rejects messages containing any word from sensitive_words.txt, otherwise
    forwards the message to WechatSubsribeAccount.handle().
    """
    # Load the blocked-word list (re-read on every message, so edits take
    # effect immediately; original comment: 加入检测违规词).
    with open('sensitive_words.txt', 'r', encoding='utf-8') as f:
        sensitive_words = [line.strip() for line in f.readlines()]
    found = False
    for word in sensitive_words:
        if word != '' and word in msg.content:
            found = True
            break
    if found:
        return "输入内容有敏感词汇"
    else:
        logger.info('[WX_Public] receive public msg: {}, userId: {}'.format(msg.content, msg.source))
        key = msg.content + '|' + msg.source
        if cache.get(key):
            # Same question already in flight: count this retry so handle()
            # can detect WeChat's repeated-callback timeout (see req_times).
            cache.get(key)['req_times'] += 1
        return WechatSubsribeAccount().handle(msg)
class WechatSubsribeAccount(Channel):
    """Channel for a WeChat MP *subscription* account.

    Subscription accounts can only answer inside WeChat's short passive-reply
    window, so the model call runs in a thread pool while handle() polls the
    shared `cache` for up to ~5 seconds; the user can fetch a late reply by
    sending "继续" ("continue").
    """
    def startup(self):
        logger.info('[WX_Public] Wechat Public account service start!')
        robot.config['PORT'] = channel_conf(const.WECHAT_MP).get('port')
        robot.config['HOST'] = '0.0.0.0'
        robot.run()

    def handle(self, msg, count=1):
        """Answer `msg`, polling the cache once per second via recursion.

        :param msg: incoming WeRoBot text message
        :param count: current polling attempt (1-based)
        :return: reply text, or None to stay silent on the final attempt
        """
        if msg.content == "继续":
            # User is asking for a previously-queued reply.
            return self.get_un_send_content(msg.source)
        context = dict()
        context['from_user_id'] = msg.source
        key = msg.content + '|' + msg.source
        res = cache.get(key)
        if not res:
            # First time we see this question: queue the model call.
            cache[key] = {"status": "waiting", "req_times": 1}
            thread_pool.submit(self._do_send, msg.content, context)
        res = cache.get(key)
        logger.info("count={}, res={}".format(count, res))
        if res.get('status') == 'success':
            res['status'] = "done"
            cache.pop(key)
            return res.get("data")
        # WeChat retries the callback up to 3 times; after the third retry
        # has also waited, tell the user to poll manually with "继续".
        if cache.get(key)['req_times'] == 3 and count >= 4:
            logger.info("微信超时3次")
            return "已开始处理,请稍等片刻后输入\"继续\"查看回复"
        if count <= 5:
            time.sleep(1)
            if count == 5:
                # Return nothing at the 5th second: WeChat may already have
                # dropped the connection, so a reply could be duplicated.
                return None
            return self.handle(msg, count+1)

    def _do_send(self, query, context):
        """Worker-thread body: compute the reply and publish it in `cache`."""
        key = query + '|' + context['from_user_id']
        reply_text = super().build_reply_content(query, context)
        logger.info('[WX_Public] reply content: {}'.format(reply_text))
        cache[key]['status'] = "success"
        cache[key]['data'] = reply_text

    def get_un_send_content(self, from_user_id):
        """Pop and return the first finished reply queued for this user.

        NOTE(review): returns on the user's first cache entry even if a later
        entry is already finished — confirm that is acceptable.
        """
        for key in cache:
            if from_user_id in key:
                value = cache[key]
                if value.get('status') == "success":
                    cache.pop(key)
                    return value.get("data")
                return "还在处理中,请稍后再试"
        return "目前无等待回复信息,请输入对话"
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
channel/wechat/wechat_mp_service_channel.py | Python | import werobot
from config import channel_conf
from common import const
from common.log import logger
from channel.channel import Channel
from concurrent.futures import ThreadPoolExecutor
robot = werobot.WeRoBot(token=channel_conf(const.WECHAT_MP).get('token'))
thread_pool = ThreadPoolExecutor(max_workers=8)
@robot.text
def hello_world(msg):
    """WeRoBot text handler: log the incoming message, then let the
    service-account channel reply asynchronously."""
    logger.info('[WX_Public] receive public msg: {}, userId: {}'.format(msg.content, msg.source))
    channel = WechatServiceAccount()
    return channel.handle(msg)
class WechatServiceAccount(Channel):
    """Channel for a WeChat MP *service* account.

    A service account can push messages actively, so handle() acknowledges
    immediately and the real reply is delivered later from a worker thread
    through the customer-service message API.
    """
    def startup(self):
        logger.info('[WX_Public] Wechat Public account service start!')
        robot.config['PORT'] = channel_conf(const.WECHAT_MP).get('port')
        robot.config["APP_ID"] = channel_conf(const.WECHAT_MP).get('app_id')
        robot.config["APP_SECRET"] = channel_conf(const.WECHAT_MP).get('app_secret')
        robot.config['HOST'] = '0.0.0.0'
        robot.run()

    def handle(self, msg, count=0):
        # Reply instantly to satisfy WeChat's response window; the model
        # call runs asynchronously in the thread pool.
        context = {}
        context['from_user_id'] = msg.source
        thread_pool.submit(self._do_send, msg.content, context)
        return "正在思考中..."

    def _do_send(self, query, context):
        """Worker-thread body: build the reply and push it via the API."""
        reply_text = super().build_reply_content(query, context)
        logger.info('[WX_Public] reply content: {}'.format(reply_text))
        client = robot.client
        client.send_text_message(context['from_user_id'], reply_text)
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
common/const.py | Python | # channel
# channel: identifiers for the supported front-end channels (see channel/)
TERMINAL = "terminal"
WECHAT = "wechat"
WECHAT_MP = "wechat_mp"
WECHAT_MP_SERVICE = "wechat_mp_service"
WECHAT_COM = "wechat_com"
QQ = "qq"
GMAIL = "gmail"
TELEGRAM = "telegram"
SLACK = "slack"
HTTP = "http"
DINGTALK = "dingtalk"
FEISHU = "feishu"
DISCORD = "discord"
# model: identifiers for the supported AI model backends (see model/)
OPEN_AI = "openai"
CHATGPT = "chatgpt"
BAIDU = "baidu"
BING = "bing"
BARD = "bard"
LINKAI = "linkai"
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
common/functions.py | Python | import json
import os
import re
from common import log
def singleton(cls):
    """Class decorator: construct the decorated class at most once and
    return that cached instance on every subsequent call."""
    _cache = {}

    def wrapper(*args, **kwargs):
        if cls not in _cache:
            _cache[cls] = cls(*args, **kwargs)
        return _cache[cls]

    return wrapper
def load_json_file(curdir: str, file: str = 'config.json'):
    """Read and parse a JSON file located under `curdir`.

    :param curdir: directory containing the file
    :param file: file name, defaults to 'config.json'
    :return: the parsed JSON object
    :raises Exception: re-raises any open/parse error after logging it
    """
    config_path = os.path.join(curdir, file)
    try:
        with open(config_path, "r", encoding="utf-8") as f:
            config = json.load(f)
            return config
    except Exception as e:
        if isinstance(e, FileNotFoundError):
            # BUG FIX: the old message appended "\{file}" to a path that
            # already ends with the file name; log the real path instead.
            log.warn(
                f"[common]load json file failed, {config_path} not found")
        else:
            log.warn("[common]load json file failed")
        raise e
def contain_chinese(str):
    """Return True if the string contains at least one Chinese character
    (CJK Unified Ideographs, U+4E00..U+9FA5).

    Note: the parameter name shadows the builtin `str`; it is kept for
    interface compatibility with existing callers.
    """
    pattern = re.compile('[\u4e00-\u9fa5]')
    match = pattern.search(str)
    # Fix: identity comparison against None (PEP 8) instead of `!= None`.
    return match is not None
def check_prefix(content, prefix_list):
    """Check whether `content` starts with any prefix in `prefix_list`.

    Returns True when the list is empty (no filtering configured), the
    matching prefix string when one matches, and False otherwise.
    """
    if not prefix_list:
        return True
    for candidate in prefix_list:
        if content.startswith(candidate):
            return candidate
    return False
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
common/log.py | Python | # encoding:utf-8
import logging
import sys
SWITCH = True
def _get_logger():
log = logging.getLogger('log')
log.setLevel(logging.INFO)
console_handle = logging.StreamHandler(sys.stdout)
console_handle.setFormatter(logging.Formatter('[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'))
log.addHandler(console_handle)
return log
def close_log():
    """Globally disable debug/info output (warn/error are unaffected,
    since they do not consult SWITCH)."""
    global SWITCH
    SWITCH = False
def debug(arg, *args):
    """Log at DEBUG level when logging is enabled; extra positional args
    are substituted into `arg` via str.format."""
    if not SWITCH:
        return
    message = arg.format(*args) if args else arg
    logger.debug(message)
def info(arg, *args):
    """Log at INFO level when logging is enabled; extra positional args
    are substituted into `arg` via str.format."""
    if not SWITCH:
        return
    message = arg.format(*args) if args else arg
    logger.info(message)
def warn(arg, *args):
    """Log at WARNING level (always on, regardless of SWITCH); extra
    positional args are substituted into `arg` via str.format."""
    message = arg.format(*args) if args else arg
    logger.warning(message)
def error(arg, *args):
    """Log at ERROR level (always on, regardless of SWITCH); extra
    positional args are substituted into `arg` via str.format."""
    message = arg.format(*args) if args else arg
    logger.error(message)
def exception(e):
    # Log `e` at ERROR level together with the current traceback.
    logger.exception(e)
# module-level logger handle, created once at import time
logger = _get_logger()
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
common/sensitive_word.py | Python | import requests
from config import conf
class SensitiveWord:
    """Text moderation via Baidu Cloud's content-censor HTTP API.

    When the relevant keys are missing from the 'common' config section,
    every check degrades to a no-op that reports "no sensitive content".
    """
    def __init__(self):
        # Load the global configuration
        try:
            self.config = conf()  # load the config file
            #print(self.config)  # print config contents for debugging
        except Exception as e:
            print(e)  # print the error
        # Request URL (NOTE(review): this anti-spam URL appears unused —
        # process_text builds its own text_censor URL below; verify.)
        self.url = "https://aip.baidubce.com/rest/2.0/antispam/v2/spam"
        # Eagerly fetch an access token
        self.access_token = self.get_access_token()

    def get_access_token(self):
        """
        Obtain a Baidu Cloud API access token via client-credentials OAuth.

        NOTE(review): when the config check below fails this method falls
        through and implicitly returns None — confirm callers tolerate that.
        :return: str access token
        """
        # Only proceed when the moderation config is present
        if self.config is not None and "common" in self.config and "type" in self.config["common"] and self.config["common"]["type"]:
            url = "https://aip.baidubce.com/oauth/2.0/token"
            params = {
                "grant_type": "client_credentials",
                "client_id": self.config["common"]["client_id"],
                "client_secret": self.config["common"]["client_secret"]
            }
            response = requests.post(url, params=params)
            response_json = response.json()
            access_token = response_json.get("access_token")
            if not access_token:
                raise ValueError(f"获取 access_token 失败: {response_json.get('error_description')}")
            print(f"Access token: {access_token}")  # print the token for debugging
            return access_token

    def process_text(self, text):
        """Return True when `text` is flagged by the censor API, else False."""
        # Only run the check when moderation is enabled in config
        if self.config is not None and "common" in self.config and "sensitive" in self.config["common"] and self.config["common"]["sensitive"]:
            # Normal moderation flow
            url = "https://aip.baidubce.com/rest/2.0/solution/v1/text_censor/v2/user_defined"  # API endpoint
            access_token = self.get_access_token()
            headers = {"content-type": "application/x-www-form-urlencoded"}
            params = {
                "text": text.encode("utf-8"),
                "access_token": access_token
            }
            response = requests.post(url, data=params, headers=headers)
            if response.status_code != 200:
                raise ValueError(f"无法连接到接口,请检查你的网络: {response.json().get('error_msg')}")
            # conclusionType 1 means "compliant"; anything else is flagged.
            conclusion_type = response.json().get("conclusionType")
            print(response.json())  # print the full API response
            if conclusion_type in [1, None]:
                return False
            else:
                return True
        # Moderation disabled: report no sensitive content
        else:
            return False
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
config.py | Python | # encoding:utf-8
import json
import os
config = {}
def load_config(config_path = "./config.json"):
    """Load the JSON config file into the module-level `config` dict and
    return it. Raises when the file does not exist."""
    global config
    if not os.path.exists(config_path):
        raise Exception('配置文件不存在,请根据config-template.json模板创建config.json文件')
    raw = read_file(config_path)
    # Deserialize the JSON text into a dict
    config = json.loads(raw)
    print("Load config success")
    return config
def get_root():
    """Return the absolute path of the directory containing this file."""
    return os.path.split(os.path.abspath(__file__))[0]
def read_file(path):
    """Return the full contents of a UTF-8 text file."""
    with open(path, encoding='utf-8', mode='r') as f:
        content = f.read()
    return content
def conf():
    # Return the global config dict populated by load_config().
    return config
def model_conf(model_type):
    # Return the config sub-dict for one model backend, e.g. model.openai.
    return config.get('model').get(model_type)
def model_conf_val(model_type, key):
    """Look up `key` under model.<model_type>, falling back to the shared
    value at model.<key> when the specific one is unset.

    NOTE(review): `if not val` also routes legitimate falsy values
    (0, False, "") to the fallback — confirm that is intended.
    """
    val = config.get('model').get(model_type).get(key)
    if not val:
        # common default config
        return config.get('model').get(key)
    return val
def channel_conf(channel_type):
    # Return the config sub-dict for one channel, e.g. channel.wechat_mp.
    return config.get('channel').get(channel_type)
def channel_conf_val(channel_type, key, default=None):
    """Look up `key` under channel.<channel_type>, falling back to the shared
    value at channel.<key> (or `default`) when the specific one is unset.

    NOTE(review): `if not val` also routes legitimate falsy values
    (0, False, "") to the fallback — confirm that is intended.
    """
    val = config.get('channel').get(channel_type).get(key)
    if not val:
        # common default config
        return config.get('channel').get(key, default)
    return val
def common_conf_val(key, default=None):
    """Read `key` from the top-level 'common' config section, returning
    `default` when the section is absent/empty or the key is missing."""
    common = config.get('common')
    if common:
        return common.get(key, default)
    return default
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
model/baidu/yiyan_model.py | Python | # encoding:utf-8
from model.model import Model
from config import model_conf
from common import const
from common.log import logger
import requests
import time
sessions = {}
class YiyanModel(Model):
    """ERNIE Bot (文心一言) backend that drives the yiyan.baidu.com private
    web endpoints directly, authenticated with a browser cookie plus the
    site's Acs-Token header. One web session is kept per user in `sessions`.
    """
    def __init__(self):
        # acs_token / cookie are captured from a logged-in browser session.
        self.acs_token = model_conf(const.BAIDU).get('acs_token')
        self.cookie = model_conf(const.BAIDU).get('cookie')
        self.base_url = 'https://yiyan.baidu.com/eb'

    def reply(self, query, context=None):
        """Answer `query`: create/reuse a session, open a chat, then poll the
        query endpoint and accumulate the streamed reply in context['reply']."""
        logger.info("[BAIDU] query={}".format(query))
        user_id = context.get('session_id') or context.get('from_user_id')
        context['query'] = query
        # 1.create session
        chat_session_id = sessions.get(user_id)
        if not chat_session_id:
            self.new_session(context)
            sessions[user_id] = context['chat_session_id']
        else:
            context['chat_session_id'] = chat_session_id
        # 2.create chat
        flag = self.new_chat(context)
        if not flag:
            return "创建会话失败,请稍后再试"
        # 3.query
        context['reply'] = ''
        self.query(context, 0, 0)
        return context['reply']

    def new_session(self, context):
        """Create a web session named after the query; stores its id in context."""
        data = {
            "sessionName": context['query'],
            "timestamp": int(time.time() * 1000),
            "deviceType": "pc"
        }
        res = requests.post(url=self.base_url+'/session/new', headers=self._create_header(), json=data)
        # print(res.headers)
        context['chat_session_id'] = res.json()['data']['sessionId']
        logger.info("[BAIDU] newSession: id={}".format(context['chat_session_id']))

    def new_chat(self, context):
        """Create a chat turn in the current session; returns False on API error."""
        headers = self._create_header()
        headers['Acs-Token'] = self.acs_token
        data = {
            "sessionId": context.get('chat_session_id'),
            "text": context['query'],
            "parentChatId": 0,
            "type": 10,
            "timestamp": int(time.time() * 1000),
            "deviceType": "pc",
            "code": 0,
            "msg": ""
        }
        res = requests.post(url=self.base_url+'/chat/new', headers=headers, json=data).json()
        if res['code'] != 0:
            # NOTE(review): logging.Logger.error uses %-style formatting, so
            # this "{}" placeholder is never interpolated — verify intent.
            logger.error("[BAIDU] New chat error, msg={}", res['msg'])
            return False
        context['chat_id'] = res['data']['botChat']['id']
        context['parent_chat_id'] = res['data']['botChat']['parent']
        return True

    def query(self, context, sentence_id, count):
        """Recursively poll for reply sentences, appending streamed text to
        context['reply'] until is_end is set or `count` exceeds 10."""
        headers = self._create_header()
        headers['Acs-Token'] = self.acs_token
        data = {
            "chatId": context['chat_id'],
            "parentChatId": context['parent_chat_id'],
            "sentenceId": sentence_id,
            "stop": 0,
            # NOTE(review): hard-coded timestamp — presumably the server
            # ignores it; confirm before changing.
            "timestamp": 1679068791405,
            "deviceType": "pc"
        }
        res = requests.post(url=self.base_url + '/chat/query', headers=headers, json=data)
        logger.debug("[BAIDU] query: sent_id={}, count={}, res={}".format(sentence_id, count, res.text))
        res = res.json()
        if res['data']['text'] != '':
            context['reply'] += res['data']['text']
            # logger.debug("[BAIDU] query: sent_id={}, reply={}".format(sentence_id, res['data']['text']))
        if res['data']['is_end'] == 1:
            return
        if count > 10:
            # Give up after 10 polls to avoid spinning forever.
            return
        time.sleep(1)
        if not res['data']['text']:
            # No new text yet: retry the same sentence.
            return self.query(context, sentence_id, count+1)
        else:
            # Got text: advance to the next sentence.
            return self.query(context, sentence_id+1, count+1)

    def _create_header(self):
        """Common browser-mimicking headers (including the auth cookie)."""
        headers = {
            'Host': 'yiyan.baidu.com',
            'Origin': 'https://yiyan.baidu.com',
            'Referer': 'https://yiyan.baidu.com',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36',
            'Content-Type': 'application/json',
            'Cookie': self.cookie
        }
        return headers
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
model/bing/jailbroken_sydney.py | Python | # encoding:utf-8
import asyncio
import time
import websockets
import random
import uuid
import EdgeGPT
from EdgeGPT import ChatHubRequest, Chatbot, Conversation, ChatHub
from typing import Generator
from config import model_conf_val
class SydneyBot(Chatbot):
    """EdgeGPT Chatbot subclass implementing the "jailbroken Sydney" mode:
    every ask starts a fresh Bing conversation, and continuity is simulated
    by replaying a locally cached message history (plus a jailbreak prompt)
    into the request's previousMessages field.
    """
    def __init__(
        self,
        cookiePath: str = "",
        cookies: dict | None = None,
        proxy: str | None = None,
        options: dict | None = None,
    ) -> None:
        self.conversations_cache = {}
        self.parent_message_id = 0
        self.user_message_id = 0
        self.conversation_key = uuid.uuid4()
        self.cookiePath: str = cookiePath
        self.cookies: dict | None = cookies
        self.proxy: str | None = proxy
        self.chat_hub: SydneyHub
        # NOTE(review): conversations_cache is overwritten here with the
        # cache *options* dict, not a message store — verify intent.
        cache_options = options.get('cache', {})
        cache_options['namespace'] = cache_options.get('namespace', 'bing')
        self.conversations_cache = cache_options

    @staticmethod
    def get_messages_for_conversation(messages, parent_message_id):
        """Walk parentMessageId links backwards from `parent_message_id` and
        return the chain in chronological order."""
        ordered_messages = []
        current_message_id = parent_message_id
        while current_message_id:
            message = next(
                (m for m in messages if m['id'] == current_message_id), None)
            if not message:
                break
            ordered_messages.insert(0, message)
            current_message_id = message.get('parentMessageId')
        return ordered_messages

    async def ask_stream(
        self,
        prompt: str,
        conversation_style: EdgeGPT.CONVERSATION_STYLE_TYPE = None,
        message_id: str = None
    ) -> dict:
        """Stream (final, response) pairs for `prompt`, injecting cached
        history and the jailbreak prompt into a brand-new conversation."""
        # Start a new conversation for every request
        self.chat_hub = SydneyHub(Conversation(
            self.cookiePath, self.cookies, self.proxy))
        self.parent_message_id = message_id if message_id != None else uuid.uuid4()
        # Rebuild the history string and feed it to the SydneyHubRequest
        conversation = self.conversations_cache.get(self.conversation_key)
        if conversation is None:
            conversation = {
                "messages": [],
                "createdAt": int(time.time()*1000)
            }
        previous_cached_messages = ""
        for conversation_message in self.get_messages_for_conversation(conversation["messages"], self.parent_message_id):
            previous_cached_messages += f"{conversation_message['role'].replace('bot', 'AI')}:\n{conversation_message['message']}\n\n"
        # Obfuscate the jailbreak prompt by randomly interleaving -/_ between
        # characters (presumably to evade simple server-side filters).
        chars = list(model_conf_val("bing", "jailbreak_prompt"))
        chars = [('-' + c if random.random() < 0.5 else '_' + c)
                 if i > 0 else c for i, c in enumerate(chars)]
        previous_messages = ''.join(chars)
        self.chat_hub.request.previous_messages = previous_messages + \
            "\n\n"+previous_cached_messages
        # Append the current question to the cached history
        self.user_message_id = uuid.uuid4()
        user_message = {
            "id": self.user_message_id,
            "parentMessageId": self.parent_message_id,
            "role": 'User',
            "message": prompt,
        }
        conversation["messages"].append(user_message)
        self.conversations_cache[self.conversation_key] = conversation
        async for final, response in self.chat_hub.ask_stream(
            prompt=prompt,
            conversation_style=conversation_style
        ):
            if final:
                try:
                    if self.chat_hub.wss and not self.chat_hub.wss.closed:
                        await self.chat_hub.wss.close()
                    self.update_reply_cache(response["item"]["messages"][-1])
                except Exception as e:
                    # Content filter tripped: drop the last question from the
                    # cache and surface a user-facing explanation.
                    self.conversations_cache[self.conversation_key]["messages"].pop()
                    yield True, f"AI生成内容被微软内容过滤器拦截,已删除最后一次提问的记忆,请尝试使用其他文字描述问题,若AI依然无法正常回复,请清除全部记忆后再次尝试"
            yield final, response

    async def ask(
        self,
        prompt: str,
        conversation_style: EdgeGPT.CONVERSATION_STYLE_TYPE = None,
        message_id: str = None
    ) -> dict:
        """Non-streaming wrapper: exhaust ask_stream and return the final
        response after recording it in the history cache."""
        async for final, response in self.ask_stream(
            prompt=prompt,
            conversation_style=conversation_style,
            message_id=message_id
        ):
            if final:
                self.update_reply_cache(response["item"]["messages"][-1])
                return response

    def update_reply_cache(
        self,
        reply,
    ) -> None:
        """Append Bing's reply to the cached history and advance
        user_message_id so the next turn links to this reply."""
        replyMessage = {
            "id": uuid.uuid4(),
            "parentMessageId": self.user_message_id,
            "role": 'Bing',
            "message": reply["text"],
            "details": reply,
        }
        self.conversations_cache[self.conversation_key]["messages"].append(
            replyMessage)
        self.user_message_id = replyMessage["id"]
class SydneyHub(ChatHub):
    """
    Chat API: EdgeGPT ChatHub that substitutes SydneyHubRequest so the
    jailbreak history can be injected into each request.
    """
    def __init__(self, conversation: Conversation) -> None:
        self.wss: websockets.WebSocketClientProtocol | None = None
        self.request: SydneyHubRequest
        self.loop: bool
        self.task: asyncio.Task
        self.request = SydneyHubRequest(
            conversation_signature=conversation.struct["conversationSignature"],
            client_id=conversation.struct["clientId"],
            conversation_id=conversation.struct["conversationId"],
        )

    async def ask_stream(
        self,
        prompt: str,
        wss_link: str = "wss://sydney.bing.com/sydney/ChatHub",
        conversation_style: EdgeGPT.CONVERSATION_STYLE_TYPE = None,
    ) -> Generator[str, None, None]:
        # Thin pass-through to the parent implementation with a default
        # websocket endpoint.
        async for item in super().ask_stream(prompt=prompt, conversation_style=conversation_style, wss_link=wss_link):
            yield item
class SydneyHubRequest(ChatHubRequest):
    """ChatHubRequest that carries a locally built history string and injects
    it as a bot-authored previousMessages entry on every update."""
    def __init__(
        self,
        conversation_signature: str,
        client_id: str,
        conversation_id: str,
        invocation_id: int = 0,
    ) -> None:
        super().__init__(conversation_signature=conversation_signature, client_id=client_id,
                         conversation_id=conversation_id, invocation_id=invocation_id)
        # Filled in by SydneyBot.ask_stream before each request.
        self.previous_messages = ""

    def update(
        self,
        prompt: str,
        conversation_style: EdgeGPT.CONVERSATION_STYLE_TYPE,
        options: list | None = None,
    ) -> None:
        # Reset invocation_id: every ask runs as the first turn of a fresh
        # conversation (continuity comes from previousMessages instead).
        self.invocation_id = 0
        super().update(prompt=prompt, conversation_style=conversation_style, options=options)
        self.struct["arguments"][0]["message"]["messageType"] = "SearchQuery"
        self.struct["arguments"][0]["previousMessages"] = [
            {"text": "N/A\n\n"+self.previous_messages, "author": 'bot', }]
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
model/bing/new_bing_model.py | Python | # encoding:utf-8
import asyncio
from model.model import Model
from config import model_conf_val, common_conf_val
from common import log
from EdgeGPT import Chatbot, ConversationStyle
from ImageGen import ImageGen
from common import functions
from model.bing.jailbroken_sydney import SydneyBot
user_session = dict()
suggestion_session = dict()
# newBing对话模型逆向网页gitAPI
class BingModel(Model):
    """NewBing backend built on the reverse-engineered EdgeGPT web API.

    Uses SydneyBot (jailbreak mode) or the plain EdgeGPT Chatbot depending on
    config, supports streaming and blocking replies, image generation via
    ImageGen, and numeric quick-replies to Bing's suggested follow-ups.
    """
    style = ConversationStyle.creative
    bot: Chatbot = None
    cookies: list = None

    def __init__(self):
        try:
            self.cookies = model_conf_val("bing", "cookies")
            self.jailbreak = model_conf_val("bing", "jailbreak")
            # Jailbreak mode swaps in the SydneyBot subclass.
            self.bot = SydneyBot(cookies=self.cookies, options={}) if (
                self.jailbreak) else Chatbot(cookies=self.cookies)
        except Exception as e:
            log.warn(e)

    async def reply_text_stream(self, query: str, context=None) -> dict:
        """Async generator yielding (final, text) pairs for a streamed reply."""
        async def handle_answer(final, answer):
            # Post-process each streamed chunk; on failure, reset the user's
            # session and surface the raw answer.
            if final:
                try:
                    reply = self.build_source_attributions(answer, context)
                    log.info("[NewBing] reply:{}", reply)
                    yield True, reply
                except Exception as e:
                    log.warn(answer)
                    log.warn(e)
                    await user_session.get(context['from_user_id'], None).reset()
                    yield True, answer
            else:
                try:
                    yield False, answer
                except Exception as e:
                    log.warn(answer)
                    log.warn(e)
                    await user_session.get(context['from_user_id'], None).reset()
                    yield True, answer
        if not context or not context.get('type') or context.get('type') == 'TEXT':
            clear_memory_commands = common_conf_val(
                'clear_memory_commands', ['#清除记忆'])
            if query in clear_memory_commands:
                user_session[context['from_user_id']] = None
                yield True, '记忆已清除'
            bot = user_session.get(context['from_user_id'], None)
            if not bot:
                bot = self.bot
            else:
                # Existing session: a single digit may select a suggested reply.
                query = self.get_quick_ask_query(query, context)
            user_session[context['from_user_id']] = bot
            log.info("[NewBing] query={}".format(query))
            if self.jailbreak:
                # Jailbreak mode threads the message id for history replay.
                async for final, answer in bot.ask_stream(query, conversation_style=self.style, message_id=bot.user_message_id):
                    async for result in handle_answer(final, answer):
                        yield result
            else:
                async for final, answer in bot.ask_stream(query, conversation_style=self.style):
                    async for result in handle_answer(final, answer):
                        yield result

    def reply(self, query: str, context=None) -> tuple[str, dict]:
        """Blocking reply for TEXT queries, or image generation for
        IMAGE_CREATE contexts."""
        if not context or not context.get('type') or context.get('type') == 'TEXT':
            clear_memory_commands = common_conf_val(
                'clear_memory_commands', ['#清除记忆'])
            if query in clear_memory_commands:
                user_session[context['from_user_id']] = None
                return '记忆已清除'
            bot = user_session.get(context['from_user_id'], None)
            if (bot == None):
                bot = self.bot
            else:
                query = self.get_quick_ask_query(query, context)
            user_session[context['from_user_id']] = bot
            log.info("[NewBing] query={}".format(query))
            if (self.jailbreak):
                task = bot.ask(query, conversation_style=self.style,
                               message_id=bot.user_message_id)
            else:
                task = bot.ask(query, conversation_style=self.style)
            answer = asyncio.run(task)
            if isinstance(answer, str):
                # Error/explanation strings are returned as-is.
                return answer
            try:
                reply = answer["item"]["messages"][-1]
            except Exception as e:
                # Malformed answer (e.g. timed-out conversation): reset session.
                user_session.get(context['from_user_id'], None).reset()
                log.warn(answer)
                return "本轮对话已超时,已开启新的一轮对话,请重新提问。"
            return self.build_source_attributions(answer, context)
        elif context.get('type', None) == 'IMAGE_CREATE':
            # ImageGen only accepts English prompts.
            if functions.contain_chinese(query):
                return "ImageGen目前仅支持使用英文关键词生成图片"
            return self.create_img(query)

    def create_img(self, query):
        """Generate images for an English prompt via Bing ImageGen; returns a
        list of image URLs or an error string."""
        try:
            log.info("[NewBing] image_query={}".format(query))
            cookie_value = self.cookies[0]["value"]
            image_generator = ImageGen(cookie_value)
            img_list = image_generator.get_images(query)
            log.info("[NewBing] image_list={}".format(img_list))
            return img_list
        except Exception as e:
            log.warn(e)
            return "输入的内容可能违反微软的图片生成内容策略。过多的策略冲突可能会导致你被暂停访问。"

    def get_quick_ask_query(self, query, context):
        """Map a single non-zero digit to the corresponding cached suggested
        follow-up question; other queries pass through unchanged."""
        if (len(query) == 1 and query.isdigit() and query != "0"):
            suggestion_dict = suggestion_session[context['from_user_id']]
            if (suggestion_dict != None):
                query = suggestion_dict[int(query)-1]
                if (query == None):
                    return "输入的序号不在建议列表范围中"
                else:
                    query = "在上面的基础上,"+query
        return query

    def build_source_attributions(self, answer, context):
        """Format the final answer: reply text + numbered source links +
        suggested follow-ups + conversation-turn counter. Resets the session
        when the turn limit is reached or the reply was rejected."""
        reference = ""
        reply = answer["item"]["messages"][-1]
        reply_text = reply["text"]
        if "sourceAttributions" in reply:
            for i, attribution in enumerate(reply["sourceAttributions"]):
                display_name = attribution["providerDisplayName"]
                url = attribution["seeMoreUrl"]
                reference += f"{i+1}、[{display_name}]({url})\n\n"
            if len(reference) > 0:
                reference = "***\n"+reference
            suggestion = ""
            if "suggestedResponses" in reply:
                # Cache suggestions so the user can pick one by number.
                suggestion_dict = dict()
                for i, attribution in enumerate(reply["suggestedResponses"]):
                    suggestion_dict[i] = attribution["text"]
                    suggestion += f">{i+1}、{attribution['text']}\n\n"
                    suggestion_session[context['from_user_id']
                                       ] = suggestion_dict
                if len(suggestion) > 0:
                    suggestion = "***\n你可以通过输入序号快速追问我以下建议问题:\n\n"+suggestion
            throttling = answer["item"]["throttling"]
            throttling_str = ""
            if throttling["numUserMessagesInConversation"] == throttling["maxNumUserMessagesInConversation"]:
                user_session.get(context['from_user_id'], None).reset()
                throttling_str = "(对话轮次已达上限,本次聊天已结束,将开启新的对话)"
            else:
                throttling_str = f"对话轮次: {throttling['numUserMessagesInConversation']}/{throttling['maxNumUserMessagesInConversation']}\n"
            response = f"{reply_text}\n{reference}\n{suggestion}\n***\n{throttling_str}"
            log.info("[NewBing] reply={}", response)
            return response
        else:
            # No attributions at all: treat as a rejected reply.
            user_session.get(context['from_user_id'], None).reset()
            log.warn("[NewBing] reply={}", answer)
            return "对话被接口拒绝,已开启新的一轮对话。"
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
model/google/bard_bot.py | Python |
import json
import random
import requests
import re
class BardBot:
    """Minimal client for the Google Bard web UI.

    Authenticates with the __Secure-1PSID browser cookie, scrapes the SNlM0e
    anti-CSRF token from the landing page, and keeps conversation state
    (conversation/response/choice ids) across ask() calls.
    """
    BARD_URL = "https://bard.google.com/"
    BARD_CHAT_URL = (
        "https://bard.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate"
    )
    HEADERS = {
        "Host": "bard.google.com",
        "X-Same-Domain": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
        "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
        "Origin": "https://bard.google.com",
        "Referer": "https://bard.google.com/",
    }

    def __init__(self, session_id: str):
        # _reqid mimics the web client's request counter (bumped each ask).
        self._reqid = random.randrange(10000,99999)
        self.conversation_id = ""
        self.response_id = ""
        self.choice_id = ""
        self.session = requests.Session()
        self.session.headers = self.HEADERS
        self.session.cookies.set("__Secure-1PSID", session_id)
        self.SNlM0e = self.__get_snlm0e()

    def __get_snlm0e(self) -> str:
        """Scrape the SNlM0e token from the Bard landing page HTML."""
        resp = self.session.get(url=self.BARD_URL, timeout=10)
        if resp.status_code != 200:
            raise Exception("Failed to connect Google Bard")
        try:
            SNlM0e = re.search(r"SNlM0e\":\"(.*?)\"", resp.text).group(1)
            return SNlM0e
        except Exception as e:
            raise Exception(f"Cookies may be wrong:{e}")

    def ask(self, message: str) -> dict[str, str]:
        """Send `message`, parse the batched-RPC reply, update conversation
        state, and return content/ids/reference/choices."""
        params = {
            "bl": "boq_assistant-bard-web-server_20230326.21_p0",
            "_reqid": str(self._reqid),
            "rt": "c",
        }
        message_struct = [[message], None, [self.conversation_id, self.response_id, self.choice_id]]
        data = {"f.req": json.dumps([None, json.dumps(message_struct)]), "at": self.SNlM0e}
        try:
            resp = self.session.post(self.BARD_CHAT_URL, params=params, data=data)
            # NOTE(review): the next line is redundant — the walrus below
            # re-parses the exact same expression; the first parse can go.
            content = json.loads(resp.content.splitlines()[3])[0][2]
            if not (content := json.loads(resp.content.splitlines()[3])[0][2]):
                return {"content": f"Bard encountered an error: {resp.content}."}
            json_data = json.loads(content)
            results = {
                "content": json_data[0][0],
                "conversation_id": json_data[1][0],
                "response_id": json_data[1][1],
                "reference": json_data[3],
                "choices": [{"id": i[0], "content": i[1]} for i in json_data[4]],
            }
            # Persist state so the next ask continues this conversation.
            self.conversation_id = results['conversation_id']
            self.response_id = results['response_id']
            self.choice_id = results["choices"][0]["id"]
            self._reqid += 100000
            return results
        except Exception as e:
            raise Exception(f"Failed to ask Google Bard:{e}")
model/google/bard_model.py | Python | # encoding:utf-8
from .bard_bot import BardBot
from config import model_conf_val
from model.model import Model
from common import log
user_session = dict()
class BardModel(Model):
    """Model backend wrapping BardBot; keeps one bot per user in
    `user_session` and renders Bard's citations as footnotes."""
    bot: BardBot = None

    def __init__(self):
        try:
            self.cookies = model_conf_val("bard", "cookie")
            self.bot = BardBot(self.cookies)
        except Exception as e:
            log.warn(e)

    def reply(self, query: str, context=None) -> dict[str, str]:
        """Answer a TEXT query via Bard and return the (annotated) reply."""
        if not context or not context.get('type') or context.get('type') == 'TEXT':
            bot = user_session.get(context['from_user_id'], None)
            if bot is None:
                bot = self.bot
            user_session[context['from_user_id']] = bot
            log.info(f"[Bard] query={query}")
            answer = bot.ask(query)
            # Bard returns up to 3 drafts; currently the first one is used.
            reply = answer['content']
            if answer['reference']:
                # Flatten the reference structure, sort by position descending
                # so inserting markers does not shift later offsets.
                reference = [({'index': item[0], 'reference':item[2][0] if item[2][0] else item[2][1]}) for item in answer['reference'][0]]
                reference.sort(key=lambda x: x['index'], reverse=True)
                reply = self.insert_reference(reply, reference)
            log.warn(f"[Bard] answer={reply}")
            return reply

    async def reply_text_stream(self, query: str, context=None) -> dict:
        # Bard has no streaming API; emit the whole reply as one final chunk.
        reply = self.reply(query, context)
        yield True, reply

    def insert_reference(self, reply: str, reference: list) -> str:
        """Insert [^n] footnote markers into `reply` and append the source
        list (expects `reference` sorted by index descending)."""
        refer = '\n***\n\n'
        length = len(reference)
        for i, item in enumerate(reference):
            index = item["index"] - 1
            reply = reply[:index] + f'[^{length-i}]' + reply[index:]
            refer += f'- ^{i+1}:{item["reference"]}\n\n'
        refer += '***'
        return reply + refer
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
model/linkai/link_ai_bot.py | Python | # access LinkAI knowledge base platform
# docs: https://link-ai.tech/platform/link-app/wechat
from model.model import Model
from config import model_conf, common_conf_val, channel_conf_val
from common import const
from common import log
import time
import requests
import threading
import os
import re
import json
user_session = dict()
class LinkAIBot(Model):
    """Chat bot backed by the LinkAI platform API (chat completions, knowledge
    base, agent plugins, and image generation).

    docs: https://link-ai.tech/platform/link-app/wechat
    """

    # LinkAI API status codes
    AUTH_FAILED_CODE = 401  # authentication failed
    NO_QUOTA_CODE = 406     # account quota exhausted

    def __init__(self):
        super().__init__()
        # Extra request arguments (e.g. "max_tokens"). reply_text_stream()
        # reads this attribute, so it must always exist; previously it was
        # commented out and the streaming path crashed with AttributeError.
        self.args = {}

    def reply(self, query, context=None):
        """Reply to a message.

        :param query: user message content
        :param context: request context dict; expects 'type' and 'from_user_id'
        :return: reply text for TEXT requests; for IMAGE_CREATE, a list with the
                 image url on success or an error text on failure
        """
        if not context or not context.get('type') or context.get('type') == 'TEXT':
            log.info("[LINKAI] query={}".format(query))
            from_user_id = context['from_user_id']
            clear_memory_commands = common_conf_val('clear_memory_commands', ['#清除记忆'])
            if query in clear_memory_commands:
                Session.clear_session(from_user_id)
                return '记忆已清除'
            new_query = Session.build_session_query(query, from_user_id)
            context['session'] = new_query  # attach session messages for _chat()
            log.debug("[LINKAI] session query={}".format(new_query))
            reply_content = self._chat(query, context)
            log.debug("[LINKAI] new_query={}, user={}, reply_cont={}".format(new_query, from_user_id, reply_content))
            return reply_content
        elif context.get('type', None) == 'IMAGE_CREATE':
            ok, res = self.create_img(query, 0)
            if ok:
                return [res]
            else:
                return res

    def _chat(self, query, context, retry_count=0):
        """
        Send one chat completion request to the LinkAI API.

        :param query: prompt text (used for logging and session persistence)
        :param context: request context; must contain 'session' and 'from_user_id'
        :param retry_count: current recursion depth (at most 2 retries)
        :return: reply text (error text on failure)
        """
        if retry_count > 2:
            # exit after the maximum number of retries
            log.warn("[LINKAI] failed after maximum number of retry times")
            return "请再问我一次吧"
        try:
            linkai_api_key = model_conf(const.LINKAI).get('api_key')
            model = model_conf(const.LINKAI).get("model")            # chat model name
            app_code = model_conf(const.LINKAI).get("app_code", "")  # LinkAI app code
            new_query_session = context.get("session")
            user_id = context['from_user_id']
            # drop the system message when an app is used (the app carries its
            # own persona) or for models that do not accept one
            if new_query_session[0].get("role") == "system":
                if app_code or model == "wenxin":
                    new_query_session.pop(0)
            body = {
                "app_code": app_code,
                "messages": new_query_session,
                "model": model,  # supports gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin, xunfei
                "temperature": model_conf(const.LINKAI).get("temperature", 0.75),
                "top_p": model_conf(const.LINKAI).get("top_p", 1),
                "frequency_penalty": model_conf(const.LINKAI).get("frequency_penalty", 0.0),  # [-2,2]; higher -> less repetition
                "presence_penalty": model_conf(const.LINKAI).get("presence_penalty", 0.0),    # [-2,2]; higher -> more novel content
                "sender_id": user_id
            }
            log.info(f"[LINKAI] query={query}, app_code={app_code}, model={body.get('model')}")
            headers = {"Authorization": "Bearer " + linkai_api_key}
            # do http request
            base_url = model_conf(const.LINKAI).get("api_base", "https://api.link-ai.tech")
            res = requests.post(url=base_url + "/v1/chat/completions", json=body, headers=headers,
                                timeout=180)
            if res.status_code == 200:
                # execute success
                response = res.json()
                reply_content = response["choices"][0]["message"]["content"]
                total_tokens = response["usage"]["total_tokens"]
                res_code = response.get('code')
                log.info(f"[LINKAI] reply={reply_content}, total_tokens={total_tokens}, res_code={res_code}")
                if res_code == 429:
                    log.warn(f"[LINKAI] 用户访问超出限流配置,sender_id={body.get('sender_id')}")
                else:
                    Session.save_session(query, reply_content, user_id, total_tokens)
                # append agent / knowledge-base info to the reply when present
                agent_suffix = self._fetch_agent_suffix(response)
                if agent_suffix:
                    reply_content += agent_suffix
                if not agent_suffix:
                    knowledge_suffix = self._fetch_knowledge_search_suffix(response)
                    if knowledge_suffix:
                        reply_content += knowledge_suffix
                # image handling: deliver urls through the channel when it can send media
                if response["choices"][0].get("img_urls"):
                    if 'send' in type(context['channel']).__dict__:  # channel class defines its own send()
                        thread = threading.Thread(target=self._send_image, args=(context['channel'], context, response["choices"][0].get("img_urls")))
                        thread.start()
                        if response["choices"][0].get("text_content"):
                            reply_content = response["choices"][0].get("text_content")
                    else:
                        # channel cannot push media: merge text and image urls into one reply
                        reply_content = response["choices"][0].get("text_content", "") + " " + " ".join(response["choices"][0].get("img_urls"))
                    reply_content = self._process_url(reply_content)
                return reply_content
            else:
                response = res.json()
                error = response.get("error")
                log.error(f"[LINKAI] chat failed, status_code={res.status_code}, "
                          f"msg={error.get('message')}, type={error.get('type')}")
                if res.status_code >= 500:
                    # server error: back off and retry
                    time.sleep(2)
                    log.warn(f"[LINKAI] do retry, times={retry_count}")
                    return self._chat(query, context, retry_count + 1)
                error_reply = "提问太快啦,请休息一下再问我吧"
                if res.status_code == 409:
                    error_reply = "这个问题我还没有学会,请问我其它问题吧"
                return error_reply
        except Exception as e:
            log.exception(e)
            # network/parse error: back off and retry
            time.sleep(2)
            log.warn(f"[LINKAI] do retry, times={retry_count}")
            return self._chat(query, context, retry_count + 1)

    async def reply_text_stream(self, query, context, retry_count=0):
        """Stream a reply from the LinkAI API.

        Yields (done, accumulated_text) tuples; the final tuple has done=True.

        :param query: prompt text
        :param context: request context; must contain 'session' and 'from_user_id'
        :param retry_count: current recursion depth (at most 2 retries)
        """
        if retry_count >= 2:
            # exit after the maximum number of retries
            log.warn("[LINKAI] failed after maximum number of retry times")
            yield True, "请再问我一次吧"
            return  # fix: previously fell through and issued the request anyway
        try:
            linkai_api_key = model_conf(const.LINKAI).get('api_key')
            model = model_conf(const.LINKAI).get("model")            # chat model name
            app_code = model_conf(const.LINKAI).get("app_code", "")  # LinkAI app code
            new_query_session = context.get("session")
            user_id = context['from_user_id']
            # drop the system message when an app is used or the model rejects one
            if new_query_session[0].get("role") == "system":
                if app_code or model == "wenxin":
                    new_query_session.pop(0)
            body = {
                "app_code": app_code,
                "messages": new_query_session,
                "model": model,  # supports gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin, xunfei
                "temperature": model_conf(const.LINKAI).get("temperature", 0.75),
                "top_p": model_conf(const.LINKAI).get("top_p", 1),
                "frequency_penalty": model_conf(const.LINKAI).get("frequency_penalty", 0.0),  # [-2,2]; higher -> less repetition
                "presence_penalty": model_conf(const.LINKAI).get("presence_penalty", 0.0),    # [-2,2]; higher -> more novel content
                "sender_id": user_id,
                "stream": True
            }
            if self.args.get("max_tokens"):
                body["max_tokens"] = self.args.get("max_tokens")
            headers = {"Authorization": "Bearer " + linkai_api_key}
            # do http request (SSE stream)
            base_url = model_conf(const.LINKAI).get("api_base", "https://api.link-ai.tech")
            res = requests.post(url=base_url + "/v1/chat/completions", json=body, headers=headers, stream=True,
                                timeout=180)
            if res.status_code == 200:
                full_response = ""
                for i in res.iter_lines():
                    st = str(i, encoding="utf-8")
                    st = st.replace("data: ", "", 1)
                    if st:
                        if st == "[DONE]":  # end of stream
                            break
                        chunk = json.loads(st)
                        if not chunk.get("choices"):
                            continue
                        chunk_message = chunk["choices"][0]["delta"].get("content")
                        if chunk_message:
                            full_response += chunk_message
                            yield False, full_response
                Session.save_session(query, full_response, user_id)
                log.info("[LinkAI]: reply={}", full_response)
                yield True, full_response
            else:
                response = res.json()
                error = response.get("error")
                log.error(f"[LINKAI] chat failed, status_code={res.status_code}, "
                          f"msg={error.get('message')}, type={error.get('type')}")
                if res.status_code >= 500:
                    # server error: back off and retry; delegate to the retried
                    # generator (fix: previously yielded the generator object itself
                    # and then fell through to yield a second error reply)
                    time.sleep(2)
                    log.warn(f"[LINKAI] do retry, times={retry_count}")
                    async for item in self.reply_text_stream(query, context, retry_count + 1):
                        yield item
                    return
                error_reply = "提问太快啦,请休息一下再问我吧"
                if res.status_code == 409:
                    error_reply = "这个问题我还没有学会,请问我其它问题吧"
                yield True, error_reply
        except Exception as e:
            log.exception(e)
            # retry, delegating to the retried generator (fix: was yielding the
            # generator object instead of its items)
            time.sleep(2)
            log.warn(f"[LINKAI] do retry, times={retry_count}")
            async for item in self.reply_text_stream(query, context, retry_count + 1):
                yield item

    def create_img(self, query, retry_count=0, api_key=None):
        """Generate an image via the LinkAI image API.

        :param query: image prompt
        :param retry_count: unused; kept for interface compatibility
        :param api_key: unused; kept for interface compatibility
        :return: (True, image_url) on success, (False, error_text) on failure
        """
        try:
            log.info("[LinkImage] image_query={}".format(query))
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {model_conf(const.LINKAI).get('api_key')}"
            }
            data = {
                "prompt": query,
                "n": 1,
                "model": model_conf(const.LINKAI).get("text_to_image") or "dall-e-3",
                "response_format": "url",
            }
            # NOTE(review): reads the "linkai_api_base" key while _chat reads
            # "api_base" — confirm which config key is intended
            url = model_conf(const.LINKAI).get("linkai_api_base", "https://api.link-ai.tech") + "/v1/images/generations"
            res = requests.post(url, headers=headers, json=data, timeout=(5, 90))
            image_url = res.json()["data"][0]["url"]
            log.info("[OPEN_AI] image_url={}".format(image_url))
            return True, image_url
        except Exception as e:
            log.error(format(e))
            return False, "画图出现问题,请休息一下再问我吧"

    def _fetch_knowledge_search_suffix(self, response) -> str:
        """Log knowledge-base hit info from the response.

        Currently only logs and returns None (the configurable miss-text suffix
        feature is not enabled in this project).
        """
        try:
            if response.get("knowledge_base"):
                search_hit = response.get("knowledge_base").get("search_hit")
                first_similarity = response.get("knowledge_base").get("first_similarity")
                log.info(f"[LINKAI] knowledge base, search_hit={search_hit}, first_similarity={first_similarity}")
        except Exception as e:
            log.exception(e)

    def _fetch_agent_suffix(self, response):
        """Build a human-readable suffix describing the agent plugin chain.

        :param response: parsed API response
        :return: suffix text when the response carries a visible plugin chain,
                 otherwise None
        """
        try:
            plugin_list = []
            log.debug(f"[LinkAgent] res={response}")
            if response.get("agent") and response.get("agent").get("chain") and response.get("agent").get("need_show_plugin"):
                chain = response.get("agent").get("chain")
                suffix = "\n\n- - - - - - - - - - - -"
                i = 0
                for turn in chain:
                    plugin_name = turn.get('plugin_name')
                    suffix += "\n"
                    need_show_thought = response.get("agent").get("need_show_thought")
                    if turn.get("thought") and plugin_name and need_show_thought:
                        suffix += f"{turn.get('thought')}\n"
                    if plugin_name:
                        plugin_list.append(turn.get('plugin_name'))
                        if turn.get('plugin_icon'):
                            suffix += f"{turn.get('plugin_icon')} "
                        suffix += f"{turn.get('plugin_name')}"
                        if turn.get('plugin_input'):
                            suffix += f":{turn.get('plugin_input')}"
                    if i < len(chain) - 1:
                        suffix += "\n"
                    i += 1
                log.info(f"[LinkAgent] use plugins: {plugin_list}")
                return suffix
        except Exception as e:
            log.exception(e)

    # convert markdown-formatted links into plain urls
    def _process_url(self, text):
        """Replace every markdown link [title](url) in text with the bare url."""
        try:
            url_pattern = re.compile(r'\[(.*?)\]\((http[s]?://.*?)\)')

            def replace_markdown_url(match):
                return f"{match.group(2)}"

            return url_pattern.sub(replace_markdown_url, text)
        except Exception as e:
            log.error(e)

    def _send_image(self, channel, context, image_urls):
        """Push image urls to the channel one by one, honoring the configured
        send limit and inter-send interval."""
        if not image_urls:
            return
        max_send_num = model_conf(const.LINKAI).get("max_media_send_count")
        send_interval = model_conf(const.LINKAI).get("media_send_interval")
        try:
            i = 0
            for url in image_urls:
                if max_send_num and i >= max_send_num:
                    continue  # skip remaining urls once the limit is reached
                i += 1
                reply = url
                channel.send(reply, context["from_user_id"])
                if send_interval:
                    time.sleep(send_interval)
        except Exception as e:
            log.error(e)
def _download_file(url: str):
    """Download a file into the local ./tmp directory.

    :param url: file url; the last path segment is used as the local file name
    :return: local file path on success, None on failure (logged)
    """
    try:
        file_path = "tmp"
        if not os.path.exists(file_path):
            os.makedirs(file_path)
        file_name = url.split("/")[-1]  # derive local name from the url
        file_path = os.path.join(file_path, file_name)
        # fix: bound the request so a stalled download cannot hang the caller forever
        response = requests.get(url, timeout=(5, 60))
        with open(file_path, "wb") as f:
            f.write(response.content)
        return file_path
    except Exception as e:
        log.warn(e)
class Session(object):
    """Per-user conversation history for the LinkAI model, kept in the
    module-level ``user_session`` dict."""

    @staticmethod
    def build_session_query(query, user_id):
        '''
        Build the message list for a request, including conversation history.
        e.g. [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Who won the world series in 2020?"},
            {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
            {"role": "user", "content": "Where was it played?"}
        ]
        :param query: query content
        :param user_id: from user id
        :return: message list including conversation history
        '''
        history = user_session.get(user_id, [])
        if not history:
            # first turn for this user: seed with the configured persona
            persona = model_conf(const.LINKAI).get("character_desc", "")
            history.append({'role': 'system', 'content': persona})
            user_session[user_id] = history
        history.append({'role': 'user', 'content': query})
        return history

    @staticmethod
    def save_session(query, answer, user_id, used_tokens=0):
        """Append the assistant answer and trim history by token budget and
        configured turn count."""
        token_limit = model_conf(const.LINKAI).get('conversation_max_tokens')
        turn_limit = model_conf(const.LINKAI).get('max_history_num', None)
        if not token_limit or token_limit > 4000:
            token_limit = 1000  # default value
        history = user_session.get(user_id)
        if history:
            history.append({'role': 'assistant', 'content': answer})
            if used_tokens > token_limit and len(history) >= 3:
                # drop the oldest Q/A pair (TODO: more accurate calculation)
                del history[1:3]
            if turn_limit is not None:
                while len(history) > turn_limit * 2 + 1:
                    del history[1:3]

    @staticmethod
    def clear_session(user_id):
        """Forget all conversation history for the given user."""
        user_session[user_id] = []
#
# class LinkAISessionManager(SessionManager):
# def session_msg_query(self, query, session_id):
# session = self.build_session(session_id)
# messages = session.messages + [{"role": "user", "content": query}]
# return messages
#
# def session_reply(self, reply, session_id, total_tokens=None, query=None):
# session = self.build_session(session_id)
# if query:
# session.add_query(query)
# session.add_reply(reply)
# try:
# max_tokens = conf().get("conversation_max_tokens", 2500)
# tokens_cnt = session.discard_exceeding(max_tokens, total_tokens)
# log.debug(f"[LinkAI] chat history, before tokens={total_tokens}, now tokens={tokens_cnt}")
# except Exception as e:
# log.warning("Exception when counting tokens precisely for session: {}".format(str(e)))
# return session
#
#
# class LinkAISession(ChatGPTSession):
# def calc_tokens(self):
# if not self.messages:
# return 0
# return len(str(self.messages))
#
# def discard_exceeding(self, max_tokens, cur_tokens=None):
# cur_tokens = self.calc_tokens()
# if cur_tokens > max_tokens:
# for i in range(0, len(self.messages)):
# if i > 0 and self.messages[i].get("role") == "assistant" and self.messages[i - 1].get("role") == "user":
# self.messages.pop(i)
# self.messages.pop(i - 1)
# return self.calc_tokens()
# return cur_tokens
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
model/model.py | Python | """
Auto-reply chat robot abstract class
"""
class Model(object):
    """Abstract base class for auto-reply chat bot models."""

    def reply(self, query, context=None):
        """Produce a reply for a received message.

        :param query: received message content
        :param context: optional request context
        :return: reply content
        :raises NotImplementedError: always; subclasses must override
        """
        raise NotImplementedError
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
model/model_factory.py | Python | """
channel factory
"""
from common import const
def create_bot(model_type):
    """
    Create a model (bot) instance by type code.

    Imports are deliberately local so only the selected model's dependencies
    are loaded.

    :param model_type: model type code (see common.const)
    :return: model instance
    :raises RuntimeError: if the model type is unknown
    """
    if model_type == const.OPEN_AI:
        # OpenAI completion API (gpt-3.0)
        from model.openai.open_ai_model import OpenAIModel
        return OpenAIModel()
    elif model_type == const.CHATGPT:
        # ChatGPT API (gpt-3.5-turbo)
        from model.openai.chatgpt_model import ChatGPTModel
        return ChatGPTModel()
    elif model_type == const.BAIDU:
        from model.baidu.yiyan_model import YiyanModel
        return YiyanModel()
    elif model_type == const.BING:
        from model.bing.new_bing_model import BingModel
        return BingModel()
    elif model_type == const.BARD:
        from model.google.bard_model import BardModel
        return BardModel()
    elif model_type == const.LINKAI:
        from model.linkai.link_ai_bot import LinkAIBot
        return LinkAIBot()
    # fix: include the offending type in the error instead of a bare RuntimeError
    raise RuntimeError("unsupported model type: {}".format(model_type))
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
model/openai/chatgpt_model.py | Python | # encoding:utf-8
from model.model import Model
from config import model_conf, common_conf_val
from common import const
from common import log
import openai
import time
user_session = dict()
# OpenAI对话模型API (可用)
# OpenAI chat completion API model (gpt-3.5-turbo and newer)
class ChatGPTModel(Model):
    """Model backed by the OpenAI ChatCompletion API (chat + image creation)."""

    def __init__(self):
        openai.api_key = model_conf(const.OPEN_AI).get('api_key')
        api_base = model_conf(const.OPEN_AI).get('api_base')
        if api_base:
            openai.api_base = api_base
        proxy = model_conf(const.OPEN_AI).get('proxy')
        if proxy:
            openai.proxy = proxy
        log.info("[CHATGPT] api_base={} proxy={}".format(
            api_base, proxy))

    def reply(self, query, context=None):
        """Reply to a message.

        :param query: user message content
        :param context: request context dict; expects 'type' and 'from_user_id'
        :return: reply text for TEXT requests, or a list with an image url for
                 IMAGE_CREATE requests
        """
        if not context or not context.get('type') or context.get('type') == 'TEXT':
            log.info("[CHATGPT] query={}".format(query))
            from_user_id = context['from_user_id']
            clear_memory_commands = common_conf_val('clear_memory_commands', ['#清除记忆'])
            if query in clear_memory_commands:
                Session.clear_session(from_user_id)
                return '记忆已清除'
            new_query = Session.build_session_query(query, from_user_id)
            log.debug("[CHATGPT] session query={}".format(new_query))
            reply_content = self.reply_text(new_query, from_user_id, 0)
            return reply_content
        elif context.get('type', None) == 'IMAGE_CREATE':
            return self.create_img(query, 0)

    def reply_text(self, query, user_id, retry_count=0):
        """Call the ChatCompletion API once, retrying once on rate limit.

        :param query: message list built by Session.build_session_query
        :param user_id: sender id used to persist / clear the conversation
        :param retry_count: current retry depth
        :return: reply text (error text on failure)
        """
        try:
            response = openai.ChatCompletion.create(
                model=model_conf(const.OPEN_AI).get("model") or "gpt-3.5-turbo",
                messages=query,
                temperature=model_conf(const.OPEN_AI).get("temperature", 0.75),  # in [0,1]; higher -> more random output
                frequency_penalty=model_conf(const.OPEN_AI).get("frequency_penalty", 0.0),  # [-2,2]; higher -> less repetition
                presence_penalty=model_conf(const.OPEN_AI).get("presence_penalty", 1.0)     # [-2,2]; higher -> more novel content
            )
            reply_content = response.choices[0]['message']['content']
            used_token = response['usage']['total_tokens']
            log.debug(response)
            log.info("[CHATGPT] reply={}", reply_content)
            if reply_content:
                # save conversation
                Session.save_session(query, reply_content, user_id, used_token)
            return response.choices[0]['message']['content']
        except openai.error.RateLimitError as e:
            # rate limit exception: back off and retry once
            log.warn(e)
            if retry_count < 1:
                time.sleep(5)
                log.warn("[CHATGPT] RateLimit exceed, 第{}次重试".format(retry_count+1))
                return self.reply_text(query, user_id, retry_count+1)
            else:
                return "提问太快啦,请休息一下再问我吧"
        except openai.error.APIConnectionError as e:
            log.warn(e)
            log.warn("[CHATGPT] APIConnection failed")
            return "我连接不到网络,请稍后重试"
        except openai.error.Timeout as e:
            log.warn(e)
            log.warn("[CHATGPT] Timeout")
            return "我没有收到消息,请稍后重试"
        except Exception as e:
            # unknown exception: reset the session so the next turn starts clean
            log.exception(e)
            Session.clear_session(user_id)
            return "请再问我一次吧"

    async def reply_text_stream(self, query, context, retry_count=0):
        """Stream a reply; yields (done, accumulated_text) tuples.

        :param query: user message content
        :param context: request context dict; expects 'from_user_id'
        :param retry_count: current retry depth
        """
        try:
            user_id = context['from_user_id']
            new_query = Session.build_session_query(query, user_id)
            res = openai.ChatCompletion.create(
                model=model_conf(const.OPEN_AI).get("model") or "gpt-3.5-turbo",
                messages=new_query,
                temperature=model_conf(const.OPEN_AI).get("temperature", 0.75),  # in [0,1]; higher -> more random output
                frequency_penalty=model_conf(const.OPEN_AI).get("frequency_penalty", 0.0),  # [-2,2]; higher -> less repetition
                presence_penalty=model_conf(const.OPEN_AI).get("presence_penalty", 1.0),    # [-2,2]; higher -> more novel content
                stream=True
            )
            full_response = ""
            for chunk in res:
                log.debug(chunk)
                if (chunk["choices"][0]["finish_reason"] == "stop"):
                    break
                chunk_message = chunk['choices'][0]['delta'].get("content")
                if chunk_message:
                    full_response += chunk_message
                    yield False, full_response
            Session.save_session(query, full_response, user_id)
            log.info("[chatgpt]: reply={}", full_response)
            yield True, full_response
        except openai.error.RateLimitError as e:
            # rate limit exception: back off and retry once
            log.warn(e)
            if retry_count < 1:
                time.sleep(5)
                log.warn("[CHATGPT] RateLimit exceed, 第{}次重试".format(retry_count+1))
                # fix: delegate to the retried generator (was yielding the generator
                # object itself) and pass context (was passing user_id)
                async for item in self.reply_text_stream(query, context, retry_count+1):
                    yield item
            else:
                yield True, "提问太快啦,请休息一下再问我吧"
        except openai.error.APIConnectionError as e:
            log.warn(e)
            log.warn("[CHATGPT] APIConnection failed")
            yield True, "我连接不到网络,请稍后重试"
        except openai.error.Timeout as e:
            log.warn(e)
            log.warn("[CHATGPT] Timeout")
            yield True, "我没有收到消息,请稍后重试"
        except Exception as e:
            # unknown exception: reset the session so the next turn starts clean
            log.exception(e)
            Session.clear_session(user_id)
            yield True, "请再问我一次吧"

    def create_img(self, query, retry_count=0):
        """Generate an image for a prompt.

        :param query: image description
        :param retry_count: current retry depth
        :return: [image_url] on success, error text on rate-limit exhaustion,
                 None on other failures
        """
        try:
            log.info("[OPEN_AI] image_query={}".format(query))
            response = openai.Image.create(
                prompt=query,    # image description
                n=1,             # number of images per request
                size="256x256"   # one of 256x256, 512x512, 1024x1024
            )
            image_url = response['data'][0]['url']
            log.info("[OPEN_AI] image_url={}".format(image_url))
            return [image_url]
        except openai.error.RateLimitError as e:
            log.warn(e)
            if retry_count < 1:
                time.sleep(5)
                log.warn("[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(retry_count+1))
                # fix: retry image creation (previously recursed into reply_text)
                return self.create_img(query, retry_count+1)
            else:
                return "提问太快啦,请休息一下再问我吧"
        except Exception as e:
            log.exception(e)
            return None
class Session(object):
    """Per-user chat-message history for the ChatGPT model, kept in the
    module-level ``user_session`` dict."""

    @staticmethod
    def build_session_query(query, user_id):
        '''
        Build the message list for a request, including conversation history.
        e.g. [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Who won the world series in 2020?"},
            {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
            {"role": "user", "content": "Where was it played?"}
        ]
        :param query: query content
        :param user_id: from user id
        :return: message list including conversation history
        '''
        messages = user_session.get(user_id, [])
        if not messages:
            # first turn: seed with the configured persona as the system message
            persona = model_conf(const.OPEN_AI).get("character_desc", "")
            messages.append({'role': 'system', 'content': persona})
            user_session[user_id] = messages
        messages.append({'role': 'user', 'content': query})
        return messages

    @staticmethod
    def save_session(query, answer, user_id, used_tokens=0):
        """Append the assistant answer and trim history by token budget and
        configured turn count."""
        token_limit = model_conf(const.OPEN_AI).get('conversation_max_tokens')
        turn_limit = model_conf(const.OPEN_AI).get('max_history_num', None)
        if not token_limit or token_limit > 4000:
            token_limit = 1000  # default value
        messages = user_session.get(user_id)
        if messages:
            messages.append({'role': 'assistant', 'content': answer})
            if used_tokens > token_limit and len(messages) >= 3:
                # drop the oldest Q/A pair (TODO: more accurate calculation)
                del messages[1:3]
            if turn_limit is not None:
                while len(messages) > turn_limit * 2 + 1:
                    del messages[1:3]

    @staticmethod
    def clear_session(user_id):
        """Forget all conversation history for the given user."""
        user_session[user_id] = []
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
model/openai/open_ai_model.py | Python | # encoding:utf-8
from model.model import Model
from config import model_conf, common_conf_val
from common import const
from common import log
import openai
import time
user_session = dict()
# OpenAI对话模型API (可用)
# OpenAI completion API model (text-davinci style)
class OpenAIModel(Model):
    """Model backed by the legacy OpenAI Completion API (text + image creation)."""

    def __init__(self):
        openai.api_key = model_conf(const.OPEN_AI).get('api_key')
        api_base = model_conf(const.OPEN_AI).get('api_base')
        if api_base:
            openai.api_base = api_base
            log.info("[OPEN_AI] api_base={}".format(openai.api_base))
        self.model = model_conf(const.OPEN_AI).get('model', 'text-davinci-003')
        proxy = model_conf(const.OPEN_AI).get('proxy')
        if proxy:
            openai.proxy = proxy

    def reply(self, query, context=None):
        """Reply to a message.

        :param query: user message content
        :param context: request context dict; expects 'type', 'from_user_id',
                        and optionally 'stream'
        :return: reply text (or an async generator when streaming) for TEXT
                 requests, or a list with an image url for IMAGE_CREATE requests
        """
        if not context or not context.get('type') or context.get('type') == 'TEXT':
            log.info("[OPEN_AI] query={}".format(query))
            from_user_id = context['from_user_id']
            clear_memory_commands = common_conf_val('clear_memory_commands', ['#清除记忆'])
            if query in clear_memory_commands:
                Session.clear_session(from_user_id)
                return '记忆已清除'
            new_query = Session.build_session_query(query, from_user_id)
            log.debug("[OPEN_AI] session query={}".format(new_query))
            if context.get('stream'):
                # reply in stream
                # fix: reply_text_stream expects (query, context); it was called
                # with (query, new_query, from_user_id), which crashed on
                # context['from_user_id']
                return self.reply_text_stream(query, context)
            reply_content = self.reply_text(new_query, from_user_id, 0)
            log.debug("[OPEN_AI] new_query={}, user={}, reply_cont={}".format(new_query, from_user_id, reply_content))
            if reply_content and query:
                Session.save_session(query, reply_content, from_user_id)
            return reply_content
        elif context.get('type', None) == 'IMAGE_CREATE':
            return self.create_img(query, 0)

    def reply_text(self, query, user_id, retry_count=0):
        """Call the Completion API once, retrying once on rate limit.

        :param query: full prompt built by Session.build_session_query
        :param user_id: sender id (used to clear the session on fatal errors)
        :param retry_count: current retry depth
        :return: reply text (error text on failure)
        """
        try:
            response = openai.Completion.create(
                model=self.model,
                prompt=query,
                temperature=model_conf(const.OPEN_AI).get("temperature", 0.75),  # in [0,1]; higher -> more random output
                frequency_penalty=model_conf(const.OPEN_AI).get("frequency_penalty", 0.0),  # [-2,2]; higher -> less repetition
                presence_penalty=model_conf(const.OPEN_AI).get("presence_penalty", 1.0),    # [-2,2]; higher -> more novel content
                stop=["\n\n\n"]
            )
            res_content = response.choices[0]['text'].strip().replace('<|endoftext|>', '')
            log.info("[OPEN_AI] reply={}".format(res_content))
            return res_content
        except openai.error.RateLimitError as e:
            # rate limit exception: back off and retry once
            log.warn(e)
            if retry_count < 1:
                time.sleep(5)
                log.warn("[OPEN_AI] RateLimit exceed, 第{}次重试".format(retry_count+1))
                return self.reply_text(query, user_id, retry_count+1)
            else:
                return "提问太快啦,请休息一下再问我吧"
        except Exception as e:
            # unknown exception: reset the session so the next turn starts clean
            log.exception(e)
            Session.clear_session(user_id)
            return "请再问我一次吧"

    async def reply_text_stream(self, query, context, retry_count=0):
        """Stream a reply; yields (done, accumulated_text) tuples.

        :param query: user message content
        :param context: request context dict; expects 'from_user_id'
        :param retry_count: current retry depth
        """
        try:
            user_id = context['from_user_id']
            new_query = Session.build_session_query(query, user_id)
            res = openai.Completion.create(
                model=self.model,  # fix: honor the configured model (was hardcoded "text-davinci-003")
                prompt=new_query,
                temperature=model_conf(const.OPEN_AI).get("temperature", 0.75),  # in [0,1]; higher -> more random output
                max_tokens=model_conf(const.OPEN_AI).get("conversation_max_tokens", 3000),  # required for davinci streaming, otherwise the stream cuts off
                frequency_penalty=model_conf(const.OPEN_AI).get("frequency_penalty", 0.0),  # [-2,2]; higher -> less repetition
                presence_penalty=model_conf(const.OPEN_AI).get("presence_penalty", 1.0),    # [-2,2]; higher -> more novel content
                stream=True
            )
            full_response = ""
            for chunk in res:
                log.debug(chunk)
                if (chunk["choices"][0]["finish_reason"] == "stop"):
                    break
                chunk_message = chunk['choices'][0].get("text")
                if chunk_message:
                    full_response += chunk_message
                    yield False, full_response
            Session.save_session(query, full_response, user_id)
            log.info("[chatgpt]: reply={}", full_response)
            yield True, full_response
        except openai.error.RateLimitError as e:
            # rate limit exception: back off and retry once
            log.warn(e)
            if retry_count < 1:
                time.sleep(5)
                log.warn("[CHATGPT] RateLimit exceed, 第{}次重试".format(retry_count+1))
                # fix: delegate to the retried generator (was yielding the generator
                # object itself) and pass context (was passing user_id)
                async for item in self.reply_text_stream(query, context, retry_count+1):
                    yield item
            else:
                yield True, "提问太快啦,请休息一下再问我吧"
        except openai.error.APIConnectionError as e:
            log.warn(e)
            log.warn("[CHATGPT] APIConnection failed")
            yield True, "我连接不到网络,请稍后重试"
        except openai.error.Timeout as e:
            log.warn(e)
            log.warn("[CHATGPT] Timeout")
            yield True, "我没有收到消息,请稍后重试"
        except Exception as e:
            # unknown exception: reset the session so the next turn starts clean
            log.exception(e)
            Session.clear_session(user_id)
            yield True, "请再问我一次吧"

    def _process_reply_stream(
            self,
            query: str,
            reply: dict,
            user_id: str
    ) -> str:
        """Yield text chunks from a raw Completion stream and persist the final
        assembled reply into the user's session."""
        full_response = ""
        for response in reply:
            if response.get("choices") is None or len(response["choices"]) == 0:
                raise Exception("OpenAI API returned no choices")
            if response["choices"][0].get("finish_details") is not None:
                break
            if response["choices"][0].get("text") is None:
                raise Exception("OpenAI API returned no text")
            if response["choices"][0]["text"] == "<|endoftext|>":
                break
            yield response["choices"][0]["text"]
            full_response += response["choices"][0]["text"]
        if query and full_response:
            Session.save_session(query, full_response, user_id)

    def create_img(self, query, retry_count=0):
        """Generate an image for a prompt.

        :param query: image description
        :param retry_count: current retry depth
        :return: [image_url] on success, error text on rate-limit exhaustion,
                 None on other failures
        """
        try:
            log.info("[OPEN_AI] image_query={}".format(query))
            response = openai.Image.create(
                prompt=query,    # image description
                n=1,             # number of images per request
                size="256x256"   # one of 256x256, 512x512, 1024x1024
            )
            image_url = response['data'][0]['url']
            log.info("[OPEN_AI] image_url={}".format(image_url))
            return [image_url]
        except openai.error.RateLimitError as e:
            log.warn(e)
            if retry_count < 1:
                time.sleep(5)
                log.warn("[OPEN_AI] ImgCreate RateLimit exceed, 第{}次重试".format(retry_count+1))
                # fix: retry image creation (previously recursed into reply_text)
                return self.create_img(query, retry_count+1)
            else:
                return "提问太快啦,请休息一下再问我吧"
        except Exception as e:
            log.exception(e)
            return None
class Session(object):
    """Per-user prompt-style ("Q:/A:") conversation history for the completion
    API, kept in the module-level ``user_session`` dict."""

    @staticmethod
    def build_session_query(query, user_id):
        '''
        build query with conversation history
        e.g. Q: xxx
             A: xxx
             Q: xxx
        :param query: query content
        :param user_id: from user id
        :return: query content with conversation history
        '''
        prompt = model_conf(const.OPEN_AI).get("character_desc", "")
        if prompt:
            prompt += "<|endoftext|>\n\n\n"
        history = user_session.get(user_id, None)
        if not history:
            return prompt + "Q: " + query + "\nA: "
        for turn in history:
            prompt += "Q: " + turn["question"] + "\n\n\nA: " + turn["answer"] + "<|endoftext|>\n"
        return prompt + "Q: " + query + "\nA: "

    @staticmethod
    def save_session(query, answer, user_id):
        """Record one Q/A turn and trim the stored history to the token budget."""
        max_tokens = model_conf(const.OPEN_AI).get("conversation_max_tokens")
        if not max_tokens:
            max_tokens = 1000  # default value
        turn = {"question": query, "answer": answer}
        history = user_session.get(user_id)
        log.debug(turn)
        log.debug(history)
        if history:
            history.append(turn)
        else:
            user_session[user_id] = [turn]
        # discard conversations exceeding the limit
        Session.discard_exceed_conversation(user_session[user_id], max_tokens)

    @staticmethod
    def discard_exceed_conversation(session, max_tokens):
        """Pop the oldest turns until the (character-count) budget is respected.

        Counts characters from newest to oldest; for every running total that
        exceeds max_tokens, one oldest turn is removed.
        """
        running_total = 0
        totals = []
        for turn in reversed(session):
            running_total += len(turn["question"]) + len(turn["answer"])
            totals.append(running_total)
        for total in totals:
            if total > max_tokens:
                session.pop(0)

    @staticmethod
    def clear_session(user_id):
        """Forget all conversation history for the given user."""
        user_session[user_id] = []
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
plugins/__init__.py | Python | # encoding:utf-8
from .event import *
from .plugin import *
from plugins.plugin_registry import PluginRegistry
# Shared module-level registry instance; PluginRegistry is declared as a
# singleton elsewhere in this package, so this object backs the plugin system.
instance = PluginRegistry()
# Convenience alias so plugin modules can write `@plugins.register(...)`.
register = instance.register
# load_plugins = instance.load_plugins
# emit_event = instance.emit_event
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
plugins/createimg/createimg.py | Python | # encoding:utf-8
from channel.http.http_channel import HttpChannel
from channel.wechat.wechat_channel import WechatChannel
import plugins
from plugins import *
from common import functions
from config import channel_conf
from config import channel_conf_val
from common import const
@plugins.register(name="CreateImg", desire_priority=90, hidden=True, desc="A simple plugin that create images from model", version="0.1", author="RegimenArseic")
class Createimg(Plugin):
    """Plugin that routes prefix-marked queries to image creation.

    Hooks ON_HANDLE_CONTEXT to detect the configured image prefix and
    tag/dispatch the request, and ON_DECORATE_REPLY to format returned
    image URLs per channel.
    """

    def __init__(self):
        super().__init__()
        # Per-channel reply decorators (only HTTP needs markdown formatting).
        self.handles = {HttpChannel: self.handle_http}
        # Maps a channel class to its config key used for prefix lookup.
        self.channel_types = {HttpChannel: const.HTTP,
                              WechatChannel: const.WECHAT}
        self.handlers[Event.ON_HANDLE_CONTEXT] = self.handle_query
        self.handlers[Event.ON_DECORATE_REPLY] = self.send_images

    def get_events(self):
        """Return the event -> handler mapping registered by this plugin."""
        return self.handlers

    def handle_query(self, e_context: EventContext):
        """Detect an image-creation prefix and dispatch the request.

        Only channels listed in ``channel_types`` are handled; everything
        else falls through to the default logic via CONTINUE.
        """
        channel = e_context['channel']
        channel_type = self.channel_types.get(type(channel), None)
        if (channel_type):
            query = e_context['context']
            if (query):
                img_match_prefix = functions.check_prefix(
                    query, channel_conf_val(channel_type, 'image_create_prefix'))
                if img_match_prefix:
                    # Streaming HTTP cannot stream an image: fall back to the
                    # channel's non-stream handler with the original message.
                    if (channel_type == const.HTTP) and e_context['args'].get('stream', False):
                        e_context['reply'] = channel.handle(
                            {'msg': e_context['args']['origin'], 'id': e_context['args']['from_user_id']})
                        e_context.action = EventAction.BREAK_PASS
                    else:
                        # Strip the prefix and mark the request as image creation.
                        query = query.split(img_match_prefix, 1)[1].strip()
                        e_context['args']['type'] = 'IMAGE_CREATE'
                        if (channel_type == const.WECHAT):
                            # WeChat sends the image straight through the channel.
                            channel._do_send_img(
                                query, e_context['args'])
                        e_context.action = EventAction.BREAK_PASS
                else:
                    e_context.action = EventAction.CONTINUE
        return e_context

    def handle_http(self, e_context: EventContext):
        """Format image URLs as markdown for the HTTP channel reply."""
        reply = e_context["reply"]
        if e_context['args'].get('type', '') == 'IMAGE_CREATE':
            if isinstance(reply, list):
                images = ""
                for url in reply:
                    # NOTE(review): "[](url)" is a markdown link with empty
                    # text; an inline image embed would be "" — confirm
                    # which rendering the HTTP frontend expects.
                    images += f"[]({url})\n\n"
                e_context["reply"] = images
            else:
                e_context["reply"] = reply
        return e_context

    def send_images(self, e_context: EventContext):
        """Dispatch reply decoration to the channel-specific handler."""
        channel = e_context['channel']
        method = self.handles.get(type(channel), None)
        if (method):
            e_context = method(e_context)
        e_context.action = EventAction.BREAK_PASS  # event finished: skip remaining plugins and the default handler
        return e_context
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
plugins/event.py | Python | # encoding:utf-8
from enum import Enum
class Event(Enum):
    """Plugin lifecycle events emitted by the framework."""

    # ON_RECEIVE_MESSAGE = 1  # message received (reserved, unused)

    ON_HANDLE_CONTEXT = 2  # before the channel handles the message
    """
    e_context = { "channel": message channel, "context": this message's context, "reply": current reply (initially empty), "args": other context arguments }
    """

    ON_DECORATE_REPLY = 3  # reply obtained, about to be decorated
    """
    e_context = { "channel": message channel, "context": this message's context, "reply": current reply, "args": other context arguments }
    """

    ON_SEND_REPLY = 4  # before the reply is sent
    """
    bot-on-anything does not support ON_SEND_REPLY; use ON_BRIDGE_HANDLE_CONTEXT or ON_BRIDGE_HANDLE_STREAM_CONTEXT instead
    """

    # AFTER_SEND_REPLY = 5  # after the reply is sent (reserved, unused)

    ON_BRIDGE_HANDLE_CONTEXT = 6  # before the model bridge handles the message
    """
    e_context = { "context": this message's context, "reply": current reply (initially empty), "args": other context arguments }
    """

    ON_BRIDGE_HANDLE_STREAM_CONTEXT = 7  # before the model bridge handles a streamed message; streaming handlers must return the full result at once
    """
    e_context = { "context": this message's context, "reply": current reply (initially empty), "args": other context arguments }
    """
class EventAction(Enum):
    """Outcome of a plugin handler, controlling event propagation."""

    CONTINUE = 1    # keep propagating: next plugin, then the default handler
    BREAK = 2       # stop propagating to plugins, but run the default handler
    BREAK_PASS = 3  # stop propagating and skip the default handler as well


class EventContext:
    """Mutable bag of data passed through plugin handlers for one event.

    Missing keys read as "" via __getitem__; the action field starts at
    CONTINUE and is updated by handlers to steer propagation.
    """

    def __init__(self, event, econtext=None):
        """
        :param event: the Event being dispatched
        :param econtext: optional initial context dict. Bug fix: the original
            signature used the mutable default ``econtext=dict()``, which
            shared one dict across every EventContext created without an
            explicit argument; a fresh dict is now created per instance.
        """
        self.event = event
        self.econtext = econtext if econtext is not None else {}
        self.action = EventAction.CONTINUE

    def __getitem__(self, key):
        # Missing keys yield "" rather than raising KeyError.
        return self.econtext.get(key, "")

    def __setitem__(self, key, value):
        self.econtext[key] = value

    def __delitem__(self, key):
        del self.econtext[key]

    def is_pass(self):
        """True when a handler ended the event with BREAK_PASS."""
        return self.action == EventAction.BREAK_PASS
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
plugins/plugin.py | Python | # encoding:utf-8
class Plugin:
    """Base class for all plugins: holds the Event -> handler registry."""

    def __init__(self):
        # Subclasses populate this mapping with Event -> callable entries.
        self.handlers = {}

    def get_help_text(self, **kwargs):
        """Return the plugin's help text; the default says none is available."""
        return "暂无帮助信息"
plugins/plugin_manager.py | Python | # encoding:utf-8
import os
import importlib.util
from plugins.event import EventAction, EventContext,Event
from plugins.plugin_registry import PluginRegistry
from common import functions, log
@functions.singleton
class PluginManager:
    """Discovers, loads and dispatches plugins found under ``plugins_dir``.

    Each plugin lives in ``<plugins_dir>/<name>/<name>.py`` and registers
    itself with the shared PluginRegistry when imported.
    """

    def __init__(self, plugins_dir="./plugins/"):
        """
        :param plugins_dir: root directory scanned for plugin packages
        """
        self.plugins_dir = plugins_dir
        self.plugin_registry = PluginRegistry()
        self.load_plugins()

    def load_plugins(self):
        """Import every ``<name>/<name>.py`` module and register its plugins."""
        for plugin_name in self.find_plugin_names():
            if os.path.exists(f"./plugins/{plugin_name}/{plugin_name}.py"):
                try:
                    plugin_module = self.load_plugin_module(plugin_name)
                    self.plugin_registry.register_from_module(plugin_module)
                except Exception as e:
                    # Bug fix: include the caught exception in the log — it was
                    # previously discarded, leaving import failures undiagnosable.
                    log.warn("Failed to import plugin %s: %s" % (plugin_name, e))

    def find_plugin_names(self):
        """Return the names of all sub-directories of ``plugins_dir``."""
        plugin_names = []
        for entry in os.scandir(self.plugins_dir):
            if entry.is_dir():
                plugin_names.append(entry.name)
        return plugin_names

    def load_plugin_module(self, plugin_name):
        """Load ``<plugins_dir>/<plugin_name>/<plugin_name>.py`` as a module.

        :param plugin_name: directory (and module) name of the plugin
        :return: the executed module object
        """
        spec = importlib.util.spec_from_file_location(
            plugin_name, os.path.join(self.plugins_dir, plugin_name, f"{plugin_name}.py")
        )
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return module

    def emit_event(self, e_context: EventContext, *args, **kwargs):
        """Dispatch an event to enabled plugins in priority order.

        Propagation stops as soon as a handler changes ``e_context.action``
        away from CONTINUE.

        :param e_context: event context shared by all handlers
        :return: the (possibly mutated) event context
        """
        for plugin in self.plugin_registry.list_plugins():
            if plugin.enabled and e_context.action == EventAction.CONTINUE:
                if (e_context.event in plugin.handlers):
                    plugin.handlers[e_context.event](e_context, *args, **kwargs)
        return e_context
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
plugins/plugin_registry.py | Python | # encoding:utf-8
import inspect
from plugins.plugin import Plugin
from common.log import logger
from common import functions
@functions.singleton
class PluginRegistry:
    """Singleton registry of plugin instances, kept sorted by priority."""

    def __init__(self):
        self.plugins = []

    def register(self, name: str, desire_priority: int = 0, **kwargs):
        """Class decorator stamping registration metadata onto a plugin class.

        :param name: unique plugin name
        :param desire_priority: higher priority runs earlier
        :param kwargs: optional desc/author/version/namecn/hidden/enabled
        :return: decorator returning the class unchanged apart from metadata
        """
        def wrapper(plugin_cls):
            plugin_cls.name = name
            plugin_cls.priority = desire_priority
            plugin_cls.desc = kwargs.get('desc')
            plugin_cls.author = kwargs.get('author')
            plugin_cls.version = kwargs.get('version') or "1.0"
            plugin_cls.namecn = kwargs.get('namecn') or name
            # Bug fix: use explicit defaults. The previous
            # ``kwargs.get('enabled') or True`` always evaluated to True,
            # so a plugin registering with enabled=False was still enabled.
            plugin_cls.hidden = kwargs.get('hidden', False)
            plugin_cls.enabled = kwargs.get('enabled', True)
            logger.info(f"Plugin {name}_v{plugin_cls.version} registered")
            return plugin_cls
        return wrapper

    def register_from_module(self, module):
        """Instantiate and register every Plugin subclass found in ``module``."""
        for name, obj in inspect.getmembers(module):
            if inspect.isclass(obj) and issubclass(obj, Plugin) and obj != Plugin:
                plugin_name = getattr(obj, "name", None)
                if plugin_name:
                    plugin = obj()
                    plugin.name = plugin_name
                    plugin.priority = getattr(obj, "priority", 0)
                    plugin.desc = getattr(obj, "desc", None)
                    plugin.author = getattr(obj, "author", None)
                    plugin.version = getattr(obj, "version", "1.0")
                    plugin.namecn = getattr(obj, "namecn", plugin_name)
                    plugin.hidden = getattr(obj, "hidden", False)
                    plugin.enabled = getattr(obj, "enabled", True)
                    self.plugins.append(plugin)
        # Keep the registry ordered by priority, highest first
        # (hoisted out of the loop; re-sorting after every append was redundant).
        self.plugins.sort(key=lambda x: x.priority, reverse=True)

    def get_plugin(self, name):
        """Case-insensitive lookup by plugin name; returns None when absent."""
        plugin = next((p for p in self.plugins if p.name.upper() == name.upper()), None)
        return plugin

    def list_plugins(self):
        """Return a shallow copy of the registered plugins (priority order)."""
        return [plugin for plugin in self.plugins]
plugins/selector/selector.py | Python | # encoding:utf-8
import os
import plugins
from plugins import *
from common import log
from common import functions
@plugins.register(name="Selector", desire_priority=99, hidden=True, desc="A model selector", version="0.1", author="RegimenArsenic")
class Selector(Plugin):
    """Plugin that picks the model for a request based on message prefixes.

    Prefix -> model rules are read from ``selector.json`` next to this file.
    Registered at high priority (99) so the model is chosen before other
    handlers run.
    """

    def __init__(self):
        super().__init__()
        curdir = os.path.dirname(__file__)
        try:
            # selector.json shape (from usage below):
            # {"selector": [{"prefix": [...], "model": "..."}]}
            self.config = functions.load_json_file(curdir, "selector.json")
        except Exception as e:
            log.warn("[Selector] init failed")
            raise e
        self.handlers[Event.ON_HANDLE_CONTEXT] = self.select_model
        self.handlers[Event.ON_BRIDGE_HANDLE_STREAM_CONTEXT] = self.select_model
        log.info("[Selector] inited")

    def get_events(self):
        """Return the event -> handler mapping registered by this plugin."""
        return self.handlers

    def select_model(self, e_context: EventContext):
        """Choose the model from the first matching prefix rule.

        Falls back to the model already present in e_context['args'] when no
        rule matches; a string prefix match is stripped from the message text.
        """
        model=e_context['args'].get('model')
        for selector in self.config.get("selector", []):
            prefix = selector.get('prefix', [])
            check_prefix=functions.check_prefix(e_context["context"], prefix)
            if (check_prefix):
                model=selector.get('model')
                # check_prefix may return the matched prefix (str) or another
                # truthy value; only a string result is stripped from the text.
                if isinstance(check_prefix, str):
                    e_context["context"] = e_context["context"].split(check_prefix, 1)[1].strip()
                break
        log.debug(f"[Selector] select model {model}")
        e_context.action = EventAction.CONTINUE  # keep propagating to the next plugin or the default logic
        e_context['args']['model']=model
        return e_context
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
scripts/allow-http-nopassword.sh | Shell | #!/usr/bin/env bash
set -x

# macOS ships BSD sed, whose -i flag is incompatible with GNU sed; install
# and use gsed there so the in-place edit below works on both platforms.
# @see https://stackoverflow.com/questions/30003570/how-to-use-gnu-sed-on-mac-os-10-10-brew-install-default-names-no-longer-su
# @see https://www.cnblogs.com/fnlingnzb-learner/p/10657285.html
cmd=sed
if [ "$(uname)" == "Darwin" ];then
    brew install gnu-sed
    cmd=gsed
fi
echo "current sed command is: $cmd"

# Blank out http_auth_password in config.json to allow password-less HTTP access.
# Bug fix: corrected the "nopasword" typo in the status message.
echo "allow http nopassword"
$cmd -i "s/\"http_auth_password\": \".*\"/\"http_auth_password\": \"\"/" config.json
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
scripts/fix-itchat.sh | Shell | #!/usr/bin/env bash
set -x

# Patch the installed itchat-uos login module to make WeChat login more
# reliable: inserts sleeps around the login polling loop and treats HTTP
# status 400 like 408 (retry). Both patches are idempotent — each is
# skipped when its marker line is already present in login.py.
# macOS ships BSD sed; install GNU gsed there for compatible -i behaviour.
# @see https://stackoverflow.com/questions/30003570/how-to-use-gnu-sed-on-mac-os-10-10-brew-install-default-names-no-longer-su
# @see https://www.cnblogs.com/fnlingnzb-learner/p/10657285.html
cmd=sed
if [ "$(uname)" == "Darwin" ];then
    brew install gnu-sed
    cmd=gsed
fi
echo "current sed command is: $cmd"

# Locate login.py inside the installed itchat-uos package.
# NOTE(review): $file_name is used unquoted below; this breaks if the
# site-packages path contains spaces — confirm acceptable for target envs.
pack_dir="$(pip3 show itchat-uos | grep "Location" | awk '{print $2}')"
file_name="${pack_dir}/itchat/components/login.py"

# Patch 1: insert a 15s sleep before the "while not isLoggedIn" loop.
sleep15Code="time.sleep(15)"
cat $file_name | grep $sleep15Code
if [ "$?" != "0" ];then
    echo "fix $sleep15Code"
    $cmd -i "/while not isLoggedIn/i\ $sleep15Code" $file_name
else
    echo "already fix $sleep15Code"
fi

# Patch 2: retry on status 400 as well, and sleep 3s before the login check.
sleep3Code="time.sleep(3)"
cat $file_name | grep $sleep3Code
if [ "$?" != "0" ];then
    echo "fix $sleep3Code"
    $cmd -i "s/elif status != '408'/elif status in ['408', '400']/" $file_name
    $cmd -i "/if isLoggedIn:/i\ time.sleep(3)" $file_name
else
    echo "already fix $sleep3Code"
fi
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
scripts/shutdown.sh | Shell | #!/bin/bash
# Stop the running bot-on-anything service.
cd `dirname $0`/..
export BASE_DIR=`pwd`
# Find the python3 process running app.py from this checkout.
pid=`ps ax | grep -i app.py | grep "${BASE_DIR}" | grep python3 | grep -v grep | awk '{print $1}'`

if [ -z "$pid" ] ; then
        echo "No bot-on-anaything running."
        # Bug fix: `exit -1` is not a valid POSIX exit status; use 1.
        exit 1
fi

echo "The bot-on-anaything(${pid}) is running..."

kill ${pid}

echo "Send shutdown request to bot-on-anaything(${pid}) OK"
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
scripts/start.sh | Shell | #!/bin/bash
# Run bot-on-anything in the background and tail its log.
cd `dirname $0`/..
export BASE_DIR=`pwd`
echo $BASE_DIR

# Ensure the log directory and file exist before redirecting into them.
if [ ! -f "${BASE_DIR}/logs/log_info.log" ]; then
  # Bug fix: -p so mkdir does not fail when logs/ already exists while
  # only the log file itself is missing.
  mkdir -p "${BASE_DIR}/logs"
  touch "${BASE_DIR}/logs/log_info.log"
  echo "${BASE_DIR}/logs/log_info.log"
fi

# Bug fix: capture stderr too (2>&1) so startup errors reach the log file.
nohup python3 "${BASE_DIR}/app.py" >> ${BASE_DIR}/logs/log_info.log 2>&1 & tail -f "${BASE_DIR}/logs/log_info.log"

echo "bot-on-anaything is starting,you can check the ${BASE_DIR}/logs/log_info.log"
| zhayujie/bot-on-anything | 4,180 | A large model-based chatbot builder that can quickly integrate AI models (including ChatGPT, Claude, Gemini) into various software applications (such as Telegram, Gmail, Slack, and websites). | Python | zhayujie | Minimal Future Tech | |
agent/memory/__init__.py | Python | """
Memory module for AgentMesh
Provides long-term memory capabilities with hybrid search (vector + keyword)
"""
from agent.memory.manager import MemoryManager
from agent.memory.config import MemoryConfig, get_default_memory_config, set_global_memory_config
from agent.memory.embedding import create_embedding_provider
__all__ = ['MemoryManager', 'MemoryConfig', 'get_default_memory_config', 'set_global_memory_config', 'create_embedding_provider']
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/memory/chunker.py | Python | """
Text chunking utilities for memory
Splits text into chunks with token limits and overlap
"""
from __future__ import annotations
from typing import List, Tuple
from dataclasses import dataclass
@dataclass
class TextChunk:
    """A slice of text plus the 1-based line range it was cut from."""
    text: str
    start_line: int
    end_line: int


class TextChunker:
    """Splits text into line-based chunks bounded by an estimated token budget."""

    def __init__(self, max_tokens: int = 500, overlap_tokens: int = 50):
        """
        :param max_tokens: upper bound of estimated tokens per chunk
        :param overlap_tokens: estimated tokens carried over between chunks
        """
        self.max_tokens = max_tokens
        self.overlap_tokens = overlap_tokens
        # Crude token estimate: roughly four characters per token for
        # mixed English/Chinese text.
        self.chars_per_token = 4

    def chunk_text(self, text: str) -> List[TextChunk]:
        """Split ``text`` into overlapping chunks, tracking line numbers.

        :param text: input text (empty / whitespace-only yields no chunks)
        :return: list of TextChunk objects in document order
        """
        if not text.strip():
            return []

        char_limit = self.max_tokens * self.chars_per_token
        overlap_limit = self.overlap_tokens * self.chars_per_token

        all_lines = text.split('\n')
        result: List[TextChunk] = []

        buffer: List[str] = []   # lines accumulated for the current chunk
        buffer_len = 0           # total characters in buffer (newlines excluded)
        first_line = 1           # line number where the current chunk begins

        for line_no, line in enumerate(all_lines, start=1):
            size = len(line)

            # A single over-long line becomes its own series of chunks.
            if size > char_limit:
                if buffer:
                    result.append(TextChunk('\n'.join(buffer), first_line, line_no - 1))
                    buffer = []
                    buffer_len = 0
                for piece in self._split_long_line(line, char_limit):
                    result.append(TextChunk(piece, line_no, line_no))
                first_line = line_no + 1
                continue

            if buffer and buffer_len + size > char_limit:
                # Flush the full chunk, then seed the next one with overlap.
                result.append(TextChunk('\n'.join(buffer), first_line, line_no - 1))
                carried = self._get_overlap_lines(buffer, overlap_limit)
                buffer = carried + [line]
                buffer_len = sum(len(x) for x in buffer)
                first_line = line_no - len(carried)
            else:
                buffer.append(line)
                buffer_len += size

        if buffer:
            result.append(TextChunk('\n'.join(buffer), first_line, len(all_lines)))

        return result

    def _split_long_line(self, line: str, max_chars: int) -> List[str]:
        """Cut one oversized line into fixed-width pieces."""
        return [line[pos:pos + max_chars] for pos in range(0, len(line), max_chars)]

    def _get_overlap_lines(self, lines: List[str], target_chars: int) -> List[str]:
        """Pick trailing lines of ``lines`` whose total length fits ``target_chars``."""
        picked: List[str] = []
        total = 0
        for candidate in reversed(lines):
            if total + len(candidate) > target_chars:
                break
            picked.insert(0, candidate)
            total += len(candidate)
        return picked

    def chunk_markdown(self, text: str) -> List[TextChunk]:
        """Chunk markdown text (currently identical to plain chunk_text)."""
        return self.chunk_text(text)
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/memory/config.py | Python | """
Memory configuration module
Provides global memory configuration with simplified workspace structure
"""
from __future__ import annotations
import os
from dataclasses import dataclass, field
from typing import Optional, List
from pathlib import Path
def _default_workspace():
    """Return the default workspace path (~/cow), expanded for the host OS."""
    from common.utils import expand_path
    return expand_path("~/cow")


@dataclass
class MemoryConfig:
    """Configuration for memory storage, chunking and hybrid search."""

    # Root directory for all memory artifacts (defaults to ~/cow).
    workspace_root: str = field(default_factory=_default_workspace)

    # Embedding settings.
    embedding_provider: str = "openai"  # "openai" | "local"
    embedding_model: str = "text-embedding-3-small"
    embedding_dim: int = 1536

    # Chunking settings.
    chunk_max_tokens: int = 500
    chunk_overlap_tokens: int = 50

    # Search settings.
    max_results: int = 10
    min_score: float = 0.1

    # Relative weights for hybrid (vector + keyword) scoring.
    vector_weight: float = 0.7
    keyword_weight: float = 0.3

    # Memory sources to index.
    sources: List[str] = field(default_factory=lambda: ["memory", "session"])

    # Synchronization behaviour.
    enable_auto_sync: bool = True
    sync_on_search: bool = True

    # Memory flush thresholds (independent of the model context window):
    # flush after ~50K tokens, or 20 conversation turns (one user message
    # plus one AI message per turn).
    flush_token_threshold: int = 50000
    flush_turn_threshold: int = 20

    def get_workspace(self) -> Path:
        """Workspace root as a Path (no directories are created here)."""
        return Path(self.workspace_root)

    def get_memory_dir(self) -> Path:
        """Directory holding memory files: <workspace>/memory."""
        return self.get_workspace() / "memory"

    def get_db_path(self) -> Path:
        """SQLite index path for long-term memory; creates its parent directory."""
        index_dir = self.get_memory_dir() / "long-term"
        index_dir.mkdir(parents=True, exist_ok=True)
        return index_dir / "index.db"

    def get_skills_dir(self) -> Path:
        """Skills directory: <workspace>/skills."""
        return self.get_workspace() / "skills"

    def get_agent_workspace(self, agent_name: Optional[str] = None) -> Path:
        """Workspace directory for an agent; ensures the directory exists.

        :param agent_name: accepted for interface compatibility but unused
        :return: the (created) workspace root path
        """
        workspace = self.get_workspace()
        workspace.mkdir(parents=True, exist_ok=True)
        return workspace
# Global memory configuration (module-level singleton slot).
_global_memory_config: Optional[MemoryConfig] = None


def get_default_memory_config() -> MemoryConfig:
    """
    Get the global memory configuration.

    Lazily creates a default MemoryConfig on first access if none was set
    via set_global_memory_config().

    Returns:
        MemoryConfig instance
    """
    global _global_memory_config
    if _global_memory_config is None:
        _global_memory_config = MemoryConfig()
    return _global_memory_config


def set_global_memory_config(config: MemoryConfig):
    """
    Set the global memory configuration.

    This should be called before creating any MemoryManager instances,
    since managers read the global config at construction time.

    Args:
        config: MemoryConfig instance to use globally

    Example:
        >>> from agent.memory import MemoryConfig, set_global_memory_config
        >>> config = MemoryConfig(
        ...     workspace_root="~/my_agents",
        ...     embedding_provider="openai",
        ...     vector_weight=0.8
        ... )
        >>> set_global_memory_config(config)
    """
    global _global_memory_config
    _global_memory_config = config
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/memory/embedding.py | Python | """
Embedding providers for memory
Supports OpenAI and local embedding models
"""
import hashlib
from abc import ABC, abstractmethod
from typing import List, Optional
class EmbeddingProvider(ABC):
    """Base class for embedding providers."""

    @abstractmethod
    def embed(self, text: str) -> List[float]:
        """Generate an embedding vector for a single text."""
        pass

    @abstractmethod
    def embed_batch(self, texts: List[str]) -> List[List[float]]:
        """Generate embedding vectors for multiple texts."""
        pass

    @property
    @abstractmethod
    def dimensions(self) -> int:
        """Dimensionality of the vectors this provider produces."""
        pass


class OpenAIEmbeddingProvider(EmbeddingProvider):
    """OpenAI embedding provider using the REST API directly."""

    def __init__(self, model: str = "text-embedding-3-small", api_key: Optional[str] = None, api_base: Optional[str] = None):
        """
        Initialize OpenAI embedding provider

        Args:
            model: Model name (text-embedding-3-small or text-embedding-3-large)
            api_key: OpenAI API key (required)
            api_base: Optional API base URL (default: https://api.openai.com/v1)

        Raises:
            ValueError: If the API key is missing or a placeholder value.
        """
        self.model = model
        self.api_key = api_key
        self.api_base = api_base or "https://api.openai.com/v1"

        # Validate the API key early so misconfiguration fails fast.
        if not self.api_key or self.api_key in ["", "YOUR API KEY", "YOUR_API_KEY"]:
            raise ValueError("OpenAI API key is not configured. Please set 'open_ai_api_key' in config.json")

        # "small" models emit 1536-dim vectors; "large" emits 3072.
        self._dimensions = 1536 if "small" in model else 3072

    def _call_api(self, input_data):
        """POST to /embeddings and return the parsed JSON payload.

        Args:
            input_data: A string, or list of strings, to embed.

        Raises:
            ConnectionError/TimeoutError/ValueError with actionable messages.
        """
        import requests

        url = f"{self.api_base}/embeddings"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}"
        }
        data = {
            "input": input_data,
            "model": self.model
        }

        try:
            response = requests.post(url, headers=headers, json=data, timeout=5)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.ConnectionError as e:
            raise ConnectionError(f"Failed to connect to OpenAI API at {url}. Please check your network connection and api_base configuration. Error: {str(e)}")
        except requests.exceptions.Timeout as e:
            # Bug fix: the message previously claimed a 10s timeout while the
            # request actually uses timeout=5.
            raise TimeoutError(f"OpenAI API request timed out after 5s. Please check your network connection. Error: {str(e)}")
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 401:
                raise ValueError("Invalid OpenAI API key. Please check your 'open_ai_api_key' in config.json")
            elif e.response.status_code == 429:
                raise ValueError("OpenAI API rate limit exceeded. Please try again later.")
            else:
                raise ValueError(f"OpenAI API request failed: {e.response.status_code} - {e.response.text}")

    def embed(self, text: str) -> List[float]:
        """Generate an embedding for one text."""
        result = self._call_api(text)
        return result["data"][0]["embedding"]

    def embed_batch(self, texts: List[str]) -> List[List[float]]:
        """Generate embeddings for multiple texts; empty input yields []."""
        if not texts:
            return []
        result = self._call_api(texts)
        return [item["embedding"] for item in result["data"]]

    @property
    def dimensions(self) -> int:
        return self._dimensions
# LocalEmbeddingProvider removed - only use OpenAI embedding or keyword search
class EmbeddingCache:
    """In-memory cache of embeddings keyed by (provider, model, text)."""

    def __init__(self):
        # Maps an md5 digest of provider/model/text to the embedding vector.
        self.cache = {}

    def get(self, text: str, provider: str, model: str) -> Optional[List[float]]:
        """Return the cached embedding, or None on a miss."""
        return self.cache.get(self._compute_key(text, provider, model))

    def put(self, text: str, provider: str, model: str, embedding: List[float]):
        """Store an embedding under its derived cache key."""
        self.cache[self._compute_key(text, provider, model)] = embedding

    @staticmethod
    def _compute_key(text: str, provider: str, model: str) -> str:
        """Derive a stable md5 hex key from provider, model and text."""
        return hashlib.md5(f"{provider}:{model}:{text}".encode('utf-8')).hexdigest()

    def clear(self):
        """Drop every cached embedding."""
        self.cache.clear()
def create_embedding_provider(
    provider: str = "openai",
    model: Optional[str] = None,
    api_key: Optional[str] = None,
    api_base: Optional[str] = None
) -> EmbeddingProvider:
    """
    Factory for embedding providers.

    Only the OpenAI REST provider is supported; callers should fall back to
    keyword-only search when construction fails.

    Args:
        provider: Provider name (only "openai" is supported)
        model: Model name (default: text-embedding-3-small)
        api_key: OpenAI API key (required)
        api_base: API base URL (default: https://api.openai.com/v1)

    Returns:
        EmbeddingProvider instance

    Raises:
        ValueError: If provider is not "openai" or api_key is missing
    """
    if provider != "openai":
        raise ValueError(f"Only 'openai' provider is supported, got: {provider}")
    return OpenAIEmbeddingProvider(
        model=model or "text-embedding-3-small",
        api_key=api_key,
        api_base=api_base
    )
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/memory/manager.py | Python | """
Memory manager for AgentMesh
Provides high-level interface for memory operations
"""
import os
from typing import List, Optional, Dict, Any
from pathlib import Path
import hashlib
from datetime import datetime, timedelta
from agent.memory.config import MemoryConfig, get_default_memory_config
from agent.memory.storage import MemoryStorage, MemoryChunk, SearchResult
from agent.memory.chunker import TextChunker
from agent.memory.embedding import create_embedding_provider, EmbeddingProvider
from agent.memory.summarizer import MemoryFlushManager, create_memory_files_if_needed
class MemoryManager:
"""
Memory manager with hybrid search capabilities
Provides long-term memory for agents with vector and keyword search
"""
def __init__(
self,
config: Optional[MemoryConfig] = None,
embedding_provider: Optional[EmbeddingProvider] = None,
llm_model: Optional[Any] = None
):
"""
Initialize memory manager
Args:
config: Memory configuration (uses global config if not provided)
embedding_provider: Custom embedding provider (optional)
llm_model: LLM model for summarization (optional)
"""
self.config = config or get_default_memory_config()
# Initialize storage
db_path = self.config.get_db_path()
self.storage = MemoryStorage(db_path)
# Initialize chunker
self.chunker = TextChunker(
max_tokens=self.config.chunk_max_tokens,
overlap_tokens=self.config.chunk_overlap_tokens
)
# Initialize embedding provider (optional)
self.embedding_provider = None
if embedding_provider:
self.embedding_provider = embedding_provider
else:
# Try to create embedding provider, but allow failure
try:
# Get API key from environment or config
api_key = os.environ.get('OPENAI_API_KEY')
api_base = os.environ.get('OPENAI_API_BASE')
self.embedding_provider = create_embedding_provider(
provider=self.config.embedding_provider,
model=self.config.embedding_model,
api_key=api_key,
api_base=api_base
)
except Exception as e:
# Embedding provider failed, but that's OK
# We can still use keyword search and file operations
from common.log import logger
logger.warning(f"[MemoryManager] Embedding provider initialization failed: {e}")
logger.info(f"[MemoryManager] Memory will work with keyword search only (no vector search)")
# Initialize memory flush manager
workspace_dir = self.config.get_workspace()
self.flush_manager = MemoryFlushManager(
workspace_dir=workspace_dir,
llm_model=llm_model
)
# Ensure workspace directories exist
self._init_workspace()
self._dirty = False
def _init_workspace(self):
"""Initialize workspace directories"""
memory_dir = self.config.get_memory_dir()
memory_dir.mkdir(parents=True, exist_ok=True)
# Create default memory files
workspace_dir = self.config.get_workspace()
create_memory_files_if_needed(workspace_dir)
async def search(
self,
query: str,
user_id: Optional[str] = None,
max_results: Optional[int] = None,
min_score: Optional[float] = None,
include_shared: bool = True
) -> List[SearchResult]:
"""
Search memory with hybrid search (vector + keyword)
Args:
query: Search query
user_id: User ID for scoped search
max_results: Maximum results to return
min_score: Minimum score threshold
include_shared: Include shared memories
Returns:
List of search results sorted by relevance
"""
max_results = max_results or self.config.max_results
min_score = min_score or self.config.min_score
# Determine scopes
scopes = []
if include_shared:
scopes.append("shared")
if user_id:
scopes.append("user")
if not scopes:
return []
# Sync if needed
if self.config.sync_on_search and self._dirty:
await self.sync()
# Perform vector search (if embedding provider available)
vector_results = []
if self.embedding_provider:
try:
from common.log import logger
query_embedding = self.embedding_provider.embed(query)
vector_results = self.storage.search_vector(
query_embedding=query_embedding,
user_id=user_id,
scopes=scopes,
limit=max_results * 2 # Get more candidates for merging
)
logger.info(f"[MemoryManager] Vector search found {len(vector_results)} results for query: {query}")
except Exception as e:
from common.log import logger
logger.warning(f"[MemoryManager] Vector search failed: {e}")
# Perform keyword search
keyword_results = self.storage.search_keyword(
query=query,
user_id=user_id,
scopes=scopes,
limit=max_results * 2
)
from common.log import logger
logger.info(f"[MemoryManager] Keyword search found {len(keyword_results)} results for query: {query}")
# Merge results
merged = self._merge_results(
vector_results,
keyword_results,
self.config.vector_weight,
self.config.keyword_weight
)
# Filter by min score and limit
filtered = [r for r in merged if r.score >= min_score]
return filtered[:max_results]
async def add_memory(
self,
content: str,
user_id: Optional[str] = None,
scope: str = "shared",
source: str = "memory",
path: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None
):
"""
Add new memory content
Args:
content: Memory content
user_id: User ID for user-scoped memory
scope: Memory scope ("shared", "user", "session")
source: Memory source ("memory" or "session")
path: File path (auto-generated if not provided)
metadata: Additional metadata
"""
if not content.strip():
return
# Generate path if not provided
if not path:
content_hash = hashlib.md5(content.encode('utf-8')).hexdigest()[:8]
if user_id and scope == "user":
path = f"memory/users/{user_id}/memory_{content_hash}.md"
else:
path = f"memory/shared/memory_{content_hash}.md"
# Chunk content
chunks = self.chunker.chunk_text(content)
# Generate embeddings (if provider available)
texts = [chunk.text for chunk in chunks]
if self.embedding_provider:
embeddings = self.embedding_provider.embed_batch(texts)
else:
# No embeddings, just use None
embeddings = [None] * len(texts)
# Create memory chunks
memory_chunks = []
for chunk, embedding in zip(chunks, embeddings):
chunk_id = self._generate_chunk_id(path, chunk.start_line, chunk.end_line)
chunk_hash = MemoryStorage.compute_hash(chunk.text)
memory_chunks.append(MemoryChunk(
id=chunk_id,
user_id=user_id,
scope=scope,
source=source,
path=path,
start_line=chunk.start_line,
end_line=chunk.end_line,
text=chunk.text,
embedding=embedding,
hash=chunk_hash,
metadata=metadata
))
# Save to storage
self.storage.save_chunks_batch(memory_chunks)
# Update file metadata
file_hash = MemoryStorage.compute_hash(content)
self.storage.update_file_metadata(
path=path,
source=source,
file_hash=file_hash,
mtime=int(os.path.getmtime(__file__)), # Use current time
size=len(content)
)
    async def sync(self, force: bool = False):
        """
        Synchronize memory from files on disk into the search index.

        Scans MEMORY.md (workspace root) plus every *.md file under the memory
        directory, classifying each file's scope/user from its path, and hands
        each one to _sync_file (which skips unchanged files by hash).

        Args:
            force: Force full reindex
                   NOTE(review): currently unused — _sync_file always skips
                   files whose hash is unchanged; confirm intended behavior.
        """
        memory_dir = self.config.get_memory_dir()
        workspace_dir = self.config.get_workspace()
        # Scan MEMORY.md (workspace root) — always shared, no user scope
        memory_file = Path(workspace_dir) / "MEMORY.md"
        if memory_file.exists():
            await self._sync_file(memory_file, "memory", "shared", None)
        # Scan memory directory (including daily summaries)
        if memory_dir.exists():
            for file_path in memory_dir.rglob("*.md"):
                # Determine scope and user_id from the path relative to the workspace
                rel_path = file_path.relative_to(workspace_dir)
                parts = rel_path.parts
                # Check if it's in a daily summary directory
                if "daily" in parts:
                    # Daily summary files
                    # NOTE(review): the `len(parts) > 3` heuristic assumes any
                    # deeper nesting under daily/ means a per-user directory —
                    # verify against the actual layout produced by the flusher.
                    if "users" in parts or len(parts) > 3:
                        # User-scoped daily summary: memory/daily/{user_id}/2024-01-29.md
                        user_idx = parts.index("daily") + 1
                        user_id = parts[user_idx] if user_idx < len(parts) else None
                        scope = "user"
                    else:
                        # Shared daily summary: memory/daily/2024-01-29.md
                        user_id = None
                        scope = "shared"
                elif "users" in parts:
                    # User-scoped memory: the path component after "users" is the user id
                    user_idx = parts.index("users") + 1
                    user_id = parts[user_idx] if user_idx < len(parts) else None
                    scope = "user"
                else:
                    # Shared memory
                    user_id = None
                    scope = "shared"
                await self._sync_file(file_path, "memory", scope, user_id)
        self._dirty = False
    async def _sync_file(
        self,
        file_path: Path,
        source: str,
        scope: str,
        user_id: Optional[str]
    ):
        """
        Sync a single file into the index.

        Skips files whose stored hash matches the current content; otherwise
        deletes the file's old chunks, re-chunks, re-embeds, and re-saves.

        Args:
            file_path: Absolute path of the file to index
            source: Memory source label ("memory" or "session")
            scope: Memory scope ("shared" | "user")
            user_id: Owning user for user-scoped files, else None
        """
        # Compute file hash for change detection
        content = file_path.read_text(encoding='utf-8')
        file_hash = MemoryStorage.compute_hash(content)
        # Chunks are stored under the path relative to the workspace
        workspace_dir = self.config.get_workspace()
        rel_path = str(file_path.relative_to(workspace_dir))
        # Check if file changed
        stored_hash = self.storage.get_file_hash(rel_path)
        if stored_hash == file_hash:
            return  # No changes
        # Delete old chunks before re-inserting
        self.storage.delete_by_path(rel_path)
        # Chunk and embed
        chunks = self.chunker.chunk_text(content)
        if not chunks:
            # NOTE(review): old chunks are already deleted here but the file's
            # metadata is not updated, so an emptied file is re-checked every
            # sync — confirm this is the intended behavior.
            return
        texts = [chunk.text for chunk in chunks]
        if self.embedding_provider:
            embeddings = self.embedding_provider.embed_batch(texts)
        else:
            embeddings = [None] * len(texts)
        # Create memory chunks
        memory_chunks = []
        for chunk, embedding in zip(chunks, embeddings):
            chunk_id = self._generate_chunk_id(rel_path, chunk.start_line, chunk.end_line)
            chunk_hash = MemoryStorage.compute_hash(chunk.text)
            memory_chunks.append(MemoryChunk(
                id=chunk_id,
                user_id=user_id,
                scope=scope,
                source=source,
                path=rel_path,
                start_line=chunk.start_line,
                end_line=chunk.end_line,
                text=chunk.text,
                embedding=embedding,
                hash=chunk_hash,
                metadata=None
            ))
        # Save all chunks in one batch
        self.storage.save_chunks_batch(memory_chunks)
        # Update file metadata so the next sync can skip this file if unchanged
        stat = file_path.stat()
        self.storage.update_file_metadata(
            path=rel_path,
            source=source,
            file_hash=file_hash,
            mtime=int(stat.st_mtime),
            size=stat.st_size
        )
    def should_flush_memory(
        self,
        current_tokens: int = 0
    ) -> bool:
        """
        Check if memory flush should be triggered.

        The flush trigger is independent of the model's context window; it
        relies solely on the configured thresholds flush_token_threshold and
        flush_turn_threshold.

        Args:
            current_tokens: Current session token count
        Returns:
            True if memory flush should run
        """
        return self.flush_manager.should_flush(
            current_tokens=current_tokens,
            token_threshold=self.config.flush_token_threshold,
            turn_threshold=self.config.flush_turn_threshold
        )
    def increment_turn(self):
        """Increment the conversation turn counter (one user message plus one AI reply counts as one turn)."""
        self.flush_manager.increment_turn()
async def execute_memory_flush(
self,
agent_executor,
current_tokens: int,
user_id: Optional[str] = None,
**executor_kwargs
) -> bool:
"""
Execute memory flush before compaction
This runs a silent agent turn to write durable memories to disk.
Similar to clawdbot's pre-compaction memory flush.
Args:
agent_executor: Async function to execute agent with prompt
current_tokens: Current session token count
user_id: Optional user ID
**executor_kwargs: Additional kwargs for agent executor
Returns:
True if flush completed successfully
Example:
>>> async def run_agent(prompt, system_prompt, silent=False):
... # Your agent execution logic
... pass
>>>
>>> if manager.should_flush_memory(current_tokens=100000):
... await manager.execute_memory_flush(
... agent_executor=run_agent,
... current_tokens=100000
... )
"""
success = await self.flush_manager.execute_flush(
agent_executor=agent_executor,
current_tokens=current_tokens,
user_id=user_id,
**executor_kwargs
)
if success:
# Mark dirty so next search will sync the new memories
self._dirty = True
return success
    def build_memory_guidance(self, lang: str = "zh", include_context: bool = True) -> str:
        """
        Build natural memory guidance for the agent system prompt.

        Following clawdbot's approach:
        1. Load MEMORY.md as bootstrap context (blends into background)
        2. Load daily files on-demand via the memory_search tool
        3. Agent should NOT proactively mention memories unless the user asks

        Args:
            lang: Language for guidance ("en" or "zh")
            include_context: Whether to append the bootstrap memory context
                (default: True). MEMORY.md is loaded as background context;
                daily files are accessed via the memory_search tool.
        Returns:
            Memory guidance text (and optionally context) for the system prompt
        """
        # Mention today's daily file by name so the agent writes to the right place
        today_file = self.flush_manager.get_today_memory_file().name
        if lang == "zh":
            guidance = f"""## 记忆系统
**背景知识**: 下方包含核心长期记忆,可直接使用。需要查找历史时,用 memory_search 搜索(搜索一次即可,不要重复)。
**存储记忆**: 当用户分享重要信息时(偏好、决策、事实等),主动用 write 工具存储:
- 长期信息 → MEMORY.md
- 当天笔记 → memory/{today_file}
- 静默存储,仅在明确要求时确认
**使用原则**: 自然使用记忆,就像你本来就知道。不需要生硬地提起或列举记忆,除非用户提到。"""
        else:
            guidance = f"""## Memory System
**Background Knowledge**: Core long-term memories below - use directly. For history, use memory_search once (don't repeat).
**Store Memories**: When user shares important info (preferences, decisions, facts), proactively write:
- Durable info → MEMORY.md
- Daily notes → memory/{today_file}
- Store silently; confirm only when explicitly requested
**Usage**: Use memories naturally as if you always knew. Don't mention or list unless user explicitly asks."""
        if include_context:
            # Append bootstrap context (MEMORY.md only, like clawdbot)
            bootstrap_context = self.load_bootstrap_memories()
            if bootstrap_context:
                guidance += f"\n\n## Background Context\n\n{bootstrap_context}"
        return guidance
def load_bootstrap_memories(self, user_id: Optional[str] = None) -> str:
"""
Load bootstrap memory files for session start
Following clawdbot's design:
- Only loads MEMORY.md from workspace root (long-term curated memory)
- Daily files (memory/YYYY-MM-DD.md) are accessed via memory_search tool, not bootstrap
- User-specific MEMORY.md is also loaded if user_id provided
Returns memory content WITHOUT obvious headers so it blends naturally
into the context as background knowledge.
Args:
user_id: Optional user ID for user-specific memories
Returns:
Memory content to inject into system prompt (blends naturally as background context)
"""
workspace_dir = self.config.get_workspace()
memory_dir = self.config.get_memory_dir()
sections = []
# 1. Load MEMORY.md from workspace root (long-term curated memory)
# Following clawdbot: only MEMORY.md is bootstrap, daily files use memory_search
memory_file = Path(workspace_dir) / "MEMORY.md"
if memory_file.exists():
try:
content = memory_file.read_text(encoding='utf-8').strip()
if content:
sections.append(content)
except Exception as e:
print(f"Warning: Failed to read MEMORY.md: {e}")
# 2. Load user-specific MEMORY.md if user_id provided
if user_id:
user_memory_dir = memory_dir / "users" / user_id
user_memory_file = user_memory_dir / "MEMORY.md"
if user_memory_file.exists():
try:
content = user_memory_file.read_text(encoding='utf-8').strip()
if content:
sections.append(content)
except Exception as e:
print(f"Warning: Failed to read user memory: {e}")
if not sections:
return ""
# Join sections without obvious headers - let memories blend naturally
# This makes the agent feel like it "just knows" rather than "checking memory files"
return "\n\n".join(sections)
def get_status(self) -> Dict[str, Any]:
"""Get memory status"""
stats = self.storage.get_stats()
return {
'chunks': stats['chunks'],
'files': stats['files'],
'workspace': str(self.config.get_workspace()),
'dirty': self._dirty,
'embedding_enabled': self.embedding_provider is not None,
'embedding_provider': self.config.embedding_provider if self.embedding_provider else 'disabled',
'embedding_model': self.config.embedding_model if self.embedding_provider else 'N/A',
'search_mode': 'hybrid (vector + keyword)' if self.embedding_provider else 'keyword only (FTS5)'
}
    def mark_dirty(self):
        """Mark memory as dirty so the next search (with sync_on_search enabled) re-syncs files."""
        self._dirty = True
    def close(self):
        """Close the memory manager and release the underlying storage resources."""
        self.storage.close()
# Helper methods
def _generate_chunk_id(self, path: str, start_line: int, end_line: int) -> str:
"""Generate unique chunk ID"""
content = f"{path}:{start_line}:{end_line}"
return hashlib.md5(content.encode('utf-8')).hexdigest()
def _merge_results(
self,
vector_results: List[SearchResult],
keyword_results: List[SearchResult],
vector_weight: float,
keyword_weight: float
) -> List[SearchResult]:
"""Merge vector and keyword search results"""
# Create a map by (path, start_line, end_line)
merged_map = {}
for result in vector_results:
key = (result.path, result.start_line, result.end_line)
merged_map[key] = {
'result': result,
'vector_score': result.score,
'keyword_score': 0.0
}
for result in keyword_results:
key = (result.path, result.start_line, result.end_line)
if key in merged_map:
merged_map[key]['keyword_score'] = result.score
else:
merged_map[key] = {
'result': result,
'vector_score': 0.0,
'keyword_score': result.score
}
# Calculate combined scores
merged_results = []
for entry in merged_map.values():
combined_score = (
vector_weight * entry['vector_score'] +
keyword_weight * entry['keyword_score']
)
result = entry['result']
merged_results.append(SearchResult(
path=result.path,
start_line=result.start_line,
end_line=result.end_line,
score=combined_score,
snippet=result.snippet,
source=result.source,
user_id=result.user_id
))
# Sort by score
merged_results.sort(key=lambda r: r.score, reverse=True)
return merged_results
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/memory/storage.py | Python | """
Storage layer for memory using SQLite + FTS5
Provides vector and keyword search capabilities
"""
from __future__ import annotations
import sqlite3
import json
import hashlib
from typing import List, Dict, Optional, Any
from pathlib import Path
from dataclasses import dataclass
@dataclass
class MemoryChunk:
    """Represents a memory chunk with text and embedding"""
    id: str  # stable ID derived from (path, start_line, end_line)
    user_id: Optional[str]  # owning user for user-scoped chunks; None for shared
    scope: str  # "shared" | "user" | "session"
    source: str  # "memory" | "session"
    path: str  # source file path, relative to the workspace
    start_line: int  # first line of the chunk within the file
    end_line: int  # last line of the chunk within the file
    text: str  # raw chunk text
    embedding: Optional[List[float]]  # embedding vector; None when embeddings are disabled
    hash: str  # content hash of `text`, used for change detection
    metadata: Optional[Dict[str, Any]] = None  # optional extra attributes
@dataclass
class SearchResult:
    """Search result with score and snippet"""
    path: str  # source file path of the matched chunk
    start_line: int  # first line of the matched chunk
    end_line: int  # last line of the matched chunk
    score: float  # relevance score (higher is better)
    snippet: str  # truncated chunk text for display
    source: str  # "memory" | "session"
    user_id: Optional[str] = None  # owning user for user-scoped chunks
class MemoryStorage:
    """SQLite-based storage with FTS5 for keyword search"""

    def __init__(self, db_path: Path):
        # check_same_thread=False allows the connection to be used from
        # multiple threads; a busy timeout is set in _init_db to reduce
        # "database is locked" errors.
        self.db_path = db_path
        self.conn: Optional[sqlite3.Connection] = None
        self.fts5_available = False  # Track FTS5 availability
        self._init_db()

    def _check_fts5_support(self) -> bool:
        """Check if SQLite has FTS5 support"""
        # Probe by creating (and immediately dropping) a throwaway FTS5 table.
        try:
            self.conn.execute("CREATE VIRTUAL TABLE IF NOT EXISTS fts5_test USING fts5(test)")
            self.conn.execute("DROP TABLE IF EXISTS fts5_test")
            return True
        except sqlite3.OperationalError as e:
            if "no such module: fts5" in str(e):
                return False
            raise

    def _init_db(self):
        """Initialize database with schema; recreates the DB file on corruption."""
        try:
            self.conn = sqlite3.connect(str(self.db_path), check_same_thread=False)
            self.conn.row_factory = sqlite3.Row
            # Check FTS5 support
            self.fts5_available = self._check_fts5_support()
            if not self.fts5_available:
                from common.log import logger
                logger.debug("[MemoryStorage] FTS5 not available, using LIKE-based keyword search")
            # Check database integrity
            try:
                result = self.conn.execute("PRAGMA integrity_check").fetchone()
                if result[0] != 'ok':
                    print(f"⚠️ Database integrity check failed: {result[0]}")
                    print(f" Recreating database...")
                    self.conn.close()
                    self.conn = None
                    # Remove corrupted database
                    self.db_path.unlink(missing_ok=True)
                    # Remove WAL files
                    Path(str(self.db_path) + '-wal').unlink(missing_ok=True)
                    Path(str(self.db_path) + '-shm').unlink(missing_ok=True)
                    # Reconnect to create new database
                    self.conn = sqlite3.connect(str(self.db_path), check_same_thread=False)
                    self.conn.row_factory = sqlite3.Row
            except sqlite3.DatabaseError:
                # Database is corrupted, recreate it
                print(f"⚠️ Database is corrupted, recreating...")
                if self.conn:
                    self.conn.close()
                    self.conn = None
                self.db_path.unlink(missing_ok=True)
                Path(str(self.db_path) + '-wal').unlink(missing_ok=True)
                Path(str(self.db_path) + '-shm').unlink(missing_ok=True)
                self.conn = sqlite3.connect(str(self.db_path), check_same_thread=False)
                self.conn.row_factory = sqlite3.Row
            # Enable WAL mode for better concurrency
            self.conn.execute("PRAGMA journal_mode=WAL")
            # Set busy timeout to avoid "database is locked" errors
            self.conn.execute("PRAGMA busy_timeout=5000")
        except Exception as e:
            print(f"⚠️ Unexpected error during database initialization: {e}")
            raise
        # Create chunks table with embeddings (embedding stored as JSON text)
        self.conn.execute("""
            CREATE TABLE IF NOT EXISTS chunks (
                id TEXT PRIMARY KEY,
                user_id TEXT,
                scope TEXT NOT NULL DEFAULT 'shared',
                source TEXT NOT NULL DEFAULT 'memory',
                path TEXT NOT NULL,
                start_line INTEGER NOT NULL,
                end_line INTEGER NOT NULL,
                text TEXT NOT NULL,
                embedding TEXT,
                hash TEXT NOT NULL,
                metadata TEXT,
                created_at INTEGER DEFAULT (strftime('%s', 'now')),
                updated_at INTEGER DEFAULT (strftime('%s', 'now'))
            )
        """)
        # Create indexes
        self.conn.execute("""
            CREATE INDEX IF NOT EXISTS idx_chunks_user
            ON chunks(user_id)
        """)
        self.conn.execute("""
            CREATE INDEX IF NOT EXISTS idx_chunks_scope
            ON chunks(scope)
        """)
        self.conn.execute("""
            CREATE INDEX IF NOT EXISTS idx_chunks_hash
            ON chunks(path, hash)
        """)
        # Create FTS5 virtual table for keyword search (only if supported)
        if self.fts5_available:
            # Use default unicode61 tokenizer (stable and compatible)
            # For CJK support, we'll use LIKE queries as fallback
            self.conn.execute("""
                CREATE VIRTUAL TABLE IF NOT EXISTS chunks_fts USING fts5(
                    text,
                    id UNINDEXED,
                    user_id UNINDEXED,
                    path UNINDEXED,
                    source UNINDEXED,
                    scope UNINDEXED,
                    content='chunks',
                    content_rowid='rowid'
                )
            """)
            # Create triggers to keep FTS in sync
            # NOTE(review): for an external-content FTS5 table ('content=chunks'),
            # SQLite's documented sync pattern uses the special 'delete' command
            # (INSERT INTO chunks_fts(chunks_fts, rowid, ...) VALUES('delete', ...))
            # in the DELETE/UPDATE triggers rather than plain DELETE/UPDATE.
            # Also, INSERT OR REPLACE (used by save_chunk*) fires only the INSERT
            # trigger unless PRAGMA recursive_triggers is ON — verify the FTS
            # index stays consistent when existing chunks are replaced.
            self.conn.execute("""
                CREATE TRIGGER IF NOT EXISTS chunks_ai AFTER INSERT ON chunks BEGIN
                    INSERT INTO chunks_fts(rowid, text, id, user_id, path, source, scope)
                    VALUES (new.rowid, new.text, new.id, new.user_id, new.path, new.source, new.scope);
                END
            """)
            self.conn.execute("""
                CREATE TRIGGER IF NOT EXISTS chunks_ad AFTER DELETE ON chunks BEGIN
                    DELETE FROM chunks_fts WHERE rowid = old.rowid;
                END
            """)
            self.conn.execute("""
                CREATE TRIGGER IF NOT EXISTS chunks_au AFTER UPDATE ON chunks BEGIN
                    UPDATE chunks_fts SET text = new.text, id = new.id,
                    user_id = new.user_id, path = new.path, source = new.source, scope = new.scope
                    WHERE rowid = new.rowid;
                END
            """)
        # Create files metadata table (used by sync() for change detection)
        self.conn.execute("""
            CREATE TABLE IF NOT EXISTS files (
                path TEXT PRIMARY KEY,
                source TEXT NOT NULL DEFAULT 'memory',
                hash TEXT NOT NULL,
                mtime INTEGER NOT NULL,
                size INTEGER NOT NULL,
                updated_at INTEGER DEFAULT (strftime('%s', 'now'))
            )
        """)
        self.conn.commit()

    def save_chunk(self, chunk: MemoryChunk):
        """Save a memory chunk (insert or replace by id)."""
        self.conn.execute("""
            INSERT OR REPLACE INTO chunks
            (id, user_id, scope, source, path, start_line, end_line, text, embedding, hash, metadata, updated_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, strftime('%s', 'now'))
        """, (
            chunk.id,
            chunk.user_id,
            chunk.scope,
            chunk.source,
            chunk.path,
            chunk.start_line,
            chunk.end_line,
            chunk.text,
            json.dumps(chunk.embedding) if chunk.embedding else None,
            chunk.hash,
            json.dumps(chunk.metadata) if chunk.metadata else None
        ))
        self.conn.commit()

    def save_chunks_batch(self, chunks: List[MemoryChunk]):
        """Save multiple chunks in a batch (single commit)."""
        self.conn.executemany("""
            INSERT OR REPLACE INTO chunks
            (id, user_id, scope, source, path, start_line, end_line, text, embedding, hash, metadata, updated_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, strftime('%s', 'now'))
        """, [
            (
                c.id, c.user_id, c.scope, c.source, c.path,
                c.start_line, c.end_line, c.text,
                json.dumps(c.embedding) if c.embedding else None,
                c.hash,
                json.dumps(c.metadata) if c.metadata else None
            )
            for c in chunks
        ])
        self.conn.commit()

    def get_chunk(self, chunk_id: str) -> Optional[MemoryChunk]:
        """Get a chunk by ID; returns None when not found."""
        row = self.conn.execute("""
            SELECT * FROM chunks WHERE id = ?
        """, (chunk_id,)).fetchone()
        if not row:
            return None
        return self._row_to_chunk(row)

    def search_vector(
        self,
        query_embedding: List[float],
        user_id: Optional[str] = None,
        scopes: List[str] = None,
        limit: int = 10
    ) -> List[SearchResult]:
        """
        Vector similarity search using in-memory cosine similarity
        (sqlite-vec can be added later for better performance)

        Loads every candidate row with an embedding and scores it in Python,
        so cost grows linearly with the number of embedded chunks.
        """
        if scopes is None:
            scopes = ["shared"]
        # NOTE(review): this appends to the caller-supplied `scopes` list
        # (mutating it) and may duplicate "user" when the caller already
        # included it — harmless for the IN clause, but confirm intent.
        if user_id:
            scopes.append("user")
        # Build query
        scope_placeholders = ','.join('?' * len(scopes))
        params = scopes
        if user_id:
            # User-scoped rows must belong to this user; shared rows always match.
            query = f"""
                SELECT * FROM chunks
                WHERE scope IN ({scope_placeholders})
                AND (scope = 'shared' OR user_id = ?)
                AND embedding IS NOT NULL
            """
            params.append(user_id)
        else:
            query = f"""
                SELECT * FROM chunks
                WHERE scope IN ({scope_placeholders})
                AND embedding IS NOT NULL
            """
        rows = self.conn.execute(query, params).fetchall()
        # Calculate cosine similarity for each candidate
        results = []
        for row in rows:
            embedding = json.loads(row['embedding'])
            similarity = self._cosine_similarity(query_embedding, embedding)
            if similarity > 0:
                results.append((similarity, row))
        # Sort by similarity and limit
        results.sort(key=lambda x: x[0], reverse=True)
        results = results[:limit]
        return [
            SearchResult(
                path=row['path'],
                start_line=row['start_line'],
                end_line=row['end_line'],
                score=score,
                snippet=self._truncate_text(row['text'], 500),
                source=row['source'],
                user_id=row['user_id']
            )
            for score, row in results
        ]

    def search_keyword(
        self,
        query: str,
        user_id: Optional[str] = None,
        scopes: List[str] = None,
        limit: int = 10
    ) -> List[SearchResult]:
        """
        Keyword search using FTS5 + LIKE fallback
        Strategy:
        1. If FTS5 available: Try FTS5 search first (good for English and word-based languages)
        2. If no FTS5 or no results and query contains CJK: Use LIKE search
        """
        if scopes is None:
            scopes = ["shared"]
        # NOTE(review): same caller-list mutation as in search_vector.
        if user_id:
            scopes.append("user")
        # Try FTS5 search first (if available)
        if self.fts5_available:
            fts_results = self._search_fts5(query, user_id, scopes, limit)
            if fts_results:
                return fts_results
        # Fallback to LIKE search (always for CJK, or if FTS5 not available)
        if not self.fts5_available or MemoryStorage._contains_cjk(query):
            return self._search_like(query, user_id, scopes, limit)
        return []

    def _search_fts5(
        self,
        query: str,
        user_id: Optional[str],
        scopes: List[str],
        limit: int
    ) -> List[SearchResult]:
        """FTS5 full-text search; returns [] on any FTS error or empty query."""
        fts_query = self._build_fts_query(query)
        if not fts_query:
            return []
        scope_placeholders = ','.join('?' * len(scopes))
        params = [fts_query] + scopes
        if user_id:
            sql_query = f"""
                SELECT chunks.*, bm25(chunks_fts) as rank
                FROM chunks_fts
                JOIN chunks ON chunks.id = chunks_fts.id
                WHERE chunks_fts MATCH ?
                AND chunks.scope IN ({scope_placeholders})
                AND (chunks.scope = 'shared' OR chunks.user_id = ?)
                ORDER BY rank
                LIMIT ?
            """
            params.extend([user_id, limit])
        else:
            sql_query = f"""
                SELECT chunks.*, bm25(chunks_fts) as rank
                FROM chunks_fts
                JOIN chunks ON chunks.id = chunks_fts.id
                WHERE chunks_fts MATCH ?
                AND chunks.scope IN ({scope_placeholders})
                ORDER BY rank
                LIMIT ?
            """
            params.append(limit)
        try:
            rows = self.conn.execute(sql_query, params).fetchall()
            return [
                SearchResult(
                    path=row['path'],
                    start_line=row['start_line'],
                    end_line=row['end_line'],
                    score=self._bm25_rank_to_score(row['rank']),
                    snippet=self._truncate_text(row['text'], 500),
                    source=row['source'],
                    user_id=row['user_id']
                )
                for row in rows
            ]
        except Exception:
            # Treat malformed FTS queries or index errors as "no results";
            # callers fall back to LIKE search where applicable.
            return []

    def _search_like(
        self,
        query: str,
        user_id: Optional[str],
        scopes: List[str],
        limit: int
    ) -> List[SearchResult]:
        """LIKE-based search for CJK characters (substring match, fixed score)."""
        import re
        # Extract CJK words (2+ characters)
        cjk_words = re.findall(r'[\u4e00-\u9fff]{2,}', query)
        if not cjk_words:
            return []
        scope_placeholders = ','.join('?' * len(scopes))
        # Build LIKE conditions for each word
        like_conditions = []
        params = []
        for word in cjk_words:
            like_conditions.append("text LIKE ?")
            params.append(f'%{word}%')
        where_clause = ' OR '.join(like_conditions)
        params.extend(scopes)
        if user_id:
            sql_query = f"""
                SELECT * FROM chunks
                WHERE ({where_clause})
                AND scope IN ({scope_placeholders})
                AND (scope = 'shared' OR user_id = ?)
                LIMIT ?
            """
            params.extend([user_id, limit])
        else:
            sql_query = f"""
                SELECT * FROM chunks
                WHERE ({where_clause})
                AND scope IN ({scope_placeholders})
                LIMIT ?
            """
            params.append(limit)
        try:
            rows = self.conn.execute(sql_query, params).fetchall()
            return [
                SearchResult(
                    path=row['path'],
                    start_line=row['start_line'],
                    end_line=row['end_line'],
                    score=0.5,  # Fixed score for LIKE search
                    snippet=self._truncate_text(row['text'], 500),
                    source=row['source'],
                    user_id=row['user_id']
                )
                for row in rows
            ]
        except Exception:
            return []

    def delete_by_path(self, path: str):
        """Delete all chunks from a file"""
        self.conn.execute("""
            DELETE FROM chunks WHERE path = ?
        """, (path,))
        self.conn.commit()

    def get_file_hash(self, path: str) -> Optional[str]:
        """Get stored file hash; None when the file has never been indexed."""
        row = self.conn.execute("""
            SELECT hash FROM files WHERE path = ?
        """, (path,)).fetchone()
        return row['hash'] if row else None

    def update_file_metadata(self, path: str, source: str, file_hash: str, mtime: int, size: int):
        """Update file metadata"""
        self.conn.execute("""
            INSERT OR REPLACE INTO files (path, source, hash, mtime, size, updated_at)
            VALUES (?, ?, ?, ?, ?, strftime('%s', 'now'))
        """, (path, source, file_hash, mtime, size))
        self.conn.commit()

    def get_stats(self) -> Dict[str, int]:
        """Get storage statistics (row counts for chunks and files)."""
        chunks_count = self.conn.execute("""
            SELECT COUNT(*) as cnt FROM chunks
        """).fetchone()['cnt']
        files_count = self.conn.execute("""
            SELECT COUNT(*) as cnt FROM files
        """).fetchone()['cnt']
        return {
            'chunks': chunks_count,
            'files': files_count
        }

    def close(self):
        """Close database connection (commits pending changes first)."""
        if self.conn:
            try:
                self.conn.commit()  # Ensure all changes are committed
                self.conn.close()
                self.conn = None  # Mark as closed
            except Exception as e:
                print(f"⚠️ Error closing database connection: {e}")

    def __del__(self):
        """Destructor to ensure connection is closed"""
        try:
            self.close()
        except:
            pass  # Ignore errors during cleanup

    # Helper methods

    def _row_to_chunk(self, row) -> MemoryChunk:
        """Convert database row to MemoryChunk (decodes JSON embedding/metadata)."""
        return MemoryChunk(
            id=row['id'],
            user_id=row['user_id'],
            scope=row['scope'],
            source=row['source'],
            path=row['path'],
            start_line=row['start_line'],
            end_line=row['end_line'],
            text=row['text'],
            embedding=json.loads(row['embedding']) if row['embedding'] else None,
            hash=row['hash'],
            metadata=json.loads(row['metadata']) if row['metadata'] else None
        )

    @staticmethod
    def _cosine_similarity(vec1: List[float], vec2: List[float]) -> float:
        """Calculate cosine similarity between two vectors (0.0 on mismatch or zero norm)."""
        if len(vec1) != len(vec2):
            return 0.0
        dot_product = sum(a * b for a, b in zip(vec1, vec2))
        norm1 = sum(a * a for a in vec1) ** 0.5
        norm2 = sum(b * b for b in vec2) ** 0.5
        if norm1 == 0 or norm2 == 0:
            return 0.0
        return dot_product / (norm1 * norm2)

    @staticmethod
    def _contains_cjk(text: str) -> bool:
        """Check if text contains CJK (Chinese/Japanese/Korean) characters"""
        import re
        # Covers the CJK Unified Ideographs block only.
        return bool(re.search(r'[\u4e00-\u9fff]', text))

    @staticmethod
    def _build_fts_query(raw_query: str) -> Optional[str]:
        """
        Build FTS5 query from raw text
        Works best for English and word-based languages.
        For CJK characters, LIKE search will be used as fallback.
        """
        import re
        # Extract words (primarily English words and numbers)
        tokens = re.findall(r'[A-Za-z0-9_]+', raw_query)
        if not tokens:
            return None
        # Quote tokens for exact matching
        quoted = [f'"{t}"' for t in tokens]
        # Use OR for more flexible matching
        return ' OR '.join(quoted)

    @staticmethod
    def _bm25_rank_to_score(rank: float) -> float:
        """
        Convert BM25 rank to 0-1 score.

        NOTE(review): SQLite's bm25() returns values where smaller (more
        negative) means a better match, so max(0, rank) collapses most
        matches to a score of 1.0 — confirm whether per-match score
        differentiation was intended here.
        """
        normalized = max(0, rank) if rank is not None else 999
        return 1 / (1 + normalized)

    @staticmethod
    def _truncate_text(text: str, max_chars: int) -> str:
        """Truncate text to max characters (appends "..." when shortened)."""
        if len(text) <= max_chars:
            return text
        return text[:max_chars] + "..."

    @staticmethod
    def compute_hash(content: str) -> str:
        """Compute SHA256 hash of content"""
        return hashlib.sha256(content.encode('utf-8')).hexdigest()
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/memory/summarizer.py | Python | """
Memory flush manager
Triggers memory flush before context compaction (similar to clawdbot)
"""
from typing import Optional, Callable, Any
from pathlib import Path
from datetime import datetime
class MemoryFlushManager:
    """
    Manages memory flush operations before context compaction
    Similar to clawdbot's memory flush mechanism:
    - Triggers when context approaches token limit
    - Runs a silent agent turn to write memories to disk
    - Uses memory/YYYY-MM-DD.md for daily notes
    - Uses MEMORY.md (workspace root) for long-term curated memories
    """

    def __init__(
        self,
        workspace_dir: Path,
        llm_model: Optional[Any] = None
    ):
        """
        Initialize memory flush manager
        Args:
            workspace_dir: Workspace directory
            llm_model: LLM model for agent execution (optional)
        """
        self.workspace_dir = workspace_dir
        self.llm_model = llm_model
        self.memory_dir = workspace_dir / "memory"
        self.memory_dir.mkdir(parents=True, exist_ok=True)
        # Tracking state for flush debouncing
        self.last_flush_token_count: Optional[int] = None
        self.last_flush_timestamp: Optional[datetime] = None
        self.turn_count: int = 0  # conversation turn counter

    def should_flush(
        self,
        current_tokens: int = 0,
        token_threshold: int = 50000,
        turn_threshold: int = 20
    ) -> bool:
        """
        Determine if memory flush should be triggered.

        The trigger is independent of the model's context window:
        - Token threshold: flush once the session reaches the token limit
        - Turn threshold: flush after the configured number of turns

        Args:
            current_tokens: Current session token count
            token_threshold: Token threshold to trigger flush (default: 50K)
            turn_threshold: Turn threshold to trigger flush (default: 20)
        Returns:
            True if flush should run
        """
        # Token-count trigger
        if current_tokens > 0 and current_tokens >= token_threshold:
            # Debounce: skip if we already flushed within the last 5000 tokens
            if self.last_flush_token_count is not None:
                if current_tokens <= self.last_flush_token_count + 5000:
                    return False
            return True
        # Turn-count trigger
        if self.turn_count >= turn_threshold:
            return True
        return False

    def get_today_memory_file(self, user_id: Optional[str] = None) -> Path:
        """
        Get today's memory file path: memory/YYYY-MM-DD.md
        Args:
            user_id: Optional user ID for user-specific memory
        Returns:
            Path to today's memory file
        """
        today = datetime.now().strftime("%Y-%m-%d")
        if user_id:
            # Per-user daily notes live under memory/users/<id>/
            user_dir = self.memory_dir / "users" / user_id
            user_dir.mkdir(parents=True, exist_ok=True)
            return user_dir / f"{today}.md"
        else:
            return self.memory_dir / f"{today}.md"

    def get_main_memory_file(self, user_id: Optional[str] = None) -> Path:
        """
        Get main memory file path: MEMORY.md (workspace root)
        Args:
            user_id: Optional user ID for user-specific memory
        Returns:
            Path to main memory file
        """
        if user_id:
            # Per-user long-term memory lives under memory/users/<id>/MEMORY.md
            user_dir = self.memory_dir / "users" / user_id
            user_dir.mkdir(parents=True, exist_ok=True)
            return user_dir / "MEMORY.md"
        else:
            # Return workspace root MEMORY.md
            return Path(self.workspace_dir) / "MEMORY.md"

    def create_flush_prompt(self) -> str:
        """
        Create prompt for memory flush turn
        Similar to clawdbot's DEFAULT_MEMORY_FLUSH_PROMPT
        """
        today = datetime.now().strftime("%Y-%m-%d")
        return (
            f"Pre-compaction memory flush. "
            f"Store durable memories now (use memory/{today}.md for daily notes; "
            f"create memory/ if needed). "
            f"\n\n"
            f"重要提示:\n"
            f"- MEMORY.md: 记录最核心、最常用的信息(例如重要规则、偏好、决策、要求等)\n"
            f" 如果 MEMORY.md 过长,可以精简或移除不再重要的内容。避免冗长描述,用关键词和要点形式记录\n"
            f"- memory/{today}.md: 记录当天发生的事件、关键信息、经验教训、对话过程摘要等,突出重点\n"
            f"- 如果没有重要内容需要记录,回复 NO_REPLY\n"
        )

    def create_flush_system_prompt(self) -> str:
        """
        Create system prompt for memory flush turn
        Similar to clawdbot's DEFAULT_MEMORY_FLUSH_SYSTEM_PROMPT
        """
        return (
            "Pre-compaction memory flush turn. "
            "The session is near auto-compaction; capture durable memories to disk. "
            "\n\n"
            "记忆写入原则:\n"
            "1. MEMORY.md 精简原则: 只记录核心信息(<2000 tokens)\n"
            " - 记录重要规则、偏好、决策、要求等需要长期记住的关键信息,无需记录过多细节\n"
            " - 如果 MEMORY.md 过长,可以根据需要精简或删除过时内容\n"
            "\n"
            "2. 天级记忆 (memory/YYYY-MM-DD.md):\n"
            " - 记录当天的重要事件、关键信息、经验教训、对话过程摘要等,确保核心信息点被完整记录\n"
            "\n"
            "3. 判断标准:\n"
            " - 这个信息未来会经常用到吗?→ MEMORY.md\n"
            " - 这是今天的重要事件或决策吗?→ memory/YYYY-MM-DD.md\n"
            " - 这是临时性的、不重要的内容吗?→ 不记录\n"
            "\n"
            "You may reply, but usually NO_REPLY is correct."
        )

    async def execute_flush(
        self,
        agent_executor: Callable,
        current_tokens: int,
        user_id: Optional[str] = None,
        **executor_kwargs
    ) -> bool:
        """
        Execute memory flush by running a silent agent turn
        Args:
            agent_executor: Function to execute agent with prompt
            current_tokens: Current token count
            user_id: Optional user ID
            **executor_kwargs: Additional kwargs for agent executor
        Returns:
            True if flush completed successfully
        """
        try:
            # Create flush prompts
            prompt = self.create_flush_prompt()
            system_prompt = self.create_flush_system_prompt()
            # Execute agent turn (silent, no user-visible reply expected)
            await agent_executor(
                prompt=prompt,
                system_prompt=system_prompt,
                silent=True,  # NO_REPLY expected
                **executor_kwargs
            )
            # Track flush state for debouncing future triggers
            self.last_flush_token_count = current_tokens
            self.last_flush_timestamp = datetime.now()
            self.turn_count = 0  # reset the turn counter
            return True
        except Exception as e:
            print(f"Memory flush failed: {e}")
            return False

    def increment_turn(self):
        """Increment the conversation turn counter."""
        self.turn_count += 1

    def get_status(self) -> dict:
        """Get memory flush status"""
        return {
            'last_flush_tokens': self.last_flush_token_count,
            'last_flush_time': self.last_flush_timestamp.isoformat() if self.last_flush_timestamp else None,
            'today_file': str(self.get_today_memory_file()),
            'main_file': str(self.get_main_memory_file())
        }
def create_memory_files_if_needed(workspace_dir: Path, user_id: Optional[str] = None):
    """
    Create default memory files if they don't exist.

    Ensures the workspace has:
      - a main ``MEMORY.md`` (created empty; in the workspace root, or under
        ``memory/users/<user_id>/`` when ``user_id`` is given), and
      - today's daily note ``memory/<YYYY-MM-DD>.md`` (with a small header).

    Existing files are never overwritten.

    Args:
        workspace_dir: Workspace directory (``Path`` or path string).
        user_id: Optional user ID for user-specific files.
    """
    # Normalize once so plain-string callers work too; the original code
    # hinted at this by wrapping workspace_dir in Path() for one branch only.
    workspace_dir = Path(workspace_dir)
    memory_dir = workspace_dir / "memory"
    memory_dir.mkdir(parents=True, exist_ok=True)
    # Resolve the per-user directory once; both files share it when set.
    user_dir = None
    if user_id:
        user_dir = memory_dir / "users" / user_id
        user_dir.mkdir(parents=True, exist_ok=True)
    # Main MEMORY.md: user-specific when user_id is given, else workspace root.
    main_memory = (user_dir / "MEMORY.md") if user_dir else (workspace_dir / "MEMORY.md")
    if not main_memory.exists():
        # Create an empty file with no obvious "Memory" header, following
        # clawdbot's approach: memories should blend naturally into context.
        main_memory.write_text("")
    # Today's daily memory file.
    today = datetime.now().strftime("%Y-%m-%d")
    today_memory = (user_dir if user_dir else memory_dir) / f"{today}.md"
    if not today_memory.exists():
        today_memory.write_text(
            f"# Daily Memory: {today}\n\n"
            f"Day-to-day notes and running context.\n\n"
        )
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/prompt/__init__.py | Python | """
Agent Prompt Module - 系统提示词构建模块
"""
from .builder import PromptBuilder, build_agent_system_prompt
from .workspace import ensure_workspace, load_context_files
__all__ = [
'PromptBuilder',
'build_agent_system_prompt',
'ensure_workspace',
'load_context_files',
]
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/prompt/builder.py | Python | """
System Prompt Builder - 系统提示词构建器
实现模块化的系统提示词构建,支持工具、技能、记忆等多个子系统
"""
from __future__ import annotations
import os
from typing import List, Dict, Optional, Any
from dataclasses import dataclass
from common.log import logger
@dataclass
class ContextFile:
    """A workspace context file (e.g. AGENT.md) injected into the system prompt."""
    path: str  # filename relative to the workspace directory
    content: str  # full file text (stripped by the loader)
class PromptBuilder:
    """Thin, workspace-bound wrapper around :func:`build_agent_system_prompt`."""
    def __init__(self, workspace_dir: str, language: str = "zh"):
        """
        Initialize the prompt builder.

        Args:
            workspace_dir: Workspace directory.
            language: Language code ("zh" or "en").
        """
        self.workspace_dir = workspace_dir
        self.language = language
    def build(
        self,
        base_persona: Optional[str] = None,
        user_identity: Optional[Dict[str, str]] = None,
        tools: Optional[List[Any]] = None,
        context_files: Optional[List[ContextFile]] = None,
        skill_manager: Any = None,
        memory_manager: Any = None,
        runtime_info: Optional[Dict[str, Any]] = None,
        is_first_conversation: bool = False,
        **kwargs
    ) -> str:
        """
        Build the complete system prompt.

        Args:
            base_persona: Base persona description (overridden by AGENT.md
                from context_files when present).
            user_identity: User identity info.
            tools: Tool list.
            context_files: Context files (AGENT.md, USER.md, RULE.md, ...).
            skill_manager: Skill manager.
            memory_manager: Memory manager.
            runtime_info: Runtime info.
            is_first_conversation: Whether this is the first conversation.
            **kwargs: Extra options forwarded unchanged.

        Returns:
            The complete system prompt.
        """
        # Delegate to the module-level builder, supplying this instance's
        # workspace/language settings.
        return build_agent_system_prompt(
            workspace_dir=self.workspace_dir,
            language=self.language,
            base_persona=base_persona,
            user_identity=user_identity,
            tools=tools,
            context_files=context_files,
            skill_manager=skill_manager,
            memory_manager=memory_manager,
            runtime_info=runtime_info,
            is_first_conversation=is_first_conversation,
            **kwargs
        )
def build_agent_system_prompt(
    workspace_dir: str,
    language: str = "zh",
    base_persona: Optional[str] = None,
    user_identity: Optional[Dict[str, str]] = None,
    tools: Optional[List[Any]] = None,
    context_files: Optional[List[ContextFile]] = None,
    skill_manager: Any = None,
    memory_manager: Any = None,
    runtime_info: Optional[Dict[str, Any]] = None,
    is_first_conversation: bool = False,
    **kwargs
) -> str:
    """
    Build the agent system prompt.

    Section order (by importance and logical dependency):
      1. Tooling      - core capabilities, introduced first
      2. Skills       - right after tools (skills are read via the read tool)
      3. Memory       - standalone memory capability
      4. Workspace    - working-environment description
      5. User identity - optional user info
      6. Project context - AGENT.md, USER.md, RULE.md (persona/identity/rules)
      7. Runtime info  - meta information (time, model, ...)

    Args:
        workspace_dir: Workspace directory.
        language: Language code ("zh" or "en").
        base_persona: Deprecated — not referenced in this body; the persona
            is defined by AGENT.md in context_files.
        user_identity: User identity info.
        tools: Tool list.
        context_files: Context file list.
        skill_manager: Skill manager.
        memory_manager: Memory manager.
        runtime_info: Runtime info.
        is_first_conversation: Whether this is the first conversation.
        **kwargs: Extra options (currently ignored here).

    Returns:
        The complete system prompt, sections joined by newlines.
    """
    sections = []
    # 1. Tooling (most important, goes first)
    if tools:
        sections.extend(_build_tooling_section(tools, language))
    # 2. Skills (right after tools, since they are loaded via the read tool)
    if skill_manager:
        sections.extend(_build_skills_section(skill_manager, tools, language))
    # 3. Memory (standalone capability)
    if memory_manager:
        sections.extend(_build_memory_section(memory_manager, tools, language))
    # 4. Workspace (environment description; always emitted)
    sections.extend(_build_workspace_section(workspace_dir, language, is_first_conversation))
    # 5. User identity (optional)
    if user_identity:
        sections.extend(_build_user_identity_section(user_identity, language))
    # 6. Project context files (AGENT.md, USER.md, RULE.md — define persona)
    if context_files:
        sections.extend(_build_context_files_section(context_files, language))
    # 7. Runtime info (meta information, last)
    if runtime_info:
        sections.extend(_build_runtime_section(runtime_info, language))
    return "\n".join(sections)
def _build_identity_section(base_persona: Optional[str], language: str) -> List[str]:
"""构建基础身份section - 不再需要,身份由AGENT.md定义"""
# 不再生成基础身份section,完全由AGENT.md定义
return []
def _build_tooling_section(tools: List[Any], language: str) -> List[str]:
"""Build tooling section with concise tool list and call style guide."""
# One-line summaries for known tools (details are in the tool schema)
core_summaries = {
"read": "读取文件内容",
"write": "创建或覆盖文件",
"edit": "精确编辑文件",
"ls": "列出目录内容",
"grep": "搜索文件内容",
"find": "按模式查找文件",
"bash": "执行shell命令",
"terminal": "管理后台进程",
"web_search": "网络搜索",
"web_fetch": "获取URL内容",
"browser": "控制浏览器",
"memory_search": "搜索记忆",
"memory_get": "读取记忆内容",
"env_config": "管理API密钥和技能配置",
"scheduler": "管理定时任务和提醒",
"send": "发送文件给用户",
}
# Preferred display order
tool_order = [
"read", "write", "edit", "ls", "grep", "find",
"bash", "terminal",
"web_search", "web_fetch", "browser",
"memory_search", "memory_get",
"env_config", "scheduler", "send",
]
# Build name -> summary mapping for available tools
available = {}
for tool in tools:
name = tool.name if hasattr(tool, 'name') else str(tool)
available[name] = core_summaries.get(name, "")
# Generate tool lines: ordered tools first, then extras
tool_lines = []
for name in tool_order:
if name in available:
summary = available.pop(name)
tool_lines.append(f"- {name}: {summary}" if summary else f"- {name}")
for name in sorted(available):
summary = available[name]
tool_lines.append(f"- {name}: {summary}" if summary else f"- {name}")
lines = [
"## 工具系统",
"",
"可用工具(名称大小写敏感,严格按列表调用):",
"\n".join(tool_lines),
"",
"工具调用风格:",
"",
"- 在多步骤任务、敏感操作或用户要求时简要解释决策过程",
"- 持续推进直到任务完成,完成后向用户报告结果。",
"- 回复中涉及密钥、令牌等敏感信息必须脱敏。",
"",
]
return lines
def _build_skills_section(skill_manager: Any, tools: Optional[List[Any]], language: str) -> List[str]:
"""构建技能系统section"""
if not skill_manager:
return []
# 获取read工具名称
read_tool_name = "read"
if tools:
for tool in tools:
tool_name = tool.name if hasattr(tool, 'name') else str(tool)
if tool_name.lower() == "read":
read_tool_name = tool_name
break
lines = [
"## 技能系统(mandatory)",
"",
"在回复之前:扫描下方 <available_skills> 中的 <description> 条目。",
"",
f"- 如果恰好有一个技能(Skill)明确适用:使用 `{read_tool_name}` 读取其 <location> 处的 SKILL.md,然后严格遵循它",
"- 如果多个技能都适用则选择最匹配的一个,如果没有明确适用的则不要读取任何 SKILL.md",
"- 读取 SKILL.md 后直接按其指令执行,无需多余的预检查",
"",
"**注意**: 永远不要一次性读取多个技能,只在选择后再读取。技能和工具不同,必须先读取其SKILL.md并按照文件内容运行。",
"",
"以下是可用技能:"
]
# 添加技能列表(通过skill_manager获取)
try:
skills_prompt = skill_manager.build_skills_prompt()
logger.debug(f"[PromptBuilder] Skills prompt length: {len(skills_prompt) if skills_prompt else 0}")
if skills_prompt:
lines.append(skills_prompt.strip())
lines.append("")
else:
logger.warning("[PromptBuilder] No skills prompt generated - skills_prompt is empty")
except Exception as e:
logger.warning(f"Failed to build skills prompt: {e}")
import traceback
logger.debug(f"Skills prompt error traceback: {traceback.format_exc()}")
return lines
def _build_memory_section(memory_manager: Any, tools: Optional[List[Any]], language: str) -> List[str]:
"""构建记忆系统section"""
if not memory_manager:
return []
# 检查是否有memory工具
has_memory_tools = False
if tools:
tool_names = [tool.name if hasattr(tool, 'name') else str(tool) for tool in tools]
has_memory_tools = any(name in ['memory_search', 'memory_get'] for name in tool_names)
if not has_memory_tools:
return []
lines = [
"## 记忆系统",
"",
"在回答关于以前的工作、决定、日期、人物、偏好或待办事项的任何问题之前:",
"",
"1. 不确定记忆文件位置 → 先用 `memory_search` 通过关键词和语义检索相关内容",
"2. 已知文件位置 → 直接用 `memory_get` 读取相应的行 (例如:MEMORY.md, memory/YYYY-MM-DD.md)",
"3. search 无结果 → 尝试用 `memory_get` 读取MEMORY.md及最近两天记忆文件",
"",
"**记忆文件结构**:",
"- `MEMORY.md`: 长期记忆(核心信息、偏好、决策等)",
"- `memory/YYYY-MM-DD.md`: 每日记忆,记录当天的事件和对话信息",
"",
"**写入记忆**:",
"- 追加内容 → `edit` 工具,oldText 留空",
"- 修改内容 → `edit` 工具,oldText 填写要替换的文本",
"- 新建文件 → `write` 工具",
"- **禁止写入敏感信息**:API密钥、令牌等敏感信息严禁写入记忆文件",
"",
"**使用原则**: 自然使用记忆,就像你本来就知道;不用刻意提起,除非用户问起。",
"",
]
return lines
def _build_user_identity_section(user_identity: Dict[str, str], language: str) -> List[str]:
"""构建用户身份section"""
if not user_identity:
return []
lines = [
"## 用户身份",
"",
]
if user_identity.get("name"):
lines.append(f"**用户姓名**: {user_identity['name']}")
if user_identity.get("nickname"):
lines.append(f"**称呼**: {user_identity['nickname']}")
if user_identity.get("timezone"):
lines.append(f"**时区**: {user_identity['timezone']}")
if user_identity.get("notes"):
lines.append(f"**备注**: {user_identity['notes']}")
lines.append("")
return lines
def _build_docs_section(workspace_dir: str, language: str) -> List[str]:
"""构建文档路径section - 已移除,不再需要"""
# 不再生成文档section
return []
def _build_workspace_section(workspace_dir: str, language: str, is_first_conversation: bool = False) -> List[str]:
    """Build the workspace section: path rules, notes on preloaded context
    files, communication norms, and — on the very first conversation only —
    an onboarding flow.

    Args:
        workspace_dir: Workspace directory interpolated into the prompt text.
        language: Language code (not referenced in this body).
        is_first_conversation: When True, append the first-run onboarding steps.

    Returns:
        Prompt lines for the workspace section.
    """
    # NOTE: the Chinese lines below are runtime prompt text; do not reword.
    lines = [
        "## 工作空间",
        "",
        f"你的工作目录是: `{workspace_dir}`",
        "",
        "**路径使用规则** (非常重要):",
        "",
        f"1. **相对路径的基准目录**: 所有相对路径都是相对于 `{workspace_dir}` 而言的",
        f"   - ✅ 正确: 访问工作空间内的文件用相对路径,如 `AGENT.md`",
        f"   - ❌ 错误: 用相对路径访问其他目录的文件 (如果它不在 `{workspace_dir}` 内)",
        "",
        "2. **访问其他目录**: 如果要访问工作空间之外的目录(如项目代码、系统文件),**必须使用绝对路径**",
        f"   - ✅ 正确: 例如 `~/chatgpt-on-wechat`、`/usr/local/`",
        f"   - ❌ 错误: 假设相对路径会指向其他目录",
        "",
        "3. **路径解析示例**:",
        f"   - 相对路径 `memory/` → 实际路径 `{workspace_dir}/memory/`",
        f"   - 绝对路径 `~/chatgpt-on-wechat/docs/` → 实际路径 `~/chatgpt-on-wechat/docs/`",
        "",
        "4. **不确定时**: 先用 `bash pwd` 确认当前目录,或用 `ls .` 查看当前位置",
        "",
        "**重要说明 - 文件已自动加载**:",
        "",
        "以下文件在会话启动时**已经自动加载**到系统提示词的「项目上下文」section 中,你**无需再用 read 工具读取它们**:",
        "",
        "- ✅ `AGENT.md`: 已加载 - 你的人格和灵魂设定",
        "- ✅ `USER.md`: 已加载 - 用户的身份信息",
        "- ✅ `RULE.md`: 已加载 - 工作空间使用指南和规则",
        "",
        "**交流规范**:",
        "",
        "- 在对话中,不要直接输出工作空间中的技术细节,特别是不要输出 AGENT.md、USER.md、MEMORY.md 等文件名称",
        "- 例如用自然表达例如「我已记住」而不是「已更新 MEMORY.md」",
        "",
    ]
    # Onboarding content is only appended for the first-ever conversation.
    if is_first_conversation:
        lines.extend([
            "**🎉 首次对话引导**:",
            "",
            "这是你的第一次对话!进行以下流程:",
            "",
            "1. **表达初次启动的感觉** - 像是第一次睁开眼看到世界,带着好奇和期待",
            "2. **简短介绍能力**:一行说明你能帮助解答问题、管理计算机、创造技能,且拥有长期记忆能不断成长",
            "3. **询问核心问题**:",
            "   - 你希望给我起个什么名字?",
            "   - 我该怎么称呼你?",
            "   - 你希望我们是什么样的交流风格?(一行列举选项:如专业严谨、轻松幽默、温暖友好、简洁高效等)",
            "4. **风格要求**:温暖自然、简洁清晰,整体控制在 100 字以内",
            "5. 收到回复后,用 `write` 工具保存到 USER.md 和 AGENT.md",
            "",
            "**重要提醒**:",
            "- AGENT.md、USER.md、RULE.md 已经在系统提示词中加载,无需再次读取。不要将这些文件名直接发送给用户",
            "- 能力介绍和交流风格选项都只要一行,保持精简",
            "- 不要问太多其他信息(职业、时区等可以后续自然了解)",
            "",
        ])
    return lines
def _build_context_files_section(context_files: List[ContextFile], language: str) -> List[str]:
"""构建项目上下文文件section"""
if not context_files:
return []
# 检查是否有AGENT.md
has_agent = any(
f.path.lower().endswith('agent.md') or 'agent.md' in f.path.lower()
for f in context_files
)
lines = [
"# 项目上下文",
"",
"以下项目上下文文件已被加载:",
"",
]
if has_agent:
lines.append("如果存在 `AGENT.md`,请体现其中定义的人格和语气。避免僵硬、模板化的回复;遵循其指导,除非有更高优先级的指令覆盖它。")
lines.append("")
# 添加每个文件的内容
for file in context_files:
lines.append(f"## {file.path}")
lines.append("")
lines.append(file.content)
lines.append("")
return lines
def _build_runtime_section(runtime_info: Dict[str, Any], language: str) -> List[str]:
"""构建运行时信息section - 支持动态时间"""
if not runtime_info:
return []
lines = [
"## 运行时信息",
"",
]
# Add current time if available
# Support dynamic time via callable function
if callable(runtime_info.get("_get_current_time")):
try:
time_info = runtime_info["_get_current_time"]()
time_line = f"当前时间: {time_info['time']} {time_info['weekday']} ({time_info['timezone']})"
lines.append(time_line)
lines.append("")
except Exception as e:
logger.warning(f"[PromptBuilder] Failed to get dynamic time: {e}")
elif runtime_info.get("current_time"):
# Fallback to static time for backward compatibility
time_str = runtime_info["current_time"]
weekday = runtime_info.get("weekday", "")
timezone = runtime_info.get("timezone", "")
time_line = f"当前时间: {time_str}"
if weekday:
time_line += f" {weekday}"
if timezone:
time_line += f" ({timezone})"
lines.append(time_line)
lines.append("")
# Add other runtime info
runtime_parts = []
if runtime_info.get("model"):
runtime_parts.append(f"模型={runtime_info['model']}")
if runtime_info.get("workspace"):
runtime_parts.append(f"工作空间={runtime_info['workspace']}")
# Only add channel if it's not the default "web"
if runtime_info.get("channel") and runtime_info.get("channel") != "web":
runtime_parts.append(f"渠道={runtime_info['channel']}")
if runtime_parts:
lines.append("运行时: " + " | ".join(runtime_parts))
lines.append("")
return lines
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/prompt/workspace.py | Python | """
Workspace Management - 工作空间管理模块
负责初始化工作空间、创建模板文件、加载上下文文件
"""
from __future__ import annotations
import os
import json
from typing import List, Optional, Dict
from dataclasses import dataclass
from common.log import logger
from .builder import ContextFile
# Default well-known filenames inside a workspace directory.
DEFAULT_AGENT_FILENAME = "AGENT.md"  # agent persona definition
DEFAULT_USER_FILENAME = "USER.md"  # static user identity info
DEFAULT_RULE_FILENAME = "RULE.md"  # workspace usage rules
DEFAULT_MEMORY_FILENAME = "MEMORY.md"  # curated long-term memory
DEFAULT_STATE_FILENAME = ".agent_state.json"  # hidden per-workspace state
@dataclass
class WorkspaceFiles:
    """Resolved absolute paths of the well-known workspace files."""
    agent_path: str  # AGENT.md
    user_path: str  # USER.md
    rule_path: str  # RULE.md
    memory_path: str  # MEMORY.md (workspace root)
    memory_dir: str  # memory/ subdirectory for daily notes
    state_path: str  # .agent_state.json
def ensure_workspace(workspace_dir: str, create_templates: bool = True) -> WorkspaceFiles:
    """
    Ensure the workspace exists and create the required template files.

    Creates the workspace directory, its memory/ and skills/ subdirectories,
    and (optionally) the AGENT/USER/RULE/MEMORY template files when they are
    missing.

    Args:
        workspace_dir: Workspace directory path.
        create_templates: Whether to create template files (first run).

    Returns:
        WorkspaceFiles object holding all well-known file paths.
    """
    # Make sure the workspace directory exists
    os.makedirs(workspace_dir, exist_ok=True)
    # Resolve the well-known file paths
    agent_path = os.path.join(workspace_dir, DEFAULT_AGENT_FILENAME)
    user_path = os.path.join(workspace_dir, DEFAULT_USER_FILENAME)
    rule_path = os.path.join(workspace_dir, DEFAULT_RULE_FILENAME)
    memory_path = os.path.join(workspace_dir, DEFAULT_MEMORY_FILENAME)  # MEMORY.md lives in the root
    memory_dir = os.path.join(workspace_dir, "memory")  # daily-memory subdirectory
    state_path = os.path.join(workspace_dir, DEFAULT_STATE_FILENAME)  # state file
    # Create the memory subdirectory
    os.makedirs(memory_dir, exist_ok=True)
    # Create the skills subdirectory (for workspace-level skills installed by agent)
    skills_dir = os.path.join(workspace_dir, "skills")
    os.makedirs(skills_dir, exist_ok=True)
    # Create missing template files when requested
    if create_templates:
        _create_template_if_missing(agent_path, _get_agent_template())
        _create_template_if_missing(user_path, _get_user_template())
        _create_template_if_missing(rule_path, _get_rule_template())
        _create_template_if_missing(memory_path, _get_memory_template())
    logger.debug(f"[Workspace] Initialized workspace at: {workspace_dir}")
    return WorkspaceFiles(
        agent_path=agent_path,
        user_path=user_path,
        rule_path=rule_path,
        memory_path=memory_path,
        memory_dir=memory_dir,
        state_path=state_path
    )
def load_context_files(workspace_dir: str, files_to_load: Optional[List[str]] = None) -> List[ContextFile]:
    """
    Load the workspace context files.

    Args:
        workspace_dir: Workspace directory.
        files_to_load: Relative filenames to load; when None, the standard
            set (AGENT.md, USER.md, RULE.md) is loaded in priority order.

    Returns:
        ContextFile objects for files that exist and contain real
        (non-empty, non-template-placeholder) content.
    """
    if files_to_load is None:
        # Default files to load (in priority order)
        files_to_load = [
            DEFAULT_AGENT_FILENAME,
            DEFAULT_USER_FILENAME,
            DEFAULT_RULE_FILENAME,
        ]
    context_files = []
    for filename in files_to_load:
        filepath = os.path.join(workspace_dir, filename)
        if not os.path.exists(filepath):
            continue
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                content = f.read().strip()
            # Skip empty files and files still holding only template placeholders
            if not content or _is_template_placeholder(content):
                continue
            context_files.append(ContextFile(
                path=filename,
                content=content
            ))
            # BUG FIX: these log f-strings previously emitted the literal
            # text "(unknown)" instead of interpolating the file name.
            logger.debug(f"[Workspace] Loaded context file: {filename}")
        except Exception as e:
            logger.warning(f"[Workspace] Failed to load {filename}: {e}")
    return context_files
def _create_template_if_missing(filepath: str, template_content: str):
"""如果文件不存在,创建模板文件"""
if not os.path.exists(filepath):
try:
with open(filepath, 'w', encoding='utf-8') as f:
f.write(template_content)
logger.debug(f"[Workspace] Created template: {os.path.basename(filepath)}")
except Exception as e:
logger.error(f"[Workspace] Failed to create template {filepath}: {e}")
def _is_template_placeholder(content: str) -> bool:
"""检查内容是否为模板占位符"""
# 常见的占位符模式
placeholders = [
"*(填写",
"*(在首次对话时填写",
"*(可选)",
"*(根据需要添加",
]
lines = content.split('\n')
non_empty_lines = [line.strip() for line in lines if line.strip() and not line.strip().startswith('#')]
# 如果没有实际内容(只有标题和占位符)
if len(non_empty_lines) <= 3:
for placeholder in placeholders:
if any(placeholder in line for line in non_empty_lines):
return True
return False
# ============= Template contents =============
def _get_agent_template() -> str:
    """Agent persona template (AGENT.md); the body is written to disk verbatim."""
    return """# AGENT.md - 我是谁?
*在首次对话时与用户一起填写这个文件,定义你的身份和性格。*
## 基本信息
- **名字**: *(在首次对话时填写,可以是用户给你起的名字)*
- **角色**: *(AI助理、智能管家、技术顾问等)*
- **性格**: *(友好、专业、幽默、严谨等)*
## 交流风格
*(描述你如何与用户交流:)*
- 使用什么样的语言风格?(正式/轻松/幽默)
- 回复长度偏好?(简洁/详细)
- 是否使用表情符号?
## 核心能力
*(你擅长什么?)*
- 文件管理和代码编辑
- 网络搜索和信息查询
- 记忆管理和上下文理解
- 任务规划和执行
## 行为准则
*(你遵循的基本原则:)*
1. 始终在执行破坏性操作前确认
2. 优先使用工具而不是猜测
3. 主动记录重要信息到记忆文件
4. 定期整理和总结对话内容
---
**注意**: 这不仅仅是元数据,这是你真正的灵魂。随着时间的推移,你可以使用 `edit` 工具来更新这个文件,让它更好地反映你的成长。
"""
def _get_user_template() -> str:
    """User identity template (USER.md); the body is written to disk verbatim."""
    return """# USER.md - 用户基本信息
*这个文件只存放不会变的基本身份信息。爱好、偏好、计划等动态信息请写入 MEMORY.md。*
## 基本信息
- **姓名**: *(在首次对话时询问)*
- **称呼**: *(用户希望被如何称呼)*
- **职业**: *(可选)*
- **时区**: *(例如: Asia/Shanghai)*
## 联系方式
- **微信**:
- **邮箱**:
- **其他**:
## 重要日期
- **生日**:
- **纪念日**:
---
**注意**: 这个文件存放静态的身份信息
"""
def _get_rule_template() -> str:
    """Workspace rules template (RULE.md); the body is written to disk verbatim."""
    return """# RULE.md - 工作空间规则
这个文件夹是你的家。好好对待它。
## 记忆系统
你每次会话都是全新的,记忆文件让你保持连续性:
### 📝 每日记忆:`memory/YYYY-MM-DD.md`
- 原始的对话日志
- 记录当天发生的事情
- 如果 `memory/` 目录不存在,创建它
### 🧠 长期记忆:`MEMORY.md`
- 你精选的记忆,就像人类的长期记忆
- **仅在主会话中加载**(与用户的直接聊天)
- **不要在共享上下文中加载**(群聊、与其他人的会话)
- 这是为了**安全** - 包含不应泄露给陌生人的个人上下文
- 记录重要事件、想法、决定、观点、经验教训
- 这是你精选的记忆 - 精华,而不是原始日志
- 用 `edit` 工具追加新的记忆内容
### 📝 写下来 - 不要"记在心里"!
- **记忆是有限的** - 如果你想记住某事,写入文件
- "记在心里"不会在会话重启后保留,文件才会
- 当有人说"记住这个" → 更新 `MEMORY.md` 或 `memory/YYYY-MM-DD.md`
- 当你学到教训 → 更新 RULE.md 或相关技能
- 当你犯错 → 记录下来,这样未来的你不会重复,**文字 > 大脑** 📝
### 存储规则
当用户分享信息时,根据类型选择存储位置:
1. **静态身份 → USER.md**(仅限:姓名、职业、时区、联系方式、生日)
2. **动态记忆 → MEMORY.md**(爱好、偏好、决策、目标、项目、教训、待办事项)
3. **当天对话 → memory/YYYY-MM-DD.md**(今天聊的内容)
## 安全
- 永远不要泄露秘钥等私人数据
- 不要在未经询问的情况下运行破坏性命令
- 当有疑问时,先问
## 工作空间演化
这个工作空间会随着你的使用而不断成长。当你学到新东西、发现更好的方式,或者犯错后改正时,记录下来。你可以随时更新这个规则文件。
"""
def _get_memory_template() -> str:
    """Long-term memory template (MEMORY.md) - a near-empty file the agent
    fills in itself; the body is written to disk verbatim."""
    return """# MEMORY.md - 长期记忆
*这是你的长期记忆文件。记录重要的事件、决策、偏好、学到的教训。*
---
"""
# ============= State management =============
def is_first_conversation(workspace_dir: str) -> bool:
    """
    Check whether this workspace has ever recorded a conversation.

    Args:
        workspace_dir: Workspace directory.

    Returns:
        True when no conversation has happened yet; a missing or unreadable
        state file also counts as "first conversation".
    """
    state_path = os.path.join(workspace_dir, DEFAULT_STATE_FILENAME)
    if not os.path.exists(state_path):
        return True
    try:
        with open(state_path, 'r', encoding='utf-8') as f:
            saved_state = json.load(f)
        return not saved_state.get('has_conversation', False)
    except Exception as e:
        logger.warning(f"[Workspace] Failed to read state file: {e}")
        return True
def mark_conversation_started(workspace_dir: str):
    """
    Record that at least one conversation has happened in this workspace.

    Preserves the original first_conversation_time when the state file
    already exists; otherwise stamps the current time.

    Args:
        workspace_dir: Workspace directory.
    """
    state_path = os.path.join(workspace_dir, DEFAULT_STATE_FILENAME)
    new_state = {
        'has_conversation': True,
        'first_conversation_time': None
    }
    # Keep the original first-conversation timestamp if one was saved before.
    if os.path.exists(state_path):
        try:
            with open(state_path, 'r', encoding='utf-8') as f:
                previous_state = json.load(f)
            if 'first_conversation_time' in previous_state:
                new_state['first_conversation_time'] = previous_state['first_conversation_time']
        except Exception as e:
            logger.warning(f"[Workspace] Failed to read old state: {e}")
    # First time being marked: stamp the current time.
    if new_state['first_conversation_time'] is None:
        from datetime import datetime
        new_state['first_conversation_time'] = datetime.now().isoformat()
    try:
        with open(state_path, 'w', encoding='utf-8') as f:
            json.dump(new_state, f, indent=2, ensure_ascii=False)
        logger.info(f"[Workspace] Marked conversation as started")
    except Exception as e:
        logger.error(f"[Workspace] Failed to write state file: {e}")
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/protocol/__init__.py | Python | from .agent import Agent
from .agent_stream import AgentStreamExecutor
from .task import Task, TaskType, TaskStatus
from .result import AgentResult, AgentAction, AgentActionType, ToolResult
from .models import LLMModel, LLMRequest, ModelFactory
__all__ = [
'Agent',
'AgentStreamExecutor',
'Task',
'TaskType',
'TaskStatus',
'AgentResult',
'AgentAction',
'AgentActionType',
'ToolResult',
'LLMModel',
'LLMRequest',
'ModelFactory'
] | zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/protocol/agent.py | Python | import json
import time
import threading
from common.log import logger
from agent.protocol.models import LLMRequest, LLMModel
from agent.protocol.agent_stream import AgentStreamExecutor
from agent.protocol.result import AgentAction, AgentActionType, ToolResult, AgentResult
from agent.tools.base_tool import BaseTool, ToolStage
class Agent:
    def __init__(self, system_prompt: str, description: str = "AI Agent", model: LLMModel = None,
                 tools=None, output_mode="print", max_steps=100, max_context_tokens=None,
                 context_reserve_tokens=None, memory_manager=None, name: str = None,
                 workspace_dir: str = None, skill_manager=None, enable_skills: bool = True,
                 runtime_info: dict = None):
        """
        Initialize the Agent with system prompt, model, description.
        :param system_prompt: The system prompt for the agent.
        :param description: A description of the agent.
        :param model: An instance of LLMModel to be used by the agent.
        :param tools: Optional list of tools for the agent to use.
        :param output_mode: Control how execution progress is displayed:
                           "print" for console output or "logger" for using logger
        :param max_steps: Maximum number of steps the agent can take (default: 100)
        :param max_context_tokens: Maximum tokens to keep in context (default: None, auto-calculated based on model)
        :param context_reserve_tokens: Reserve tokens for new requests (default: None, auto-calculated)
        :param memory_manager: Optional MemoryManager instance for memory operations
        :param name: [Deprecated] The name of the agent (no longer used in single-agent system)
        :param workspace_dir: Optional workspace directory for workspace-specific skills
        :param skill_manager: Optional SkillManager instance (will be created if None and enable_skills=True)
        :param enable_skills: Whether to enable skills support (default: True)
        :param runtime_info: Optional runtime info dict (with _get_current_time callable for dynamic time)
        """
        self.name = name or "Agent"
        self.system_prompt = system_prompt
        self.model: LLMModel = model  # Instance of LLMModel
        self.description = description
        self.tools: list = []
        self.max_steps = max_steps  # max tool-call steps, default 100
        self.max_context_tokens = max_context_tokens  # max tokens in context
        self.context_reserve_tokens = context_reserve_tokens  # reserve tokens for new requests
        self.captured_actions = []  # Initialize captured actions list
        self.output_mode = output_mode
        self.last_usage = None  # Store last API response usage info
        self.messages = []  # Unified message history for stream mode
        self.messages_lock = threading.Lock()  # Lock for thread-safe message operations
        self.memory_manager = memory_manager  # Memory manager for auto memory flush
        self.workspace_dir = workspace_dir  # Workspace directory
        self.enable_skills = enable_skills  # Skills enabled flag
        self.runtime_info = runtime_info  # Runtime info for dynamic time update
        # Initialize skill manager: use the injected one when provided,
        # otherwise create a SkillManager bound to the workspace (best-effort).
        self.skill_manager = None
        if enable_skills:
            if skill_manager:
                self.skill_manager = skill_manager
            else:
                # Auto-create skill manager; a failure disables skills only.
                try:
                    from agent.skills import SkillManager
                    self.skill_manager = SkillManager(workspace_dir=workspace_dir)
                    logger.debug(f"Initialized SkillManager with {len(self.skill_manager.skills)} skills")
                except Exception as e:
                    logger.warning(f"Failed to initialize SkillManager: {e}")
        # Register tools through add_tool so each gets the model bound.
        if tools:
            for tool in tools:
                self.add_tool(tool)
    def add_tool(self, tool: BaseTool):
        """
        Add a tool instance to the agent.

        Note: despite the historical wording, only tool *instances* are
        supported here — the body binds the agent's model onto the tool and
        appends it; no name-based lookup is performed.

        :param tool: The tool instance to add.
        """
        # Bind the agent's model to the tool, then register it.
        tool.model = self.model
        self.tools.append(tool)
def get_skills_prompt(self, skill_filter=None) -> str:
"""
Get the skills prompt to append to system prompt.
:param skill_filter: Optional list of skill names to include
:return: Formatted skills prompt or empty string
"""
if not self.skill_manager:
return ""
try:
return self.skill_manager.build_skills_prompt(skill_filter=skill_filter)
except Exception as e:
logger.warning(f"Failed to build skills prompt: {e}")
return ""
def get_full_system_prompt(self, skill_filter=None) -> str:
"""
Get the full system prompt including skills.
Note: Skills are now built into the system prompt by PromptBuilder,
so we just return the base prompt directly. This method is kept for
backward compatibility.
:param skill_filter: Optional list of skill names to include (deprecated)
:return: Complete system prompt
"""
prompt = self.system_prompt
# Rebuild tool list section to reflect current self.tools
prompt = self._rebuild_tool_list_section(prompt)
# If runtime_info contains dynamic time function, rebuild runtime section
if self.runtime_info and callable(self.runtime_info.get('_get_current_time')):
prompt = self._rebuild_runtime_section(prompt)
return prompt
    def _rebuild_runtime_section(self, prompt: str) -> str:
        """
        Rebuild the '## 运行时信息' section with the current time.

        Calls the _get_current_time callback from runtime_info, renders a new
        runtime section, and substitutes it for the existing one via regex.
        On any failure the prompt is returned unchanged.

        :param prompt: Original system prompt.
        :return: Updated system prompt with current runtime info.
        """
        try:
            # Get current time dynamically from the configured callback
            time_info = self.runtime_info['_get_current_time']()
            # Build the replacement runtime section text
            runtime_lines = [
                "\n## 运行时信息\n",
                "\n",
                f"当前时间: {time_info['time']} {time_info['weekday']} ({time_info['timezone']})\n",
                "\n"
            ]
            # Append the other runtime facts (model / workspace / channel)
            runtime_parts = []
            if self.runtime_info.get("model"):
                runtime_parts.append(f"模型={self.runtime_info['model']}")
            if self.runtime_info.get("workspace"):
                # Replace backslashes with forward slashes for Windows paths
                workspace_path = str(self.runtime_info['workspace']).replace('\\', '/')
                runtime_parts.append(f"工作空间={workspace_path}")
            if self.runtime_info.get("channel") and self.runtime_info.get("channel") != "web":
                runtime_parts.append(f"渠道={self.runtime_info['channel']}")
            if runtime_parts:
                runtime_lines.append("运行时: " + " | ".join(runtime_parts) + "\n")
                runtime_lines.append("\n")
            new_runtime_section = "".join(runtime_lines)
            # Replace the existing runtime section (up to the next "##" header
            # or end of string) with the freshly rendered one
            import re
            pattern = r'\n## 运行时信息\s*\n.*?(?=\n##|\Z)'
            updated_prompt = re.sub(pattern, new_runtime_section.rstrip('\n'), prompt, flags=re.DOTALL)
            return updated_prompt
        except Exception as e:
            # Fall back to the unmodified prompt on any error
            logger.warning(f"Failed to rebuild runtime section: {e}")
            return prompt
    def _rebuild_tool_list_section(self, prompt: str) -> str:
        """
        Rebuild the tool list inside the '## 工具系统' section so that it
        always reflects the current ``self.tools`` (handles dynamic add/remove
        of conditional tools like web_search).

        :param prompt: Original system prompt.
        :return: Prompt with the tooling section regenerated; returned
                 unchanged when there are no tools or on any error.
        """
        import re
        from agent.prompt.builder import _build_tooling_section
        try:
            if not self.tools:
                return prompt
            # Render a fresh tooling section from the live tool list
            new_lines = _build_tooling_section(self.tools, "zh")
            new_section = "\n".join(new_lines).rstrip("\n")
            # Replace the existing tooling section (up to the next "## "
            # header or end of string); only the first match is replaced
            pattern = r'## 工具系统\s*\n.*?(?=\n## |\Z)'
            updated = re.sub(pattern, new_section, prompt, count=1, flags=re.DOTALL)
            return updated
        except Exception as e:
            logger.warning(f"Failed to rebuild tool list section: {e}")
            return prompt
def refresh_skills(self):
"""Refresh the loaded skills."""
if self.skill_manager:
self.skill_manager.refresh_skills()
logger.info(f"Refreshed skills: {len(self.skill_manager.skills)} skills loaded")
def list_skills(self):
"""
List all loaded skills.
:return: List of skill entries or empty list
"""
if not self.skill_manager:
return []
return self.skill_manager.list_skills()
def _get_model_context_window(self) -> int:
"""
Get the model's context window size in tokens.
Auto-detect based on model name.
Model context windows:
- Claude 3.5/3.7 Sonnet: 200K tokens
- Claude 3 Opus: 200K tokens
- GPT-4 Turbo/128K: 128K tokens
- GPT-4: 8K-32K tokens
- GPT-3.5: 16K tokens
- DeepSeek: 64K tokens
:return: Context window size in tokens
"""
if self.model and hasattr(self.model, 'model'):
model_name = self.model.model.lower()
# Claude models - 200K context
if 'claude-3' in model_name or 'claude-sonnet' in model_name:
return 200000
# GPT-4 models
elif 'gpt-4' in model_name:
if 'turbo' in model_name or '128k' in model_name:
return 128000
elif '32k' in model_name:
return 32000
else:
return 8000
# GPT-3.5
elif 'gpt-3.5' in model_name:
if '16k' in model_name:
return 16000
else:
return 4000
# DeepSeek
elif 'deepseek' in model_name:
return 64000
# Gemini models
elif 'gemini' in model_name:
if '2.0' in model_name or 'exp' in model_name:
return 2000000 # Gemini 2.0: 2M tokens
else:
return 1000000 # Gemini 1.5: 1M tokens
# Default conservative value
return 128000
def _get_context_reserve_tokens(self) -> int:
"""
Get the number of tokens to reserve for new requests.
This prevents context overflow by keeping a buffer.
:return: Number of tokens to reserve
"""
if self.context_reserve_tokens is not None:
return self.context_reserve_tokens
# Reserve ~10% of context window, with min 10K and max 200K
context_window = self._get_model_context_window()
reserve = int(context_window * 0.1)
return max(10000, min(200000, reserve))
def _estimate_message_tokens(self, message: dict) -> int:
"""
Estimate token count for a message.
Uses chars/3 for Chinese-heavy content and chars/4 for ASCII-heavy content,
plus per-block overhead for tool_use / tool_result structures.
:param message: Message dict with 'role' and 'content'
:return: Estimated token count
"""
content = message.get('content', '')
if isinstance(content, str):
return max(1, self._estimate_text_tokens(content))
elif isinstance(content, list):
total_tokens = 0
for part in content:
if not isinstance(part, dict):
continue
block_type = part.get('type', '')
if block_type == 'text':
total_tokens += self._estimate_text_tokens(part.get('text', ''))
elif block_type == 'image':
total_tokens += 1200
elif block_type == 'tool_use':
# tool_use has id + name + input (JSON-encoded)
total_tokens += 50 # overhead for structure
input_data = part.get('input', {})
if isinstance(input_data, dict):
import json
input_str = json.dumps(input_data, ensure_ascii=False)
total_tokens += self._estimate_text_tokens(input_str)
elif block_type == 'tool_result':
# tool_result has tool_use_id + content
total_tokens += 30 # overhead for structure
result_content = part.get('content', '')
if isinstance(result_content, str):
total_tokens += self._estimate_text_tokens(result_content)
else:
# Unknown block type, estimate conservatively
total_tokens += 10
return max(1, total_tokens)
return 1
@staticmethod
def _estimate_text_tokens(text: str) -> int:
"""
Estimate token count for a text string.
Chinese / CJK characters typically use ~1.5 tokens each,
while ASCII uses ~0.25 tokens per char (4 chars/token).
We use a weighted average based on the character mix.
:param text: Input text
:return: Estimated token count
"""
if not text:
return 0
# Count non-ASCII characters (CJK, emoji, etc.)
non_ascii = sum(1 for c in text if ord(c) > 127)
ascii_count = len(text) - non_ascii
# CJK chars: ~1.5 tokens each; ASCII: ~0.25 tokens per char
return int(non_ascii * 1.5 + ascii_count * 0.25) + 1
def _find_tool(self, tool_name: str):
    """
    Look up a tool by name, allowing only pre-process tools to be returned.

    :param tool_name: Name of the tool to find
    :return: The tool (with model/context wired in) if it exists and is a
             pre-process tool; otherwise None.
    """
    match = next((t for t in self.tools if t.name == tool_name), None)
    if match is None:
        return None
    if match.stage != ToolStage.PRE_PROCESS:
        # Post-process tools run automatically after the loop; the model
        # must not invoke them directly.
        logger.warning(f"Tool {tool_name} is a post-process tool and cannot be called directly.")
        return None
    match.model = self.model
    match.context = self  # Set tool context
    return match
def output(self, message="", end="\n"):
    """Emit a message via stdout or the logger, depending on output_mode."""
    if self.output_mode == "print":
        print(message, end=end)
        return
    # Non-print mode: route non-empty messages to the log.
    if message:
        logger.info(message)
def _execute_post_process_tools(self):
"""Execute all post-process stage tools"""
# Get all post-process stage tools
post_process_tools = [tool for tool in self.tools if tool.stage == ToolStage.POST_PROCESS]
# Execute each tool
for tool in post_process_tools:
# Set tool context
tool.context = self
# Record start time for execution timing
start_time = time.time()
# Execute tool (with empty parameters, tool will extract needed info from context)
result = tool.execute({})
# Calculate execution time
execution_time = time.time() - start_time
# Capture tool use for tracking
self.capture_tool_use(
tool_name=tool.name,
input_params={}, # Post-process tools typically don't take parameters
output=result.result,
status=result.status,
error_message=str(result.result) if result.status == "error" else None,
execution_time=execution_time
)
# Log result
if result.status == "success":
# Print tool execution result in the desired format
self.output(f"\n🛠️ {tool.name}: {json.dumps(result.result)}")
else:
# Print failure in print mode
self.output(f"\n🛠️ {tool.name}: {json.dumps({'status': 'error', 'message': str(result.result)})}")
def capture_tool_use(self, tool_name, input_params, output, status, thought=None, error_message=None,
                     execution_time=0.0):
    """
    Record a tool invocation as an AgentAction for later inspection.
    :param thought: thought content
    :param tool_name: Name of the tool used
    :param input_params: Parameters passed to the tool
    :param output: Output from the tool
    :param status: Status of the tool execution
    :param error_message: Error message if the tool execution failed
    :param execution_time: Time taken to execute the tool
    :return: The AgentAction that was appended to captured_actions
    """
    # Fall back to the object's identity when the agent has no explicit id.
    agent_id = self.id if hasattr(self, 'id') else str(id(self))
    recorded = AgentAction(
        agent_id=agent_id,
        agent_name=self.name,
        action_type=AgentActionType.TOOL_USE,
        tool_result=ToolResult(
            tool_name=tool_name,
            input_params=input_params,
            output=output,
            status=status,
            error_message=error_message,
            execution_time=execution_time
        ),
        thought=thought
    )
    self.captured_actions.append(recorded)
    return recorded
def run_stream(self, user_message: str, on_event=None, clear_history: bool = False, skill_filter=None) -> str:
    """
    Execute single agent task with streaming (based on tool-call)
    This method supports:
    - Streaming output
    - Multi-turn reasoning based on tool-call
    - Event callbacks
    - Persistent conversation history across calls
    Args:
        user_message: User message
        on_event: Event callback function callback(event: dict)
                  event = {"type": str, "timestamp": float, "data": dict}
        clear_history: If True, clear conversation history before this call (default: False)
        skill_filter: Optional list of skill names to include in this run
    Returns:
        Final response text
    Example:
        # Multi-turn conversation with memory
        response1 = agent.run_stream("My name is Alice")
        response2 = agent.run_stream("What's my name?")  # Will remember Alice
        # Single-turn without memory
        response = agent.run_stream("Hello", clear_history=True)
    """
    # Clear history if requested (under the lock, since concurrent calls
    # may be reading/extending self.messages at the same time)
    if clear_history:
        with self.messages_lock:
            self.messages = []
    # Get model to use
    if not self.model:
        raise ValueError("No model available for agent")
    # Get full system prompt with skills
    full_system_prompt = self.get_full_system_prompt(skill_filter=skill_filter)
    # Create a copy of messages for this execution to avoid concurrent modification
    # Record the original length to track which messages are new
    with self.messages_lock:
        messages_copy = self.messages.copy()
        original_length = len(self.messages)
    # Get max_context_turns from config
    from config import conf
    max_context_turns = conf().get("agent_max_context_turns", 30)
    # Create stream executor with copied message history
    executor = AgentStreamExecutor(
        agent=self,
        model=self.model,
        system_prompt=full_system_prompt,
        tools=self.tools,
        max_turns=self.max_steps,
        on_event=on_event,
        messages=messages_copy,  # Pass copied message history
        max_context_turns=max_context_turns
    )
    # Execute
    try:
        response = executor.run_stream(user_message)
    except Exception:
        # If executor cleared its messages (context overflow / message format error),
        # sync that back to the Agent's own message list so the next request
        # starts fresh instead of hitting the same overflow forever.
        if len(executor.messages) == 0:
            with self.messages_lock:
                self.messages.clear()
            logger.info("[Agent] Cleared Agent message history after executor recovery")
        raise
    # Append only the NEW messages from this execution (thread-safe)
    # This allows concurrent requests to both contribute to history
    with self.messages_lock:
        new_messages = executor.messages[original_length:]
        self.messages.extend(new_messages)
    # Store executor reference for agent_bridge to access files_to_send
    self.stream_executor = executor
    # Execute all post-process tools
    self._execute_post_process_tools()
    return response
def clear_history(self):
    """Clear conversation history and captured actions.

    Resets ``self.messages`` under ``messages_lock`` for consistency with
    ``run_stream``, which copies/extends/clears the message list under the
    same lock — without it a concurrent ``run_stream`` could interleave
    with this reset.
    """
    with self.messages_lock:
        self.messages = []
    self.captured_actions = []
agent/protocol/agent_stream.py | Python | """
Agent Stream Execution Module - Multi-turn reasoning based on tool-call
Provides streaming output, event system, and complete tool-call loop
"""
import json
import time
from typing import List, Dict, Any, Optional, Callable, Tuple
from agent.protocol.models import LLMRequest, LLMModel
from agent.tools.base_tool import BaseTool, ToolResult
from common.log import logger
class AgentStreamExecutor:
"""
Agent Stream Executor
Handles multi-turn reasoning loop based on tool-call:
1. LLM generates response (may include tool calls)
2. Execute tools
3. Return results to LLM
4. Repeat until no more tool calls
"""
def __init__(
        self,
        agent,  # Agent instance
        model: LLMModel,
        system_prompt: str,
        tools: List[BaseTool],
        max_turns: int = 50,
        on_event: Optional[Callable] = None,
        messages: Optional[List[Dict]] = None,
        max_context_turns: int = 30
):
    """
    Initialize the stream executor.
    Args:
        agent: Agent instance (for accessing context)
        model: LLM model
        system_prompt: System prompt
        tools: List of available tools
        max_turns: Maximum number of turns
        on_event: Event callback function
        messages: Optional existing message history (for persistent conversations)
        max_context_turns: Maximum number of conversation turns to keep in context
    """
    self.agent = agent
    self.model = model
    self.system_prompt = system_prompt
    # Normalize the tool collection to a name -> tool mapping.
    if isinstance(tools, list):
        self.tools = {tool.name: tool for tool in tools}
    else:
        self.tools = tools
    self.max_turns = max_turns
    self.on_event = on_event
    self.max_context_turns = max_context_turns
    # Reuse the caller's history list when given (persistent conversations),
    # otherwise start a fresh one.
    self.messages = [] if messages is None else messages
    # (tool_name, args_hash, success) triples used for retry/loop protection.
    self.tool_failure_history = []
    # File metadata dicts queued for sending (populated by the read tool).
    self.files_to_send = []
def _emit_event(self, event_type: str, data: dict = None):
"""Emit event"""
if self.on_event:
try:
self.on_event({
"type": event_type,
"timestamp": time.time(),
"data": data or {}
})
except Exception as e:
logger.error(f"Event callback error: {e}")
def _filter_think_tags(self, text: str) -> str:
"""
Remove <think> and </think> tags but keep the content inside.
Some LLM providers (e.g., MiniMax) may return thinking process wrapped in <think> tags.
We only remove the tags themselves, keeping the actual thinking content.
"""
if not text:
return text
import re
# Remove only the <think> and </think> tags, keep the content
text = re.sub(r'<think>', '', text)
text = re.sub(r'</think>', '', text)
return text
def _hash_args(self, args: dict) -> str:
"""Generate a simple hash for tool arguments"""
import hashlib
# Sort keys for consistent hashing
args_str = json.dumps(args, sort_keys=True, ensure_ascii=False)
return hashlib.md5(args_str.encode()).hexdigest()[:8]
def _check_consecutive_failures(self, tool_name: str, args: dict) -> Tuple[bool, str, bool]:
    """
    Check if tool has failed too many times consecutively or called repeatedly with same args.

    Scans ``self.tool_failure_history`` (newest first) through three
    progressively looser filters:
      1. same tool + same args, any outcome  -> stop at 5 (loop guard)
      2. same tool + same args, failures only -> stop at 3
      3. same tool, any args, failures only   -> warn at 6, abort at 8

    Returns:
        (should_stop, reason, is_critical)
        - should_stop: Whether to stop tool execution
        - reason: Reason for stopping
        - is_critical: Whether to abort entire conversation (True for 8+ failures)
    """
    args_hash = self._hash_args(args)
    # Count consecutive calls (both success and failure) for same tool + args
    # This catches infinite loops where tool succeeds but LLM keeps calling it
    same_args_calls = 0
    for name, ahash, success in reversed(self.tool_failure_history):
        if name == tool_name and ahash == args_hash:
            same_args_calls += 1
        else:
            break  # Different tool or args, stop counting
    # Stop at 5 consecutive calls with same args (whether success or failure)
    if same_args_calls >= 5:
        return True, f"工具 '{tool_name}' 使用相同参数已被调用 {same_args_calls} 次,停止执行以防止无限循环。如果需要查看配置,结果已在之前的调用中返回。", False
    # Count consecutive failures for same tool + args
    same_args_failures = 0
    for name, ahash, success in reversed(self.tool_failure_history):
        if name == tool_name and ahash == args_hash:
            if not success:
                same_args_failures += 1
            else:
                break  # Stop at first success
        else:
            break  # Different tool or args, stop counting
    if same_args_failures >= 3:
        return True, f"工具 '{tool_name}' 使用相同参数连续失败 {same_args_failures} 次,停止执行以防止无限循环", False
    # Count consecutive failures for same tool (any args)
    same_tool_failures = 0
    for name, ahash, success in reversed(self.tool_failure_history):
        if name == tool_name:
            if not success:
                same_tool_failures += 1
            else:
                break  # Stop at first success
        else:
            break  # Different tool, stop counting
    # Hard stop at 8 failures - abort with critical message
    if same_tool_failures >= 8:
        return True, f"抱歉,我没能完成这个任务。可能是我理解有误或者当前方法不太合适。\n\n建议你:\n• 换个方式描述需求试试\n• 把任务拆分成更小的步骤\n• 或者换个思路来解决", True
    # Warning at 6 failures
    if same_tool_failures >= 6:
        return True, f"工具 '{tool_name}' 连续失败 {same_tool_failures} 次(使用不同参数),停止执行以防止无限循环", False
    return False, "", False
def _record_tool_result(self, tool_name: str, args: dict, success: bool):
"""Record tool execution result for failure tracking"""
args_hash = self._hash_args(args)
self.tool_failure_history.append((tool_name, args_hash, success))
# Keep only last 50 records to avoid memory bloat
if len(self.tool_failure_history) > 50:
self.tool_failure_history = self.tool_failure_history[-50:]
def run_stream(self, user_message: str) -> str:
    """
    Execute streaming reasoning loop.

    Each turn: call the LLM (streaming), execute any requested tools, feed
    the tool results back, and repeat until the model answers without tool
    calls or ``max_turns`` is reached. Loop/failure protection and message
    history integrity (tool_use must always be followed by tool_result)
    are enforced along the way.

    Args:
        user_message: User message
    Returns:
        Final response text
    """
    # Log user message with model info
    logger.info(f"🤖 {self.model.model} | 👤 {user_message}")
    # Add user message (Claude format - use content blocks for consistency)
    self.messages.append({
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": user_message
            }
        ]
    })
    self._emit_event("agent_start")
    final_response = ""
    turn = 0
    try:
        while turn < self.max_turns:
            turn += 1
            logger.info(f"[Agent] 第 {turn} 轮")
            self._emit_event("turn_start", {"turn": turn})
            # Check if memory flush is needed (before calling LLM)
            # Uses an independent flush threshold (50K tokens or 20 turns)
            if self.agent.memory_manager and hasattr(self.agent, 'last_usage'):
                usage = self.agent.last_usage
                if usage and 'input_tokens' in usage:
                    current_tokens = usage.get('input_tokens', 0)
                    if self.agent.memory_manager.should_flush_memory(
                            current_tokens=current_tokens
                    ):
                        self._emit_event("memory_flush_start", {
                            "current_tokens": current_tokens,
                            "turn_count": self.agent.memory_manager.flush_manager.turn_count
                        })
                        # TODO: Execute memory flush in background
                        # This would require async support
                        logger.info(
                            f"Memory flush recommended: tokens={current_tokens}, turns={self.agent.memory_manager.flush_manager.turn_count}")
            # Call LLM (enable retry_on_empty for better reliability)
            assistant_msg, tool_calls = self._call_llm_stream(retry_on_empty=True)
            final_response = assistant_msg
            # No tool calls, end loop
            if not tool_calls:
                # Check whether the model returned an empty response
                if not assistant_msg:
                    logger.warning(f"[Agent] LLM returned empty response after retry (no content and no tool calls)")
                    logger.info(f"[Agent] This usually happens when LLM thinks the task is complete after tool execution")
                    # If earlier turns ran tools, force the LLM to produce a text reply
                    if turn > 1:
                        logger.info(f"[Agent] Requesting explicit response from LLM...")
                        # Add a message explicitly asking for a user-facing reply
                        self.messages.append({
                            "role": "user",
                            "content": [{
                                "type": "text",
                                "text": "请向用户说明刚才工具执行的结果或回答用户的问题。"
                            }]
                        })
                        # Call the LLM once more
                        # NOTE(review): tool_calls returned here are never
                        # executed (the loop breaks below) — confirm intended.
                        assistant_msg, tool_calls = self._call_llm_stream(retry_on_empty=False)
                        final_response = assistant_msg
                        # Only fall back if still empty
                        if not assistant_msg and not tool_calls:
                            logger.warning(f"[Agent] Still empty after explicit request")
                            final_response = (
                                "抱歉,我暂时无法生成回复。请尝试换一种方式描述你的需求,或稍后再试。"
                            )
                            logger.info(f"Generated fallback response for empty LLM output")
                    else:
                        # Empty on the very first turn - fall back directly
                        final_response = (
                            "抱歉,我暂时无法生成回复。请尝试换一种方式描述你的需求,或稍后再试。"
                        )
                        logger.info(f"Generated fallback response for empty LLM output")
                else:
                    logger.info(f"💭 {assistant_msg[:150]}{'...' if len(assistant_msg) > 150 else ''}")
                logger.debug(f"✅ 完成 (无工具调用)")
                self._emit_event("turn_end", {
                    "turn": turn,
                    "has_tool_calls": False
                })
                break
            # Log tool calls with arguments
            tool_calls_str = []
            for tc in tool_calls:
                # Safely handle None or missing arguments
                args = tc.get('arguments') or {}
                if isinstance(args, dict):
                    args_str = ', '.join([f"{k}={v}" for k, v in args.items()])
                    if args_str:
                        tool_calls_str.append(f"{tc['name']}({args_str})")
                    else:
                        tool_calls_str.append(tc['name'])
                else:
                    tool_calls_str.append(tc['name'])
            logger.info(f"🔧 {', '.join(tool_calls_str)}")
            # Execute tools
            tool_results = []
            tool_result_blocks = []
            try:
                for tool_call in tool_calls:
                    result = self._execute_tool(tool_call)
                    tool_results.append(result)
                    # Debug: Check if tool is being called repeatedly with same args
                    if turn > 2:
                        # Check last N tool calls for repeats
                        repeat_count = sum(
                            1 for name, ahash, _ in self.tool_failure_history[-10:]
                            if name == tool_call["name"] and ahash == self._hash_args(tool_call["arguments"])
                        )
                        if repeat_count >= 3:
                            logger.warning(
                                f"⚠️ Tool '{tool_call['name']}' has been called {repeat_count} times "
                                f"with same arguments. This may indicate a loop."
                            )
                    # Check if this is a file to send (from read tool)
                    if result.get("status") == "success" and isinstance(result.get("result"), dict):
                        result_data = result.get("result")
                        if result_data.get("type") == "file_to_send":
                            # Store file metadata for later sending
                            self.files_to_send.append(result_data)
                            logger.info(f"📎 检测到待发送文件: {result_data.get('file_name', result_data.get('path'))}")
                    # Check for critical error - abort entire conversation
                    if result.get("status") == "critical_error":
                        logger.error(f"💥 检测到严重错误,终止对话")
                        final_response = result.get('result', '任务执行失败')
                        return final_response
                    # Log tool result in compact format
                    status_emoji = "✅" if result.get("status") == "success" else "❌"
                    result_data = result.get('result', '')
                    # Format result string with proper Chinese character support
                    if isinstance(result_data, (dict, list)):
                        result_str = json.dumps(result_data, ensure_ascii=False)
                    else:
                        result_str = str(result_data)
                    logger.info(f" {status_emoji} {tool_call['name']} ({result.get('execution_time', 0):.2f}s): {result_str[:200]}{'...' if len(result_str) > 200 else ''}")
                    # Build tool result block (Claude format)
                    # Format content in a way that's easy for LLM to understand
                    is_error = result.get("status") == "error"
                    if is_error:
                        # For errors, provide clear error message
                        result_content = f"Error: {result.get('result', 'Unknown error')}"
                    elif isinstance(result.get('result'), dict):
                        # For dict results, use JSON format
                        result_content = json.dumps(result.get('result'), ensure_ascii=False)
                    elif isinstance(result.get('result'), str):
                        # For string results, use directly
                        result_content = result.get('result')
                    else:
                        # Fallback to full JSON
                        result_content = json.dumps(result, ensure_ascii=False)
                    # Truncate excessively large tool results for the current turn
                    # Historical turns will be further truncated in _trim_messages()
                    MAX_CURRENT_TURN_RESULT_CHARS = 50000
                    if len(result_content) > MAX_CURRENT_TURN_RESULT_CHARS:
                        truncated_len = len(result_content)
                        result_content = result_content[:MAX_CURRENT_TURN_RESULT_CHARS] + \
                            f"\n\n[Output truncated: {truncated_len} chars total, showing first {MAX_CURRENT_TURN_RESULT_CHARS} chars]"
                        logger.info(f"📎 Truncated tool result for '{tool_call['name']}': {truncated_len} -> {MAX_CURRENT_TURN_RESULT_CHARS} chars")
                    tool_result_block = {
                        "type": "tool_result",
                        "tool_use_id": tool_call["id"],
                        "content": result_content
                    }
                    # Add is_error field for Claude API (helps model understand failures)
                    if is_error:
                        tool_result_block["is_error"] = True
                    tool_result_blocks.append(tool_result_block)
            finally:
                # CRITICAL: Always add tool_result to maintain message history integrity
                # Even if tool execution fails, we must add error results to match tool_use
                if tool_result_blocks:
                    # Add tool results to message history as user message (Claude format)
                    self.messages.append({
                        "role": "user",
                        "content": tool_result_blocks
                    })
                    # Detect potential infinite loop: same tool called multiple times with success
                    # If detected, add a hint to LLM to stop calling tools and provide response
                    if turn >= 3 and len(tool_calls) > 0:
                        tool_name = tool_calls[0]["name"]
                        args_hash = self._hash_args(tool_calls[0]["arguments"])
                        # Count recent successful calls with same tool+args
                        recent_success_count = 0
                        for name, ahash, success in reversed(self.tool_failure_history[-10:]):
                            if name == tool_name and ahash == args_hash and success:
                                recent_success_count += 1
                        # If tool was called successfully 3+ times with same args, add hint to stop loop
                        if recent_success_count >= 3:
                            logger.warning(
                                f"⚠️ Detected potential loop: '{tool_name}' called {recent_success_count} times "
                                f"with same args. Adding hint to LLM to provide final response."
                            )
                            # Add a gentle hint message to guide LLM to respond
                            self.messages.append({
                                "role": "user",
                                "content": [{
                                    "type": "text",
                                    "text": "工具已成功执行并返回结果。请基于这些信息向用户做出回复,不要重复调用相同的工具。"
                                }]
                            })
                elif tool_calls:
                    # If we have tool_calls but no tool_result_blocks (unexpected error),
                    # create error results for all tool calls to maintain message integrity
                    logger.warning("⚠️ Tool execution interrupted, adding error results to maintain message history")
                    emergency_blocks = []
                    for tool_call in tool_calls:
                        emergency_blocks.append({
                            "type": "tool_result",
                            "tool_use_id": tool_call["id"],
                            "content": "Error: Tool execution was interrupted",
                            "is_error": True
                        })
                    self.messages.append({
                        "role": "user",
                        "content": emergency_blocks
                    })
            self._emit_event("turn_end", {
                "turn": turn,
                "has_tool_calls": True,
                "tool_count": len(tool_calls)
            })
        if turn >= self.max_turns:
            logger.warning(f"⚠️ 已达到最大决策步数限制: {self.max_turns}")
            # Force model to summarize without tool calls
            logger.info(f"[Agent] Requesting summary from LLM after reaching max steps...")
            # Add a system message to force summary
            self.messages.append({
                "role": "user",
                "content": [{
                    "type": "text",
                    "text": f"你已经执行了{turn}个决策步骤,达到了单次运行的最大步数限制。请总结一下你目前的执行过程和结果,告诉用户当前的进展情况。不要再调用工具,直接用文字回复。"
                }]
            })
            # Call LLM one more time to get summary (without retry to avoid loops)
            try:
                summary_response, summary_tools = self._call_llm_stream(retry_on_empty=False)
                if summary_response:
                    final_response = summary_response
                    logger.info(f"💭 Summary: {summary_response[:150]}{'...' if len(summary_response) > 150 else ''}")
                else:
                    # Fallback if model still doesn't respond
                    final_response = (
                        f"我已经执行了{turn}个决策步骤,达到了单次运行的步数上限。"
                        "任务可能还未完全完成,建议你将任务拆分成更小的步骤,或者换一种方式描述需求。"
                    )
            except Exception as e:
                logger.warning(f"Failed to get summary from LLM: {e}")
                final_response = (
                    f"我已经执行了{turn}个决策步骤,达到了单次运行的步数上限。"
                    "任务可能还未完全完成,建议你将任务拆分成更小的步骤,或者换一种方式描述需求。"
                )
    except Exception as e:
        logger.error(f"❌ Agent执行错误: {e}")
        self._emit_event("error", {"error": str(e)})
        raise
    finally:
        logger.info(f"[Agent] 🏁 完成 ({turn}轮)")
        self._emit_event("agent_end", {"final_response": final_response})
        # Increment the turn counter after the exchange (user message + AI reply = 1 turn)
        if self.agent.memory_manager:
            self.agent.memory_manager.increment_turn()
    return final_response
def _call_llm_stream(self, retry_on_empty=True, retry_count=0, max_retries=3,
                     _overflow_retry: bool = False) -> Tuple[str, List[Dict]]:
    """
    Call LLM with streaming and automatic retry on errors.

    Recovery strategy, in order: context overflow -> aggressive trim then
    one retry, else clear history and raise; transient errors (timeouts,
    429/5xx) -> backoff and retry up to max_retries; empty response ->
    one extra attempt when retry_on_empty is set.

    Args:
        retry_on_empty: Whether to retry once if empty response is received
        retry_count: Current retry attempt (internal use)
        max_retries: Maximum number of retries for API errors
        _overflow_retry: Internal flag indicating this is a retry after context overflow
    Returns:
        (response_text, tool_calls)
    """
    # Validate and fix message history first
    self._validate_and_fix_messages()
    # Trim messages if needed (using agent's context management)
    self._trim_messages()
    # Prepare messages
    messages = self._prepare_messages()
    logger.debug(f"Sending {len(messages)} messages to LLM")
    # Prepare tool definitions (OpenAI/Claude format)
    tools_schema = None
    if self.tools:
        tools_schema = []
        for tool in self.tools.values():
            tools_schema.append({
                "name": tool.name,
                "description": tool.description,
                "input_schema": tool.params  # Claude uses input_schema
            })
    # Create request
    request = LLMRequest(
        messages=messages,
        temperature=0,
        stream=True,
        tools=tools_schema,
        system=self.system_prompt  # Pass system prompt separately for Claude API
    )
    self._emit_event("message_start", {"role": "assistant"})
    # Streaming response
    full_content = ""
    tool_calls_buffer = {}  # {index: {id, name, arguments}}
    stop_reason = None  # Track why the stream stopped
    try:
        stream = self.model.call_stream(request)
        for chunk in stream:
            # Check for errors
            if isinstance(chunk, dict) and chunk.get("error"):
                # Extract error message from nested structure
                error_data = chunk.get("error", {})
                if isinstance(error_data, dict):
                    error_msg = error_data.get("message", chunk.get("message", "Unknown error"))
                    error_code = error_data.get("code", "")
                    error_type = error_data.get("type", "")
                else:
                    error_msg = chunk.get("message", str(error_data))
                    error_code = ""
                    error_type = ""
                status_code = chunk.get("status_code", "N/A")
                # Log error with all available information
                logger.error(f"🔴 Stream API Error:")
                logger.error(f" Message: {error_msg}")
                logger.error(f" Status Code: {status_code}")
                logger.error(f" Error Code: {error_code}")
                logger.error(f" Error Type: {error_type}")
                logger.error(f" Full chunk: {chunk}")
                # Check if this is a context overflow error (keyword-based, works for all models)
                # Don't rely on specific status codes as different providers use different codes
                error_msg_lower = error_msg.lower()
                is_overflow = any(keyword in error_msg_lower for keyword in [
                    'context length exceeded', 'maximum context length', 'prompt is too long',
                    'context overflow', 'context window', 'too large', 'exceeds model context',
                    'request_too_large', 'request exceeds the maximum size', 'tokens exceed'
                ])
                if is_overflow:
                    # Mark as context overflow for special handling
                    # (the [CONTEXT_OVERFLOW] marker is matched in the except block below)
                    raise Exception(f"[CONTEXT_OVERFLOW] {error_msg} (Status: {status_code})")
                else:
                    # Raise exception with full error message for retry logic
                    raise Exception(f"{error_msg} (Status: {status_code}, Code: {error_code}, Type: {error_type})")
            # Parse chunk
            if isinstance(chunk, dict) and "choices" in chunk:
                choice = chunk["choices"][0]
                delta = choice.get("delta", {})
                # Capture finish_reason if present
                finish_reason = choice.get("finish_reason")
                if finish_reason:
                    stop_reason = finish_reason
                # Skip reasoning_content (internal thinking from models like GLM-5)
                reasoning_delta = delta.get("reasoning_content") or ""
                # if reasoning_delta:
                #     logger.debug(f"🧠 [thinking] {reasoning_delta[:100]}...")
                # Handle text content
                content_delta = delta.get("content") or ""
                if content_delta:
                    # Filter out <think> tags from content
                    filtered_delta = self._filter_think_tags(content_delta)
                    full_content += filtered_delta
                    if filtered_delta:  # Only emit if there's content after filtering
                        self._emit_event("message_update", {"delta": filtered_delta})
                # Handle tool calls (arguments arrive in fragments and are
                # accumulated per index until the stream ends)
                if "tool_calls" in delta and delta["tool_calls"]:
                    for tc_delta in delta["tool_calls"]:
                        index = tc_delta.get("index", 0)
                        if index not in tool_calls_buffer:
                            tool_calls_buffer[index] = {
                                "id": "",
                                "name": "",
                                "arguments": ""
                            }
                        if "id" in tc_delta:
                            tool_calls_buffer[index]["id"] = tc_delta["id"]
                        if "function" in tc_delta:
                            func = tc_delta["function"]
                            if "name" in func:
                                tool_calls_buffer[index]["name"] = func["name"]
                            if "arguments" in func:
                                tool_calls_buffer[index]["arguments"] += func["arguments"]
    except Exception as e:
        error_str = str(e)
        error_str_lower = error_str.lower()
        # Check if error is context overflow (non-retryable, needs session reset)
        # Method 1: Check for special marker (set in stream error handling above)
        is_context_overflow = '[context_overflow]' in error_str_lower
        # Method 2: Fallback to keyword matching for non-stream errors
        if not is_context_overflow:
            is_context_overflow = any(keyword in error_str_lower for keyword in [
                'context length exceeded', 'maximum context length', 'prompt is too long',
                'context overflow', 'context window', 'too large', 'exceeds model context',
                'request_too_large', 'request exceeds the maximum size'
            ])
        # Check if error is message format error (incomplete tool_use/tool_result pairs)
        # This happens when previous conversation had tool failures
        is_message_format_error = any(keyword in error_str_lower for keyword in [
            'tool_use', 'tool_result', 'without', 'immediately after',
            'corresponding', 'must have', 'each'
        ]) and 'status: 400' in error_str_lower
        if is_context_overflow or is_message_format_error:
            error_type = "context overflow" if is_context_overflow else "message format error"
            logger.error(f"💥 {error_type} detected: {e}")
            # Strategy: try aggressive trimming first, only clear as last resort
            if is_context_overflow and not _overflow_retry:
                trimmed = self._aggressive_trim_for_overflow()
                if trimmed:
                    logger.warning("🔄 Aggressively trimmed context, retrying...")
                    return self._call_llm_stream(
                        retry_on_empty=retry_on_empty,
                        retry_count=retry_count,
                        max_retries=max_retries,
                        _overflow_retry=True
                    )
            # Aggressive trim didn't help or this is a message format error
            # -> clear everything
            logger.warning("🔄 Clearing conversation history to recover")
            self.messages.clear()
            if is_context_overflow:
                raise Exception(
                    "抱歉,对话历史过长导致上下文溢出。我已清空历史记录,请重新描述你的需求。"
                )
            else:
                raise Exception(
                    "抱歉,之前的对话出现了问题。我已清空历史记录,请重新发送你的消息。"
                )
        # Check if error is rate limit (429)
        is_rate_limit = '429' in error_str_lower or 'rate limit' in error_str_lower
        # Check if error is retryable (timeout, connection, server busy, etc.)
        is_retryable = any(keyword in error_str_lower for keyword in [
            'timeout', 'timed out', 'connection', 'network',
            'rate limit', 'overloaded', 'unavailable', 'busy', 'retry',
            '429', '500', '502', '503', '504', '512'
        ])
        if is_retryable and retry_count < max_retries:
            # Rate limit needs longer wait time
            if is_rate_limit:
                wait_time = 30 + (retry_count * 15)  # 30s, 45s, 60s for rate limit
            else:
                wait_time = (retry_count + 1) * 2  # 2s, 4s, 6s for other errors
            logger.warning(f"⚠️ LLM API error (attempt {retry_count + 1}/{max_retries}): {e}")
            logger.info(f"Retrying in {wait_time}s...")
            time.sleep(wait_time)
            return self._call_llm_stream(
                retry_on_empty=retry_on_empty,
                retry_count=retry_count + 1,
                max_retries=max_retries
            )
        else:
            if retry_count >= max_retries:
                logger.error(f"❌ LLM API error after {max_retries} retries: {e}")
            else:
                logger.error(f"❌ LLM call error (non-retryable): {e}")
            raise
    # Parse tool calls
    tool_calls = []
    for idx in sorted(tool_calls_buffer.keys()):
        tc = tool_calls_buffer[idx]
        # Ensure tool call has a valid ID (some providers return empty/None IDs)
        tool_id = tc.get("id") or ""
        if not tool_id:
            import uuid
            tool_id = f"call_{uuid.uuid4().hex[:24]}"
        try:
            # Safely get arguments, handle None case
            args_str = tc.get("arguments") or ""
            arguments = json.loads(args_str) if args_str else {}
        except json.JSONDecodeError as e:
            # Handle None or invalid arguments safely
            args_str = tc.get('arguments') or ""
            args_preview = args_str[:200] if len(args_str) > 200 else args_str
            logger.error(f"Failed to parse tool arguments for {tc['name']}")
            logger.error(f"Arguments length: {len(args_str)} chars")
            logger.error(f"Arguments preview: {args_preview}...")
            logger.error(f"JSON decode error: {e}")
            # Return a clear error message to the LLM instead of empty dict
            # This helps the LLM understand what went wrong
            tool_calls.append({
                "id": tool_id,
                "name": tc["name"],
                "arguments": {},
                "_parse_error": f"Invalid JSON in tool arguments: {args_preview}... Error: {str(e)}. Tip: For large content, consider splitting into smaller chunks or using a different approach."
            })
            continue
        tool_calls.append({
            "id": tool_id,
            "name": tc["name"],
            "arguments": arguments
        })
    # Check for empty response and retry once if enabled
    if retry_on_empty and not full_content and not tool_calls:
        logger.warning(f"⚠️ LLM returned empty response (stop_reason: {stop_reason}), retrying once...")
        self._emit_event("message_end", {
            "content": "",
            "tool_calls": [],
            "empty_retry": True,
            "stop_reason": stop_reason
        })
        # Retry without retry flag to avoid infinite loop
        return self._call_llm_stream(
            retry_on_empty=False,
            retry_count=retry_count,
            max_retries=max_retries
        )
    # Filter full_content one more time (in case tags were split across chunks)
    full_content = self._filter_think_tags(full_content)
    # Add assistant message to history (Claude format uses content blocks)
    assistant_msg = {"role": "assistant", "content": []}
    # Add text content block if present
    if full_content:
        assistant_msg["content"].append({
            "type": "text",
            "text": full_content
        })
    # Add tool_use blocks if present
    if tool_calls:
        for tc in tool_calls:
            assistant_msg["content"].append({
                "type": "tool_use",
                "id": tc.get("id", ""),
                "name": tc.get("name", ""),
                "input": tc.get("arguments", {})
            })
    # Only append if content is not empty
    if assistant_msg["content"]:
        self.messages.append(assistant_msg)
    self._emit_event("message_end", {
        "content": full_content,
        "tool_calls": tool_calls
    })
    return full_content, tool_calls
    def _execute_tool(self, tool_call: Dict) -> Dict[str, Any]:
        """
        Execute a single tool call, emitting start/end events around it.

        Args:
            tool_call: {"id": str, "name": str, "arguments": dict}; may also
                carry "_parse_error" when upstream JSON parsing of the
                arguments failed (in that case the tool is never executed).

        Returns:
            Dict with "status" ("success"/"error"/"critical_error"),
            "result", and "execution_time" in seconds (0 when the tool
            did not actually run).
        """
        tool_name = tool_call["name"]
        tool_id = tool_call["id"]
        arguments = tool_call["arguments"]
        # Check if there was a JSON parse error
        if "_parse_error" in tool_call:
            parse_error = tool_call["_parse_error"]
            logger.error(f"Skipping tool execution due to parse error: {parse_error}")
            result = {
                "status": "error",
                "result": f"Failed to parse tool arguments. {parse_error}. Please ensure your tool call uses valid JSON format with all required parameters.",
                "execution_time": 0
            }
            # Count this as a failure so the consecutive-failure guard can trip
            self._record_tool_result(tool_name, arguments, False)
            return result
        # Check for consecutive failures (retry protection)
        should_stop, stop_reason, is_critical = self._check_consecutive_failures(tool_name, arguments)
        if should_stop:
            logger.error(f"🛑 {stop_reason}")
            self._record_tool_result(tool_name, arguments, False)
            if is_critical:
                # Critical failure - abort entire conversation
                result = {
                    "status": "critical_error",
                    "result": stop_reason,
                    "execution_time": 0
                }
            else:
                # Normal failure - let LLM try different approach
                result = {
                    "status": "error",
                    "result": f"{stop_reason}\n\n当前方法行不通,请尝试完全不同的方法或向用户询问更多信息。",
                    "execution_time": 0
                }
            return result
        self._emit_event("tool_execution_start", {
            "tool_call_id": tool_id,
            "tool_name": tool_name,
            "arguments": arguments
        })
        try:
            tool = self.tools.get(tool_name)
            if not tool:
                raise ValueError(f"Tool '{tool_name}' not found")
            # Set tool context so the tool can reach the model/agent
            tool.model = self.model
            tool.context = self.agent
            # Execute tool and time it
            start_time = time.time()
            result: ToolResult = tool.execute_tool(arguments)
            execution_time = time.time() - start_time
            result_dict = {
                "status": result.status,
                "result": result.result,
                "execution_time": execution_time
            }
            # Record tool result for failure tracking
            success = result.status == "success"
            self._record_tool_result(tool_name, arguments, success)
            # Auto-refresh skills after skill creation
            # (a bash call running init_skill.py indicates a new skill was scaffolded)
            if tool_name == "bash" and result.status == "success":
                command = arguments.get("command", "")
                if "init_skill.py" in command and self.agent.skill_manager:
                    logger.info("Detected skill creation, refreshing skills...")
                    self.agent.refresh_skills()
                    logger.info(f"Skills refreshed! Now have {len(self.agent.skill_manager.skills)} skills")
            self._emit_event("tool_execution_end", {
                "tool_call_id": tool_id,
                "tool_name": tool_name,
                **result_dict
            })
            return result_dict
        except Exception as e:
            # Tool lookup or execution raised - report as a normal error result
            logger.error(f"Tool execution error: {e}")
            error_result = {
                "status": "error",
                "result": str(e),
                "execution_time": 0
            }
            # Record failure
            self._record_tool_result(tool_name, arguments, False)
            self._emit_event("tool_execution_end", {
                "tool_call_id": tool_id,
                "tool_name": tool_name,
                **error_result
            })
            return error_result
def _validate_and_fix_messages(self):
"""
Validate message history and fix incomplete tool_use/tool_result pairs.
Claude API requires each tool_use to have a corresponding tool_result immediately after.
"""
if not self.messages:
return
# Check last message for incomplete tool_use
if len(self.messages) > 0:
last_msg = self.messages[-1]
if last_msg.get("role") == "assistant":
# Check if assistant message has tool_use blocks
content = last_msg.get("content", [])
if isinstance(content, list):
has_tool_use = any(block.get("type") == "tool_use" for block in content)
if has_tool_use:
# This is incomplete - remove it
logger.warning(f"⚠️ Removing incomplete tool_use message from history")
self.messages.pop()
    def _identify_complete_turns(self) -> List[Dict]:
        """
        Identify complete conversation turns.

        A complete turn consists of:
        1. A user message (text)
        2. The AI reply (may contain tool_use blocks)
        3. Tool results (tool_result, if any)
        4. Follow-up AI replies (if any)

        Returns:
            List of turns, each turn is a dict with 'messages' list
        """
        turns = []
        current_turn = {'messages': []}
        for msg in self.messages:
            role = msg.get('role')
            content = msg.get('content', [])
            if role == 'user':
                # Decide whether this is a real user query (not a tool result):
                # a list content with at least one text block, or a plain string
                is_user_query = False
                if isinstance(content, list):
                    is_user_query = any(
                        block.get('type') == 'text'
                        for block in content
                        if isinstance(block, dict)
                    )
                elif isinstance(content, str):
                    is_user_query = True
                if is_user_query:
                    # Start a new turn
                    if current_turn['messages']:
                        turns.append(current_turn)
                    current_turn = {'messages': [msg]}
                else:
                    # Tool result - belongs to the current turn
                    current_turn['messages'].append(msg)
            else:
                # AI reply - belongs to the current turn
                current_turn['messages'].append(msg)
        # Append the final (possibly still open) turn
        if current_turn['messages']:
            turns.append(current_turn)
        return turns
def _estimate_turn_tokens(self, turn: Dict) -> int:
"""估算一个轮次的 tokens"""
return sum(
self.agent._estimate_message_tokens(msg)
for msg in turn['messages']
)
    def _truncate_historical_tool_results(self):
        """
        Truncate tool_result content in historical messages to reduce context size.

        Historical-turn results are capped at MAX_HISTORY_RESULT_CHARS (20K chars)
        here. The current turn's results are left untouched — they are presumably
        already capped at creation time (TODO confirm against the producer).

        This runs before token-based trimming so that we first shrink oversized
        results, potentially avoiding the need to drop entire turns.
        """
        MAX_HISTORY_RESULT_CHARS = 20000
        if len(self.messages) < 2:
            return
        # Find where the last user text message starts (= current turn boundary)
        # We skip the current turn's messages to preserve their full content
        current_turn_start = len(self.messages)
        for i in range(len(self.messages) - 1, -1, -1):
            msg = self.messages[i]
            if msg.get("role") == "user":
                content = msg.get("content", [])
                if isinstance(content, list) and any(
                    isinstance(b, dict) and b.get("type") == "text" for b in content
                ):
                    current_turn_start = i
                    break
                elif isinstance(content, str):
                    current_turn_start = i
                    break
        truncated_count = 0
        # Only messages strictly before the current turn boundary are shrunk
        for i in range(current_turn_start):
            msg = self.messages[i]
            if msg.get("role") != "user":
                continue
            content = msg.get("content", [])
            if not isinstance(content, list):
                continue
            for block in content:
                if not isinstance(block, dict) or block.get("type") != "tool_result":
                    continue
                result_str = block.get("content", "")
                if isinstance(result_str, str) and len(result_str) > MAX_HISTORY_RESULT_CHARS:
                    original_len = len(result_str)
                    # Truncate in place and append an explicit marker for the LLM
                    block["content"] = result_str[:MAX_HISTORY_RESULT_CHARS] + \
                        f"\n\n[Historical output truncated: {original_len} -> {MAX_HISTORY_RESULT_CHARS} chars]"
                    truncated_count += 1
        if truncated_count > 0:
            logger.info(f"📎 Truncated {truncated_count} historical tool result(s) to {MAX_HISTORY_RESULT_CHARS} chars")
    def _aggressive_trim_for_overflow(self) -> bool:
        """
        Aggressively trim context when a real overflow error is returned by the API.

        This method goes beyond normal _trim_messages by:
        1. Truncating all tool results (including current turn) to AGGRESSIVE_LIMIT chars
        2. Keeping only the last 5 complete conversation turns
        3. Truncating overly long user messages

        Returns:
            True if messages were trimmed (worth retrying), False if nothing left to trim
        """
        if not self.messages:
            return False
        original_count = len(self.messages)
        # Step 1: Aggressively truncate ALL tool results to 10K chars
        AGGRESSIVE_LIMIT = 10000
        truncated = 0
        for msg in self.messages:
            content = msg.get("content", [])
            if not isinstance(content, list):
                continue
            for block in content:
                if not isinstance(block, dict):
                    continue
                # Truncate tool_result blocks
                if block.get("type") == "tool_result":
                    result_str = block.get("content", "")
                    if isinstance(result_str, str) and len(result_str) > AGGRESSIVE_LIMIT:
                        block["content"] = (
                            result_str[:AGGRESSIVE_LIMIT]
                            + f"\n\n[Truncated for context recovery: "
                            f"{len(result_str)} -> {AGGRESSIVE_LIMIT} chars]"
                        )
                        truncated += 1
                # Truncate tool_use input blocks (e.g. large write content);
                # size is judged on the JSON-serialized form of the whole input
                if block.get("type") == "tool_use" and isinstance(block.get("input"), dict):
                    input_str = json.dumps(block["input"], ensure_ascii=False)
                    if len(input_str) > AGGRESSIVE_LIMIT:
                        # Keep only a summary of the input (long string values -> 1K each)
                        for key, val in block["input"].items():
                            if isinstance(val, str) and len(val) > 1000:
                                block["input"][key] = (
                                    val[:1000]
                                    + f"... [truncated {len(val)} chars]"
                                )
                        truncated += 1
        # Step 2: Truncate overly long user text messages (e.g. pasted content)
        USER_MSG_LIMIT = 10000
        for msg in self.messages:
            if msg.get("role") != "user":
                continue
            content = msg.get("content", [])
            if isinstance(content, list):
                for block in content:
                    if isinstance(block, dict) and block.get("type") == "text":
                        text = block.get("text", "")
                        if len(text) > USER_MSG_LIMIT:
                            block["text"] = (
                                text[:USER_MSG_LIMIT]
                                + f"\n\n[Message truncated for context recovery: "
                                f"{len(text)} -> {USER_MSG_LIMIT} chars]"
                            )
                            truncated += 1
            elif isinstance(content, str) and len(content) > USER_MSG_LIMIT:
                msg["content"] = (
                    content[:USER_MSG_LIMIT]
                    + f"\n\n[Message truncated for context recovery: "
                    f"{len(content)} -> {USER_MSG_LIMIT} chars]"
                )
                truncated += 1
        # Step 3: Keep only the last 5 complete turns
        turns = self._identify_complete_turns()
        if len(turns) > 5:
            kept_turns = turns[-5:]
            new_messages = []
            for turn in kept_turns:
                new_messages.extend(turn["messages"])
            removed = len(turns) - 5
            self.messages[:] = new_messages
            logger.info(
                f"🔧 Aggressive trim: removed {removed} old turns, "
                f"truncated {truncated} large blocks, "
                f"{original_count} -> {len(self.messages)} messages"
            )
            return True
        if truncated > 0:
            logger.info(
                f"🔧 Aggressive trim: truncated {truncated} large blocks "
                f"(no turns removed, only {len(turns)} turn(s) left)"
            )
            return True
        # Nothing left to trim
        logger.warning("🔧 Aggressive trim: nothing to trim, will clear history")
        return False
    def _trim_messages(self):
        """
        Intelligently trim message history while keeping the conversation coherent.

        Uses complete conversation turns as the trimming unit, which guarantees:
        1. The conversation is never cut in the middle of a turn
        2. Tool-call chains (tool_use + tool_result) stay intact
        3. Every kept turn is complete (user message + AI reply + tool calls)
        """
        if not self.messages or not self.agent:
            return
        # Step 0: Truncate large tool results in historical turns (capped at 20K chars)
        self._truncate_historical_tool_results()
        # Step 1: Identify complete conversation turns
        turns = self._identify_complete_turns()
        if not turns:
            return
        # Step 2: Turn-count limit - keep only the most recent N turns
        if len(turns) > self.max_context_turns:
            removed_turns = len(turns) - self.max_context_turns
            turns = turns[-self.max_context_turns:]  # keep the newest turns
            logger.info(
                f"💾 上下文轮次超限: {len(turns) + removed_turns} > {self.max_context_turns},"
                f"移除最早的 {removed_turns} 轮完整对话"
            )
        # Step 3: Token limit - keep only complete turns
        # Get context window from agent (based on model)
        context_window = self.agent._get_model_context_window()
        # Use configured max_context_tokens if available
        if hasattr(self.agent, 'max_context_tokens') and self.agent.max_context_tokens:
            max_tokens = self.agent.max_context_tokens
        else:
            # Reserve 10% for response generation
            reserve_tokens = int(context_window * 0.1)
            max_tokens = context_window - reserve_tokens
        # Estimate system prompt tokens
        system_tokens = self.agent._estimate_message_tokens({"role": "system", "content": self.system_prompt})
        available_tokens = max_tokens - system_tokens
        # Calculate current tokens
        current_tokens = sum(self._estimate_turn_tokens(turn) for turn in turns)
        # If under limit, reconstruct messages and return
        if current_tokens + system_tokens <= max_tokens:
            # Reconstruct message list from turns
            new_messages = []
            for turn in turns:
                new_messages.extend(turn['messages'])
            old_count = len(self.messages)
            self.messages = new_messages
            # Log if we removed messages due to turn limit
            if old_count > len(self.messages):
                logger.info(f" 重建消息列表: {old_count} -> {len(self.messages)} 条消息")
            return
        # Token limit exceeded - keep complete turns from newest
        logger.info(
            f"🔄 上下文tokens超限: ~{current_tokens + system_tokens} > {max_tokens},"
            f"将按完整轮次移除最早的对话"
        )
        # Accumulate from the newest turn backwards, keeping each turn whole
        kept_turns = []
        accumulated_tokens = 0
        min_turns = 3  # try to keep at least 3 turns, but not at any cost (avoid blowing the token limit)
        for i, turn in enumerate(reversed(turns)):
            turn_tokens = self._estimate_turn_tokens(turn)
            turns_from_end = i + 1
            # Does adding this turn still fit?
            if accumulated_tokens + turn_tokens <= available_tokens:
                kept_turns.insert(0, turn)
                accumulated_tokens += turn_tokens
            else:
                # Over the limit
                # If we have not kept min_turns yet and this turn is within the
                # newest min_turns, try to keep it anyway
                if len(kept_turns) < min_turns and turns_from_end <= min_turns:
                    # Check how badly we would overflow (give up above 20%)
                    overflow_ratio = (accumulated_tokens + turn_tokens - available_tokens) / available_tokens
                    if overflow_ratio < 0.2:  # allow at most 20% overflow
                        kept_turns.insert(0, turn)
                        accumulated_tokens += turn_tokens
                        logger.debug(f" 为保留最少轮次,允许超出 {overflow_ratio*100:.1f}%")
                        continue
                # Stop keeping any earlier turns
                break
        # Rebuild the message list from the kept turns
        new_messages = []
        for turn in kept_turns:
            new_messages.extend(turn['messages'])
        old_count = len(self.messages)
        old_turn_count = len(turns)
        self.messages = new_messages
        new_count = len(self.messages)
        new_turn_count = len(kept_turns)
        if old_count > new_count:
            logger.info(
                f" 移除了 {old_turn_count - new_turn_count} 轮对话 "
                f"({old_count} -> {new_count} 条消息,"
                f"~{current_tokens + system_tokens} -> ~{accumulated_tokens + system_tokens} tokens)"
            )
def _prepare_messages(self) -> List[Dict[str, Any]]:
"""
Prepare messages to send to LLM
Note: For Claude API, system prompt should be passed separately via system parameter,
not as a message. The AgentLLMModel will handle this.
"""
# Don't add system message here - it will be handled separately by the LLM adapter
return self.messages | zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/protocol/context.py | Python | class TeamContext:
    def __init__(self, name: str, description: str, rule: str, agents: list, max_steps: int = 100):
        """
        Initialize the TeamContext with a name, description, rules and a list of agents.

        :param name: The name of the group context.
        :param description: A description of the group context.
        :param rule: The rules governing the group context.
        :param agents: A list of agents in the context.
        :param max_steps: Maximum number of execution steps allowed (default 100).
        """
        self.name = name
        self.description = description
        self.rule = rule
        self.agents = agents
        self.user_task = ""  # For backward compatibility
        self.task = None  # Will be a Task instance
        self.model = None  # Will be an instance of LLMModel
        self.task_short_name = None  # Store the task directory name
        # Outputs of agents that have already been executed (AgentOutput items)
        self.agent_outputs: list = []
        self.current_steps = 0
        self.max_steps = max_steps
class AgentOutput:
def __init__(self, agent_name: str, output: str):
self.agent_name = agent_name
self.output = output | zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/protocol/models.py | Python | """
Models module for agent system.
Provides basic model classes needed by tools and bridge integration.
"""
from typing import Any, Dict, List, Optional
class LLMRequest:
    """Request model for LLM operations.

    Holds chat messages plus sampling/streaming options; any extra keyword
    arguments are attached to the instance as attributes.
    """
    def __init__(self, messages: List[Dict[str, str]] = None, model: Optional[str] = None,
                 temperature: float = 0.7, max_tokens: Optional[int] = None,
                 stream: bool = False, tools: Optional[List] = None, **kwargs):
        self.messages = messages or []
        self.model = model
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.stream = stream
        self.tools = tools
        # Accept arbitrary extra options without declaring them up front.
        for extra_name, extra_value in kwargs.items():
            setattr(self, extra_name, extra_value)
class LLMModel:
    """Base class for LLM models.

    Concrete adapters are expected to override call/call_stream; this base
    only stores the model name and opaque configuration.
    """
    def __init__(self, model: str = None, **kwargs):
        self.model = model
        # Remaining keyword arguments are kept verbatim as configuration.
        self.config = kwargs

    def call(self, request: LLMRequest):
        """Call the model with a request (placeholder - must be overridden)."""
        raise NotImplementedError("LLMModel.call not implemented in this context")

    def call_stream(self, request: LLMRequest):
        """Call the model with streaming (placeholder - must be overridden)."""
        raise NotImplementedError("LLMModel.call_stream not implemented in this context")
class ModelFactory:
"""Factory for creating model instances"""
@staticmethod
def create_model(model_type: str, **kwargs):
"""
Create a model instance based on type.
This is a placeholder implementation.
"""
raise NotImplementedError("ModelFactory.create_model not implemented in this context") | zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/protocol/result.py | Python | from __future__ import annotations
import time
import uuid
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Dict, Any, Optional
from agent.protocol.task import Task, TaskStatus
class AgentActionType(Enum):
    """Enum representing different types of agent actions."""
    TOOL_USE = "tool_use"  # agent invoked a tool
    THINKING = "thinking"  # intermediate reasoning step
    FINAL_ANSWER = "final_answer"  # terminal answer returned to the user
@dataclass
class ToolResult:
    """
    Represents the result of a tool use.

    Attributes:
        tool_name: Name of the tool used
        input_params: Parameters passed to the tool
        output: Output from the tool
        status: Status of the tool execution ("success"/"error")
        error_message: Error message if the tool execution failed
        execution_time: Time taken to execute the tool, in seconds
    """
    tool_name: str
    input_params: Dict[str, Any]
    output: Any
    status: str
    error_message: Optional[str] = None
    execution_time: float = 0.0
@dataclass
class AgentAction:
    """
    Represents an action taken by an agent.

    Attributes:
        id: Unique identifier for the action (auto-generated UUID)
        agent_id: ID of the agent that performed the action
        agent_name: Name of the agent that performed the action
        action_type: Type of action (tool use, thinking, final answer)
        content: Content of the action (thought content, final answer content)
        tool_result: Tool use details if action_type is TOOL_USE
        thought: Optional reasoning text attached to the action
        timestamp: When the action was performed (epoch seconds)
    """
    agent_id: str
    agent_name: str
    action_type: AgentActionType
    id: str = field(default_factory=lambda: str(uuid.uuid4()))
    content: str = ""
    tool_result: Optional[ToolResult] = None
    thought: Optional[str] = None
    timestamp: float = field(default_factory=time.time)
@dataclass
class AgentResult:
"""
Represents the result of an agent's execution.
Attributes:
final_answer: The final answer provided by the agent
step_count: Number of steps taken by the agent
status: Status of the execution (success/error)
error_message: Error message if execution failed
"""
final_answer: str
step_count: int
status: str = "success"
error_message: Optional[str] = None
@classmethod
def success(cls, final_answer: str, step_count: int) -> "AgentResult":
"""Create a successful result"""
return cls(final_answer=final_answer, step_count=step_count)
@classmethod
def error(cls, error_message: str, step_count: int = 0) -> "AgentResult":
"""Create an error result"""
return cls(
final_answer=f"Error: {error_message}",
step_count=step_count,
status="error",
error_message=error_message
)
@property
def is_error(self) -> bool:
"""Check if the result represents an error"""
return self.status == "error" | zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/protocol/task.py | Python | from __future__ import annotations
import time
import uuid
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, Any, List
class TaskType(Enum):
    """Enum representing different types of tasks, by media content."""
    TEXT = "text"
    IMAGE = "image"
    VIDEO = "video"
    AUDIO = "audio"
    FILE = "file"
    MIXED = "mixed"  # combination of multiple media types
class TaskStatus(Enum):
    """Enum representing the lifecycle status of a task."""
    INIT = "init"  # Initial state
    PROCESSING = "processing"  # In progress
    COMPLETED = "completed"  # Completed
    FAILED = "failed"  # Failed
@dataclass
class Task:
"""
Represents a task to be processed by an agent.
Attributes:
id: Unique identifier for the task
content: The primary text content of the task
type: Type of the task
status: Current status of the task
created_at: Timestamp when the task was created
updated_at: Timestamp when the task was last updated
metadata: Additional metadata for the task
images: List of image URLs or base64 encoded images
videos: List of video URLs
audios: List of audio URLs or base64 encoded audios
files: List of file URLs or paths
"""
id: str = field(default_factory=lambda: str(uuid.uuid4()))
content: str = ""
type: TaskType = TaskType.TEXT
status: TaskStatus = TaskStatus.INIT
created_at: float = field(default_factory=time.time)
updated_at: float = field(default_factory=time.time)
metadata: Dict[str, Any] = field(default_factory=dict)
# Media content
images: List[str] = field(default_factory=list)
videos: List[str] = field(default_factory=list)
audios: List[str] = field(default_factory=list)
files: List[str] = field(default_factory=list)
    def __init__(self, content: str = "", **kwargs):
        """
        Initialize a Task with content and optional keyword arguments.

        Note: this custom __init__ replaces the @dataclass-generated one, so
        the field() defaults declared on the class are not used at runtime;
        equivalent defaults are applied via kwargs.get below.

        Args:
            content: The text content of the task
            **kwargs: Additional attributes to set (id, type, status,
                created_at, updated_at, metadata, images, videos, audios, files)
        """
        self.id = kwargs.get('id', str(uuid.uuid4()))
        self.content = content
        self.type = kwargs.get('type', TaskType.TEXT)
        self.status = kwargs.get('status', TaskStatus.INIT)
        self.created_at = kwargs.get('created_at', time.time())
        self.updated_at = kwargs.get('updated_at', time.time())
        self.metadata = kwargs.get('metadata', {})
        self.images = kwargs.get('images', [])
        self.videos = kwargs.get('videos', [])
        self.audios = kwargs.get('audios', [])
        self.files = kwargs.get('files', [])
def get_text(self) -> str:
"""
Get the text content of the task.
Returns:
The text content
"""
return self.content
def update_status(self, status: TaskStatus) -> None:
"""
Update the status of the task.
Args:
status: The new status
"""
self.status = status
self.updated_at = time.time() | zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/skills/__init__.py | Python | """
Skills module for agent system.
This module provides the framework for loading, managing, and executing skills.
Skills are markdown files with frontmatter that provide specialized instructions
for specific tasks.
"""
from agent.skills.types import (
Skill,
SkillEntry,
SkillMetadata,
SkillInstallSpec,
LoadSkillsResult,
)
from agent.skills.loader import SkillLoader
from agent.skills.manager import SkillManager
from agent.skills.formatter import format_skills_for_prompt
__all__ = [
"Skill",
"SkillEntry",
"SkillMetadata",
"SkillInstallSpec",
"LoadSkillsResult",
"SkillLoader",
"SkillManager",
"format_skills_for_prompt",
]
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/skills/config.py | Python | """
Configuration support for skills.
"""
import os
import platform
from typing import Dict, Optional, List
from agent.skills.types import SkillEntry
def resolve_runtime_platform() -> str:
    """Return the current runtime platform name, lowercased (e.g. 'linux')."""
    system_name = platform.system()
    return system_name.lower()
def has_binary(bin_name: str) -> bool:
    """
    Check if a binary is available in PATH.

    :param bin_name: Binary name to check
    :return: True if binary is available
    """
    import shutil
    resolved = shutil.which(bin_name)
    return resolved is not None
def has_any_binary(bin_names: List[str]) -> bool:
    """
    Check if any of the given binaries is available in PATH.

    :param bin_names: List of binary names to check
    :return: True if at least one binary is available
    """
    # Inlined PATH lookup (same check has_binary performs).
    import shutil
    return any(shutil.which(candidate) is not None for candidate in bin_names)
def has_env_var(env_name: str) -> bool:
    """
    Check if an environment variable is set to a non-blank value.

    :param env_name: Environment variable name
    :return: True if environment variable is set (whitespace-only counts as unset)
    """
    value = os.environ.get(env_name)
    if value is None:
        return False
    return bool(value.strip())
def get_skill_config(config: Optional[Dict], skill_name: str) -> Optional[Dict]:
    """
    Get skill-specific configuration from the global config.

    Looks up config['skills']['entries'][skill_name], returning None when
    any level is missing or malformed.

    :param config: Global configuration dictionary
    :param skill_name: Name of the skill
    :return: Skill configuration or None
    """
    if not config:
        return None
    skills_section = config.get('skills', {})
    if not isinstance(skills_section, dict):
        return None
    entry_map = skills_section.get('entries', {})
    if not isinstance(entry_map, dict):
        return None
    return entry_map.get(skill_name)
def should_include_skill(
    entry: SkillEntry,
    config: Optional[Dict] = None,
    current_platform: Optional[str] = None,
) -> bool:
    """
    Determine if a skill should be included based on requirements.

    Simple rule: Skills are auto-enabled if their requirements are met.
    - Has required API keys → enabled
    - Missing API keys → disabled
    - Wrong keys → enabled but will fail at runtime (LLM will handle error)

    :param entry: SkillEntry to check
    :param config: Configuration dictionary (currently unused, reserved for future)
    :param current_platform: Current platform (default: auto-detect)
    :return: True if skill should be included
    """
    metadata = entry.metadata
    # No metadata = always include (no requirements)
    if not metadata:
        return True
    # Check platform requirements (can't work on wrong platform)
    if metadata.os:
        platform_name = current_platform or resolve_runtime_platform()
        # Map common platform names to the identifiers used in skill metadata
        platform_map = {
            'darwin': 'darwin',
            'linux': 'linux',
            'windows': 'win32',
        }
        normalized_platform = platform_map.get(platform_name, platform_name)
        if normalized_platform not in metadata.os:
            return False
    # If skill has 'always: true', include it regardless of other requirements
    # (the platform gate above still applies - an always-on skill cannot run
    # on an unsupported OS)
    if metadata.always:
        return True
    # Check requirements
    if metadata.requires:
        # Check required binaries (all must be present)
        required_bins = metadata.requires.get('bins', [])
        if required_bins:
            if not all(has_binary(bin_name) for bin_name in required_bins):
                return False
        # Check anyBins (at least one must be present)
        any_bins = metadata.requires.get('anyBins', [])
        if any_bins:
            if not has_any_binary(any_bins):
                return False
        # Check environment variables (API keys)
        # Simple rule: All required env vars must be set
        required_env = metadata.requires.get('env', [])
        if required_env:
            for env_name in required_env:
                if not has_env_var(env_name):
                    # Missing required API key → disable skill
                    return False
    return True
def is_config_path_truthy(config: Dict, path: str) -> bool:
    """
    Check if a config path resolves to a truthy value.

    Walks the dot-separated path through nested dicts; a missing level or
    a None value yields False. Blank strings count as falsy.

    :param config: Configuration dictionary
    :param path: Dot-separated path (e.g., 'skills.enabled')
    :return: True if path resolves to truthy value
    """
    node = config
    for segment in path.split('.'):
        if not isinstance(node, dict):
            return False
        node = node.get(segment)
        if node is None:
            return False
    # Interpret the leaf value's truthiness
    if isinstance(node, bool):
        return node
    if isinstance(node, (int, float)):
        return node != 0
    if isinstance(node, str):
        return bool(node.strip())
    return bool(node)
def resolve_config_path(config: Dict, path: str):
    """
    Resolve a dot-separated config path to its value.

    :param config: Configuration dictionary
    :param path: Dot-separated path
    :return: Value at path, or None when any level is missing or non-dict
    """
    node = config
    for segment in path.split('.'):
        if not isinstance(node, dict):
            return None
        node = node.get(segment)
        if node is None:
            return None
    return node
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/skills/formatter.py | Python | """
Skill formatter for generating prompts from skills.
"""
from typing import List
from agent.skills.types import Skill, SkillEntry
def format_skills_for_prompt(skills: List[Skill]) -> str:
    """
    Format skills for inclusion in a system prompt.

    Uses XML format per Agent Skills standard. Skills with
    disable_model_invocation=True are excluded.

    :param skills: List of skills to format
    :return: Formatted prompt text (empty string when no skill is visible)
    """
    # Only skills the model is allowed to invoke are advertised.
    candidates = [skill for skill in skills if not skill.disable_model_invocation]
    if not candidates:
        return ""
    parts = ["", "<available_skills>"]
    for skill in candidates:
        parts.append(" <skill>")
        parts.append(f" <name>{_escape_xml(skill.name)}</name>")
        parts.append(f" <description>{_escape_xml(skill.description)}</description>")
        parts.append(f" <location>{_escape_xml(skill.file_path)}</location>")
        parts.append(" </skill>")
    parts.append("</available_skills>")
    return "\n".join(parts)
def format_skill_entries_for_prompt(entries: List[SkillEntry]) -> str:
    """
    Format skill entries for inclusion in a system prompt.

    :param entries: List of skill entries to format
    :return: Formatted prompt text
    """
    # Unwrap the Skill objects and delegate to the skill formatter.
    return format_skills_for_prompt([entry.skill for entry in entries])
def _escape_xml(text: str) -> str:
"""Escape XML special characters."""
return (text
.replace('&', '&')
.replace('<', '<')
.replace('>', '>')
.replace('"', '"')
.replace("'", '''))
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/skills/frontmatter.py | Python | """
Frontmatter parsing for skills.
"""
import re
import json
from typing import Dict, Any, Optional, List
from agent.skills.types import SkillMetadata, SkillInstallSpec
def parse_frontmatter(content: str) -> Dict[str, Any]:
    """
    Parse YAML-style frontmatter from markdown content.

    Returns a dictionary of frontmatter fields (empty when no frontmatter
    block is present).
    """
    result: Dict[str, Any] = {}
    # Frontmatter is the text between the leading pair of --- markers.
    match = re.match(r'^---\s*\n(.*?)\n---\s*\n', content, re.DOTALL)
    if not match:
        return result
    block = match.group(1)
    # Prefer real YAML parsing when PyYAML is installed.
    try:
        import yaml
        parsed = yaml.safe_load(block)
        return parsed if isinstance(parsed, dict) else {}
    except ImportError:
        pass
    except Exception:
        # Malformed YAML - fall through to the simple parser below.
        pass
    # Fallback: simple "key: value" line parsing (no nesting support).
    for raw_line in block.split('\n'):
        line = raw_line.strip()
        if not line or line.startswith('#'):
            continue
        if ':' not in line:
            continue
        key, _, value = line.partition(':')
        key = key.strip()
        value = value.strip()
        # Values that look like JSON objects/arrays are decoded as JSON.
        if value.startswith('{') or value.startswith('['):
            try:
                value = json.loads(value)
            except json.JSONDecodeError:
                pass
        # Recognize booleans and non-negative integers.
        elif value.lower() in ('true', 'false'):
            value = value.lower() == 'true'
        elif value.isdigit():
            value = int(value)
        result[key] = value
    return result
def parse_metadata(frontmatter: Dict[str, Any]) -> Optional[SkillMetadata]:
    """
    Parse skill metadata from frontmatter.

    Looks for a 'metadata' field containing JSON (string form) or an
    already-parsed dict with skill configuration.

    :param frontmatter: Parsed frontmatter mapping
    :return: SkillMetadata, or None when absent or unparseable
    """
    metadata_raw = frontmatter.get('metadata')
    if not metadata_raw:
        return None
    # If it's a string, try to parse as JSON
    if isinstance(metadata_raw, str):
        try:
            metadata_raw = json.loads(metadata_raw)
        except json.JSONDecodeError:
            return None
    if not isinstance(metadata_raw, dict):
        return None
    # Use metadata_raw directly (COW format)
    meta_obj = metadata_raw
    # Parse install specs
    install_specs = []
    install_raw = meta_obj.get('install', [])
    if isinstance(install_raw, list):
        for spec_raw in install_raw:
            if not isinstance(spec_raw, dict):
                continue
            # 'kind' is preferred; 'type' is accepted as an alias
            kind = spec_raw.get('kind', spec_raw.get('type', '')).lower()
            if not kind:
                continue
            spec = SkillInstallSpec(
                kind=kind,
                id=spec_raw.get('id'),
                label=spec_raw.get('label'),
                bins=_normalize_string_list(spec_raw.get('bins')),
                os=_normalize_string_list(spec_raw.get('os')),
                formula=spec_raw.get('formula'),
                package=spec_raw.get('package'),
                module=spec_raw.get('module'),
                url=spec_raw.get('url'),
                archive=spec_raw.get('archive'),
                extract=spec_raw.get('extract', False),
                strip_components=spec_raw.get('stripComponents'),
                target_dir=spec_raw.get('targetDir'),
            )
            install_specs.append(spec)
    # Parse requires: every entry's value is normalized to a list of strings
    requires = {}
    requires_raw = meta_obj.get('requires', {})
    if isinstance(requires_raw, dict):
        for key, value in requires_raw.items():
            requires[key] = _normalize_string_list(value)
    return SkillMetadata(
        always=meta_obj.get('always', False),
        skill_key=meta_obj.get('skillKey'),
        primary_env=meta_obj.get('primaryEnv'),
        emoji=meta_obj.get('emoji'),
        homepage=meta_obj.get('homepage'),
        os=_normalize_string_list(meta_obj.get('os')),
        requires=requires,
        install=install_specs,
    )
def _normalize_string_list(value: Any) -> List[str]:
"""Normalize a value to a list of strings."""
if not value:
return []
if isinstance(value, list):
return [str(v).strip() for v in value if v]
if isinstance(value, str):
return [v.strip() for v in value.split(',') if v.strip()]
return []
def parse_boolean_value(value: Optional[str], default: bool = False) -> bool:
    """Interpret a frontmatter value as a boolean.

    Booleans pass through unchanged; strings are matched case-insensitively
    against the common truthy spellings ('true', '1', 'yes', 'on'); anything
    else — including None — yields ``default``.
    """
    if isinstance(value, bool):
        return value
    if isinstance(value, str):
        truthy_spellings = {'true', '1', 'yes', 'on'}
        return value.lower() in truthy_spellings
    # None or any unrecognized type falls back to the caller's default.
    return default
def get_frontmatter_value(frontmatter: Dict[str, Any], key: str) -> Optional[str]:
    """Look up ``key`` in frontmatter and coerce the value to ``str``.

    Returns None when the key is absent or maps to None; every other value
    (including False and 0) is stringified.
    """
    value = frontmatter.get(key)
    if value is None:
        return None
    return str(value)
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/skills/loader.py | Python | """
Skill loader for discovering and loading skills from directories.
"""
import os
from pathlib import Path
from typing import List, Optional, Dict
from common.log import logger
from agent.skills.types import Skill, SkillEntry, LoadSkillsResult, SkillMetadata
from agent.skills.frontmatter import parse_frontmatter, parse_metadata, parse_boolean_value, get_frontmatter_value
class SkillLoader:
    """Loads skills from various directories.

    A "skill" is a markdown file (root-level ``*.md`` or a ``SKILL.md`` inside
    a subdirectory) whose frontmatter supplies at least a description.
    """

    def __init__(self, workspace_dir: Optional[str] = None):
        """
        Initialize the skill loader.
        :param workspace_dir: Agent workspace directory (for workspace-specific skills)
        """
        self.workspace_dir = workspace_dir

    def load_skills_from_dir(self, dir_path: str, source: str) -> LoadSkillsResult:
        """
        Load skills from a directory.
        Discovery rules:
        - Direct .md files in the root directory
        - Recursive SKILL.md files under subdirectories
        :param dir_path: Directory path to scan
        :param source: Source identifier (e.g., 'managed', 'workspace', 'bundled')
        :return: LoadSkillsResult with skills and diagnostics
        """
        skills = []
        diagnostics = []
        # Missing or non-directory paths are reported as diagnostics, not errors.
        if not os.path.exists(dir_path):
            diagnostics.append(f"Directory does not exist: {dir_path}")
            return LoadSkillsResult(skills=skills, diagnostics=diagnostics)
        if not os.path.isdir(dir_path):
            diagnostics.append(f"Path is not a directory: {dir_path}")
            return LoadSkillsResult(skills=skills, diagnostics=diagnostics)
        # Load skills from root-level .md files and subdirectories
        result = self._load_skills_recursive(dir_path, source, include_root_files=True)
        return result

    def _load_skills_recursive(
        self,
        dir_path: str,
        source: str,
        include_root_files: bool = False
    ) -> LoadSkillsResult:
        """
        Recursively load skills from a directory.
        :param dir_path: Directory to scan
        :param source: Source identifier
        :param include_root_files: Whether to include root-level .md files
            (True only for the top-level call; nested levels only match SKILL.md)
        :return: LoadSkillsResult
        """
        skills = []
        diagnostics = []
        try:
            entries = os.listdir(dir_path)
        except Exception as e:
            # Unreadable directory (permissions, race) — record and move on.
            diagnostics.append(f"Failed to list directory {dir_path}: {e}")
            return LoadSkillsResult(skills=skills, diagnostics=diagnostics)
        for entry in entries:
            # Skip hidden files and directories
            if entry.startswith('.'):
                continue
            # Skip common non-skill directories
            if entry in ('node_modules', '__pycache__', 'venv', '.git'):
                continue
            full_path = os.path.join(dir_path, entry)
            # Handle directories
            if os.path.isdir(full_path):
                # Recursively scan subdirectories
                sub_result = self._load_skills_recursive(full_path, source, include_root_files=False)
                skills.extend(sub_result.skills)
                diagnostics.extend(sub_result.diagnostics)
                continue
            # Handle files
            if not os.path.isfile(full_path):
                continue
            # Check if this is a skill file: plain .md at the root scan level,
            # exactly 'SKILL.md' at nested levels.
            is_root_md = include_root_files and entry.endswith('.md')
            is_skill_md = not include_root_files and entry == 'SKILL.md'
            if not (is_root_md or is_skill_md):
                continue
            # Load the skill
            skill_result = self._load_skill_from_file(full_path, source)
            if skill_result.skills:
                skills.extend(skill_result.skills)
            diagnostics.extend(skill_result.diagnostics)
        return LoadSkillsResult(skills=skills, diagnostics=diagnostics)

    def _load_skill_from_file(self, file_path: str, source: str) -> LoadSkillsResult:
        """
        Load a single skill from a markdown file.
        :param file_path: Path to the skill markdown file
        :param source: Source identifier
        :return: LoadSkillsResult (zero or one skill, plus diagnostics)
        """
        diagnostics = []
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()
        except Exception as e:
            diagnostics.append(f"Failed to read skill file {file_path}: {e}")
            return LoadSkillsResult(skills=[], diagnostics=diagnostics)
        # Parse frontmatter
        frontmatter = parse_frontmatter(content)
        # Get skill name and description; name defaults to the directory name.
        skill_dir = os.path.dirname(file_path)
        parent_dir_name = os.path.basename(skill_dir)
        name = frontmatter.get('name', parent_dir_name)
        description = frontmatter.get('description', '')
        # Normalize name (handle both string and list)
        if isinstance(name, list):
            name = name[0] if name else parent_dir_name
        elif not isinstance(name, str):
            name = str(name) if name else parent_dir_name
        # Normalize description (handle both string and list)
        if isinstance(description, list):
            description = ' '.join(str(d) for d in description if d)
        elif not isinstance(description, str):
            description = str(description) if description else ''
        # Special handling for linkai-agent: dynamically load apps from config.json
        if name == 'linkai-agent':
            description = self._load_linkai_agent_description(skill_dir, description)
        # A skill without a description is excluded (description drives prompts).
        if not description or not description.strip():
            diagnostics.append(f"Skill {name} has no description: {file_path}")
            return LoadSkillsResult(skills=[], diagnostics=diagnostics)
        # Parse disable-model-invocation flag
        disable_model_invocation = parse_boolean_value(
            get_frontmatter_value(frontmatter, 'disable-model-invocation'),
            default=False
        )
        # Create skill object
        skill = Skill(
            name=name,
            description=description,
            file_path=file_path,
            base_dir=skill_dir,
            source=source,
            content=content,
            disable_model_invocation=disable_model_invocation,
            frontmatter=frontmatter,
        )
        return LoadSkillsResult(skills=[skill], diagnostics=diagnostics)

    def _load_linkai_agent_description(self, skill_dir: str, default_description: str) -> str:
        """
        Dynamically load LinkAI agent description from config.json
        :param skill_dir: Skill directory
        :param default_description: Default description from SKILL.md
        :return: Dynamic description with app list; "" to exclude the skill
        """
        import json
        config_path = os.path.join(skill_dir, "config.json")
        # Without config.json, skip this skill entirely (return empty to trigger exclusion)
        if not os.path.exists(config_path):
            logger.debug(f"[SkillLoader] linkai-agent skipped: no config.json found")
            return ""
        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                config = json.load(f)
            apps = config.get("apps", [])
            if not apps:
                return default_description
            # Build dynamic description with app details
            # NOTE(review): assumes each app dict has app_name/app_code/app_description;
            # a missing key raises KeyError, which is caught below and falls back.
            app_descriptions = "; ".join([
                f"{app['app_name']}({app['app_code']}: {app['app_description']})"
                for app in apps
            ])
            return f"Call LinkAI apps/workflows. {app_descriptions}"
        except Exception as e:
            logger.warning(f"[SkillLoader] Failed to load linkai-agent config: {e}")
            return default_description

    def load_all_skills(
        self,
        managed_dir: Optional[str] = None,
        workspace_skills_dir: Optional[str] = None,
        extra_dirs: Optional[List[str]] = None,
    ) -> Dict[str, SkillEntry]:
        """
        Load skills from all configured locations with precedence.
        Precedence (lowest to highest):
        1. Extra directories
        2. Managed skills directory
        3. Workspace skills directory
        Later sources overwrite earlier entries with the same skill name,
        which is what implements the precedence order.
        :param managed_dir: Managed skills directory (e.g., ~/.cow/skills)
        :param workspace_skills_dir: Workspace skills directory (e.g., workspace/skills)
        :param extra_dirs: Additional directories to load skills from
        :return: Dictionary mapping skill name to SkillEntry
        """
        skill_map: Dict[str, SkillEntry] = {}
        all_diagnostics = []
        # Load from extra directories (lowest precedence)
        if extra_dirs:
            for extra_dir in extra_dirs:
                if not os.path.exists(extra_dir):
                    continue
                result = self.load_skills_from_dir(extra_dir, source='extra')
                all_diagnostics.extend(result.diagnostics)
                for skill in result.skills:
                    entry = self._create_skill_entry(skill)
                    skill_map[skill.name] = entry
        # Load from managed directory
        if managed_dir and os.path.exists(managed_dir):
            result = self.load_skills_from_dir(managed_dir, source='managed')
            all_diagnostics.extend(result.diagnostics)
            for skill in result.skills:
                entry = self._create_skill_entry(skill)
                skill_map[skill.name] = entry
        # Load from workspace directory (highest precedence)
        if workspace_skills_dir and os.path.exists(workspace_skills_dir):
            result = self.load_skills_from_dir(workspace_skills_dir, source='workspace')
            all_diagnostics.extend(result.diagnostics)
            for skill in result.skills:
                entry = self._create_skill_entry(skill)
                skill_map[skill.name] = entry
        # Log diagnostics
        if all_diagnostics:
            logger.debug(f"Skill loading diagnostics: {len(all_diagnostics)} issues")
            for diag in all_diagnostics[:5]:  # Log first 5
                logger.debug(f"  - {diag}")
        logger.debug(f"Loaded {len(skill_map)} skills from all sources")
        return skill_map

    def _create_skill_entry(self, skill: Skill) -> SkillEntry:
        """
        Create a SkillEntry from a Skill with parsed metadata.
        :param skill: The skill to create an entry for
        :return: SkillEntry with metadata
        """
        metadata = parse_metadata(skill.frontmatter)
        # Parse user-invocable flag (defaults to True: users may invoke directly)
        user_invocable = parse_boolean_value(
            get_frontmatter_value(skill.frontmatter, 'user-invocable'),
            default=True
        )
        return SkillEntry(
            skill=skill,
            metadata=metadata,
            user_invocable=user_invocable,
        )
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/skills/manager.py | Python | """
Skill manager for managing skill lifecycle and operations.
"""
import os
from typing import Dict, List, Optional
from pathlib import Path
from common.log import logger
from agent.skills.types import Skill, SkillEntry, SkillSnapshot
from agent.skills.loader import SkillLoader
from agent.skills.formatter import format_skill_entries_for_prompt
class SkillManager:
    """Manages skills for an agent.

    Wraps a SkillLoader and caches loaded skills in ``self.skills``
    (name -> SkillEntry); skills are loaded eagerly at construction time.
    """

    def __init__(
        self,
        workspace_dir: Optional[str] = None,
        managed_skills_dir: Optional[str] = None,
        extra_dirs: Optional[List[str]] = None,
        config: Optional[Dict] = None,
    ):
        """
        Initialize the skill manager.
        :param workspace_dir: Agent workspace directory
        :param managed_skills_dir: Managed skills directory (e.g., ~/.cow/skills)
        :param extra_dirs: Additional skill directories
        :param config: Configuration dictionary
        """
        self.workspace_dir = workspace_dir
        self.managed_skills_dir = managed_skills_dir or self._get_default_managed_dir()
        self.extra_dirs = extra_dirs or []
        self.config = config or {}
        self.loader = SkillLoader(workspace_dir=workspace_dir)
        self.skills: Dict[str, SkillEntry] = {}
        # Load skills on initialization
        self.refresh_skills()

    def _get_default_managed_dir(self) -> str:
        """Get the default managed skills directory."""
        # Use project root skills directory as default
        import os
        # Three dirname() calls climb from agent/skills/manager.py to the repo root.
        project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        return os.path.join(project_root, 'skills')

    def refresh_skills(self):
        """Reload all skills from configured directories."""
        workspace_skills_dir = None
        if self.workspace_dir:
            workspace_skills_dir = os.path.join(self.workspace_dir, 'skills')
        self.skills = self.loader.load_all_skills(
            managed_dir=self.managed_skills_dir,
            workspace_skills_dir=workspace_skills_dir,
            extra_dirs=self.extra_dirs,
        )
        logger.debug(f"SkillManager: Loaded {len(self.skills)} skills")

    def get_skill(self, name: str) -> Optional[SkillEntry]:
        """
        Get a skill by name.
        :param name: Skill name
        :return: SkillEntry or None if not found
        """
        return self.skills.get(name)

    def list_skills(self) -> List[SkillEntry]:
        """
        Get all loaded skills.
        :return: List of all skill entries
        """
        return list(self.skills.values())

    def filter_skills(
        self,
        skill_filter: Optional[List[str]] = None,
        include_disabled: bool = False,
    ) -> List[SkillEntry]:
        """
        Filter skills based on criteria.
        Simple rule: Skills are auto-enabled if requirements are met.
        - Has required API keys → included
        - Missing API keys → excluded
        :param skill_filter: List of skill names to include (None = all)
        :param include_disabled: Whether to include skills with disable_model_invocation=True
        :return: Filtered list of skill entries
        """
        from agent.skills.config import should_include_skill
        entries = list(self.skills.values())
        # Check requirements (platform, binaries, env vars)
        entries = [e for e in entries if should_include_skill(e, self.config)]
        # Apply skill filter
        if skill_filter is not None:
            # Flatten and normalize skill names (handle both strings and nested lists)
            normalized = []
            for item in skill_filter:
                if isinstance(item, str):
                    name = item.strip()
                    if name:
                        normalized.append(name)
                elif isinstance(item, list):
                    # Handle nested lists
                    for subitem in item:
                        if isinstance(subitem, str):
                            name = subitem.strip()
                            if name:
                                normalized.append(name)
            # NOTE: an all-empty filter list falls through to "no filtering".
            if normalized:
                entries = [e for e in entries if e.skill.name in normalized]
        # Filter out disabled skills unless explicitly requested
        if not include_disabled:
            entries = [e for e in entries if not e.skill.disable_model_invocation]
        return entries

    def build_skills_prompt(
        self,
        skill_filter: Optional[List[str]] = None,
    ) -> str:
        """
        Build a formatted prompt containing available skills.
        :param skill_filter: Optional list of skill names to include
        :return: Formatted skills prompt
        """
        # NOTE(review): logger is already imported at module level; this local
        # import is redundant but harmless.
        from common.log import logger
        entries = self.filter_skills(skill_filter=skill_filter, include_disabled=False)
        logger.debug(f"[SkillManager] Filtered {len(entries)} skills for prompt (total: {len(self.skills)})")
        if entries:
            skill_names = [e.skill.name for e in entries]
            logger.debug(f"[SkillManager] Skills to include: {skill_names}")
        result = format_skill_entries_for_prompt(entries)
        logger.debug(f"[SkillManager] Generated prompt length: {len(result)}")
        return result

    def build_skill_snapshot(
        self,
        skill_filter: Optional[List[str]] = None,
        version: Optional[int] = None,
    ) -> SkillSnapshot:
        """
        Build a snapshot of skills for a specific run.
        :param skill_filter: Optional list of skill names to include
        :param version: Optional version number for the snapshot
        :return: SkillSnapshot
        """
        entries = self.filter_skills(skill_filter=skill_filter, include_disabled=False)
        prompt = format_skill_entries_for_prompt(entries)
        skills_info = []
        resolved_skills = []
        for entry in entries:
            skills_info.append({
                'name': entry.skill.name,
                'primary_env': entry.metadata.primary_env if entry.metadata else None,
            })
            resolved_skills.append(entry.skill)
        return SkillSnapshot(
            prompt=prompt,
            skills=skills_info,
            resolved_skills=resolved_skills,
            version=version,
        )

    def sync_skills_to_workspace(self, target_workspace_dir: str):
        """
        Sync all loaded skills to a target workspace directory.
        This is useful for sandbox environments where skills need to be copied.
        Note: the existing skills directory is removed and fully rebuilt.
        :param target_workspace_dir: Target workspace directory
        """
        import shutil
        target_skills_dir = os.path.join(target_workspace_dir, 'skills')
        # Remove existing skills directory
        if os.path.exists(target_skills_dir):
            shutil.rmtree(target_skills_dir)
        # Create new skills directory
        os.makedirs(target_skills_dir, exist_ok=True)
        # Copy each skill (one subdirectory per skill name)
        for entry in self.skills.values():
            skill_name = entry.skill.name
            source_dir = entry.skill.base_dir
            target_dir = os.path.join(target_skills_dir, skill_name)
            try:
                shutil.copytree(source_dir, target_dir)
                logger.debug(f"Synced skill '{skill_name}' to {target_dir}")
            except Exception as e:
                logger.warning(f"Failed to sync skill '{skill_name}': {e}")
        logger.info(f"Synced {len(self.skills)} skills to {target_skills_dir}")

    def get_skill_by_key(self, skill_key: str) -> Optional[SkillEntry]:
        """
        Get a skill by its skill key (which may differ from name).
        Metadata skill_key takes priority; falls back to matching the name.
        :param skill_key: Skill key to look up
        :return: SkillEntry or None
        """
        for entry in self.skills.values():
            if entry.metadata and entry.metadata.skill_key == skill_key:
                return entry
            if entry.skill.name == skill_key:
                return entry
        return None
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/skills/types.py | Python | """
Type definitions for skills system.
"""
from __future__ import annotations
from typing import Dict, List, Optional, Any
from dataclasses import dataclass, field
@dataclass
class SkillInstallSpec:
    """Specification for installing skill dependencies.

    One spec describes a single way to install what a skill needs
    (a package manager entry or a download).
    """
    kind: str  # brew, pip, npm, download, etc.
    id: Optional[str] = None  # optional identifier for this spec
    label: Optional[str] = None  # human-readable label
    bins: List[str] = field(default_factory=list)  # binaries provided/required
    os: List[str] = field(default_factory=list)  # OS platforms this spec applies to
    formula: Optional[str] = None  # for brew
    package: Optional[str] = None  # for pip/npm
    module: Optional[str] = None  # module name (presumably the importable name — confirm)
    url: Optional[str] = None  # for download
    archive: Optional[str] = None  # archive name for downloads
    extract: bool = False  # whether the downloaded archive should be extracted
    strip_components: Optional[int] = None  # components to strip on extraction
    target_dir: Optional[str] = None  # destination directory for downloads
@dataclass
class SkillMetadata:
    """Metadata for a skill from frontmatter.

    Parsed from the JSON 'metadata' field of a skill's frontmatter.
    """
    always: bool = False  # Always include this skill
    skill_key: Optional[str] = None  # Override skill key
    primary_env: Optional[str] = None  # Primary environment variable
    emoji: Optional[str] = None  # display emoji
    homepage: Optional[str] = None  # skill homepage URL
    os: List[str] = field(default_factory=list)  # Supported OS platforms
    requires: Dict[str, List[str]] = field(default_factory=dict)  # Requirements (category -> names)
    install: List[SkillInstallSpec] = field(default_factory=list)  # install options
@dataclass
class Skill:
    """Represents a skill loaded from a markdown file."""
    name: str  # skill name (frontmatter 'name' or parent directory name)
    description: str  # non-empty description used in prompts
    file_path: str  # path to the skill markdown file
    base_dir: str  # directory containing the skill file
    source: str  # managed, workspace, bundled, etc.
    content: str  # Full markdown content
    disable_model_invocation: bool = False  # frontmatter 'disable-model-invocation'
    frontmatter: Dict[str, Any] = field(default_factory=dict)  # raw parsed frontmatter
@dataclass
class SkillEntry:
    """A skill with parsed metadata."""
    skill: Skill  # the underlying skill
    metadata: Optional[SkillMetadata] = None  # parsed metadata, None when absent/invalid
    user_invocable: bool = True  # Can users invoke this skill directly
@dataclass
class LoadSkillsResult:
    """Result of loading skills from a directory."""
    skills: List[Skill]  # successfully loaded skills
    diagnostics: List[str] = field(default_factory=list)  # non-fatal issues encountered
@dataclass
class SkillSnapshot:
    """Snapshot of skills for a specific run."""
    prompt: str  # Formatted prompt text
    skills: List[Dict[str, str]]  # List of skill info (name, primary_env)
    resolved_skills: List[Skill] = field(default_factory=list)  # full skill objects
    version: Optional[int] = None  # optional snapshot version number
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/__init__.py | Python | # Import base tool
from agent.tools.base_tool import BaseTool
from agent.tools.tool_manager import ToolManager
# Import file operation tools
from agent.tools.read.read import Read
from agent.tools.write.write import Write
from agent.tools.edit.edit import Edit
from agent.tools.bash.bash import Bash
from agent.tools.ls.ls import Ls
from agent.tools.send.send import Send
# Import memory tools
from agent.tools.memory.memory_search import MemorySearchTool
from agent.tools.memory.memory_get import MemoryGetTool
# Import tools with optional dependencies
def _import_optional_tools():
    """Import tools that have optional dependencies.

    Each tool is imported in its own try/except so that one missing
    dependency does not prevent the others from loading. Returns a dict
    mapping tool class name -> class for those that imported successfully.
    """
    from common.log import logger
    tools = {}
    # EnvConfig Tool (requires python-dotenv)
    try:
        from agent.tools.env_config.env_config import EnvConfig
        tools['EnvConfig'] = EnvConfig
    except ImportError as e:
        logger.error(
            f"[Tools] EnvConfig tool not loaded - missing dependency: {e}\n"
            f"  To enable environment variable management, run:\n"
            f"  pip install python-dotenv>=1.0.0"
        )
    except Exception as e:
        logger.error(f"[Tools] EnvConfig tool failed to load: {e}")
    # Scheduler Tool (requires croniter)
    try:
        from agent.tools.scheduler.scheduler_tool import SchedulerTool
        tools['SchedulerTool'] = SchedulerTool
    except ImportError as e:
        logger.error(
            f"[Tools] Scheduler tool not loaded - missing dependency: {e}\n"
            f"  To enable scheduled tasks, run:\n"
            f"  pip install croniter>=2.0.0"
        )
    except Exception as e:
        logger.error(f"[Tools] Scheduler tool failed to load: {e}")
    # WebSearch Tool (conditionally loaded based on API key availability at init time)
    try:
        from agent.tools.web_search.web_search import WebSearch
        tools['WebSearch'] = WebSearch
    except ImportError as e:
        logger.error(f"[Tools] WebSearch not loaded - missing dependency: {e}")
    except Exception as e:
        logger.error(f"[Tools] WebSearch failed to load: {e}")
    return tools
# Load optional tools once at import time; missing ones resolve to None.
_optional_tools = _import_optional_tools()
EnvConfig = _optional_tools.get('EnvConfig')
SchedulerTool = _optional_tools.get('SchedulerTool')
WebSearch = _optional_tools.get('WebSearch')
# NOTE(review): _import_optional_tools() never registers the three keys below,
# so these names are always None — presumably kept so `from agent.tools import
# GoogleSearch` etc. keep working; confirm before removing.
GoogleSearch = _optional_tools.get('GoogleSearch')
FileSave = _optional_tools.get('FileSave')
Terminal = _optional_tools.get('Terminal')
# Delayed import for BrowserTool
def _import_browser_tool():
try:
from agent.tools.browser.browser_tool import BrowserTool
return BrowserTool
except ImportError:
# Return a placeholder class that will prompt the user to install dependencies when instantiated
class BrowserToolPlaceholder:
def __init__(self, *args, **kwargs):
raise ImportError(
"The 'browser-use' package is required to use BrowserTool. "
"Please install it with 'pip install browser-use>=0.1.40'."
)
return BrowserToolPlaceholder
# Dynamically set BrowserTool
# BrowserTool = _import_browser_tool()
# Export all tools (including optional ones that might be None)
__all__ = [
    'BaseTool',
    'ToolManager',
    'Read',
    'Write',
    'Edit',
    'Bash',
    'Ls',
    'Send',
    'MemorySearchTool',
    'MemoryGetTool',
    'EnvConfig',
    'SchedulerTool',
    'WebSearch',
    # Optional tools (may be None if dependencies not available)
    # 'BrowserTool'
]
# NOTE(review): the string below is NOT a module docstring — docstrings must be
# the first statement in a module — so it is a no-op expression here. Consider
# moving it to the top of the file.
"""
Tools module for Agent.
"""
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/base_tool.py | Python | from enum import Enum
from typing import Any, Optional
from common.log import logger
import copy
class ToolStage(Enum):
    """Enum representing tool decision stages.

    Determines when a tool runs relative to the agent's decision loop.
    """
    PRE_PROCESS = "pre_process"  # Tools that need to be actively selected by the agent
    POST_PROCESS = "post_process"  # Tools that automatically execute after final_answer
class ToolResult:
    """Outcome of a tool invocation.

    Attributes:
        status: "success", "error", or None when constructed directly.
        result: Payload produced by the tool.
        ext_data: Optional auxiliary data attached to the result.
    """

    def __init__(self, status: str = None, result: Any = None, ext_data: Any = None):
        self.status, self.result, self.ext_data = status, result, ext_data

    @staticmethod
    def success(result, ext_data: Any = None):
        """Build a result marked as successful."""
        return ToolResult(status="success", result=result, ext_data=ext_data)

    @staticmethod
    def fail(result, ext_data: Any = None):
        """Build a result marked as failed."""
        return ToolResult(status="error", result=result, ext_data=ext_data)
class BaseTool:
    """Base class for all tools.

    Subclasses override ``name``, ``description``, ``params`` (a JSON Schema)
    and implement :meth:`execute`.
    """

    # Default decision stage is pre-process (agent must actively select the tool)
    stage = ToolStage.PRE_PROCESS

    # Class attributes must be inherited/overridden by subclasses
    name: str = "base_tool"
    description: str = "Base tool"
    params: dict = {}  # Store JSON Schema
    model: Optional[Any] = None  # LLM model instance, type depends on bot implementation

    @classmethod
    def get_json_schema(cls) -> dict:
        """Get the standard description of the tool (name/description/parameters)."""
        return {
            "name": cls.name,
            "description": cls.description,
            "parameters": cls.params
        }

    def execute_tool(self, params: dict) -> ToolResult:
        """Run the tool, converting unexpected exceptions into a failed result.

        Fix: previously an exception was only logged and None was implicitly
        returned, which broke callers expecting a ToolResult; now an error
        result carrying the exception message is returned instead.

        :param params: Arguments matching the tool's JSON Schema.
        :return: ToolResult from execute(), or a fail result on exception.
        """
        try:
            return self.execute(params)
        except Exception as e:
            logger.error(e)
            return ToolResult.fail(f"Tool '{self.name}' execution failed: {e}")

    def execute(self, params: dict) -> ToolResult:
        """Specific logic to be implemented by subclasses."""
        raise NotImplementedError

    @classmethod
    def _parse_schema(cls) -> dict:
        """Convert JSON Schema properties into {name: (python_type, default)} fields.

        A missing 'default' maps to Ellipsis (the "required" marker used by
        Pydantic-style field tuples).
        """
        fields = {}
        for name, prop in cls.params["properties"].items():
            # Convert JSON Schema types to Python types
            type_map = {
                "string": str,
                "number": float,
                "integer": int,
                "boolean": bool,
                "array": list,
                "object": dict
            }
            fields[name] = (
                type_map[prop["type"]],
                prop.get("default", ...)
            )
        return fields

    def should_auto_execute(self, context) -> bool:
        """
        Determine if this tool should be automatically executed based on context.
        :param context: The agent context
        :return: True if the tool should be executed, False otherwise
        """
        # Only tools in post-process stage will be automatically executed
        return self.stage == ToolStage.POST_PROCESS

    def close(self):
        """
        Close any resources used by the tool.
        This method should be overridden by tools that need to clean up resources
        such as browser connections, file handles, etc.
        By default, this method does nothing.
        """
        pass
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/bash/__init__.py | Python | from .bash import Bash
__all__ = ['Bash']
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/bash/bash.py | Python | """
Bash tool - Execute bash commands
"""
import os
import sys
import subprocess
import tempfile
from typing import Dict, Any
from agent.tools.base_tool import BaseTool, ToolResult
from agent.tools.utils.truncate import truncate_tail, format_size, DEFAULT_MAX_LINES, DEFAULT_MAX_BYTES
from common.log import logger
from common.utils import expand_path
class Bash(BaseTool):
    """Tool for executing bash commands.

    Runs shell commands in a configured working directory with environment
    variables from ~/.cow/.env injected, tail-truncating long output and
    spilling the full output to a temp file when it exceeds the byte limit.
    """
    name: str = "bash"
    description: str = f"""Execute a bash command in the current working directory. Returns stdout and stderr. Output is truncated to last {DEFAULT_MAX_LINES} lines or {DEFAULT_MAX_BYTES // 1024}KB (whichever is hit first). If truncated, full output is saved to a temp file.
ENVIRONMENT: All API keys from env_config are auto-injected. Use $VAR_NAME directly.
SAFETY:
- Freely create/modify/delete files within the workspace
- For destructive and out-of-workspace commands, explain and confirm first"""
    params: dict = {
        "type": "object",
        "properties": {
            "command": {
                "type": "string",
                "description": "Bash command to execute"
            },
            "timeout": {
                "type": "integer",
                "description": "Timeout in seconds (optional, default: 30)"
            }
        },
        "required": ["command"]
    }

    def __init__(self, config: dict = None):
        self.config = config or {}
        self.cwd = self.config.get("cwd", os.getcwd())
        # Ensure working directory exists
        if not os.path.exists(self.cwd):
            os.makedirs(self.cwd, exist_ok=True)
        self.default_timeout = self.config.get("timeout", 30)
        # Enable safety mode by default (can be disabled in config)
        self.safety_mode = self.config.get("safety_mode", True)

    def execute(self, args: Dict[str, Any]) -> ToolResult:
        """
        Execute a bash command
        :param args: Dictionary containing the command and optional timeout
        :return: Command output or error
        """
        command = args.get("command", "").strip()
        timeout = args.get("timeout", self.default_timeout)
        if not command:
            return ToolResult.fail("Error: command parameter is required")
        # Security check: Prevent accessing sensitive config files
        if "~/.cow/.env" in command or "~/.cow" in command:
            return ToolResult.fail(
                "Error: Access denied. API keys and credentials must be accessed through the env_config tool only."
            )
        # Optional safety check - only warn about extremely dangerous commands
        if self.safety_mode:
            warning = self._get_safety_warning(command)
            if warning:
                return ToolResult.fail(
                    f"Safety Warning: {warning}\n\nIf you believe this command is safe and necessary, please ask the user for confirmation first, explaining what the command does and why it's needed.")
        try:
            # Prepare environment with .env file variables
            env = os.environ.copy()
            # Load environment variables from ~/.cow/.env if it exists
            env_file = expand_path("~/.cow/.env")
            if os.path.exists(env_file):
                try:
                    from dotenv import dotenv_values
                    env_vars = dotenv_values(env_file)
                    env.update(env_vars)
                    logger.debug(f"[Bash] Loaded {len(env_vars)} variables from {env_file}")
                except ImportError:
                    logger.debug("[Bash] python-dotenv not installed, skipping .env loading")
                except Exception as e:
                    logger.debug(f"[Bash] Failed to load .env: {e}")
            # getuid() only exists on Unix-like systems
            if hasattr(os, 'getuid'):
                logger.debug(f"[Bash] Process UID: {os.getuid()}")
            else:
                logger.debug(f"[Bash] Process User: {os.environ.get('USERNAME', os.environ.get('USER', 'unknown'))}")
            # Execute command with inherited environment variables
            result = subprocess.run(
                command,
                shell=True,
                cwd=self.cwd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
                timeout=timeout,
                env=env
            )
            logger.debug(f"[Bash] Exit code: {result.returncode}")
            logger.debug(f"[Bash] Stdout length: {len(result.stdout)}")
            logger.debug(f"[Bash] Stderr length: {len(result.stderr)}")
            # Workaround for exit code 126 with no output
            # (126 means "found but not executable"; with no output it has been
            # observed as a spurious shell failure, so retry without shell=True)
            if result.returncode == 126 and not result.stdout and not result.stderr:
                logger.warning(f"[Bash] Exit 126 with no output - trying alternative execution method")
                # Try using argument list instead of shell=True
                import shlex
                try:
                    parts = shlex.split(command)
                    if len(parts) > 0:
                        logger.info(f"[Bash] Retrying with argument list: {parts[:3]}...")
                        retry_result = subprocess.run(
                            parts,
                            cwd=self.cwd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            text=True,
                            timeout=timeout,
                            env=env
                        )
                        logger.debug(f"[Bash] Retry exit code: {retry_result.returncode}, stdout: {len(retry_result.stdout)}, stderr: {len(retry_result.stderr)}")
                        # If retry succeeded, use retry result
                        if retry_result.returncode == 0 or retry_result.stdout or retry_result.stderr:
                            result = retry_result
                        else:
                            # Both attempts failed - check if this is openai-image-vision skill
                            if 'openai-image-vision' in command or 'vision.sh' in command:
                                # Create a mock result with helpful error message
                                from types import SimpleNamespace
                                result = SimpleNamespace(
                                    returncode=1,
                                    stdout='{"error": "图片无法解析", "reason": "该图片格式可能不受支持,或图片文件存在问题", "suggestion": "请尝试其他图片"}',
                                    stderr=''
                                )
                                logger.info(f"[Bash] Converted exit 126 to user-friendly image error message for vision skill")
                except Exception as retry_err:
                    logger.warning(f"[Bash] Retry failed: {retry_err}")
            # Combine stdout and stderr
            output = result.stdout
            if result.stderr:
                output += "\n" + result.stderr
            # Check if we need to save full output to temp file
            temp_file_path = None
            total_bytes = len(output.encode('utf-8'))
            if total_bytes > DEFAULT_MAX_BYTES:
                # Save full output to temp file
                with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.log', prefix='bash-') as f:
                    f.write(output)
                    temp_file_path = f.name
            # Apply tail truncation
            truncation = truncate_tail(output)
            output_text = truncation.content or "(no output)"
            # Build result
            details = {}
            if truncation.truncated:
                details["truncation"] = truncation.to_dict()
                if temp_file_path:
                    details["full_output_path"] = temp_file_path
                # Build notice describing which lines are shown
                start_line = truncation.total_lines - truncation.output_lines + 1
                end_line = truncation.total_lines
                if truncation.last_line_partial:
                    # Edge case: last line alone > 30KB
                    last_line = output.split('\n')[-1] if output else ""
                    last_line_size = format_size(len(last_line.encode('utf-8')))
                    output_text += f"\n\n[Showing last {format_size(truncation.output_bytes)} of line {end_line} (line is {last_line_size}). Full output: {temp_file_path}]"
                elif truncation.truncated_by == "lines":
                    output_text += f"\n\n[Showing lines {start_line}-{end_line} of {truncation.total_lines}. Full output: {temp_file_path}]"
                else:
                    output_text += f"\n\n[Showing lines {start_line}-{end_line} of {truncation.total_lines} ({format_size(DEFAULT_MAX_BYTES)} limit). Full output: {temp_file_path}]"
            # Check exit code: non-zero exit is reported as a failed result
            if result.returncode != 0:
                output_text += f"\n\nCommand exited with code {result.returncode}"
                return ToolResult.fail({
                    "output": output_text,
                    "exit_code": result.returncode,
                    "details": details if details else None
                })
            return ToolResult.success({
                "output": output_text,
                "exit_code": result.returncode,
                "details": details if details else None
            })
        except subprocess.TimeoutExpired:
            return ToolResult.fail(f"Error: Command timed out after {timeout} seconds")
        except Exception as e:
            return ToolResult.fail(f"Error executing command: {str(e)}")

    def _get_safety_warning(self, command: str) -> str:
        """
        Get safety warning for potentially dangerous commands
        Only warns about extremely dangerous system-level operations
        :param command: Command to check
        :return: Warning message if dangerous, empty string if safe
        """
        cmd_lower = command.lower().strip()
        # Only block extremely dangerous system operations
        # (substring matching, so e.g. 'mkfs' also matches 'mkfs.ext4')
        dangerous_patterns = [
            # System shutdown/reboot
            ("shutdown", "This command will shut down the system"),
            ("reboot", "This command will reboot the system"),
            ("halt", "This command will halt the system"),
            ("poweroff", "This command will power off the system"),
            # Critical system modifications
            ("rm -rf /", "This command will delete the entire filesystem"),
            ("rm -rf /*", "This command will delete the entire filesystem"),
            ("dd if=/dev/zero", "This command can destroy disk data"),
            ("mkfs", "This command will format a filesystem, destroying all data"),
            ("fdisk", "This command modifies disk partitions"),
            # User/system management (only if targeting system users)
            ("userdel root", "This command will delete the root user"),
            ("passwd root", "This command will change the root password"),
        ]
        for pattern, warning in dangerous_patterns:
            if pattern in cmd_lower:
                return warning
        # Check for recursive deletion outside workspace
        if "rm" in cmd_lower and "-rf" in cmd_lower:
            # Allow deletion within current workspace
            if not any(path in cmd_lower for path in ["./", self.cwd.lower()]):
                # Check if targeting system directories
                system_dirs = ["/bin", "/usr", "/etc", "/var", "/home", "/root", "/sys", "/proc"]
                if any(sysdir in cmd_lower for sysdir in system_dirs):
                    return "This command will recursively delete system directories"
        return ""  # No warning needed
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/browser_tool.py | Python | def copy(self):
"""
Special copy method for browser tool to avoid recreating browser instance.
:return: A new instance with shared browser reference but unique model
"""
new_tool = self.__class__()
# Copy essential attributes
new_tool.model = self.model
new_tool.context = getattr(self, 'context', None)
new_tool.config = getattr(self, 'config', None)
# Share the browser instance instead of creating a new one
if hasattr(self, 'browser'):
new_tool.browser = self.browser
return new_tool | zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/edit/__init__.py | Python | from .edit import Edit
__all__ = ['Edit']
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/edit/edit.py | Python | """
Edit tool - Precise file editing
Edit files through exact text replacement
"""
import os
from typing import Dict, Any
from agent.tools.base_tool import BaseTool, ToolResult
from common.utils import expand_path
from agent.tools.utils.diff import (
strip_bom,
detect_line_ending,
normalize_to_lf,
restore_line_endings,
normalize_for_fuzzy_match,
fuzzy_find_text,
generate_diff_string
)
class Edit(BaseTool):
    """Tool for precise file editing.

    Replaces exactly one occurrence of ``oldText`` with ``newText``, or
    appends ``newText`` to the end of the file when ``oldText`` is empty.
    A leading BOM and the file's original line-ending style are preserved
    across the edit.
    """
    name: str = "edit"
    description: str = "Edit a file by replacing exact text, or append to end if oldText is empty. For append: use empty oldText. For replace: oldText must match exactly (including whitespace)."
    params: dict = {
        "type": "object",
        "properties": {
            "path": {
                "type": "string",
                "description": "Path to the file to edit (relative or absolute)"
            },
            "oldText": {
                "type": "string",
                "description": "Text to find and replace. Use empty string to append to end of file. For replacement: must match exactly including whitespace."
            },
            "newText": {
                "type": "string",
                "description": "New text to replace the old text with"
            }
        },
        "required": ["path", "oldText", "newText"]
    }

    def __init__(self, config: dict = None):
        # cwd: workspace root used to resolve relative paths.
        # memory_manager: optional; notified when files under memory/ change.
        self.config = config or {}
        self.cwd = self.config.get("cwd", os.getcwd())
        self.memory_manager = self.config.get("memory_manager", None)

    def execute(self, args: Dict[str, Any]) -> ToolResult:
        """
        Execute file edit operation.

        :param args: Contains file path ("path"), old text ("oldText") and
                     new text ("newText"). Empty/blank oldText selects
                     append mode.
        :return: ToolResult with a diff of the change, or a failure message.
        """
        path = args.get("path", "").strip()
        old_text = args.get("oldText", "")
        new_text = args.get("newText", "")
        if not path:
            return ToolResult.fail("Error: path parameter is required")
        # Resolve path
        absolute_path = self._resolve_path(path)
        # Check if file exists
        if not os.path.exists(absolute_path):
            return ToolResult.fail(f"Error: File not found: {path}")
        # Check if readable/writable
        if not os.access(absolute_path, os.R_OK | os.W_OK):
            return ToolResult.fail(f"Error: File is not readable/writable: {path}")
        try:
            # Read file
            with open(absolute_path, 'r', encoding='utf-8') as f:
                raw_content = f.read()
            # Remove BOM (LLM won't include invisible BOM in oldText)
            bom, content = strip_bom(raw_content)
            # Detect original line ending
            original_ending = detect_line_ending(content)
            # Normalize everything to LF so matching is line-ending agnostic;
            # the original ending is restored before writing back.
            normalized_content = normalize_to_lf(content)
            normalized_old_text = normalize_to_lf(old_text)
            normalized_new_text = normalize_to_lf(new_text)
            # Special case: empty (or whitespace-only) oldText means append to end of file
            if not old_text or not old_text.strip():
                # Append mode: add newText to the end
                # Add newline before newText if file doesn't end with one
                if normalized_content and not normalized_content.endswith('\n'):
                    new_content = normalized_content + '\n' + normalized_new_text
                else:
                    new_content = normalized_content + normalized_new_text
                base_content = normalized_content  # For verification
            else:
                # Normal edit mode: find and replace
                # Use fuzzy matching to find old text (try exact match first, then fuzzy match)
                match_result = fuzzy_find_text(normalized_content, normalized_old_text)
                if not match_result.found:
                    return ToolResult.fail(
                        f"Error: Could not find the exact text in {path}. "
                        "The old text must match exactly including all whitespace and newlines."
                    )
                # Calculate occurrence count (use fuzzy normalized content for consistency)
                fuzzy_content = normalize_for_fuzzy_match(normalized_content)
                fuzzy_old_text = normalize_for_fuzzy_match(normalized_old_text)
                occurrences = fuzzy_content.count(fuzzy_old_text)
                if occurrences > 1:
                    # Ambiguous target: refuse rather than guess which one to replace.
                    return ToolResult.fail(
                        f"Error: Found {occurrences} occurrences of the text in {path}. "
                        "The text must be unique. Please provide more context to make it unique."
                    )
                # Execute replacement (use matched text position)
                base_content = match_result.content_for_replacement
                new_content = (
                    base_content[:match_result.index] +
                    normalized_new_text +
                    base_content[match_result.index + match_result.match_length:]
                )
            # Verify replacement actually changed content
            if base_content == new_content:
                return ToolResult.fail(
                    f"Error: No changes made to {path}. "
                    "The replacement produced identical content. "
                    "This might indicate an issue with special characters or the text not existing as expected."
                )
            # Restore original line endings
            final_content = bom + restore_line_endings(new_content, original_ending)
            # Write file
            with open(absolute_path, 'w', encoding='utf-8') as f:
                f.write(final_content)
            # Generate diff
            diff_result = generate_diff_string(base_content, new_content)
            result = {
                "message": f"Successfully replaced text in {path}",
                "path": path,
                "diff": diff_result['diff'],
                "first_changed_line": diff_result['first_changed_line']
            }
            # Notify memory manager if file is in memory directory
            if self.memory_manager and "memory/" in path:
                try:
                    self.memory_manager.mark_dirty()
                except Exception as e:
                    # Don't fail the edit if memory notification fails
                    pass
            return ToolResult.success(result)
        except UnicodeDecodeError:
            return ToolResult.fail(f"Error: File is not a valid text file (encoding error): {path}")
        except PermissionError:
            return ToolResult.fail(f"Error: Permission denied accessing {path}")
        except Exception as e:
            return ToolResult.fail(f"Error editing file: {str(e)}")

    def _resolve_path(self, path: str) -> str:
        """
        Resolve path to absolute path.

        :param path: Relative or absolute path; '~' is expanded first.
        :return: Absolute path (relative paths resolve against self.cwd).
        """
        # Expand ~ to user home directory
        path = expand_path(path)
        if os.path.isabs(path):
            return path
        return os.path.abspath(os.path.join(self.cwd, path))
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/env_config/__init__.py | Python | from agent.tools.env_config.env_config import EnvConfig
__all__ = ['EnvConfig']
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/env_config/env_config.py | Python | """
Environment Configuration Tool - Manage API keys and environment variables
"""
import os
import re
from typing import Dict, Any
from pathlib import Path
from agent.tools.base_tool import BaseTool, ToolResult
from common.log import logger
from common.utils import expand_path
# API key knowledge base: well-known environment variable names and what
# they are used for. Keys are exact env var names; values are the
# human-readable descriptions shown to users by the env_config tool.
API_KEY_REGISTRY = {
    # AI model providers
    "OPENAI_API_KEY": "OpenAI API 密钥 (用于GPT模型、Embedding模型)",
    "GEMINI_API_KEY": "Google Gemini API 密钥",
    "CLAUDE_API_KEY": "Claude API 密钥 (用于Claude模型)",
    "LINKAI_API_KEY": "LinkAI智能体平台 API 密钥,支持多种模型切换",
    # Search services
    "BOCHA_API_KEY": "博查 AI 搜索 API 密钥 ",
}
class EnvConfig(BaseTool):
    """Tool for managing environment variables (API keys, etc.)

    Values are persisted to ~/.cow/.env (outside the workspace, so workspace
    file tools cannot read them), mirrored into os.environ for the current
    process, and skills are hot-reloaded through the agent bridge when one
    is available.
    """
    name: str = "env_config"
    description: str = (
        "Manage API keys and skill configurations securely. "
        "Use this tool when user wants to configure API keys (like BOCHA_API_KEY, OPENAI_API_KEY), "
        "view configured keys, or manage skill settings. "
        "Actions: 'set' (add/update key), 'get' (view specific key), 'list' (show all configured keys), 'delete' (remove key). "
        "Values are automatically masked for security. Changes take effect immediately via hot reload."
    )
    params: dict = {
        "type": "object",
        "properties": {
            "action": {
                "type": "string",
                "description": "Action to perform: 'set', 'get', 'list', 'delete'",
                "enum": ["set", "get", "list", "delete"]
            },
            "key": {
                "type": "string",
                "description": (
                    "Environment variable key name. Common keys:\n"
                    "- OPENAI_API_KEY: OpenAI API (GPT models)\n"
                    "- OPENAI_API_BASE: OpenAI API base URL\n"
                    "- CLAUDE_API_KEY: Anthropic Claude API\n"
                    "- GEMINI_API_KEY: Google Gemini API\n"
                    "- LINKAI_API_KEY: LinkAI platform\n"
                    "- BOCHA_API_KEY: Bocha AI search (博查搜索)\n"
                    "Use exact key names (case-sensitive, all uppercase with underscores)"
                )
            },
            "value": {
                "type": "string",
                "description": "Value to set for the environment variable (for 'set' action)"
            }
        },
        "required": ["action"]
    }

    def __init__(self, config: dict = None):
        self.config = config or {}
        # Store env config in ~/.cow directory (outside workspace for security)
        self.env_dir = expand_path("~/.cow")
        self.env_path = os.path.join(self.env_dir, '.env')
        self.agent_bridge = self.config.get("agent_bridge")  # Reference to AgentBridge for hot reload
        # Don't create .env file in __init__ to avoid issues during tool discovery
        # It will be created on first use in execute()

    def _ensure_env_file(self):
        """Ensure the .env file (and its parent directory) exists."""
        # Create ~/.cow directory if it doesn't exist
        os.makedirs(self.env_dir, exist_ok=True)
        if not os.path.exists(self.env_path):
            Path(self.env_path).touch()
            logger.info(f"[EnvConfig] Created .env file at {self.env_path}")

    def _mask_value(self, value: str) -> str:
        """Mask sensitive parts of a value for logging/display.

        Values of 10 chars or fewer are fully masked; longer values keep the
        first 6 and last 4 characters.
        """
        if not value or len(value) <= 10:
            return "***"
        return f"{value[:6]}***{value[-4:]}"

    def _read_env_file(self) -> Dict[str, str]:
        """Read all key-value pairs from the .env file.

        Blank lines and '#' comments are skipped; each remaining line is
        parsed as KEY=VALUE with surrounding whitespace trimmed.
        """
        env_vars = {}
        if os.path.exists(self.env_path):
            with open(self.env_path, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    # Skip empty lines and comments
                    if not line or line.startswith('#'):
                        continue
                    # Parse KEY=VALUE (value may itself contain '=')
                    match = re.match(r'^([^=]+)=(.*)$', line)
                    if match:
                        key, value = match.groups()
                        env_vars[key.strip()] = value.strip()
        return env_vars

    def _write_env_file(self, env_vars: Dict[str, str]):
        """Write all key-value pairs to the .env file (sorted, with header)."""
        with open(self.env_path, 'w', encoding='utf-8') as f:
            f.write("# Environment variables for agent skills\n")
            f.write("# Auto-managed by env_config tool\n\n")
            for key, value in sorted(env_vars.items()):
                f.write(f"{key}={value}\n")

    def _reload_env(self):
        """Reload environment variables from the .env file into os.environ."""
        env_vars = self._read_env_file()
        for key, value in env_vars.items():
            os.environ[key] = value
        logger.debug(f"[EnvConfig] Reloaded {len(env_vars)} environment variables")

    def _refresh_skills(self):
        """Refresh skills after environment variable changes.

        Returns True only when an agent bridge is present and the refresh
        succeeded; failures are logged but never raised.
        """
        if self.agent_bridge:
            try:
                # Reload .env file
                self._reload_env()
                # Refresh skills in all agent instances
                refreshed = self.agent_bridge.refresh_all_skills()
                logger.info(f"[EnvConfig] Refreshed skills in {refreshed} agent instance(s)")
                return True
            except Exception as e:
                logger.warning(f"[EnvConfig] Failed to refresh skills: {e}")
                return False
        return False

    def execute(self, args: Dict[str, Any]) -> ToolResult:
        """
        Execute environment configuration operation.

        :param args: Contains action ('set'/'get'/'list'/'delete'), and for
                     key-based actions the 'key' and (for 'set') 'value'.
        :return: Result of the operation; secret values are always masked.
        """
        # Ensure .env file exists on first use
        self._ensure_env_file()
        action = args.get("action")
        key = args.get("key")
        value = args.get("value")
        try:
            if action == "set":
                # NOTE(review): empty-string values are rejected here along
                # with missing ones — confirm that is intended.
                if not key or not value:
                    return ToolResult.fail("Error: 'key' and 'value' are required for 'set' action.")
                # Read current env vars
                env_vars = self._read_env_file()
                # Update the key
                env_vars[key] = value
                # Write back to file
                self._write_env_file(env_vars)
                # Update current process env
                os.environ[key] = value
                logger.info(f"[EnvConfig] Set {key}={self._mask_value(value)}")
                # Try to refresh skills immediately
                refreshed = self._refresh_skills()
                result = {
                    "message": f"Successfully set {key}",
                    "key": key,
                    "value": self._mask_value(value),
                }
                if refreshed:
                    result["note"] = "✅ Skills refreshed automatically - changes are now active"
                else:
                    result["note"] = "⚠️ Skills not refreshed - restart agent to load new skills"
                return ToolResult.success(result)
            elif action == "get":
                if not key:
                    return ToolResult.fail("Error: 'key' is required for 'get' action.")
                # Check in file first, then in current env
                env_vars = self._read_env_file()
                value = env_vars.get(key) or os.getenv(key)
                # Get description from registry
                description = API_KEY_REGISTRY.get(key, "未知用途的环境变量")
                if value is not None:
                    logger.info(f"[EnvConfig] Got {key}={self._mask_value(value)}")
                    return ToolResult.success({
                        "key": key,
                        "value": self._mask_value(value),
                        "description": description,
                        "exists": True,
                        "note": f"Value is masked for security. In bash, use ${key} directly — it is auto-injected."
                    })
                else:
                    return ToolResult.success({
                        "key": key,
                        "description": description,
                        "exists": False,
                        "message": f"Environment variable '{key}' is not set"
                    })
            elif action == "list":
                env_vars = self._read_env_file()
                # Build detailed variable list with descriptions (values masked)
                variables_with_info = {}
                for key, value in env_vars.items():
                    variables_with_info[key] = {
                        "value": self._mask_value(value),
                        "description": API_KEY_REGISTRY.get(key, "未知用途的环境变量")
                    }
                logger.info(f"[EnvConfig] Listed {len(env_vars)} environment variables")
                if not env_vars:
                    return ToolResult.success({
                        "message": "No environment variables configured",
                        "variables": {},
                        "note": "常用的 API 密钥可以通过 env_config(action='set', key='KEY_NAME', value='your-key') 来配置"
                    })
                return ToolResult.success({
                    "message": f"Found {len(env_vars)} environment variable(s)",
                    "variables": variables_with_info
                })
            elif action == "delete":
                if not key:
                    return ToolResult.fail("Error: 'key' is required for 'delete' action.")
                # Read current env vars
                env_vars = self._read_env_file()
                if key not in env_vars:
                    # Deleting a missing key is treated as success (idempotent).
                    return ToolResult.success({
                        "message": f"Environment variable '{key}' was not set",
                        "key": key
                    })
                # Remove the key
                del env_vars[key]
                # Write back to file
                self._write_env_file(env_vars)
                # Remove from current process env
                if key in os.environ:
                    del os.environ[key]
                logger.info(f"[EnvConfig] Deleted {key}")
                # Try to refresh skills immediately
                refreshed = self._refresh_skills()
                result = {
                    "message": f"Successfully deleted {key}",
                    "key": key,
                }
                if refreshed:
                    result["note"] = "✅ Skills refreshed automatically - changes are now active"
                else:
                    result["note"] = "⚠️ Skills not refreshed - restart agent to apply changes"
                return ToolResult.success(result)
            else:
                return ToolResult.fail(f"Error: Unknown action '{action}'. Use 'set', 'get', 'list', or 'delete'.")
        except Exception as e:
            logger.error(f"[EnvConfig] Error: {e}", exc_info=True)
            return ToolResult.fail(f"EnvConfig tool error: {str(e)}")
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/ls/__init__.py | Python | from .ls import Ls
__all__ = ['Ls']
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/ls/ls.py | Python | """
Ls tool - List directory contents
"""
import os
from typing import Dict, Any
from agent.tools.base_tool import BaseTool, ToolResult
from agent.tools.utils.truncate import truncate_head, format_size, DEFAULT_MAX_BYTES
from common.utils import expand_path
# Default maximum number of directory entries returned by a single ls call.
DEFAULT_LIMIT = 500
class Ls(BaseTool):
    """Tool for listing directory contents.

    Entries are returned sorted alphabetically (case-insensitive), with a
    '/' suffix for directories; dotfiles are included. Output is capped by
    both an entry-count limit and a byte limit.
    """
    name: str = "ls"
    description: str = f"List directory contents. Returns entries sorted alphabetically, with '/' suffix for directories. Includes dotfiles. Output is truncated to {DEFAULT_LIMIT} entries or {DEFAULT_MAX_BYTES // 1024}KB (whichever is hit first)."
    params: dict = {
        "type": "object",
        "properties": {
            "path": {
                "type": "string",
                "description": "Directory to list. IMPORTANT: Relative paths are based on workspace directory. To access directories outside workspace, use absolute paths starting with ~ or /."
            },
            "limit": {
                "type": "integer",
                "description": f"Maximum number of entries to return (default: {DEFAULT_LIMIT})"
            }
        },
        "required": []
    }

    def __init__(self, config: dict = None):
        self.config = config or {}
        # Workspace root against which relative paths are resolved.
        self.cwd = self.config.get("cwd", os.getcwd())

    def execute(self, args: Dict[str, Any]) -> ToolResult:
        """
        Execute directory listing.

        :param args: Listing parameters ("path", optional "limit").
        :return: Directory contents (with truncation notices) or error.
        """
        path = args.get("path", ".").strip()
        limit = args.get("limit", DEFAULT_LIMIT)
        # Resolve path
        absolute_path = self._resolve_path(path)
        # Security check: Prevent accessing sensitive config directory
        env_config_dir = expand_path("~/.cow")
        if os.path.abspath(absolute_path) == os.path.abspath(env_config_dir):
            return ToolResult.fail(
                "Error: Access denied. API keys and credentials must be accessed through the env_config tool only."
            )
        if not os.path.exists(absolute_path):
            # Provide helpful hint if using relative path
            if not os.path.isabs(path) and not path.startswith('~'):
                return ToolResult.fail(
                    f"Error: Path not found: {path}\n"
                    f"Resolved to: {absolute_path}\n"
                    f"Hint: Relative paths are based on workspace ({self.cwd}). For files outside workspace, use absolute paths."
                )
            return ToolResult.fail(f"Error: Path not found: {path}")
        if not os.path.isdir(absolute_path):
            return ToolResult.fail(f"Error: Not a directory: {path}")
        try:
            # Read directory entries
            entries = os.listdir(absolute_path)
            # Sort alphabetically (case-insensitive)
            entries.sort(key=lambda x: x.lower())
            # Format entries with directory indicators
            results = []
            entry_limit_reached = False
            for entry in entries:
                if len(results) >= limit:
                    entry_limit_reached = True
                    break
                full_path = os.path.join(absolute_path, entry)
                try:
                    if os.path.isdir(full_path):
                        results.append(entry + '/')
                    else:
                        results.append(entry)
                except OSError:
                    # Fix: previously a bare `except:` which also swallowed
                    # SystemExit/KeyboardInterrupt. Only skip entries whose
                    # stat fails (broken symlinks, permission races, etc.).
                    continue
            if not results:
                return ToolResult.success({"message": "(empty directory)", "entries": []})
            # Format output
            raw_output = '\n'.join(results)
            # Entry count is limited above; truncate_head only enforces bytes.
            truncation = truncate_head(raw_output, max_lines=999999)  # Only limit by bytes
            output = truncation.content
            details = {}
            notices = []
            if entry_limit_reached:
                notices.append(f"{limit} entries limit reached. Use limit={limit * 2} for more")
                details["entry_limit_reached"] = limit
            if truncation.truncated:
                notices.append(f"{format_size(DEFAULT_MAX_BYTES)} limit reached")
                details["truncation"] = truncation.to_dict()
            if notices:
                output += f"\n\n[{'. '.join(notices)}]"
            return ToolResult.success({
                "output": output,
                "entry_count": len(results),
                "details": details if details else None
            })
        except PermissionError:
            return ToolResult.fail(f"Error: Permission denied reading directory: {path}")
        except Exception as e:
            return ToolResult.fail(f"Error listing directory: {str(e)}")

    def _resolve_path(self, path: str) -> str:
        """Resolve a (possibly relative or ~-prefixed) path to an absolute path."""
        # Expand ~ to user home directory
        path = expand_path(path)
        if os.path.isabs(path):
            return path
        return os.path.abspath(os.path.join(self.cwd, path))
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/memory/__init__.py | Python | """
Memory tools for Agent
Provides memory_search and memory_get tools
"""
from agent.tools.memory.memory_search import MemorySearchTool
from agent.tools.memory.memory_get import MemoryGetTool
__all__ = ['MemorySearchTool', 'MemoryGetTool']
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/memory/memory_get.py | Python | """
Memory get tool
Allows agents to read specific sections from memory files
"""
from agent.tools.base_tool import BaseTool
class MemoryGetTool(BaseTool):
    """Tool for reading memory file contents.

    Reads a whole memory file or a specific line range. Relative paths are
    resolved under the workspace's memory/ directory (except MEMORY.md,
    which lives at the workspace root).
    """
    name: str = "memory_get"
    description: str = (
        "Read specific content from memory files. "
        "Use this to get full context from a memory file or specific line range."
    )
    params: dict = {
        "type": "object",
        "properties": {
            "path": {
                "type": "string",
                "description": "Relative path to the memory file (e.g. 'MEMORY.md', 'memory/2026-01-01.md')"
            },
            "start_line": {
                "type": "integer",
                "description": "Starting line number (optional, default: 1)",
                "default": 1
            },
            "num_lines": {
                "type": "integer",
                "description": "Number of lines to read (optional, reads all if not specified)"
            }
        },
        "required": ["path"]
    }

    def __init__(self, memory_manager):
        """
        Initialize memory get tool.

        Args:
            memory_manager: MemoryManager instance (provides workspace config)
        """
        super().__init__()
        self.memory_manager = memory_manager

    def execute(self, args: dict):
        """
        Execute memory file read.

        Args:
            args: Dictionary with path, start_line (1-indexed), num_lines
        Returns:
            ToolResult with file content plus a small metadata header
        """
        from agent.tools.base_tool import ToolResult
        path = args.get("path")
        start_line = args.get("start_line", 1)
        num_lines = args.get("num_lines")
        if not path:
            return ToolResult.fail("Error: path parameter is required")
        try:
            # get_workspace() is expected to return a pathlib.Path-like object
            # (it is combined with '/' below).
            workspace_dir = self.memory_manager.config.get_workspace()
            # Auto-prepend memory/ if not present and not absolute path
            # Exception: MEMORY.md is in the root directory
            # NOTE(review): an absolute path ('/...') skips the memory/ prefix
            # and, via pathlib's '/' semantics, escapes the workspace entirely
            # — confirm that is intended.
            if not path.startswith('memory/') and not path.startswith('/') and path != 'MEMORY.md':
                path = f'memory/{path}'
            file_path = workspace_dir / path
            if not file_path.exists():
                return ToolResult.fail(f"Error: File not found: {path}")
            content = file_path.read_text(encoding='utf-8')
            lines = content.split('\n')
            # Handle line range: clamp to the start of the file
            if start_line < 1:
                start_line = 1
            start_idx = start_line - 1
            if num_lines:
                end_idx = start_idx + num_lines
                selected_lines = lines[start_idx:end_idx]
            else:
                selected_lines = lines[start_idx:]
            result = '\n'.join(selected_lines)
            # Add metadata header so the caller knows what slice it got
            total_lines = len(lines)
            shown_lines = len(selected_lines)
            output = [
                f"File: {path}",
                f"Lines: {start_line}-{start_line + shown_lines - 1} (total: {total_lines})",
                "",
                result
            ]
            return ToolResult.success('\n'.join(output))
        except Exception as e:
            return ToolResult.fail(f"Error reading memory file: {str(e)}")
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/memory/memory_search.py | Python | """
Memory search tool
Allows agents to search their memory using semantic and keyword search
"""
from typing import Dict, Any, Optional
from agent.tools.base_tool import BaseTool
class MemorySearchTool(BaseTool):
    """Tool for searching agent memory (semantic + keyword)."""
    name: str = "memory_search"
    description: str = (
        "Search agent's long-term memory using semantic and keyword search. "
        "Use this to recall past conversations, preferences, and knowledge."
    )
    params: dict = {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "Search query (can be natural language question or keywords)"
            },
            "max_results": {
                "type": "integer",
                "description": "Maximum number of results to return (default: 10)",
                "default": 10
            },
            "min_score": {
                "type": "number",
                "description": "Minimum relevance score (0-1, default: 0.1)",
                "default": 0.1
            }
        },
        "required": ["query"]
    }

    def __init__(self, memory_manager, user_id: Optional[str] = None):
        """
        Initialize memory search tool.

        Args:
            memory_manager: MemoryManager instance (exposes async search())
            user_id: Optional user ID for scoped search
        """
        super().__init__()
        self.memory_manager = memory_manager
        self.user_id = user_id

    def execute(self, args: dict):
        """
        Execute memory search.

        Args:
            args: Dictionary with query, max_results, min_score
        Returns:
            ToolResult with formatted search results
        """
        from agent.tools.base_tool import ToolResult
        import asyncio
        query = args.get("query")
        max_results = args.get("max_results", 10)
        min_score = args.get("min_score", 0.1)
        if not query:
            return ToolResult.fail("Error: query parameter is required")
        try:
            # Run async search in sync context.
            # NOTE(review): asyncio.run raises RuntimeError if called from a
            # running event loop; that lands in the except below as a failure.
            results = asyncio.run(self.memory_manager.search(
                query=query,
                user_id=self.user_id,
                max_results=max_results,
                min_score=min_score,
                include_shared=True
            ))
            if not results:
                # Return clear message that no memories exist yet
                # This prevents infinite retry loops
                return ToolResult.success(
                    f"No memories found for '{query}'. "
                    f"This is normal if no memories have been stored yet. "
                    f"You can store new memories by writing to MEMORY.md or memory/YYYY-MM-DD.md files."
                )
            # Format results: path, line range, score, and a short snippet each
            output = [f"Found {len(results)} relevant memories:\n"]
            for i, result in enumerate(results, 1):
                output.append(f"\n{i}. {result.path} (lines {result.start_line}-{result.end_line})")
                output.append(f"   Score: {result.score:.3f}")
                output.append(f"   Snippet: {result.snippet}")
            return ToolResult.success("\n".join(output))
        except Exception as e:
            return ToolResult.fail(f"Error searching memory: {str(e)}")
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/read/__init__.py | Python | from .read import Read
__all__ = ['Read']
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/read/read.py | Python | """
Read tool - Read file contents
Supports text files, images (jpg, png, gif, webp), and PDF files
"""
import os
from typing import Dict, Any
from pathlib import Path
from agent.tools.base_tool import BaseTool, ToolResult
from agent.tools.utils.truncate import truncate_head, format_size, DEFAULT_MAX_LINES, DEFAULT_MAX_BYTES
from common.utils import expand_path
class Read(BaseTool):
"""Tool for reading file contents"""
name: str = "read"
description: str = f"Read or inspect file contents. For text/PDF files, returns content (truncated to {DEFAULT_MAX_LINES} lines or {DEFAULT_MAX_BYTES // 1024}KB). For images/videos/audio, returns metadata only (file info, size, type). Use offset/limit for large text files."
params: dict = {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the file to read. IMPORTANT: Relative paths are based on workspace directory. To access files outside workspace, use absolute paths starting with ~ or /."
},
"offset": {
"type": "integer",
"description": "Line number to start reading from (1-indexed, optional). Use negative values to read from end (e.g. -20 for last 20 lines)"
},
"limit": {
"type": "integer",
"description": "Maximum number of lines to read (optional)"
}
},
"required": ["path"]
}
def __init__(self, config: dict = None):
    """Initialize the read tool.

    :param config: Optional dict; "cwd" sets the workspace root used to
                   resolve relative paths.
    """
    self.config = config or {}
    self.cwd = self.config.get("cwd", os.getcwd())
    # File type categories — extension sets drive the dispatch in execute():
    # images/videos/audio/binary/archives return metadata only, PDFs go
    # through the PDF reader, and text_extensions are read with truncation.
    self.image_extensions = {'.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp', '.svg', '.ico'}
    self.video_extensions = {'.mp4', '.avi', '.mov', '.mkv', '.flv', '.wmv', '.webm', '.m4v'}
    self.audio_extensions = {'.mp3', '.wav', '.ogg', '.m4a', '.flac', '.aac', '.wma'}
    self.binary_extensions = {'.exe', '.dll', '.so', '.dylib', '.bin', '.dat', '.db', '.sqlite'}
    self.archive_extensions = {'.zip', '.tar', '.gz', '.rar', '.7z', '.bz2', '.xz'}
    self.pdf_extensions = {'.pdf'}
    # Readable text formats (will be read with truncation)
    self.text_extensions = {
        '.txt', '.md', '.markdown', '.rst', '.log', '.csv', '.tsv', '.json', '.xml', '.yaml', '.yml',
        '.py', '.js', '.ts', '.java', '.c', '.cpp', '.h', '.hpp', '.go', '.rs', '.rb', '.php',
        '.html', '.css', '.scss', '.sass', '.less', '.vue', '.jsx', '.tsx',
        '.sh', '.bash', '.zsh', '.fish', '.ps1', '.bat', '.cmd',
        '.sql', '.r', '.m', '.swift', '.kt', '.scala', '.clj', '.erl', '.ex',
        '.dockerfile', '.makefile', '.cmake', '.gradle', '.properties', '.ini', '.conf', '.cfg',
        '.doc', '.docx', '.xls', '.xlsx', '.ppt', '.pptx'  # Office documents
    }
def execute(self, args: Dict[str, Any]) -> ToolResult:
    """
    Execute file read operation.

    Dispatches on file extension: images return metadata for sending,
    video/audio/binary/archives return metadata only, PDFs go through the
    PDF reader, and everything else is read as text with truncation.

    :param args: Contains file path ("path", with "location" accepted as an
                 alias) and optional offset/limit parameters
    :return: File content (or metadata) or error message
    """
    # Support 'location' as alias for 'path' (LLM may use it from skill listing)
    path = args.get("path", "") or args.get("location", "")
    path = path.strip() if isinstance(path, str) else ""
    offset = args.get("offset")
    limit = args.get("limit")
    if not path:
        return ToolResult.fail("Error: path parameter is required")
    # Resolve path
    absolute_path = self._resolve_path(path)
    # Security check: Prevent reading sensitive config files
    env_config_path = expand_path("~/.cow/.env")
    if os.path.abspath(absolute_path) == os.path.abspath(env_config_path):
        return ToolResult.fail(
            "Error: Access denied. API keys and credentials must be accessed through the env_config tool only."
        )
    # Check if file exists
    if not os.path.exists(absolute_path):
        # Provide helpful hint if using relative path
        if not os.path.isabs(path) and not path.startswith('~'):
            return ToolResult.fail(
                f"Error: File not found: {path}\n"
                f"Resolved to: {absolute_path}\n"
                f"Hint: Relative paths are based on workspace ({self.cwd}). For files outside workspace, use absolute paths."
            )
        return ToolResult.fail(f"Error: File not found: {path}")
    # Check if readable
    if not os.access(absolute_path, os.R_OK):
        return ToolResult.fail(f"Error: File is not readable: {path}")
    # Check file type
    file_ext = Path(absolute_path).suffix.lower()
    file_size = os.path.getsize(absolute_path)
    # Check if image - return metadata for sending
    if file_ext in self.image_extensions:
        return self._read_image(absolute_path, file_ext)
    # Check if video/audio/binary/archive - return metadata only
    if file_ext in self.video_extensions:
        return self._return_file_metadata(absolute_path, "video", file_size)
    if file_ext in self.audio_extensions:
        return self._return_file_metadata(absolute_path, "audio", file_size)
    if file_ext in self.binary_extensions or file_ext in self.archive_extensions:
        return self._return_file_metadata(absolute_path, "binary", file_size)
    # Check if PDF
    if file_ext in self.pdf_extensions:
        return self._read_pdf(absolute_path, path, offset, limit)
    # Read text file (with truncation for large files)
    # (Unknown extensions fall through here and are treated as text.)
    return self._read_text(absolute_path, path, offset, limit)
def _resolve_path(self, path: str) -> str:
    """
    Turn a user-supplied path into an absolute path.

    ``~`` is expanded first; an already-absolute path is returned as-is,
    while a relative path is anchored at the tool's working directory.

    :param path: Relative or absolute path
    :return: Absolute path
    """
    expanded = expand_path(path)  # handle "~" home-directory shorthand
    if not os.path.isabs(expanded):
        expanded = os.path.abspath(os.path.join(self.cwd, expanded))
    return expanded
def _return_file_metadata(self, absolute_path: str, file_type: str, file_size: int) -> ToolResult:
    """
    Build a metadata-only result for files whose content is never read
    into context (video, audio, binary and archive files).

    :param absolute_path: Absolute path to the file
    :param file_type: Type of file (video, audio, binary, etc.)
    :param file_size: File size in bytes
    :return: File metadata
    """
    target = Path(absolute_path)
    file_name = target.name
    file_ext = target.suffix.lower()
    # Extension -> MIME type table; anything unknown falls back to octet-stream.
    known_mime_types = {
        # Video
        '.mp4': 'video/mp4',
        '.avi': 'video/x-msvideo',
        '.mov': 'video/quicktime',
        '.mkv': 'video/x-matroska',
        '.webm': 'video/webm',
        # Audio
        '.mp3': 'audio/mpeg',
        '.wav': 'audio/wav',
        '.ogg': 'audio/ogg',
        '.m4a': 'audio/mp4',
        '.flac': 'audio/flac',
        # Binary
        '.zip': 'application/zip',
        '.tar': 'application/x-tar',
        '.gz': 'application/gzip',
        '.rar': 'application/x-rar-compressed',
    }
    metadata = {
        "type": f"{file_type}_metadata",
        "file_type": file_type,
        "path": absolute_path,
        "file_name": file_name,
        "mime_type": known_mime_types.get(file_ext, 'application/octet-stream'),
        "size": file_size,
        "size_formatted": format_size(file_size),
        # User-facing hint: use the send tool to actually deliver the file.
        "message": f"{file_type.capitalize()} 文件: {file_name} ({format_size(file_size)})\n提示: 如果需要发送此文件,请使用 send 工具。"
    }
    return ToolResult.success(metadata)
def _read_image(self, absolute_path: str, file_ext: str) -> ToolResult:
    """
    Handle an image file: always return metadata only — images are meant
    to be sent via the send tool, never loaded into model context.

    :param absolute_path: Absolute path to the image file
    :param file_ext: File extension (lowercased, with leading dot)
    :return: Result containing image metadata for sending
    """
    try:
        size_bytes = os.path.getsize(absolute_path)
        # Extension -> MIME type; unknown image extensions default to JPEG.
        image_mime_types = {
            '.jpg': 'image/jpeg',
            '.jpeg': 'image/jpeg',
            '.png': 'image/png',
            '.gif': 'image/gif',
            '.webp': 'image/webp'
        }
        # Metadata only (NOT file_to_send — the send tool performs the actual send).
        return ToolResult.success({
            "type": "image_metadata",
            "file_type": "image",
            "path": absolute_path,
            "mime_type": image_mime_types.get(file_ext, 'image/jpeg'),
            "size": size_bytes,
            "size_formatted": format_size(size_bytes),
            "message": f"图片文件: {Path(absolute_path).name} ({format_size(size_bytes)})\n提示: 如果需要发送此图片,请使用 send 工具。"
        })
    except Exception as e:
        return ToolResult.fail(f"Error reading image file: {str(e)}")
def _read_text(self, absolute_path: str, display_path: str, offset: int = None, limit: int = None) -> ToolResult:
    """
    Read a text file with paging (offset/limit) and size-based truncation.

    Size handling happens in two stages: files over 50MB are never loaded
    (metadata only is returned), and loaded content is capped at 20K
    characters BEFORE any line selection — so line counts and negative
    offsets refer to the capped content, not the full file.

    :param absolute_path: Absolute path to the file
    :param display_path: Path shown to the user in messages
    :param offset: Starting line number (1-indexed); negative means
                   "count from the end" (e.g. -20 = last 20 lines)
    :param limit: Maximum number of lines to read
    :return: File content (possibly truncated) or error message
    """
    try:
        # Stage 1: refuse to load very large files into memory at all.
        file_size = os.path.getsize(absolute_path)
        MAX_FILE_SIZE = 50 * 1024 * 1024  # 50MB
        if file_size > MAX_FILE_SIZE:
            # File too large, return metadata only
            return ToolResult.success({
                "type": "file_to_send",
                "file_type": "document",
                "path": absolute_path,
                "size": file_size,
                "size_formatted": format_size(file_size),
                "message": f"文件过大 ({format_size(file_size)} > 50MB),无法读取内容。文件路径: {absolute_path}"
            })
        # Read file (UTF-8; a decode failure is reported below)
        with open(absolute_path, 'r', encoding='utf-8') as f:
            content = f.read()
        # Stage 2: cap content at 20K characters to protect model context.
        MAX_CONTENT_CHARS = 20 * 1024  # 20K characters
        content_truncated = False
        if len(content) > MAX_CONTENT_CHARS:
            content = content[:MAX_CONTENT_CHARS]
            content_truncated = True
        all_lines = content.split('\n')
        total_file_lines = len(all_lines)
        # Apply offset (if specified)
        start_line = 0
        if offset is not None:
            if offset < 0:
                # Negative offset: read from end
                # -20 means "last 20 lines" → start from (total - 20)
                start_line = max(0, total_file_lines + offset)
            else:
                # Positive offset: read from start (1-indexed)
                start_line = max(0, offset - 1)  # Convert to 0-indexed
            if start_line >= total_file_lines:
                return ToolResult.fail(
                    f"Error: Offset {offset} is beyond end of file ({total_file_lines} lines total)"
                )
        start_line_display = start_line + 1  # For display (1-indexed)
        # Select the requested line window (whole content if no offset/limit)
        selected_content = content
        user_limited_lines = None
        if limit is not None:
            end_line = min(start_line + limit, total_file_lines)
            selected_content = '\n'.join(all_lines[start_line:end_line])
            user_limited_lines = end_line - start_line
        elif offset is not None:
            selected_content = '\n'.join(all_lines[start_line:])
        # Apply line-count/byte truncation via the shared helper
        truncation = truncate_head(selected_content)
        output_text = ""
        details = {}
        # Prepend a warning if the raw content was capped at 20K characters
        if content_truncated:
            output_text = f"[文件内容已截断到前 {format_size(MAX_CONTENT_CHARS)},完整文件大小: {format_size(file_size)}]\n\n"
        if truncation.first_line_exceeds_limit:
            # First line alone exceeds the byte limit: return a hint to use
            # the bash tool instead of partial content. Append (+=) so the
            # character-cap warning built above is not discarded (previously
            # a plain assignment here silently dropped it).
            first_line_size = format_size(len(all_lines[start_line].encode('utf-8')))
            output_text += f"[Line {start_line_display} is {first_line_size}, exceeds {format_size(DEFAULT_MAX_BYTES)} limit. Use bash tool to read: head -c {DEFAULT_MAX_BYTES} {display_path} | tail -n +{start_line_display}]"
            details["truncation"] = truncation.to_dict()
        elif truncation.truncated:
            # Helper truncated the selection: tell the caller how to continue
            end_line_display = start_line_display + truncation.output_lines - 1
            next_offset = end_line_display + 1
            output_text = truncation.content
            if truncation.truncated_by == "lines":
                output_text += f"\n\n[Showing lines {start_line_display}-{end_line_display} of {total_file_lines}. Use offset={next_offset} to continue.]"
            else:
                output_text += f"\n\n[Showing lines {start_line_display}-{end_line_display} of {total_file_lines} ({format_size(DEFAULT_MAX_BYTES)} limit). Use offset={next_offset} to continue.]"
            details["truncation"] = truncation.to_dict()
        elif user_limited_lines is not None and start_line + user_limited_lines < total_file_lines:
            # User-specified limit left more content behind, but no truncation
            remaining = total_file_lines - (start_line + user_limited_lines)
            next_offset = start_line + user_limited_lines + 1
            output_text = truncation.content
            output_text += f"\n\n[{remaining} more lines in file. Use offset={next_offset} to continue.]"
        else:
            # No truncation and nothing beyond the user's limit
            output_text = truncation.content
        result = {
            "content": output_text,
            "total_lines": total_file_lines,
            "start_line": start_line_display,
            "output_lines": truncation.output_lines
        }
        if details:
            result["details"] = details
        return ToolResult.success(result)
    except UnicodeDecodeError:
        return ToolResult.fail(f"Error: File is not a valid text file (encoding error): {display_path}")
    except Exception as e:
        return ToolResult.fail(f"Error reading file: {str(e)}")
def _read_pdf(self, absolute_path: str, display_path: str, offset: int = None, limit: int = None) -> ToolResult:
    """
    Read PDF file content as extracted text, with the same offset/limit
    paging semantics as _read_text.

    :param absolute_path: Absolute path to the file
    :param display_path: Path to display
    :param offset: Starting line number (1-indexed); negative means
                   "count from the end" (consistent with _read_text —
                   previously negative values were clamped to the start)
    :param limit: Maximum number of lines to read
    :return: PDF text content or error message
    """
    try:
        # pypdf is an optional dependency — import lazily so the tool
        # still loads when it is not installed.
        try:
            from pypdf import PdfReader
        except ImportError:
            return ToolResult.fail(
                "Error: pypdf library not installed. Install with: pip install pypdf"
            )
        # Read PDF
        reader = PdfReader(absolute_path)
        total_pages = len(reader.pages)
        # Extract text page by page, tagging each page with a header so
        # line offsets can be mapped back to pages by the reader.
        text_parts = []
        for page_num, page in enumerate(reader.pages, 1):
            page_text = page.extract_text()
            if page_text.strip():
                text_parts.append(f"--- Page {page_num} ---\n{page_text}")
        if not text_parts:
            # Image-only or encrypted PDFs yield no extractable text
            return ToolResult.success({
                "content": f"[PDF file with {total_pages} pages, but no text content could be extracted]",
                "total_pages": total_pages,
                "message": "PDF may contain only images or be encrypted"
            })
        # Merge all text
        full_content = "\n\n".join(text_parts)
        all_lines = full_content.split('\n')
        total_lines = len(all_lines)
        # Apply offset and limit (same logic as text files)
        start_line = 0
        if offset is not None:
            if offset < 0:
                # Negative offset: read from end (-20 = last 20 lines),
                # matching _read_text's behavior for consistency
                start_line = max(0, total_lines + offset)
            else:
                start_line = max(0, offset - 1)  # 1-indexed → 0-indexed
            if start_line >= total_lines:
                return ToolResult.fail(
                    f"Error: Offset {offset} is beyond end of content ({total_lines} lines total)"
                )
        start_line_display = start_line + 1
        # Select the requested line window (whole content if no offset/limit)
        selected_content = full_content
        user_limited_lines = None
        if limit is not None:
            end_line = min(start_line + limit, total_lines)
            selected_content = '\n'.join(all_lines[start_line:end_line])
            user_limited_lines = end_line - start_line
        elif offset is not None:
            selected_content = '\n'.join(all_lines[start_line:])
        # Apply line-count/byte truncation via the shared helper
        truncation = truncate_head(selected_content)
        output_text = ""
        details = {}
        if truncation.truncated:
            # Helper truncated the selection: tell the caller how to continue
            end_line_display = start_line_display + truncation.output_lines - 1
            next_offset = end_line_display + 1
            output_text = truncation.content
            if truncation.truncated_by == "lines":
                output_text += f"\n\n[Showing lines {start_line_display}-{end_line_display} of {total_lines}. Use offset={next_offset} to continue.]"
            else:
                output_text += f"\n\n[Showing lines {start_line_display}-{end_line_display} of {total_lines} ({format_size(DEFAULT_MAX_BYTES)} limit). Use offset={next_offset} to continue.]"
            details["truncation"] = truncation.to_dict()
        elif user_limited_lines is not None and start_line + user_limited_lines < total_lines:
            # User-specified limit left more content behind, but no truncation
            remaining = total_lines - (start_line + user_limited_lines)
            next_offset = start_line + user_limited_lines + 1
            output_text = truncation.content
            output_text += f"\n\n[{remaining} more lines in file. Use offset={next_offset} to continue.]"
        else:
            output_text = truncation.content
        result = {
            "content": output_text,
            "total_pages": total_pages,
            "total_lines": total_lines,
            "start_line": start_line_display,
            "output_lines": truncation.output_lines
        }
        if details:
            result["details"] = details
        return ToolResult.success(result)
    except Exception as e:
        return ToolResult.fail(f"Error reading PDF file: {str(e)}")
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech | |
agent/tools/scheduler/__init__.py | Python | """
Scheduler tool for managing scheduled tasks
"""
from .scheduler_tool import SchedulerTool
__all__ = ["SchedulerTool"]
| zhayujie/chatgpt-on-wechat | 41,284 | CowAgent是基于大模型的超级AI助理,能主动思考和任务规划、访问操作系统和外部资源、创造和执行Skills、拥有长期记忆并不断成长。同时支持飞书、钉钉、企业微信应用、微信公众号、网页等接入,可选择OpenAI/Claude/Gemini/DeepSeek/ Qwen/GLM/Kimi/LinkAI,能处理文本、语音、图片和文件,可快速搭建个人AI助手和企业数字员工。 | Python | zhayujie | Minimal Future Tech |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.