id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
13,632 | import sys
import os
import matplotlib.pyplot as plt
import torch
import torchvision
from torchvision import transforms
from torch.utils import data
from d2l import torch as d2l
import d2lutil.common as common
def cross_entropy(y_hat, y):
    """Per-sample cross-entropy loss.

    Picks each row's predicted probability for the true class (``y_hat``
    rows are per-class probabilities, ``y`` holds class indices) and
    returns its negative log, shape (n, 1).
    """
    picked = y_hat.gather(1, y.view(-1, 1))
    return -torch.log(picked)
13,633 | import sys
import os
import matplotlib.pyplot as plt
import torch
import torchvision
from torchvision import transforms
from torch.utils import data
from d2l import torch as d2l
import d2lutil.common as common
def accuracy(y_hat, y):
    """Fraction of rows whose argmax over ``y_hat`` matches the label ``y``."""
    predictions = y_hat.argmax(dim=1)
    return (predictions == y).float().mean().item()
13,634 | import sys
import os
import matplotlib.pyplot as plt
import torch
import torchvision
from torchvision import transforms
from torch.utils import data
from d2l import torch as d2l
import d2lutil.common as common
d2l.use_svg_display()
y = torch.LongTensor([0, 2])
y_hat.gather(1, y.view(-1, 1))
y_hat, y):
return (y_hat.argmax(dim=1) == y).float().mean().item(
for X, y in data_iter:
acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
n += y.shape[0]
for epoch in range(num_epochs):
train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
for X, y in train_iter:
y_hat = net(X)
l = loss(y_hat, y).sum()
# 梯度清零
if params is not None and params[0].grad is not None:
for param in params:
param.grad.data.zero_()
l.backward()
# 执行优化方法
if optimizer is not None:
optimizer.step()
else:
d2l.sgd(params, lr, batch_size)
train_l_sum += l.item()
train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
n += y.shape[0]
test_acc = evaluate_accuracy(test_iter, net)
print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
% (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
y in test_iter:
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, optimizer=None):
    """Train ``net`` for ``num_epochs`` epochs, printing per-epoch loss and
    train/test accuracy.

    Either pass a torch ``optimizer``, or raw ``params`` plus ``lr`` (then
    ``d2l.sgd`` is used). ``loss`` must return a per-sample loss tensor.
    NOTE(review): relies on an ``evaluate_accuracy(test_iter, net)`` helper
    defined elsewhere in this module — confirm it is in scope.
    """
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()
            # Zero gradients (manual-parameter path only).
            if params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            # Apply the optimization step.
            if optimizer is not None:
                optimizer.step()
            else:
                d2l.sgd(params, lr, batch_size)
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
13,635 | import sys
import torch
from d2l import torch as d2l
from torch import nn
weight(m
if type(m) == nn.Linear:
nn.init.normal_(m.weight, std=0.01)
def init_weight(m):
    """Re-initialize the weights of an ``nn.Linear`` module from N(0, 0.01);
    other module types are left untouched."""
    if type(m) != nn.Linear:
        return
    nn.init.normal_(m.weight, std=0.01)
13,636 | import sys
import torch
from d2l import torch as d2l
from torch import nn
def accuracy(y_hat, y):
    """Mean classification accuracy: the share of samples for which the
    highest-scoring column of ``y_hat`` equals the label in ``y``."""
    correct = (y_hat.argmax(dim=1) == y).float()
    return correct.mean().item()
13,637 | import sys
import torch
from d2l import torch as d2l
from torch import nn en(), nn.Linear(784, 10))
for X, y in data_iter:
acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
n += y.shape[0]
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, optimizer=None):
    """Train ``net`` for ``num_epochs`` epochs, printing per-epoch loss and
    train/test accuracy.

    Either pass a torch ``optimizer``, or raw ``params`` plus ``lr`` (then
    ``d2l.sgd`` is used). ``loss`` must return a per-sample loss tensor.
    NOTE(review): relies on an ``evaluate_accuracy(test_iter, net)`` helper
    defined elsewhere in this module — confirm it is in scope.
    """
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()
            # Zero gradients (manual-parameter path only).
            if params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            # Apply the optimization step.
            if optimizer is not None:
                optimizer.step()
            else:
                d2l.sgd(params, lr, batch_size)
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
13,638 | import random
import torch
X = torch.normal(0, 1, (nums_example, len(w)))
y = torch.matmul(X, w) + b
print("y_shape:", y.shape)
y += torch.normal(0, 0.01, y.shape) urn X, y.reshape(-1, 1) torch.tensor([2, -3.4])
for X, y in read_data(batch_size, features, labels):
print("X:", X, "\ny", y)
break;normal(0, 0.01, size=(2, 1), requires_grad=True)
(X, w, b):
return torch.matmul(X, w) + b
with torch.no_grad(): # with torch.no_grad() 则主要是用于停止autograd模块的工作,
for param in params:
param -= lr * param.grad / batch_size ## 这里用param = param - lr * param.grad / batch_size会导致导数丢失, zero_()函数报错
param.grad.zero_() ## 导数如果丢失了,会报错‘NoneType’ object has no attribute ‘zero_’
03
num_epochs = 3
for epoc
print("w误差 ", true_w - w, "\nb误差 ", true_b - b)
def create_data(w, b, nums_example):
    """Generate a synthetic linear-regression dataset.

    Draws ``nums_example`` rows of standard-normal features, computes
    ``w·x + b``, prints the label shape, adds N(0, 0.01) noise, and returns
    (features, labels) with labels reshaped to a column vector.
    """
    features = torch.normal(0, 1, (nums_example, len(w)))
    labels = torch.matmul(features, w) + b
    print("y_shape:", labels.shape)
    labels += torch.normal(0, 0.01, labels.shape)
    return features, labels.reshape(-1, 1)
13,639 | import random
import torch
torch.tensor([2, -3.4])
nums_example = len(features)
indices = list(range(nums_example)) s_example, batch_size): # range(start, stop, step)
index_tensor = torch.tensor(indices[i: min(i + batch_size, nums_example)])
yield features[index_tensor], lables[index_tensor] # 通过索引访问向量
with torch.no_grad(): # with torch.no_grad() 则主要是用于停止autograd模块的工作,
for param in params:
param -= lr * param.grad / batch_size ## 这里用param = param - lr * param.grad / batch_size会导致导数丢失, zero_()函数报错
param.grad.zero_() ## 导数如果丢失了,会报错‘NoneType’ object has no attribute ‘zero_’
03
num_epochs = 3
for epoc
def read_data(batch_size, features, lables):
    """Yield (features, labels) minibatches in a random order.

    Shuffles the example indices once, then yields slices of at most
    ``batch_size`` rows, indexing through a tensor so torch inputs work.
    (Parameter name ``lables`` kept for caller compatibility.)
    """
    nums_example = len(features)
    order = list(range(nums_example))
    random.shuffle(order)
    for start in range(0, nums_example, batch_size):
        stop = min(start + batch_size, nums_example)
        batch = torch.tensor(order[start:stop])
        yield features[batch], lables[batch]
13,640 | import random
import torch
torch.tensor([2, -3.4])
with torch.no_grad(): # with torch.no_grad() 则主要是用于停止autograd模块的工作,
for param in params:
param -= lr * param.grad / batch_size ## 这里用param = param - lr * param.grad / batch_size会导致导数丢失, zero_()函数报错
param.grad.zero_() ## 导数如果丢失了,会报错‘NoneType’ object has no attribute ‘zero_’
03
num_epochs = 3
for epoc
def net(X, w, b):
    """Linear regression model: ``X @ w + b``."""
    return X.matmul(w) + b
13,641 | import random
import torch
def loss(y_hat, y):
    """Halved squared error, elementwise.

    ``y`` is reshaped to ``y_hat``'s shape first (e.g. a length-n label
    vector against an (n, 1) prediction column).
    """
    diff = y_hat - y.reshape(y_hat.shape)
    return diff ** 2 / 2
13,642 | import random
import torch
torch.tensor([2, -3.4])
with torch.no_grad(): # with torch.no_grad() 则主要是用于停止autograd模块的工作,
for param in params:
param -= lr * param.grad / batch_size ## 这里用param = param - lr * param.grad / batch_size会导致导数丢失, zero_()函数报错
param.grad.zero_() ## 导数如果丢失了,会报错‘NoneType’ object has no attribute ‘zero_’
03
num_epochs = 3
for epoc
def sgd(params, batch_size, lr):
    """Minibatch SGD step: subtract ``lr * grad / batch_size`` from each
    parameter in place, then zero its gradient.

    Runs under ``no_grad`` so the update itself is not tracked; the
    in-place ``-=`` keeps each parameter's ``.grad`` attribute alive
    (plain reassignment would detach it and ``zero_()`` would fail).
    """
    with torch.no_grad():
        for p in params:
            p -= lr * p.grad / batch_size
            p.grad.zero_()
13,643 | import torch
import torchvision
import torchvision.transforms as transforms
from d2l import torch as d2l
from torch.utils import data
from torchvision.datasets.mnist import read_image_file, read_label_file
from torchvision.datasets.utils import extract_archive
def hello():
    """Smoke-test helper: print a fixed marker string."""
    marker = "semilogy_HELLO"
    print(marker)
13,644 | import torch
import torchvision
import torchvision.transforms as transforms
from d2l import torch as d2l
from torch.utils import data
from torchvision.datasets.mnist import read_image_file, read_label_file
from torchvision.datasets.utils import extract_archive
def load_fashion_mnist(batch_size):
    """Unpack locally downloaded Fashion-MNIST archives, rebuild the
    processed ``.pt`` files, and return ``(train_iter, test_iter)``
    DataLoaders.

    NOTE(review): every path is hard-coded to a ``D://d2l-data`` Windows
    layout; this function only works on that exact setup.
    """
    # Unpack the four raw .gz archives in place.
    extract_archive('D://d2l-data//t10k-images-idx3-ubyte.gz', 'D://d2l-data//FashionMNIST//raw', False)
    extract_archive('D://d2l-data//train-images-idx3-ubyte.gz', 'D://d2l-data//FashionMNIST//raw', False)
    extract_archive('D://d2l-data//t10k-labels-idx1-ubyte.gz', 'D://d2l-data//FashionMNIST//raw', False)
    extract_archive('D://d2l-data//train-labels-idx1-ubyte.gz', 'D://d2l-data//FashionMNIST//raw', False)
    # Read the raw idx files into (images, labels) tensor pairs.
    training_set = (
        read_image_file('D://d2l-data//FashionMNIST//raw//train-images-idx3-ubyte'),
        read_label_file('D://d2l-data//FashionMNIST//raw//train-labels-idx1-ubyte')
    )
    test_set = (
        read_image_file('D://d2l-data//FashionMNIST//raw//t10k-images-idx3-ubyte'),
        read_label_file('D://d2l-data//FashionMNIST//raw//t10k-labels-idx1-ubyte')
    )
    # Persist the processed tensors where torchvision expects them.
    with open('D://d2l-data//FashionMNIST//processed//training.pt', 'wb') as f:
        torch.save(training_set, f)
    with open('D://d2l-data//FashionMNIST//processed//test.pt', 'wb') as f:
        torch.save(test_set, f)
    print('Done!')
    #train_data, train_targets = torch.load('D://d2l-data//FashionMNIST//processed//training.pt')
    #test_data, test_targets = torch.load('D://d2l-data//FashionMNIST//processed//test.pt')
    mnist_train = torchvision.datasets.FashionMNIST(root="D:/d2l-data/", train=True, transform=transforms.ToTensor(),
                                                    download=False)
    mnist_test = torchvision.datasets.FashionMNIST(root="D:/d2l-data/", train=False, transform=transforms.ToTensor(),
                                                   download=False)
    # Pitfall: num_workers > 0 fails here with "An attempt has been made to
    # start a new process before the current process has finished its
    # bootstrapping" (likely Windows process spawning without a __main__
    # guard), so keep the loaders single-process.
    train_iter = data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=0)
    test_iter = data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=0)
    return (train_iter, test_iter)
13,645 | import os
import torch
from torch import nn
net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
()) e) in net[0].named_parameters()]) me, param.shape)
t_normal)
nn.init.xavier_uniform_(m.weight)
it)
nn.ReLU(), nn.Linear(8, 1))
net(X
net[2].weight.data[0, 0] = 100ta[0])
def block2():
    """Stack four copies of ``block1()`` into a named ``nn.Sequential``
    (modules are named block0 .. block3)."""
    stacked = nn.Sequential()
    for idx in range(4):
        stacked.add_module(f'block{idx}', block1())
    return stacked
13,646 | import os
import torch
from torch import nn
if type(m) == nn.Linear:
nn.init.normal_(m.weight, mean=0, std=0.01)
nn.init.zeros_(m.bias)
if type(m) == nn.Linear:
nn.init.constant_(m.weight, 1)
nn.init.zeros_(m.bias)
nn.init.xavier_uniform_(m.weight)
if type(m) == nn.Linear:
print("Init", *[(name, param.shape) for name, param in m.named_parameters()][0])
nn.init.uniform_(m.weight, -10, 10)
m.weight.data *= m.weight.data.abs() >= 5
Linear(4, 8), nn.ReLU(), shared, nn.ReLU(), shared,
nn.ReLU(), nn.Linear(8, 1))
def init_normal(m):
    """For ``nn.Linear`` modules: draw weights from N(0, 0.01) and zero the
    bias; other module types are left untouched."""
    if type(m) != nn.Linear:
        return
    nn.init.normal_(m.weight, mean=0, std=0.01)
    nn.init.zeros_(m.bias)
13,647 | import os
import torch
from torch import nn
if type(m) == nn.Linear:
nn.init.normal_(m.weight, mean=0, std=0.01)
nn.init.zeros_(m.bias)
if type(m) == nn.Linear:
nn.init.constant_(m.weight, 1)
nn.init.zeros_(m.bias)
nn.init.xavier_uniform_(m.weight)
if type(m) == nn.Linear:
print("Init", *[(name, param.shape) for name, param in m.named_parameters()][0])
nn.init.uniform_(m.weight, -10, 10)
m.weight.data *= m.weight.data.abs() >= 5
Linear(4, 8), nn.ReLU(), shared, nn.ReLU(), shared,
nn.ReLU(), nn.Linear(8, 1))
def init_constant(m):
    """For ``nn.Linear`` modules: set every weight to 1 and the bias to 0."""
    if type(m) != nn.Linear:
        return
    nn.init.constant_(m.weight, 1)
    nn.init.zeros_(m.bias)
13,648 | import os
import torch
from torch import nn
if type(m) == nn.Linear:
nn.init.normal_(m.weight, mean=0, std=0.01)
nn.init.zeros_(m.bias)
if type(m) == nn.Linear:
nn.init.constant_(m.weight, 1)
nn.init.zeros_(m.bias)
nn.init.xavier_uniform_(m.weight)
if type(m) == nn.Linear:
print("Init", *[(name, param.shape) for name, param in m.named_parameters()][0])
nn.init.uniform_(m.weight, -10, 10)
m.weight.data *= m.weight.data.abs() >= 5
Linear(4, 8), nn.ReLU(), shared, nn.ReLU(), shared,
nn.ReLU(), nn.Linear(8, 1))
def xavier(m):
    """Xavier/Glorot-uniform initialization for the weights of
    ``nn.Linear`` modules; other module types are left untouched."""
    if type(m) != nn.Linear:
        return
    nn.init.xavier_uniform_(m.weight)
13,649 | import os
import torch
from torch import nn
if type(m) == nn.Linear:
nn.init.normal_(m.weight, mean=0, std=0.01)
nn.init.zeros_(m.bias)
if type(m) == nn.Linear:
nn.init.constant_(m.weight, 1)
nn.init.zeros_(m.bias)
nn.init.xavier_uniform_(m.weight)
if type(m) == nn.Linear:
print("Init", *[(name, param.shape) for name, param in m.named_parameters()][0])
nn.init.uniform_(m.weight, -10, 10)
m.weight.data *= m.weight.data.abs() >= 5
Linear(4, 8), nn.ReLU(), shared, nn.ReLU(), shared,
nn.ReLU(), nn.Linear(8, 1))
def init_42(m):
    """Fill the weights of ``nn.Linear`` modules with the constant 42."""
    if type(m) != nn.Linear:
        return
    nn.init.constant_(m.weight, 42)
13,650 | import os
import torch
from torch import nn
print(net(X))
print('1.访问第二个全连接层的参数') for name, param in net.named_parameters()])
):
return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())
print('3.嵌套块的参数')
print(rgnet)
print(rgnet(X))
if type(m) == nn.Linear:
nn.init.normal_(m.weight, mean=0, std=0.01)
nn.init.zeros_(m.bias)
print('4.1内置的初始化器')
print(net[0].weight.data[0], net[0].bias.data[0])
if type(m) == nn.Linear:
nn.init.constant_(m.weight, 1)
nn.init.zeros_(m.bias)
print('4.2所有参数初始化为给定的常数')
print(net[0].weight.data[0], net[0].bias.data[0])
nn.init.xavier_uniform_(m.weight)
print('4.3使用Xavier初始化方法初始化第一层,然后第二层初始化为常量值42')
print(net[0].weight)
print(net[2].weight.data)
if type(m) == nn.Linear:
print("Init", *[(name, param.shape) for name, param in m.named_parameters()][0])
nn.init.uniform_(m.weight, -10, 10)
m.weight.data *= m.weight.data.abs() >= 5
print('5.参数自定义初始化')
print(net[0].weight[:2])Linear(4, 8), nn.ReLU(), shared, nn.ReLU(), shared,
nn.ReLU(), nn.Linear(8, 1))nt(net[2].weight.data[0] == net[4].weight.data[0])
def my_init(m):
    """Custom init for ``nn.Linear``: report the first named parameter,
    draw weights uniformly from [-10, 10], then zero every weight whose
    magnitude is below 5 (via a boolean mask multiply)."""
    if type(m) != nn.Linear:
        return
    first_param = [(name, param.shape) for name, param in m.named_parameters()][0]
    print("Init", *first_param)
    nn.init.uniform_(m.weight, -10, 10)
    m.weight.data *= m.weight.data.abs() >= 5
13,651 | import torch
print('1.自动梯度计算')
print('y:', y)
print('x.grad:', x.grad)
print('x.grad:', x.grad)
print('2.Python控制流的梯度计算')
def f(a):
    """Autograd-through-control-flow demo.

    Doubles ``a`` (printing the first L2 norm), keeps doubling until the
    norm reaches 1000, then returns the result — scaled by 100 when its
    sum is non-positive.
    """
    b = a * 2
    print(b.norm())
    while b.norm() < 1000:  # L2 norm: sqrt of the sum of squares
        b = b * 2
    if b.sum() > 0:
        return b
    return 100 * b
13,652 | import numpy as np
from d2l import torch as d2l
import os
def f(x):
    """Quadratic 3x^2 - 4x, the example function for numeric differentiation."""
    squared_term = 3 * x ** 2
    linear_term = 4 * x
    return squared_term - linear_term
13,653 | import numpy as np
from d2l import torch as d2l
import os
def numerical_lim(f, x, h):
    """Forward-difference quotient (f(x + h) - f(x)) / h — approximates
    f'(x) as h shrinks."""
    rise = f(x + h) - f(x)
    return rise / h
13,654 | import os
import torch
from torch import nn
Y = conv2d(X)
2:])
def comp_conv2d(conv2d, X):
    """Run a 2-D input through ``conv2d`` and return a 2-D output.

    Adds singleton batch and channel dimensions before the call, then
    strips them from the result.
    """
    batched = X.reshape((1, 1) + X.shape)
    out = conv2d(batched)
    # Drop the leading batch and channel dimensions again.
    return out.reshape(out.shape[2:])
13,655 | import os
import torch
from torch import nn
from d2l import torch as d2l
torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]],
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])
orr2d_multi_in(X, k) for k in K], 0 torch.stack((K, K + 1, K + 2), 0)
torch.normal(0, 1, (3, 3, 3))
def corr2d_multi_in_out(X, K):
    """Multi-input, multi-output 2-D cross-correlation: apply
    ``corr2d_multi_in`` once per output channel (the 0th axis of ``K``)
    and stack the per-channel results."""
    outputs = [corr2d_multi_in(X, kernel) for kernel in K]
    return torch.stack(outputs, 0)
13,656 | import os
import torch
from torch import nn
from d2l import torch as d2l
torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]],
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])
print(corr2d_multi_in(X, K))
orr2d_multi_in(X, k) for k in K], 0 torch.stack((K, K + 1, K + 2), 0)
print(K.shape)
print(corr2d_multi_in_out(X, K))
c_i, h, w = X.shape
print('c_i: ', c_i) h: ', h) nt('w: ', w) = K.shape[0]
Y = torch.mm(K, X) (c_o, h, w torch.normal(0, 1, (3, 3, 3))
def corr2d_multi_in_out_1x1(X, K):
    """1x1 multi-channel cross-correlation expressed as a matrix multiply.

    A 1x1 convolution mixes channels pointwise, so flattening the spatial
    dimensions turns it into a (c_o, c_i) @ (c_i, h*w) matmul.
    NOTE: the prints below are debug output and run on every call.
    """
    c_i, h, w = X.shape
    print('c_i: ', c_i)  # number of input channels
    print('h: ', h)  # input height
    print('w: ', w)  # input width
    c_o = K.shape[0]  # number of output channels
    X = X.view(c_i, h * w)  # e.g. 3 x 9
    K = K.view(c_o, c_i)  # e.g. 2 x 3
    Y = torch.mm(K, X)  # the matrix multiply of a fully-connected layer
    return Y.view(c_o, h, w)
13,657 | import os
import torch
from torch import nn
from d2l import torch as d2l
ech.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))n range(Y.shape[0]):
for j in range(Y.shape[1]):
Y[i, j] = (X[i:i + h, j:j + w] * K).sum() # X是输入矩阵
return Y
Y = Y1.reshape((1, 1, 6, 7))
if (i + 1) % 2 == 0:
print(f'epoch {i+1}, loss {l.sum():.3f}')
The provided code snippet includes necessary dependencies for implementing the `corr2d` function. Write a Python function `def corr2d(X, K)` to solve the following problem:
计算二维互相关运算
Here is the function:
def corr2d(X, K):  #@save
    """Compute the 2-D cross-correlation of input ``X`` with kernel ``K``."""
    kh, kw = K.shape
    out_h = X.shape[0] - kh + 1
    out_w = X.shape[1] - kw + 1
    Y = torch.zeros((out_h, out_w))
    for row in range(out_h):
        for col in range(out_w):
            window = X[row:row + kh, col:col + kw]
            Y[row, col] = (window * K).sum()
    return Y
13,658 | import os
import time
import importlib
from ace import util
from ace.logger import Logger
logger = Logger(os.path.basename(__file__))
RESOURCE_LOADER_DIRECTORIES = [
"custom",
"core",
]
def load_resource(resource_class_name, import_path):
    """Import ``import_path`` and return the class named
    ``resource_class_name`` from it.

    Returns None (after logging) when the module cannot be imported or
    does not define the class.
    """
    try:
        module = importlib.import_module(import_path)
    except ImportError:
        logger.debug(
            f"No import available for module {import_path}",
        )
        return None
    try:
        return getattr(module, resource_class_name)
    except AttributeError:
        # BUG FIX: the message previously referenced the unbound variable
        # `resource_class`, raising NameError inside this handler; use the
        # class *name* that was actually looked up.
        logger.error(
            f"Failed to get class {resource_class_name} from module {import_path}",
            exc_info=True,
        )
        return None
def loader(resource_name):
    """Resolve, instantiate and start the resource named ``resource_name``.

    Searches the custom then core resource directories (under the
    subdirectory chosen by ACE_RESOURCE_SUBDIRECTORY, default
    "hello_layers"), falling back to ``ace.framework.resources``. Returns
    True on success, False when no class could be imported; unexpected
    errors are logged and None is returned implicitly.
    """
    try:
        # snake_case module name -> class name (presumably CamelCase, per
        # ace.util.snake_to_class — confirm).
        resource_class_name = util.snake_to_class(resource_name)
        logger.debug(
            f"Converted resource_name to resource_class: {resource_class_name}"
        )
        subdirectory = os.environ.get("ACE_RESOURCE_SUBDIRECTORY") or "hello_layers"
        logger.debug(f"ACE_RESOURCE_SUBDIRECTORY: {subdirectory}")
        # First match wins: "custom" shadows "core".
        for directory in RESOURCE_LOADER_DIRECTORIES:
            import_path = f"ace.resources.{directory}.{subdirectory}.{resource_name}"
            resource_class = load_resource(resource_class_name, import_path)
            if resource_class:
                break
        if not resource_class:
            # Fall back to the framework-provided resource.
            import_path = f"ace.framework.resources.{resource_name}"
            logger.debug(f"No custom resource found, importing from {import_path}")
            resource_class = load_resource(resource_class_name, import_path)
        if not resource_class:
            logger.error(
                f"No import available for resource {resource_name}",
            )
            return False
        logger.debug(f"Imported {resource_class_name} from {import_path}")
        resource = resource_class()
        logger.debug(f"Created an instance of {resource_class}")
        logger.info(f"Calling start_resource method on the {resource_class} instance")
        resource.start_resource()
        logger.debug(f"Called start_resource method on the {resource_class} instance")
        return True
    except Exception as e:
        logger.error(f"An error occurred: {e}", exc_info=True)
13,659 | import os
import sys
import inspect
import psutil
def get_package_root(obj):
    """Absolute directory of the top-level package that defines
    ``obj``'s class (looked up through ``sys.modules``)."""
    top_level = obj.__class__.__module__.split(".")[0]
    module_file = sys.modules[top_level].__file__
    return os.path.dirname(os.path.abspath(module_file))
13,660 | import os
import sys
import inspect
import psutil
def get_file_directory():
    """Absolute directory of the *caller's* source file, found via the
    call stack (frame 1 is the immediate caller)."""
    caller = inspect.stack()[1]
    return os.path.dirname(os.path.abspath(caller.filename))
13,661 | import os
import sys
import inspect
import psutil
def get_system_resource_usage():
    """One-line snapshot of CPU, memory and disk usage.

    NOTE: ``psutil.cpu_percent(interval=1)`` blocks for one second to
    sample. The disk percentage shown is the *free* share
    (100 - used percent), unlike the memory line which shows used percent.
    """
    # CPU Load
    cpu_load = psutil.cpu_percent(interval=1)
    cpu_string = f"CPU: {cpu_load}%"
    # Memory Details
    memory_info = psutil.virtual_memory()
    total_memory = memory_info.total / (1024**3)  # Convert to GB
    free_memory = memory_info.available / (1024**3)  # Convert to GB
    memory_string = (
        f"Memory: {free_memory:.2f}/{total_memory:.2f} GB ({memory_info.percent}%)"
    )
    # Disk Details
    disk_info = psutil.disk_usage("/")
    total_disk = disk_info.total / (1024**3)  # Convert to GB
    free_disk = disk_info.free / (1024**3)  # Convert to GB
    disk_string = (
        f"Disk: {free_disk:.2f}/{total_disk:.2f} GB ({100 - disk_info.percent}%)"
    )
    return f"{cpu_string} | {memory_string} | {disk_string}"
13,662 | import os
def get_template_dir():
    """Path to the prompt templates directory, relative to this module."""
    base = os.path.dirname(__file__)
    return os.path.join(base, "prompts/templates")
13,663 | import os
def get_identities_dir():
    """Path to the prompt identities directory, relative to this module."""
    base = os.path.dirname(__file__)
    return os.path.join(base, "prompts/identities")
13,664 | import json
import os
from dotenv import load_dotenv
def has_environment_variable(name):
    """True iff environment variable ``name`` is set to a non-blank value."""
    value = os.getenv(name)
    if value is None:
        return False
    return value.strip() != ""
13,665 | import json
import os
from dotenv import load_dotenv
def parse_json(input_string):
    """``json.loads`` that returns None on malformed input instead of
    raising ``JSONDecodeError``."""
    try:
        parsed = json.loads(input_string)
    except json.JSONDecodeError:
        return None
    return parsed
13,666 | import logging
import os
from ace import constants
logging.basicConfig(level=logging.DEBUG)
def get_log_level(level_str):
    """Map a level name ('DEBUG', 'INFO', ...) to its numeric value.

    ``logging.getLevelName`` returns the string ``'Level <x>'`` for
    unknown names, so a non-int result means the name is invalid.

    Raises:
        ValueError: if ``level_str`` is not a registered level name.
    """
    resolved = logging.getLevelName(level_str)
    if isinstance(resolved, int):
        return resolved
    raise ValueError(f"Invalid log level: {level_str}")
13,667 | import layer
import top_layer as top
import openai
import os
from dotenv import load_dotenv
def stream_chat(stream):
    """Drain a stream of text chunks into one string.

    Uses ``str.join`` instead of repeated ``+=`` so assembly is linear in
    the total length rather than potentially quadratic.

    Args:
        stream: iterable of string chunks (e.g. a streaming API response).

    Returns:
        The concatenated message.
    """
    return "".join(stream)
13,668 | import openai
import yaml
from time import time, sleep
from datetime import datetime
import textwrap
import time
from functools import wraps
import glob
import os
from pathlib import Path
def retry(wait_time=360, max_retries=3):
    """Decorator factory: retry the wrapped call up to ``max_retries``
    times, sleeping ``wait_time`` seconds between attempts.

    The exception from the final attempt is re-raised; earlier failures
    are printed and retried.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            last_attempt = max_retries - 1
            for attempt in range(max_retries):
                try:
                    return func(*args, **kwargs)
                except Exception as exc:
                    if attempt == last_attempt:
                        raise
                    print(f"\n\nError: {exc}. Retrying in {wait_time} seconds...")
                    time.sleep(wait_time)
        return wrapper
    return decorator
13,669 | import openai
import yaml
from time import time, sleep
from datetime import datetime
import textwrap
import time
from functools import wraps
import glob
import os
from pathlib import Path
def get_message_logs(files):
    """Parse each YAML file in ``files`` and return the loaded messages,
    in the same order as the input paths."""
    messages = []
    for path in files:
        with open(path, 'r', encoding='utf-8') as handle:
            messages.append(yaml.safe_load(handle))
    return messages
def get_messages(bus, layer):
    """Return at most one message: the newest one adjacent to ``layer``
    on the given bus.

    For 'north', reads the log directory of the layer above and keeps
    north-bus messages from higher layers; otherwise reads the layer
    below (none exists for layer 1) and keeps south-bus messages from
    lower layers. Messages are sorted newest-first by timestamp.
    """
    if bus == 'north':
        files = glob.glob(f'logs/layer{layer + 1}/*.yaml')
        candidates = [
            m for m in get_message_logs(files)
            if m['bus'] == 'north' and m['layer'] > layer
        ]
    else:
        files = glob.glob(f'logs/layer{layer - 1}/*.yaml') if layer > 1 else []
        candidates = [
            m for m in get_message_logs(files)
            if m['bus'] == 'south' and m['layer'] < layer
        ]
    candidates.sort(key=lambda m: m['timestamp'], reverse=True)
    return candidates[:1]
13,670 | import openai
import yaml
from time import time, sleep
from datetime import datetime
import textwrap
import time
from functools import wraps
import glob
import os
from pathlib import Path
def chat_print(text):
    """Pretty-print a layer response: wrap each line of ``text`` to 120
    columns with a uniform indent, under a 'LAYER:' banner."""
    formatted_lines = [textwrap.fill(line, width=120, initial_indent=' ', subsequent_indent=' ') for line in text.split('\n')]
    formatted_text = '\n'.join(formatted_lines)
    print('\n\n\nLAYER:\n\n%s' % formatted_text)
13,671 | import ace_layers as ace
def get_messages(layer_num):
    """Fetch and format the latest north- and south-bus messages for the
    given layer; errors are printed and None is returned implicitly."""
    try:
        north = ace.format_messages(ace.get_messages('north', layer_num))
        south = ace.format_messages(ace.get_messages('south', layer_num))
        return '''NORTH messages:\n%s\n\n\nSOUTH messages:\n%s''' % (north, south)
    except Exception as oops:
        print(f'\n\nError in GET_MESSAGES in LAYER {layer_num}: "{oops}"')
13,672 | import ace_layers as ace
def chat_completion(layer_num, messages):
    """Stream a chat completion for ``layer_num``.

    Builds the system prompt from the layer's template (with the last
    internal response substituted for <<INTERNAL>>), sends the user
    messages, and yields content deltas as they arrive.
    """
    try:
        internal = ace.get_response(layer_num).strip()
        system_prompt = ace.open_file(f"layer{layer_num}.txt").replace('<<INTERNAL>>', internal)
        conversation = [
            {'role': 'system', 'content': system_prompt},
            {'role': 'user', 'content': messages},
        ]
        for item in ace.chatbot(conversation):
            delta = item['choices'][0]['delta']
            if 'content' in delta:
                yield delta['content']
    except Exception as oops:
        print(f'\n\nError in CHAT_COMPLETION in LAYER {layer_num}: "{oops}"')
13,673 | import ace_layers as ace
def save_response(layer_num, response):
    """Persist a layer response and publish its first/second lines to the
    south/north buses respectively.

    NOTE(review): assumes the response has at least two lines, prefixed
    'SOUTH:' and 'NORTH:'; anything else falls into the except branch.
    """
    try:
        ace.set_response(layer_num, response)
        lines = response.splitlines()
        south_out = lines[0].replace('SOUTH:', '').strip()
        north_out = lines[1].replace('NORTH:', '').strip()
        ace.post_message('south', layer_num, south_out)
        ace.post_message('north', layer_num, north_out)
        return "response saved successfully"
    except Exception as oops:
        print(f'\n\nError in SAVE_RESPONSE of LAYER {layer_num}: "{oops}"')
13,674 | import ace_layers as ace
def get_messages():
    """Fetch and format the north-bus messages addressed to layer 1;
    errors are printed and None is returned implicitly."""
    try:
        bus_messages = ace.get_messages('north', 1)
        return ace.format_messages(bus_messages)
    except Exception as oops:
        print(f'\n\nError in GET_MESSAGES of LAYER 1: "{oops}"')
13,675 | import ace_layers as ace
def chat_completion(messages):
    """Stream a chat completion for the top layer (layer 1).

    Builds the system prompt from layer1.txt (substituting the last
    internal response for <<INTERNAL>>), sends the user messages, and
    yields content deltas as they arrive.
    """
    try:
        internal = ace.get_response(1).strip()
        system_prompt = ace.open_file('layer1.txt').replace('<<INTERNAL>>', internal)
        conversation = [
            {'role': 'system', 'content': system_prompt},
            {'role': 'user', 'content': messages},
        ]
        for item in ace.chatbot(conversation):
            delta = item['choices'][0]['delta']
            if 'content' in delta:
                yield delta['content']
    except Exception as oops:
        print(f'\n\nError in CHAT_COMPLETION of LAYER 1: "{oops}"')
13,676 | import ace_layers as ace
def save_response(response):
    """Persist the layer-1 response and post it to the south bus;
    errors are printed and None is returned implicitly."""
    try:
        ace.set_response(1, response)
        ace.post_message('south', 1, response)
        return "responses saved"
    except Exception as oops:
        print(f'\n\nError in SAVE_RESPONSE of LAYER 1: "{oops}"')
13,677 | from flask import Flask, request, Response
import top_layer
import layer
import os
import openai
from flask_cors import CORS, cross_origin
from dotenv import load_dotenv
def get_messages():
    """HTTP handler: dispatch a message fetch to the requested layer
    (top_layer for layer 1, layer module otherwise)."""
    layer_num = int(request.args.get('layer'))
    if layer_num > 1:
        return layer.get_messages(layer_num), 200
    return top_layer.get_messages(), 200
13,678 | from flask import Flask, request, Response
import top_layer
import layer
import os
import openai
from flask_cors import CORS, cross_origin
from dotenv import load_dotenv
def chat_completion():
    """HTTP handler: stream a chat completion for the requested layer as
    server-sent events."""
    message = request.json
    layer_num = message['layer']
    messages = message['messages']
    if layer_num > 1:
        stream = layer.chat_completion(layer_num, messages)
    else:
        stream = top_layer.chat_completion(messages)
    return Response(stream, mimetype="text/event-stream")
13,679 | from flask import Flask, request, Response
import top_layer
import layer
import os
import openai
from flask_cors import CORS, cross_origin
from dotenv import load_dotenv
def save_response():
    """HTTP handler: persist a layer response through the matching module
    (top_layer for layer 1, layer module otherwise)."""
    message = request.json
    layer_num = message['layer']
    response = message['response']
    if layer_num > 1:
        return layer.save_response(layer_num, response), 200
    return top_layer.save_response(response), 200
13,680 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
async def handle_options_request(path: str, response: Response):
    """CORS preflight handler: respond 200 OK with wide-open origin and
    header allowances."""
    response.status_code = status.HTTP_200_OK
    headers = response.headers
    headers["Access-Control-Allow-Origin"] = "*"
    headers["Access-Control-Allow-Headers"] = "*"
    return response
13,681 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
settings = Settings(
role_name="Aspirational Layer",
control_bus_sub_queue="bus.control.L1",
data_bus_pub_queue="bus.data.L1",
control_bus_pub_queue="bus.control.L2",
data_bus_sub_queue="bus.data.L2",
debug = True,
)
async def get_connection(
    loop,
    username: str,
    password: str,
    amqp_host_name: str,
    role_name: str = "undefined",
    delay_factor=5,
    heartbeat=500,
):
    """Open a robust aio_pika connection to RabbitMQ, retrying forever.

    Loops until ``connect_robust`` succeeds, sleeping ``delay_factor``
    seconds between attempts; ``role_name`` is only used for log context.
    NOTE(review): relies on a module-level ``logger`` that is not visible
    in this chunk's imports — confirm it is defined at module scope.
    """
    connection = None  # kept for parity; success returns from inside the loop
    while True:
        try:
            connection = await aio_pika.connect_robust(
                host=amqp_host_name,
                login=username,
                password=password,
                loop=loop,
                heartbeat=heartbeat,
            )
            logger.info(f"{role_name} connection established...")
            return connection
        except Exception as e:
            logger.error(f"Error connecting to RabbitMQ: {e}. Retrying in {delay_factor} seconds...")
            await asyncio.sleep(delay_factor)
async def create_exchange(connection: aio_pika.Connection, queue_name: str):
    """Declare a fanout exchange named ``exchange.<queue_name>`` and bind
    its queue to it.

    Also binds the global logging queue (from ``settings.logging_queue``)
    so every published message is mirrored there.
    NOTE(review): the logging queue is declared *before* the truthiness
    check below — with an empty/None setting the declare call still runs.
    """
    channel = await connection.channel()
    logging_queue = await channel.declare_queue(settings.logging_queue, durable=True)
    exchange_name = f"exchange.{queue_name}"
    exchange = await channel.declare_exchange(exchange_name, aio_pika.ExchangeType.FANOUT)
    queue = await channel.declare_queue(queue_name, durable=True)
    await queue.bind(exchange)
    if settings.logging_queue:
        await logging_queue.bind(exchange)
    return exchange
class Mission(BaseModel):
    # Request payload for the mission endpoint: `mission` is the free-text
    # mission statement published to the mission queue.
    # NOTE(review): BaseModel is presumably pydantic's — it is not imported
    # in this chunk's visible header; confirm.
    mission: str
async def send_mission(data: Mission) -> Dict[str, str]:
    """Publish a user mission onto the configured mission queue.

    Opens a fresh robust AMQP connection, declares/binds the mission
    exchange, and publishes the mission text as a plain-text message.

    Returns:
        A status dict confirming the publish.
    """
    loop = asyncio.get_event_loop()
    connection = await get_connection(
        loop,
        username=settings.amqp_username,
        password=settings.amqp_password,
        amqp_host_name=settings.amqp_host_name,
    )
    # Routing headers let consumers trace where the message entered the system.
    headers = {
        "source_bus": "User Input",
        "destination_bus": "Control Bus",
        "publisher": settings.role_name,
    }
    exchange = await create_exchange(connection, settings.mission_queue)
    message_body = aio_pika.Message(
        body=data.mission.encode(),
        headers=headers,
        content_type="text/plain",
    )
    await exchange.publish(
        message_body,
        routing_key=settings.mission_queue,
    )
    return {"status": "mission sent"}
13,682 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
logger = logging.getLogger(__name__)
async def send_update(message: str, websocket: WebSocket):
    """Decode a JSON-encoded notification payload and forward it to the
    websocket client."""
    logger.info(f"parsing {message=}")
    payload = json.loads(message)
    await websocket.send_json(payload)
async def get_asyncpg_db():
    """FastAPI dependency: yield a fresh asyncpg connection to the
    configured database and guarantee it is closed when the request
    scope ends."""
    conn = await asyncpg.connect(settings.database_uri)
    try:
        yield conn
    finally:
        await conn.close()
async def websocket_endpoint(websocket: WebSocket, db: asyncpg.Connection = Depends(get_asyncpg_db)):
    """Push Postgres 'new_record' NOTIFY payloads to a websocket client.

    Registers a LISTEN callback that relays each notification through
    ``send_update``, then idles reading (and logging) client messages
    until the socket disconnects; the listener is detached on exit.
    """
    await websocket.accept()

    # BUG FIX: the original passed two *distinct* lambda objects to
    # add_listener and remove_listener; asyncpg removes listeners by
    # callable identity, so the callback was never detached. Bind one
    # callable and use it for both calls.
    def on_notify(c, p, t, m):
        asyncio.create_task(send_update(m, websocket))

    await db.add_listener("new_record", on_notify)
    try:
        while True:
            # Here for handling messages from the client, if needed.
            data = await websocket.receive_text()
            logger.info(f"received {data=}")
    except WebSocketDisconnect:
        pass
    finally:
        await db.remove_listener("new_record", on_notify)
13,683 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
def get_active_ancestral_prompt(
session: Session = Depends(get_db),
):
def get_db():
class LayerTestRequest(LayerNameBase, BaseModel):
class LayerTestResponseModel(LayerNameBase, BaseModel):
class AncestralPromptModel(BaseModel):
async def test_layer(
    req: LayerTestRequest,
    session: Session = Depends(get_db),
):
    """Dry-run a layer: run the reasoning step, then derive bus actions.

    Loads the currently-active ancestral prompt from the DB, feeds it plus the
    request's prompts/LLM parameters through ai.reason and ai.determine_action,
    and returns all intermediate results in one response model.
    """
    ancestral_prompt = None
    # Validate inside the session scope so ORM attributes are still loadable.
    with session as db:
        db_ancestral_prompt = dao.get_active_ancestral_prompt(db=db)
        ancestral_prompt = AncestralPromptModel.model_validate(db_ancestral_prompt)
    # NOTE(review): ai.reason is not awaited while ai.determine_action is —
    # confirm ai.reason is synchronous.
    reasoning_completion = ai.reason(
        ancestral_prompt=ancestral_prompt.prompt,
        input=req.input,
        source_bus=req.source_bus,
        llm_model_parameters=req.llm_model_parameters,
        prompts=req.prompts,
        llm_messages=req.llm_messages,
    )
    data_bus_message, control_bus_message = await ai.determine_action(
        ancestral_prompt=ancestral_prompt.prompt,
        source_bus=req.source_bus,
        reasoning_completion=reasoning_completion,
        prompts=req.prompts,
        llm_model_parameters=req.llm_model_parameters,
        role_name=req.layer_name,
        llm_messages=req.llm_messages,
    )
    return LayerTestResponseModel(
        layer_name=req.layer_name,
        reasoning_result=reasoning_completion,
        data_bus_action=data_bus_message,
        control_bus_action=control_bus_message,
        ancestral_prompt=ancestral_prompt.prompt,
    )
13,684 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
def get_db():
class LayerTestHistoryModel(BaseModel):
async def get_test_runs(
    layer_name: str,
    session: Session = Depends(get_db),
):
    """Return the stored test-run history for one layer as validated models."""
    with session as db:
        history = dao.get_all_test_runs(db=db, layer_name=layer_name)
        return [LayerTestHistoryModel.model_validate(run) for run in history]
13,685 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
def get_db():
    """Yield a DB session; commit on success, rollback and re-raise on error."""
    db: Session = SessionLocal()
    try:
        yield db
        db.commit()
    except BaseException:
        # Was a bare `except:` (E722); BaseException keeps identical semantics
        # (rollback even on KeyboardInterrupt/SystemExit) while being explicit.
        db.rollback()
        raise
    finally:
        db.close()
class AncestralPromptAdd(BaseModel):
    """Payload for creating an ancestral prompt version."""

    # if passed it uses this as the parent
    ancestral_prompt_id: Optional[uuid.UUID] = None
    prompt: str
    # When true, this prompt becomes the single active one.
    is_active: Optional[bool] = False
class AncestralPromptModel(BaseModel):
    """Read model for an ancestral prompt row (ORM -> API serialization)."""

    ancestral_prompt_id: uuid.UUID
    # Prompt this one was derived from; None for a root prompt.
    parent_ancestral_prompt_id: Optional[uuid.UUID]
    prompt: str
    is_active: bool
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow model_validate() directly on SQLAlchemy ORM instances.
        from_attributes = True
def add_ancestral_prompt(
    ancestral_prompt: AncestralPromptAdd,
    session: Session = Depends(get_db),
):
    """Persist a new ancestral prompt and return it as a read model."""
    with session as db:
        created = dao.add_ancestral_prompt(db=db, **ancestral_prompt.model_dump())
        return AncestralPromptModel.model_validate(created)
13,686 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
def get_db():
    """Yield a DB session; commit on success, rollback and re-raise on error."""
    db: Session = SessionLocal()
    try:
        yield db
        db.commit()
    except BaseException:
        # Was a bare `except:` (E722); BaseException keeps identical semantics
        # (rollback even on KeyboardInterrupt/SystemExit) while being explicit.
        db.rollback()
        raise
    finally:
        db.close()
class AncestralPromptModel(BaseModel):
    """Read model for an ancestral prompt row (ORM -> API serialization)."""

    ancestral_prompt_id: uuid.UUID
    # Prompt this one was derived from; None for a root prompt.
    parent_ancestral_prompt_id: Optional[uuid.UUID]
    prompt: str
    is_active: bool
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow model_validate() directly on SQLAlchemy ORM instances.
        from_attributes = True
def set_active_ancestral_prompt(
    ancestral_prompt_id: uuid.UUID,
    session: Session = Depends(get_db),
):
    """Mark the given ancestral prompt as the active one and return it."""
    with session as db:
        activated = dao.set_active_ancestral_prompt(
            db=db,
            ancestral_prompt_id=ancestral_prompt_id,
        )
        return AncestralPromptModel.model_validate(activated)
13,687 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
def get_db():
    """Yield a DB session; commit on success, rollback and re-raise on error."""
    db: Session = SessionLocal()
    try:
        yield db
        db.commit()
    except BaseException:
        # Was a bare `except:` (E722); BaseException keeps identical semantics
        # (rollback even on KeyboardInterrupt/SystemExit) while being explicit.
        db.rollback()
        raise
    finally:
        db.close()
class AncestralPromptModel(BaseModel):
    """Read model for an ancestral prompt row (ORM -> API serialization)."""

    ancestral_prompt_id: uuid.UUID
    # Prompt this one was derived from; None for a root prompt.
    parent_ancestral_prompt_id: Optional[uuid.UUID]
    prompt: str
    is_active: bool
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow model_validate() directly on SQLAlchemy ORM instances.
        from_attributes = True
def get_all_ancestral_prompts(
    session: Session = Depends(get_db),
):
    """List every stored ancestral prompt."""
    with session as db:
        prompts = dao.get_ancestral_prompts(db=db)
        return [AncestralPromptModel.model_validate(p) for p in prompts]
13,688 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
def get_db():
    """Yield a DB session; commit on success, rollback and re-raise on error."""
    db: Session = SessionLocal()
    try:
        yield db
        db.commit()
    except BaseException:
        # Was a bare `except:` (E722); BaseException keeps identical semantics
        # (rollback even on KeyboardInterrupt/SystemExit) while being explicit.
        db.rollback()
        raise
    finally:
        db.close()
class AncestralPromptModel(BaseModel):
    """Read model for an ancestral prompt row (ORM -> API serialization)."""

    ancestral_prompt_id: uuid.UUID
    # Prompt this one was derived from; None for a root prompt.
    parent_ancestral_prompt_id: Optional[uuid.UUID]
    prompt: str
    is_active: bool
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow model_validate() directly on SQLAlchemy ORM instances.
        from_attributes = True
def get_ancestral_prompt_by_id(
    ancestral_prompt_id: uuid.UUID,
    session: Session = Depends(get_db),
):
    """Fetch a single ancestral prompt by primary key.

    Returns one model (or 404) rather than a list: the DAO's
    get_ancestral_prompt_by_id ends in `.first()` and returns a single ORM
    object or None, so iterating its result in a list comprehension (as the
    previous code did) raised TypeError at runtime.
    """
    with session as db:
        result = dao.get_ancestral_prompt_by_id(
            db=db,
            ancestral_prompt_id=ancestral_prompt_id,
        )
        if result is None:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="ancestral prompt not found",
            )
        return AncestralPromptModel.model_validate(result)
13,689 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
def get_db():
    """Yield a DB session; commit on success, rollback and re-raise on error."""
    db: Session = SessionLocal()
    try:
        yield db
        db.commit()
    except BaseException:
        # Was a bare `except:` (E722); BaseException keeps identical semantics
        # (rollback even on KeyboardInterrupt/SystemExit) while being explicit.
        db.rollback()
        raise
    finally:
        db.close()
class LayerConfigModel(LayerNameBase, BaseModel):
    """Read model for a persisted layer configuration."""

    config_id: uuid.UUID
    # Config this one was derived from; None for an original config.
    parent_config_id: Optional[uuid.UUID] = None
    prompts: Prompts
    llm_model_parameters: OpenAiGPTChatParameters
    is_active: bool = True
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow model_validate() directly on SQLAlchemy ORM instances.
        from_attributes = True
def get_all_layer_config(
    layer_name: str,
    session: Session = Depends(get_db),
):
    """List every stored configuration for the named layer."""
    with session as db:
        configs = dao.get_all_layer_config(db, layer_name)
        return [LayerConfigModel.model_validate(cfg) for cfg in configs]
13,690 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
def get_db():
    """Yield a DB session; commit on success, rollback and re-raise on error."""
    db: Session = SessionLocal()
    try:
        yield db
        db.commit()
    except BaseException:
        # Was a bare `except:` (E722); BaseException keeps identical semantics
        # (rollback even on KeyboardInterrupt/SystemExit) while being explicit.
        db.rollback()
        raise
    finally:
        db.close()
class LayerConfigModel(LayerNameBase, BaseModel):
    """Read model for a persisted layer configuration."""

    config_id: uuid.UUID
    # Config this one was derived from; None for an original config.
    parent_config_id: Optional[uuid.UUID] = None
    prompts: Prompts
    llm_model_parameters: OpenAiGPTChatParameters
    is_active: bool = True
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow model_validate() directly on SQLAlchemy ORM instances.
        from_attributes = True
def get_layer_config(
    layer_name: str,
    session: Session = Depends(get_db),
):
    """Return the configuration for the named layer as a read model."""
    with session as db:
        config = dao.get_layer_config(db, layer_name)
        return LayerConfigModel.model_validate(config)
13,691 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
def get_db():
    """Yield a DB session; commit on success, rollback and re-raise on error."""
    db: Session = SessionLocal()
    try:
        yield db
        db.commit()
    except BaseException:
        # Was a bare `except:` (E722); BaseException keeps identical semantics
        # (rollback even on KeyboardInterrupt/SystemExit) while being explicit.
        db.rollback()
        raise
    finally:
        db.close()
class LayerConfigModel(LayerNameBase, BaseModel):
    """Read model for a persisted layer configuration."""

    config_id: uuid.UUID
    # Config this one was derived from; None for an original config.
    parent_config_id: Optional[uuid.UUID] = None
    prompts: Prompts
    llm_model_parameters: OpenAiGPTChatParameters
    is_active: bool = True
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow model_validate() directly on SQLAlchemy ORM instances.
        from_attributes = True
def get_layer_logs(
    layer_name: str,
    session: Session = Depends(get_db),
):
    """Fetch message logs for a layer; 404 when the DAO raises ValueError."""
    try:
        with session as db:
            results = dao.get_layer_logs(db, layer_name)
            # NOTE(review): logs are validated with LayerConfigModel here —
            # RabbitMQLogModel (imported above) looks like the intended model,
            # and dao.get_layer_logs presumably returns multiple rows; confirm
            # and fix the serialization.
            return LayerConfigModel.model_validate(results)
    except ValueError as ve:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=ve.args[0])
13,692 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
def get_db():
    """Yield a DB session; commit on success, rollback and re-raise on error."""
    db: Session = SessionLocal()
    try:
        yield db
        db.commit()
    except BaseException:
        # Was a bare `except:` (E722); BaseException keeps identical semantics
        # (rollback even on KeyboardInterrupt/SystemExit) while being explicit.
        db.rollback()
        raise
    finally:
        db.close()
class LayerConfigAdd(LayerNameBase, BaseModel):
    """Payload for creating a layer configuration version."""

    # if passed it uses this as the parent config id
    config_id: Optional[uuid.UUID] = None
    prompts: Prompts
    llm_model_parameters: OpenAiGPTChatParameters
class LayerConfigModel(LayerNameBase, BaseModel):
    """Read model for a persisted layer configuration."""

    config_id: uuid.UUID
    # Config this one was derived from; None for an original config.
    parent_config_id: Optional[uuid.UUID] = None
    prompts: Prompts
    llm_model_parameters: OpenAiGPTChatParameters
    is_active: bool = True
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow model_validate() directly on SQLAlchemy ORM instances.
        from_attributes = True
def add_layer_config(
    layer_config: LayerConfigAdd,
    session: Session = Depends(get_db),
):
    """Persist a new layer configuration; surface any failure as a 400.

    Raises:
        HTTPException: 400 with the underlying error text. Falls back to
        str(e) because e.args can be empty, in which case the previous
        `e.args[0]` itself raised IndexError while handling the error.
    """
    try:
        with session as db:
            results = dao.add_layer_config(db, **layer_config.model_dump())
            return LayerConfigModel.model_validate(results)
    except Exception as e:
        detail = e.args[0] if e.args else str(e)
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=detail) from e
13,693 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
def get_db():
    """Yield a DB session; commit on success, rollback and re-raise on error."""
    db: Session = SessionLocal()
    try:
        yield db
        db.commit()
    except BaseException:
        # Was a bare `except:` (E722); BaseException keeps identical semantics
        # (rollback even on KeyboardInterrupt/SystemExit) while being explicit.
        db.rollback()
        raise
    finally:
        db.close()
class LayerConfigModel(LayerNameBase, BaseModel):
    """Read model for a persisted layer configuration."""

    config_id: uuid.UUID
    # Config this one was derived from; None for an original config.
    parent_config_id: Optional[uuid.UUID] = None
    prompts: Prompts
    llm_model_parameters: OpenAiGPTChatParameters
    is_active: bool = True
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow model_validate() directly on SQLAlchemy ORM instances.
        from_attributes = True
def set_active_config(
    config_id: uuid.UUID,
    session: Session = Depends(get_db),
):
    """Activate the given layer configuration and return it."""
    with session as db:
        activated = dao.set_active_layer_config(db=db, config_id=config_id)
        return LayerConfigModel.model_validate(activated)
13,694 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
def get_db():
    """Yield a DB session; commit on success, rollback and re-raise on error."""
    db: Session = SessionLocal()
    try:
        yield db
        db.commit()
    except BaseException:
        # Was a bare `except:` (E722); BaseException keeps identical semantics
        # (rollback even on KeyboardInterrupt/SystemExit) while being explicit.
        db.rollback()
        raise
    finally:
        db.close()
class LayerStateCreate(LayerNameBase, BaseModel):
    """Payload for creating a layer-state row."""

    # Initial message-processing flag for the layer.
    process_messages: bool
class LayerStateModel(LayerNameBase, BaseModel):
    """Read model for a layer's runtime state row."""

    layer_id: uuid.UUID
    # Whether the layer should currently consume bus messages.
    process_messages: bool
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow model_validate() directly on SQLAlchemy ORM instances.
        from_attributes = True
def create_layer_state(
    layer_state: LayerStateCreate,
    session: Session = Depends(get_db),
):
    """Create a runtime-state row for a layer and return it."""
    with session as db:
        created = dao.create_layer_state(db, **layer_state.model_dump())
        return LayerStateModel.model_validate(created)
13,695 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
def get_db():
    """Yield a DB session; commit on success, rollback and re-raise on error."""
    db: Session = SessionLocal()
    try:
        yield db
        db.commit()
    except BaseException:
        # Was a bare `except:` (E722); BaseException keeps identical semantics
        # (rollback even on KeyboardInterrupt/SystemExit) while being explicit.
        db.rollback()
        raise
    finally:
        db.close()
class LayerStateModel(LayerNameBase, BaseModel):
    """Read model for a layer's runtime state row."""

    layer_id: uuid.UUID
    # Whether the layer should currently consume bus messages.
    process_messages: bool
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow model_validate() directly on SQLAlchemy ORM instances.
        from_attributes = True
def get_layer_state_by_name(
    layer_name: str,
    session: Session = Depends(get_db),
):
    """Return the runtime state of the named layer."""
    with session as db:
        state = dao.get_layer_state_by_name(db, layer_name)
        return LayerStateModel.model_validate(state)
13,696 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
def get_db():
    """Yield a DB session; commit on success, rollback and re-raise on error."""
    db: Session = SessionLocal()
    try:
        yield db
        db.commit()
    except BaseException:
        # Was a bare `except:` (E722); BaseException keeps identical semantics
        # (rollback even on KeyboardInterrupt/SystemExit) while being explicit.
        db.rollback()
        raise
    finally:
        db.close()
class LayerStateModel(LayerNameBase, BaseModel):
    """Read model for a layer's runtime state row."""

    layer_id: uuid.UUID
    # Whether the layer should currently consume bus messages.
    process_messages: bool
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow model_validate() directly on SQLAlchemy ORM instances.
        from_attributes = True
def update_layer_state(
    layer_name: str,
    session: Session = Depends(get_db),
):
    """Pause a layer: persist process_messages=False for it."""
    with session as db:
        paused = dao.update_layer_state(
            db=db,
            layer_name=layer_name,
            process_messages=False,
        )
        return LayerStateModel.model_validate(paused)
13,697 | import asyncio
import asyncpg
from typing import Dict, List
import uuid
import json
import aio_pika
from fastapi import FastAPI, Response
from fastapi.middleware.cors import CORSMiddleware
from settings import settings
from base.amqp.connection import get_connection
from base.amqp.exchange import create_exchange
from fastapi import FastAPI, HTTPException, Depends, status, WebSocket, WebSocketDisconnect
from sqlalchemy.orm import Session
from database.connection import get_db
from database import dao
from database.asyncpg_connection import get_asyncpg_db
from schema import (
LayerConfigAdd,
LayerStateCreate,
Mission,
LayerConfigModel,
LayerStateModel,
LayerTestRequest,
LayerTestResponseModel,
AncestralPromptAdd,
AncestralPromptModel,
LayerTestHistoryModel,
RabbitMQLogModel,
)
from base import ai
import logging
def get_db():
    """Yield a DB session; commit on success, rollback and re-raise on error."""
    db: Session = SessionLocal()
    try:
        yield db
        db.commit()
    except BaseException:
        # Was a bare `except:` (E722); BaseException keeps identical semantics
        # (rollback even on KeyboardInterrupt/SystemExit) while being explicit.
        db.rollback()
        raise
    finally:
        db.close()
class LayerStateModel(LayerNameBase, BaseModel):
    """Read model for a layer's runtime state row."""

    layer_id: uuid.UUID
    # Whether the layer should currently consume bus messages.
    process_messages: bool
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow model_validate() directly on SQLAlchemy ORM instances.
        from_attributes = True
def update_layer_state(
    layer_name: str,
    session: Session = Depends(get_db),
):
    """Resume a layer: persist process_messages=True for it."""
    with session as db:
        resumed = dao.update_layer_state(
            db=db,
            layer_name=layer_name,
            process_messages=True,
        )
        return LayerStateModel.model_validate(resumed)
13,698 | from database.models import Base
from database.connection import engine, get_db
from sqlalchemy import text
import logging
logger = logging.getLogger(__name__)
# NOTE(review): this fragment looks mis-ordered — Base is rebound after being
# imported from database.models, and create_all(engine) runs before `engine`
# is re-created below. Confirm against the original module layout.
Base = declarative_base()
Base.metadata.create_all(engine)

# NullPool: no connection pooling; a fresh connection per checkout.
engine = create_engine(
    settings.database_uri,
    poolclass=NullPool,
)
def get_db():
    """Yield a DB session; commit on success, rollback and re-raise on error."""
    db: Session = SessionLocal()
    try:
        yield db
        db.commit()
    except BaseException:
        # Was a bare `except:` (E722); BaseException keeps identical semantics
        # (rollback even on KeyboardInterrupt/SystemExit) while being explicit.
        db.rollback()
        raise
    finally:
        db.close()
def init_db():
    """Create all tables plus the Postgres NOTIFY trigger on rabbitmq_logs.

    Best effort: function/trigger creation failures are logged and skipped so
    repeated startups don't abort. After a failed execute the session is
    rolled back — PostgreSQL aborts the transaction on error, and without the
    rollback the second execute was guaranteed to fail too.
    """
    Base.metadata.create_all(engine)

    trigger_creation_sql = text(
        """
        CREATE OR REPLACE TRIGGER after_insert_trigger
        AFTER INSERT
        ON public.rabbitmq_logs
        FOR EACH ROW
        EXECUTE FUNCTION public.notify_insert();
        """
    )

    function_creation_sql = text(
        """
        CREATE OR REPLACE FUNCTION public.notify_insert()
        RETURNS trigger
        LANGUAGE 'plpgsql'
        COST 100
        VOLATILE NOT LEAKPROOF
        AS $BODY$
        DECLARE
            row_json text;
        BEGIN
            row_json := row_to_json(NEW)::text;
            PERFORM pg_notify('new_record', row_json);
            RETURN NEW;
        END;
        $BODY$;
        """
    )

    # NOTE(review): get_db() defined here is a plain generator; confirm the
    # get_db actually in scope supports the context-manager protocol.
    with get_db() as session:
        # The function must exist before the trigger that EXECUTEs it.
        try:
            session.execute(function_creation_sql)
        except Exception as e:
            session.rollback()
            logger.warning('failed to create function: %s', e)
        try:
            session.execute(trigger_creation_sql)
        except Exception as e:
            session.rollback()
            logger.warning('failed to create trigger: %s', e)
    logger.info("init complete")
13,699 | from database.connection import get_db
from database.models import RabbitMQLog
from settings import settings
from init import init_db
import logging
import pika
import time
logger = logging.getLogger(__name__)
def callback(ch, method, properties, body):
def get_channel():
# Static wiring for this process: its role and the bus queues it publishes
# to / subscribes from.
# NOTE(review): these values name the Aspirational Layer — confirm they belong
# in this logger module and were not pasted from another service.
settings = Settings(
    role_name="Aspirational Layer",
    control_bus_sub_queue="bus.control.L1",
    data_bus_pub_queue="bus.data.L1",
    control_bus_pub_queue="bus.control.L2",
    data_bus_sub_queue="bus.data.L2",
    debug=True,
)
def run():
    """Consume the logging queue forever, handing each message to `callback`.

    Blocks in start_consuming(). auto_ack=False, so messages stay unacked
    until acknowledged explicitly — presumably inside `callback`; confirm.
    """
    logger.info("running logger...")
    channel = get_channel()
    channel.basic_consume(
        queue=settings.logging_queue, on_message_callback=callback, auto_ack=False
    )
    channel.start_consuming()
13,700 | from base.prompts import get_action_prompt, get_reasoning_input
from base.settings import Settings
import openai
from database.dao_models import LlmMessage, LayerConfigModel, Prompts, OpenAiGPTChatParameters
from typing import List
import re
import time
from datetime import datetime, timezone
import time
import logging
def determine_none(input_text):
    """Collapse an LLM reply whose [Message] section is literally "none".

    Returns the string "none" when the text contains a "[Message]" header
    immediately followed by a line reading "none"; otherwise returns
    input_text unchanged.
    """
    has_none_message = re.search(r"\[Message\]\n(none)", input_text) is not None
    return "none" if has_none_message else input_text
13,701 | from .models import LayerConfig, LayerState, RabbitMQLog, AncestralPrompt, TestRun
from sqlalchemy.orm import Session
from sqlalchemy import desc
import uuid
from typing import Optional, List, Dict, Any
class TestRun(Base):
    """ORM record of one layer test execution and its outputs."""

    __tablename__ = 'test_run'

    test_run_id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True, nullable=False)
    input = Column(String, nullable=False)
    layer_name = Column(String, nullable=False)
    prompts = Column(JSON, nullable=False)
    source_bus = Column(String, nullable=False)
    llm_messages = Column(JSON, nullable=False)
    llm_model_parameters = Column(JSON, nullable=False)
    # Outputs captured from the run.
    reasoning_result = Column(Text, nullable=False)
    data_bus_action = Column(Text, nullable=False)
    control_bus_action = Column(Text, nullable=False)
    created_at = Column(DateTime, default=datetime.utcnow)
    # Ancestral prompt the run used (nullable).
    ancestral_prompt_id = Column(UUID(as_uuid=True), ForeignKey('ancestral_prompt.ancestral_prompt_id'), nullable=True)
    ancestral_prompt = relationship("AncestralPrompt", back_populates="test_runs")
def store_test_results(
    db: Session,
    input: str,
    layer_name: str,
    prompts: Dict[str, str],
    source_bus: str,
    llm_messages: List[Dict[str, str]],
    llm_model_parameters: Dict[str, Any],
    reasoning_result: str,
    data_bus_action: str,
    control_bus_action: str,
    ancestral_prompt_id: uuid.UUID,
):
    """Insert a TestRun row for one test execution and return the saved row."""
    row = TestRun(
        input=input,
        layer_name=layer_name,
        prompts=prompts,
        source_bus=source_bus,
        llm_messages=llm_messages,
        llm_model_parameters=llm_model_parameters,
        reasoning_result=reasoning_result,
        data_bus_action=data_bus_action,
        control_bus_action=control_bus_action,
        ancestral_prompt_id=ancestral_prompt_id,
    )
    db.add(row)
    db.commit()
    # Refresh so DB-generated defaults (id, created_at) are populated.
    db.refresh(row)
    return row
13,702 | from .models import LayerConfig, LayerState, RabbitMQLog, AncestralPrompt, TestRun
from sqlalchemy.orm import Session
from sqlalchemy import desc
import uuid
from typing import Optional, List, Dict, Any
class AncestralPrompt(Base):
AncestralPrompt.test_runs = relationship("TestRun", order_by=TestRun.test_run_id, back_populates="ancestral_prompt")
def add_ancestral_prompt(
    db: Session,
    ancestral_prompt_id: Optional[uuid.UUID],
    prompt: str,
    is_active: Optional[bool] = False,
):
    """Insert an ancestral prompt, optionally as a child of an existing one.

    When ancestral_prompt_id matches an existing row, the new prompt records
    it as its parent; otherwise the prompt is stored as a root. When
    is_active is truthy, every other prompt is deactivated first.
    """
    parent = None
    if ancestral_prompt_id:
        parent = db.query(AncestralPrompt).filter_by(ancestral_prompt_id=ancestral_prompt_id).first()

    if parent:
        new_prompt = AncestralPrompt(
            parent_ancestral_prompt_id=parent.ancestral_prompt_id,
            prompt=prompt,
        )
    else:
        new_prompt = AncestralPrompt(prompt=prompt)

    if is_active:
        # Single-active invariant: clear the flag everywhere else first.
        db.query(AncestralPrompt).update({AncestralPrompt.is_active: False})
        new_prompt.is_active = is_active

    db.add(new_prompt)
    db.commit()
    db.refresh(new_prompt)
    return new_prompt
13,703 | from .models import LayerConfig, LayerState, RabbitMQLog, AncestralPrompt, TestRun
from sqlalchemy.orm import Session
from sqlalchemy import desc
import uuid
from typing import Optional, List, Dict, Any
class AncestralPrompt(Base):
    """ORM table of ancestral prompts, versioned via a parent pointer."""

    __tablename__ = 'ancestral_prompt'

    ancestral_prompt_id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True, nullable=False)
    # Previous version of this prompt; NULL for a root version.
    parent_ancestral_prompt_id = Column(UUID(as_uuid=True), nullable=True)
    prompt = Column(Text)
    is_active = Column(Boolean, default=True)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)


# Attached after class creation so TestRun can back-populate `ancestral_prompt`.
AncestralPrompt.test_runs = relationship("TestRun", order_by=TestRun.test_run_id, back_populates="ancestral_prompt")
def get_ancestral_prompt_by_id(
    db: Session,
    ancestral_prompt_id: uuid.UUID
):
    """Return the prompt row with the given id, or None when absent."""
    query = db.query(AncestralPrompt).filter_by(ancestral_prompt_id=ancestral_prompt_id)
    return query.first()
13,704 | from .models import LayerConfig, LayerState, RabbitMQLog, AncestralPrompt, TestRun
from sqlalchemy.orm import Session
from sqlalchemy import desc
import uuid
from typing import Optional, List, Dict, Any
class AncestralPrompt(Base):
    """ORM table of ancestral prompts, versioned via a parent pointer."""

    __tablename__ = 'ancestral_prompt'

    ancestral_prompt_id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True, nullable=False)
    # Previous version of this prompt; NULL for a root version.
    parent_ancestral_prompt_id = Column(UUID(as_uuid=True), nullable=True)
    prompt = Column(Text)
    is_active = Column(Boolean, default=True)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)


# Attached after class creation so TestRun can back-populate `ancestral_prompt`.
AncestralPrompt.test_runs = relationship("TestRun", order_by=TestRun.test_run_id, back_populates="ancestral_prompt")
def set_active_ancestral_prompt(
    db: Session,
    ancestral_prompt_id: uuid.UUID
):
    """Make the given prompt the only active one; return it (None if absent)."""
    target = db.query(AncestralPrompt).filter_by(ancestral_prompt_id=ancestral_prompt_id).first()
    if target:
        # Deactivate everything, then re-activate just the target.
        db.query(AncestralPrompt).update({AncestralPrompt.is_active: False})
        target.is_active = True
        db.add(target)
        db.commit()
        db.refresh(target)
    return target
13,705 | from .models import LayerConfig, LayerState, RabbitMQLog, AncestralPrompt, TestRun
from sqlalchemy.orm import Session
from sqlalchemy import desc
import uuid
from typing import Optional, List, Dict, Any
class AncestralPrompt(Base):
    """ORM table of ancestral prompts, versioned via a parent pointer."""

    __tablename__ = 'ancestral_prompt'

    ancestral_prompt_id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True, nullable=False)
    # Previous version of this prompt; NULL for a root version.
    parent_ancestral_prompt_id = Column(UUID(as_uuid=True), nullable=True)
    prompt = Column(Text)
    is_active = Column(Boolean, default=True)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)


# Attached after class creation so TestRun can back-populate `ancestral_prompt`.
AncestralPrompt.test_runs = relationship("TestRun", order_by=TestRun.test_run_id, back_populates="ancestral_prompt")
def get_ancestral_prompt(db: Session, ancestral_prompt_id: uuid.UUID):
    """Fetch one ancestral prompt by id; None when missing."""
    return (
        db.query(AncestralPrompt)
        .filter_by(ancestral_prompt_id=ancestral_prompt_id)
        .first()
    )
13,706 | from .models import LayerConfig, LayerState, RabbitMQLog, AncestralPrompt, TestRun
from sqlalchemy.orm import Session
from sqlalchemy import desc
import uuid
from typing import Optional, List, Dict, Any
class RabbitMQLog(Base):
    """ORM log of every RabbitMQ message observed, including custom headers."""

    __tablename__ = "rabbitmq_logs"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True, nullable=False)
    message_content = Column(Text)
    queue = Column(String(255))

    # Headers manually added to the RabbitMQ message log
    source_bus = Column(String(50))
    parent_message_id = Column(UUID(as_uuid=True))
    destination_bus = Column(String(50))
    layer_name = Column(String(50))
    llm_messages = Column(JSON)
    config_id = Column(UUID(as_uuid=True), ForeignKey('layer_config.config_id'))
    input = Column(Text)
    reasoning = Column(Text)

    # Properties from the message's properties
    content_type = Column(String(50))
    content_encoding = Column(String(50))
    delivery_mode = Column(Integer)
    priority = Column(Integer)
    correlation_id = Column(String(255))
    reply_to = Column(String(255))
    expiration = Column(String(50))
    message_id = Column(String(255))
    type = Column(String(50))
    user_id = Column(String(50))
    app_id = Column(String(50))
    cluster_id = Column(String(255))

    @classmethod
    def from_message(cls, method, properties, body):
        """Build a log row from a pika delivery (method, properties, body).

        Was missing @classmethod despite taking `cls`; without the decorator
        a call on the class bound the delivery `method` to the cls slot.
        """
        headers = properties.headers or {}

        # Deserialize the llm_messages header (JSON text), best-effort.
        # Bound before the try so the error message can never NameError.
        deserialized_llm_messages = None
        llm_messages = headers.get('llm_messages')
        try:
            if llm_messages:
                deserialized_llm_messages = json.loads(llm_messages)
        except (TypeError, ValueError):
            # json.JSONDecodeError subclasses ValueError; narrower than the
            # previous bare except.
            print("Error decoding JSON for llm_messages:", llm_messages)

        log_entry = cls(
            queue=method.routing_key,
            message_content=body.decode(),
            # Message Properties
            content_type=properties.content_type,
            content_encoding=properties.content_encoding,
            delivery_mode=properties.delivery_mode,
            priority=properties.priority,
            correlation_id=properties.correlation_id,
            reply_to=properties.reply_to,
            expiration=properties.expiration,
            message_id=properties.message_id,
            type=properties.type,
            user_id=properties.user_id,
            app_id=properties.app_id,
            cluster_id=properties.cluster_id,
            # Extracting and assigning header fields
            source_bus=headers.get('source_bus'),
            parent_message_id=headers.get('parent_message_id'),
            destination_bus=headers.get('destination_bus'),
            layer_name=headers.get('layer_name'),
            llm_messages=deserialized_llm_messages,
            config_id=uuid.UUID(headers.get('config_id')) if headers.get('config_id') else None,
            input=headers.get('input'),
            reasoning=headers.get('reasoning'),
        )
        return log_entry

    layer_config = relationship("LayerConfig", back_populates="rabbitmq_logs")
class LayerConfig(Base):
    """ORM model for a versioned per-layer configuration (prompts + LLM params)."""
    __tablename__ = 'layer_config'
    config_id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True, nullable=False)
    # Config this one was derived from (version chain); NULL for the first version.
    parent_config_id = Column(UUID(as_uuid=True), nullable=True)
    layer_name = Column(String, nullable=False)
    prompts = Column(JSON, nullable=False)
    llm_model_parameters = Column(JSON, nullable=False)
    is_active = Column(Boolean, default=True)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
    rabbitmq_logs = relationship("RabbitMQLog", back_populates="layer_config")
def get_layer_logs(db: Session, layer_name: str):
    """Return (RabbitMQLog, LayerConfig) row pairs for every log of *layer_name*.

    Raises ValueError when the layer has no logs.
    """
    pairs = (
        db.query(RabbitMQLog, LayerConfig)
        .join(LayerConfig, RabbitMQLog.config_id == LayerConfig.config_id)
        .filter(LayerConfig.layer_name == layer_name)
        .all()
    )
    if not pairs:
        raise ValueError("No logs found for layer_name: {}".format(layer_name))
    return pairs
13,707 | from .models import LayerConfig, LayerState, RabbitMQLog, AncestralPrompt, TestRun
from sqlalchemy.orm import Session
from sqlalchemy import desc
import uuid
from typing import Optional, List, Dict, Any
def create_layer_state(db: Session, layer_name: str, process_messages: bool = False):
    """Insert a LayerState row for *layer_name* and return the refreshed instance."""
    state = LayerState(layer_name=layer_name, process_messages=process_messages)
    db.add(state)
    db.commit()
    db.refresh(state)
    return state
class LayerState(Base):
    """ORM model holding the runtime on/off switch for one ACE layer."""
    __tablename__ = 'layer_state'
    layer_id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True, nullable=False)
    layer_name = Column(String, nullable=False)
    # Whether the layer should currently consume messages from its queues.
    process_messages = Column(Boolean, default=False)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
class LayerConfig(Base):
    """ORM model for a versioned per-layer configuration (prompts + LLM params)."""
    __tablename__ = 'layer_config'
    config_id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True, nullable=False)
    # Config this one was derived from (version chain); NULL for the first version.
    parent_config_id = Column(UUID(as_uuid=True), nullable=True)
    layer_name = Column(String, nullable=False)
    prompts = Column(JSON, nullable=False)
    llm_model_parameters = Column(JSON, nullable=False)
    is_active = Column(Boolean, default=True)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
    rabbitmq_logs = relationship("RabbitMQLog", back_populates="layer_config")
def add_layer_config(
    db: Session,
    config_id: Optional[uuid.UUID],
    layer_name: str,
    prompts,
    llm_model_parameters,
):
    """Create and activate a new LayerConfig for *layer_name*.

    Ensures a LayerState row exists for the layer, deactivates every existing
    config for the layer, and — when *config_id* resolves to an existing row —
    records that row as the parent of the new config. Returns the freshly
    committed LayerConfig.
    """
    # Make sure the layer has a state row; create a paused one if missing.
    layer_state = db.query(LayerState).filter_by(layer_name=layer_name).first()
    if not layer_state:
        layer_state = create_layer_state(
            db=db,
            layer_name=layer_name,
            process_messages=False,
        )
    # Deactivate all existing configs for this layer; the new one becomes active.
    db.query(LayerConfig).filter_by(
        layer_name=layer_name
    ).update({LayerConfig.is_active: False})
    # Look up the parent config, if a config_id was supplied.
    current_config = None
    if config_id:
        current_config = (
            db.query(LayerConfig)
            .filter_by(config_id=config_id)
            .first()
        )
    # Single construction path (formerly duplicated across two branches).
    new_config = LayerConfig(
        parent_config_id=current_config.config_id if current_config else None,
        layer_name=layer_name,
        prompts=prompts,
        llm_model_parameters=llm_model_parameters,
        is_active=True
    )
    db.add(new_config)
    db.commit()
    db.refresh(new_config)
    return new_config
13,708 | from .models import LayerConfig, LayerState, RabbitMQLog, AncestralPrompt, TestRun
from sqlalchemy.orm import Session
from sqlalchemy import desc
import uuid
from typing import Optional, List, Dict, Any
class LayerConfig(Base):
    # NOTE(review): this def is nested under the class yet takes no self/cls and
    # queries via an explicit Session — it reads as a module-level helper whose
    # placement got mangled; confirm intended nesting.
    def get_all_layer_config(db: Session, layer_name: str):
        """Return every config for *layer_name*: active ones first, then newest first."""
        return (
            db.query(LayerConfig)
            .filter_by(layer_name=layer_name)
            .order_by(
                desc(LayerConfig.is_active),
                desc(LayerConfig.updated_at)
            )
            .all()
        )
13,709 | from .models import LayerConfig, LayerState, RabbitMQLog, AncestralPrompt, TestRun
from sqlalchemy.orm import Session
from sqlalchemy import desc
import uuid
from typing import Optional, List, Dict, Any
class LayerConfig(Base):
    """ORM model for a versioned per-layer configuration (prompts + LLM params)."""
    __tablename__ = 'layer_config'
    config_id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True, nullable=False)
    # Config this one was derived from (version chain); NULL for the first version.
    parent_config_id = Column(UUID(as_uuid=True), nullable=True)
    layer_name = Column(String, nullable=False)
    prompts = Column(JSON, nullable=False)
    llm_model_parameters = Column(JSON, nullable=False)
    is_active = Column(Boolean, default=True)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
    rabbitmq_logs = relationship("RabbitMQLog", back_populates="layer_config")


def get_layer_config(db: Session, layer_name: str):
    """Return the currently active config for *layer_name*, or None."""
    return (
        db.query(LayerConfig)
        .filter_by(layer_name=layer_name)
        .filter_by(is_active=True)
        .first()
    )
13,710 | from .models import LayerConfig, LayerState, RabbitMQLog, AncestralPrompt, TestRun
from sqlalchemy.orm import Session
from sqlalchemy import desc
import uuid
from typing import Optional, List, Dict, Any
class LayerState(Base):
    """ORM model holding the runtime on/off switch for one ACE layer."""
    __tablename__ = 'layer_state'
    layer_id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True, nullable=False)
    layer_name = Column(String, nullable=False)
    # Whether the layer should currently consume messages from its queues.
    process_messages = Column(Boolean, default=False)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
def get_layer_state_by_name(db: Session, layer_name: str):
    """Fetch the LayerState for *layer_name*, creating a default row if none exists."""
    layer_state = db.query(LayerState).filter(LayerState.layer_name == layer_name).first()
    if layer_state is not None:
        return layer_state
    # Lazily create the row with column defaults (process_messages=False).
    layer_state = LayerState(layer_name=layer_name)
    db.add(layer_state)
    db.commit()
    return layer_state
13,711 | from .models import LayerConfig, LayerState, RabbitMQLog, AncestralPrompt, TestRun
from sqlalchemy.orm import Session
from sqlalchemy import desc
import uuid
from typing import Optional, List, Dict, Any
class LayerState(Base):
    """ORM model holding the runtime on/off switch for one ACE layer."""
    __tablename__ = 'layer_state'
    layer_id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True, nullable=False)
    layer_name = Column(String, nullable=False)
    # Whether the layer should currently consume messages from its queues.
    process_messages = Column(Boolean, default=False)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
def update_layer_state(
    db: Session,
    process_messages: bool,
    layer_name: Optional[str] = None
):
    """Set the process_messages flag for a layer, creating the row if needed.

    Raises ValueError when *layer_name* is not supplied (it is optional in the
    signature only for call-site symmetry).
    """
    # Guard clause: a layer name is mandatory despite the optional signature.
    if layer_name is None:
        raise ValueError("Layer_name must be provided.")
    db_layer_state = db.query(LayerState).filter(LayerState.layer_name == layer_name).first()
    if not db_layer_state:
        db_layer_state = LayerState(layer_name=layer_name)
        db.add(db_layer_state)
        db.commit()
    db_layer_state.process_messages = process_messages
    db.commit()
    db.refresh(db_layer_state)
    return db_layer_state
13,713 | import json
import os
from dotenv import load_dotenv
def get_environment_variable(name):
    """Return the value of environment variable *name*; raise if unset or blank."""
    value = os.getenv(name)
    if value and value.strip():
        return value
    raise EnvironmentError(f"{name} environment variable not set! Check your .env file.")
13,715 | import asyncio
from dotenv import load_dotenv
from ace.ace_system import AceSystem
from channels.discord.discord_bot import DiscordBot
from channels.web.fastapi_app import FastApiApp
from llm.gpt import GPT
from media.giphy_finder import GiphyFinder
from memory.weaviate_memory_manager import WeaviateMemoryManager
from util import get_environment_variable
class AceSystem:
    """Wires together the ACE layers and the north/south message busses."""

    def __init__(self, llm: GPT, model: str, memory_manager: WeaviateMemoryManager, serpapi_key: str):
        self.northbound_bus = Bus('northbound')
        self.southbound_bus = Bus('southbound')
        self.l1_aspirational_layer: L1AspirationalLayer = L1AspirationalLayer()
        self.l2_global_strategy_layer: L2GlobalStrategyLayer = L2GlobalStrategyLayer(
            llm,
            model,
            memory_manager,
            self.l1_aspirational_layer
        )
        self.l3_agent: L3AgentLayer = L3AgentLayer(
            llm,
            model,
            memory_manager,
            serpapi_key
        )
        # NOTE(review): l2_global_strategy_layer is constructed but not listed
        # here, so it is invisible to get_layer()/get_layers() — confirm intent.
        self.layers = [
            self.l1_aspirational_layer,
            self.l3_agent
        ]

    def get_layer(self, layer_id: str):
        """Return the registered layer whose get_id() matches *layer_id*, or None."""
        for layer in self.layers:
            if layer.get_id() == layer_id:
                return layer
        return None

    def get_layers(self):
        # All registered layers, in order.
        return self.layers

    async def start(self):
        # This would be the place for things like this:
        # self.northbound_bus.subscribe(self.l1_aspirational_layer.on_northbound_message)
        pass
class DiscordBot:
    """Discord front-end that forwards channel messages into the ACE L3 agent.

    NOTE(review): register_events defines on_ready/on_message but never attaches
    them to self.client — @self.client.event decorators appear to have been
    stripped from this copy; confirm against the deployed version.
    """

    def __init__(self, bot_token, bot_name, ace_system: AceSystem, media_generators: [MediaGenerator]):
        intents = discord.Intents.default()
        # Required (privileged intent) so the bot can read message text.
        intents.message_content = True
        self.client = discord.Client(intents=intents)
        self.bot_token = bot_token
        self.bot_name = bot_name.lower()
        self.register_events()
        self.ace_system = ace_system
        self.media_generators = media_generators

    def register_events(self):
        async def on_ready():
            print(f'We have logged in to discord as {self.client.user}')

        async def on_message(message):
            await self.process_message(message)

    async def process_message(self, message):
        """Route one incoming Discord message into the ACE system."""
        # Check if the message is from an allowed channel
        if message.channel.name not in ["bot-testing", "team5-stacey", "chat1"]:
            return
        # Ignore our own outgoing messages to avoid feedback loops.
        if self.is_message_from_me(message):
            return
        print(f"Got discord message from {message.author}: {message.content}")
        print(pprint.pformat(message.author))
        discord_communication_channel = DiscordCommunicationChannel(
            self.client, message.channel, message, self.media_generators
        )
        try:
            await self.ace_system.l3_agent.process_incoming_user_message(discord_communication_channel)
        except Exception as e:
            # Surface the failure both to stdout and to the Discord channel.
            print("Damn! Something went wrong!", e)
            traceback_str = traceback.format_exc()  # Get the string representation of the traceback
            print("Traceback:", traceback_str)
            await message.channel.send(f"Damn! Something went wrong!: {str(e)}")

    def is_message_from_me(self, message):
        # True when the message was authored by this bot account.
        return message.author == self.client.user

    async def start(self):
        """Connect to Discord and block until the client stops."""
        return await self.client.start(self.bot_token)
class FastApiApp:
    """FastAPI backend exposing chat, bus traffic, LLM logs, and layer state
    over HTTP and websockets.

    NOTE(review): the nested handlers in setup_routes carry no
    @app.get/@app.post/@app.websocket decorators in this copy — they appear to
    have been stripped; confirm the route registrations against the original.
    """

    def __init__(self, ace_system, media_generators: [MediaGenerator], llm: GPT):
        self.app = FastAPI()
        self.ace = ace_system
        self.media_generators = media_generators
        # One websocket fan-out manager per ACE layer...
        self.layer_connection_managers = {
            layer.get_id(): WebSocketConnectionManager() for layer in ace_system.get_layers()
        }
        # ...and one per bus direction.
        self.bus_connection_managers = {
            'northbound': WebSocketConnectionManager(),
            'southbound': WebSocketConnectionManager()
        }
        self.chatConnectionManager = WebSocketConnectionManager()
        self.llmConnectionManager = WebSocketConnectionManager()
        self.app.add_exception_handler(Exception, self.custom_exception_handler)
        # Setup CORS
        self.app.add_middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )
        self.setup_routes()
        self.llm = llm

    # noinspection PyUnusedLocal
    async def custom_exception_handler(self, request: Request, exc: Exception):
        """
        Custom exception handler that logs the stack trace and returns a JSON response.
        """
        print("custom_exception_handler called")
        traceback_str = traceback.format_exc()
        print(traceback_str)
        return JSONResponse(content={"error": str(exc), "traceback": traceback_str}, status_code=500)

    async def llm_completion_listener(self, completion: ChatCompletion):
        # Fan every LLM completion out to the llm-log websocket clients.
        await self.llmConnectionManager.send_message(completion)

    def setup_routes(self):
        app = self.app

        async def websocket_endpoint_layer(websocket: WebSocket, layer_id: str):
            # Subscribe a websocket client to one layer's state updates.
            if layer_id not in self.layer_connection_managers:
                raise HTTPException(status_code=404, detail="Layer not found")
            print(f"websocket_endpoint_layer for {layer_id} called")
            await self.layer_connection_managers[layer_id].connect(websocket)

        async def websocket_endpoint_bus(websocket: WebSocket, bus_name: str):
            # Subscribe a websocket client to one bus's message stream.
            print("Blaj")
            if bus_name not in self.bus_connection_managers:
                print(f"Bus not found: {bus_name}")
                raise HTTPException(status_code=404, detail="Bus not found")
            print(f"websocket_endpoint_bus for {bus_name} called")
            await self.bus_connection_managers[bus_name].connect(websocket)

        async def websocket_endpoint_chat(websocket: WebSocket):
            print("websocket_endpoint_chat called")
            await self.chatConnectionManager.connect(websocket)

        async def websocket_endpoint_llmlog(websocket: WebSocket):
            print("websocket_endpoint_llmlog called")
            await self.llmConnectionManager.connect(websocket)

        # noinspection PyUnusedLocal
        async def custom_exception_handler(request: Request, exc: Exception):
            """
            Custom exception handler that logs the stack trace and returns a JSON response.
            """
            traceback_str = traceback.format_exc()
            print(traceback_str)
            return JSONResponse(content={"error": str(exc), "traceback": traceback_str}, status_code=500)

        async def chat(request: Request):
            # POST-style chat entry point: body carries the message history.
            data = await request.json()
            messages: [ChatMessage] = data.get('messages', [])
            communication_channel = WebCommunicationChannel(messages, self.chatConnectionManager, self.media_generators)
            try:
                await self.ace.l3_agent.process_incoming_user_message(communication_channel)
                return JSONResponse(content={"success": True}, status_code=200)
            except Exception as e:
                print("Damn, something went wrong while processing incoming user message!")
                traceback_str = traceback.format_exc()
                print(traceback_str)
                return create_chat_message("Stacey", f"Damn! Something went wrong: {str(e)}")

        async def chat_get(message: str):
            """
            For testing purposes. Lets you send a single chat message and see the response (if any)
            """
            if not message:
                raise HTTPException(status_code=400, detail="message parameter is required")
            messages = [create_chat_message("api-user", message)]
            communication_channel = WebCommunicationChannel(messages, self.chatConnectionManager, self.media_generators)
            try:
                await self.ace.l3_agent.process_incoming_user_message(communication_channel)
                return "Message sent to Stacey"
            except Exception as e:
                traceback_str = traceback.format_exc()
                print(traceback_str)
                return JSONResponse(content={"error": str(e), "traceback": traceback_str}, status_code=400)

        async def get_llm_completions():
            # Full log of LLM completions made so far.
            return self.llm.get_completion_log()

        async def view_bus(name: str):
            # Dump all messages currently held on one bus.
            if name == 'northbound':
                return self.ace.northbound_bus.messages()
            elif name == 'southbound':
                return self.ace.southbound_bus.messages()
            else:
                raise HTTPException(status_code=400, detail="Invalid bus name. Choose 'northbound' or 'southbound'.")

        async def get_layer_state(layer_id: str):
            if layer_id not in self.layer_connection_managers:
                raise HTTPException(status_code=404, detail="Layer not found")
            layer = self.ace.get_layer(layer_id)
            if not layer:
                raise HTTPException(status_code=404, detail="Layer not found: " + layer_id)
            layer_state: LayerState = layer.get_layer_state()  # assuming get_current_state() is a method
            return layer_state

        async def publish_message(request: Request):
            # Manually inject a message onto one of the busses (debug tooling).
            print("publish_message called")
            data = await request.json()
            print("data: " + str(data))
            sender = data.get('sender')
            message = data.get('message')
            bus_name = data.get('bus')
            if not sender or not message or not bus_name:
                print("sender, message, and bus are required fields")
                raise HTTPException(status_code=400, detail="sender, message, and bus are required fields")
            if bus_name == 'northbound':
                bus = self.ace.northbound_bus
            elif bus_name == 'southbound':
                bus = self.ace.southbound_bus
            else:
                raise HTTPException(status_code=400, detail="Invalid bus name. Choose 'northbound' or 'southbound'.")
            await bus.publish(sender, message)
            return {"success": True, "message": "Message published successfully"}

        async def clear_messages(request: Request):
            # Wipe all messages from one bus (debug tooling).
            data = await request.json()
            bus_name = data.get('bus')
            if not bus_name:
                raise HTTPException(status_code=400, detail="'bus' is a required field")
            if bus_name == 'northbound':
                bus = self.ace.northbound_bus
            elif bus_name == 'southbound':
                bus = self.ace.southbound_bus
            else:
                raise HTTPException(status_code=400, detail="Invalid bus name. Choose 'northbound' or 'southbound'.")
            bus.clear_messages()
            return {"success": True, "message": "Messages cleared successfully"}

        def root():
            # Simple liveness page.
            return ('<html>Hi! Stacey here. Yes, the backend is up and running! '
                    '<a href="chat?message=hi">/chat?message=hi</a></html>')

    def setup_listeners(self):
        """Bridge bus traffic, layer state changes, and LLM completions to websockets."""
        for bus in [self.ace.northbound_bus, self.ace.southbound_bus]:
            bus.subscribe(self.create_bus_listener(bus))
        for layer in self.ace.get_layers():
            layer.add_layer_state_listener(self.create_layer_state_listener(layer))
        self.llm.add_completion_listener(self.llm_completion_listener)

    def create_bus_listener(self, bus):
        # Returns an async callback that forwards bus messages to websocket clients.
        async def listener(sender, message):
            try:
                print(f"flask_app detected message on {bus.name} from {sender}: {message}")
                await self.bus_connection_managers[bus.name].send_message({
                    'eventType': 'busMessage',
                    'data': {
                        'bus': bus.name,
                        'sender': sender,
                        'message': message
                    }
                })
            except Exception as e:
                print(f"Error in bus listener: {e}")
        return listener

    def create_layer_state_listener(self, layer):
        # Returns an async callback that forwards layer state changes to websocket clients.
        async def listener(layer_state: LayerState):
            try:
                print(f"flask_app detected state change in layer {layer.get_id()}: {layer_state}")
                await self.layer_connection_managers[layer.get_id()].send_message(layer_state)
            except Exception as e:
                print(f"Error in layer status listener: {e}")
        return listener

    async def run(self):
        """Attach listeners and serve the app with uvicorn (blocking)."""
        self.setup_listeners()
        config = uvicorn.Config(app=self.app, host="localhost", port=5000)
        server = uvicorn.Server(config)
        return await server.serve()
class GPT:
    """Thin wrapper around the OpenAI chat-completion and image APIs."""

    def __init__(self, api_key=None):
        # Fix: callers construct this as GPT(api_key) and create_image reads
        # self.api_key, but __init__ previously accepted no key and never stored
        # one. The key stays optional because OpenAI() can also pick it up from
        # the OPENAI_API_KEY environment variable.
        self.api_key = api_key
        self.log = Logger(self.__class__.__name__)
        self.client = OpenAI()

    def create_conversation_completion(
        self, model, conversation: List[GptMessage]
    ) -> GptMessage:
        """Run one chat completion over *conversation* and return the reply message."""
        chat_completion = self.client.chat.completions.create(
            model=model, messages=conversation
        )
        response = chat_completion.choices[0].message
        return response

    def create_image(self, prompt, size="256x256") -> str:
        """Generate an image for *prompt* and return its URL.

        NOTE(review): uses the legacy module-level openai.Image API alongside
        the new client above — confirm the installed openai package version
        still supports it.
        """
        self.log.debug("Generating image for prompt: " + prompt)
        openai.api_key = self.api_key
        result = openai.Image.create(prompt=prompt, n=1, size=size)
        image_url = result.data[0].url
        self.log.debug(
            ".... finished generating image for prompt" + prompt + ":\n" + image_url
        )
        return image_url
class GiphyFinder:
    """Looks up GIF URLs via the Giphy 'translate' endpoint."""

    def __init__(self, giphy_api_key):
        self.giphy_api_key = giphy_api_key

    async def get_giphy_url(self, query: str):
        """Return the original-size GIF URL best matching *query*, or None on failure."""
        url = f'https://api.giphy.com/v1/gifs/translate?api_key={self.giphy_api_key}&s={query}&limit=1'
        async with httpx.AsyncClient() as client:
            response = await client.get(url)
            if response.status_code != 200:
                print(f'Failed to retrieve GIF: {response.status_code}')
                return None
            payload = response.json()
            return payload['data']['images']['original']['url']
class WeaviateMemoryManager:
    """Stores and retrieves Memory dicts in a Weaviate vector database.

    Embeddings are computed on the Weaviate side via OpenAI (API key forwarded
    as a request header). Relies on module-level data_class_name and
    data_class_definition defined elsewhere in this module.
    """

    def __init__(self, weaviate_url, openai_api_key):
        self.client = weaviate.Client(
            url=weaviate_url,
            additional_headers={
                "X-OpenAI-Api-Key": openai_api_key,
            }
        )
        # Idempotent: creates the schema class only on first run.
        self.create_weaviate_class_if_doesnt_already_exist(data_class_definition)

    def save_memory(self, memory: Memory):
        """Persist one memory object."""
        self.client.data_object.create(
            memory,
            data_class_name
        )

    def get_all_memories(self) -> list[Memory]:
        """
        Ordered by relevance
        """
        # NOTE(review): no near_text clause here, so ordering is whatever
        # Weaviate returns by default — the docstring's claim may be stale.
        result = (
            self.client.query
            .get("Memory", ["time_utc", "content"])
            .do()
        )
        return result["data"]["Get"][data_class_name]

    def remove_closest_memory(self, search_text, max_distance) -> Optional[Memory]:
        """Delete and return the memory nearest to *search_text* within
        *max_distance*; return None when nothing matches."""
        result = (
            self.client.query
            .get("Memory", ["time_utc", "content"])
            .with_near_text({
                "concepts": search_text,
                "distance": max_distance
            })
            .with_limit(1)
            .with_additional(["distance", "id"])
            .do()
        )
        print("weaviate query result: " + str(result))
        memories = result["data"]["Get"][data_class_name]
        if not memories:
            return None  # No matching memory found
        closest_memory = memories[0]
        uuid_to_delete = closest_memory['_additional']['id']
        self.client.data_object.delete(
            uuid=uuid_to_delete,
            class_name=data_class_name,
        )
        return closest_memory

    def find_relevant_memories(self, search_text, limit) -> list[Memory]:
        """
        Ordered by relevance
        """
        result = (
            self.client.query
            .get("Memory", ["time_utc", "content"])
            .with_near_text({
                "concepts": search_text
            })
            .with_limit(limit)
            .with_additional(["distance"])
            .do()
        )
        print("weaviate query result: " + str(result))
        return result["data"]["Get"][data_class_name]

    def create_weaviate_class_if_doesnt_already_exist(self, class_definition):
        # Bootstrap the schema class once; no-op when it already exists.
        existing_classes = self.client.schema.get()
        if not any(class_info['class'] == data_class_name for class_info in existing_classes['classes']):
            self.client.schema.create_class(class_definition)
            print(f"Weaviate schema {data_class_name} created successfully")
def get_environment_variable(name):
    """Return the value of environment variable *name*; raise if unset or blank."""
    value = os.getenv(name)
    if value is not None and value.strip() != "":
        return value
    raise EnvironmentError(
        f"{name} environment variable not set! Check your .env file."
    )
async def stacey_main(start_discord, start_web):
    """Bootstrap the Stacey assistant: ACE core plus optional Discord/web front-ends.

    start_discord / start_web toggle the respective channels; both run
    concurrently until gather() returns.
    """
    load_dotenv()
    openai_api_key = get_environment_variable('OPENAI_API_KEY')
    llm = GPT(openai_api_key)
    weaviate_url = get_environment_variable('WEAVIATE_URL')
    memory_manager = WeaviateMemoryManager(weaviate_url, openai_api_key)
    serpapi_key = get_environment_variable('SERPAPI_KEY')
    ace = AceSystem(llm, get_environment_variable("DEFAULT_MODEL"), memory_manager, serpapi_key)
    giphy = GiphyFinder(get_environment_variable('GIPHY_API_KEY'))
    # KEYWORD[prompt] markers in outgoing messages get replaced by these generators.
    media_generators = [
        {"keyword": "IMAGE", "generator_function": llm.create_image},
        {"keyword": "GIF", "generator_function": giphy.get_giphy_url}
    ]
    await ace.start()
    # No-op placeholder tasks keep the final gather() valid when a channel is disabled.
    discord_task = asyncio.create_task(asyncio.sleep(0))
    if start_discord:
        discord_bot_token = get_environment_variable('DISCORD_BOT_TOKEN')
        discord_bot = DiscordBot(discord_bot_token, "stacey", ace, media_generators)
        print('Starting discord bot')
        discord_task = asyncio.create_task(discord_bot.start())
        print('Started discord bot')
    web_task = asyncio.create_task(asyncio.sleep(0))
    if start_web:
        web_backend = FastApiApp(ace, media_generators, llm)
        print('Starting web backend')
        web_task = asyncio.create_task(web_backend.run())
        print('Started web backend')
    await asyncio.gather(discord_task, web_task)
13,716 | import asyncio
import re
from typing import TypedDict, Callable, Awaitable, Union
class MediaGenerator(TypedDict):
    # Marker keyword used in message text as KEYWORD[prompt], e.g. "IMAGE".
    keyword: str
    # Async callable mapping a prompt string to a media URL (or None on failure).
    generator_function: Callable[[str], Awaitable[Union[str, None]]]


async def replace_media_prompt_with_media_url_formatted_as_markdown(media_generators: [MediaGenerator], message):
    """Replace every KEYWORD[prompt] occurrence in *message* with a markdown
    image embed of the generated media URL, and return the resulting string.

    All prompts for one generator are generated concurrently via gather().
    """
    for generator in media_generators:
        keyword = re.escape(generator['keyword'])  # Escape the keyword to ensure it's safe for regex
        pattern = re.compile(f"{keyword}\\[([^\\]]+)]")  # Create a regex pattern for this media generator
        matches = pattern.findall(message)
        coroutines = [generator['generator_function'](match) for match in matches]
        results = await asyncio.gather(*coroutines)
        for match, media_url in zip(matches, results):
            try:
                # Fix: the replacement used to be an empty f-string (f""), which
                # silently deleted the media marker instead of embedding the
                # generated media as markdown, as the function name promises.
                replacement = f"![{match}]({media_url})"
                message = message.replace(f"{generator['keyword']}[{match}]", replacement)
            except Exception as exc:
                print(f'Generated an exception: {exc}')
    return message
13,717 | import asyncio
import re
from typing import TypedDict, Callable, Awaitable, Union
class MediaGenerator(TypedDict):
    # Marker keyword used in message text as KEYWORD[prompt].
    keyword: str
    # Async callable mapping a prompt string to a media URL (or None on failure).
    generator_function: Callable[[str], Awaitable[Union[str, None]]]


async def split_message_by_media(media_generators: [MediaGenerator], message):
    """Split *message* into an ordered list of segments: plain-text strings
    interleaved with generated media URLs, one per KEYWORD[prompt] occurrence.

    NOTE(review): last_end is shared across the outer generator loop, so with
    more than one generator whose matches interleave in the text, the slicing
    of intermediate text segments may be wrong — confirm multi-generator input.
    """
    segments = []
    last_end = 0  # Initialize last_end outside the loop
    for generator in media_generators:
        keyword = re.escape(generator['keyword'])
        pattern = re.compile(f"{keyword}\\[([^\\]]+)]")
        coroutines = []
        positions = []
        for match in pattern.finditer(message):
            media_prompt = match.group(1)
            # Text between the previous match and this one becomes its own segment.
            text_segment = message[last_end:match.start()].strip()
            if text_segment:
                segments.append(text_segment)
            # Placeholder slot; filled with the generator's result below.
            segments.append(None)
            coroutines.append(generator['generator_function'](media_prompt))
            positions.append(len(segments) - 1)
            last_end = match.end()  # Update last_end for each match
        results = await asyncio.gather(*coroutines)
        for result, position in zip(results, positions):
            try:
                segments[position] = result
            except Exception as exc:
                # NOTE(review): gather() raises before this point if a generator
                # fails, so this except can only catch indexing errors here.
                print(f'Generated an exception: {exc}')
    final_text_segment = message[last_end:].strip()  # Move this line outside the loop
    if final_text_segment:  # Check and append final_text_segment outside the loop
        segments.append(final_text_segment)
    return segments
13,718 | from datetime import datetime, timezone
from typing import TypedDict, Dict
class ChatMessage(TypedDict):
    """One chat message exchanged with the system."""
    sender: str
    content: str
    time_utc: str  # formatted like 2023-01-30T13:45:00Z


def create_chat_message(sender: str, content: str) -> ChatMessage:
    """Build a ChatMessage stamped with the current UTC time."""
    timestamp = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
    return {"sender": sender, "content": content, "time_utc": timestamp}
13,719 | from datetime import datetime, timezone
from typing import TypedDict, Dict
class ChatMessage(TypedDict):
    """One chat message exchanged with the system."""
    sender: str
    content: str
    time_utc: str  # formatted like 2023-01-30T13:45:00Z


def stringify_chat_message(chat_message: ChatMessage):
    """Render one message as '<time> [sender] content'."""
    return "<{time_utc}> [{sender}] {content}".format(**chat_message)


def stringify_chat_history(conversation: [ChatMessage]):
    """Render a conversation as a newline-separated bullet list."""
    lines = ["- " + stringify_chat_message(message) for message in conversation]
    return "\n".join(lines)
13,720 | from datetime import datetime, timezone
from typing import TypedDict, Dict
class Memory(TypedDict):
    """One stored memory: free-text content plus its creation timestamp."""
    time_utc: str  # ISO-8601 UTC timestamp
    content: str


def create_memory(content: str) -> Memory:
    """Wrap *content* in a Memory stamped with the current UTC time (ISO format)."""
    now = datetime.now(timezone.utc)
    return {"time_utc": now.isoformat(), "content": content}
13,721 | import httpx
from bs4 import BeautifulSoup
from actions.action import Action
async def get_compressed_web_content(url) -> str:
    """Fetch *url* and return its HTML stripped down for LLM consumption.

    Removes script/style/head/svg elements and all tag attributes, then
    pretty-prints. Raises httpx.HTTPStatusError on 4xx/5xx responses.
    """
    async with httpx.AsyncClient() as client:
        response = await client.get(url)
        response.raise_for_status()  # Raise HTTPError for bad responses (4xx and 5xx)
    soup = BeautifulSoup(response.text, 'html.parser')
    # Drop non-content elements entirely.
    for element in soup(['script', 'style', 'head', 'svg']):
        element.extract()
    # Strip every attribute (classes, ids, inline styles, ...) from remaining tags.
    for tag in soup.find_all(True):
        tag.attrs = {}
    return soup.prettify()
13,722 | from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.tabbedpanel import TabbedPanel, TabbedPanelItem
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from kivy.lang import Builder
from kivy.uix.scrollview import ScrollView
from flask import Flask, request, jsonify
import threading
import requests
def layer_update():
    """Flask endpoint body: receive a layer status update and show it in the Kivy UI.

    NOTE(review): no @app.route decorator is visible in this copy, and kivy_app
    is a module-level global defined elsewhere — confirm route registration and
    that kivy_app is assigned before the server starts handling requests.
    """
    data = request.json
    layer_number = data.get('layer_number')
    message = data.get('message', '')
    kivy_app.update_label(layer_number, message)
    return jsonify({"status": "received"})
13,723 | from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.tabbedpanel import TabbedPanel, TabbedPanelItem
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from kivy.lang import Builder
from kivy.uix.scrollview import ScrollView
from flask import Flask, request, jsonify
import threading
import requests
# Flask app served on a background thread; the Kivy UI runs on the main thread.
app = Flask(__name__)


def run_flask_app():
    """Run the Flask server (blocking); intended as a threading.Thread target.

    use_reloader=False is required when not running on the main thread.
    """
    app.run(port=5000, use_reloader=False, threaded=True)
13,724 | import os
import time
import importlib
from ace import util
from ace.logger import Logger
logger = Logger(os.path.basename(__file__))


def loader(resource_name):
    """Dynamically import and start the ACE resource named *resource_name*.

    The module ace.framework.resources.<resource_name> must define a class
    whose name is the PascalCase form of the resource name. Returns True on
    success, False on any (logged) failure.
    """
    module_name = f'ace.framework.resources.{resource_name}'
    try:
        resource_class_name = util.snake_to_class(resource_name)
        logger.debug(f"Converted resource_name to resource_class: {resource_class_name}")
        module = importlib.import_module(module_name)
        resource_class = getattr(module, resource_class_name)
        logger.debug(f"Imported {resource_class_name} from {module_name}")
        resource = resource_class()
        logger.debug(f"Created an instance of {resource_class}")
        logger.info(f"Calling start_resource method on the {resource_class} instance")
        resource.start_resource()
        logger.debug(f"Called start_resource method on the {resource_class} instance")
        return True
    except ImportError:
        # Fix: log messages previously referenced ace.framework.resource (singular),
        # which did not match the actual import path used above.
        logger.error(f"Failed to import module {module_name}", exc_info=True)
    except AttributeError:
        # Fix: previously referenced the unbound `resource_class` variable here,
        # which raised NameError whenever getattr() was what failed.
        logger.error(f"Failed to get class {resource_class_name} from module {module_name}", exc_info=True)
    except Exception as e:
        logger.error(f"An error occurred: {e}", exc_info=True)
    return False  # explicit failure result (was an implicit None)
13,727 | import os
import sys
import inspect
import psutil
def snake_to_class(string):
    """Convert a snake_case name to PascalCase, e.g. 'layer_state' -> 'LayerState'."""
    return "".join(map(str.title, string.split("_")))
13,728 | import os
import sys
import inspect
import psutil
def get_system_resource_usage():
    """Return a one-line summary of CPU load, memory, and disk usage."""
    gib = 1024 ** 3  # bytes per GiB
    # CPU Load (sampled over one second)
    cpu_string = f"CPU: {psutil.cpu_percent(interval=1)}%"
    # Memory Details: free/total in GB plus used percentage
    memory_info = psutil.virtual_memory()
    memory_string = (
        f"Memory: {memory_info.available / gib:.2f}/{memory_info.total / gib:.2f} GB"
        f" ({memory_info.percent}%)"
    )
    # Disk Details: free/total in GB plus free percentage for the root filesystem
    disk_info = psutil.disk_usage('/')
    disk_string = (
        f"Disk: {disk_info.free / gib:.2f}/{disk_info.total / gib:.2f} GB"
        f" ({100 - disk_info.percent}%)"
    )
    return f"{cpu_string} | {memory_string} | {disk_string}"
13,732 | import logging
import os
from ace import constants
# Configure root logging once at import time; individual loggers inherit this.
logging.basicConfig(level=logging.DEBUG)


def get_log_level(level_str):
    """Map a level name like 'INFO' to its numeric logging constant.

    Raises ValueError when *level_str* is not a recognised level name.
    """
    level = logging.getLevelName(level_str)
    if isinstance(level, int):
        return level
    raise ValueError(f'Invalid log level: {level_str}')
13,733 | from ace.settings import Settings
import aio_pika
from ace.logger import Logger
logger = Logger(__name__)
class Settings(BaseSettings):
    """Shared configuration for ACE resources: identity, AMQP credentials,
    well-known queue names, and the list of layers/resources to run."""
    name: str
    label: str
    # AMQP connection details; environment variables override framework defaults.
    amqp_host_name: str = (
        os.getenv("ACE_RABBITMQ_HOSTNAME") or constants.DEFAULT_RABBITMQ_HOSTNAME
    )
    amqp_username: str = (
        os.getenv("ACE_RABBITMQ_USERNAME") or constants.DEFAULT_RABBITMQ_USERNAME
    )
    amqp_password: str = (
        os.getenv("ACE_RABBITMQ_PASSWORD") or constants.DEFAULT_RABBITMQ_PASSWORD
    )
    # Well-known queue names used by setup_exchange/teardown_exchange.
    logging_queue: str = "logging"
    resource_log_queue: str = "resource_log"
    log_dir: str = "/var/log/ace"
    system_integrity_queue: str = "system_integrity"
    system_integrity_data_queue: str = "system_integrity_data"
    debug_data_queue: str = "debug_data"
    telemetry_subscribe_queue: str = "telemetry_subscribe"
    telemetry_subscriptions: List[str] = []
    # ACE cognitive layers, top (aspirational) to bottom (task prosecution).
    layers: List[str] = [
        "layer_1",
        "layer_2",
        "layer_3",
        "layer_4",
        "layer_5",
        "layer_6",
    ]
    # Non-layer resources that also run inside the framework.
    other_resources: List[str] = [
        "debug",
        "telemetry_manager",
        "logging",
        "busses",
    ]
async def setup_exchange(settings: Settings, channel: aio_pika.Channel, queue_name: str, durable=True):
    """Declare a fanout exchange for *queue_name* and bind the relevant queues.

    The queue itself is always bound; the system-integrity and logging queues
    are additionally bound so they mirror the traffic, except when *queue_name*
    is their own data/log queue.
    """
    exchange_name = f"exchange.{queue_name}"
    logger.debug(f"Setup exchange: {exchange_name}")
    await channel.declare_exchange(exchange_name, aio_pika.ExchangeType.FANOUT)
    primary_queue = await channel.declare_queue(queue_name, durable=durable)
    await primary_queue.bind(exchange_name)
    logger.debug(f"Bound {queue_name} to exchange {exchange_name}")
    # Mirror traffic onto the system-integrity queue (not for its own data queue).
    if settings.system_integrity_queue and queue_name != settings.system_integrity_data_queue:
        integrity_queue = await channel.declare_queue(settings.system_integrity_queue, durable=True)
        await integrity_queue.bind(exchange_name)
        logger.debug(f"Bound {settings.system_integrity_queue} to exchange {exchange_name}")
    # Mirror traffic onto the logging queue (not for the resource log itself).
    if settings.logging_queue and queue_name != settings.resource_log_queue:
        log_queue = await channel.declare_queue(settings.logging_queue, durable=True)
        await log_queue.bind(exchange_name)
        logger.debug(f"Bound {settings.logging_queue} to exchange {exchange_name}")
13,734 | from ace.settings import Settings
import aio_pika
from ace.logger import Logger
logger = Logger(__name__)
class Settings(BaseSettings):
async def teardown_exchange(settings: Settings, channel: aio_pika.Channel, queue_name: str, durable=True):
    """Unbind and delete the queues attached to *queue_name*'s fanout exchange,
    then delete the exchange itself.

    Deletion order mirrors setup in reverse: the shared system-integrity and
    logging queues first, the primary queue next, the exchange last.
    """
    exchange_name = f"exchange.{queue_name}"
    logger.debug(f"Teardown exchange: {exchange_name}")

    async def _remove(name, is_durable):
        # Re-declare to obtain a handle, detach from the exchange, then delete
        # unconditionally (even if the queue still holds messages or consumers).
        doomed_queue = await channel.declare_queue(name, durable=is_durable)
        await doomed_queue.unbind(exchange_name)
        await doomed_queue.delete(if_empty=False, if_unused=False)
        logger.debug(f"Removed {name}")

    if settings.system_integrity_queue and queue_name != settings.system_integrity_data_queue:
        await _remove(settings.system_integrity_queue, True)
    # NOTE(review): unlike setup_exchange, this branch does not exclude the
    # resource_log queue, so the logging queue is removed on every teardown —
    # confirm the asymmetry is intended.
    if settings.logging_queue:
        await _remove(settings.logging_queue, True)
    await _remove(queue_name, durable)
    exchange = await channel.get_exchange(exchange_name)
    await exchange.delete()
13,735 | import asyncio
import aio_pika
from ace.settings import Settings
from ace.logger import Logger
logger = Logger(__name__)
class Settings(BaseSettings):
async def get_connection(settings: Settings,
                         loop=None,
                         max_retries=5,
                         delay_factor=2,
                         heartbeat=600,
                         blocked_connection_timeout=300,
                         ):
    """Open a robust AMQP connection using the credentials in *settings*.

    Retries up to *max_retries* times with a linearly increasing delay
    (attempt number * delay_factor seconds) between attempts.

    Args:
        settings: Resource settings providing host name and credentials.
        loop: Unused; kept for backward compatibility. The previous default,
            ``asyncio.get_event_loop()``, was evaluated once at import time,
            which is deprecated outside a running loop and shared a single
            loop object across all calls.
        max_retries: Maximum number of connection attempts.
        delay_factor: Seconds multiplied by the attempt number for the
            sleep between retries (linear, not exponential, backoff).
        heartbeat: AMQP heartbeat interval in seconds, forwarded to aio-pika.
        blocked_connection_timeout: Forwarded to aio-pika.
            NOTE(review): this kwarg comes from pika's blocking API; confirm
            aio-pika's connect_robust actually honors it.

    Returns:
        An established robust aio-pika connection.

    Raises:
        Exception: If no connection could be made after *max_retries* attempts.
    """
    host = settings.amqp_host_name
    username = settings.amqp_username
    password = settings.amqp_password

    for attempt in range(1, max_retries + 1):
        try:
            connection = await aio_pika.connect_robust(
                f"amqp://{username}:{password}@{host}",
                heartbeat=heartbeat,
                blocked_connection_timeout=blocked_connection_timeout,
            )
            logger.info(f"{settings.name} connection established...")
            return connection
        except (aio_pika.exceptions.AMQPConnectionError, aio_pika.exceptions.AMQPChannelError) as e:
            # Use the module logger instead of print() for consistency with
            # the rest of the file.
            logger.warning(f"Connection attempt {attempt} failed with error: {e}")
            await asyncio.sleep(attempt * delay_factor)  # linear backoff
    raise Exception(f"Failed to establish a connection and channel after maximum retries: {max_retries}.")
13,736 | import requests
import json
import re
import openai
from time import time, sleep
from datetime import datetime
from halo import Halo
import textwrap
import yaml
def save_file(filepath, content):
    """Overwrite *filepath* with *content*, encoded as UTF-8."""
    with open(filepath, mode='w', encoding='utf-8') as handle:
        handle.write(content)
13,737 | import requests
import json
import re
import openai
from time import time, sleep
from datetime import datetime
from halo import Halo
import textwrap
import yaml
def open_file(filepath):
    """Return the UTF-8 text of *filepath*; undecodable bytes are dropped."""
    with open(filepath, mode='r', encoding='utf-8', errors='ignore') as handle:
        contents = handle.read()
    return contents
13,738 | import requests
import json
import re
import openai
from time import time, sleep
from datetime import datetime
from halo import Halo
import textwrap
import yaml
def send_message(bus, layer, message):
    """POST a {bus, layer, message} envelope to the local ACE message endpoint
    and print whether the request succeeded (HTTP 200) or not.

    NOTE(review): port 900 is unusual for a local service — confirm it is not
    a typo for e.g. 9000.
    """
    payload = json.dumps({'bus': bus, 'layer': layer, 'message': message})
    response = requests.post(
        'http://127.0.0.1:900/message',
        headers={'Content-Type': 'application/json'},
        data=payload,
    )
    if response.status_code == 200:
        print('Message sent successfully')
    else:
        print('Failed to send message')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.