id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
167,968 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.ops.deform_conv import DeformConv2d
import numpy as np
from einops import repeat
import timm
from pyiqa.utils.registry import ARCH_REGISTRY
from pyiqa.archs.arch_util import load_pretrained_network, to_2tuple
def get_attn_pad_mask(seq_q, seq_k, i_pad):
    """Build a boolean attention mask that is True wherever ``seq_k`` holds the pad id.

    Args:
        seq_q: Query token-id tensor of shape (batch, len_q).
        seq_k: Key token-id tensor of shape (batch, len_k).
        i_pad: Integer id used for padding.

    Returns:
        Bool tensor of shape (batch, len_q, len_k); each query row sees the
        same padded-key pattern.
    """
    n_batch, q_len = seq_q.size()
    k_len = seq_k.size(1)
    # Mark padded key positions once, then replicate across the query axis.
    key_is_pad = seq_k.data.eq(i_pad).unsqueeze(1)
    return key_is_pad.expand(n_batch, q_len, k_len)
167,969 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.ops.deform_conv import DeformConv2d
import numpy as np
from einops import repeat
import timm
from pyiqa.utils.registry import ARCH_REGISTRY
from pyiqa.archs.arch_util import load_pretrained_network, to_2tuple
def get_attn_decoder_mask(seq):
    """Return the causal (no-peek) decoder mask for ``seq``.

    Args:
        seq: Token-id tensor of shape (batch, seq_len).

    Returns:
        Tensor of shape (batch, seq_len, seq_len), same dtype as ``seq``, with
        ones strictly above the diagonal (future positions) and zeros elsewhere.
    """
    length = seq.size(1)
    full = torch.ones_like(seq).unsqueeze(-1).expand(seq.size(0), length, length)
    # Keep only the strictly upper-triangular part: position i must not attend to j > i.
    return full.triu(diagonal=1)
167,970 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.ops.deform_conv import DeformConv2d
import numpy as np
from einops import repeat
import timm
from pyiqa.utils.registry import ARCH_REGISTRY
from pyiqa.archs.arch_util import load_pretrained_network, to_2tuple
def random_crop(x, y, crop_size, crop_num):
    """Take ``crop_num`` spatially-aligned random crops from the image pair (x, y).

    Args:
        x: Tensor of shape (b, c, h, w).
        y: Tensor of shape (b, c, h, w); cropped at the same positions as ``x``.
        crop_size: int or (h, w) pair giving the crop height/width.
        crop_num: Number of random crops taken per image.

    Returns:
        Two tensors of shape (b * crop_num, c, ch, cw).
    """
    # NOTE: the stray `to_2tuple = _ntuple(2)` line was removed — `_ntuple` is
    # undefined here and the assignment shadowed the imported `to_2tuple`.
    b, c, h, w = x.shape
    ch, cw = to_2tuple(crop_size)
    crops_x = []
    crops_y = []
    for _ in range(crop_num):
        # randint's upper bound is exclusive; +1 makes the offset range
        # inclusive of h-ch / w-cw and keeps full-size crops (h == ch)
        # from raising ValueError.
        sh = np.random.randint(0, h - ch + 1)
        sw = np.random.randint(0, w - cw + 1)
        crops_x.append(x[..., sh: sh + ch, sw: sw + cw])
        crops_y.append(y[..., sh: sh + ch, sw: sw + cw])
    crops_x = torch.stack(crops_x, dim=1)
    crops_y = torch.stack(crops_y, dim=1)
    return crops_x.reshape(b * crop_num, c, ch, cw), crops_y.reshape(b * crop_num, c, ch, cw)
167,971 | import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torchvision.transforms.functional as TF
import timm
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from pyiqa.utils.registry import ARCH_REGISTRY
from pyiqa.archs.arch_util import dist_to_mos, load_pretrained_network, random_crop
import copy
from .clip_model import load
from .topiq_swin import create_swin
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
import os
import warnings
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) | null |
167,972 | import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torchvision.transforms.functional as TF
import timm
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from pyiqa.utils.registry import ARCH_REGISTRY
from pyiqa.archs.arch_util import dist_to_mos, load_pretrained_network, random_crop
import copy
from .clip_model import load
from .topiq_swin import create_swin
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
import os
import warnings
The provided code snippet includes necessary dependencies for implementing the `_get_activation_fn` function. Write a Python function `def _get_activation_fn(activation)` to solve the following problem:
Return an activation function given a string
Here is the function:
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.") | Return an activation function given a string |
167,973 | import torch
from torch.nn import functional as F
import numpy as np
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.registry import ARCH_REGISTRY
def sp5_filters():
    r'''Define spatial filters for a 5th-order (6-orientation) steerable pyramid.

    Returns:
        dict: A filter bank with keys:
            - 'harmonics': harmonic numbers of the angular decomposition.
            - 'mtx': 6x6 steering matrix for the oriented bands.
            - 'hi0filt': 9x9 initial highpass kernel.
            - 'lo0filt': 5x5 initial lowpass kernel.
            - 'lofilt': 9x9 lowpass kernel applied between pyramid levels.
            - 'bfilts': (49, 6) matrix; each column is a flattened 7x7
              oriented bandpass kernel.

    All coefficients are fixed constants from the standard sp5 filter set
    (Simoncelli & Freeman's steerable pyramid).
    '''
    filters = {}
    filters['harmonics'] = np.array([1, 3, 5])
    # Steering matrix: combines the 6 basis bands into arbitrary orientations.
    filters['mtx'] = (
        np.array([[0.3333, 0.2887, 0.1667, 0.0000, -0.1667, -0.2887], [0.0000, 0.1667, 0.2887, 0.3333, 0.2887, 0.1667],
                  [0.3333, -0.0000, -0.3333, -0.0000, 0.3333,
                   -0.0000], [0.0000, 0.3333, 0.0000, -0.3333, 0.0000, 0.3333],
                  [0.3333, -0.2887, 0.1667, -0.0000, -0.1667, 0.2887],
                  [-0.0000, 0.1667, -0.2887, 0.3333, -0.2887, 0.1667]]))
    # Initial highpass filter (9x9, symmetric).
    filters['hi0filt'] = (
        np.array([[
            -0.00033429, -0.00113093, -0.00171484, -0.00133542, -0.00080639, -0.00133542, -0.00171484, -0.00113093,
            -0.00033429
        ],
                  [
                      -0.00113093, -0.00350017, -0.00243812, 0.00631653, 0.01261227, 0.00631653, -0.00243812,
                      -0.00350017, -0.00113093
                  ],
                  [
                      -0.00171484, -0.00243812, -0.00290081, -0.00673482, -0.00981051, -0.00673482, -0.00290081,
                      -0.00243812, -0.00171484
                  ],
                  [
                      -0.00133542, 0.00631653, -0.00673482, -0.07027679, -0.11435863, -0.07027679, -0.00673482,
                      0.00631653, -0.00133542
                  ],
                  [
                      -0.00080639, 0.01261227, -0.00981051, -0.11435863, 0.81380200, -0.11435863, -0.00981051,
                      0.01261227, -0.00080639
                  ],
                  [
                      -0.00133542, 0.00631653, -0.00673482, -0.07027679, -0.11435863, -0.07027679, -0.00673482,
                      0.00631653, -0.00133542
                  ],
                  [
                      -0.00171484, -0.00243812, -0.00290081, -0.00673482, -0.00981051, -0.00673482, -0.00290081,
                      -0.00243812, -0.00171484
                  ],
                  [
                      -0.00113093, -0.00350017, -0.00243812, 0.00631653, 0.01261227, 0.00631653, -0.00243812,
                      -0.00350017, -0.00113093
                  ],
                  [
                      -0.00033429, -0.00113093, -0.00171484, -0.00133542, -0.00080639, -0.00133542, -0.00171484,
                      -0.00113093, -0.00033429
                  ]]))
    # Initial lowpass filter (5x5, symmetric).
    filters['lo0filt'] = (
        np.array([[0.00341614, -0.01551246, -0.03848215, -0.01551246, 0.00341614],
                  [-0.01551246, 0.05586982, 0.15925570, 0.05586982, -0.01551246],
                  [-0.03848215, 0.15925570, 0.40304148, 0.15925570, -0.03848215],
                  [-0.01551246, 0.05586982, 0.15925570, 0.05586982, -0.01551246],
                  [0.00341614, -0.01551246, -0.03848215, -0.01551246, 0.00341614]]))
    # Inter-level lowpass filter (9x9); the factor 2 compensates for the
    # energy lost when downsampling by 2 between levels.
    filters['lofilt'] = (2 * np.array([[
        0.00085404, -0.00244917, -0.00387812, -0.00944432, -0.00962054, -0.00944432, -0.00387812, -0.00244917,
        0.00085404
    ], [
        -0.00244917, -0.00523281, -0.00661117, 0.00410600, 0.01002988, 0.00410600, -0.00661117, -0.00523281, -0.00244917
    ], [
        -0.00387812, -0.00661117, 0.01396746, 0.03277038, 0.03981393, 0.03277038, 0.01396746, -0.00661117, -0.00387812
    ], [
        -0.00944432, 0.00410600, 0.03277038, 0.06426333, 0.08169618, 0.06426333, 0.03277038, 0.00410600, -0.00944432
    ], [
        -0.00962054, 0.01002988, 0.03981393, 0.08169618, 0.10096540, 0.08169618, 0.03981393, 0.01002988, -0.00962054
    ], [
        -0.00944432, 0.00410600, 0.03277038, 0.06426333, 0.08169618, 0.06426333, 0.03277038, 0.00410600, -0.00944432
    ], [-0.00387812, -0.00661117, 0.01396746, 0.03277038, 0.03981393, 0.03277038, 0.01396746, -0.00661117, -0.00387812],
                                       [
                                           -0.00244917, -0.00523281, -0.00661117, 0.00410600, 0.01002988, 0.00410600,
                                           -0.00661117, -0.00523281, -0.00244917
                                       ],
                                       [
                                           0.00085404, -0.00244917, -0.00387812, -0.00944432, -0.00962054, -0.00944432,
                                           -0.00387812, -0.00244917, 0.00085404
                                       ]]))
    # Oriented bandpass kernels: six flattened 7x7 filters, one per column
    # after the transpose (shape (49, 6)).
    filters['bfilts'] = (
        np.array([[
            0.00277643, 0.00496194, 0.01026699, 0.01455399, 0.01026699, 0.00496194, 0.00277643, -0.00986904,
            -0.00893064, 0.01189859, 0.02755155, 0.01189859, -0.00893064, -0.00986904, -0.01021852, -0.03075356,
            -0.08226445, -0.11732297, -0.08226445, -0.03075356, -0.01021852, 0.00000000, 0.00000000, 0.00000000,
            0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.01021852, 0.03075356, 0.08226445, 0.11732297, 0.08226445,
            0.03075356, 0.01021852, 0.00986904, 0.00893064, -0.01189859, -0.02755155, -0.01189859, 0.00893064,
            0.00986904, -0.00277643, -0.00496194, -0.01026699, -0.01455399, -0.01026699, -0.00496194, -0.00277643
        ],
                  [
                      -0.00343249, -0.00640815, -0.00073141, 0.01124321, 0.00182078, 0.00285723, 0.01166982,
                      -0.00358461, -0.01977507, -0.04084211, -0.00228219, 0.03930573, 0.01161195, 0.00128000,
                      0.01047717, 0.01486305, -0.04819057, -0.12227230, -0.05394139, 0.00853965, -0.00459034,
                      0.00790407, 0.04435647, 0.09454202, -0.00000000, -0.09454202, -0.04435647, -0.00790407,
                      0.00459034, -0.00853965, 0.05394139, 0.12227230, 0.04819057, -0.01486305, -0.01047717,
                      -0.00128000, -0.01161195, -0.03930573, 0.00228219, 0.04084211, 0.01977507, 0.00358461,
                      -0.01166982, -0.00285723, -0.00182078, -0.01124321, 0.00073141, 0.00640815, 0.00343249
                  ],
                  [
                      0.00343249, 0.00358461, -0.01047717, -0.00790407, -0.00459034, 0.00128000, 0.01166982, 0.00640815,
                      0.01977507, -0.01486305, -0.04435647, 0.00853965, 0.01161195, 0.00285723, 0.00073141, 0.04084211,
                      0.04819057, -0.09454202, -0.05394139, 0.03930573, 0.00182078, -0.01124321, 0.00228219, 0.12227230,
                      -0.00000000, -0.12227230, -0.00228219, 0.01124321, -0.00182078, -0.03930573, 0.05394139,
                      0.09454202, -0.04819057, -0.04084211, -0.00073141, -0.00285723, -0.01161195, -0.00853965,
                      0.04435647, 0.01486305, -0.01977507, -0.00640815, -0.01166982, -0.00128000, 0.00459034,
                      0.00790407, 0.01047717, -0.00358461, -0.00343249
                  ],
                  [
                      -0.00277643, 0.00986904, 0.01021852, -0.00000000, -0.01021852, -0.00986904, 0.00277643,
                      -0.00496194, 0.00893064, 0.03075356, -0.00000000, -0.03075356, -0.00893064, 0.00496194,
                      -0.01026699, -0.01189859, 0.08226445, -0.00000000, -0.08226445, 0.01189859, 0.01026699,
                      -0.01455399, -0.02755155, 0.11732297, -0.00000000, -0.11732297, 0.02755155, 0.01455399,
                      -0.01026699, -0.01189859, 0.08226445, -0.00000000, -0.08226445, 0.01189859, 0.01026699,
                      -0.00496194, 0.00893064, 0.03075356, -0.00000000, -0.03075356, -0.00893064, 0.00496194,
                      -0.00277643, 0.00986904, 0.01021852, -0.00000000, -0.01021852, -0.00986904, 0.00277643
                  ],
                  [
                      -0.01166982, -0.00128000, 0.00459034, 0.00790407, 0.01047717, -0.00358461, -0.00343249,
                      -0.00285723, -0.01161195, -0.00853965, 0.04435647, 0.01486305, -0.01977507, -0.00640815,
                      -0.00182078, -0.03930573, 0.05394139, 0.09454202, -0.04819057, -0.04084211, -0.00073141,
                      -0.01124321, 0.00228219, 0.12227230, -0.00000000, -0.12227230, -0.00228219, 0.01124321,
                      0.00073141, 0.04084211, 0.04819057, -0.09454202, -0.05394139, 0.03930573, 0.00182078, 0.00640815,
                      0.01977507, -0.01486305, -0.04435647, 0.00853965, 0.01161195, 0.00285723, 0.00343249, 0.00358461,
                      -0.01047717, -0.00790407, -0.00459034, 0.00128000, 0.01166982
                  ],
                  [
                      -0.01166982, -0.00285723, -0.00182078, -0.01124321, 0.00073141, 0.00640815, 0.00343249,
                      -0.00128000, -0.01161195, -0.03930573, 0.00228219, 0.04084211, 0.01977507, 0.00358461, 0.00459034,
                      -0.00853965, 0.05394139, 0.12227230, 0.04819057, -0.01486305, -0.01047717, 0.00790407, 0.04435647,
                      0.09454202, -0.00000000, -0.09454202, -0.04435647, -0.00790407, 0.01047717, 0.01486305,
                      -0.04819057, -0.12227230, -0.05394139, 0.00853965, -0.00459034, -0.00358461, -0.01977507,
                      -0.04084211, -0.00228219, 0.03930573, 0.01161195, 0.00128000, -0.00343249, -0.00640815,
                      -0.00073141, 0.01124321, 0.00182078, 0.00285723, 0.01166982
                  ]]).T)
    return filters
def corrDn(image, filt, step=1, channels=1):
    r'''Correlate ``image`` with ``filt`` under reflect padding, then downsample by stride.

    Args:
        image: A tensor. Shape :math:`(N, C, H, W)`.
        filt: A 2-D numpy filter kernel.
        step: Downsampling factor (used as the convolution stride).
        channels: Number of channels (the kernel is applied depthwise).

    Returns:
        Filtered (and possibly downsampled) tensor.
    '''
    kernel = torch.from_numpy(filt).float()
    # One copy of the kernel per channel -> depthwise correlation via groups.
    kernel = kernel.unsqueeze(0).unsqueeze(0).repeat(channels, 1, 1, 1).to(image.device)
    pad = (kernel.shape[2] - 1) // 2
    padded = F.pad(image, (pad, pad, pad, pad), 'reflect')
    return F.conv2d(padded, kernel, stride=step, padding=0, groups=channels)
The provided code snippet includes necessary dependencies for implementing the `SteerablePyramidSpace` function. Write a Python function `def SteerablePyramidSpace(image, height=4, order=5, channels=1)` to solve the following problem:
r'''Construct a steerable pyramid on image. Args: image: A tensor. Shape :math:`(N, C, H, W)`. height (int): Number of pyramid levels to build. order (int): Number of orientations. channels (int): Number of channels.
Here is the function:
def SteerablePyramidSpace(image, height=4, order=5, channels=1):
    r'''Construct a steerable pyramid on image.
    Args:
        image: A tensor. Shape :math:`(N, C, H, W)`.
        height (int): Number of pyramid levels to build.
        order (int): Number of orientations.
        channels (int): Number of channels.

    Returns:
        list: [highpass residual, oriented bands per level, ..., lowpass residual].
    '''
    num_orientations = order + 1
    filters = sp5_filters()
    # Highpass residual of the full-resolution image comes first.
    pyr_coeffs = [corrDn(image, filters['hi0filt'], step=1, channels=channels)]
    lo = corrDn(image, filters['lo0filt'], step=1, channels=channels)
    # Each bandpass column of 'bfilts' is a flattened square kernel.
    bfiltsz = int(np.floor(np.sqrt(filters['bfilts'].shape[0])))
    for _ in range(height):
        # One oriented band per orientation at the current scale.
        for b in range(num_orientations):
            filt = filters['bfilts'][:, b].reshape(bfiltsz, bfiltsz).T
            pyr_coeffs.append(corrDn(lo, filt, step=1, channels=channels))
        # Lowpass and downsample by 2 before the next level.
        lo = corrDn(lo, filters['lofilt'], step=2, channels=channels)
    pyr_coeffs.append(lo)
    return pyr_coeffs
167,974 | import datetime
import logging
import time
import torch
import os
import numpy as np
from os import path as osp
from pyiqa.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher
from pyiqa.models import build_model
from pyiqa.utils import (AvgTimer, MessageLogger, get_root_logger, get_time_str, get_env_info, make_exp_dirs, mkdir_and_rename)
from pyiqa.utils.options import copy_opt_file, dict2str, parse_options, make_paths
from pyiqa.train import init_tb_loggers, create_train_val_dataloader
from pyiqa.train import train_pipeline
def make_paths(opt, root_path):
    """Populate ``opt['path']`` with experiment/result directories under ``root_path``.

    Args:
        opt (dict): Option dict; must contain 'is_train', 'name' and a 'path' sub-dict
            (plus 'logger'/'val' when the name marks a debug run).
        root_path (str): Root directory the experiment/result folders live under.
    """
    paths = opt['path']
    if opt['is_train']:
        experiments_root = osp.join(root_path, 'experiments', opt['name'])
        paths['experiments_root'] = experiments_root
        paths['models'] = osp.join(experiments_root, 'models')
        paths['training_states'] = osp.join(experiments_root, 'training_states')
        paths['log'] = experiments_root
        paths['visualization'] = osp.join(experiments_root, 'visualization')
        # Debug runs validate, print and checkpoint far more frequently.
        if 'debug' in opt['name']:
            if 'val' in opt:
                opt['val']['val_freq'] = 7
            opt['logger']['print_freq'] = 1
            opt['logger']['save_checkpoint_freq'] = 7
    else:  # test mode
        results_root = osp.join(root_path, 'results', opt['name'])
        paths['results_root'] = results_root
        paths['log'] = results_root
        paths['visualization'] = osp.join(results_root, 'visualization')
def parse_options(root_path, is_train=True):
    """Parse CLI arguments plus the YAML option file into one option dict.

    Args:
        root_path (str): Root path handed to ``make_paths`` to build experiment dirs.
        is_train (bool): Whether options are for training (affects generated paths).

    Returns:
        tuple: ``(opt, args)`` — the fully-populated option dict and the parsed
        argparse namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, required=True, help='Path to option YAML file.')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none', help='job launcher')
    parser.add_argument('--auto_resume', action='store_true')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--force_yml', nargs='+', default=None, help='Force to update yml files. Examples: train:ema_decay=0.999')
    args = parser.parse_args()
    # parse yml to dict
    with open(args.opt, mode='r') as f:
        opt = yaml.load(f, Loader=ordered_yaml()[0])
    # distributed settings
    if args.launcher == 'none':
        opt['dist'] = False
        print('Disable distributed.', flush=True)
    else:
        opt['dist'] = True
        if args.launcher == 'slurm' and 'dist_params' in opt:
            init_dist(args.launcher, **opt['dist_params'])
        else:
            init_dist(args.launcher)
    opt['rank'], opt['world_size'] = get_dist_info()
    # random seed: draw one if the YAML didn't pin it, then offset by rank so
    # each process gets a distinct but reproducible seed
    seed = opt.get('manual_seed')
    if seed is None:
        seed = random.randint(1, 10000)
        opt['manual_seed'] = seed
    set_random_seed(seed + opt['rank'])
    # force to update yml options from `--force_yml key:subkey=value` entries
    if args.force_yml is not None:
        for entry in args.force_yml:
            # now do not support creating new keys
            keys, value = entry.split('=')
            keys, value = keys.strip(), value.strip()
            value = _postprocess_yml_value(value)
            eval_str = 'opt'
            for key in keys.split(':'):
                eval_str += f'["{key}"]'
            eval_str += '=value'
            # using exec function to assign into the nested dict path
            exec(eval_str)
    opt['auto_resume'] = args.auto_resume
    opt['is_train'] = is_train
    # debug setting: prefix the experiment name so make_paths enables debug freqs
    if args.debug and not opt['name'].startswith('debug'):
        opt['name'] = 'debug_' + opt['name']
    if opt['num_gpu'] == 'auto':
        opt['num_gpu'] = torch.cuda.device_count()
    # datasets
    for phase, dataset in opt['datasets'].items():
        # for multiple datasets, e.g., val_1, val_2; test_1, test_2
        phase = phase.split('_')[0]
        dataset['phase'] = phase
        if 'scale' in opt:
            dataset['scale'] = opt['scale']
        if dataset.get('dataroot_gt') is not None:
            dataset['dataroot_gt'] = osp.expanduser(dataset['dataroot_gt'])
        if dataset.get('dataroot_lq') is not None:
            dataset['dataroot_lq'] = osp.expanduser(dataset['dataroot_lq'])
    # paths: expand ~ in resume/pretrain paths only
    for key, val in opt['path'].items():
        if (val is not None) and ('resume_state' in key or 'pretrain_network' in key):
            opt['path'][key] = osp.expanduser(val)
    make_paths(opt, root_path)
    return opt, args
def train_pipeline(root_path, opt=None, args=None):
    """Full training entry point: options, dataloaders, model, train/val loop.

    Args:
        root_path (str): Repository root used to resolve experiment paths.
        opt (dict | None): Pre-parsed options; parsed from the CLI when None.
        args: Parsed CLI namespace matching ``opt``; must be given with ``opt``.

    Returns:
        The model's ``best_metric_results`` gathered across validation runs.
    """
    # parse options, set distributed setting, set random seed
    if opt is None and args is None:
        opt, args = parse_options(root_path, is_train=True)
    opt['root_path'] = root_path
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True
    # load resume states if necessary
    resume_state = load_resume_state(opt)
    # mkdir for experiments and logger (fresh runs only)
    if resume_state is None:
        make_exp_dirs(opt)
        if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name'] and opt['rank'] == 0:
            os.makedirs(osp.join(opt['root_path'], 'tb_logger_archived'), exist_ok=True)
            mkdir_and_rename(osp.join(opt['root_path'], 'tb_logger', opt['name']))
    # copy the yml file to the experiment root
    copy_opt_file(args.opt, opt['path']['experiments_root'])
    # WARNING: should not use get_root_logger in the above codes, including the called functions
    # Otherwise the logger will not be properly initialized
    log_file = osp.join(opt['path']['log'], f"train_{opt['name']}_{get_time_str()}.log")
    logger = get_root_logger(logger_name='pyiqa', log_level=logging.INFO, log_file=log_file)
    logger.info(get_env_info())
    logger.info(dict2str(opt))
    # initialize wandb and tb loggers
    tb_logger = init_tb_loggers(opt)
    # create train and validation dataloaders
    result = create_train_val_dataloader(opt, logger)
    train_loader, train_sampler, val_loaders, total_epochs, total_iters = result
    # create model
    model = build_model(opt)
    if resume_state:  # resume training
        model.resume_training(resume_state)  # handle optimizers and schedulers
        logger.info(f"Resuming training from epoch: {resume_state['epoch']}, " f"iter: {resume_state['iter']}.")
        start_epoch = resume_state['epoch']
        current_iter = resume_state['iter']
    else:
        start_epoch = 0
        current_iter = 0
    # create message logger (formatted outputs)
    msg_logger = MessageLogger(opt, current_iter, tb_logger)
    # dataloader prefetcher
    prefetch_mode = opt['datasets']['train'].get('prefetch_mode')
    if prefetch_mode is None or prefetch_mode == 'cpu':
        prefetcher = CPUPrefetcher(train_loader)
    elif prefetch_mode == 'cuda':
        prefetcher = CUDAPrefetcher(train_loader, opt)
        logger.info(f'Use {prefetch_mode} prefetch dataloader')
        if opt['datasets']['train'].get('pin_memory') is not True:
            raise ValueError('Please set pin_memory=True for CUDAPrefetcher.')
    else:
        raise ValueError(f'Wrong prefetch_mode {prefetch_mode}.' "Supported ones are: None, 'cuda', 'cpu'.")
    # training
    logger.info(f'Start training from epoch: {start_epoch}, iter: {current_iter}')
    data_timer, iter_timer = AvgTimer(), AvgTimer()
    start_time = time.time()
    for epoch in range(start_epoch, total_epochs + 1):
        train_sampler.set_epoch(epoch)
        prefetcher.reset()
        train_data = prefetcher.next()
        while train_data is not None:
            data_timer.record()
            current_iter += 1
            if current_iter > total_iters:
                break
            # update learning rate
            # model.update_learning_rate(current_iter, warmup_iter=opt['train'].get('warmup_iter', -1))
            # training
            model.feed_data(train_data)
            model.optimize_parameters(current_iter)
            iter_timer.record()
            if current_iter == 1:
                # reset start time in msg_logger for more accurate eta_time
                # not work in resume mode
                msg_logger.reset_start_time()
            # log scalar values every print_freq iterations
            if current_iter % opt['logger']['print_freq'] == 0:
                log_vars = {'epoch': epoch, 'iter': current_iter}
                log_vars.update({'lrs': model.get_current_learning_rate()})
                log_vars.update({'time': iter_timer.get_avg_time(), 'data_time': data_timer.get_avg_time()})
                log_vars.update(model.get_current_log())
                msg_logger(log_vars)
            # log images to tensorboard (disabled by the huge default freq)
            log_img_freq = opt['logger'].get('log_imgs_freq', 1e99)
            if current_iter % log_img_freq == 0:
                visual_imgs = model.get_current_visuals()
                if tb_logger and visual_imgs is not None:
                    for k, v in visual_imgs.items():
                        tb_logger.add_images(f'ckpt_imgs/{k}', v.clamp(0, 1), current_iter)
            # save models and training states
            save_ckpt_freq = opt['logger'].get('save_checkpoint_freq', 9e9)
            if current_iter % save_ckpt_freq == 0:
                logger.info('Saving models and training states.')
                model.save(epoch, current_iter)
            if current_iter % opt['logger']['save_latest_freq'] == 0:
                logger.info('Saving latest models and training states.')
                model.save(epoch, -1)
            # validation
            if opt.get('val') is not None and (current_iter % opt['val']['val_freq'] == 0):
                logger.info(f'{len(val_loaders)} validation datasets are used for validation.')
                for val_loader in val_loaders:
                    model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img'])
            data_timer.start()
            iter_timer.start()
            train_data = prefetcher.next()
        # end of iter
        # use epoch based learning rate scheduler
        model.update_learning_rate(epoch+2, warmup_iter=opt['train'].get('warmup_iter', -1))
    # end of epoch
    consumed_time = str(datetime.timedelta(seconds=int(time.time() - start_time)))
    logger.info(f'End of training. Time consumed: {consumed_time}')
    logger.info('Save the latest model.')
    model.save(epoch=-1, current_iter=-1)  # -1 stands for the latest
    # final validation pass with the latest weights
    if opt.get('val') is not None:
        for val_loader in val_loaders:
            model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img'])
    if tb_logger:
        tb_logger.close()
    return model.best_metric_results
def train_nsplits(root_path):
    """Train the same config over ``split_num`` dataset splits and write a TSV summary.

    Args:
        root_path (str): Repository root passed through to ``train_pipeline``.

    The per-split best metric results are collected and written (with mean/std
    per dataset) to ``opt['save_final_results_path']``.
    """
    torch.backends.cudnn.benchmark = True
    opt, args = parse_options(root_path, is_train=True)
    n_splits = opt['split_num']
    save_path = opt['save_final_results_path']
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    all_split_results = []
    prefix_name = opt['name']
    for i in range(n_splits):
        # update split specific options
        opt['name'] = prefix_name + f'_Split{i:02d}'
        make_paths(opt, root_path)
        for k in opt['datasets'].keys():
            opt['datasets'][k]['split_index'] = i + 1
        tmp_results = train_pipeline(root_path, opt, args)
        all_split_results.append(tmp_results)
    # aggregate: one row per (dataset, split), then mean/std per dataset
    with open(save_path, 'w') as sf:
        datasets = list(all_split_results[0].keys())
        metrics = list(all_split_results[0][datasets[0]].keys())
        print(datasets, metrics)
        sf.write('Val Datasets\tSplits\t{}\n'.format('\t'.join(metrics)))
        for ds in datasets:
            all_results = []
            for i in range(n_splits):
                results_msg = f'{ds}\t{i:02d}\t'
                tmp_metric_results = []
                for mt in metrics:
                    tmp_metric_results.append(all_split_results[i][ds][mt]['val'])
                    results_msg += f"{all_split_results[i][ds][mt]['val']:04f}\t"
                # NOTE(review): the iteration tag uses `mt` from the last loop
                # round, i.e. only the final metric's best iter is reported.
                results_msg += f"@{all_split_results[i][ds][mt]['iter']:05d}\n"
                sf.write(results_msg)
                all_results.append(tmp_metric_results)
            results_avg = np.array(all_results).mean(axis=0)
            results_std = np.array(all_results).std(axis=0)
            sf.write(f'Overall results in {ds}: {results_avg}\t{results_std}\n')
167,978 | import math
import os
import requests
from torch.hub import download_url_to_file, get_dir
from tqdm import tqdm
from urllib.parse import urlparse
from .misc import sizeof_fmt
def get_confirm_token(response):
    """Return the Google-Drive download confirmation token from the response cookies.

    Args:
        response: A ``requests`` response whose cookies may contain a
            ``download_warning*`` entry.

    Returns:
        str | None: The token value, or None when no warning cookie is present.
    """
    return next(
        (value for key, value in response.cookies.items() if key.startswith('download_warning')),
        None,
    )
def save_response_content(response, destination, file_size=None, chunk_size=32768):
    """Stream a requests response body to ``destination``, with optional progress bar.

    Args:
        response: A streaming ``requests`` response.
        destination (str): Output file path.
        file_size (int | None): Total size in bytes; enables the tqdm bar when given.
        chunk_size (int): Bytes requested per ``iter_content`` step.
    """
    pbar = None
    if file_size is not None:
        pbar = tqdm(total=math.ceil(file_size / chunk_size), unit='chunk')
        readable_file_size = sizeof_fmt(file_size)
    downloaded_size = 0
    with open(destination, 'wb') as fout:
        for chunk in response.iter_content(chunk_size):
            # NOTE(review): counts a full chunk_size per iteration, so the
            # displayed total slightly over-reports on the final short chunk;
            # kept identical to the original behavior.
            downloaded_size += chunk_size
            if pbar is not None:
                pbar.update(1)
                pbar.set_description(f'Download {sizeof_fmt(downloaded_size)} / {readable_file_size}')
            if chunk:  # filter out keep-alive new chunks
                fout.write(chunk)
    if pbar is not None:
        pbar.close()
The provided code snippet includes necessary dependencies for implementing the `download_file_from_google_drive` function. Write a Python function `def download_file_from_google_drive(file_id, save_path)` to solve the following problem:
Download files from google drive. Ref: https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive # noqa E501 Args: file_id (str): File id. save_path (str): Save path.
Here is the function:
def download_file_from_google_drive(file_id, save_path):
    """Download files from google drive.

    Ref:
    https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive # noqa E501

    Args:
        file_id (str): File id.
        save_path (str): Save path.
    """
    URL = 'https://docs.google.com/uc?export=download'
    params = {'id': file_id}
    session = requests.Session()
    response = session.get(URL, params=params, stream=True)
    # Large files trip Drive's virus-scan warning page and return a
    # confirmation token; re-request with it when present.
    token = get_confirm_token(response)
    if token:
        params['confirm'] = token
        response = session.get(URL, params=params, stream=True)
    # Probe the total size with a ranged request so the progress bar is accurate.
    probe = session.get(URL, params=params, stream=True, headers={'Range': 'bytes=0-2'})
    if 'Content-Range' in probe.headers:
        file_size = int(probe.headers['Content-Range'].split('/')[1])
    else:
        file_size = None
    save_response_content(response, save_path, file_size)
167,979 | import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
import io
from PIL import Image
import torchvision.transforms.functional as TF
def is_image_file(filename):
    """Return True if ``filename``'s extension is one PIL has a registered codec for."""
    suffixes = tuple(Image.registered_extensions())
    return filename.lower().endswith(suffixes)
The provided code snippet includes necessary dependencies for implementing the `imread2pil` function. Write a Python function `def imread2pil(img_source, rgb=False)` to solve the following problem:
Read image to tensor. Args: img_source (str, bytes, or PIL.Image): image filepath string, image contents as a bytearray or a PIL Image instance rgb: convert input to RGB if true
Here is the function:
def imread2pil(img_source, rgb=False):
    """Read image to a PIL Image.

    Args:
        img_source (str, bytes, or PIL.Image): image filepath string, image
            contents as a bytearray or a PIL Image instance
        rgb: convert input to RGB if true

    Returns:
        PIL.Image.Image: The opened (and optionally RGB-converted) image.
    """
    if isinstance(img_source, Image.Image):
        img = img_source
    elif type(img_source) == bytes:
        img = Image.open(io.BytesIO(img_source))
    elif type(img_source) == str:
        assert is_image_file(img_source), f'{img_source} is not a valid image file.'
        img = Image.open(img_source)
    else:
        raise Exception("Unsupported source type")
    return img.convert('RGB') if rgb else img
167,980 | import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
import io
from PIL import Image
import torchvision.transforms.functional as TF
def is_image_file(filename):
    """Return True if ``filename``'s extension is one PIL has a registered codec for."""
    known_exts = tuple(Image.registered_extensions())
    return filename.lower().endswith(known_exts)
The provided code snippet includes necessary dependencies for implementing the `imread2tensor` function. Write a Python function `def imread2tensor(img_source, rgb=False)` to solve the following problem:
Read image to tensor. Args: img_source (str, bytes, or PIL.Image): image filepath string, image contents as a bytearray or a PIL Image instance rgb: convert input to RGB if true
Here is the function:
def imread2tensor(img_source, rgb=False):
    """Read image to tensor.

    Args:
        img_source (str, bytes, or PIL.Image): image filepath string, image
            contents as a bytearray or a PIL Image instance
        rgb: convert input to RGB if true

    Returns:
        torch.Tensor: CHW float tensor in [0, 1] produced by ``TF.to_tensor``.
    """
    if isinstance(img_source, Image.Image):
        img = img_source
    elif type(img_source) == bytes:
        img = Image.open(io.BytesIO(img_source))
    elif type(img_source) == str:
        assert is_image_file(img_source), f'{img_source} is not a valid image file.'
        img = Image.open(img_source)
    else:
        raise Exception("Unsupported source type")
    if rgb:
        img = img.convert('RGB')
    return TF.to_tensor(img)
167,981 | import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
import io
from PIL import Image
import torchvision.transforms.functional as TF
The provided code snippet includes necessary dependencies for implementing the `img2tensor` function. Write a Python function `def img2tensor(imgs, bgr2rgb=True, float32=True)` to solve the following problem:
Numpy array to tensor. Args: imgs (list[ndarray] | ndarray): Input images. bgr2rgb (bool): Whether to change bgr to rgb. float32 (bool): Whether to change to float32. Returns: list[tensor] | tensor: Tensor images. If returned results only have one element, just return tensor.
Here is the function:
def img2tensor(imgs, bgr2rgb=True, float32=True):
    """Numpy array to tensor.

    Args:
        imgs (list[ndarray] | ndarray): Input HWC images.
        bgr2rgb (bool): Whether to change bgr to rgb (3-channel images only).
        float32 (bool): Whether to cast the result to float32.

    Returns:
        list[tensor] | tensor: CHW tensor images. A single input yields a
        single tensor; a list yields a list.
    """

    def _convert(img):
        if bgr2rgb and img.shape[2] == 3:
            # cv2 cannot convert float64 directly; downcast first.
            if img.dtype == 'float64':
                img = img.astype('float32')
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        tensor = torch.from_numpy(img.transpose(2, 0, 1))
        return tensor.float() if float32 else tensor

    if isinstance(imgs, list):
        return [_convert(im) for im in imgs]
    return _convert(imgs)
167,982 | import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
import io
from PIL import Image
import torchvision.transforms.functional as TF
The provided code snippet includes necessary dependencies for implementing the `tensor2img` function. Write a Python function `def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1))` to solve the following problem:
Convert torch Tensors into image numpy arrays. After clamping to [min, max], values will be normalized to [0, 1]. Args: tensor (Tensor or list[Tensor]): Accept shapes: 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W); 2) 3D Tensor of shape (3/1 x H x W); 3) 2D Tensor of shape (H x W). Tensor channel should be in RGB order. rgb2bgr (bool): Whether to change rgb to bgr. out_type (numpy type): output types. If ``np.uint8``, transform outputs to uint8 type with range [0, 255]; otherwise, float type with range [0, 1]. Default: ``np.uint8``. min_max (tuple[int]): min and max values for clamp. Returns: (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of shape (H x W). The channel order is BGR.
Here is the function:
def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):
    """Convert torch Tensors into image numpy arrays.
    After clamping to [min, max], values will be normalized to [0, 1].
    Args:
        tensor (Tensor or list[Tensor]): Accept shapes:
            1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);
            2) 3D Tensor of shape (3/1 x H x W);
            3) 2D Tensor of shape (H x W).
            Tensor channel should be in RGB order.
        rgb2bgr (bool): Whether to change rgb to bgr.
        out_type (numpy type): output types. If ``np.uint8``, transform outputs
            to uint8 type with range [0, 255]; otherwise, float type with
            range [0, 1]. Default: ``np.uint8``.
        min_max (tuple[int]): min and max values for clamp.
    Returns:
        (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of
        shape (H x W). The channel order is BGR (when ``rgb2bgr`` is True).
    """
    if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
        raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}')
    if torch.is_tensor(tensor):
        tensor = [tensor]  # wrap so a single tensor and a list share one code path
    result = []
    for _tensor in tensor:
        # move to CPU, clamp to [min, max], then rescale to [0, 1]
        _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)
        _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])
        n_dim = _tensor.dim()
        if n_dim == 4:
            # a batch of images: tile them into one grid image
            img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy()
            img_np = img_np.transpose(1, 2, 0)
            if rgb2bgr:
                img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
        elif n_dim == 3:
            img_np = _tensor.numpy()
            img_np = img_np.transpose(1, 2, 0)  # CHW -> HWC
            if img_np.shape[2] == 1:  # gray image
                img_np = np.squeeze(img_np, axis=2)
            else:
                if rgb2bgr:
                    img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
        elif n_dim == 2:
            img_np = _tensor.numpy()
        else:
            raise TypeError(f'Only support 4D, 3D or 2D tensor. But received with dimension: {n_dim}')
        if out_type == np.uint8:
            # Unlike MATLAB, numpy.uint8() WILL NOT round by default.
            img_np = (img_np * 255.0).round()
        img_np = img_np.astype(out_type)
        result.append(img_np)
    if len(result) == 1:
        result = result[0]  # unwrap: single input yields a single image
    return result
167,983 | import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
import io
from PIL import Image
import torchvision.transforms.functional as TF
The provided code snippet includes necessary dependencies for implementing the `tensor2img_fast` function. Write a Python function `def tensor2img_fast(tensor, rgb2bgr=True, min_max=(0, 1))` to solve the following problem:
This implementation is slightly faster than tensor2img. It now only supports torch tensor with shape (1, c, h, w). Args: tensor (Tensor): Now only support torch tensor with (1, c, h, w). rgb2bgr (bool): Whether to change rgb to bgr. Default: True. min_max (tuple[int]): min and max values for clamp.
Here is the function:
def tensor2img_fast(tensor, rgb2bgr=True, min_max=(0, 1)):
    """Fast tensor-to-image conversion (slightly faster than tensor2img).

    Only supports a torch tensor of shape (1, c, h, w).

    Args:
        tensor (Tensor): Input tensor of shape (1, c, h, w).
        rgb2bgr (bool): Whether to change rgb to bgr. Default: True.
        min_max (tuple[int]): min and max values for clamp.

    Returns:
        ndarray: uint8 image of shape (h, w, c).
    """
    lo, hi = min_max
    # drop batch dim, clamp in place, and move channels last
    img = tensor.squeeze(0).detach().clamp_(lo, hi).permute(1, 2, 0)
    img = (img - lo) / (hi - lo) * 255
    img = img.type(torch.uint8).cpu().numpy()
    if rgb2bgr:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    return img
167,984 | import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
import io
from PIL import Image
import torchvision.transforms.functional as TF
The provided code snippet includes necessary dependencies for implementing the `imfrombytes` function. Write a Python function `def imfrombytes(content, flag='color', float32=False)` to solve the following problem:
Read an image from bytes. Args: content (bytes): Image bytes got from files or other streams. flag (str): Flags specifying the color type of a loaded image, candidates are `color`, `grayscale` and `unchanged`. float32 (bool): Whether to change to float32., If True, will also norm to [0, 1]. Default: False. Returns: ndarray: Loaded image array.
Here is the function:
def imfrombytes(content, flag='color', float32=False):
    """Read an image from bytes.
    Args:
        content (bytes): Image bytes got from files or other streams.
        flag (str): Flags specifying the color type of a loaded image,
            candidates are `color`, `grayscale` and `unchanged`.
        float32 (bool): Whether to change to float32. If True, will also norm
            to [0, 1]. Default: False.
    Returns:
        ndarray: Loaded image array.
    """
    img_np = np.frombuffer(content, np.uint8)
    # map the user-facing flag names onto the corresponding OpenCV imread flags
    imread_flags = {'color': cv2.IMREAD_COLOR, 'grayscale': cv2.IMREAD_GRAYSCALE, 'unchanged': cv2.IMREAD_UNCHANGED}
    img = cv2.imdecode(img_np, imread_flags[flag])
    if float32:
        img = img.astype(np.float32) / 255.
    return img
167,985 | import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
import io
from PIL import Image
import torchvision.transforms.functional as TF
The provided code snippet includes necessary dependencies for implementing the `imwrite` function. Write a Python function `def imwrite(img, file_path, params=None, auto_mkdir=True)` to solve the following problem:
Write image to file. Args: img (ndarray): Image array to be written. file_path (str): Image file path. params (None or list): Same as opencv's :func:`imwrite` interface. auto_mkdir (bool): If the parent folder of `file_path` does not exist, whether to create it automatically. Returns: bool: Successful or not.
Here is the function:
def imwrite(img, file_path, params=None, auto_mkdir=True):
    """Write image to file.
    Args:
        img (ndarray): Image array to be written.
        file_path (str): Image file path.
        params (None or list): Same as opencv's :func:`imwrite` interface.
        auto_mkdir (bool): If the parent folder of `file_path` does not exist,
            whether to create it automatically.
    Raises:
        IOError: If OpenCV fails to write the image (no bool is returned,
            contrary to what older docs suggested).
    """
    if auto_mkdir:
        dir_name = os.path.abspath(os.path.dirname(file_path))
        os.makedirs(dir_name, exist_ok=True)
    ok = cv2.imwrite(file_path, img, params)
    if not ok:
        raise IOError('Failed in writing images.')
167,986 | import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
import io
from PIL import Image
import torchvision.transforms.functional as TF
The provided code snippet includes necessary dependencies for implementing the `crop_border` function. Write a Python function `def crop_border(imgs, crop_border)` to solve the following problem:
Crop borders of images. Args: imgs (list[ndarray] | ndarray): Images with shape (h, w, c). crop_border (int): Crop border for each end of height and weight. Returns: list[ndarray]: Cropped images.
Here is the function:
def crop_border(imgs, crop_border):
    """Remove ``crop_border`` pixels from every side of each image.

    Args:
        imgs (list[ndarray] | ndarray): Images with shape (h, w, c).
        crop_border (int): Crop border for each end of height and width.

    Returns:
        list[ndarray] | ndarray: Cropped image(s); the input is returned
        unchanged when ``crop_border`` is 0.
    """
    if crop_border == 0:
        return imgs

    def _crop(img):
        return img[crop_border:-crop_border, crop_border:-crop_border, ...]

    if isinstance(imgs, list):
        return [_crop(img) for img in imgs]
    return _crop(imgs)
167,987 | import numpy as np
import os
import random
import time
import torch
from os import path as osp
import shutil
from .dist_util import master_only
The provided code snippet includes necessary dependencies for implementing the `set_random_seed` function. Write a Python function `def set_random_seed(seed=123)` to solve the following problem:
Set random seeds.
Here is the function:
def set_random_seed(seed=123):
    """Seed every relevant RNG (python, numpy, torch CPU/CUDA) and force
    deterministic cuDNN behavior for reproducible runs.

    Args:
        seed (int): Seed value. Default: 123.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # disable auto-tuning and enforce deterministic kernels
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
167,988 | import numpy as np
import os
import random
import time
import torch
from os import path as osp
import shutil
from .dist_util import master_only
def mkdir_and_rename(path):
    """mkdirs. If path exists, rename it with timestamp, create a new one, and move it to archive folder.
    Args:
        path (str): Folder path.
    """
    if osp.exists(path):
        # archive the old folder under a timestamped name before recreating
        archived = (path + '_archived_' + get_time_str()).replace('tb_logger', 'tb_logger_archived')
        print(f'Path already exists. Rename it to {archived}', flush=True)
        os.rename(path, archived)
    os.makedirs(path, exist_ok=True)
The provided code snippet includes necessary dependencies for implementing the `make_exp_dirs` function. Write a Python function `def make_exp_dirs(opt)` to solve the following problem:
Make dirs for experiments.
Here is the function:
def make_exp_dirs(opt):
    """Make dirs for experiments.

    Creates (archiving first, if present) the experiment or result root, then
    creates every remaining directory listed under ``opt['path']``, skipping
    entries that hold option values rather than directories.

    Args:
        opt (dict): Full config; must contain 'is_train' and a 'path' dict.
    """
    path_opt = opt['path'].copy()
    if opt['is_train']:
        mkdir_and_rename(path_opt.pop('experiments_root'))
    else:
        mkdir_and_rename(path_opt.pop('results_root'))
    for key, path in path_opt.items():
        # these keys hold flags / checkpoint paths, not folders to create
        if ('strict_load' in key) or ('pretrain_network' in key) or ('resume' in key) or ('param_key' in key):
            continue
        else:
            os.makedirs(path, exist_ok=True)
167,989 | import numpy as np
import os
import random
import time
import torch
from os import path as osp
import shutil
from .dist_util import master_only
The provided code snippet includes necessary dependencies for implementing the `scandir` function. Write a Python function `def scandir(dir_path, suffix=None, recursive=False, full_path=False)` to solve the following problem:
Scan a directory to find the interested files. Args: dir_path (str): Path of the directory. suffix (str | tuple(str), optional): File suffix that we are interested in. Default: None. recursive (bool, optional): If set to True, recursively scan the directory. Default: False. full_path (bool, optional): If set to True, include the dir_path. Default: False. Returns: A generator for all the interested files with relative paths.
Here is the function:
def scandir(dir_path, suffix=None, recursive=False, full_path=False):
    """Scan a directory to find the interested files.

    Hidden entries (names starting with '.') are never yielded.

    Args:
        dir_path (str): Path of the directory.
        suffix (str | tuple(str), optional): File suffix that we are
            interested in. Default: None.
        recursive (bool, optional): If set to True, recursively scan the
            directory. Default: False.
        full_path (bool, optional): If set to True, include the dir_path.
            Default: False.

    Returns:
        A generator for all the interested files with relative paths.

    Raises:
        TypeError: If ``suffix`` is neither None, a str, nor a tuple of str.
    """
    if (suffix is not None) and not isinstance(suffix, (str, tuple)):
        raise TypeError('"suffix" must be a string or tuple of strings')

    root = dir_path

    def _scandir(dir_path, suffix, recursive):
        for entry in os.scandir(dir_path):
            if not entry.name.startswith('.') and entry.is_file():
                if full_path:
                    return_path = entry.path
                else:
                    return_path = osp.relpath(entry.path, root)

                if suffix is None:
                    yield return_path
                elif return_path.endswith(suffix):
                    yield return_path
            elif recursive and entry.is_dir():
                # Fix: only recurse into directories. The previous code also
                # recursed into hidden *files*, raising NotADirectoryError.
                yield from _scandir(entry.path, suffix=suffix, recursive=recursive)

    return _scandir(dir_path, suffix=suffix, recursive=recursive)
167,990 | import numpy as np
import os
import random
import time
import torch
from os import path as osp
import shutil
from .dist_util import master_only
The provided code snippet includes necessary dependencies for implementing the `check_resume` function. Write a Python function `def check_resume(opt, resume_iter)` to solve the following problem:
Check resume states and pretrain_network paths. Args: opt (dict): Options. resume_iter (int): Resume iteration.
Here is the function:
def check_resume(opt, resume_iter):
    """Check resume states and pretrain_network paths.

    When a resume state is configured, overrides any explicit pretrained
    model paths so that every network resumes from the checkpoints saved at
    ``resume_iter`` (unless listed in 'ignore_resume_networks'). Mutates
    ``opt`` in place.

    Args:
        opt (dict): Options.
        resume_iter (int): Resume iteration.
    """
    if opt['path']['resume_state']:
        # get all the networks
        networks = [key for key in opt.keys() if key.startswith('network_')]
        flag_pretrain = False
        for network in networks:
            if opt['path'].get(f'pretrain_{network}') is not None:
                flag_pretrain = True
        if flag_pretrain:
            print('pretrain_network path will be ignored during resuming.')
        # set pretrained model paths
        for network in networks:
            name = f'pretrain_{network}'
            basename = network.replace('network_', '')
            if opt['path'].get('ignore_resume_networks') is None or (network
                                                                     not in opt['path']['ignore_resume_networks']):
                opt['path'][name] = osp.join(opt['path']['models'], f'net_{basename}_{resume_iter}.pth')
                print(f"Set {name} to {opt['path'][name]}")
        # change param_key to params in resume
        # presumably resumed checkpoints store weights under 'params' rather
        # than 'params_ema' — verify against the model saving code
        param_keys = [key for key in opt['path'].keys() if key.startswith('param_key')]
        for param_key in param_keys:
            if opt['path'][param_key] == 'params_ema':
                opt['path'][param_key] = 'params'
                print(f'Set {param_key} to params')
167,991 | from typing import Union, Dict
import torch
The provided code snippet includes necessary dependencies for implementing the `ycbcr2rgb` function. Write a Python function `def ycbcr2rgb(x: torch.Tensor) -> torch.Tensor` to solve the following problem:
r"""Convert a batch of YCbCr images to a batch of RGB images It implements the inversion of the above rgb2ycbcr function. Args: x: Batch of images with shape (N, 3, H, W). YCbCr color space, range [0, 1]. Returns: Batch of images with shape (N, 3, H, W). RGB color space.
Here is the function:
def ycbcr2rgb(x: torch.Tensor) -> torch.Tensor:
    r"""Convert a batch of YCbCr images to a batch of RGB images.

    Implements the inverse of the corresponding rgb2ycbcr transform.

    Args:
        x: Batch of images with shape (N, 3, H, W). YCbCr color space, range [0, 1].

    Returns:
        Batch of images with shape (N, 3, H, W). RGB color space.
    """
    scaled = x * 255.
    # inverse conversion matrix and per-channel offsets, in the [0, 255] domain
    inv_matrix = 255. * torch.tensor(
        [[0.00456621, 0.00456621, 0.00456621],
         [0, -0.00153632, 0.00791071],
         [0.00625893, -0.00318811, 0]]).to(x)
    offset = torch.tensor([-222.921, 135.576, -276.836]).view(1, 3, 1, 1).to(x)
    rgb = torch.matmul(scaled.permute(0, 2, 3, 1), inv_matrix).permute(0, 3, 1, 2) + offset
    return rgb / 255.
167,993 | import datetime
import logging
import time
from .dist_util import get_dist_info, master_only
def get_root_logger(logger_name='pyiqa', log_level=logging.INFO, log_file=None):
    """Get the root logger.
    The logger will be initialized if it has not been initialized. By default a
    StreamHandler will be added. If `log_file` is specified, a FileHandler will
    also be added.
    Args:
        logger_name (str): root logger name. Default: 'pyiqa'.
        log_file (str | None): The log filename. If specified, a FileHandler
            will be added to the root logger.
        log_level (int): The root logger level. Note that only the process of
            rank 0 is affected, while other processes will set the level to
            "Error" and be silent most of the time.
    Returns:
        logging.Logger: The root logger.
    """
    logger = logging.getLogger(logger_name)
    # if the logger has been initialized, just return it
    if logger_name in initialized_logger:
        return logger
    format_str = '%(asctime)s %(levelname)s: %(message)s'
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(format_str))
    logger.addHandler(stream_handler)
    logger.propagate = False  # avoid duplicate messages via the true root logger
    rank, _ = get_dist_info()
    if rank != 0:
        # non-master processes stay quiet except for errors
        logger.setLevel('ERROR')
    elif log_file is not None:
        logger.setLevel(log_level)
        # add file handler
        file_handler = logging.FileHandler(log_file, 'w')
        file_handler.setFormatter(logging.Formatter(format_str))
        file_handler.setLevel(log_level)
        logger.addHandler(file_handler)
    initialized_logger[logger_name] = True
    return logger
The provided code snippet includes necessary dependencies for implementing the `init_wandb_logger` function. Write a Python function `def init_wandb_logger(opt)` to solve the following problem:
We now only use wandb to sync tensorboard log.
Here is the function:
def init_wandb_logger(opt):
    """We now only use wandb to sync tensorboard log.

    Args:
        opt (dict): Config with 'name' and a 'logger.wandb' section holding
            'project' and optionally 'resume_id'.
    """
    import wandb
    logger = get_root_logger()
    project = opt['logger']['wandb']['project']
    resume_id = opt['logger']['wandb'].get('resume_id')
    if resume_id:
        # continue a previously started wandb run
        wandb_id = resume_id
        resume = 'allow'
        logger.warning(f'Resume wandb logger with id={wandb_id}.')
    else:
        wandb_id = wandb.util.generate_id()
        resume = 'never'
    wandb.init(id=wandb_id, resume=resume, name=opt['name'], config=opt, project=project, sync_tensorboard=True)
    logger.info(f'Use wandb logger with id={wandb_id}; project={project}.')
167,994 | import datetime
import logging
import time
from .dist_util import get_dist_info, master_only
The provided code snippet includes necessary dependencies for implementing the `get_env_info` function. Write a Python function `def get_env_info()` to solve the following problem:
Get environment information. Currently, only log the software version.
Here is the function:
def get_env_info():
    """Get environment information.

    Currently, only log the software version.

    Returns:
        str: Multi-line message with the PyTorch and TorchVision versions.
    """
    # imported lazily so merely importing this module does not require torchvision
    import torch
    import torchvision
    msg = ('\nVersion Information: '
           f'\n\tPyTorch: {torch.__version__}'
           f'\n\tTorchVision: {torchvision.__version__}')
    return msg
167,995 | import argparse
import yaml
import csv
import pandas as pd
from itertools import chain
from pyiqa.data import build_dataset, build_dataloader
from pyiqa.default_model_configs import DEFAULT_CONFIGS
from pyiqa.utils.options import ordered_yaml
from pyiqa.metrics import calculate_plcc, calculate_srcc, calculate_krcc
from tqdm import tqdm
import torch
from pyiqa import create_metric
def flatten_list(list_of_list):
    """Flatten one level of nesting, tolerating non-list inputs.

    Args:
        list_of_list: A list of lists, a flat list, or a scalar.

    Returns:
        list: A flat list. Scalars are wrapped in a single-element list.
        An empty list is returned unchanged (the previous version raised
        IndexError on ``[]`` because it indexed element 0 unconditionally).
    """
    if not isinstance(list_of_list, list):
        return [list_of_list]
    if not list_of_list:
        return []
    if isinstance(list_of_list[0], list):
        return list(chain.from_iterable(list_of_list))
    return list_of_list
167,996 | import os
import scipy.io as sio
import random
import numpy
import pickle
import csv
import pandas as pd
def get_meta_info():
    """Collect (reference, distorted, DMOS) rows for both parts of the LIVE
    Multiply Distorted database and write them to a CSV meta-info file.
    """
    root_dir = '../datasets/LIVEmultidistortiondatabase/'
    parts = ['Part 1', 'Part 2']
    sub_img_folders = ['blurjpeg', 'blurnoise']
    save_meta_path = './datasets/meta_info/meta_info_LIVEMDDataset.csv'
    f = open(save_meta_path, 'w')
    csvwriter = csv.writer(f)
    header = ['ref_name', 'dist_name', 'dmos']
    csvwriter.writerow(header)
    for p, subf in zip(parts, sub_img_folders):
        sub_root_dir = os.path.join(root_dir, p)
        img_list = sio.loadmat(os.path.join(sub_root_dir, 'Imagelists.mat'))
        dist_names = [x[0][0] for x in img_list['distimgs']]
        score = sio.loadmat(os.path.join(sub_root_dir, 'Scores.mat'))
        alldmos = score['DMOSscores'][0]
        for i in range(len(dist_names)):
            dis_name = f'{p}/{subf}/{dist_names[i]}'
            # the reference image shares the prefix before the first underscore
            ref_name = f"{p}/{subf}/{dist_names[i].split('_')[0]}.bmp"
            dmos = alldmos[i]
            # NOTE(review): `msg` is built but never used; candidate for removal
            msg = f'{ref_name:<15}\t{dis_name:<15}\t{dmos:<15}\n'
            csvwriter.writerow([ref_name, dis_name, dmos])
    f.close()
167,997 | import os
import scipy.io as sio
import random
import numpy
import pickle
import csv
import pandas as pd
def get_random_splits(seed=123):
    """Create 10 random 80/20 train/val splits of LIVEMD, grouped by
    reference image so all distortions of one reference land in the same
    split, and pickle the resulting index lists.

    Args:
        seed (int): RNG seed for reproducible splits. Default: 123.
    """
    random.seed(seed)
    meta_info_file = './datasets/meta_info/meta_info_LIVEMDDataset.csv'
    save_path = f'./datasets/meta_info/livemd_{seed}.pkl'
    ratio = 0.8
    meta_info = pd.read_csv(meta_info_file)
    ref_img_list = list(set(meta_info['ref_name'].tolist()))
    ref_img_num = len(ref_img_list)
    num_splits = 10
    train_num = int(ratio * ref_img_num)
    split_info = {}
    for i in range(num_splits):
        # NOTE(review): the 'test' list is never filled below
        split_info[i + 1] = {'train': [], 'val': [], 'test': []}
    for i in range(num_splits):
        random.shuffle(ref_img_list)
        train_ref_img_names = ref_img_list[:train_num]
        for j in range(meta_info.shape[0]):
            tmp_ref_name = meta_info.loc[j]['ref_name']
            if tmp_ref_name in train_ref_img_names:
                split_info[i + 1]['train'].append(j)
            else:
                split_info[i + 1]['val'].append(j)
        print(meta_info.shape[0], len(split_info[i + 1]['train']), len(split_info[i + 1]['val']))
    with open(save_path, 'wb') as sf:
        pickle.dump(split_info, sf)
167,998 | import os
import scipy.io as sio
import numpy as np
from PIL import Image
import pickle
import csv
from tqdm import tqdm
import random
The provided code snippet includes necessary dependencies for implementing the `get_meta_info` function. Write a Python function `def get_meta_info(seed=123)` to solve the following problem:
Generate meta information and train/val/test splits for AVA dataset. The split follows: - split index 1: official, https://github.com/imfing/ava_downloader/blob/master/AVA_dataset/aesthetics_image_lists/generic_test.jpgl - split index 2: https://github.com/BestiVictory/ILGnet/tree/local/data/AVA1
Here is the function:
def get_meta_info(seed=123):
    """Generate meta information and train/val/test splits for AVA dataset.
    The split follows:
        - split index 1: official, https://github.com/imfing/ava_downloader/blob/master/AVA_dataset/aesthetics_image_lists/generic_test.jpgl
        - split index 2: https://github.com/BestiVictory/ILGnet/tree/local/data/AVA1

    Args:
        seed (int): RNG seed used when carving 5% of train off as validation.
    """
    all_label_file = '../datasets/AVA_dataset/AVA.txt'
    # read ILGnet split
    # NOTE(review): ILGnet_train_list is read but never used afterwards
    ILGnet_train_list = [
        x.strip().split()[0] for x in open('../datasets/AVA_dataset/train_splits/ILGnet_train.txt').readlines()
    ]
    ILGnet_test_list = [
        x.strip().split()[0] for x in open('../datasets/AVA_dataset/train_splits/ILGnet_val.txt').readlines()
    ]
    official_test_list = [
        x.strip().split()[0] + '.jpg' for x in open('../datasets/AVA_dataset/aesthetics_image_lists/generic_test.jpgl')
    ]
    save_meta_path = './datasets/meta_info/meta_info_AVADataset.csv'
    split_info = {
        1: {
            'train': [],
            'val': [],
            'test': []
        },
        2: {
            'train': [],
            'val': [],
            'test': []
        },
    }
    with open(all_label_file) as f, open(save_meta_path, 'w') as sf:
        csvwriter = csv.writer(sf)
        header = ['img_name'] + ['MOS'] + [f'c{i}' for i in range(1, 11)] + ['semantic_tag1', 'semantic_tag2'
                                                                             ] + ['official split', 'ILGnet split']
        csvwriter.writerow(header)
        count = 0
        for row in tqdm(f.readlines()):
            row = row.strip().split()
            ratings = np.array([int(x) for x in row[2:12]])
            # calculate mos as the rating-weighted mean of the scores 1..10
            mos = np.sum(np.arange(1, 11) * ratings) / np.sum(ratings)
            new_row = [row[1] + '.jpg', f'{mos:.3}'] + row[2:14]
            img_path = os.path.join('../datasets/AVA_dataset/ava_images/', new_row[0])
            if os.path.exists(img_path):
                try:
                    # keep only images that open successfully and exceed 10x10 px
                    img = Image.open(img_path)
                    w, h = img.size
                    if w > 10 and h > 10:
                        if new_row[0] in official_test_list:
                            split_info[1]['test'].append(count)
                            official_split = 2
                        else:
                            split_info[1]['train'].append(count)
                            official_split = 0
                        if new_row[0] in ILGnet_test_list:
                            split_info[2]['test'].append(count)
                            ilgnet_split = 2
                        else:
                            split_info[2]['train'].append(count)
                            ilgnet_split = 0
                        new_row += [official_split, ilgnet_split]
                        csvwriter.writerow(new_row)
                        count += 1
                # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
                # narrowing to `except Exception` would be safer
                except:
                    print(f'{img_path} image is broken')
    print(len(split_info[1]['train']), len(split_info[1]['test']))
    print(len(split_info[2]['train']), len(split_info[2]['test']))
    save_split_path = './datasets/meta_info/ava_official_ilgnet.pkl'
    # separate 5% as validation part
    random.seed(seed)
    for split_idx in split_info.keys():
        train_split = split_info[split_idx]['train']
        sep_idx = int(round(len(train_split) * 0.95))
        random.shuffle(train_split)
        split_info[split_idx]['train'] = train_split[:sep_idx]
        split_info[split_idx]['val'] = train_split[sep_idx:]
    with open(save_split_path, 'wb') as sf:
        pickle.dump(split_info, sf)
167,999 | import os
import scipy.io as sio
import random
import numpy
import pickle
import csv
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `get_meta_info` function. Write a Python function `def get_meta_info()` to solve the following problem:
Train/Val/Test split file from official github: https://github.com/subpic/koniq/blob/master/metadata/koniq10k_distributions_sets.csv
Here is the function:
def get_meta_info():
    """Rewrite the official KonIQ-10k annotation CSV into the project's
    meta-info format and pickle the official train/val/test split indices.

    Train/Val/Test split file from official github:
    https://github.com/subpic/koniq/blob/master/metadata/koniq10k_distributions_sets.csv
    """
    info_file = '../datasets/koniq10k/koniq10k_distributions_sets.csv'
    save_meta_path = './datasets/meta_info/meta_info_KonIQ10kDataset.csv'
    split_info = {'train': [], 'val': [], 'test': []}
    with open(info_file, 'r') as f, open(save_meta_path, 'w+') as sf:
        csvreader = csv.reader(f)
        head = next(csvreader)  # consume the original header row
        csvwriter = csv.writer(sf)
        new_head = ['img_name', 'mos', 'std', 'split', 'c1', 'c2', 'c3', 'c4', 'c5', 'c_total']
        csvwriter.writerow(new_head)
        for idx, row in enumerate(csvreader):
            print(row)
            split = row[9]
            # encode the split string as an integer: 0 train / 1 val / 2 test
            if split == 'training':
                split = 'train'
                row[9] = 0
            elif split == 'validation':
                split = 'val'
                row[9] = 1
            elif split == 'test':
                row[9] = 2
            split_info[split].append(idx)
            # reorder columns: name, mos, std, split, then rating distribution
            new_row = [row[0]] + row[7:10] + row[1:7]
            print(new_row)
            csvwriter.writerow(new_row)
    save_split_path = './datasets/meta_info/koniq10k_official.pkl'
    with open(save_split_path, 'wb') as sf:
        pickle.dump({1: split_info}, sf)
168,000 | import os
import scipy.io as sio
import random
import numpy
import pickle
import csv
import pandas as pd
from tqdm import tqdm
def get_meta_info():
    """Merge the three SPAQ annotation spreadsheets (MOS/attribute scores,
    scene labels, EXIF tags) into a single CSV meta-info file, one row per
    image, asserting the sheets are aligned row by row.
    """
    mos_label_file = '../datasets/SPAQ/Annotations/MOS and Image attribute scores.xlsx'
    scene_label_file = '../datasets/SPAQ/Annotations/Scene category labels.xlsx'
    exif_label_file = '../datasets/SPAQ/Annotations/EXIF_tags.xlsx'
    mos_label = pd.read_excel(mos_label_file)
    scene_label = pd.read_excel(scene_label_file)
    exif_label = pd.read_excel(exif_label_file)
    new_head = mos_label.keys().tolist() + scene_label.keys().tolist()[1:] + exif_label.keys().tolist()[1:]
    # presumably the last two EXIF columns share a name; renamed to keep CSV
    # headers unique — verify against the source spreadsheet
    new_head[-2] = 'Time0'
    new_head[-1] = 'Time1'
    save_meta_path = './datasets/meta_info/meta_info_SPAQDataset.csv'
    with open(save_meta_path, 'w+') as sf:
        csvwriter = csv.writer(sf)
        csvwriter.writerow(new_head)
        for ridx in range(mos_label.shape[0]):
            mos_row = mos_label.loc[ridx].tolist()
            scene_row = scene_label.loc[ridx].tolist()
            exif_row = exif_label.loc[ridx].tolist()
            print(mos_row, scene_row, exif_row)
            # all three sheets must list the same image in the same row order
            assert mos_row[0] == scene_row[0] == exif_row[0]
            row_label = mos_row + scene_row[1:] + exif_row[1:]
            csvwriter.writerow(row_label)
168,001 | import os
import scipy.io as sio
import random
import numpy
import pickle
import csv
import pandas as pd
from tqdm import tqdm
def get_random_splits(seed=123):
    """Create 10 random 80/20 train/val splits over the 11125 SPAQ images
    and pickle the index lists.

    Args:
        seed (int): RNG seed for reproducible splits. Default: 123.
    """
    random.seed(seed)
    total_num = 11125
    all_img_index = list(range(total_num))
    num_splits = 10
    save_path = f'./datasets/meta_info/spaq_seed{seed}.pkl'
    # NOTE(review): `ratio` is unused; the 0.8 below is hard-coded
    ratio = [0.8, 0.2]  # train/val/test
    sep_index = int(round(0.8 * total_num))
    split_info = {}
    for i in range(num_splits):
        random.shuffle(all_img_index)
        split_info[i + 1] = {'train': all_img_index[:sep_index], 'val': all_img_index[sep_index:]}
    with open(save_path, 'wb') as sf:
        pickle.dump(split_info, sf)
pickle.dump(split_info, sf) | null |
168,002 | import os
import scipy.io as sio
import random
import numpy
import pickle
import csv
import pandas as pd
from tqdm import tqdm
from glob import glob
def get_meta_info():
    """Gather (reference, distorted, Elo score) triplets from the PIPAL
    training label files and write them to a CSV meta-info file.
    """
    train_label_folder = '../datasets/PIPAL/Train_Label/'
    name_labels = []
    for f in sorted(glob(train_label_folder + '*.txt')):
        name_labels += [x.strip().split(',') for x in open(f).readlines()]
    # new_head = ['dist_name', 'elo_score', 'hq_name']
    new_head = ['hq_name', 'dist_name', 'elo_score']
    save_meta_path = './datasets/meta_info/meta_info_PIPALDataset.csv'
    with open(save_meta_path, 'w') as sf:
        csvwriter = csv.writer(sf)
        csvwriter.writerow(new_head)
        for n, l in name_labels:
            dist_name = n
            elo_score = l
            # the reference image shares the distorted name's prefix before '_'
            hq_name = dist_name.split('_')[0] + '.bmp'
            csvwriter.writerow([hq_name, dist_name, elo_score])
168,003 | import os
import scipy.io as sio
import random
import numpy
import pickle
import csv
import pandas as pd
def get_random_splits(seed=123):
    """Create 10 random 70/10/20 train/val/test splits for the GFIQA dataset
    and pickle the index lists.

    Args:
        seed (int): RNG seed. Note the output filename is hard-coded to
            'seed123' regardless of this value.
    """
    random.seed(seed)
    meta_info_file = '../datasets/meta_info/meta_info_GFIQADataset.csv'
    meta_info = pd.read_csv(meta_info_file)
    img_list = meta_info['img_name'].tolist()
    total_num = len(img_list)
    all_img_index = list(range(total_num))
    num_splits = 10
    # NOTE(review): filename stays 'seed123' even when `seed` differs
    save_path = '../datasets/meta_info/gfiqa_seed123.pkl'
    ratio = [0.7, 0.1, 0.2]  # train/val/test
    split_info = {}
    for i in range(num_splits):
        random.shuffle(all_img_index)
        sep1 = int(total_num * ratio[0])
        sep2 = sep1 + int(total_num * ratio[1])
        split_info[i + 1] = {
            'train': all_img_index[:sep1],
            'val': all_img_index[sep1:sep2],
            'test': all_img_index[sep2:]
        }
    with open(save_path, 'wb') as sf:
        pickle.dump(split_info, sf)
168,004 | import numpy as np
import torch
from pyiqa.archs.musiq_arch import MUSIQ
def check_same(x, y):
return np.abs(y - x).mean() < np.abs(x.min())
tf_params = np.load(ckpt_path)
tf_keys = [k for k in tf_params.keys() if 'target' in k]
th_params = musiq_model.state_dict()
tf_params = np.load(ckpt_path)
tf_keys = [k for k in tf_params.keys() if 'target' in k]
th_keys = th_params.keys()
total_converted_params = 0
print(
f'Model param num: {len(tf_keys)}/tensorflow, {len(tf_keys)}/pytorch. Converted param num: {total_converted_params}'
)
print(f'Save model to {save_path}')
torch.save(th_params, save_path)
The provided code snippet includes necessary dependencies for implementing the `convert_module` function. Write a Python function `def convert_module(tf_same_key_strs, th_same_key_strs=None)` to solve the following problem:
assign module with the same keywords
Here is the function:
def convert_module(tf_same_key_strs, th_same_key_strs=None):
    """Assign module with the same keywords.

    Filters the module-level ``tf_keys`` / ``th_keys`` down to names that
    contain every keyword in ``tf_same_key_strs`` / ``th_same_key_strs``
    (case-insensitive), pairs them up in sorted order, reshapes each TF
    tensor to the torch layout, and copies it into the module-level
    ``th_params`` in place. Increments the global ``total_converted_params``
    counter once per copied tensor.
    """
    global total_converted_params
    if th_same_key_strs is None:
        # Default: use the same keyword filter for both frameworks.
        th_same_key_strs = tf_same_key_strs
    # Keep only keys containing ALL requested keywords (case-insensitive).
    tf_filter_keys = []
    th_filter_keys = []
    for tfk in tf_keys:
        keep_flag = True
        for sk in tf_same_key_strs:
            if sk.lower() not in tfk.lower():
                keep_flag = False
        if keep_flag:
            tf_filter_keys.append(tfk)
    for thk in th_keys:
        keep_flag = True
        for sk in th_same_key_strs:
            if sk.lower() not in thk.lower():
                keep_flag = False
        if keep_flag:
            th_filter_keys.append(thk)
    # Pairing relies on both filtered lists having equal length and on sorted
    # order mapping each TF name onto the corresponding torch name.
    assert len(tf_filter_keys) == len(
        th_filter_keys), f'{tf_filter_keys}, {th_filter_keys}, {len(tf_filter_keys)}, {len(th_filter_keys)}'
    for tfk, thk in zip(sorted(tf_filter_keys), sorted(th_filter_keys)):
        print(f'Assign {tfk} to {thk}')
        tfw = tf_params[tfk]
        thw = th_params[thk]
        if 'conv' in tfk:
            # TF conv kernels are stored HWIO; torch expects OIHW.
            tfw = tfw.transpose(3, 2, 0, 1)
        elif 'key/' in tfk or 'value/' in tfk or 'query/' in tfk:
            if 'kernel' in tfk:
                # Flatten the attention projection to a (384, 384) matrix.
                tfw = tfw.transpose(1, 2, 0)
                tfw = tfw.reshape(384, 384)
            elif 'bias' in tfk:
                tfw = tfw.reshape(-1)
        elif 'out/' in tfk:
            if 'kernel' in tfk:
                tfw = tfw.transpose(2, 0, 1)
                tfw = tfw.reshape(384, 384)
            elif 'bias' in tfk:
                tfw = tfw.reshape(-1)
        elif 'bias' in tfk or 'scale' in tfk:
            if len(tfw.shape) > 1:
                tfw = tfw.squeeze()
            # NOTE(review): this branch is unreachable — any 2-D array already
            # satisfies ``len(tfw.shape) > 1`` above. Confirm whether the
            # transpose was ever intended to run.
            elif len(tfw.shape) == 2:
                tfw = tfw.transpose()
        assert tfw.shape == thw.shape, f'shape not match, {tfw.shape}, {thw.shape}'
        th_params[thk].copy_(torch.from_numpy(tfw))
        assert check_same(tfw, th_params[thk]), f'value not match'
        total_converted_params = total_converted_params + 1
168,005 | import os
import scipy.io as sio
import random
import numpy
import pickle
import csv
import pandas as pd
from tqdm import tqdm
def get_meta_info():
    """Build the FLIVE (PaQ-2-PiQ) meta-info CSV and the official split pickle.

    Writes one row per full image followed by one row per patch; the
    ``split`` column is 0/1/2 for train/val/test. The global row index of
    each entry is mirrored into ``split_info[1]`` and pickled at the end.
    """
    patch_label_file = '../../PaQ-2-PiQ/database/labels_patch.csv'
    img_label_file = '../../PaQ-2-PiQ/database/labels_image.csv'
    test_label_file = '../../PaQ-2-PiQ/database/labels640.csv'
    train_label_file = '../../PaQ-2-PiQ/database/labels=640_padded.csv'
    all_patch_label = pd.read_csv(patch_label_file)
    all_img_label = pd.read_csv(img_label_file)
    test_label = pd.read_csv(test_label_file)
    train_label = pd.read_csv(train_label_file)
    train_img_list = []
    val_img_list = []
    test_img_list = test_label['name_image'].tolist()
    # Rows flagged ``is_valid`` in the padded-640 file form the validation set.
    for i in tqdm(range(train_label.shape[0])):
        name = train_label.loc[i]['name_image']
        is_valid = train_label.loc[i]['is_valid']
        if is_valid:
            val_img_list.append(name)
    # Bare file names (folder prefix stripped) used to match patches below.
    test_img_key_list = [x.split('/')[1] for x in test_img_list]
    val_img_key_list = [x.split('/')[1] for x in val_img_list]
    save_meta_path = './datasets/meta_info/meta_info_FLIVEDataset.csv'
    split_info = {
        1: {
            'train': [],
            'val': [],
            'test': []
        },
    }
    with open(save_meta_path, 'w') as sf:
        csvwriter = csv.writer(sf)
        head = ['img_name/patch_name', 'mos', 'split']
        csvwriter.writerow(head)
        count = 0
        # get image info
        for i in tqdm(range(all_img_label.shape[0])):
            name = all_img_label.loc[i]['name']
            mos = all_img_label.loc[i]['mos']
            name_key = name.split('/')[1]
            if name in test_img_list:
                split = 2
                split_info[1]['test'].append(count)
            elif name in val_img_list:
                split = 1
                split_info[1]['val'].append(count)
            else:
                split = 0
                split_info[1]['train'].append(count)
            row = [name, mos, split]
            csvwriter.writerow(row)
            count += 1
        print(len(split_info[1]['train']), len(split_info[1]['val']), len(split_info[1]['test']))
        print(sum([len(split_info[1]['train']), len(split_info[1]['val']), len(split_info[1]['test'])]))
        # get patch info
        for i in tqdm(range(all_patch_label.shape[0])):
            name = all_patch_label.loc[i]['name']
            mos = all_patch_label.loc[i]['mos']
            # Patch names look like '<folder>/<image>_patch_<k>'; strip the
            # suffix so each patch inherits its source image's split.
            name_key = name.split('/')[1].split('_patch_')[0]
            if name_key in test_img_key_list:
                split = 2
                split_info[1]['test'].append(count)
            elif name_key in val_img_key_list:
                split = 1
                split_info[1]['val'].append(count)
            else:
                split = 0
                split_info[1]['train'].append(count)
            row = [name, mos, split]
            csvwriter.writerow(row)
            count += 1
        print(all_img_label.shape[0], all_patch_label.shape[0])
        print(all_img_label.shape[0] + all_patch_label.shape[0])
        print(len(split_info[1]['train']), len(split_info[1]['val']), len(split_info[1]['test']))
        print(sum([len(split_info[1]['train']), len(split_info[1]['val']), len(split_info[1]['test'])]))
    save_split_path = './datasets/meta_info/flive_official.pkl'
    with open(save_split_path, 'wb') as sf:
        pickle.dump(split_info, sf)
168,006 | import os
import scipy.io as sio
import random
import numpy
import pickle
import csv
from tqdm import tqdm
def get_meta_info():
    """Convert the official KADID-10k ``dmos.csv`` into the pyiqa meta-info CSV.

    The source file lists ``dist_img, ref_img, dmos, var``; the output swaps
    the first two columns into ``ref_name, dist_name, dmos, std`` order.
    """
    info_file = '../datasets/kadid10k/dmos.csv'
    save_meta_path = './datasets/meta_info/meta_info_KADID10kDataset.csv'
    with open(info_file, 'r') as f, open(save_meta_path, 'w+') as sf:
        reader = csv.reader(f)
        head = next(reader)
        print(head)
        writer = csv.writer(sf)
        writer.writerow(['ref_name', 'dist_name', 'dmos', 'std'])
        for row in reader:
            print(row)
            dist_name, ref_name, dmos, std = row[0], row[1], row[2], row[3]
            writer.writerow([ref_name, dist_name, dmos, std])
168,007 | import os
import random
import numpy
import pickle
import csv
import pandas as pd
def get_meta_info(root_dir, save_meta_path):
    """Merge per-attribute score files into one meta-info CSV.

    Reads ``Scores_{Details,Exposure,Overall}.csv`` under *root_dir*, attaches
    the device-level and scene-level train/test split of each image, and
    writes all rows (the three attributes concatenated) to *save_meta_path*.

    Improvements: the two split files are read once instead of once per
    attribute (they do not depend on the attribute), and every file is opened
    via ``with`` so handles are closed deterministically.
    """
    attrs = ['Details', 'Exposure', 'Overall']

    # The split files are attribute-independent; read them once, up front.
    device_split = {}
    with open(f'{root_dir}/Device Split.csv') as fh:
        reader = csv.reader(fh)
        next(reader)  # skip header
        for item in reader:
            device_split[item[0]] = item[1]

    scene_split = {}
    with open(f'{root_dir}/Scene Split.csv') as fh:
        reader = csv.reader(fh)
        next(reader)  # skip header
        for item in reader:
            scene_split[item[0]] = item[1]

    header_all = None
    rows_all = []
    for att in attrs:
        lpath = f'{root_dir}/Scores_{att}.csv'
        with open(lpath, 'r') as fh:
            lreader = csv.reader(fh)
            header = next(lreader)
            header_all = header + ['DeviceSplit', 'SceneSplit']
            for item in lreader:
                tmp_row = item
                # Image paths use Windows separators, e.g. 'folder\\name.jpg'.
                img_name = tmp_row[0].split("\\")[1]
                # NOTE(review): as in the original, ``ds``/``ss`` carry over
                # from the previous row when an image has no split entry —
                # confirm every image is covered by both split files.
                if img_name in device_split:
                    ds = device_split[img_name]
                for k, v in scene_split.items():
                    if k in img_name:
                        ss = v
                tmp_row += [ds, ss]
                tmp_row[0] = tmp_row[0].replace('\\', '/')
                rows_all.append(tmp_row)

    with open(save_meta_path, 'w') as file:
        csv_writer = csv.writer(file)
        csv_writer.writerow(header_all)
        csv_writer.writerows(rows_all)
168,008 | import os
import scipy.io as sio
import random
import numpy
import pickle
import csv
def get_meta_info():
    """Export LIVE Challenge MOS annotations to the meta-info CSV."""
    root_dir = '../datasets/LIVEC/'
    data_dir = os.path.join(root_dir, 'Data')
    img_names = sio.loadmat(os.path.join(data_dir, 'AllImages_release.mat'))['AllImages_release']
    mos_labels = sio.loadmat(os.path.join(data_dir, 'AllMOS_release.mat'))['AllMOS_release'][0]
    mos_std = sio.loadmat(os.path.join(data_dir, 'AllStdDev_release.mat'))['AllStdDev_release'][0]

    save_meta_path = './datasets/meta_info/meta_info_LIVEChallengeDataset.csv'
    with open(save_meta_path, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['img_name', 'mos', 'std'])
        # Each entry of the names array wraps the file name two levels deep.
        for idx, name_item in enumerate(img_names):
            writer.writerow([name_item[0][0], mos_labels[idx], mos_std[idx]])
168,009 | import os
import scipy.io as sio
import random
import numpy
import pickle
import csv
def get_random_splits(seed=123, total_num=1162):
    """Create 10 random 80/20 train/val splits for LIVE Challenge.

    Improvements: the dataset size is a parameter (default 1162, the LIVEC
    image count) instead of a hard-coded literal, and the split boundary is
    derived from the declared ``ratio`` instead of a duplicated ``0.8``.

    Args:
        seed: RNG seed, also embedded in the output pickle file name.
        total_num: number of images in the dataset.

    Writes a pickle mapping split index (1-based) to
    ``{'train': [...], 'val': [...]}`` index lists.
    """
    random.seed(seed)
    all_img_index = list(range(total_num))
    num_splits = 10
    ratio = [0.8, 0.2]  # train/val
    sep_index = int(round(ratio[0] * total_num))
    save_path = f'./datasets/meta_info/livechallenge_{seed}.pkl'
    split_info = {}
    for i in range(num_splits):
        random.shuffle(all_img_index)
        # Slices copy, so each split owns an independent index list.
        split_info[i + 1] = {'train': all_img_index[:sep_index], 'val': all_img_index[sep_index:]}
    with open(save_path, 'wb') as sf:
        pickle.dump(split_info, sf)
168,010 | import os
import scipy.io as sio
import random
import numpy
import pickle
import csv
import pandas as pd
def get_meta_info():
    """Convert the CSIQ label file into the pyiqa meta-info CSV."""
    label_file = '../datasets/CSIQ/csiq_label.txt'
    save_meta_path = './datasets/meta_info/meta_info_CSIQDataset.csv'

    with open(label_file) as lf:
        name_dmos = [line.strip().split() for line in lf]

    with open(save_meta_path, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['ref_name', 'dist_name', 'dmos'])
        for dist_name, dmos in name_dmos:
            # The reference image shares the stem of the distorted file name.
            ref_name = f"{dist_name.split('.')[0]}.png"
            writer.writerow([ref_name, dist_name, dmos])
168,011 | import os
import scipy.io as sio
import random
import numpy
import pickle
import csv
import pandas as pd
def get_random_splits(seed=123):
    """Create 10 random 80/20 splits of CSIQ, grouped by reference image.

    All distortions of one reference land in the same partition so train and
    val never share content. Improvement: train references are held in a set,
    making the per-row membership test O(1) instead of O(n).

    Writes ``{split_idx: {'train': [...], 'val': [...], 'test': []}}`` row
    index lists to a pickle.
    """
    random.seed(seed)
    meta_info_file = './datasets/meta_info/meta_info_CSIQDataset.csv'
    save_path = f'./datasets/meta_info/csiq_{seed}.pkl'
    ratio = 0.8

    meta_info = pd.read_csv(meta_info_file)
    ref_img_list = list(set(meta_info['ref_name'].tolist()))
    ref_img_num = len(ref_img_list)
    num_splits = 10
    train_num = int(ratio * ref_img_num)

    split_info = {i + 1: {'train': [], 'val': [], 'test': []} for i in range(num_splits)}
    for i in range(num_splits):
        random.shuffle(ref_img_list)
        # Set gives O(1) membership tests inside the row loop below.
        train_ref_img_names = set(ref_img_list[:train_num])
        for j in range(meta_info.shape[0]):
            tmp_ref_name = meta_info.loc[j]['ref_name']
            if tmp_ref_name in train_ref_img_names:
                split_info[i + 1]['train'].append(j)
            else:
                split_info[i + 1]['val'].append(j)
        print(meta_info.shape[0], len(split_info[i + 1]['train']), len(split_info[i + 1]['val']))
    with open(save_path, 'wb') as sf:
        pickle.dump(split_info, sf)
168,012 | import os
import scipy.io as sio
import random
import numpy
import pickle
import csv
import pandas as pd
from tqdm import tqdm
def get_meta_info():
    """Build the PieAPP meta-info CSV and the official split pickle.

    Iterates the official train/val/test reference lists; for each reference
    it reads the pairwise-label CSV and writes one row per (A, B) pair.
    Train rows carry raw + processed preference, val rows only the raw
    preference, and test rows additionally carry the per-image score of
    image B. Row indices per split are mirrored into ``split_info[1]``.
    """
    root_path = '../datasets/PieAPP_dataset_CVPR_2018/'
    train_list_file = '../datasets/PieAPP_dataset_CVPR_2018/train_reference_list.txt'
    val_list_file = '../datasets/PieAPP_dataset_CVPR_2018/val_reference_list.txt'
    test_list_file = '../datasets/PieAPP_dataset_CVPR_2018/test_reference_list.txt'
    train_ref_list = [x.strip() for x in open(train_list_file).readlines()]
    val_ref_list = [x.strip() for x in open(val_list_file).readlines()]
    test_ref_list = [x.strip() for x in open(test_list_file).readlines()]
    save_meta_path = './datasets/meta_info/meta_info_PieAPPDataset.csv'
    split_info = {
        1: {
            'train': [],
            'val': [],
            'test': []
        },
    }
    with open(save_meta_path, 'w') as sf:
        csvwriter = csv.writer(sf)
        head = [
            'ref_img_path', 'dist_imgA_path', 'dist_imgB_path', 'raw preference for A', 'processed preference for A',
            'per_img score for dist_imgB', 'split'
        ]
        csvwriter.writerow(head)
        count = 0
        split = 'train'
        splits_str = ['train', 'val', 'test']
        split_lists = [train_ref_list, val_ref_list, test_ref_list]
        split_flags = [0, 1, 2]  # numeric codes written to the 'split' column
        for sp_str, sp_ls, sp_flag in zip(splits_str, split_lists, split_flags):
            for ref_name in sp_ls:
                ref_raw_name = ref_name.split('.')[0]
                label_path = os.path.join(root_path, 'labels', sp_str, f'{ref_raw_name}_pairwise_labels.csv')
                name_label = pd.read_csv(label_path)
                if sp_str == 'test':
                    # Test references additionally ship per-image scores.
                    test_file_label = os.path.join(root_path, 'labels', sp_str, f'{ref_raw_name}_per_image_score.csv')
                    test_label = pd.read_csv(test_file_label)
                for i in range(name_label.shape[0]):
                    row = name_label.loc[i].tolist()
                    ref_path = f'reference_images/{sp_str}/{row[0]}'
                    # Image A may be the reference itself in some pairs.
                    if 'ref' in row[1]:
                        distA_path = f'reference_images/{sp_str}/{row[1]}'
                    else:
                        distA_path = f'distorted_images/{sp_str}/{ref_raw_name}/{row[1]}'
                    distB_path = f'distorted_images/{sp_str}/{ref_raw_name}/{row[2]}'
                    if sp_str == 'train':
                        new_row = [ref_path, distA_path, distB_path] + row[3:5] + [''] + [sp_flag]
                    elif sp_str == 'val':
                        new_row = [ref_path, distA_path, distB_path] + [row[3], '', '', sp_flag]
                    elif sp_str == 'test':
                        dist_keys = test_label[' distorted image'].tolist()
                        dist_scores = test_label[' score for distorted image']
                        dist_score_dict = {k: v for k, v in zip(dist_keys, dist_scores)}
                        per_img_score = dist_score_dict[row[2]]
                        new_row = [ref_path, distA_path, distB_path] + [row[3], '', per_img_score, sp_flag]
                    csvwriter.writerow(new_row)
                    split_info[1][sp_str].append(count)
                    count += 1
    save_split_path = './datasets/meta_info/pieapp_official.pkl'
    with open(save_split_path, 'wb') as sf:
        pickle.dump(split_info, sf)
168,013 | import os
import scipy.io as sio
import random
import numpy as np
import pickle
import csv
import pandas as pd
from tqdm import tqdm
from glob import glob
from pyiqa.utils.img_util import is_image_file
def is_image_file(filename):
def make_dataset(dir, max_dataset_size=float('inf')):
    """Recursively collect image file paths under *dir*.

    Walks the directory tree in sorted order, keeps files accepted by
    ``is_image_file``, and returns at most *max_dataset_size* paths.
    """
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    images = []
    for root, _, fnames in sorted(os.walk(dir)):
        images.extend(os.path.join(root, fname) for fname in fnames if is_image_file(fname))
    return images[:min(max_dataset_size, len(images))]
168,014 | import os
import scipy.io as sio
import random
import numpy as np
import pickle
import csv
import pandas as pd
from tqdm import tqdm
from glob import glob
from pyiqa.utils.img_util import is_image_file
def get_meta_info():
    """Build the BAPPS meta-info CSV and the official split pickle.

    Rows are the 2AFC triplets (ref/p0/p1 + human judgement) followed by the
    JND pairs (the ref column holds the literal ``'jnd'``). ``split`` is 0
    for train and 1 for val; row indices are mirrored into ``split_info``.

    Fixes vs. the previous version:
      * the JND loop classified splits with the stale ``ref_path`` left over
        from the 2AFC loop — it now inspects the JND ``p0_path`` itself;
      * ``tqdm(total=len(p0_path))`` measured the length of a *string* —
        it now uses the number of JND pairs.
    """
    # 2afc triplets
    ref_dir = '../datasets/PerceptualSimilarity/dataset/2afc/*/*/ref/*.png'
    p0_dir = '../datasets/PerceptualSimilarity/dataset/2afc/*/*/p0/*.png'
    p1_dir = '../datasets/PerceptualSimilarity/dataset/2afc/*/*/p1/*.png'
    judge_dir = '../datasets/PerceptualSimilarity/dataset/2afc/*/*/judge/*.npy'
    ref_path_list = sorted(glob(ref_dir))
    p0_path_list = sorted(glob(p0_dir))
    p1_path_list = sorted(glob(p1_dir))
    judge_path_list = sorted(glob(judge_dir))
    # jnd pairs (val only in the official release)
    p0_dir = '../datasets/PerceptualSimilarity/dataset/jnd/val/*/p0/*.png'
    p1_dir = '../datasets/PerceptualSimilarity/dataset/jnd/val/*/p1/*.png'
    judge_dir = '../datasets/PerceptualSimilarity/dataset/jnd/val/*/same/*.npy'
    jnd_p0_path_list = sorted(glob(p0_dir))
    jnd_p1_path_list = sorted(glob(p1_dir))
    jnd_judge_path_list = sorted(glob(judge_dir))

    save_meta_path = './datasets/meta_info/meta_info_BAPPSDataset.csv'
    split_info = {
        1: {
            'train': [],
            'val': [],
            'test': []
        },
    }
    with open(save_meta_path, 'w') as sf:
        csvwriter = csv.writer(sf)
        head = ['ref_img_path', 'p0_img_path', 'p1_img_path', 'judge/same', 'split']
        csvwriter.writerow(head)
        count = 0
        for ref_path, p0_path, p1_path, jd_path in tqdm(
                zip(ref_path_list, p0_path_list, p1_path_list, judge_path_list), total=len(ref_path_list)):
            ref_path = ref_path.split('dataset/')[-1]
            p0_path = p0_path.split('dataset/')[-1]
            p1_path = p1_path.split('dataset/')[-1]
            jd_label = np.load(jd_path)[0]
            # Paths contain 'train' or 'val' as a directory component.
            if 'train' in ref_path:
                split = 0
                split_info[1]['train'].append(count)
            elif 'val' in ref_path:
                split = 1
                split_info[1]['val'].append(count)
            row = [ref_path, p0_path, p1_path, jd_label, split]
            csvwriter.writerow(row)
            count += 1
        for p0_path, p1_path, jd_path in tqdm(
                zip(jnd_p0_path_list, jnd_p1_path_list, jnd_judge_path_list), total=len(jnd_p0_path_list)):
            p0_path = p0_path.split('dataset/')[-1]
            p1_path = p1_path.split('dataset/')[-1]
            jd_label = float(np.load(jd_path))
            # Classify by the JND pair's own path (all JND data lives under
            # 'jnd/val/', so this always selects the val split).
            if 'train' in p0_path:
                split = 0
                split_info[1]['train'].append(count)
            elif 'val' in p0_path:
                split = 1
                split_info[1]['val'].append(count)
            row = ['jnd', p0_path, p1_path, jd_label, split]
            csvwriter.writerow(row)
            count += 1
    save_split_path = './datasets/meta_info/bapps_official.pkl'
    with open(save_split_path, 'wb') as sf:
        pickle.dump(split_info, sf)
168,015 | import os
import random
import numpy
import pickle
import csv
import pandas as pd
def get_meta_info(root_dir, save_meta_path):
    """Convert TID-style ``mos_with_names.txt`` / ``mos_std.txt`` into a meta-info CSV."""
    mos_file = os.path.join(root_dir, 'mos_with_names.txt')
    std_file = os.path.join(root_dir, 'mos_std.txt')
    with open(mos_file) as fh:
        mos_names = [line.strip().split() for line in fh]
    with open(std_file) as fh:
        std_list = [line.strip() for line in fh]

    with open(save_meta_path, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['ref_name', 'dist_name', 'mos', 'std'])
        for (mos, name), std_val in zip(mos_names, std_list):
            # Distorted name 'iXX_...' maps to reference 'IXX.BMP'
            # (reference 25 is lower-case on disk).
            ref_name = f'I{name[1:3]}.BMP'
            ref_name = ref_name.replace('I25.BMP', 'i25.bmp')
            # Some entries use an upper-case 'I' prefix on disk.
            img_path = os.path.join(root_dir, 'distorted_images', name)
            if not os.path.exists(img_path):
                name = name.replace('i', 'I')
            writer.writerow([ref_name, name, mos, std_val])
168,016 | import os
import random
import numpy
import pickle
import csv
import pandas as pd
def get_random_splits(meta_info_file, save_path, seed=123):
    """Create 10 random 80/20 splits grouped by reference image.

    All distortions of one reference land in the same partition. Improvement:
    train references are held in a set, making the per-row membership test
    O(1) instead of O(n).

    Args:
        meta_info_file: CSV with a 'ref_name' column, one row per sample.
        save_path: output pickle path.
        seed: RNG seed.
    """
    random.seed(seed)
    ratio = 0.8

    meta_info = pd.read_csv(meta_info_file)
    ref_img_list = list(set(meta_info['ref_name'].tolist()))
    ref_img_num = len(ref_img_list)
    num_splits = 10
    train_num = int(ratio * ref_img_num)

    split_info = {i + 1: {'train': [], 'val': [], 'test': []} for i in range(num_splits)}
    for i in range(num_splits):
        random.shuffle(ref_img_list)
        # Set gives O(1) membership tests inside the row loop below.
        train_ref_img_names = set(ref_img_list[:train_num])
        for j in range(meta_info.shape[0]):
            tmp_ref_name = meta_info.loc[j]['ref_name']
            if tmp_ref_name in train_ref_img_names:
                split_info[i + 1]['train'].append(j)
            else:
                split_info[i + 1]['val'].append(j)
        print(meta_info.shape[0], len(split_info[i + 1]['train']), len(split_info[i + 1]['val']))
    with open(save_path, 'wb') as sf:
        pickle.dump(split_info, sf)
168,017 | import os
import scipy.io as sio
import random
import numpy
import pickle
import csv
import pandas as pd
def get_meta_info():
    """Export LIVE IQA (release 2) DMOS labels to the meta-info CSV.

    Only distorted images are written; rows whose ``orgs`` flag equals 1 mark
    the pristine originals and are skipped.
    """
    root_dir = '../datasets/LIVEIQA_release2/'
    dmos_mat = sio.loadmat(os.path.join(root_dir, 'dmos.mat'))  # difference of mos: test - ref. lower is better
    mos = dmos_mat['dmos'][0]
    org_flag = dmos_mat['orgs'][0]
    refnames = sio.loadmat(os.path.join(root_dir, 'refnames_all.mat'))['refnames_all'][0]

    # Fixed per-distortion image counts of the official release.
    sub_folders = ['jp2k'] * 227 + ['jpeg'] * 233 + ['wn'] * 174 + ['gblur'] * 174 + ['fastfading'] * 174
    sub_indexes = list(range(1, 228)) + list(range(1, 234)) + list(range(1, 175)) * 3

    save_meta_path = './datasets/meta_info/meta_info_LIVEIQADataset.csv'
    with open(save_meta_path, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['ref_name', 'dist_name', 'mos'])
        for i, (folder, index) in enumerate(zip(sub_folders, sub_indexes)):
            if org_flag[i] == 1:
                continue  # pristine original, not a distorted sample
            writer.writerow([f'refimgs/{refnames[i][0]}', f'{folder}/img{index}.bmp', mos[i]])
168,018 | import os
import scipy.io as sio
import random
import numpy
import pickle
import csv
import pandas as pd
def get_random_splits(seed=123):
    """Create 10 random 80/20 splits of LIVE IQA, grouped by reference image.

    All distortions of one reference land in the same partition. Improvement:
    train references are held in a set, making the per-row membership test
    O(1) instead of O(n).
    """
    random.seed(seed)
    meta_info_file = './datasets/meta_info/meta_info_LIVEIQADataset.csv'
    save_path = f'./datasets/meta_info/live_{seed}.pkl'
    ratio = 0.8

    meta_info = pd.read_csv(meta_info_file)
    ref_img_list = list(set(meta_info['ref_name'].tolist()))
    ref_img_num = len(ref_img_list)
    num_splits = 10
    train_num = int(ratio * ref_img_num)

    split_info = {i + 1: {'train': [], 'val': [], 'test': []} for i in range(num_splits)}
    for i in range(num_splits):
        random.shuffle(ref_img_list)
        # Set gives O(1) membership tests inside the row loop below.
        train_ref_img_names = set(ref_img_list[:train_num])
        for j in range(meta_info.shape[0]):
            tmp_ref_name = meta_info.loc[j]['ref_name']
            if tmp_ref_name in train_ref_img_names:
                split_info[i + 1]['train'].append(j)
            else:
                split_info[i + 1]['val'].append(j)
        print(meta_info.shape[0], len(split_info[i + 1]['train']), len(split_info[i + 1]['val']))
    with open(save_path, 'wb') as sf:
        pickle.dump(split_info, sf)
168,019 | from solidgpt.src.orchestration.orchestration import *
def quick_start(category: str):
    """Load and run the default orchestration graph for *category*.

    Looks up ``<category>_config_data.json`` under the local workspace config
    directory, registers it under the name "default graph", and runs it.
    """
    Initializer()
    app = Orchestration()
    app.add_graph(os.path.join(LOCAL_STORAGE_DIR, "workspace", "config", f"{category}_config_data.json"), "default graph")
    app.run_graph_with_name("default graph")
168,020 | import logging
import os
import sys
from solidgpt.definitions import LOCAL_STORAGE_DIR
from solidgpt.src.manager.embedding.embeddingmanager import EmbeddingManager
from solidgpt.src.manager.embedding.embeddingmodel import EmbeddingModelParameter
LOCAL_STORAGE_DIR = os.path.join(ROOT_DIR, "../localstorage")
class EmbeddingManager:
    """Process-wide singleton that owns named :class:`EmbeddingModel` instances."""

    # Singleton storage; __new__ always hands back this one instance.
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super(EmbeddingManager, cls).__new__(cls)
            # You can initialize the instance attributes here
        return cls._instance

    def __init__(self):
        # NOTE(review): __init__ runs on every EmbeddingManager() call and
        # re-creates the container, discarding previously added models —
        # confirm whether this reset is intended for a singleton.
        self.embed_models_container: dict[str, "EmbeddingModel"] = {}

    def add_default_embed_model(self):
        """Register the default embedding model using the module-level paths."""
        default_param = EmbeddingModelParameter(resource_name=DEFAULT_EMBEDDING_RESOURCE_NAME,
                                                original_resources_folder_path=LOCAL_EMBEDDING_STORAGE_ORIGINAL_RESOURCE_DIR,
                                                divided_resources_folder_path=LOCAL_EMBEDDING_STORAGE_DIVIDED_RESOURCE_DIR,
                                                embedded_resources_folder_path=LOCAL_EMBEDDING_STORAGE_EMBEDDED_RESOURCE_DIR,
                                                has_embedded=False)
        self.add_embed_model(DEFAULT_EMBEDDING_MODEL_LABEL, default_param)

    def add_embed_model(self, label: str, param: EmbeddingModelParameter, do_embedding=True):
        """Register a model under *label*; embed its resources unless *do_embedding* is False."""
        self.embed_models_container[label] = EmbeddingModel(param)
        if do_embedding:
            self.embed_models_container[label].embed_resources()

    def query_from_embed_model(self, query_message: str, model_label=DEFAULT_EMBEDDING_MODEL_LABEL, response_num=12):
        """Return the top *response_num* matches for *query_message*, or None if the label is unknown."""
        if self.embed_models_container.get(model_label) is None:
            logging.error(f"Embedding model {model_label} not found.")
            return
        return self.embed_models_container[model_label].query_most_match_result_from_resource(query_message, response_num)
class EmbeddingModelParameter:
    """Bundle of paths and flags describing one embedding resource set."""

    def __init__(self, resource_name: str, original_resources_folder_path: str, divided_resources_folder_path: str, embedded_resources_folder_path: str, has_embedded: bool = False):
        self.resource_name = resource_name
        # Folder holding the raw input resources.
        self.original_resources_folder_path = original_resources_folder_path
        # Folder receiving the resources after splitting into chunks.
        self.divided_resources_folder_path = divided_resources_folder_path
        # Folder receiving the embedded (vectorized) output.
        self.embedded_resources_folder_path = embedded_resources_folder_path
        # True when the embeddings have already been computed.
        self.has_embedded = has_embedded
def embedding(label: str, original_resources_path = os.path.join(LOCAL_STORAGE_DIR, "embedding", "originalresources")):
    """Register and embed a resource set under *label* via the EmbeddingManager.

    Originals are read from *original_resources_path*; divided and embedded
    outputs go to the fixed localstorage subfolders.
    """
    embedding_manager = EmbeddingManager()
    embedding_manager.add_embed_model(label, EmbeddingModelParameter(
        resource_name= label,
        original_resources_folder_path= original_resources_path,
        divided_resources_folder_path= os.path.join(LOCAL_STORAGE_DIR, "embedding", "dividedresources"),
        embedded_resources_folder_path= os.path.join(LOCAL_STORAGE_DIR, "embedding", "embeddedresources"),
        has_embedded=False
    ))
168,021 | from solidgpt.src.orchestration.orchestration import *
def generate_node_prd(node_id: str, input_ids: list[int], output_ids: list[int], manual_review_result: bool = False, input_path = None):
    """Build a WorkNode that runs the WritePRD skill via a product-manager agent.

    Args:
        node_id: graph-unique id for the node.
        input_ids: output ids this node consumes; [-1] means user input.
        output_ids: ids under which this node publishes its result.
        manual_review_result: pause for human review of the output when True.
        input_path: optional path to a JSON file loaded as the skill input.
    """
    # write prd skill
    skill: WorkSkill = WritePRD()
    skill.init_config(
        [
            {
                "param_path": input_path,
                # NOTE(review): the loading method is the *string* name of an
                # enum member; presumably the config loader resolves it — confirm.
                "loading_method": "SkillInputLoadingMethod.LOAD_FROM_STRING",
                "load_from_output_id": input_ids[0]
            },
        ],
        [
            {
                "id": output_ids[0]
            }
        ])
    agent: WorkAgent = AgentProductManager(skill)
    node: WorkNode = WorkNode(node_id, agent, manual_review_result)
    return node
def generate_node_custom_system_design(node_id: str, input_ids: list[int], output_ids: list[int], manual_review_result: bool = False):
    """Build a WorkNode running the customized system-design skill via a principal-engineer agent.

    Args:
        node_id: graph-unique id for the node.
        input_ids: output ids this node consumes.
        output_ids: ids under which this node publishes its result.
        manual_review_result: pause for human review of the output when True.
    """
    # write hld skill
    # NOTE(review): the skill key "WrteSystemDesign" looks misspelled —
    # confirm it matches the registered custom-skill name before "fixing" it.
    skill: WorkSkill = CustomizeSkillManager._instance.get_customzied_skill("WrteSystemDesign")
    skill.init_config(
        [
            {
                "param_path": "",
                "loading_method": "SkillInputLoadingMethod.LOAD_FROM_OUTPUT_ID",
                "load_from_output_id": input_ids[0]
            },
        ],
        [
            {
                "id": output_ids[0]
            }
        ])
    agent: WorkAgent = AgentPrincipalEngineer(skill)
    node: WorkNode = WorkNode(node_id, agent, manual_review_result)
    return node
def generate_node_kanban(node_id: str, input_ids: list[int], output_ids: list[int], manual_review_result: bool = False):
    """Build a WorkNode running the CreateKanBan skill via a principal-engineer agent.

    Args:
        node_id: graph-unique id for the node.
        input_ids: output ids this node consumes.
        output_ids: ids under which this node publishes its result.
        manual_review_result: pause for human review of the output when True.
    """
    skill: WorkSkill = CreateKanBan()
    skill.init_config(
        [
            {
                "param_path": "",
                "loading_method": "SkillInputLoadingMethod.LOAD_FROM_OUTPUT_ID",
                "load_from_output_id": input_ids[0]
            },
        ],
        [
            {
                "id": output_ids[0]
            }
        ])
    agent: WorkAgent = AgentPrincipalEngineer(skill)
    node: WorkNode = WorkNode(node_id, agent, manual_review_result)
    return node
def run_system_dev_graph():
    """Assemble the PRD -> system design -> kanban pipeline and save its config JSON."""
    Initializer()
    app = WorkGraph()
    # input_ids = [-1] means the input is from the user
    app.add_node(generate_node_prd("0", input_ids=[-1],
                                   input_path=os.path.join(LOCAL_STORAGE_DIR, "workspace", "in", "AIProductBasicInfo.json"),
                                   output_ids=[0],
                                   manual_review_result=True))
    # Each node consumes the previous node's output id.
    app.add_node(generate_node_custom_system_design("1", input_ids=[0], output_ids=[1], manual_review_result=True))
    app.add_node(generate_node_kanban("2", input_ids=[1], output_ids=[2], manual_review_result=True))
    app.save_data(os.path.join(LOCAL_STORAGE_DIR, "workspace", "config", "system_config_data.json"))
168,022 | from solidgpt.src.orchestration.orchestration import *
def generate_node_prd(node_id: str, input_ids: list[int], output_ids: list[int], manual_review_result: bool = False, input_path = None):
def generate_node_hld(node_id: str, input_ids: list[int], output_ids: list[int], manual_review_result: bool = False):
def generate_node_kanban(node_id: str, input_ids: list[int], output_ids: list[int], manual_review_result: bool = False):
def generate_node_page(node_id: str, input_ids: list[int], output_ids: list[int], manual_review_result: bool = False):
def generate_node_run_app(node_id: str, input_ids: list[int], output_ids: list[int], manual_review_result: bool = False):
def run_webapp_dev_graph():
    """Assemble the PRD -> HLD -> kanban -> page -> run-app pipeline and save its config JSON."""
    Initializer()
    app = WorkGraph()
    # input_ids = [-1] means the input is from the user
    app.add_node(generate_node_prd("0", input_ids=[-1],
                                   input_path=os.path.join(LOCAL_STORAGE_DIR, "workspace", "in", "WebsiteBasicInfo.json"),
                                   output_ids=[0],
                                   manual_review_result=True))
    # Each node consumes the previous node's output id.
    app.add_node(generate_node_hld("1", input_ids=[0], output_ids=[1]))
    app.add_node(generate_node_kanban("2", input_ids=[1], output_ids=[2], manual_review_result=True))
    app.add_node(generate_node_page("3", input_ids=[2], output_ids=[3]))
    app.add_node(generate_node_run_app("4", input_ids=[3], output_ids=[4]))
    app.save_data(os.path.join(LOCAL_STORAGE_DIR, "workspace", "config", "webapp_config_data.json"))
168,023 | import logging
import uuid
from pydantic import BaseModel
from solidgpt.src.api.api_response import *
from solidgpt.src.configuration.configreader import ConfigReader
from solidgpt.src.manager.initializer import Initializer
from fastapi import FastAPI, UploadFile, File, HTTPException, Body, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
from pathlib import Path
from solidgpt.src.orchestration.orchestration import Orchestration
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.graph import *
from solidgpt.src.workgraph.graph_helper import GraphType, GraphStatus
from solidgpt.src.api.celery_tasks import *
async def test_get():
    """Health-check endpoint: returns a fixed hello-world JSON payload."""
    return JSONResponse(content={"message": f"[Test Get] Message 'Hello World!'."}, status_code=200)
168,024 | import logging
import uuid
from pydantic import BaseModel
from solidgpt.src.api.api_response import *
from solidgpt.src.configuration.configreader import ConfigReader
from solidgpt.src.manager.initializer import Initializer
from fastapi import FastAPI, UploadFile, File, HTTPException, Body, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
from pathlib import Path
from solidgpt.src.orchestration.orchestration import Orchestration
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.graph import *
from solidgpt.src.workgraph.graph_helper import GraphType, GraphStatus
from solidgpt.src.api.celery_tasks import *
async def test_post(body: dict = Body(...)):
    """Echo endpoint: returns the ``msg`` field of the posted JSON body."""
    return JSONResponse(content={"message": f"[Test Post] Message '{body['msg']}'."}, status_code=200)
168,025 | import logging
import uuid
from pydantic import BaseModel
from solidgpt.src.api.api_response import *
from solidgpt.src.configuration.configreader import ConfigReader
from solidgpt.src.manager.initializer import Initializer
from fastapi import FastAPI, UploadFile, File, HTTPException, Body, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
from pathlib import Path
from solidgpt.src.orchestration.orchestration import Orchestration
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.graph import *
from solidgpt.src.workgraph.graph_helper import GraphType, GraphStatus
from solidgpt.src.api.celery_tasks import *
async def remove_all_files():
    """Delete everything under the repo storage directory.

    Returns 200 on success; any failure is surfaced as a 500 with the error text.
    """
    try:
        # Remove existing files
        delete_directory_contents(REPO_STORAGE_DIR)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Server error: {e}")
    return JSONResponse(content={"message": f"All files removed"}, status_code=200)
168,026 | import logging
import uuid
from pydantic import BaseModel
from solidgpt.src.api.api_response import *
from solidgpt.src.configuration.configreader import ConfigReader
from solidgpt.src.manager.initializer import Initializer
from fastapi import FastAPI, UploadFile, File, HTTPException, Body, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
from pathlib import Path
from solidgpt.src.orchestration.orchestration import Orchestration
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.graph import *
from solidgpt.src.workgraph.graph_helper import GraphType, GraphStatus
from solidgpt.src.api.celery_tasks import *
class GraphResult:
def __init__(self, result, graph_name):
def set_result_obj(self, result_obj):
def get_result_obj(self):
def get_name(self):
def set_name(self, new_name):
graph_result_map: dict[str, GraphResult] = {}
async def onboard_repo(body: dict = Body(...)):
    """Kick off the repo-onboarding graph as a background celery task.

    Expects ``upload_id`` and ``openai_key`` in the JSON body; returns the
    generated ``graph_id`` the client can poll with.
    """
    # Enqueue the background task: onboard repo
    logging.info("celery task: onboard repo graph")
    graph_id = str(uuid.uuid4())
    upload_id = body['upload_id']
    openai_key = body['openai_key']
    result = celery_task_onboard_repo_graph.apply_async(args=[openai_key, upload_id, graph_id])
    # Track the async result so later status/result endpoints can look it up.
    graph_result = GraphResult(result, "Onboard Repo Graph")
    graph_result_map[graph_id] = graph_result
    return JSONResponse(content={
        "message": f"Onboarding repo...",
        "graph_id": graph_id,
        "is_final": True,
        "current_work_name": "onboard repo"
    }, status_code=200)
168,027 | import logging
import uuid
from pydantic import BaseModel
from solidgpt.src.api.api_response import *
from solidgpt.src.configuration.configreader import ConfigReader
from solidgpt.src.manager.initializer import Initializer
from fastapi import FastAPI, UploadFile, File, HTTPException, Body, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
from pathlib import Path
from solidgpt.src.orchestration.orchestration import Orchestration
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.graph import *
from solidgpt.src.workgraph.graph_helper import GraphType, GraphStatus
from solidgpt.src.api.celery_tasks import *
class GraphResult:
    """Holds an async task result together with a human-readable graph name."""

    # Class-level defaults (name-mangled); overwritten per instance in __init__.
    __result = None
    __graph_name = ""

    def __init__(self, result, graph_name):
        self.__result = result
        self.__graph_name = graph_name
        return

    def set_result_obj(self, result_obj):
        self.__result = result_obj

    def get_result_obj(self):
        return self.__result

    def get_name(self):
        return self.__graph_name

    def set_name(self, new_name):
        self.__graph_name = new_name
# graph_id -> GraphResult for launched runs.
graph_result_map: dict[str, GraphResult] = {}
# graph_id -> PRD stage counter; generate_prd only runs while the stage is < 2.
graph_stage_map: dict = {}
async def generate_prd(body: dict = Body(...)):
    """Drive the two-stage PRD Celery graph, tracking stage per graph id.

    Stage 0/1 enqueue work and advance the counter; once the counter reaches 2
    the graph is considered complete and no more work is enqueued.
    """
    # Enqueue the background task: prd
    logging.info("celery task: prd graph")
    new_graph_id = str(uuid.uuid4())
    onboarding_id = body['onboarding_id']
    current_graph_id = body['current_graph_id']
    # Unknown or missing graph id: start a fresh graph at stage 0.
    if not current_graph_id or current_graph_id not in graph_result_map or current_graph_id not in graph_stage_map:
        current_graph_id = new_graph_id
        graph_stage_map[current_graph_id] = 0
    openai_key = body['openai_key']
    requirement = body['requirement']
    edit_content = body['edit_content']
    project_additional_info = body['project_additional_info']
    graph_stage = graph_stage_map.get(current_graph_id, 0)
    if graph_stage < 2:
        result = celery_task_prd_graph.apply_async(args=[
            openai_key, requirement, project_additional_info, onboarding_id, graph_stage, edit_content,
            current_graph_id])
        graph_result = GraphResult(result, "PRD Graph")
        graph_result_map[current_graph_id] = graph_result
        # Advance the per-graph stage so the next call runs the next step.
        graph_stage_map[current_graph_id] = graph_stage + 1
        if graph_stage_map[current_graph_id] == 1:
            # First step just ran; the client should call again for step 2.
            return JSONResponse(content={
                "message": f"Running prd graph...",
                "graph_id": current_graph_id,
                "is_final": False,
                "current_work_name": "write prd step 1"
            }, status_code=200)
        else:
            return JSONResponse(content={
                "message": f"Running prd graph...",
                "graph_id": current_graph_id,
                "is_final": True,
                "current_work_name": "write prd step 2"
            }, status_code=200)
    # Stage counter already at 2: both steps have been enqueued previously.
    return JSONResponse(content={
        "message": f"Cannot run prd graph, the graph has already completed",
        "graph_id": current_graph_id,
        "is_final": True,
        "current_work_name": "write prd"
    }, status_code=200)
168,028 | import logging
import uuid
from pydantic import BaseModel
from solidgpt.src.api.api_response import *
from solidgpt.src.configuration.configreader import ConfigReader
from solidgpt.src.manager.initializer import Initializer
from fastapi import FastAPI, UploadFile, File, HTTPException, Body, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
from pathlib import Path
from solidgpt.src.orchestration.orchestration import Orchestration
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.graph import *
from solidgpt.src.workgraph.graph_helper import GraphType, GraphStatus
from solidgpt.src.api.celery_tasks import *
class GraphResult:
    """Associates a Celery AsyncResult with a human-readable graph name."""
    # Name-mangled class-level defaults (_GraphResult__result / __graph_name).
    __result = None
    __graph_name = ""

    def __init__(self, result, graph_name):
        self.__result = result
        self.__graph_name = graph_name
        return

    def set_result_obj(self, result_obj):
        self.__result = result_obj

    def get_result_obj(self):
        return self.__result

    def get_name(self):
        return self.__graph_name

    def set_name(self, new_name):
        self.__graph_name = new_name
graph_result_map: dict[str, GraphResult] = {}
async def generate_tech_solution(body: dict = Body(...)):
    """Start the tech-solution Celery graph for an onboarded repo."""
    # Enqueue the background task: tech solution
    logging.info("celery task: tech solution graph")
    requirement = body['requirement']
    openai_key = body['openai_key']
    onboarding_id = body['onboarding_id']
    new_id = str(uuid.uuid4())
    task = celery_task_tech_solution_graph.apply_async(
        args=[openai_key, requirement, onboarding_id, new_id])
    graph_result_map[new_id] = GraphResult(task, "Tech Solution Graph")
    payload = {
        "message": f"Running tech solution graph...",
        "graph_id": new_id,
        "is_final": True,
        "current_work_name": "tech solution"
    }
    return JSONResponse(content=payload, status_code=200)
168,029 | import logging
import uuid
from pydantic import BaseModel
from solidgpt.src.api.api_response import *
from solidgpt.src.configuration.configreader import ConfigReader
from solidgpt.src.manager.initializer import Initializer
from fastapi import FastAPI, UploadFile, File, HTTPException, Body, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
from pathlib import Path
from solidgpt.src.orchestration.orchestration import Orchestration
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.graph import *
from solidgpt.src.workgraph.graph_helper import GraphType, GraphStatus
from solidgpt.src.api.celery_tasks import *
class GraphResult:
    """Associates a Celery AsyncResult with a human-readable graph name."""
    # Name-mangled class-level defaults (_GraphResult__result / __graph_name).
    __result = None
    __graph_name = ""

    def __init__(self, result, graph_name):
        self.__result = result
        self.__graph_name = graph_name
        return

    def set_result_obj(self, result_obj):
        self.__result = result_obj

    def get_result_obj(self):
        return self.__result

    def get_name(self):
        return self.__graph_name

    def set_name(self, new_name):
        self.__graph_name = new_name
graph_result_map: dict[str, GraphResult] = {}
async def repo_chat(body: dict = Body(...)):
    """Start a repo-chat Celery graph answering a question about an onboarded repo."""
    # Enqueue the background task: repo chat
    logging.info("celery task: repo chat graph")
    requirement = body['requirement']
    openai_key = body['openai_key']
    onboarding_id = body['onboarding_id']
    chat_graph_id = str(uuid.uuid4())
    task = celery_task_repo_chat_graph.apply_async(
        args=[openai_key, requirement, onboarding_id, chat_graph_id])
    graph_result_map[chat_graph_id] = GraphResult(task, "Repo chat Graph")
    return JSONResponse(content={
        "message": f"Running repo chat graph...",
        "graph_id": chat_graph_id,
        "is_final": True,
        "current_work_name": "repo chat"
    }, status_code=200)
168,030 | import logging
import uuid
from pydantic import BaseModel
from solidgpt.src.api.api_response import *
from solidgpt.src.configuration.configreader import ConfigReader
from solidgpt.src.manager.initializer import Initializer
from fastapi import FastAPI, UploadFile, File, HTTPException, Body, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
from pathlib import Path
from solidgpt.src.orchestration.orchestration import Orchestration
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.graph import *
from solidgpt.src.workgraph.graph_helper import GraphType, GraphStatus
from solidgpt.src.api.celery_tasks import *
autogen_task_map: dict = {}
async def autogen_analysis(body: dict = Body(...)):
    """Start a new interactive autogen-analysis session, or feed a message to one.

    New sessions launch a Celery graph; continuations push the requirement onto
    the task's Redis list, which the worker consumes as chat input.
    """
    logging.info("celery task: autogen analysis graph")
    onboarding_id = body['onboarding_id']
    openai_key = body['openai_key']
    requirement = body['requirement']
    task_id = body['task_id']
    is_new_session = int(body['is_new_session'])
    if is_new_session:
        graph_id = str(uuid.uuid4())
        async_result = celery_task_autogen_analysis_graph.apply_async(
            args=[openai_key, requirement, onboarding_id, graph_id])
        task_id = async_result.id
        autogen_task_map[task_id] = async_result
        return JSONResponse(content={
            "message": f"New autogen analysis graph...",
            "task_id": task_id,
            "is_final": True,
            "status": 1,
            "current_work_name": "autogen analysis"
        }, status_code=200)
    # Continuing an existing session: reject unknown task ids.
    if autogen_task_map.get(task_id) is None:
        return JSONResponse(content={
            "message": f"Invalid Autogen Analysis graph.",
            "task_id": task_id,
            "is_final": True,
            "status": 2,
            "current_work_name": "autogen analysis"
        }, status_code=200)
    # Hand the new user message to the worker via the task's Redis mailbox.
    redis_instance.lpush(task_id, requirement)
    return JSONResponse(content={
        "message": f"Continuing autogen analysis graph...",
        "task_id": task_id,
        "is_final": True,
        "status": 3,
        "current_work_name": "autogen analysis"
    }, status_code=200)
168,031 | import logging
import uuid
from pydantic import BaseModel
from solidgpt.src.api.api_response import *
from solidgpt.src.configuration.configreader import ConfigReader
from solidgpt.src.manager.initializer import Initializer
from fastapi import FastAPI, UploadFile, File, HTTPException, Body, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
from pathlib import Path
from solidgpt.src.orchestration.orchestration import Orchestration
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.graph import *
from solidgpt.src.workgraph.graph_helper import GraphType, GraphStatus
from solidgpt.src.api.celery_tasks import *
uploaded_repo_map: dict = {}
async def upload_repo(body: dict = Body(...)):
    """Accept base64 file payloads and enqueue the background upload task."""
    # Store the file to local storage
    upload_id = f'uploadrepo-{str(uuid.uuid4())}'
    names = body.get("file_names", [])
    contents = body.get("file_contents", [])
    uploaded_repo_map[upload_id] = celery_task_upload_repo.apply_async(
        args=[upload_id, contents, names])
    return JSONResponse(content={
        "message": f"Uploading files...",
        "upload_id": upload_id
    }, status_code=200)
168,032 | import logging
import uuid
from pydantic import BaseModel
from solidgpt.src.api.api_response import *
from solidgpt.src.configuration.configreader import ConfigReader
from solidgpt.src.manager.initializer import Initializer
from fastapi import FastAPI, UploadFile, File, HTTPException, Body, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
from pathlib import Path
from solidgpt.src.orchestration.orchestration import Orchestration
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.graph import *
from solidgpt.src.workgraph.graph_helper import GraphType, GraphStatus
from solidgpt.src.api.celery_tasks import *
autogen_task_map: dict = {}
async def get_autogen_status(body: dict = Body(...)):
    """Report the live state of an interactive autogen analysis task."""
    task_id: str = body['task_id']
    task = autogen_task_map.get(task_id)
    if task is None:
        return JSONResponse(content={
            "message": "status: Error, not exist or not started",
            "task_id": task_id,
            "status": 1,
            "result": ""
        }, status_code=200)
    if task.ready():
        return JSONResponse(content={
            "message": "Status: Current session is over, chat to start a new session",
            "task_id": task_id,
            "status": 2,
            "result": ""
        }, status_code=200)
    # Still running: task.info carries the latest PROGRESS meta from the worker.
    return JSONResponse(content={
        "message": "status: autogen task result",
        "task_id": task_id,
        "status": 3,
        "result": task.info
    }, status_code=200)
168,033 | import logging
import uuid
from pydantic import BaseModel
from solidgpt.src.api.api_response import *
from solidgpt.src.configuration.configreader import ConfigReader
from solidgpt.src.manager.initializer import Initializer
from fastapi import FastAPI, UploadFile, File, HTTPException, Body, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
from pathlib import Path
from solidgpt.src.orchestration.orchestration import Orchestration
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.graph import *
from solidgpt.src.workgraph.graph_helper import GraphType, GraphStatus
from solidgpt.src.api.celery_tasks import *
uploaded_repo_map: dict = {}
async def get_upload_status(body: dict = Body(...)):
    """Map the Celery upload task's state onto the response_upload schema."""
    repo_upload_id: str = body['upload_id']
    task = uploaded_repo_map.get(repo_upload_id, None)
    if task is None:
        return JSONResponse(content=response_upload(
            message="status: not exist or not started",
            status=1
        ), status_code=200)
    if not task.ready():
        # In-flight: task.info holds the worker's {'current', 'total'} progress meta.
        return JSONResponse(content=response_upload(
            message="status: uploading",
            status=3,
            progress=task.info
        ), status_code=200)
    if task.status == "SUCCESS":
        return JSONResponse(content=response_upload(
            message="status: uploaded",
            status=2
        ), status_code=200)
    if task.status == "FAILURE":
        return JSONResponse(content=response_upload(
            message="status: failed",
            status=4,
            error=task.traceback
        ), status_code=200)
    return JSONResponse(content=response_upload(
        message="status: unknown",
        status=5
    ), status_code=200)
168,034 | import logging
import uuid
from pydantic import BaseModel
from solidgpt.src.api.api_response import *
from solidgpt.src.configuration.configreader import ConfigReader
from solidgpt.src.manager.initializer import Initializer
from fastapi import FastAPI, UploadFile, File, HTTPException, Body, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
from pathlib import Path
from solidgpt.src.orchestration.orchestration import Orchestration
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.graph import *
from solidgpt.src.workgraph.graph_helper import GraphType, GraphStatus
from solidgpt.src.api.celery_tasks import *
async def get_graph_status_impl(body: dict = Body(...)):
    """Translate a launched graph's Celery state into a response_graph payload.

    Status codes: 0 unknown graph, 1 running, 2 succeeded, 3 unknown Celery
    state, 4 failed.
    """
    graph_id = body['graph_id']
    graph_result = graph_result_map.get(graph_id, None)
    if graph_result is None:
        return JSONResponse(content=response_graph(
            message=f"Graph does not exist.",
            status=0,
        ), status_code=200)
    async_result = graph_result.get_result_obj()
    name = graph_result.get_name()
    if not async_result.ready():
        # Still executing; async_result.info carries PROGRESS meta.
        return JSONResponse(content=response_graph(
            graph=name,
            message=f"Graph is still executing.",
            status=1,
            progress=async_result.info,
        ), status_code=200)
    if async_result.status == "SUCCESS":
        return JSONResponse(content=response_graph(
            graph=name,
            message=f"Graph finishes running.",
            status=2,
            result=async_result.get(),
        ), status_code=200)
    if async_result.status == "FAILURE":
        return JSONResponse(content=response_graph(
            graph=name,
            message=f"Graph has an error.",
            status=4,
            error=async_result.traceback,
        ), status_code=200)
    # Ready but neither SUCCESS nor FAILURE (e.g. REVOKED).
    return JSONResponse(content=response_graph(
        graph=name,
        message=f"Graph in unknown state.",
        status=3,
    ), status_code=200)
async def get_graph_status(body: dict = Body(...)):
    """Thin endpoint wrapper delegating to get_graph_status_impl."""
    return await get_graph_status_impl(body)
168,035 | import logging
import uuid
from pydantic import BaseModel
from solidgpt.src.api.api_response import *
from solidgpt.src.configuration.configreader import ConfigReader
from solidgpt.src.manager.initializer import Initializer
from fastapi import FastAPI, UploadFile, File, HTTPException, Body, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
from pathlib import Path
from solidgpt.src.orchestration.orchestration import Orchestration
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.graph import *
from solidgpt.src.workgraph.graph_helper import GraphType, GraphStatus
from solidgpt.src.api.celery_tasks import *
serverless_task_map: dict = {}
async def deploy_serverless(body: dict = Body(...)):
    """Queue a `serverless deploy` of the generated architecture for a session."""
    task_id = f'deploy-{str(uuid.uuid4())}'
    session_id = body.get("session_id", "")
    aws_key_id = body.get("aws_key_id", "")
    aws_access_key = body.get("aws_access_key", "")
    # The generated serverless.yml lives under the session's architecture output dir.
    yml_path = os.path.join(LOCAL_STORAGE_OUTPUT_DIR, session_id, "architecture", "serverless.yml")
    serverless_task_map[task_id] = celery_task_serverless_deploy.apply_async(
        args=[yml_path, aws_key_id, aws_access_key])
    return JSONResponse(content={
        "message": f"Deploying to AWS...",
        "task_id": task_id
    }, status_code=200)
168,036 | import logging
import uuid
from pydantic import BaseModel
from solidgpt.src.api.api_response import *
from solidgpt.src.configuration.configreader import ConfigReader
from solidgpt.src.manager.initializer import Initializer
from fastapi import FastAPI, UploadFile, File, HTTPException, Body, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
from pathlib import Path
from solidgpt.src.orchestration.orchestration import Orchestration
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.graph import *
from solidgpt.src.workgraph.graph_helper import GraphType, GraphStatus
from solidgpt.src.api.celery_tasks import *
serverless_task_map: dict = {}
async def remove_serverless(body: dict = Body(...)):
    """Queue a `serverless remove` tearing down a session's deployed stack."""
    task_id = f'remove-{str(uuid.uuid4())}'
    session_id = body.get("session_id", "")
    aws_key_id = body.get("aws_key_id", "")
    aws_access_key = body.get("aws_access_key", "")
    # Same per-session config file used by deploy_serverless.
    yml_path = os.path.join(LOCAL_STORAGE_OUTPUT_DIR, session_id, "architecture", "serverless.yml")
    serverless_task_map[task_id] = celery_task_serverless_remove.apply_async(
        args=[yml_path, aws_key_id, aws_access_key])
    return JSONResponse(content={
        "message": f"Removing from AWS...",
        "task_id": task_id
    }, status_code=200)
168,037 | import logging
import uuid
from pydantic import BaseModel
from solidgpt.src.api.api_response import *
from solidgpt.src.configuration.configreader import ConfigReader
from solidgpt.src.manager.initializer import Initializer
from fastapi import FastAPI, UploadFile, File, HTTPException, Body, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
from pathlib import Path
from solidgpt.src.orchestration.orchestration import Orchestration
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.graph import *
from solidgpt.src.workgraph.graph_helper import GraphType, GraphStatus
from solidgpt.src.api.celery_tasks import *
serverless_task_map: dict = {}
async def get_serverless_task_status(body: dict = Body(...)):
    """Report the state of a queued serverless deploy/remove task."""
    task_id: str = body['task_id']
    celery_task_result = serverless_task_map.get(task_id, None)
    if celery_task_result is None:
        return JSONResponse(content=response_serverless(
            message="status: not exist or not started",
            status="Failed"
        ), status_code=200)
    if celery_task_result.ready():
        if celery_task_result.status == "SUCCESS":
            # Celery "SUCCESS" only means the task ran; the CLI outcome is in
            # the returned {'status': ..., 'output': ...} dict.
            res_dict = celery_task_result.info
            if res_dict.get("status", "Failed") == "Succeeded":
                return JSONResponse(content=response_serverless(
                    message="status: succeeded",
                    status="Succeeded"
                ), status_code=200)
            else:
                return JSONResponse(content=response_serverless(
                    message="status: " + str(res_dict.get("status", "Failed")),
                    status="Failed",
                    error=res_dict.get("output", "no error output")
                ), status_code=200)
        elif celery_task_result.status == "FAILURE":
            return JSONResponse(content=response_serverless(
                message="status: failed due to an unexpected error",
                status="Failed",
                error=celery_task_result.traceback
            ), status_code=200)
        else:
            return JSONResponse(content=response_serverless(
                message="status: unknown",
                status="Failed"
            ), status_code=200)
    # NOTE(review): this running branch uses response_upload (which carries a
    # "progress" field) while every other branch uses response_serverless —
    # presumably deliberate to expose progress meta, but the schemas differ;
    # confirm clients handle both shapes.
    return JSONResponse(content=response_upload(
        message="status: executing serverless...",
        status="Running",
        progress=celery_task_result.info
    ), status_code=200)
168,038 | import logging
import uuid
from pydantic import BaseModel
from solidgpt.src.api.api_response import *
from solidgpt.src.configuration.configreader import ConfigReader
from solidgpt.src.manager.initializer import Initializer
from fastapi import FastAPI, UploadFile, File, HTTPException, Body, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
from pathlib import Path
from solidgpt.src.orchestration.orchestration import Orchestration
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.graph import *
from solidgpt.src.workgraph.graph_helper import GraphType, GraphStatus
from solidgpt.src.api.celery_tasks import *
class GraphResult:
    """Associates a Celery AsyncResult with a human-readable graph name."""
    # Name-mangled class-level defaults (_GraphResult__result / __graph_name).
    __result = None
    __graph_name = ""

    def __init__(self, result, graph_name):
        self.__result = result
        self.__graph_name = graph_name
        return

    def set_result_obj(self, result_obj):
        self.__result = result_obj

    def get_result_obj(self):
        return self.__result

    def get_name(self):
        return self.__graph_name

    def set_name(self, new_name):
        self.__graph_name = new_name
graph_result_map: dict[str, GraphResult] = {}
async def http_solution_v1(body: dict = Body(...)):
    """Start the HTTP-solution Celery graph and return its graph id.

    Uses the caller-provided graph_id (unlike the other handlers, which mint
    their own) and registers the run in graph_result_map for status polling.
    """
    # The original logged the same event twice; keep the more specific message.
    logging.info("celery task: http solution v1")
    graph_id = body['graph_id']
    openai_key = body['openai_key']
    requirement = body['requirement']
    openai.api_key = openai_key
    result = celery_task_http_solution.apply_async(args=[openai_key, requirement, graph_id])
    graph_result_map[graph_id] = GraphResult(result, "HTTP Solution Graph")
    return JSONResponse(content={
        "message": f"Running HTTP Solution graph...",
        "graph_id": graph_id,
        "is_final": True,
        # Bug fix: the AsyncResult object itself is not JSON-serializable and
        # broke this response; expose the Celery task id instead.
        "path": result.id,
        "current_work_name": "HTTP Solution"
    }, status_code=200)
168,039 | import base64
import time
import uuid
from celery import Celery
import shutil
from pathlib import Path
from solidgpt.src.constants import *
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.workskill import WorkSkill
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workgraph.graph import *
from solidgpt.src.manager.initializer import *
import redis
import os
import subprocess
def hello_world():
    """Log and print a greeting; returns a literal sanity-check string."""
    greeting = "hello repo!"
    logging.info(greeting)
    print(greeting)
    return "hello world"
168,040 | import base64
import time
import uuid
from celery import Celery
import shutil
from pathlib import Path
from solidgpt.src.constants import *
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.workskill import WorkSkill
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workgraph.graph import *
from solidgpt.src.manager.initializer import *
import redis
import os
import subprocess
def celery_task_upload_repo(self, repo_onboarding_id: str, file_contents: list, file_names: list):
    """Persist base64 data-URI file contents under the onboarding directory.

    Reports per-file progress through Celery task state; returns True.
    """
    logging.info("celery task: upload repo")
    total = len(file_contents)
    cur = 0
    for file_content, file_name in zip(file_contents, file_names):
        cur += 1
        file_location = Path(LOCAL_STORAGE_OUTPUT_DIR) / repo_onboarding_id / file_name
        # Create the directory if it doesn't exist
        file_location.parent.mkdir(parents=True, exist_ok=True)
        # Bug fix: the data-URI split was outside the try block, so content
        # without a comma raised IndexError; and the fallback "" was a str,
        # which open(..., "wb") cannot write. Decode defensively and fall back
        # to empty bytes, preserving the original swallow-and-continue intent.
        file_content_bytes = b""
        try:
            base64_content = file_content.split(",")[1]
            file_content_bytes = base64.b64decode(base64_content)
        except Exception:
            pass
        # Save the file with the provided filename (same path the mkdir prepared).
        file_location.write_bytes(file_content_bytes)
        self.update_state(
            state='PROGRESS',
            meta={'current': cur, 'total': total}
        )
    return True
168,041 | import base64
import time
import uuid
from celery import Celery
import shutil
from pathlib import Path
from solidgpt.src.constants import *
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.workskill import WorkSkill
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workgraph.graph import *
from solidgpt.src.manager.initializer import *
import redis
import os
import subprocess
def celery_task_onboard_repo_graph(self, openai_key, upload_id, output_id):
    """Build and execute the onboarding work graph, streaming summary progress."""
    logging.info("celery task: onboard repo graph")
    openai.api_key = openai_key
    graph = build_onboarding_graph(LOCAL_STORAGE_OUTPUT_DIR, output_id, upload_id, True)

    def report(current, total):
        # Surface per-file summarization progress via Celery task state.
        self.update_state(
            state='PROGRESS',
            meta={'current': current, 'total': total}
        )

    graph.callback_map[SKILL_NAME_SUMMARY_FILE] = report
    graph.init_node_dependencies()
    graph.execute()
    return graph.display_result.get_result()
168,042 | import base64
import time
import uuid
from celery import Celery
import shutil
from pathlib import Path
from solidgpt.src.constants import *
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.workskill import WorkSkill
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workgraph.graph import *
from solidgpt.src.manager.initializer import *
import redis
import os
import subprocess
def celery_task_prd_graph(openai_key, requirement, project_additional_info, onborading_graph_id, stage, edit_content,
                          current_graph_id):
    """Execute one stage of the PRD work graph and return the rendered result."""
    logging.info("celery task: prd graph")
    openai.api_key = openai_key
    graph = build_prd_graph_with_stage(requirement, project_additional_info, onborading_graph_id, stage,
                                       edit_content, current_graph_id)
    graph.init_node_dependencies()
    graph.execute()
    return graph.display_result.get_result()
168,043 | import base64
import time
import uuid
from celery import Celery
import shutil
from pathlib import Path
from solidgpt.src.constants import *
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.workskill import WorkSkill
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workgraph.graph import *
from solidgpt.src.manager.initializer import *
import redis
import os
import subprocess
def celery_task_tech_solution_graph(openai_key, requirement, onboarding_id, graph_id):
    """Execute the tech-solution work graph and return its rendered result."""
    logging.info("celery task: tech solution graph")
    openai.api_key = openai_key
    graph = build_tech_solution_graph(requirement, onboarding_id, graph_id)
    graph.init_node_dependencies()
    graph.execute()
    return graph.display_result.get_result()
168,044 | import base64
import time
import uuid
from celery import Celery
import shutil
from pathlib import Path
from solidgpt.src.constants import *
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.workskill import WorkSkill
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workgraph.graph import *
from solidgpt.src.manager.initializer import *
import redis
import os
import subprocess
def celery_task_repo_chat_graph(openai_key, requirement, onboarding_id, graph_id):
    """Execute the repo-chat work graph and return its rendered result."""
    logging.info("celery task: repo chat graph")
    openai.api_key = openai_key
    graph = build_repo_chat_graph(requirement, onboarding_id, graph_id)
    graph.init_node_dependencies()
    graph.execute()
    return graph.display_result.get_result()
168,045 | import base64
import time
import uuid
from celery import Celery
import shutil
from pathlib import Path
from solidgpt.src.constants import *
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.workskill import WorkSkill
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workgraph.graph import *
from solidgpt.src.manager.initializer import *
import redis
import os
import subprocess
redis_instance = redis.Redis()
def celery_task_autogen_analysis_graph(self, openai_key, requirement, onboarding_id, graph_id):
    """Run the interactive autogen-analysis graph, bridging chat I/O over Redis."""
    logging.info("celery task: autogen analysis graph")
    # Publish an initial empty state so pollers have something to read immediately.
    self.update_state(
        state='PROGRESS',
        meta={'result': "", 'state_id': ""}
    )

    def autogen_message_input_callback():
        # Block until the HTTP layer pushes the next user message onto this
        # task's Redis list (keyed by the Celery task id).
        data = redis_instance.blpop(self.request.id)
        if not data:
            return ""
        _key, raw = data
        text = raw.decode()
        print(text)  # for server debug
        return text

    def autogen_update_result_callback(result):
        # A fresh state_id marks each update so pollers can detect new content.
        self.update_state(
            state='PROGRESS',
            meta={'result': result, 'state_id': str(uuid.uuid4())}
        )

    openai.api_key = openai_key
    graph = build_autogen_analysis_graph(requirement, onboarding_id, graph_id,
                                         autogen_message_input_callback,
                                         autogen_update_result_callback)
    graph.init_node_dependencies()
    graph.execute()
    return ""
168,046 | import base64
import time
import uuid
from celery import Celery
import shutil
from pathlib import Path
from solidgpt.src.constants import *
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.workskill import WorkSkill
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workgraph.graph import *
from solidgpt.src.manager.initializer import *
import redis
import os
import subprocess
def celery_task_serverless_deploy(self, yml_path: str, aws_key_id: str, aws_access_key: str):
    """Run `serverless deploy` for the given config file.

    Returns {'status': 'Succeeded'|'Failed', 'message': ..., 'output': ...},
    which get_serverless_task_status inspects.
    """
    logging.info("celery task: serverless deploy")
    # Run from the config file's directory so relative paths in the yml resolve.
    directory = Path(yml_path).parent
    # Pass AWS credentials via the child environment only.
    env = os.environ.copy()
    env['AWS_ACCESS_KEY_ID'] = aws_key_id
    env['AWS_SECRET_ACCESS_KEY'] = aws_access_key
    command = ["serverless", "deploy", "--config", yml_path, "--verbose"]
    self.update_state(
        state='PROGRESS',
        meta={}
    )
    # Bug fix: shell=True combined with a list command is broken on POSIX
    # (only argv[0] reaches the shell); pass the argv list with shell=False.
    process = subprocess.run(command, capture_output=True, text=True, cwd=directory, env=env)
    if process.returncode == 0:
        print("Deployment successful.")
        print("Output:\n", process.stdout)
        return {'status': 'Succeeded', 'message': 'Deployment successful.', 'output': process.stdout}
    print("Deployment failed.")
    # Bug fix: failure details usually land on stderr; the original echoed
    # stdout only. Fall back to stdout when stderr is empty.
    failure_output = process.stderr or process.stdout
    print("Error:\n", failure_output)
    return {'status': 'Failed', 'message': 'Deployment failed.', 'output': failure_output}
168,047 | import base64
import time
import uuid
from celery import Celery
import shutil
from pathlib import Path
from solidgpt.src.constants import *
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.workskill import WorkSkill
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workgraph.graph import *
from solidgpt.src.manager.initializer import *
import redis
import os
import subprocess
def celery_task_serverless_remove(self, yml_path: str, aws_key_id: str, aws_access_key: str):
    """Run `serverless remove` for the given config file.

    Returns {'status': 'Succeeded'|'Failed', 'message': ..., 'output': ...},
    which get_serverless_task_status inspects.
    """
    logging.info("celery task: serverless remove")
    # Run from the config file's directory so relative paths in the yml resolve.
    directory = Path(yml_path).parent
    # Pass AWS credentials via the child environment only.
    env = os.environ.copy()
    env['AWS_ACCESS_KEY_ID'] = aws_key_id
    env['AWS_SECRET_ACCESS_KEY'] = aws_access_key
    command = ["serverless", "remove", "--config", yml_path, "--verbose"]
    self.update_state(
        state='PROGRESS',
        meta={}
    )
    # Bug fix: shell=True combined with a list command is broken on POSIX
    # (only argv[0] reaches the shell); pass the argv list with shell=False.
    process = subprocess.run(command, capture_output=True, text=True, cwd=directory, env=env)
    if process.returncode == 0:
        print("Removal successful.")
        print("Output:\n", process.stdout)
        return {'status': 'Succeeded', 'message': 'Removal successful.', 'output': process.stdout}
    print("Removal failed.")
    # Bug fix: failure details usually land on stderr; the original echoed
    # stdout only. Fall back to stdout when stderr is empty.
    failure_output = process.stderr or process.stdout
    print("Error:\n", failure_output)
    return {'status': 'Failed', 'message': 'Removal failed.', 'output': failure_output}
168,048 | import base64
import time
import uuid
from celery import Celery
import shutil
from pathlib import Path
from solidgpt.src.constants import *
from solidgpt.src.util.util import *
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.workskill import WorkSkill
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workgraph.graph import *
from solidgpt.src.manager.initializer import *
import redis
import os
import subprocess
def celery_task_http_solution(self, openai_key, requirement, graph_id):
    """Execute the HTTP-solution graph, streaming partial content via task state."""
    logging.info("celery task: http solution")
    openai.api_key = openai_key
    graph = build_http_solution_graph(requirement, graph_id)

    def stream_progress(current_content):
        # Publish partially generated content so pollers can render it live.
        self.update_state(
            state='PROGRESS',
            meta={'current_content': current_content}
        )

    graph.callback_map[SKILL_NAME_HTTP_SOLUTION] = stream_progress
    graph.init_node_dependencies()
    graph.execute()
    return graph.display_result.get_result()
168,049 |
def response_upload(message="", status="", progress="", error=""):
    """Build the JSON payload returned by upload-status endpoints."""
    # An empty-string progress (the default) is normalized to an empty dict.
    return {
        "message": message,
        "status": status,
        "progress": progress if progress != "" else {},
        "error": error,
    }
168,050 |
def response_graph(graph="", message="", status="", progress="", error="", result=""):
    """Build the JSON payload returned by graph-status endpoints."""
    # An empty-string progress (the default) is normalized to an empty dict.
    return {
        "graph": graph,
        "message": message,
        "status": status,
        "progress": progress if progress != "" else {},
        "error": error,
        "result": result,
    }
168,051 |
def response_serverless(message="", status="", error=""):
    """Build the JSON payload returned by serverless deploy/remove endpoints."""
    return dict(message=message, status=status, error=error)
168,052 | import json
import os
import tiktoken
import numpy as np
from collections import defaultdict
# NOTE(review): top-level preview of the first training example; `dataset`
# is assumed to be loaded earlier (e.g. from a JSONL file) — confirm.
for message in dataset[0]["messages"]:
    print(message)
# cl100k_base is the tokenizer used by the gpt-3.5/gpt-4 chat models.
encoding = tiktoken.get_encoding("cl100k_base")
def num_tokens_from_messages(messages, tokens_per_message=3, tokens_per_name=1):
    """Estimate the chat-format token count of *messages*.

    Uses the module-level `encoding` (tiktoken) to count tokens in every
    field value, adds a fixed per-message overhead, an extra charge for
    "name" fields, and a final 3-token reply primer.
    """
    total = 3  # every reply is primed with a fixed 3-token suffix
    for message in messages:
        total += tokens_per_message
        for field, text in message.items():
            total += len(encoding.encode(text))
            if field == "name":
                total += tokens_per_name
    return total
168,053 | import json
import os
import tiktoken
import numpy as np
from collections import defaultdict
# NOTE(review): duplicate of the dataset-preview snippet above in the dump;
# `dataset` is assumed to be loaded earlier — confirm.
for message in dataset[0]["messages"]:
    print(message)
# cl100k_base is the tokenizer used by the gpt-3.5/gpt-4 chat models.
encoding = tiktoken.get_encoding("cl100k_base")
def num_assistant_tokens_from_messages(messages):
    """Count tokens (via the module-level `encoding`) in assistant messages only."""
    return sum(
        len(encoding.encode(msg["content"]))
        for msg in messages
        if msg["role"] == "assistant"
    )
168,054 | import json
import os
import tiktoken
import numpy as np
from collections import defaultdict
# NOTE(review): summary printout for the fine-tuning dataset validation;
# the names referenced here (dataset, n_missing_system, n_missing_user,
# n_too_long, n_billing_tokens_in_dataset, n_epochs) are assumed to be
# computed earlier in the original notebook/script — confirm.
print("Num examples:", len(dataset))
print("First example:")
print("Num examples missing system message:", n_missing_system)
print("Num examples missing user message:", n_missing_user)
print(f"\n{n_too_long} examples may be over the 4096 token limit, they will be truncated during fine-tuning")
print(f"Dataset has ~{n_billing_tokens_in_dataset} tokens that will be charged for during training")
print(f"By default, you'll train for {n_epochs} epochs on this dataset")
print(f"By default, you'll be charged for ~{n_epochs * n_billing_tokens_in_dataset} tokens")
print("See pricing page to estimate total costs")
def print_distribution(values, name):
    """Print summary statistics (min/max, mean/median, p5/p95) for *values*.

    Args:
        values: Non-empty sequence of numbers.
        name: Label used in the printed heading.
    """
    print(f"\n#### Distribution of {name}:")
    print(f"min / max: {min(values)}, {max(values)}")
    print(f"mean / median: {np.mean(values)}, {np.median(values)}")
    # Bug fix: the label says p5/p95 but the original computed the 10th and
    # 90th percentiles (np.quantile with 0.1 / 0.9); use 0.05 / 0.95.
    print(f"p5 / p95: {np.quantile(values, 0.05)}, {np.quantile(values, 0.95)}")
168,055 | import openai
import logging
import asyncio
async def wait_for_finetuning_complete():
    """Poll the fine-tuning job every 60 seconds until it reaches a
    terminal state ("succeeded" or "failed").

    Relies on the module-level `finetune_instance` for status queries.
    """
    terminal_states = ("succeeded", "failed")
    while True:
        status = await finetune_instance.get_fine_tuning_status()
        logging.info("Fine-tuning status: %s", status)
        if status in terminal_states:
            break
        await asyncio.sleep(60)
168,056 | from typing import Type
from solidgpt.src.diy.custom.customizeskillmanager import CustomizeSkillManager
from solidgpt.src.worknode.worknode import *
from solidgpt.src.imports import *
from solidgpt.src.constants import *
def generate_save_data_from_nodes(nodes: list[WorkNode], generate_debug_info: bool = False):
    """Serialize work nodes (and their skill I/O config) into JSON-ready dicts.

    Args:
        nodes: Nodes to serialize.
        generate_debug_info: When True, include extra read-only fields
            (dependency ids, param names/categories) useful for debugging.

    Returns:
        A list with one plain dict per node, in the same order as *nodes*.
    """
    serialized = []
    for node in nodes:
        entry = {
            "node_id": node.node_id,
            "manual_review_result": node.manual_review_result,
        }
        if generate_debug_info:
            entry["next_node_ids"] = list(node.next_node_ids)
            entry["output_id_dependencies"] = list(node.output_id_dependencies)
        current_skill = node.skill
        entry["skill"] = current_skill.name

        input_records = []
        for skill_input in current_skill.inputs:
            record = {
                "param_path": skill_input.param_path,
                "loading_method": str(skill_input.loading_method),
                "load_from_output_id": skill_input.load_from_output_id,
            }
            if generate_debug_info:
                record["param_name"] = skill_input.param_name
                record["param_category"] = str(skill_input.param_category)
                record["optional"] = str(skill_input.optional)
            input_records.append(record)
        entry["inputs"] = input_records

        output_records = []
        for skill_output in current_skill.outputs:
            record = {"id": skill_output.id}
            if generate_debug_info:
                record["param_category"] = str(skill_output.param_category)
            output_records.append(record)
        entry["outputs"] = output_records

        serialized.append(entry)
    return serialized
168,057 | from typing import Type
from solidgpt.src.diy.custom.customizeskillmanager import CustomizeSkillManager
from solidgpt.src.worknode.worknode import *
from solidgpt.src.imports import *
from solidgpt.src.constants import *
# Registry mapping built-in skill names to their WorkSkill constructors.
# Skill names not found here are resolved through CustomizeSkillManager
# as user-defined (customized) skills at load time.
SKILL_NAME_TO_CONSTRUCTOR: dict[str, Type[WorkSkill]] = {
    SKILL_NAME_WRITE_PRODUCT_REQUIREMENTS_DOCUMENTATION: WritePRD,
    SKILL_NAME_WRITE_HLD: WriteHLD,
    SKILL_NAME_CREATE_KANBAN_BOARD: CreateKanBan,
    SKILL_NAME_WRITE_YAML: WriteYAML,
    SKILL_NAME_RUN_APP: RunApp,
}
class CustomizeSkillManager:
    """Singleton that discovers and caches user-defined (customized) skills.

    Skill definitions are read from JSON files in a local folder at
    construction time and kept in an in-memory map keyed by skill name.
    """

    # Lazily created singleton instance (see __new__).
    _instance = None

    def __new__(cls, *args, **kwargs):
        # Classic singleton: reuse the first instance ever created.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            # You can initialize the instance attributes here
        return cls._instance

    def __init__(self, custom_skill_definition_folder_path=os.path.join(LOCAL_STORAGE_DIR, 'customizedskilldefinition')):
        """Scan *custom_skill_definition_folder_path* and build the skill map.

        NOTE(review): __init__ runs on every CustomizeSkillManager() call even
        though __new__ reuses one instance, so the folder is re-scanned each
        time — confirm this is intended.
        """
        self.customized_skills_map: dict[str, CustomSkill] = {}
        self.custom_skill_definition_folder_path = custom_skill_definition_folder_path
        self.__load_customized_skills()

    def get_customzied_skill(self, skill_name: str) -> CustomSkill:
        """Return the customized skill for *skill_name*, or None (with an error log).

        NOTE(review): the method name contains a typo ("customzied") but is
        part of the public interface used by callers — kept for compatibility.
        """
        skill = self.customized_skills_map.get(skill_name)
        if skill is None:
            logging.error(f"Error, Customized skill {skill_name} is not found.")
        return skill

    def __load_customized_skills(self):
        """Load every JSON skill definition and register it by skill name."""
        skill_definitions = self.__load_customzied_skill_from_folder()
        for skill_definition in skill_definitions:
            skill = self.__load_customized_skill(skill_definition)
            self.customized_skills_map[skill_definition.skill_name] = skill
        return

    def __load_customized_skill(self, skill_definition: CustomizedSkillDefinition) -> CustomSkill:
        """Wrap a single parsed definition in a CustomSkill instance."""
        return CustomSkill(skill_definition)

    def __load_customzied_skill_from_folder(self):
        """Parse every *.json file in the definition folder into definitions."""
        # Get a list of all files in the folder
        file_list = os.listdir(self.custom_skill_definition_folder_path)
        # Filter JSON files from the list
        json_files = [file for file in file_list if file.endswith('.json')]
        logging.info(f"Found {json_files} json files in {self.custom_skill_definition_folder_path}")
        # Load JSON data from each JSON file
        customized_skills_definition: list[CustomizedSkillDefinition] = []
        for json_file in json_files:
            customized_skills_definition.append(CustomizedSkillDefinition(**load_from_json(os.path.join(self.custom_skill_definition_folder_path, json_file))))
        return customized_skills_definition
def load_save_data_to_nodes(loaded_data):
    """Rebuild WorkNode objects from previously serialized node dicts.

    Built-in skills are instantiated from SKILL_NAME_TO_CONSTRUCTOR;
    anything else is looked up in the CustomizeSkillManager singleton.
    """
    rebuilt_nodes = []
    for record in loaded_data:
        skill_name = record["skill"]
        constructor = SKILL_NAME_TO_CONSTRUCTOR.get(skill_name)
        if constructor is None:
            # Not a built-in skill: fall back to the customized-skill registry.
            skill = CustomizeSkillManager._instance.get_customzied_skill(skill_name)
        else:
            skill = constructor()
        if skill is not None:
            skill.init_config(record["inputs"], record["outputs"])
        rebuilt_nodes.append(WorkNode(record["node_id"], skill, record["manual_review_result"]))
    return rebuilt_nodes
168,058 | from enum import Enum
class SkillInputLoadingMethod(Enum):
    """How a skill input obtains its value at execution time."""
    LOAD_FROM_OUTPUT_ID = 1     # consume another node's output, referenced by id
    LOAD_FROM_STRING = 2        # value supplied directly as a string
    LOAD_FROM_CACHE_STRING = 3  # value looked up in the graph-level cache
# Reverse lookup table: str(enum member) -> member. Used when deserializing
# skill configs that stored the loading method as its string representation.
STRING_TO_SKILL_INPUT_LOADING_METHOD_DICT: dict[str, SkillInputLoadingMethod] = {
    str(SkillInputLoadingMethod.LOAD_FROM_OUTPUT_ID): SkillInputLoadingMethod.LOAD_FROM_OUTPUT_ID,
    str(SkillInputLoadingMethod.LOAD_FROM_STRING): SkillInputLoadingMethod.LOAD_FROM_STRING,
    str(SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING): SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING,
}
def string_to_skill_input_loading_method(s: str):
    """Map a stringified enum value back to a SkillInputLoadingMethod.

    Unknown strings fall back to LOAD_FROM_STRING.
    """
    fallback = SkillInputLoadingMethod.LOAD_FROM_STRING
    return STRING_TO_SKILL_INPUT_LOADING_METHOD_DICT.get(s, fallback)
168,059 | import os
import glob
import openai
from solidgpt.definitions import LOCAL_STORAGE_OUTPUT_DIR, TEST_SKILL_WORKSPACE
from solidgpt.src.manager.gptmanager import GPTManager
from solidgpt.src.util.util import save_to_json
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.skillio import SkillInputConfig, SkillInputLoadingMethod
from solidgpt.src.workskill.skills.analysis import ProductAnalysis
from solidgpt.src.workskill.skills.http_codesolution import HTTPCodeSolution
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workskill.skills.query_code_local import QueryCodeLocal
from solidgpt.src.workskill.skills.repo_chat import RepoChat
from solidgpt.src.workskill.skills.summarize_file import SummaryFile
from solidgpt.src.workskill.skills.summary_file_local import SummaryFileLocal
from solidgpt.src.workskill.skills.summary_project import SummaryProject
from solidgpt.src.workskill.skills.techsolution import ProvideTechSolution
from solidgpt.src.workskill.skills.write_prd import WritePRD
from solidgpt.src.workskill.skills.autogen_analysis import AutoGenAnalysis
from solidgpt.src.workskill.workskill import WorkSkill
def generate_node(node_id: str, skill: WorkSkill, input_configs: list[SkillInputConfig], output_ids: list[int], manual_review_result: bool = False):
    """Configure *skill* from input configs / output ids and wrap it in a WorkNode.

    Args:
        node_id: Identifier for the new node.
        skill: Skill instance to configure.
        input_configs: Input configurations; serialized via to_dict().
        output_ids: Output ids; each becomes a minimal {"id": ...} config.
        manual_review_result: Whether execution pauses for manual review.
    """
    serialized_inputs = [config.to_dict() for config in input_configs]
    serialized_outputs = [{"id": output_id} for output_id in output_ids]
    skill.init_config(serialized_inputs, serialized_outputs)
    return WorkNode(node_id, skill, manual_review_result)
def generate_node_with_output_configs(node_id: str, skill: WorkSkill, input_configs: list[SkillInputConfig], output_configs: list, manual_review_result: bool = False):
    """Like generate_node, but outputs are given as ready-made config dicts.

    Args:
        node_id: Identifier for the new node.
        skill: Skill instance to configure.
        input_configs: Input configurations; serialized via to_dict().
        output_configs: Output config dicts passed through unchanged.
        manual_review_result: Whether execution pauses for manual review.
    """
    skill.init_config(
        [config.to_dict() for config in input_configs],
        # Idiom fix: the original copied output_configs with a no-op
        # comprehension; list() expresses the defensive copy directly.
        list(output_configs))
    node: WorkNode = WorkNode(node_id, skill, manual_review_result)
    return node
class WorkGraph:
    """Directed acyclic graph of WorkNodes whose skills feed each other.

    Outputs of one node are consumed by inputs of other nodes via integer
    output ids. The graph validates dependencies, rejects cycles, and runs
    nodes in dependency order, writing each node's outputs to its own
    directory under the graph's output path.
    """

    # NOTE(review): most of these class-level mutable defaults are shadowed
    # by instance attributes in __init__, but `cache` and `custom_data` are
    # not — they are shared across all WorkGraph instances. Confirm that
    # this sharing is intended.
    nodes: list[WorkNode] = []
    node_map: dict[str, WorkNode] = {}
    output_map: dict[int, SkillOutput] = {}
    output_id_to_node_map: dict[int, WorkNode] = {}
    display_result: DisplayResult
    notion = None
    cache = {}
    callback_map: dict = {}
    custom_data: dict = {}

    def __init__(self, output_directory_path_override: str = "", output_id = None):
        # need to manually initialize here
        self.nodes = []
        self.node_map = {}
        self.output_map = {}
        self.output_id_to_node_map = {}
        self.callback_map = {}
        self.display_result = DisplayResult()
        # Default output folder: <LOCAL_STORAGE_OUTPUT_DIR>/<output_id or timestamp>
        self.output_directory_path = os.path.join(LOCAL_STORAGE_OUTPUT_DIR, output_id or time.strftime("%Y%m%d%H%M%S"))
        if output_directory_path_override:
            self.output_directory_path = os.path.abspath(output_directory_path_override)
        return

    def add_node(self, node: WorkNode):
        """Add *node* to the graph and share the graph-level cache with it."""
        node.graph_cache = self.cache
        self.nodes.append(node)

    def init_node_dependencies(self):
        """Wire node outputs to dependent inputs and validate the graph.

        Must be called after all add_node() calls and before execute().
        Exits the process if a dependency cycle is detected.
        """
        # clear node map and output map
        self.node_map.clear()
        self.output_map.clear()
        self.output_id_to_node_map.clear()
        for node in self.nodes:
            # add node to node map
            self.node_map[node.node_id] = node
            # initialize display_result for children
            node.display_result = self.display_result
            node.skill.display_result = self.display_result
            # initialize callback func for skills
            if node.skill.name in self.callback_map:
                node.skill.callback_func = self.callback_map.get(node.skill.name, None)
            # keep a graph reference in skill
            node.skill.graph = self
            # create directory for node
            node_directory_path = os.path.join(self.output_directory_path,
                                               (node.skill.name + "_" + str(node.node_id)).replace(" ", "_"))
            if not os.path.exists(node_directory_path):
                # Create the output folder
                os.makedirs(node_directory_path)
                print(f"Directory '{node_directory_path}' created.")
            # add output to output map
            for o in node.skill.outputs:
                # initialize output paths
                o.param_path = os.path.join(node_directory_path, (o.param_name + " " + str(o.id)).replace(" ", "_"))
                # output can be consumed by inputs of other nodes
                if o.id >= 0:
                    self.output_map[o.id] = o
                    self.output_id_to_node_map[o.id] = node
        # second iteration after output map has been initialized
        for node in self.nodes:
            # add output dependencies for node
            for i in node.skill.inputs:
                if i.loading_method == SkillInputLoadingMethod.LOAD_FROM_OUTPUT_ID:
                    if i.load_from_output_id == -1:
                        print("Error, " + i.param_name + ": cannot load from output id: -1.")
                        continue
                    node.output_id_dependencies.add(i.load_from_output_id)
                    # add current node to next_node_ids of the output node
                    if i.load_from_output_id not in self.output_map:
                        print("Error, input %s: loading from an invalid output id %d."
                              % (i.param_name, i.load_from_output_id))
                        continue
                    output_node = self.output_id_to_node_map[i.load_from_output_id]
                    if output_node.node_id == node.node_id:
                        print("Error, " + i.param_name + ": cannot load from its own output.")
                        continue
                    output_node.next_node_ids.add(node.node_id)
                    i.skill_output = self.output_map[i.load_from_output_id]
        if self.__is_circular():
            print_error_message("Circular graph detected. Terminating program...")
            exit(1)
        return

    def execute(self):
        """Run the graph starting from all nodes with no input dependencies."""
        logging.info("Executing SolidGPT...")
        first_nodes = []
        for node in self.nodes:
            if len(node.output_id_dependencies) == 0:
                first_nodes.append(node)
        if len(first_nodes) == 0:
            print_error_message("Cannot execute graph, no node can be executed.")
        for node in first_nodes:
            self.__execute_node(node)
        logging.info(f"SolidGPT execution completed. All results are saved in {self.output_directory_path}")

    def __execute_node(self, node: WorkNode):
        """Execute *node* if ready, then recursively unblock its successors."""
        if node.can_execute():
            # execute skill
            node.skill.execute()
            # wait for potential node pause
            self.__node_pause(node)
            # notify other nodes
            for next_node_id in node.next_node_ids:
                next_node = self.node_map[next_node_id]
                for o in node.skill.outputs:
                    # Bug fix: use discard() instead of remove(). A downstream
                    # node does not necessarily depend on every output of this
                    # node, and set.remove() raises KeyError for absent ids.
                    next_node.output_id_dependencies.discard(o.id)
                self.__execute_node(next_node)

    def __node_pause(self, node: WorkNode):
        """Interactive review loop after a node flagged for manual review runs."""
        if node.manual_review_result:
            time.sleep(0.25)  # let pending log/print output flush before prompting
            print("\nPlease review result generated by %s skill in node %s"
                  % (node.skill.name, str(node.node_id)))
            notion_file_opened = False
            while True:
                user_input = input("Execution is halted. Please specify an action, then press Enter. "
                                   "To view all available actions, enter 'help':")
                if same_string(user_input, "help"):
                    help_msg: str = "{:<18}{}\n".format("help", "Show this help message.") + \
                                    "{:<18}{}\n".format("continue", "Continue execution.") + \
                                    "{:<18}{}\n".format("stop", "Stop program.") + \
                                    "{:<18}{}\n".format("path", "Show the path of this result.") + \
                                    "{:<18}{}\n".format("notion-open", "Open the markdown result in notion.") + \
                                    "{:<18}{}\n".format("notion-sync", "Sync the notion result, save it as new output.")
                    print(help_msg)
                    continue
                elif same_string(user_input, "continue"):
                    print("Continuing execution...")
                    break
                elif same_string(user_input, "stop"):
                    print("Exiting the program...")
                    exit(0)
                elif same_string(user_input, "path"):
                    print(os.path.abspath(os.path.dirname(node.skill.outputs[0].param_path)))
                    continue
                elif same_string(user_input, "notion-open"):
                    if self.notion is None:
                        self.notion = NotionActions()
                    if not notion_file_opened:
                        skill_outputs = node.skill.outputs
                        if len(skill_outputs) > 0:
                            first_output = skill_outputs[0]
                            category = first_output.param_category
                            output_path = first_output.param_path
                            print(f"!!!!!!!{type(category)} {category}")
                            if category == SkillIOParamCategory.ProductRequirementsDocument or \
                                    category == SkillIOParamCategory.BusinessRequirementsDocument or \
                                    category == SkillIOParamCategory.HighLevelDesignDocument or \
                                    category == SkillIOParamCategory.PlainText or \
                                    category == SkillIOParamCategory.KanbanBoard:
                                output_md_filepath = add_extension_if_not_exist(output_path, ".md")
                                self.notion.process_markdown_and_upload(output_md_filepath)
                            else:
                                print(f"Notion does not support {SkillIOParamCategory.PlainText} output.")
                                continue
                        else:
                            print("Notion does not support skill with no outputs.")
                            continue
                        notion_file_opened = True
                    else:
                        print("File already opened in Notion.")
                        continue
                elif same_string(user_input, "notion-sync"):
                    if notion_file_opened:
                        skill_outputs = node.skill.outputs
                        first_output = skill_outputs[0]
                        category = first_output.param_category
                        output_path = first_output.param_path
                        if category == SkillIOParamCategory.ProductRequirementsDocument or \
                                category == SkillIOParamCategory.BusinessRequirementsDocument or \
                                category == SkillIOParamCategory.HighLevelDesignDocument or \
                                category == SkillIOParamCategory.PlainText or \
                                category == SkillIOParamCategory.KanbanBoard:
                            output_md_file_dir = os.path.dirname(output_path)
                            output_md_file_name = os.path.basename(output_path)
                            self.notion.sync_from_notion(output_md_file_dir, output_md_file_name)
                            print("Notion file synced.")
                    else:
                        print("notion-open command needs to be used first.")
                        continue
                else:
                    print("Invalid input entered.")
                    continue

    def save_data(self, filename: str = "data.json", generate_debug_info: bool = False):
        """Serialize all nodes to *filename* as JSON."""
        save_data = generate_save_data_from_nodes(self.nodes, generate_debug_info)
        save_to_json(save_data, filename)
        return

    def load_data(self, filename: str = "data.json"):
        """Replace the graph's nodes with those deserialized from *filename*."""
        loaded_data = load_from_json(filename)
        self.nodes.clear()
        self.nodes = load_save_data_to_nodes(loaded_data)
        self.init_node_dependencies()
        return

    def __is_circular(self):
        """Return True if the node graph contains a dependency cycle."""
        visited = {node_id: False for node_id in self.node_map}
        stack = {node_id: False for node_id in self.node_map}
        # check every node because the graph might be disjoint
        for node_id in self.node_map:
            if not visited[node_id]:
                if self.__has_cycle(node_id, visited, stack):
                    return True
        return False

    def __has_cycle(self, current_node_id, visited, stack):
        """DFS helper: detect a cycle reachable from *current_node_id*."""
        # mark the current node as visited
        visited[current_node_id] = True
        # add the current node to the stack representing the current path
        stack[current_node_id] = True
        # visit all the neighbors of the current node
        for neighbor_id in self.node_map[current_node_id].next_node_ids:
            # if the neighbor is not visited, visit it
            if not visited[neighbor_id]:
                if self.__has_cycle(neighbor_id, visited, stack):
                    return True
            # if the neighbor is already in the current path, we have found a cycle
            elif stack[neighbor_id]:
                return True
        # remove the current node from the current path stack
        stack[current_node_id] = False
        return False
class SkillInputLoadingMethod(Enum):
    """How a skill input obtains its value at execution time."""
    LOAD_FROM_OUTPUT_ID = 1     # consume another node's output, referenced by id
    LOAD_FROM_STRING = 2        # value supplied directly as a string
    LOAD_FROM_CACHE_STRING = 3  # value looked up in the graph-level cache
class SkillInputConfig:
    """Plain-data holder describing how a single skill input is configured."""

    def __init__(self, param_path: str, loading_method: SkillInputLoadingMethod, load_from_output_id: int, content: str = None):
        self.param_path = param_path
        self.loading_method = loading_method
        self.load_from_output_id = load_from_output_id
        self.content = content

    def to_dict(self):
        """Serialize to the JSON-ready dict shape consumed by skill.init_config."""
        return dict(
            param_path=self.param_path,
            loading_method=str(self.loading_method),
            load_from_output_id=self.load_from_output_id,
            content=self.content,
        )
class LoadRepo(WorkSkill):
    """Skill that flattens a repository folder into a single plain-text file.

    Input: path to the repo folder. Output: one .txt file produced by
    GitToTextConverter containing the repo contents.
    """

    def __init__(self):
        super().__init__()
        self.name = SKILL_NAME_LOAD_REPO
        # Input: folder path of the repository to convert.
        self.skill_input = SkillInput(
            "Repo path",
            SkillIOParamCategory.PlainText,
        )
        self.add_input(self.skill_input)
        # Output: the flattened repo as plain code text.
        self.skill_output = SkillOutput(
            "Plain code text",
            SkillIOParamCategory.CODE_PLAIN_TEXT,
        )
        self.add_output(self.skill_output)
        self.project_folder_path = None

    def _read_input(self):
        # Resolve the configured input into a concrete folder path.
        self.project_folder_path = self.get_input_path(self.skill_input)

    def execution_impl(self):
        """Convert the repo folder into <output path>.txt via GitToTextConverter."""
        logging.info(f"Start to load repo... {self.project_folder_path}")
        converter = GitToTextConverter(self.project_folder_path, output_file_path=self.skill_output.param_path + '.txt')
        converter.convert()
        return
class SummaryFileLocal(WorkSkill):
    """Summarize each source file of a flattened repo with GPT and store the
    (summary, code) pairs as embedded vectors in a local Qdrant collection
    keyed by the onboarding id.
    """

    # File suffixes eligible for summarization. NOTE: entries are a mix of
    # dotted and dot-less suffixes; matching is done with str.endswith below.
    Allowed_Extensions = [".py", ".js", ".jsx", "html", ".css", "ts", "tsx", "java"]
    # Delimiter written between files by the repo flattener (LoadRepo).
    Seperator = "**-****-****-****-**"
    # Cache_Label_Summary_File = "summary_python_file"
    onboarding_id: str = None

    def __init__(self):
        super().__init__()
        self.name = SKILL_NAME_SUMMARY_FILE
        self.gpt_manager = GPTManager._instance
        self.onboarding_id_input = SkillInput(
            "Onboarding ID",
            SkillIOParamCategory.PlainText,
        )
        self.skill_input = SkillInput(
            "Plain code text",
            SkillIOParamCategory.CODE_PLAIN_TEXT,
        )
        self.add_input(self.onboarding_id_input)
        self.add_input(self.skill_input)
        self.skill_output1 = SkillOutput(
            "Single python file summary",
            SkillIOParamCategory.PlainText,
        )
        self.skill_output2 = SkillOutput(
            "Single python code",
            SkillIOParamCategory.PlainText,
        )
        self.add_output(self.skill_output1)
        self.add_output(self.skill_output2)
        self.repo_txt: str = None
        self.file_list: list = []
        # Local Qdrant storage root for the embedding collections.
        self.qdrant_path = os.path.join(ROOT_DIR, "src", "tools", "qdrant", "embedding")

    def _read_input(self):
        input_path = self.get_input_path(self.skill_input)
        self.repo_txt = load_from_text(input_path, extension=".txt")
        self.onboarding_id = self.onboarding_id_input.content
        # self.__download_from_azure(self.onboarding_id)
        self.client = QdrantClient(path=os.path.join(self.qdrant_path, self.onboarding_id))

    def execution_impl(self):
        """Summarize and embed every eligible file listed in the repo text."""
        logging.info("Start to summary code...")
        self.__extract_lines_after_sequence()
        file_count = len(self.file_list)
        current_file_idx = 0
        allowed_suffixes = tuple(self.Allowed_Extensions)
        for file in self.file_list:
            current_file_idx += 1
            if self.callback_func:
                self.callback_func(current_file_idx, file_count)
            # Bug fix: the original tested `file[-3:] in Allowed_Extensions`,
            # which can never match the 4+ character or dot-less entries
            # (".jsx", "html", "java", ...) — effectively only .py and .js
            # files were summarized. Suffix matching covers every entry.
            if file.endswith(allowed_suffixes):
                self.__summary_file(file)
        self.client.close()
        return

    def __summary_file(self, filename):
        """Summarize one file with GPT and embed the result."""
        py_file = self.__extract_file_content(filename)
        # Bug fix: strip the extension properly — the original sliced off a
        # fixed 3 characters, which mangles names with longer extensions.
        real_name = os.path.splitext(filename)[0]
        real_name = real_name.translate(str.maketrans({"\\": "_", "/": "_"}))
        # save_to_text(os.path.join(self.skill_output1.param_path, f"{real_name}%%CODE%%"), py_file)
        if py_file is None:
            logging.warning("No python file found")
            return
        python_summary = self.gpt_manager.create_and_chat_with_model(
            prompt=SUMMARY_CODE_SUMMARY_PYTHON,
            gpt_model_label="summary_python",
            input_message=py_file
        )
        python_summary = python_summary.replace("\n", " ")
        logging.info(f"Summary of python file: {python_summary}")
        # save_to_text(os.path.join(self.skill_output1.param_path, f"{real_name}%%SUMMARY%%"), python_summary)
        self.__embed_summary(real_name, python_summary, py_file)
        return

    def __embed_summary(self, filename, summary, code):
        """Embed *summary* with OpenAI and upsert it into the Qdrant collection."""
        payload_dict = {"code": code, "summary": summary, "filename": filename}
        embeddings_model = OpenAIEmbeddings(openai_api_key=openai.api_key)
        embedded_query = embeddings_model.embed_query(summary)
        logging.info(f"Onboarding ID:{self.onboarding_id}\nSave this id to retrieve embedded data later.")
        try:
            self.client.upsert(
                collection_name=self.onboarding_id,
                points=[
                    # Bug fix: get_uuid is a module-level helper (defined
                    # below this class); the original called self.get_uuid(),
                    # which fails unless the base class happens to provide it.
                    PointStruct(id=get_uuid(), vector=embedded_query, payload=payload_dict)
                ]
            )
        except ValueError:
            # Collection does not exist yet: create it sized to the embedding
            # dimension, then retry the upsert.
            self.client.recreate_collection(
                collection_name=self.onboarding_id,
                vectors_config=VectorParams(size=len(embedded_query), distance=Distance.COSINE),
            )
            self.client.upsert(
                collection_name=self.onboarding_id,
                points=[
                    PointStruct(id=get_uuid(), vector=embedded_query, payload=payload_dict)
                ]
            )
        return

    def __extract_lines_after_sequence(self):
        """Populate self.file_list with the line after each separator (the filenames)."""
        lines = self.repo_txt.split('\n')
        capture = False
        captured_lines = []
        for line in lines:
            if capture:
                captured_lines.append(line)
                capture = False
            if self.Seperator in line:
                capture = True
        self.file_list = captured_lines
        return None

    def __extract_file_content(self, filename):
        """Return the body of *filename* from the flattened repo text, or None."""
        # Split the text by the sequence
        blocks = self.repo_txt.split(self.Seperator)
        for block in blocks:
            # Check the first line of each block
            first_line = block.strip().split('\n')[0]
            if first_line.lower() == filename:
                # Remove the first line (filename) and return the rest
                return '\n'.join(block.strip().split('\n')[1:])
        return None  # If no desired file is found
def get_uuid():
    """Return a random 32-character lowercase hexadecimal identifier."""
    return uuid.uuid4().hex
class SummaryProject(WorkSkill):
    """Summarize a flattened repo: extract its file schema and README, run
    GPT summaries, and publish both through the graph cache and output files.
    """

    # Delimiter written between files by the repo flattener (LoadRepo).
    Seperator = "**-****-****-****-**"
    Cache_Label_Summary_Repo_Schema = "summary_repo_schema"
    Cache_Label_Summary_Readme = "summary_readme"

    def __init__(self):
        super().__init__()
        self.name = SKILL_NAME_SUMMARY_PROJECT
        self.gpt_manager = GPTManager._instance
        # Input: the flattened repo text produced by LoadRepo.
        self.skill_input = SkillInput(
            "Plain code text",
            SkillIOParamCategory.CODE_PLAIN_TEXT,
        )
        self.add_input(self.skill_input)
        self.skill_output_code_summary = SkillOutput(
            "Code Summary",
            SkillIOParamCategory.PlainText,
        )
        self.skill_output_code_schema = SkillOutput(
            "Code Schema",
            SkillIOParamCategory.PlainText,
        )
        self.add_output(self.skill_output_code_summary)
        self.add_output(self.skill_output_code_schema)
        self.repo_txt: str = None

    def _read_input(self):
        input_path = self.get_input_path(self.skill_input)
        self.repo_txt = load_from_text(input_path, extension=".txt")

    def execution_impl(self):
        """Run both summaries, then persist the cached values to disk and the result cache."""
        logging.info("Start to summary code...")
        self.__summary_repo_schema()
        self.__summary_readme()
        save_to_text(self.skill_output_code_summary.param_path, self.graph_cache[self.Cache_Label_Summary_Readme])
        self._save_to_result_cache(self.skill_output_code_summary, self.graph_cache[self.Cache_Label_Summary_Readme])
        save_to_text(self.skill_output_code_schema.param_path, self.graph_cache[self.Cache_Label_Summary_Repo_Schema])
        self._save_to_result_cache(self.skill_output_code_schema, self.graph_cache[self.Cache_Label_Summary_Repo_Schema])
        return

    def __summary_repo_schema(self):
        """Extract the file schema and ask GPT for a summary of it.

        NOTE(review): the GPT output (schema_summary) is only logged; the RAW
        schema is what gets cached under Cache_Label_Summary_Repo_Schema —
        confirm this is intended.
        """
        schema = self.__extract_lines_after_sequence()
        schema_summary = self.gpt_manager.create_and_chat_with_model(
            prompt=SUMMARY_CODE_SUMMARY_SCHEMA,
            gpt_model_label="repo_schema",
            input_message=schema
        )
        self._set_graph_cached_content(self.Cache_Label_Summary_Repo_Schema, schema)
        logging.info(f"Schema of repo: {schema_summary}")

    def __summary_readme(self):
        """Summarize the repo README (if any) into the graph cache."""
        readme = self.__extract_readme_content()
        # set Cache_Label_Summary_Readme
        self._set_graph_cached_content(self.Cache_Label_Summary_Readme, "")
        if readme is None:
            logging.warn("No readme file found")
            return
        readme_summary = self.gpt_manager.create_and_chat_with_model(
            prompt=SUMMARY_CODE_SUMMARY_README,
            gpt_model_label="summary_readme",
            input_message=readme
        )
        self._set_graph_cached_content(self.Cache_Label_Summary_Readme, readme_summary)
        logging.info(f"Summary of readme file: {readme_summary}")

    def __extract_lines_after_sequence(self) -> str:
        """Collect the line after each separator (i.e. every filename) as one string."""
        lines = self.repo_txt.split('\n')
        capture = False
        captured_lines = []
        for line in lines:
            if capture:
                captured_lines.append(line)
                capture = False
            if self.Seperator in line:
                capture = True
        return '\n'.join(captured_lines)

    def __extract_readme_content(self):
        """Return the README body from the flattened repo text, or None."""
        # Split the text by the sequence
        blocks = self.repo_txt.split(self.Seperator)
        for block in blocks:
            # Check the first line of each block
            first_line = block.strip().split('\n')[0]
            if first_line.lower() in ["readme.md", "readme.rst"]:
                # Remove the first line (filename) and return the rest
                return '\n'.join(block.strip().split('\n')[1:])
        return None  # If no desired file is found
def build_onboarding_graph(repo_path: str, onborading_graph_id: str, upload_id: str, enable_summary_code: bool = False):
    """Assemble the onboarding work graph: load repo -> summarize project,
    optionally also summarizing/embedding each file.

    Args:
        repo_path: Root folder containing uploaded repos.
        onborading_graph_id: Graph/onboarding identifier (name kept as-is
            for interface compatibility).
        upload_id: Subfolder of *repo_path* holding the repo to process.
        enable_summary_code: When True, add the per-file summary node.

    Returns:
        The configured (but not yet executed) WorkGraph.
    """
    graph = WorkGraph(output_id=onborading_graph_id)
    # input_ids = [-1] means the input is from the user
    repo_loader_node = generate_node(
        "0",
        LoadRepo(),
        [SkillInputConfig(os.path.join(repo_path, upload_id),
                          SkillInputLoadingMethod.LOAD_FROM_STRING, -1)],
        [0])
    project_summary_node = generate_node_with_output_configs(
        "1",
        SummaryProject(),
        [SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_OUTPUT_ID, 0)],
        [{"id": 1}, {"id": 2, "to_display": True}])
    graph.add_node(repo_loader_node)
    graph.add_node(project_summary_node)
    if enable_summary_code:
        # Per-file summarization consumes the flattened repo (output 0) and
        # the onboarding id (from the graph cache).
        file_summary_node = generate_node(
            "2",
            SummaryFileLocal(),
            [SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING, -1, onborading_graph_id),
             SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_OUTPUT_ID, 0)],
            [3, 4])
        graph.add_node(file_summary_node)
    return graph
168,060 | import os
import glob
import openai
from solidgpt.definitions import LOCAL_STORAGE_OUTPUT_DIR, TEST_SKILL_WORKSPACE
from solidgpt.src.manager.gptmanager import GPTManager
from solidgpt.src.util.util import save_to_json
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.skillio import SkillInputConfig, SkillInputLoadingMethod
from solidgpt.src.workskill.skills.analysis import ProductAnalysis
from solidgpt.src.workskill.skills.http_codesolution import HTTPCodeSolution
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workskill.skills.query_code_local import QueryCodeLocal
from solidgpt.src.workskill.skills.repo_chat import RepoChat
from solidgpt.src.workskill.skills.summarize_file import SummaryFile
from solidgpt.src.workskill.skills.summary_file_local import SummaryFileLocal
from solidgpt.src.workskill.skills.summary_project import SummaryProject
from solidgpt.src.workskill.skills.techsolution import ProvideTechSolution
from solidgpt.src.workskill.skills.write_prd import WritePRD
from solidgpt.src.workskill.skills.autogen_analysis import AutoGenAnalysis
from solidgpt.src.workskill.workskill import WorkSkill
def generate_node(node_id: str, skill: WorkSkill, input_configs: list[SkillInputConfig], output_ids: list[int], manual_review_result: bool = False):
    """Configure *skill* from input configs / output ids and wrap it in a WorkNode."""
    input_dicts = [cfg.to_dict() for cfg in input_configs]
    output_dicts = [{"id": oid} for oid in output_ids]
    skill.init_config(input_dicts, output_dicts)
    return WorkNode(node_id, skill, manual_review_result)
# Root folder where each graph run writes its per-node output directories.
LOCAL_STORAGE_OUTPUT_DIR = os.path.join(LOCAL_STORAGE_DIR, "workspace", "out")
class WorkGraph:
nodes: list[WorkNode] = []
node_map: dict[str, WorkNode] = {}
output_map: dict[int, SkillOutput] = {}
output_id_to_node_map: dict[int, WorkNode] = {}
display_result: DisplayResult
notion = None
cache = {}
callback_map: dict = {}
custom_data: dict = {}
def __init__(self, output_directory_path_override: str = "", output_id = None):
# need to manually initialize here
self.nodes = []
self.node_map = {}
self.output_map = {}
self.output_id_to_node_map = {}
self.callback_map = {}
self.display_result = DisplayResult()
self.output_directory_path = os.path.join(LOCAL_STORAGE_OUTPUT_DIR, output_id or time.strftime("%Y%m%d%H%M%S"))
if output_directory_path_override:
self.output_directory_path = os.path.abspath(output_directory_path_override)
return
def add_node(self, node: WorkNode):
node.graph_cache = self.cache
self.nodes.append(node)
def init_node_dependencies(self):
# clear node map and output map
self.node_map.clear()
self.output_map.clear()
self.output_id_to_node_map.clear()
for node in self.nodes:
# add node to node map
self.node_map[node.node_id] = node
# initialize display_result for children
node.display_result = self.display_result
node.skill.display_result = self.display_result
# intialize callback func for skills
if node.skill.name in self.callback_map:
node.skill.callback_func = self.callback_map.get(node.skill.name, None)
# keep a graph reference in skill
node.skill.graph = self
# create directory for node
node_directory_path = os.path.join(self.output_directory_path,
(node.skill.name + "_" + str(node.node_id)).replace(" ", "_"))
if not os.path.exists(node_directory_path):
# Create the output folder
os.makedirs(node_directory_path)
print(f"Directory '{node_directory_path}' created.")
# add output to output map
for o in node.skill.outputs:
# initialize output paths
o.param_path = os.path.join(node_directory_path, (o.param_name + " " + str(o.id)).replace(" ", "_"))
# output can be consumed by inputs of other nodes
if o.id >= 0:
self.output_map[o.id] = o
self.output_id_to_node_map[o.id] = node
# second iteration after output map has been initialized
for node in self.nodes:
# add output dependencies for node
for i in node.skill.inputs:
if i.loading_method == SkillInputLoadingMethod.LOAD_FROM_OUTPUT_ID:
if i.load_from_output_id == -1:
print("Error, " + i.param_name + ": cannot load from output id: -1.")
continue
node.output_id_dependencies.add(i.load_from_output_id)
# add current node to next_node_ids of the output node
if i.load_from_output_id not in self.output_map:
print("Error, input %s: loading from an invalid output id %d."
% (i.param_name, i.load_from_output_id))
continue
output_node = self.output_id_to_node_map[i.load_from_output_id]
if output_node.node_id == node.node_id:
print("Error, " + i.param_name + ": cannot load from its own output.")
continue
output_node.next_node_ids.add(node.node_id)
i.skill_output = self.output_map[i.load_from_output_id]
if self.__is_circular():
print_error_message("Circular graph detected. Terminating program...")
exit(1)
return
def execute(self):
logging.info("Executing SolidGPT...")
first_nodes = []
for node in self.nodes:
if len(node.output_id_dependencies) == 0:
first_nodes.append(node)
if len(first_nodes) == 0:
print_error_message("Cannot execute graph, no node can be executed.")
for node in first_nodes:
self.__execute_node(node)
logging.info(f"SolidGPT execution completed. All results are saved in {self.output_directory_path}")
def __execute_node(self, node: WorkNode):
if node.can_execute():
# execute skill
node.skill.execute()
# wait for potential node pause
self.__node_pause(node)
# notify other nodes
for next_node_id in node.next_node_ids:
next_node = self.node_map[next_node_id]
for o in node.skill.outputs:
next_node.output_id_dependencies.remove(o.id)
self.__execute_node(next_node)
def __node_pause(self, node: WorkNode):
    """Interactive checkpoint shown when a node is flagged for manual review.

    Blocks on stdin in a command loop until the user continues or stops;
    also offers opening/syncing the markdown result in Notion.
    """
    if node.manual_review_result:
        time.sleep(0.25)  # brief delay so prior output flushes before the prompt
        print("\nPlease review result generated by %s skill in node %s"
              % (node.skill.name, str(node.node_id)))
        # True once the result has been uploaded to Notion during this pause
        notion_file_opened = False
        while True:
            user_input = input("Execution is halted. Please specify an action, then press Enter. "
                               "To view all available actions, enter 'help':")
            if same_string(user_input, "help"):
                help_msg: str = "{:<18}{}\n".format("help", "Show this help message.") + \
                    "{:<18}{}\n".format("continue", "Continue execution.") + \
                    "{:<18}{}\n".format("stop", "Stop program.") + \
                    "{:<18}{}\n".format("path", "Show the path of this result.") + \
                    "{:<18}{}\n".format("notion-open", "Open the markdown result in notion.") + \
                    "{:<18}{}\n".format("notion-sync", "Sync the notion result, save it as new output.")
                print(help_msg)
                continue
            elif same_string(user_input, "continue"):
                # resume graph execution
                print("Continuing execution...")
                break
            elif same_string(user_input, "stop"):
                # hard-stop the whole process
                print("Exiting the program...")
                exit(0)
            elif same_string(user_input, "path"):
                # show the directory holding this node's first output
                print(os.path.abspath(os.path.dirname(node.skill.outputs[0].param_path)))
                continue
            elif same_string(user_input, "notion-open"):
                # lazily create the Notion client on first use
                if self.notion is None:
                    self.notion = NotionActions()
                if not notion_file_opened:
                    skill_outputs = node.skill.outputs
                    if len(skill_outputs) > 0:
                        first_output = skill_outputs[0]
                        category = first_output.param_category
                        output_path = first_output.param_path
                        # NOTE(review): looks like a leftover debug print — confirm before removing
                        print(f"!!!!!!!{type(category)} {category}")
                        # only markdown-representable categories can be uploaded
                        if category == SkillIOParamCategory.ProductRequirementsDocument or \
                                category == SkillIOParamCategory.BusinessRequirementsDocument or \
                                category == SkillIOParamCategory.HighLevelDesignDocument or \
                                category == SkillIOParamCategory.PlainText or \
                                category == SkillIOParamCategory.KanbanBoard:
                            output_md_filepath = add_extension_if_not_exist(output_path, ".md")
                            self.notion.process_markdown_and_upload(output_md_filepath)
                        else:
                            print(f"Notion does not support {SkillIOParamCategory.PlainText} output.")
                            continue
                    else:
                        print("Notion does not support skill with no outputs.")
                        continue
                    notion_file_opened = True
                else:
                    print("File already opened in Notion.")
                continue
            elif same_string(user_input, "notion-sync"):
                # pull any user edits made in Notion back into the local output
                if notion_file_opened:
                    skill_outputs = node.skill.outputs
                    first_output = skill_outputs[0]
                    category = first_output.param_category
                    output_path = first_output.param_path
                    if category == SkillIOParamCategory.ProductRequirementsDocument or \
                            category == SkillIOParamCategory.BusinessRequirementsDocument or \
                            category == SkillIOParamCategory.HighLevelDesignDocument or \
                            category == SkillIOParamCategory.PlainText or \
                            category == SkillIOParamCategory.KanbanBoard:
                        output_md_file_dir = os.path.dirname(output_path)
                        output_md_file_name = os.path.basename(output_path)
                        self.notion.sync_from_notion(output_md_file_dir, output_md_file_name)
                        print("Notion file synced.")
                else:
                    print("notion-open command needs to be used first.")
                continue
            else:
                print("Invalid input entered.")
                continue
def save_data(self, filename: str = "data.json", generate_debug_info: bool = False):
    """Serialize every node of this graph into a JSON file."""
    save_to_json(generate_save_data_from_nodes(self.nodes, generate_debug_info), filename)
def load_data(self, filename: str = "data.json"):
    """Replace the current nodes with ones restored from a JSON file, then rewire edges."""
    self.nodes.clear()
    self.nodes = load_save_data_to_nodes(load_from_json(filename))
    self.init_node_dependencies()
def __is_circular(self):
    """Return True when the node graph contains at least one cycle."""
    visited = dict.fromkeys(self.node_map, False)
    on_path = dict.fromkeys(self.node_map, False)
    # the graph may be disconnected, so a DFS is started from every
    # not-yet-visited node
    return any(
        not visited[node_id] and self.__has_cycle(node_id, visited, on_path)
        for node_id in self.node_map
    )
def __has_cycle(self, current_node_id, visited, stack):
    """DFS step: return True if a cycle is reachable from this node.

    ``visited`` marks nodes ever seen; ``stack`` marks nodes on the
    currently active DFS path (a back-edge into it means a cycle).
    """
    visited[current_node_id] = True
    stack[current_node_id] = True  # now on the active path
    for neighbor_id in self.node_map[current_node_id].next_node_ids:
        if visited[neighbor_id]:
            # already explored: only a problem if it is still on the path
            if stack[neighbor_id]:
                return True
        elif self.__has_cycle(neighbor_id, visited, stack):
            return True
    # backtrack: this node is no longer on the active path
    stack[current_node_id] = False
    return False
class SkillInputLoadingMethod(Enum):
    """How a SkillInput obtains its content at execution time."""
    LOAD_FROM_OUTPUT_ID = 1  # consume another node's SkillOutput, wired by id
    LOAD_FROM_STRING = 2  # presumably read from the configured param_path — confirm against skill loaders
    LOAD_FROM_CACHE_STRING = 3  # use the in-memory `content` string directly
class SkillInputConfig:
    """Plain configuration record describing how one skill input is loaded."""

    def __init__(self, param_path: str, loading_method: SkillInputLoadingMethod, load_from_output_id: int, content: str = None):
        self.param_path = param_path
        self.loading_method = loading_method
        self.load_from_output_id = load_from_output_id
        self.content = content

    def to_dict(self):
        """Serialize to a plain dict; the loading method is stringified."""
        return dict(
            param_path=self.param_path,
            loading_method=str(self.loading_method),
            load_from_output_id=self.load_from_output_id,
            content=self.content,
        )
class ProductAnalysis(WorkSkill):
    """Skill that produces a 5W2H-style product analysis markdown report.

    Inputs: a repo summary (read from file) plus additional project info and
    requirements (both passed as in-memory strings). Output: one markdown
    document with the analysis.
    """

    def __init__(self):
        super().__init__()
        self.gpt_manager = GPTManager._instance
        self.name = SKILL_NAME_ANALYSIS_PRODUCT
        self.repo_summary = SkillInput(
            "Product Analysis Repo Summary",
            SkillIOParamCategory.PlainText,
        )
        self.additional_info = SkillInput(
            "Product Analysis Additional Info",
            SkillIOParamCategory.PlainText,
        )
        self.requirements = SkillInput(
            "Product Analysis Requirements",
            SkillIOParamCategory.PlainText,
        )
        self.add_input(self.repo_summary)
        self.add_input(self.additional_info)
        self.add_input(self.requirements)
        self.output_md = SkillOutput(
            "Requirments Analysis Markdown",  # (sic) label kept for path compatibility
            SkillIOParamCategory.PlainText,
        )
        self.add_output(self.output_md)
        # lazily populated content caches, filled by _read_input()
        self.additional_info_content = None
        self.repo_summary_content = None
        self.requirements_content = None

    def _read_input(self):
        """Populate the *_content caches from the inputs if not already set."""
        # Fix: previously this tested `self.additional_info is None`, which is
        # never true (it is always a SkillInput object), so the additional-info
        # content was never cached. Test the cached content instead, matching
        # the other two branches.
        if self.additional_info_content is None:
            self.additional_info_content = self.additional_info.content
        if self.repo_summary_content is None:
            self.repo_summary_content = self.__get_input_content(self.repo_summary)
        if self.requirements_content is None:
            self.requirements_content = self.requirements.content

    def __get_input_content(self, skill_input: SkillInput):
        """Read a text input from its resolved file path."""
        return load_from_text(self.get_input_path(skill_input), extension=".txt")

    def execution_impl(self):
        """Generate the analysis, save it as markdown and cache the result."""
        print("Generate product analysis here...")
        product_analysis = self._run_product_analysis_model()
        save_to_md2(self.output_md.param_path, product_analysis)
        self._save_to_result_cache(self.output_md, product_analysis)
        return

    def _run_product_analysis_model(self):
        """Run the analysis GPT model over the combined inputs and return its text."""
        logging.info("Running product analysis model...")
        prompt = build_gpt_prompt(PRODUCT_MANAGER_ANALYSIS_ROLE_ASSUMPTION, PRODUCT_MANAGER_5H2W_OUTPUT_TEMPLATE)
        model = self.gpt_manager.create_model(
            prompt=prompt,
            gpt_model_label="product_brainstorm",
            temperature=0.01,  # near-deterministic output
        )
        analysis = model.chat_with_model(self.__get_model_input())
        logging.info("Product analysis report: %s", analysis)
        return analysis

    def __get_model_input(self):
        """Bundle all three inputs into the single user message for the model."""
        return f'''Requirements: {self.requirements_content} \n Product Instruction: {self.repo_summary_content} \n Product additional background information: {self.additional_info_content}'''
class WritePRD(WorkSkill):
    """Skill that turns product key information into a PRD markdown document.

    The input is first expanded by a brainstorm model; the brainstorm result
    is then fed into the PRD-writing model, and the PRD is saved and cached.
    """

    def __init__(self):
        super().__init__()
        self.gpt_manager = GPTManager._instance
        self.name = SKILL_NAME_WRITE_PRODUCT_REQUIREMENTS_DOCUMENTATION
        self.input_product_key_info = SkillInput(
            "Design Doc",
            SkillIOParamCategory.PlainText,
        )
        self.add_input(self.input_product_key_info)
        self.output_md = SkillOutput(
            "Write prd Model PRD Result",
            SkillIOParamCategory.ProductRequirementsDocument,
        )
        self.add_output(self.output_md)
        self.input_content = None  # filled by _read_input()

    def _read_input(self):
        """Load the input from its file path, falling back to inline content."""
        source_path = self.get_input_path(self.input_product_key_info)
        try:
            self.input_content = load_from_text(source_path, extension=".md")
        except Exception:
            # not a readable file path — treat the input as a raw string
            self.input_content = self.input_product_key_info.content

    def execution_impl(self):
        """Brainstorm, write the PRD, then cache and persist the document."""
        print("Printing PRD result here...")
        brainstorm_result = self._run_product_brainstorm_model()
        prd_document = self.__run_write_prd_model(brainstorm_result)
        self._save_to_result_cache(self.output_md, prd_document)
        save_to_md2(self.output_md.param_path, prd_document)

    def __run_write_prd_model(self, brain_storm_product_info):
        """Feed the brainstorm output into the PRD-writing model and return the PRD."""
        logging.info("Running write prd model...")
        prd_prompt = build_gpt_prompt(PRODUCT_MANAGER_PRD_ROLE_ASSUMPTION, PRODUCT_MANAGER_PRD_OUTPUT_TEMPLATE)
        return self.gpt_manager.create_and_chat_with_model(
            prompt=prd_prompt,
            gpt_model_label="write_prd",
            input_message=brain_storm_product_info
        )

    def _run_product_brainstorm_model(self):
        """Run the brainstorm model over the raw input and return its output."""
        logging.info("Running product brainstorm model...")
        brainstorm_prompt = build_gpt_prompt(PRODUCT_MANAGER_BRAINSTORM_ROLE_ASSUMPTION, PRODUCT_MANAGER_BRAINSTORM_OUTPUT_TEMPLATE)
        model = self.gpt_manager.create_model(
            prompt=brainstorm_prompt,
            gpt_model_label="product_brainstorm",
            temperature=0.01,  # near-deterministic output
        )
        brainstorm = model.chat_with_model(self.input_content)
        logging.info("Brainstorm result: %s", brainstorm)
        return brainstorm
def build_prd_graph(requirement: str, project_additional_info: str, onborading_graph_id: str):
    """Assemble a two-node graph: product analysis (node 0) feeding PRD writing (node 1)."""
    prd_graph = WorkGraph(output_id=onborading_graph_id)
    onboarding_dir = os.path.join(LOCAL_STORAGE_OUTPUT_DIR, onborading_graph_id)
    # assumes the onboarding run already produced a code summary file — TODO confirm
    summary_path = glob.glob(os.path.join(onboarding_dir, 'Summary_project_*', "Code_Summary_*"))[0]
    analysis_inputs = [
        SkillInputConfig(summary_path, SkillInputLoadingMethod.LOAD_FROM_STRING, -1),
        SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING, -1, project_additional_info),
        SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING, -1, requirement),
    ]
    prd_graph.add_node(generate_node("0", ProductAnalysis(), analysis_inputs, output_ids=[0]))
    prd_graph.add_node(generate_node(
        "1", WritePRD(),
        [SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_OUTPUT_ID, 0)],
        output_ids=[1]))
    return prd_graph
168,061 | import os
import glob
import openai
from solidgpt.definitions import LOCAL_STORAGE_OUTPUT_DIR, TEST_SKILL_WORKSPACE
from solidgpt.src.manager.gptmanager import GPTManager
from solidgpt.src.util.util import save_to_json
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.skillio import SkillInputConfig, SkillInputLoadingMethod
from solidgpt.src.workskill.skills.analysis import ProductAnalysis
from solidgpt.src.workskill.skills.http_codesolution import HTTPCodeSolution
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workskill.skills.query_code_local import QueryCodeLocal
from solidgpt.src.workskill.skills.repo_chat import RepoChat
from solidgpt.src.workskill.skills.summarize_file import SummaryFile
from solidgpt.src.workskill.skills.summary_file_local import SummaryFileLocal
from solidgpt.src.workskill.skills.summary_project import SummaryProject
from solidgpt.src.workskill.skills.techsolution import ProvideTechSolution
from solidgpt.src.workskill.skills.write_prd import WritePRD
from solidgpt.src.workskill.skills.autogen_analysis import AutoGenAnalysis
from solidgpt.src.workskill.workskill import WorkSkill
def generate_node_with_output_configs(node_id: str, skill: WorkSkill, input_configs: list[SkillInputConfig], output_configs: list, manual_review_result: bool = False):
    """Configure a skill from explicit input/output config dicts and wrap it in a WorkNode."""
    input_dicts = [cfg.to_dict() for cfg in input_configs]
    skill.init_config(input_dicts, list(output_configs))
    return WorkNode(node_id, skill, manual_review_result)
# Root directory for all graph execution outputs.
# NOTE(review): relies on LOCAL_STORAGE_DIR being in scope here — it is not
# among the imports visible above; confirm it is defined in this module.
LOCAL_STORAGE_OUTPUT_DIR = os.path.join(LOCAL_STORAGE_DIR, "workspace", "out")
class WorkGraph:
    """Directed graph of WorkNodes wired together through skill output ids.

    A node's SkillInput may declare that it consumes another node's
    SkillOutput by id; ``init_node_dependencies`` resolves those links into
    edges, and ``execute`` runs the graph depth-first from the nodes that
    have no dependencies. Cyclic graphs are rejected up front.
    """

    # NOTE(review): class-level mutable defaults are shared by every WorkGraph
    # instance unless __init__ rebinds them. __init__ rebinds most of these,
    # but `cache` and `custom_data` remain shared across graph instances —
    # confirm that sharing is intentional before changing it.
    nodes: list[WorkNode] = []
    node_map: dict[str, WorkNode] = {}
    output_map: dict[int, SkillOutput] = {}
    output_id_to_node_map: dict[int, WorkNode] = {}
    display_result: DisplayResult
    notion = None  # lazily created NotionActions client
    cache = {}  # blackboard handed to every node as graph_cache
    callback_map: dict = {}  # skill name -> callback assigned to that skill
    custom_data: dict = {}

    def __init__(self, output_directory_path_override: str = "", output_id=None):
        """Create an empty graph.

        Args:
            output_directory_path_override: when non-empty, replaces the
                default output location entirely.
            output_id: subdirectory name under LOCAL_STORAGE_OUTPUT_DIR;
                defaults to a timestamp when omitted.
        """
        # rebind per instance so graphs do not share node/output state
        self.nodes = []
        self.node_map = {}
        self.output_map = {}
        self.output_id_to_node_map = {}
        self.callback_map = {}
        self.display_result = DisplayResult()
        self.output_directory_path = os.path.join(LOCAL_STORAGE_OUTPUT_DIR, output_id or time.strftime("%Y%m%d%H%M%S"))
        if output_directory_path_override:
            self.output_directory_path = os.path.abspath(output_directory_path_override)
        return

    def add_node(self, node: WorkNode):
        """Register a node and hand it the graph-wide cache."""
        node.graph_cache = self.cache
        self.nodes.append(node)

    def init_node_dependencies(self):
        """Resolve output-id links between nodes and prepare output folders.

        First pass: index nodes and their outputs and create one output
        directory per node. Second pass: for every input that loads from an
        output id, record the dependency edge in both directions. Terminates
        the process when the resulting graph is cyclic.
        """
        # rebuild all lookup tables from scratch
        self.node_map.clear()
        self.output_map.clear()
        self.output_id_to_node_map.clear()
        for node in self.nodes:
            self.node_map[node.node_id] = node
            # share the graph-level display_result with node and skill
            node.display_result = self.display_result
            node.skill.display_result = self.display_result
            # attach a registered callback to the skill, if any
            if node.skill.name in self.callback_map:
                node.skill.callback_func = self.callback_map.get(node.skill.name, None)
            # keep a graph back-reference in the skill
            node.skill.graph = self
            # one output directory per node, named "<skill>_<node_id>"
            node_directory_path = os.path.join(self.output_directory_path,
                                               (node.skill.name + "_" + str(node.node_id)).replace(" ", "_"))
            if not os.path.exists(node_directory_path):
                os.makedirs(node_directory_path)
                print(f"Directory '{node_directory_path}' created.")
            for o in node.skill.outputs:
                # resolve each output's on-disk path
                o.param_path = os.path.join(node_directory_path, (o.param_name + " " + str(o.id)).replace(" ", "_"))
                # only non-negative ids can be consumed by other nodes' inputs
                if o.id >= 0:
                    self.output_map[o.id] = o
                    self.output_id_to_node_map[o.id] = node
        # second pass, after the output map is complete: wire dependencies
        for node in self.nodes:
            for i in node.skill.inputs:
                if i.loading_method == SkillInputLoadingMethod.LOAD_FROM_OUTPUT_ID:
                    if i.load_from_output_id == -1:
                        print("Error, " + i.param_name + ": cannot load from output id: -1.")
                        continue
                    node.output_id_dependencies.add(i.load_from_output_id)
                    if i.load_from_output_id not in self.output_map:
                        print("Error, input %s: loading from an invalid output id %d."
                              % (i.param_name, i.load_from_output_id))
                        continue
                    output_node = self.output_id_to_node_map[i.load_from_output_id]
                    if output_node.node_id == node.node_id:
                        print("Error, " + i.param_name + ": cannot load from its own output.")
                        continue
                    # reverse edge: the producer knows which nodes to trigger next
                    output_node.next_node_ids.add(node.node_id)
                    i.skill_output = self.output_map[i.load_from_output_id]
        if self.__is_circular():
            print_error_message("Circular graph detected. Terminating program...")
            exit(1)
        return

    def execute(self):
        """Run every node, starting from the dependency-free roots."""
        logging.info("Executing SolidGPT...")
        first_nodes = [node for node in self.nodes if len(node.output_id_dependencies) == 0]
        if len(first_nodes) == 0:
            print_error_message("Cannot execute graph, no node can be executed.")
            # nothing can run — abort rather than reporting a successful completion
            return
        for node in first_nodes:
            self.__execute_node(node)
        logging.info(f"SolidGPT execution completed. All results are saved in {self.output_directory_path}")

    def __execute_node(self, node: WorkNode):
        """Execute one ready node, then recursively trigger its dependants."""
        if node.can_execute():
            node.skill.execute()
            # manual-review checkpoint, if the node requests one
            self.__node_pause(node)
            # notify downstream nodes that these outputs are now available
            for next_node_id in node.next_node_ids:
                next_node = self.node_map[next_node_id]
                for o in node.skill.outputs:
                    # discard(), not remove(): a dependant may consume only a
                    # subset of this node's outputs, and remove() would raise
                    # KeyError for ids it never depended on.
                    next_node.output_id_dependencies.discard(o.id)
                self.__execute_node(next_node)

    @staticmethod
    def __category_supported_by_notion(category):
        """True when results of this category can be pushed to Notion as markdown."""
        return category == SkillIOParamCategory.ProductRequirementsDocument or \
            category == SkillIOParamCategory.BusinessRequirementsDocument or \
            category == SkillIOParamCategory.HighLevelDesignDocument or \
            category == SkillIOParamCategory.PlainText or \
            category == SkillIOParamCategory.KanbanBoard

    def __node_pause(self, node: WorkNode):
        """Interactive checkpoint shown when a node is flagged for manual review.

        Blocks on stdin in a command loop until the user continues or stops;
        also offers opening/syncing the markdown result in Notion.
        """
        if not node.manual_review_result:
            return
        time.sleep(0.25)  # brief delay so prior output flushes before the prompt
        print("\nPlease review result generated by %s skill in node %s"
              % (node.skill.name, str(node.node_id)))
        # True once the result has been uploaded to Notion during this pause
        notion_file_opened = False
        while True:
            user_input = input("Execution is halted. Please specify an action, then press Enter. "
                               "To view all available actions, enter 'help':")
            if same_string(user_input, "help"):
                help_msg: str = "{:<18}{}\n".format("help", "Show this help message.") + \
                    "{:<18}{}\n".format("continue", "Continue execution.") + \
                    "{:<18}{}\n".format("stop", "Stop program.") + \
                    "{:<18}{}\n".format("path", "Show the path of this result.") + \
                    "{:<18}{}\n".format("notion-open", "Open the markdown result in notion.") + \
                    "{:<18}{}\n".format("notion-sync", "Sync the notion result, save it as new output.")
                print(help_msg)
                continue
            elif same_string(user_input, "continue"):
                print("Continuing execution...")
                break
            elif same_string(user_input, "stop"):
                print("Exiting the program...")
                exit(0)
            elif same_string(user_input, "path"):
                # directory holding this node's first output
                print(os.path.abspath(os.path.dirname(node.skill.outputs[0].param_path)))
                continue
            elif same_string(user_input, "notion-open"):
                # lazily create the Notion client on first use
                if self.notion is None:
                    self.notion = NotionActions()
                if notion_file_opened:
                    print("File already opened in Notion.")
                    continue
                skill_outputs = node.skill.outputs
                if len(skill_outputs) == 0:
                    print("Notion does not support skill with no outputs.")
                    continue
                first_output = skill_outputs[0]
                if not self.__category_supported_by_notion(first_output.param_category):
                    print(f"Notion does not support {SkillIOParamCategory.PlainText} output.")
                    continue
                output_md_filepath = add_extension_if_not_exist(first_output.param_path, ".md")
                self.notion.process_markdown_and_upload(output_md_filepath)
                notion_file_opened = True
                continue
            elif same_string(user_input, "notion-sync"):
                # pull any user edits made in Notion back into the local output
                if not notion_file_opened:
                    print("notion-open command needs to be used first.")
                    continue
                first_output = node.skill.outputs[0]
                if self.__category_supported_by_notion(first_output.param_category):
                    output_path = first_output.param_path
                    self.notion.sync_from_notion(os.path.dirname(output_path), os.path.basename(output_path))
                    print("Notion file synced.")
                continue
            else:
                print("Invalid input entered.")
                continue

    def save_data(self, filename: str = "data.json", generate_debug_info: bool = False):
        """Serialize every node of this graph into a JSON file."""
        save_to_json(generate_save_data_from_nodes(self.nodes, generate_debug_info), filename)
        return

    def load_data(self, filename: str = "data.json"):
        """Replace current nodes with ones restored from a JSON file, then rewire edges."""
        loaded_data = load_from_json(filename)
        self.nodes.clear()
        self.nodes = load_save_data_to_nodes(loaded_data)
        self.init_node_dependencies()
        return

    def __is_circular(self):
        """Return True when the node graph contains at least one cycle."""
        visited = {node_id: False for node_id in self.node_map}
        stack = {node_id: False for node_id in self.node_map}
        # the graph may be disconnected, so start a DFS from every unvisited node
        for node_id in self.node_map:
            if not visited[node_id]:
                if self.__has_cycle(node_id, visited, stack):
                    return True
        return False

    def __has_cycle(self, current_node_id, visited, stack):
        """DFS step of cycle detection.

        ``visited`` marks nodes ever seen; ``stack`` marks nodes on the
        currently active DFS path — a back-edge into it means a cycle.
        """
        visited[current_node_id] = True
        stack[current_node_id] = True
        for neighbor_id in self.node_map[current_node_id].next_node_ids:
            if not visited[neighbor_id]:
                if self.__has_cycle(neighbor_id, visited, stack):
                    return True
            # a visited neighbor still on the active path closes a cycle
            elif stack[neighbor_id]:
                return True
        # backtrack: this node leaves the active path
        stack[current_node_id] = False
        return False
class SkillInputLoadingMethod(Enum):
    """How a SkillInput obtains its content at execution time."""
    LOAD_FROM_OUTPUT_ID = 1  # consume another node's SkillOutput, wired by id
    LOAD_FROM_STRING = 2  # presumably read from the configured param_path — confirm against skill loaders
    LOAD_FROM_CACHE_STRING = 3  # use the in-memory `content` string directly
class SkillInputConfig:
    """Plain configuration record describing how one skill input is loaded."""

    def __init__(self, param_path: str, loading_method: SkillInputLoadingMethod, load_from_output_id: int, content: str = None):
        self.param_path = param_path
        self.loading_method = loading_method
        self.load_from_output_id = load_from_output_id
        self.content = content

    def to_dict(self):
        """Serialize to a plain dict; the loading method is stringified."""
        return dict(
            param_path=self.param_path,
            loading_method=str(self.loading_method),
            load_from_output_id=self.load_from_output_id,
            content=self.content,
        )
class ProductAnalysis(WorkSkill):
    """Skill that produces a 5W2H-style product analysis markdown report.

    Inputs: a repo summary (read from file) plus additional project info and
    requirements (both passed as in-memory strings). Output: one markdown
    document with the analysis.
    """

    def __init__(self):
        super().__init__()
        self.gpt_manager = GPTManager._instance
        self.name = SKILL_NAME_ANALYSIS_PRODUCT
        self.repo_summary = SkillInput(
            "Product Analysis Repo Summary",
            SkillIOParamCategory.PlainText,
        )
        self.additional_info = SkillInput(
            "Product Analysis Additional Info",
            SkillIOParamCategory.PlainText,
        )
        self.requirements = SkillInput(
            "Product Analysis Requirements",
            SkillIOParamCategory.PlainText,
        )
        self.add_input(self.repo_summary)
        self.add_input(self.additional_info)
        self.add_input(self.requirements)
        self.output_md = SkillOutput(
            "Requirments Analysis Markdown",  # (sic) label kept for path compatibility
            SkillIOParamCategory.PlainText,
        )
        self.add_output(self.output_md)
        # lazily populated content caches, filled by _read_input()
        self.additional_info_content = None
        self.repo_summary_content = None
        self.requirements_content = None

    def _read_input(self):
        """Populate the *_content caches from the inputs if not already set."""
        # Fix: previously this tested `self.additional_info is None`, which is
        # never true (it is always a SkillInput object), so the additional-info
        # content was never cached. Test the cached content instead, matching
        # the other two branches.
        if self.additional_info_content is None:
            self.additional_info_content = self.additional_info.content
        if self.repo_summary_content is None:
            self.repo_summary_content = self.__get_input_content(self.repo_summary)
        if self.requirements_content is None:
            self.requirements_content = self.requirements.content

    def __get_input_content(self, skill_input: SkillInput):
        """Read a text input from its resolved file path."""
        return load_from_text(self.get_input_path(skill_input), extension=".txt")

    def execution_impl(self):
        """Generate the analysis, save it as markdown and cache the result."""
        print("Generate product analysis here...")
        product_analysis = self._run_product_analysis_model()
        save_to_md2(self.output_md.param_path, product_analysis)
        self._save_to_result_cache(self.output_md, product_analysis)
        return

    def _run_product_analysis_model(self):
        """Run the analysis GPT model over the combined inputs and return its text."""
        logging.info("Running product analysis model...")
        prompt = build_gpt_prompt(PRODUCT_MANAGER_ANALYSIS_ROLE_ASSUMPTION, PRODUCT_MANAGER_5H2W_OUTPUT_TEMPLATE)
        model = self.gpt_manager.create_model(
            prompt=prompt,
            gpt_model_label="product_brainstorm",
            temperature=0.01,  # near-deterministic output
        )
        analysis = model.chat_with_model(self.__get_model_input())
        logging.info("Product analysis report: %s", analysis)
        return analysis

    def __get_model_input(self):
        """Bundle all three inputs into the single user message for the model."""
        return f'''Requirements: {self.requirements_content} \n Product Instruction: {self.repo_summary_content} \n Product additional background information: {self.additional_info_content}'''
class WritePRD(WorkSkill):
    """Skill that turns product key information into a PRD markdown document.

    The input is first expanded by a brainstorm model; the brainstorm result
    is then fed into the PRD-writing model, and the PRD is saved and cached.
    """

    def __init__(self):
        super().__init__()
        self.gpt_manager = GPTManager._instance
        self.name = SKILL_NAME_WRITE_PRODUCT_REQUIREMENTS_DOCUMENTATION
        self.input_product_key_info = SkillInput(
            "Design Doc",
            SkillIOParamCategory.PlainText,
        )
        self.add_input(self.input_product_key_info)
        self.output_md = SkillOutput(
            "Write prd Model PRD Result",
            SkillIOParamCategory.ProductRequirementsDocument,
        )
        self.add_output(self.output_md)
        self.input_content = None  # filled by _read_input()

    def _read_input(self):
        """Load the input from its file path, falling back to inline content."""
        source_path = self.get_input_path(self.input_product_key_info)
        try:
            self.input_content = load_from_text(source_path, extension=".md")
        except Exception:
            # not a readable file path — treat the input as a raw string
            self.input_content = self.input_product_key_info.content

    def execution_impl(self):
        """Brainstorm, write the PRD, then cache and persist the document."""
        print("Printing PRD result here...")
        brainstorm_result = self._run_product_brainstorm_model()
        prd_document = self.__run_write_prd_model(brainstorm_result)
        self._save_to_result_cache(self.output_md, prd_document)
        save_to_md2(self.output_md.param_path, prd_document)

    def __run_write_prd_model(self, brain_storm_product_info):
        """Feed the brainstorm output into the PRD-writing model and return the PRD."""
        logging.info("Running write prd model...")
        prd_prompt = build_gpt_prompt(PRODUCT_MANAGER_PRD_ROLE_ASSUMPTION, PRODUCT_MANAGER_PRD_OUTPUT_TEMPLATE)
        return self.gpt_manager.create_and_chat_with_model(
            prompt=prd_prompt,
            gpt_model_label="write_prd",
            input_message=brain_storm_product_info
        )

    def _run_product_brainstorm_model(self):
        """Run the brainstorm model over the raw input and return its output."""
        logging.info("Running product brainstorm model...")
        brainstorm_prompt = build_gpt_prompt(PRODUCT_MANAGER_BRAINSTORM_ROLE_ASSUMPTION, PRODUCT_MANAGER_BRAINSTORM_OUTPUT_TEMPLATE)
        model = self.gpt_manager.create_model(
            prompt=brainstorm_prompt,
            gpt_model_label="product_brainstorm",
            temperature=0.01,  # near-deterministic output
        )
        brainstorm = model.chat_with_model(self.input_content)
        logging.info("Brainstorm result: %s", brainstorm)
        return brainstorm
def build_prd_graph_with_stage(requirement: str, project_additional_info: str, onboarding_graph_id: str, stage: int,
                               edit_content: str, output_id: str):
    """Build a single-node PRD graph for the requested stage.

    Stage 0 builds the product-analysis node; stage 1 builds the PRD-writing
    node fed with the user-edited content. Any other stage yields None.
    """
    prd_graph = WorkGraph(output_id=output_id)
    onboarding_dir = os.path.join(LOCAL_STORAGE_OUTPUT_DIR, onboarding_graph_id)
    # assumes the onboarding run already produced a code summary file — TODO confirm
    summary_path = glob.glob(os.path.join(onboarding_dir, 'Summary_project_*', "Code_Summary_*"))[0]
    if stage == 0:
        node = generate_node_with_output_configs(
            "0", ProductAnalysis(),
            [
                SkillInputConfig(summary_path, SkillInputLoadingMethod.LOAD_FROM_STRING, -1),
                SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING, -1, project_additional_info),
                SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING, -1, requirement),
            ],
            [{"id": 0, "to_display": True}],
        )
    elif stage == 1:
        node = generate_node_with_output_configs(
            "1", WritePRD(),
            [SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING, -1, edit_content)],
            [{"id": 1, "to_display": True}],
        )
    else:
        # unsupported stage
        return None
    prd_graph.add_node(node)
    return prd_graph
168,062 | import os
import glob
import openai
from solidgpt.definitions import LOCAL_STORAGE_OUTPUT_DIR, TEST_SKILL_WORKSPACE
from solidgpt.src.manager.gptmanager import GPTManager
from solidgpt.src.util.util import save_to_json
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.skillio import SkillInputConfig, SkillInputLoadingMethod
from solidgpt.src.workskill.skills.analysis import ProductAnalysis
from solidgpt.src.workskill.skills.http_codesolution import HTTPCodeSolution
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workskill.skills.query_code_local import QueryCodeLocal
from solidgpt.src.workskill.skills.repo_chat import RepoChat
from solidgpt.src.workskill.skills.summarize_file import SummaryFile
from solidgpt.src.workskill.skills.summary_file_local import SummaryFileLocal
from solidgpt.src.workskill.skills.summary_project import SummaryProject
from solidgpt.src.workskill.skills.techsolution import ProvideTechSolution
from solidgpt.src.workskill.skills.write_prd import WritePRD
from solidgpt.src.workskill.skills.autogen_analysis import AutoGenAnalysis
from solidgpt.src.workskill.workskill import WorkSkill
def generate_node(node_id: str, skill: WorkSkill, input_configs: list[SkillInputConfig], output_ids: list[int], manual_review_result: bool = False):
    """Configure a skill from input configs and bare output ids, wrapped in a WorkNode."""
    skill.init_config(
        [cfg.to_dict() for cfg in input_configs],
        [{"id": oid} for oid in output_ids],
    )
    return WorkNode(node_id, skill, manual_review_result)
def generate_node_with_output_configs(node_id: str, skill: WorkSkill, input_configs: list[SkillInputConfig], output_configs: list, manual_review_result: bool = False):
    """Configure a skill from explicit input/output config dicts and wrap it in a WorkNode."""
    input_dicts = [cfg.to_dict() for cfg in input_configs]
    skill.init_config(input_dicts, list(output_configs))
    return WorkNode(node_id, skill, manual_review_result)
# Root directory for all graph execution outputs.
# NOTE(review): relies on LOCAL_STORAGE_DIR being in scope here — it is not
# among the imports visible above; confirm it is defined in this module.
LOCAL_STORAGE_OUTPUT_DIR = os.path.join(LOCAL_STORAGE_DIR, "workspace", "out")
class WorkGraph:
nodes: list[WorkNode] = []
node_map: dict[str, WorkNode] = {}
output_map: dict[int, SkillOutput] = {}
output_id_to_node_map: dict[int, WorkNode] = {}
display_result: DisplayResult
notion = None
cache = {}
callback_map: dict = {}
custom_data: dict = {}
def __init__(self, output_directory_path_override: str = "", output_id = None):
# need to manually initialize here
self.nodes = []
self.node_map = {}
self.output_map = {}
self.output_id_to_node_map = {}
self.callback_map = {}
self.display_result = DisplayResult()
self.output_directory_path = os.path.join(LOCAL_STORAGE_OUTPUT_DIR, output_id or time.strftime("%Y%m%d%H%M%S"))
if output_directory_path_override:
self.output_directory_path = os.path.abspath(output_directory_path_override)
return
def add_node(self, node: WorkNode):
node.graph_cache = self.cache
self.nodes.append(node)
def init_node_dependencies(self):
# clear node map and output map
self.node_map.clear()
self.output_map.clear()
self.output_id_to_node_map.clear()
for node in self.nodes:
# add node to node map
self.node_map[node.node_id] = node
# initialize display_result for children
node.display_result = self.display_result
node.skill.display_result = self.display_result
# intialize callback func for skills
if node.skill.name in self.callback_map:
node.skill.callback_func = self.callback_map.get(node.skill.name, None)
# keep a graph reference in skill
node.skill.graph = self
# create directory for node
node_directory_path = os.path.join(self.output_directory_path,
(node.skill.name + "_" + str(node.node_id)).replace(" ", "_"))
if not os.path.exists(node_directory_path):
# Create the output folder
os.makedirs(node_directory_path)
print(f"Directory '{node_directory_path}' created.")
# add output to output map
for o in node.skill.outputs:
# initialize output paths
o.param_path = os.path.join(node_directory_path, (o.param_name + " " + str(o.id)).replace(" ", "_"))
# output can be consumed by inputs of other nodes
if o.id >= 0:
self.output_map[o.id] = o
self.output_id_to_node_map[o.id] = node
# second iteration after output map has been initialized
for node in self.nodes:
# add output dependencies for node
for i in node.skill.inputs:
if i.loading_method == SkillInputLoadingMethod.LOAD_FROM_OUTPUT_ID:
if i.load_from_output_id == -1:
print("Error, " + i.param_name + ": cannot load from output id: -1.")
continue
node.output_id_dependencies.add(i.load_from_output_id)
# add current node to next_node_ids of the output node
if i.load_from_output_id not in self.output_map:
print("Error, input %s: loading from an invalid output id %d."
% (i.param_name, i.load_from_output_id))
continue
output_node = self.output_id_to_node_map[i.load_from_output_id]
if output_node.node_id == node.node_id:
print("Error, " + i.param_name + ": cannot load from its own output.")
continue
output_node.next_node_ids.add(node.node_id)
i.skill_output = self.output_map[i.load_from_output_id]
if self.__is_circular():
print_error_message("Circular graph detected. Terminating program...")
exit(1)
return
def execute(self):
logging.info("Executing SolidGPT...")
first_nodes = []
for node in self.nodes:
if len(node.output_id_dependencies) == 0:
first_nodes.append(node)
if len(first_nodes) == 0:
print_error_message("Cannot execute graph, no node can be executed.")
for node in first_nodes:
self.__execute_node(node)
logging.info(f"SolidGPT execution completed. All results are saved in {self.output_directory_path}")
def __execute_node(self, node: WorkNode):
if node.can_execute():
# execute skill
node.skill.execute()
# wait for potential node pause
self.__node_pause(node)
# notify other nodes
for next_node_id in node.next_node_ids:
next_node = self.node_map[next_node_id]
for o in node.skill.outputs:
next_node.output_id_dependencies.remove(o.id)
self.__execute_node(next_node)
    def __node_pause(self, node: WorkNode):
        """Interactive review checkpoint run after a node's skill executes.

        Only active when the node was created with manual_review_result=True.
        Blocks on stdin in a command loop until the user enters 'continue'
        (resume graph execution) or 'stop' (terminate the process). Other
        commands inspect or publish the node's first output (see 'help').
        """
        if node.manual_review_result:
            # Small delay so the prompt appears after any pending skill output.
            time.sleep(0.25)
            print("\nPlease review result generated by %s skill in node %s"
                  % (node.skill.name, str(node.node_id)))
            # Tracks whether this node's result was already pushed to Notion.
            notion_file_opened = False
            while True:
                user_input = input("Execution is halted. Please specify an action, then press Enter. "
                                   "To view all available actions, enter 'help':")
                if same_string(user_input, "help"):
                    help_msg: str = "{:<18}{}\n".format("help", "Show this help message.") + \
                                    "{:<18}{}\n".format("continue", "Continue execution.") + \
                                    "{:<18}{}\n".format("stop", "Stop program.") + \
                                    "{:<18}{}\n".format("path", "Show the path of this result.") + \
                                    "{:<18}{}\n".format("notion-open", "Open the markdown result in notion.") + \
                                    "{:<18}{}\n".format("notion-sync", "Sync the notion result, save it as new output.")
                    print(help_msg)
                    continue
                elif same_string(user_input, "continue"):
                    print("Continuing execution...")
                    break
                elif same_string(user_input, "stop"):
                    print("Exiting the program...")
                    exit(0)
                elif same_string(user_input, "path"):
                    # Directory containing the node's first output file.
                    print(os.path.abspath(os.path.dirname(node.skill.outputs[0].param_path)))
                    continue
                elif same_string(user_input, "notion-open"):
                    # Lazily create the Notion client on first use.
                    if self.notion is None:
                        self.notion = NotionActions()
                    if not notion_file_opened:
                        skill_outputs = node.skill.outputs
                        if len(skill_outputs) > 0:
                            first_output = skill_outputs[0]
                            category = first_output.param_category
                            output_path = first_output.param_path
                            print(f"!!!!!!!{type(category)} {category}")
                            # Only markdown-representable categories can be uploaded.
                            if category == SkillIOParamCategory.ProductRequirementsDocument or \
                                    category == SkillIOParamCategory.BusinessRequirementsDocument or \
                                    category == SkillIOParamCategory.HighLevelDesignDocument or \
                                    category == SkillIOParamCategory.PlainText or \
                                    category == SkillIOParamCategory.KanbanBoard:
                                output_md_filepath = add_extension_if_not_exist(output_path, ".md")
                                self.notion.process_markdown_and_upload(output_md_filepath)
                            else:
                                print(f"Notion does not support {SkillIOParamCategory.PlainText} output.")
                                continue
                        else:
                            print("Notion does not support skill with no outputs.")
                            continue
                        notion_file_opened = True
                    else:
                        print("File already opened in Notion.")
                    continue
                elif same_string(user_input, "notion-sync"):
                    # Pull the (possibly user-edited) page back from Notion.
                    if notion_file_opened:
                        skill_outputs = node.skill.outputs
                        first_output = skill_outputs[0]
                        category = first_output.param_category
                        output_path = first_output.param_path
                        if category == SkillIOParamCategory.ProductRequirementsDocument or \
                                category == SkillIOParamCategory.BusinessRequirementsDocument or \
                                category == SkillIOParamCategory.HighLevelDesignDocument or \
                                category == SkillIOParamCategory.PlainText or \
                                category == SkillIOParamCategory.KanbanBoard:
                            output_md_file_dir = os.path.dirname(output_path)
                            output_md_file_name = os.path.basename(output_path)
                            self.notion.sync_from_notion(output_md_file_dir, output_md_file_name)
                            print("Notion file synced.")
                    else:
                        print("notion-open command needs to be used first.")
                    continue
                else:
                    print("Invalid input entered.")
                    continue
def save_data(self, filename: str = "data.json", generate_debug_info: bool = False):
save_data = generate_save_data_from_nodes(self.nodes, generate_debug_info)
save_to_json(save_data, filename)
return
def load_data(self, filename: str = "data.json"):
loaded_data = load_from_json(filename)
self.nodes.clear()
self.nodes = load_save_data_to_nodes(loaded_data)
self.init_node_dependencies()
return
def __is_circular(self):
visited = {node_id: False for node_id in self.node_map}
stack = {node_id: False for node_id in self.node_map}
# check every node because the graph might be disjoint
for node_id in self.node_map:
if not visited[node_id]:
if self.__has_cycle(node_id, visited, stack):
return True
return False
def __has_cycle(self, current_node_id, visited, stack):
# mark the current node as visited
visited[current_node_id] = True
# add the current node to the stack representing the current path
stack[current_node_id] = True
# visit all the neighbors of the current node
for neighbor_id in self.node_map[current_node_id].next_node_ids:
# if the neighbor is not visited, visit it
if not visited[neighbor_id]:
if self.__has_cycle(neighbor_id, visited, stack):
return True
# if the neighbor is already in the current path, we have found a cycle
elif stack[neighbor_id]:
return True
# remove the current node from the current path stack
stack[current_node_id] = False
return False
class SkillInputLoadingMethod(Enum):
    """How a skill input obtains its content at execution time."""

    # Consume another node's output, matched by output id.
    LOAD_FROM_OUTPUT_ID = 1
    # Read the content directly from the configured path/string.
    LOAD_FROM_STRING = 2
    # Pull the content from the graph-level cache.
    LOAD_FROM_CACHE_STRING = 3
class SkillInputConfig:
    """Plain configuration record describing where one skill input comes from."""

    def __init__(self, param_path: str, loading_method: SkillInputLoadingMethod, load_from_output_id: int, content: str = None):
        self.param_path = param_path
        self.loading_method = loading_method
        self.load_from_output_id = load_from_output_id
        self.content = content

    def to_dict(self):
        """Return a JSON-serializable dict; the loading method is stringified."""
        return dict(
            param_path=self.param_path,
            loading_method=str(self.loading_method),
            load_from_output_id=self.load_from_output_id,
            content=self.content,
        )
class QueryCodeLocal(WorkSkill):
    """Skill that embeds the user's query and retrieves the best-matching code
    snippets from the local Qdrant collection built during onboarding.

    Inputs: onboarding id and the user's query (both plain strings).
    Output: a markdown-formatted text listing up to five high-scoring matches;
    the text is also stored in the graph cache under Cache_Label_Query_Code.
    """

    onboarding_id: str = None

    def __init__(self):
        super().__init__()
        self.name = SKILL_NAME_QUERY_CODE
        self.gpt_manager = GPTManager._instance
        self.onboarding_id_input = SkillInput(
            "Onboarding ID",
            SkillIOParamCategory.PlainText,
        )
        self.skill_input = SkillInput(
            "User Input",
            SkillIOParamCategory.PlainText,
        )
        self.add_input(self.onboarding_id_input)
        self.add_input(self.skill_input)
        self.skill_output = SkillOutput(
            "Best five code",
            SkillIOParamCategory.PlainText,
        )
        self.add_output(self.skill_output)
        self.user_input: str = None
        # Root folder of the per-onboarding embedding stores.
        self.qdrant_path = os.path.join(ROOT_DIR, "src", "tools", "qdrant", "embedding")

    def _read_input(self):
        """Read the string inputs and open the Qdrant store for this onboarding id."""
        self.onboarding_id = self.onboarding_id_input.content
        self.user_input = self.skill_input.content
        self.client = QdrantClient(path=os.path.join(self.qdrant_path, self.onboarding_id))

    def execution_impl(self):
        """Search, format, cache and persist the best matches, then close the client."""
        logging.info("Start to search best five code...")
        top_five = self.__find_top_five()
        output_file = self.__format_output(top_five)
        self.graph_cache[Cache_Label_Query_Code] = output_file
        save_to_text(os.path.join(self.skill_output.param_path), output_file)
        self.client.close()
        return

    def __find_top_five(self):
        """Embed the user query and return the five nearest candidates from Qdrant."""
        embeddings_model = OpenAIEmbeddings(openai_api_key=openai.api_key)
        embedding_query = embeddings_model.embed_query(self.user_input)
        search = self.client.search(
            collection_name=self.onboarding_id,
            query_vector=embedding_query,
            limit=5
        )
        return search

    def __format_output(self, top_five):
        """Render candidates scoring >= 0.7 as summary/code markdown sections.

        Fix: the method previously lacked the ``self`` parameter, so the call
        ``self.__format_output(top_five)`` raised a TypeError at runtime.
        """
        ret_str = []
        for candidate in top_five:
            score = candidate.dict()["score"]
            if score < 0.7:
                continue
            logging.info(score)
            summary = candidate.dict()["payload"]["summary"]
            code = candidate.dict()["payload"]["code"]
            ret_str.append(f"The summary is:\n{summary}\nThe code is:\n```\n{code}\n```")
        return "\n".join(ret_str)
class ProvideTechSolution(WorkSkill):
    """Skill that drafts a technical solution from the code schema, the project
    summary, related code files (taken from the graph cache), and the
    user-provided requirements, and writes the answer as markdown."""

    def __init__(self):
        super().__init__()
        self.gpt_manager = GPTManager._instance
        self.name = SKILL_NAME_PROVIDE_TECH_SOLUTION
        self.code_schema = SkillInput(
            "Code Schema",
            SkillIOParamCategory.PlainText,
        )
        self.summary = SkillInput(
            "Product Summary",
            SkillIOParamCategory.PlainText,
        )
        self.requirements = SkillInput(
            "Requirements",
            SkillIOParamCategory.PlainText,
        )
        for declared_input in (self.code_schema, self.summary, self.requirements):
            self.add_input(declared_input)
        self.output_md = SkillOutput(
            "Tech Solution Markdown",
            SkillIOParamCategory.PlainText,
        )
        self.add_output(self.output_md)
        # Content fields are filled in by _read_input().
        self.relatived_files_content = None
        self.code_schema_content = None
        self.summary_content = None
        self.requirements_content = None

    def _read_input(self):
        """Populate the content fields from the graph cache and the input files."""
        # Get from cache or read from file
        self.relatived_files_content = self._get_graph_cached_content(Cache_Label_Query_Code)
        self.code_schema_content = self.__get_input_content(self.code_schema)
        self.summary_content = self.__get_input_content(self.summary)
        self.requirements_content = self.requirements.content

    def __get_input_content(self, skill_input: SkillInput):
        """Load one plain-text input from its resolved path."""
        return load_from_text(self.get_input_path(skill_input), extension=".txt")

    def execution_impl(self):
        """Run the model, then persist and cache the generated solution."""
        print("Generate product analysis here...")
        solution = self.__run_provide_tech_solution_model_model()
        save_to_md2(self.output_md.param_path, solution)
        self._save_to_result_cache(self.output_md, solution)

    def __run_provide_tech_solution_model_model(self):
        """Create the tech-solution GPT model and return its response."""
        logging.info("Running provide tech solution model...")
        tech_model = self.gpt_manager.create_model(
            prompt=SDE_TECH_SOLUTION_ASSUMPTION,
            gpt_model_label="sde_tech_solution",
            temperature=0.01,
            model="gpt4",
        )
        return tech_model.chat_with_model(self.__get_model_input())

    def __get_model_input(self):
        """Assemble the combined prompt from all gathered content fields."""
        return f'''Requirements: {self.requirements_content} \n
Project Instruction: {self.summary_content} \n
Code schema: {self.code_schema_content} \n
Relatived code files: {self.relatived_files_content} \n'''
def build_tech_solution_graph(requirement: str, onboarding_graph_id: str, output_id: str):
    """Assemble the two-node graph: query related code, then draft a tech solution."""
    graph = WorkGraph(output_id=output_id)
    onboarding_folder_path = os.path.join(LOCAL_STORAGE_OUTPUT_DIR, onboarding_graph_id)
    # Artifacts produced by the onboarding summary step.
    schema_file = glob.glob(os.path.join(onboarding_folder_path, 'Summary_project_*', "Code_Schema_*"))[0]
    summary_file = glob.glob(os.path.join(onboarding_folder_path, 'Summary_project_*', "Code_Summary_*"))[0]
    # Node 0: retrieve the code snippets most relevant to the requirement.
    graph.add_node(generate_node(
        "0", QueryCodeLocal(),
        [
            SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING, -1, onboarding_graph_id),
            SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING, -1, requirement),
        ],
        output_ids=[0]))
    # Node 1: produce the tech solution; its output is flagged for display.
    graph.add_node(generate_node_with_output_configs(
        "1", ProvideTechSolution(),
        [
            SkillInputConfig(schema_file, SkillInputLoadingMethod.LOAD_FROM_STRING, -1),
            SkillInputConfig(summary_file, SkillInputLoadingMethod.LOAD_FROM_STRING, -1),
            SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING, -1, requirement),
        ],
        [
            {"id": 1, "to_display": True},
        ]))
    return graph
168,063 | import os
import glob
import openai
from solidgpt.definitions import LOCAL_STORAGE_OUTPUT_DIR, TEST_SKILL_WORKSPACE
from solidgpt.src.manager.gptmanager import GPTManager
from solidgpt.src.util.util import save_to_json
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.skillio import SkillInputConfig, SkillInputLoadingMethod
from solidgpt.src.workskill.skills.analysis import ProductAnalysis
from solidgpt.src.workskill.skills.http_codesolution import HTTPCodeSolution
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workskill.skills.query_code_local import QueryCodeLocal
from solidgpt.src.workskill.skills.repo_chat import RepoChat
from solidgpt.src.workskill.skills.summarize_file import SummaryFile
from solidgpt.src.workskill.skills.summary_file_local import SummaryFileLocal
from solidgpt.src.workskill.skills.summary_project import SummaryProject
from solidgpt.src.workskill.skills.techsolution import ProvideTechSolution
from solidgpt.src.workskill.skills.write_prd import WritePRD
from solidgpt.src.workskill.skills.autogen_analysis import AutoGenAnalysis
from solidgpt.src.workskill.workskill import WorkSkill
def generate_node(node_id: str, skill: WorkSkill, input_configs: list[SkillInputConfig], output_ids: list[int], manual_review_result: bool = False):
    """Configure *skill* with the given inputs/outputs and wrap it in a WorkNode."""
    input_dicts = [config.to_dict() for config in input_configs]
    output_dicts = [{"id": output_id} for output_id in output_ids]
    skill.init_config(input_dicts, output_dicts)
    return WorkNode(node_id, skill, manual_review_result)
def generate_node_with_output_configs(node_id: str, skill: WorkSkill, input_configs: list[SkillInputConfig], output_configs: list, manual_review_result: bool = False):
    """Like generate_node(), but output configs are passed through as raw dicts."""
    skill.init_config(
        [config.to_dict() for config in input_configs],
        list(output_configs),
    )
    return WorkNode(node_id, skill, manual_review_result)
# Root directory for all graph run outputs (one id/timestamp-named subfolder per run).
# NOTE(review): LOCAL_STORAGE_DIR is not visible in this chunk's imports — confirm it
# is defined upstream in this module.
LOCAL_STORAGE_OUTPUT_DIR = os.path.join(LOCAL_STORAGE_DIR, "workspace", "out")
def save_to_json(data, filename="data.json"):
    """Write *data* to *filename* as pretty-printed JSON, creating parent dirs.

    Fix: the previous implementation wrapped json.dump() in print(), which
    always printed ``None`` to stdout (json.dump writes to the file object
    and returns None).
    """
    create_directories_if_not_exist(filename)
    # Save data to a JSON file
    with open(filename, "w") as json_file:
        json.dump(data, json_file, indent=4)
class WorkGraph:
    """Directed acyclic graph of WorkNodes.

    Each node wraps a skill; an edge exists when one node's skill input loads
    from another node's output id. init_node_dependencies() wires the edges,
    creates per-node output folders and rejects cyclic graphs; execute() then
    runs the graph starting from the nodes with no pending dependencies.
    """

    # Class-level declarations. The containers that matter per graph are
    # re-created in __init__; `cache`, `notion` and `custom_data` stay
    # class-shared unless reassigned — presumably intentional, TODO confirm.
    nodes: list[WorkNode] = []
    node_map: dict[str, WorkNode] = {}
    output_map: dict[int, SkillOutput] = {}
    output_id_to_node_map: dict[int, WorkNode] = {}
    display_result: DisplayResult
    notion = None
    cache = {}
    callback_map: dict = {}
    custom_data: dict = {}

    def __init__(self, output_directory_path_override: str = "", output_id = None):
        """Create an empty graph; results go under LOCAL_STORAGE_OUTPUT_DIR/<output_id
        or timestamp> unless an explicit directory override is given."""
        # need to manually initialize here
        self.nodes = []
        self.node_map = {}
        self.output_map = {}
        self.output_id_to_node_map = {}
        self.callback_map = {}
        self.display_result = DisplayResult()
        self.output_directory_path = os.path.join(LOCAL_STORAGE_OUTPUT_DIR, output_id or time.strftime("%Y%m%d%H%M%S"))
        if output_directory_path_override:
            self.output_directory_path = os.path.abspath(output_directory_path_override)
        return

    def add_node(self, node: WorkNode):
        """Append *node* and share the graph-level cache with it."""
        node.graph_cache = self.cache
        self.nodes.append(node)

    def init_node_dependencies(self):
        """(Re)build node/output maps, wire input->output edges, create node
        output directories, and terminate the process on a circular graph."""
        # clear node map and output map
        self.node_map.clear()
        self.output_map.clear()
        self.output_id_to_node_map.clear()
        for node in self.nodes:
            # add node to node map
            self.node_map[node.node_id] = node
            # initialize display_result for children
            node.display_result = self.display_result
            node.skill.display_result = self.display_result
            # initialize callback func for skills
            if node.skill.name in self.callback_map:
                node.skill.callback_func = self.callback_map.get(node.skill.name, None)
            # keep a graph reference in skill
            node.skill.graph = self
            # create directory for node
            node_directory_path = os.path.join(self.output_directory_path,
                                               (node.skill.name + "_" + str(node.node_id)).replace(" ", "_"))
            if not os.path.exists(node_directory_path):
                # Create the output folder
                os.makedirs(node_directory_path)
                print(f"Directory '{node_directory_path}' created.")
            # add output to output map
            for o in node.skill.outputs:
                # initialize output paths
                o.param_path = os.path.join(node_directory_path, (o.param_name + " " + str(o.id)).replace(" ", "_"))
                # output can be consumed by inputs of other nodes
                if o.id >= 0:
                    self.output_map[o.id] = o
                    self.output_id_to_node_map[o.id] = node
        # second iteration after output map has been initialized
        for node in self.nodes:
            # add output dependencies for node
            for i in node.skill.inputs:
                if i.loading_method == SkillInputLoadingMethod.LOAD_FROM_OUTPUT_ID:
                    if i.load_from_output_id == -1:
                        print("Error, " + i.param_name + ": cannot load from output id: -1.")
                        continue
                    node.output_id_dependencies.add(i.load_from_output_id)
                    # add current node to next_node_ids of the output node
                    if i.load_from_output_id not in self.output_map:
                        print("Error, input %s: loading from an invalid output id %d."
                              % (i.param_name, i.load_from_output_id))
                        continue
                    output_node = self.output_id_to_node_map[i.load_from_output_id]
                    if output_node.node_id == node.node_id:
                        print("Error, " + i.param_name + ": cannot load from its own output.")
                        continue
                    output_node.next_node_ids.add(node.node_id)
                    i.skill_output = self.output_map[i.load_from_output_id]
        if self.__is_circular():
            print_error_message("Circular graph detected. Terminating program...")
            exit(1)
        return

    def execute(self):
        """Run the graph, starting every node that waits on no output."""
        logging.info("Executing SolidGPT...")
        first_nodes = []
        for node in self.nodes:
            if len(node.output_id_dependencies) == 0:
                first_nodes.append(node)
        if len(first_nodes) == 0:
            print_error_message("Cannot execute graph, no node can be executed.")
        for node in first_nodes:
            self.__execute_node(node)
        logging.info(f"SolidGPT execution completed. All results are saved in {self.output_directory_path}")

    def __execute_node(self, node: WorkNode):
        """Execute *node* if ready, then recursively unblock downstream nodes.

        NOTE(review): remove() raises KeyError if a downstream node depends on
        only a subset of this node's outputs (or an unregistered id, e.g.
        id < 0) — set.discard() would be the safe operation here.
        """
        if node.can_execute():
            # execute skill
            node.skill.execute()
            # wait for potential node pause
            self.__node_pause(node)
            # notify other nodes
            for next_node_id in node.next_node_ids:
                next_node = self.node_map[next_node_id]
                for o in node.skill.outputs:
                    next_node.output_id_dependencies.remove(o.id)
                self.__execute_node(next_node)

    def __node_pause(self, node: WorkNode):
        """Interactive review checkpoint (active when manual_review_result=True).

        Blocks on stdin until the user enters 'continue' (resume) or 'stop'
        (terminate); other commands inspect or publish the node's first output.
        """
        if node.manual_review_result:
            # Small delay so the prompt appears after any pending skill output.
            time.sleep(0.25)
            print("\nPlease review result generated by %s skill in node %s"
                  % (node.skill.name, str(node.node_id)))
            # Tracks whether this node's result was already pushed to Notion.
            notion_file_opened = False
            while True:
                user_input = input("Execution is halted. Please specify an action, then press Enter. "
                                   "To view all available actions, enter 'help':")
                if same_string(user_input, "help"):
                    help_msg: str = "{:<18}{}\n".format("help", "Show this help message.") + \
                                    "{:<18}{}\n".format("continue", "Continue execution.") + \
                                    "{:<18}{}\n".format("stop", "Stop program.") + \
                                    "{:<18}{}\n".format("path", "Show the path of this result.") + \
                                    "{:<18}{}\n".format("notion-open", "Open the markdown result in notion.") + \
                                    "{:<18}{}\n".format("notion-sync", "Sync the notion result, save it as new output.")
                    print(help_msg)
                    continue
                elif same_string(user_input, "continue"):
                    print("Continuing execution...")
                    break
                elif same_string(user_input, "stop"):
                    print("Exiting the program...")
                    exit(0)
                elif same_string(user_input, "path"):
                    # Directory containing the node's first output file.
                    print(os.path.abspath(os.path.dirname(node.skill.outputs[0].param_path)))
                    continue
                elif same_string(user_input, "notion-open"):
                    # Lazily create the Notion client on first use.
                    if self.notion is None:
                        self.notion = NotionActions()
                    if not notion_file_opened:
                        skill_outputs = node.skill.outputs
                        if len(skill_outputs) > 0:
                            first_output = skill_outputs[0]
                            category = first_output.param_category
                            output_path = first_output.param_path
                            print(f"!!!!!!!{type(category)} {category}")
                            # Only markdown-representable categories can be uploaded.
                            if category == SkillIOParamCategory.ProductRequirementsDocument or \
                                    category == SkillIOParamCategory.BusinessRequirementsDocument or \
                                    category == SkillIOParamCategory.HighLevelDesignDocument or \
                                    category == SkillIOParamCategory.PlainText or \
                                    category == SkillIOParamCategory.KanbanBoard:
                                output_md_filepath = add_extension_if_not_exist(output_path, ".md")
                                self.notion.process_markdown_and_upload(output_md_filepath)
                            else:
                                print(f"Notion does not support {SkillIOParamCategory.PlainText} output.")
                                continue
                        else:
                            print("Notion does not support skill with no outputs.")
                            continue
                        notion_file_opened = True
                    else:
                        print("File already opened in Notion.")
                    continue
                elif same_string(user_input, "notion-sync"):
                    # Pull the (possibly user-edited) page back from Notion.
                    if notion_file_opened:
                        skill_outputs = node.skill.outputs
                        first_output = skill_outputs[0]
                        category = first_output.param_category
                        output_path = first_output.param_path
                        if category == SkillIOParamCategory.ProductRequirementsDocument or \
                                category == SkillIOParamCategory.BusinessRequirementsDocument or \
                                category == SkillIOParamCategory.HighLevelDesignDocument or \
                                category == SkillIOParamCategory.PlainText or \
                                category == SkillIOParamCategory.KanbanBoard:
                            output_md_file_dir = os.path.dirname(output_path)
                            output_md_file_name = os.path.basename(output_path)
                            self.notion.sync_from_notion(output_md_file_dir, output_md_file_name)
                            print("Notion file synced.")
                    else:
                        print("notion-open command needs to be used first.")
                    continue
                else:
                    print("Invalid input entered.")
                    continue

    def save_data(self, filename: str = "data.json", generate_debug_info: bool = False):
        """Serialize the graph's nodes to *filename* as JSON."""
        save_data = generate_save_data_from_nodes(self.nodes, generate_debug_info)
        save_to_json(save_data, filename)
        return

    def load_data(self, filename: str = "data.json"):
        """Replace the current nodes with those loaded from *filename* and rewire."""
        loaded_data = load_from_json(filename)
        self.nodes.clear()
        self.nodes = load_save_data_to_nodes(loaded_data)
        self.init_node_dependencies()
        return

    def __is_circular(self):
        """Return True when the node graph contains a cycle (DFS per component)."""
        visited = {node_id: False for node_id in self.node_map}
        stack = {node_id: False for node_id in self.node_map}
        # check every node because the graph might be disjoint
        for node_id in self.node_map:
            if not visited[node_id]:
                if self.__has_cycle(node_id, visited, stack):
                    return True
        return False

    def __has_cycle(self, current_node_id, visited, stack):
        """DFS helper: True when a back edge is reachable from *current_node_id*."""
        # mark the current node as visited
        visited[current_node_id] = True
        # add the current node to the stack representing the current path
        stack[current_node_id] = True
        # visit all the neighbors of the current node
        for neighbor_id in self.node_map[current_node_id].next_node_ids:
            # if the neighbor is not visited, visit it
            if not visited[neighbor_id]:
                if self.__has_cycle(neighbor_id, visited, stack):
                    return True
            # if the neighbor is already in the current path, we have found a cycle
            elif stack[neighbor_id]:
                return True
        # remove the current node from the current path stack
        stack[current_node_id] = False
        return False
class SkillInputLoadingMethod(Enum):
    """How a skill input obtains its content at execution time."""

    # Consume another node's output, matched by output id.
    LOAD_FROM_OUTPUT_ID = 1
    # Read the content directly from the configured path/string.
    LOAD_FROM_STRING = 2
    # Pull the content from the graph-level cache.
    LOAD_FROM_CACHE_STRING = 3
class SkillInputConfig:
    """Plain configuration record describing where one skill input comes from."""

    def __init__(self, param_path: str, loading_method: SkillInputLoadingMethod, load_from_output_id: int, content: str = None):
        self.param_path = param_path
        self.loading_method = loading_method
        self.load_from_output_id = load_from_output_id
        self.content = content

    def to_dict(self):
        """Return a JSON-serializable dict; the loading method is stringified."""
        return dict(
            param_path=self.param_path,
            loading_method=str(self.loading_method),
            load_from_output_id=self.load_from_output_id,
            content=self.content,
        )
class QueryCodeLocal(WorkSkill):
    """Skill that embeds the user's query and retrieves the best-matching code
    snippets from the local Qdrant collection built during onboarding.

    Inputs: onboarding id and the user's query (both plain strings).
    Output: a markdown-formatted text listing up to five high-scoring matches;
    the text is also stored in the graph cache under Cache_Label_Query_Code.
    """

    onboarding_id: str = None

    def __init__(self):
        super().__init__()
        self.name = SKILL_NAME_QUERY_CODE
        self.gpt_manager = GPTManager._instance
        self.onboarding_id_input = SkillInput(
            "Onboarding ID",
            SkillIOParamCategory.PlainText,
        )
        self.skill_input = SkillInput(
            "User Input",
            SkillIOParamCategory.PlainText,
        )
        self.add_input(self.onboarding_id_input)
        self.add_input(self.skill_input)
        self.skill_output = SkillOutput(
            "Best five code",
            SkillIOParamCategory.PlainText,
        )
        self.add_output(self.skill_output)
        self.user_input: str = None
        # Root folder of the per-onboarding embedding stores.
        self.qdrant_path = os.path.join(ROOT_DIR, "src", "tools", "qdrant", "embedding")

    def _read_input(self):
        """Read the string inputs and open the Qdrant store for this onboarding id."""
        self.onboarding_id = self.onboarding_id_input.content
        self.user_input = self.skill_input.content
        self.client = QdrantClient(path=os.path.join(self.qdrant_path, self.onboarding_id))

    def execution_impl(self):
        """Search, format, cache and persist the best matches, then close the client."""
        logging.info("Start to search best five code...")
        top_five = self.__find_top_five()
        output_file = self.__format_output(top_five)
        self.graph_cache[Cache_Label_Query_Code] = output_file
        save_to_text(os.path.join(self.skill_output.param_path), output_file)
        self.client.close()
        return

    def __find_top_five(self):
        """Embed the user query and return the five nearest candidates from Qdrant."""
        embeddings_model = OpenAIEmbeddings(openai_api_key=openai.api_key)
        embedding_query = embeddings_model.embed_query(self.user_input)
        search = self.client.search(
            collection_name=self.onboarding_id,
            query_vector=embedding_query,
            limit=5
        )
        return search

    def __format_output(self, top_five):
        """Render candidates scoring >= 0.7 as summary/code markdown sections.

        Fix: the method previously lacked the ``self`` parameter, so the call
        ``self.__format_output(top_five)`` raised a TypeError at runtime.
        """
        ret_str = []
        for candidate in top_five:
            score = candidate.dict()["score"]
            if score < 0.7:
                continue
            logging.info(score)
            summary = candidate.dict()["payload"]["summary"]
            code = candidate.dict()["payload"]["code"]
            ret_str.append(f"The summary is:\n{summary}\nThe code is:\n```\n{code}\n```")
        return "\n".join(ret_str)
class RepoChat(WorkSkill):
    """Conversational skill over a repository.

    Answers the user's requirement using the code schema, project summary,
    related code files (from the graph cache) and a rolling chat history that
    is persisted as JSON and re-fed to the model as background.
    """

    # Number of most recent exchanges fed back into the model as background.
    Memory_Length = 1

    def __init__(self):
        super().__init__()
        self.gpt_manager = GPTManager._instance
        self.name = SKILL_NAME_REPO_CHAT
        self.code_schema = SkillInput(
            "Code Schema",
            SkillIOParamCategory.PlainText,
        )
        self.summary = SkillInput(
            "Product Summary",
            SkillIOParamCategory.PlainText,
        )
        self.requirements = SkillInput(
            "Requirements",
            SkillIOParamCategory.PlainText,
        )
        self.history_context = SkillInput(
            "History Context",
            SkillIOParamCategory.PlainText,
        )
        self.add_input(self.code_schema)
        self.add_input(self.summary)
        self.add_input(self.requirements)
        self.add_input(self.history_context)
        self.output_md = SkillOutput(
            "Tech Solution Markdown",
            SkillIOParamCategory.PlainText,
        )
        self.add_output(self.output_md)
        # Content fields are filled in by _read_input().
        self.relatived_files_content = None
        self.code_schema_content = None
        self.summary_content = None
        self.history_contexts_content = []
        self.requirements_content = None

    def _read_input(self):
        """Load inputs from the graph cache, the text files, and the history JSON."""
        # Get from cache or read from file
        self.relatived_files_content = self._get_graph_cached_content(Cache_Label_Query_Code)
        self.code_schema_content = self.__get_input_content(self.code_schema)
        self.summary_content = self.__get_input_content(self.summary)
        self.requirements_content = self.requirements.content
        self.history_contexts_content = self.__get_history_context()

    def __get_input_content(self, skill_input: SkillInput):
        """Load one plain-text input from its resolved path."""
        return load_from_text(self.get_input_path(skill_input), extension=".txt")

    def __get_history_context(self):
        """Parse the persisted chat-history JSON into HistoryContext objects."""
        json_data = load_from_text(self.get_input_path(self.history_context), extension=".json")
        print(json_data)
        # Load JSON data
        data = json.loads(json_data)
        # Extract HistoryContent list
        history_content = data["HistoryContent"]
        history_contexts_content = []
        # Create a list of HistoryContext objects
        for item in history_content:
            system_output = item["SystemOutput"]
            user_input = item["UserInput"]
            history_contexts_content.append(HistoryContext(system_output, user_input))
        return history_contexts_content

    def execution_impl(self):
        """Run one chat turn, persist the updated history, and cache the transcript."""
        system_output = self.__run_chat_with_repo_model()
        # Save system_output into the history context.
        # (Fix: removed a stray no-op statement `current_context.system_output`.)
        current_context = HistoryContext(system_output, self.requirements_content)
        self.history_contexts_content.append(current_context)
        # Convert the list of HistoryContext objects to a list of dictionaries
        history_list = [{"UserInput": hc.user_input, "SystemOutput": hc.system_output} for hc in self.history_contexts_content]
        save_to_json({"HistoryContent": history_list}, self.history_context.param_path)
        # Show the result
        self._save_to_result_cache(self.output_md, str(self.__get_display_format()))
        return

    def __run_chat_with_repo_model(self):
        """Create the repo-chat GPT model and return its response."""
        logging.info("Running repo chat model...")
        model = self.gpt_manager.create_model(
            prompt=f"""Assume you are principle SDE, you will be an code expert to
            give code plan,
            code advise,
            explain the code.
            Please base on the Project Instruction, Code Schema,
            Relatived code files, and Background I provide below and your professional relatived knowledge
            to response to the Requirements. The requirements as follow: """,
            gpt_model_label="repo_chat",
            temperature=0.01,
            model="gpt4",
        )
        solution = model.chat_with_model(self.__get_model_input())
        return solution

    def __get_model_input(self):
        """Assemble the combined prompt, including the trailing history window."""
        return f'''Requirements: {self.requirements_content} \n
        And the repository information as below
        Project Instruction: {self.summary_content} \n
        Code schema: {self.code_schema_content} \n
        Relatived code files: {self.relatived_files_content} \n
        Background: { self.history_contexts_content[-self.Memory_Length:] if len(self.history_contexts_content) > self.Memory_Length else self.history_contexts_content}\n
        and always input the Markdown clean format '''

    def __get_display_format(self):
        """Render the chat history newest-first as a Markdown transcript."""
        display_content = ''
        for context in self.history_contexts_content[::-1]:
            display_content += '**You:** \n'
            display_content += '\n'
            display_content += f"{context.user_input} \n"
            display_content += '\n'
            display_content += '**SolidGPT:** \n'
            display_content += '\n'
            display_content += f"{context.system_output} \n"
            display_content += '\n'
            display_content += "-------------------------------------\n"
        return display_content
def build_repo_chat_graph(requirement: str, onboarding_graph_id: str, output_id: str):
    """Assemble the repo-chat graph: query related code, then chat with history.

    Fix: ``history_context_path`` was computed twice with identical arguments;
    the duplicate assignment was removed.
    """
    graph = WorkGraph(output_id=output_id)
    onboarding_folder_path = os.path.join(LOCAL_STORAGE_OUTPUT_DIR, onboarding_graph_id)
    # Artifacts produced by the onboarding summary step.
    code_shema_path = glob.glob(os.path.join(onboarding_folder_path, 'Summary_project_*', "Code_Schema_*"))[0]
    code_summary_path = glob.glob(os.path.join(onboarding_folder_path, 'Summary_project_*', "Code_Summary_*"))[0]
    # Path of the persisted chat history for this onboarding id.
    history_context_path = os.path.join(onboarding_folder_path, f'{onboarding_graph_id}_repochat.json')
    # Create history context json file if not exist
    if not os.path.exists(history_context_path):
        # Create a default JSON data structure if the file doesn't exist
        default_data = {"HistoryContent": []}
        save_to_json(default_data, history_context_path)
    query_code = generate_node("0", QueryCodeLocal(),
                               [
                                   SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING, -1, onboarding_graph_id),
                                   SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING, -1, requirement),
                               ], output_ids=[0])
    tech_solution = generate_node_with_output_configs("1", RepoChat(),
                                                      [
                                                          SkillInputConfig(code_shema_path, SkillInputLoadingMethod.LOAD_FROM_STRING, -1),
                                                          SkillInputConfig(code_summary_path, SkillInputLoadingMethod.LOAD_FROM_STRING, -1),
                                                          SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING, -1, requirement),
                                                          SkillInputConfig(history_context_path, SkillInputLoadingMethod.LOAD_FROM_STRING, -1),
                                                      ],
                                                      [
                                                          {"id": 1, "to_display": True}
                                                      ])
    graph.add_node(query_code)
    graph.add_node(tech_solution)
    return graph
168,064 | import os
import glob
import openai
from solidgpt.definitions import LOCAL_STORAGE_OUTPUT_DIR, TEST_SKILL_WORKSPACE
from solidgpt.src.manager.gptmanager import GPTManager
from solidgpt.src.util.util import save_to_json
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.skillio import SkillInputConfig, SkillInputLoadingMethod
from solidgpt.src.workskill.skills.analysis import ProductAnalysis
from solidgpt.src.workskill.skills.http_codesolution import HTTPCodeSolution
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workskill.skills.query_code_local import QueryCodeLocal
from solidgpt.src.workskill.skills.repo_chat import RepoChat
from solidgpt.src.workskill.skills.summarize_file import SummaryFile
from solidgpt.src.workskill.skills.summary_file_local import SummaryFileLocal
from solidgpt.src.workskill.skills.summary_project import SummaryProject
from solidgpt.src.workskill.skills.techsolution import ProvideTechSolution
from solidgpt.src.workskill.skills.write_prd import WritePRD
from solidgpt.src.workskill.skills.autogen_analysis import AutoGenAnalysis
from solidgpt.src.workskill.workskill import WorkSkill
def generate_node(node_id: str, skill: WorkSkill, input_configs: list[SkillInputConfig], output_ids: list[int], manual_review_result: bool = False):
    """Configure *skill* with the given inputs/outputs and wrap it in a WorkNode."""
    input_dicts = [config.to_dict() for config in input_configs]
    output_dicts = [{"id": output_id} for output_id in output_ids]
    skill.init_config(input_dicts, output_dicts)
    return WorkNode(node_id, skill, manual_review_result)
def generate_node_with_output_configs(node_id: str, skill: WorkSkill, input_configs: list[SkillInputConfig], output_configs: list, manual_review_result: bool = False):
    """Like generate_node(), but output configs are passed through as raw dicts."""
    skill.init_config(
        [config.to_dict() for config in input_configs],
        list(output_configs),
    )
    return WorkNode(node_id, skill, manual_review_result)
# Root directory for all graph run outputs (one id/timestamp-named subfolder per run).
# NOTE(review): LOCAL_STORAGE_DIR is not visible in this chunk's imports — confirm it
# is defined upstream in this module.
LOCAL_STORAGE_OUTPUT_DIR = os.path.join(LOCAL_STORAGE_DIR, "workspace", "out")
class WorkGraph:
    """A directed acyclic graph of WorkNodes wired together by skill output ids.

    Each node wraps a WorkSkill. A node's inputs may be loaded from another
    node's outputs (matched by output id), which defines the execution
    dependencies. Results are written under a caller-named (or timestamped)
    directory below LOCAL_STORAGE_OUTPUT_DIR.
    """

    # NOTE(review): class-level mutable declarations. __init__ rebinds most of
    # them per instance, but `cache`, `custom_data` and `notion` are never
    # rebound there and are therefore shared across ALL WorkGraph instances —
    # confirm this sharing is intentional.
    nodes: list[WorkNode] = []
    node_map: dict[str, WorkNode] = {}                # node_id -> WorkNode
    output_map: dict[int, SkillOutput] = {}           # output id -> SkillOutput
    output_id_to_node_map: dict[int, WorkNode] = {}   # output id -> producing node
    display_result: DisplayResult
    notion = None             # lazily created NotionActions (see __node_pause)
    cache = {}                # cross-skill scratch cache, shared with every node
    callback_map: dict = {}   # skill name -> callback injected into that skill
    custom_data: dict = {}    # free-form data skills can read via self.graph

    def __init__(self, output_directory_path_override: str = "", output_id = None):
        """Create an empty graph and decide where its outputs will be written.

        Args:
            output_directory_path_override: Absolute/relative path that, when
                non-empty, replaces the default output location entirely.
            output_id: Name for the run's output folder; defaults to a
                timestamp when omitted.
        """
        # need to manually initialize here (fresh containers per instance;
        # the class-level declarations above would otherwise be shared)
        self.nodes = []
        self.node_map = {}
        self.output_map = {}
        self.output_id_to_node_map = {}
        self.callback_map = {}
        self.display_result = DisplayResult()
        self.output_directory_path = os.path.join(LOCAL_STORAGE_OUTPUT_DIR, output_id or time.strftime("%Y%m%d%H%M%S"))
        if output_directory_path_override:
            self.output_directory_path = os.path.abspath(output_directory_path_override)
        return

    def add_node(self, node: WorkNode):
        """Register *node*, giving it access to the graph-wide cache."""
        node.graph_cache = self.cache
        self.nodes.append(node)

    def init_node_dependencies(self):
        """Build lookup maps, create per-node output dirs, wire dependencies.

        Must be called after all add_node calls and before execute. Exits the
        process if the resulting graph contains a cycle.
        """
        # clear node map and output map
        self.node_map.clear()
        self.output_map.clear()
        self.output_id_to_node_map.clear()
        for node in self.nodes:
            # add node to node map
            self.node_map[node.node_id] = node
            # initialize display_result for children
            node.display_result = self.display_result
            node.skill.display_result = self.display_result
            # intialize callback func for skills
            if node.skill.name in self.callback_map:
                node.skill.callback_func = self.callback_map.get(node.skill.name, None)
            # keep a graph reference in skill
            node.skill.graph = self
            # create directory for node, e.g. <out>/Skill_Name_<node_id>/
            node_directory_path = os.path.join(self.output_directory_path,
                                               (node.skill.name + "_" + str(node.node_id)).replace(" ", "_"))
            if not os.path.exists(node_directory_path):
                # Create the output folder
                os.makedirs(node_directory_path)
                print(f"Directory '{node_directory_path}' created.")
            # add output to output map
            for o in node.skill.outputs:
                # initialize output paths
                o.param_path = os.path.join(node_directory_path, (o.param_name + " " + str(o.id)).replace(" ", "_"))
                # output can be consumed by inputs of other nodes
                # (negative ids mark outputs not exposed to other nodes)
                if o.id >= 0:
                    self.output_map[o.id] = o
                    self.output_id_to_node_map[o.id] = node
        # second iteration after output map has been initialized
        for node in self.nodes:
            # add output dependencies for node
            for i in node.skill.inputs:
                if i.loading_method == SkillInputLoadingMethod.LOAD_FROM_OUTPUT_ID:
                    if i.load_from_output_id == -1:
                        print("Error, " + i.param_name + ": cannot load from output id: -1.")
                        continue
                    node.output_id_dependencies.add(i.load_from_output_id)
                    # add current node to next_node_ids of the output node
                    if i.load_from_output_id not in self.output_map:
                        print("Error, input %s: loading from an invalid output id %d."
                              % (i.param_name, i.load_from_output_id))
                        continue
                    output_node = self.output_id_to_node_map[i.load_from_output_id]
                    if output_node.node_id == node.node_id:
                        print("Error, " + i.param_name + ": cannot load from its own output.")
                        continue
                    output_node.next_node_ids.add(node.node_id)
                    i.skill_output = self.output_map[i.load_from_output_id]
        if self.__is_circular():
            print_error_message("Circular graph detected. Terminating program...")
            exit(1)
        return

    def execute(self):
        """Run the graph: start from every dependency-free node and recurse."""
        logging.info("Executing SolidGPT...")
        first_nodes = []
        for node in self.nodes:
            if len(node.output_id_dependencies) == 0:
                first_nodes.append(node)
        if len(first_nodes) == 0:
            print_error_message("Cannot execute graph, no node can be executed.")
        for node in first_nodes:
            self.__execute_node(node)
        logging.info(f"SolidGPT execution completed. All results are saved in {self.output_directory_path}")

    def __execute_node(self, node: WorkNode):
        """Execute *node* if it is ready, then notify and recurse into successors."""
        if node.can_execute():
            # execute skill
            node.skill.execute()
            # wait for potential node pause
            self.__node_pause(node)
            # notify other nodes: mark this node's outputs as satisfied
            for next_node_id in node.next_node_ids:
                next_node = self.node_map[next_node_id]
                for o in node.skill.outputs:
                    # NOTE(review): set.remove raises KeyError if next_node does
                    # not consume *every* output of this node — consider
                    # discard(); confirm graphs never wire partial consumption.
                    next_node.output_id_dependencies.remove(o.id)
                self.__execute_node(next_node)

    def __node_pause(self, node: WorkNode):
        """Interactive review loop entered after a manual-review node runs.

        Blocks on stdin until the user chooses to continue or stop; also
        offers viewing the result path and opening/syncing it via Notion.
        """
        if node.manual_review_result:
            time.sleep(0.25)
            print("\nPlease review result generated by %s skill in node %s"
                  % (node.skill.name, str(node.node_id)))
            notion_file_opened = False
            while True:
                user_input = input("Execution is halted. Please specify an action, then press Enter. "
                                   "To view all available actions, enter 'help':")
                if same_string(user_input, "help"):
                    help_msg: str = "{:<18}{}\n".format("help", "Show this help message.") + \
                                    "{:<18}{}\n".format("continue", "Continue execution.") + \
                                    "{:<18}{}\n".format("stop", "Stop program.") + \
                                    "{:<18}{}\n".format("path", "Show the path of this result.") + \
                                    "{:<18}{}\n".format("notion-open", "Open the markdown result in notion.") + \
                                    "{:<18}{}\n".format("notion-sync", "Sync the notion result, save it as new output.")
                    print(help_msg)
                    continue
                elif same_string(user_input, "continue"):
                    print("Continuing execution...")
                    break
                elif same_string(user_input, "stop"):
                    print("Exiting the program...")
                    exit(0)
                elif same_string(user_input, "path"):
                    print(os.path.abspath(os.path.dirname(node.skill.outputs[0].param_path)))
                    continue
                elif same_string(user_input, "notion-open"):
                    # Lazily create the Notion client on first use.
                    if self.notion is None:
                        self.notion = NotionActions()
                    if not notion_file_opened:
                        skill_outputs = node.skill.outputs
                        if len(skill_outputs) > 0:
                            first_output = skill_outputs[0]
                            category = first_output.param_category
                            output_path = first_output.param_path
                            # NOTE(review): leftover debug print — remove?
                            print(f"!!!!!!!{type(category)} {category}")
                            if category == SkillIOParamCategory.ProductRequirementsDocument or \
                                    category == SkillIOParamCategory.BusinessRequirementsDocument or \
                                    category == SkillIOParamCategory.HighLevelDesignDocument or \
                                    category == SkillIOParamCategory.PlainText or \
                                    category == SkillIOParamCategory.KanbanBoard:
                                output_md_filepath = add_extension_if_not_exist(output_path, ".md")
                                self.notion.process_markdown_and_upload(output_md_filepath)
                            else:
                                print(f"Notion does not support {SkillIOParamCategory.PlainText} output.")
                                continue
                        else:
                            print("Notion does not support skill with no outputs.")
                            continue
                        notion_file_opened = True
                    else:
                        print("File already opened in Notion.")
                    continue
                elif same_string(user_input, "notion-sync"):
                    if notion_file_opened:
                        skill_outputs = node.skill.outputs
                        first_output = skill_outputs[0]
                        category = first_output.param_category
                        output_path = first_output.param_path
                        if category == SkillIOParamCategory.ProductRequirementsDocument or \
                                category == SkillIOParamCategory.BusinessRequirementsDocument or \
                                category == SkillIOParamCategory.HighLevelDesignDocument or \
                                category == SkillIOParamCategory.PlainText or \
                                category == SkillIOParamCategory.KanbanBoard:
                            output_md_file_dir = os.path.dirname(output_path)
                            output_md_file_name = os.path.basename(output_path)
                            self.notion.sync_from_notion(output_md_file_dir, output_md_file_name)
                            print("Notion file synced.")
                    else:
                        print("notion-open command needs to be used first.")
                    continue
                else:
                    print("Invalid input entered.")
                    continue

    def save_data(self, filename: str = "data.json", generate_debug_info: bool = False):
        """Serialize the graph's nodes to a JSON file."""
        save_data = generate_save_data_from_nodes(self.nodes, generate_debug_info)
        save_to_json(save_data, filename)
        return

    def load_data(self, filename: str = "data.json"):
        """Replace the current nodes with those loaded from a JSON file and rewire."""
        loaded_data = load_from_json(filename)
        self.nodes.clear()
        self.nodes = load_save_data_to_nodes(loaded_data)
        self.init_node_dependencies()
        return

    def __is_circular(self):
        """Return True when the node graph contains a cycle (DFS with a path stack)."""
        visited = {node_id: False for node_id in self.node_map}
        stack = {node_id: False for node_id in self.node_map}
        # check every node because the graph might be disjoint
        for node_id in self.node_map:
            if not visited[node_id]:
                if self.__has_cycle(node_id, visited, stack):
                    return True
        return False

    def __has_cycle(self, current_node_id, visited, stack):
        """DFS helper: True when a back-edge is found on the current path."""
        # mark the current node as visited
        visited[current_node_id] = True
        # add the current node to the stack representing the current path
        stack[current_node_id] = True
        # visit all the neighbors of the current node
        for neighbor_id in self.node_map[current_node_id].next_node_ids:
            # if the neighbor is not visited, visit it
            if not visited[neighbor_id]:
                if self.__has_cycle(neighbor_id, visited, stack):
                    return True
            # if the neighbor is already in the current path, we have found a cycle
            elif stack[neighbor_id]:
                return True
        # remove the current node from the current path stack
        stack[current_node_id] = False
        return False
class SkillInputLoadingMethod(Enum):
    """How a skill input's content is sourced at graph-execution time."""
    LOAD_FROM_OUTPUT_ID = 1      # consume another node's SkillOutput, matched by id
    LOAD_FROM_STRING = 2         # presumably: read from the file path in the config — confirm
    LOAD_FROM_CACHE_STRING = 3   # presumably: use the literal/graph-cache content — confirm
class SkillInputConfig:
    """Plain data holder describing how a single skill input is populated."""

    def __init__(self, param_path: str, loading_method: SkillInputLoadingMethod, load_from_output_id: int, content: str = None):
        # File path for file-backed inputs ("" when unused).
        self.param_path = param_path
        # One of the SkillInputLoadingMethod strategies.
        self.loading_method = loading_method
        # Output id to consume, or -1 when not loading from another node.
        self.load_from_output_id = load_from_output_id
        # Literal content for string/cache-backed inputs.
        self.content = content

    def to_dict(self):
        """Serialize to the plain-dict shape consumed by skill.init_config."""
        return dict(
            param_path=self.param_path,
            loading_method=str(self.loading_method),
            load_from_output_id=self.load_from_output_id,
            content=self.content,
        )
class QueryCodeLocal(WorkSkill):
    """Skill that embeds a user query and retrieves the five most relevant
    code snippets from the onboarding run's local Qdrant collection."""

    # Identifier of the onboarding run whose embedding collection is queried.
    onboarding_id: str = None

    def __init__(self):
        super().__init__()
        self.name = SKILL_NAME_QUERY_CODE
        self.gpt_manager = GPTManager._instance
        self.onboarding_id_input = SkillInput(
            "Onboarding ID",
            SkillIOParamCategory.PlainText,
        )
        self.skill_input = SkillInput(
            "User Input",
            SkillIOParamCategory.PlainText,
        )
        self.add_input(self.onboarding_id_input)
        self.add_input(self.skill_input)
        self.skill_output = SkillOutput(
            "Best five code",
            SkillIOParamCategory.PlainText,
        )
        self.add_output(self.skill_output)
        self.user_input: str = None
        self.qdrant_path = os.path.join(ROOT_DIR, "src", "tools", "qdrant", "embedding")

    def _read_input(self):
        """Pull the onboarding id / query text and open the local Qdrant store."""
        self.onboarding_id = self.onboarding_id_input.content
        self.user_input = self.skill_input.content
        self.client = QdrantClient(path=os.path.join(self.qdrant_path, self.onboarding_id))

    def execution_impl(self):
        """Search for the best-matching snippets, cache and persist the result."""
        logging.info("Start to search best five code...")
        top_five = self.__find_top_five()
        output_file = self.__format_output(top_five)
        self.graph_cache[Cache_Label_Query_Code] = output_file
        # Fix: os.path.join() with a single argument was a no-op wrapper.
        save_to_text(self.skill_output.param_path, output_file)
        self.client.close()
        return

    def __find_top_five(self):
        """Embed the user query and run a top-5 vector search against Qdrant."""
        embeddings_model = OpenAIEmbeddings(openai_api_key=openai.api_key)
        embedding_query = embeddings_model.embed_query(self.user_input)
        search = self.client.search(
            collection_name=self.onboarding_id,
            query_vector=embedding_query,
            limit=5
        )
        return search

    def __format_output(self, top_five):
        """Render search hits with similarity >= 0.7 as summary + fenced-code text.

        Fix: the original signature was missing ``self``, so the call
        ``self.__format_output(top_five)`` raised TypeError (the instance was
        bound to ``top_five`` and the real argument had no parameter).
        """
        ret_str = []
        for candidate in top_five:
            candidate_dict = candidate.dict()  # hoisted: was called three times per hit
            score = candidate_dict["score"]
            if score < 0.7:
                continue
            logging.info(score)
            summary = candidate_dict["payload"]["summary"]
            code = candidate_dict["payload"]["code"]
            ret_str.append(f"The summary is:\n{summary}\nThe code is:\n```\n{code}\n```")
        return "\n".join(ret_str)
class AutoGenAnalysis(WorkSkill):
    """Skill that hands a user requirement plus retrieved repo context to the
    AutoGen agent pipeline."""

    def __init__(self):
        super().__init__()
        self.autogen_manager = AutoGenManager()
        self.name = SKILL_NAME_REPO_CHAT
        self.code_schema = SkillInput("Code Schema", SkillIOParamCategory.PlainText)
        self.summary = SkillInput("Product Summary", SkillIOParamCategory.PlainText)
        self.requirements = SkillInput("Requirements", SkillIOParamCategory.PlainText)
        for declared_input in (self.code_schema, self.summary, self.requirements):
            self.add_input(declared_input)
        # Resolved lazily in _read_input.
        self.related_files_content = None
        self.code_schema_content = None
        self.summary_content = None
        self.requirements_content = None

    def _read_input(self):
        """Resolve input contents from the graph cache and the input files."""
        self.related_files_content = self._get_graph_cached_content(Cache_Label_Query_Code)
        self.code_schema_content = self.__get_input_content(self.code_schema)
        self.summary_content = self.__get_input_content(self.summary)
        self.requirements_content = self.requirements.content

    def __get_input_content(self, skill_input: SkillInput):
        """Load one input's text content from its resolved path."""
        return load_from_text(self.get_input_path(skill_input), extension=".txt")

    def execution_impl(self):
        """Build the AutoGen agents, wire UI callbacks, and run the analysis."""
        self.autogen_manager.construct_agents(self.related_files_content)
        proxy = self.autogen_manager.user_proxy
        graph_data = self.graph.custom_data
        proxy.callback_map["autogen_message_input_callback"] = graph_data.get("autogen_message_input_callback")
        proxy.callback_map["autogen_update_result_callback"] = graph_data.get("autogen_update_result_callback")
        self.autogen_manager.run(self.requirements_content, self.related_files_content)
def build_autogen_analysis_graph(requirement: str, onboarding_graph_id: str, output_id: str,
                                 autogen_message_input_callback, autogen_update_result_callback):
    """Assemble the two-node graph for AutoGen-based repository analysis.

    Node "0" retrieves the code most relevant to *requirement* from the
    onboarding run's embeddings; node "1" feeds that context to the AutoGen
    agents. The UI callbacks are stashed on the graph's custom_data for the
    analysis skill to pick up at execution time.
    """
    onboarding_folder_path = os.path.join(LOCAL_STORAGE_OUTPUT_DIR, onboarding_graph_id)
    code_schema_path = glob.glob(os.path.join(onboarding_folder_path, 'Summary_project_*', "Code_Schema_*"))[0]
    code_summary_path = glob.glob(os.path.join(onboarding_folder_path, 'Summary_project_*', "Code_Summary_*"))[0]

    query_inputs = [
        SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING, -1, onboarding_graph_id),
        SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING, -1, requirement),
    ]
    analysis_inputs = [
        SkillInputConfig(code_schema_path, SkillInputLoadingMethod.LOAD_FROM_STRING, -1),
        SkillInputConfig(code_summary_path, SkillInputLoadingMethod.LOAD_FROM_STRING, -1),
        SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING, -1, requirement),
    ]

    graph = WorkGraph(output_id=output_id)
    graph.add_node(generate_node("0", QueryCodeLocal(), query_inputs, output_ids=[0]))
    graph.add_node(generate_node_with_output_configs("1", AutoGenAnalysis(), analysis_inputs, []))
    graph.custom_data["autogen_message_input_callback"] = autogen_message_input_callback
    graph.custom_data["autogen_update_result_callback"] = autogen_update_result_callback
    return graph
168,065 | import os
import glob
import openai
from solidgpt.definitions import LOCAL_STORAGE_OUTPUT_DIR, TEST_SKILL_WORKSPACE
from solidgpt.src.manager.gptmanager import GPTManager
from solidgpt.src.util.util import save_to_json
from solidgpt.src.workgraph.workgraph import WorkGraph
from solidgpt.src.worknode.worknode import WorkNode
from solidgpt.src.workskill.skillio import SkillInputConfig, SkillInputLoadingMethod
from solidgpt.src.workskill.skills.analysis import ProductAnalysis
from solidgpt.src.workskill.skills.http_codesolution import HTTPCodeSolution
from solidgpt.src.workskill.skills.load_repo import LoadRepo
from solidgpt.src.workskill.skills.query_code_local import QueryCodeLocal
from solidgpt.src.workskill.skills.repo_chat import RepoChat
from solidgpt.src.workskill.skills.summarize_file import SummaryFile
from solidgpt.src.workskill.skills.summary_file_local import SummaryFileLocal
from solidgpt.src.workskill.skills.summary_project import SummaryProject
from solidgpt.src.workskill.skills.techsolution import ProvideTechSolution
from solidgpt.src.workskill.skills.write_prd import WritePRD
from solidgpt.src.workskill.skills.autogen_analysis import AutoGenAnalysis
from solidgpt.src.workskill.workskill import WorkSkill
def generate_node_with_output_configs(node_id: str, skill: WorkSkill, input_configs: list[SkillInputConfig], output_configs: list, manual_review_result: bool = False):
class WorkGraph:
def __init__(self, output_directory_path_override: str = "", output_id = None):
def add_node(self, node: WorkNode):
def init_node_dependencies(self):
def execute(self):
def __execute_node(self, node: WorkNode):
def __node_pause(self, node: WorkNode):
def save_data(self, filename: str = "data.json", generate_debug_info: bool = False):
def load_data(self, filename: str = "data.json"):
def __is_circular(self):
def __has_cycle(self, current_node_id, visited, stack):
class SkillInputLoadingMethod(Enum):
class SkillInputConfig:
def __init__(self, param_path: str, loading_method: SkillInputLoadingMethod, load_from_output_id: int, content: str = None):
def to_dict(self):
class HTTPCodeSolution(WorkSkill):
def __init__(self):
def _read_input(self):
def execution_impl(self):
def copy_templates(self):
def create_schema(self):
def modify_todo_create(self):
def modify_todo_update(self):
def remove_dir(self):
def get_uuid():
def build_http_solution_graph(requirement: str, session_id: str):
    """Build a single-node graph that runs HTTPCodeSolution on *requirement*.

    The node's sole output (id 1) is flagged for display.
    """
    graph = WorkGraph(output_id=session_id)
    # NOTE(review): graph_cache is built here but never attached to the graph
    # or passed anywhere — dead code, or was graph.cache intended? Confirm.
    graph_cache = {}
    graph_cache["session_id"] = session_id
    http_solution = generate_node_with_output_configs("0", HTTPCodeSolution(),
                                                      [
                                                          SkillInputConfig("", SkillInputLoadingMethod.LOAD_FROM_CACHE_STRING, -1, requirement),
                                                      ], output_configs=[{"id": 1, "to_display": True}])
    graph.add_node(http_solution)
    return graph
168,066 |
def get_custom_skills_assumption_role_prompt(question_subject):
    """Build a prompt asking the model, as an expert in *question_subject*,
    for the top 5 essential hard skills, '&&'-separated."""
    # NOTE(review): the typos "softskill"/"seperate" are part of the prompt
    # actually sent to the model; fix deliberately (behavior change) if desired.
    return f"""Assume you are the expert of {question_subject}.
I want to know the list of top 5 essential actual hard skills (no softskill) for the {question_subject}. Can you please list them for me and use && sign to seperate them?"""
168,067 |
def build_gpt_prompt(role_assumption: str, output_format: str):
    """Compose a GPT system prompt from a role assumption and a required output format."""
    format_clause = f"Always follow the Output format which is: {output_format}"
    return "\n\n".join([role_assumption, format_clause])
168,068 |
def build_gpt_standard_prompt(role_assumption: str, description: str, output_format: str):
    """Compose a GPT prompt from a role assumption, a task description, and an output format."""
    return (
        role_assumption
        + "\n\nThis task description: " + description
        + "\n\n Output format: " + output_format
    )
168,069 |
def build_custom_skill_gpt_prompt(role_assumption: str, instruction: str, principles: str, few_shots: str):
    """Compose the system prompt for a user-defined custom skill from its role
    assumption, instruction, principles, and few-shot examples."""
    return f'''{role_assumption}\n\n
Here are instruction, always response follow the instruction: {instruction}\n\n
Here are principles you need to always follow when give the response: {principles}\n\n
Here are the prompt and completion examples: {few_shots}
If no suitable content then response base on your professional knowledge. '''
168,070 |
The provided code snippet includes necessary dependencies for implementing the `llama_v2_prompt` function. Write a Python function `def llama_v2_prompt(messages)` to solve the following problem:
Convert messages given as a list of role/content dictionaries into the Llama 2 compliant prompt format.
Here is the function:
def llama_v2_prompt(messages):
    """Convert a chat history (list of role/content dicts) into a single
    Llama 2 compliant prompt string. The input list is not modified."""
    bos, eos = "<s>", "</s>"
    inst_open, inst_close = "[INST]", "[/INST]"
    sys_open, sys_close = "<<SYS>>\n", "\n<</SYS>>\n\n"
    default_system_prompt = "You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."

    # Ensure a system message leads the conversation.
    convo = list(messages)
    if convo[0]["role"] != "system":
        convo = [{"role": "system", "content": default_system_prompt}] + convo

    # Llama 2 folds the system prompt into the first user turn.
    folded_first = {
        "role": convo[1]["role"],
        "content": sys_open + convo[0]["content"] + sys_close + convo[1]["content"],
    }
    convo = [folded_first] + convo[2:]

    # Each (prompt, answer) pair becomes one closed instruction block.
    pieces = []
    for prompt, answer in zip(convo[0::2], convo[1::2]):
        pieces.append(
            f"{bos}{inst_open} {prompt['content'].strip()} {inst_close} {answer['content'].strip()} {eos}"
        )
    # The trailing (unanswered) turn opens a new, unclosed instruction block.
    pieces.append(f"{bos}{inst_open} {convo[-1]['content'].strip()} {inst_close}")
    return "".join(pieces)
168,071 | import logging
import autogen
from autogen import oai
import openai
from solidgpt.src.configuration.configreader import ConfigReader
from typing import Callable, Dict, Optional, Union
from solidgpt.src.manager.promptresource import DEFAULT_SYSTEM_MESSAGE, ASSISTANT_SYSTEM_MESSAGE
def colored(x, *args, **kwargs):
    # No-op stand-in for a terminal text-coloring helper (presumably
    # termcolor.colored — confirm): returns the text unchanged and
    # silently ignores every styling argument.
    return x
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.